Example #1
def test(iraf):

    # Test dimensions: should be the same as the one input image
    print "Dimensions:", Intervals.dimensionsAsLongArray(iraf)

    # Test Cursor
    c = iraf.cursor()
    pos = zeros(2, 'l')
    while c.hasNext():
        c.fwd()
        c.localize(pos)
        print "Cursor:", pos, "::", c.get()

    # Test RandomAccess
    ra = iraf.randomAccess()
    c = iraf.cursor()
    while c.hasNext():
        c.fwd()
        ra.setPosition(c)
        c.localize(pos)
        print "RandomAccess:", pos, "::", ra.get()

    # Test source img: should be untouched
    c = img.cursor()
    while c.hasNext():
        print "source:", c.next()

    # Test interval view: the middle 2x2 square
    v = Views.interval(iraf, [1, 1], [2, 2])
    IL.wrap(v, "+2 view").show()
Example #2
def makeInterpolatedImage(img1, img2, weight):
    """ weight: float between 0 and 1 """
    edge_pix1 = findEdgePixels(img1)
    kdtree1 = KDTree(edge_pix1, edge_pix1)
    search1 = NearestNeighborSearchOnKDTree(kdtree1)
    edge_pix2 = findEdgePixels(img2)
    kdtree2 = KDTree(edge_pix2, edge_pix2)
    search2 = NearestNeighborSearchOnKDTree(kdtree2)
    img3 = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img1))
    c1 = img1.cursor()
    c2 = img2.cursor()
    c3 = img3.cursor()
    pos = zeros(img1.numDimensions(), 'l')
    while c3.hasNext():
        t1 = c1.next()
        t2 = c2.next()
        t3 = c3.next()
        sign1 = -1 if 0 == t1.get() else 1
        sign2 = -1 if 0 == t2.get() else 1
        search1.search(c1)
        search2.search(c2)
        value1 = sign1 * search1.getDistance() * weight
        value2 = sign2 * search2.getDistance() * (1 - weight)
        if value1 + value2 > 0:
            t3.setOne()
    return img3
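# Hedged usage sketch (not part of the original example): interpolate between two
# binary masks at several weights and show the frames as a stack. Assumes img1 and
# img2 are binary masks of equal dimensions and that the imports used by
# makeInterpolatedImage above (findEdgePixels, KDTree, ArrayImgs, Intervals, zeros) are in scope.
from net.imglib2.view import Views
from net.imglib2.img.display.imagej import ImageJFunctions as IL

frames = [makeInterpolatedImage(img1, img2, w / 10.0) for w in xrange(11)]
IL.wrap(Views.stack(frames), "interpolated masks").show()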
Example #3
    def asNormalizedUnsignedByteArrayImg(interval, invert, blockRadius, n_bins,
                                         slope, matrices, copy_threads, index,
                                         imp):
        sp = imp.getProcessor()  # ShortProcessor
        sp.setRoi(interval.min(0), interval.min(1),
                  interval.max(0) - interval.min(0) + 1,
                  interval.max(1) - interval.min(1) + 1)
        sp = sp.crop()
        if invert:
            sp.invert()
        CLAHE.run(
            ImagePlus("", sp), blockRadius, n_bins, slope, None
        )  # far less memory requirements than NormalizeLocalContrast, and faster.
        minimum, maximum = autoAdjust(sp)

        # Transform and convert image to 8-bit, mapping to display range
        img = ArrayImgs.unsignedShorts(
            sp.getPixels(), [sp.getWidth(), sp.getHeight()])
        sp = None
        affine = AffineTransform2D()
        affine.set(matrices[index])
        imgI = Views.interpolate(Views.extendZero(img),
                                 NLinearInterpolatorFactory())
        imgA = RealViews.transform(imgI, affine)
        imgT = Views.zeroMin(Views.interval(imgA, img))
        imgMinMax = convert(imgT, RealUnsignedByteConverter(minimum, maximum),
                            UnsignedByteType)
        aimg = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img))
        ImgUtil.copy(ImgView.wrap(imgMinMax, aimg.factory()), aimg,
                     copy_threads)
        img = imgI = imgA = imgT = imgMinMax = None
        return aimg
Example #4
def twoStep(index=0):
    # The current way:
    img = klb.readFull(filepaths[index])  # klb_loader.get(filepaths[index])
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    imgT = RealViews.transform(imgI, cmIsotropicTransforms[index])
    imgB = Views.zeroMin(Views.interval(imgT, roi[0],
                                        roi[1]))  # bounded: crop with ROI
    imgBA = ArrayImgs.unsignedShorts(Intervals.dimensionsAsLongArray(imgB))
    ImgUtil.copy(ImgView.wrap(imgB, imgBA.factory()), imgBA)
    imgP = prepareImgForDeconvolution(
        imgBA,
        affine3D(fineTransformsPostROICrop[index]).inverse(),
        FinalInterval([0, 0, 0], [imgB.dimension(d) - 1 for d in xrange(3)]))
    # Copy transformed view into ArrayImg for best performance in deconvolution
    imgA = ArrayImgs.floats(Intervals.dimensionsAsLongArray(imgP))
    ImgUtil.copy(ImgView.wrap(imgP, imgA.factory()), imgA)
    IL.wrap(imgA, "two step").show()
Example #5
 def projectMax(img, minC, maxC, reduce_max):
     # Collapse the last dimension of the interval and reduce each pixel vector to its maximum
     imgC = convert(Views.collapseReal(Views.interval(img, minC, maxC)),
                    reduce_max.newInstance(), imglibtype)
     imgA = ArrayImgs.unsignedShorts(Intervals.dimensionsAsLongArray(imgC))
     ImgUtil.copy(ImgView.wrap(imgC, img.factory()), imgA)
     return imgA
Example #6
 def translate(self, dx, dy):
     a = zeros(2, 'l')
     self.interval.min(a)
     width = self.cell_dimensions[0]
     height = self.cell_dimensions[1]
     x0 = max(0, min(a[0] + dx, self.img_dimensions[0] - width))
     y0 = max(0, min(a[1] + dy, self.img_dimensions[1] - height))
     self.interval = FinalInterval([x0, y0],
                                   [x0 + width - 1, y0 + height - 1])
     syncPrintQ(str(Intervals.dimensionsAsLongArray(self.interval)))
     self.cache.clear()
Example #7
def makeImg(filepaths, pixelType, loadImg, img_dimensions, matrices,
            cropInterval, preload):
    dims = Intervals.dimensionsAsLongArray(cropInterval)
    voldims = [dims[0], dims[1], len(filepaths)]
    cell_dimensions = [dims[0], dims[1], 1]
    grid = CellGrid(voldims, cell_dimensions)
    cellGet = TranslatedSectionGet(filepaths,
                                   loadImg,
                                   matrices,
                                   img_dimensions,
                                   cell_dimensions,
                                   cropInterval,
                                   preload=preload)
    return LazyCellImg(grid, pixelType(), cellGet), cellGet
Example #8
 def crop(event):
   global cropped, cropped_imp
   coords = [int(float(tf.getText())) for tf in textfields]
   minC = [max(0, c) for c in coords[0:3]]
   maxC = [min(d -1, c) for d, c in izip(Intervals.dimensionsAsLongArray(images[0]), coords[3:6])]
   storeRoi(minC, maxC)
   print "ROI min and max coordinates"
   print minC
   print maxC
   cropped = [Views.zeroMin(Views.interval(img, minC, maxC)) for img in images]
   cropped_imp = showAsStack(cropped, title="cropped")
   cropped_imp.setDisplayRange(imp.getDisplayRangeMin(), imp.getDisplayRangeMax())
   if cropContinuationFn:
     cropContinuationFn(images, minC, maxC, cropped, cropped_imp)
Example #9
    def asNormalizedUnsignedByteArrayImg(interval, invert, blockRadius, n_bins,
                                         slope, matrices, index, imp):
        sp = imp.getProcessor()  # ShortProcessor
        # Crop to interval if needed
        x = interval.min(0)
        y = interval.min(1)
        width = interval.max(0) - interval.min(0) + 1
        height = interval.max(1) - interval.min(1) + 1
        if 0 != x or 0 != y or sp.getWidth() != width or sp.getHeight(
        ) != height:
            sp.setRoi(x, y, width, height)
            sp = sp.crop()

        if invert:
            sp.invert()

        CLAHE.run(
            ImagePlus("", sp), blockRadius, n_bins, slope, None
        )  # far less memory requirements than NormalizeLocalContrast, and faster.
        minimum, maximum = autoAdjust(sp)

        # Transform and convert image to 8-bit, mapping to display range
        img = ArrayImgs.unsignedShorts(
            sp.getPixels(), [sp.getWidth(), sp.getHeight()])
        sp = None
        imp = None
        # Must use linear interpolation for subpixel precision
        affine = AffineTransform2D()
        affine.set(matrices[index])
        imgI = Views.interpolate(Views.extendZero(img),
                                 NLinearInterpolatorFactory())
        imgA = RealViews.transform(imgI, affine)
        imgT = Views.zeroMin(Views.interval(imgA, img))
        # Convert to 8-bit
        imgMinMax = convert2(imgT,
                             RealUnsignedByteConverter(minimum, maximum),
                             UnsignedByteType,
                             randomAccessible=False)  # use IterableInterval
        aimg = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img))
        # ImgUtil copies multi-threaded, which is not appropriate here as there are many other images being copied too
        #ImgUtil.copy(ImgView.wrap(imgMinMax, aimg.factory()), aimg)

        # Single-threaded copy
        copier = createBiConsumerTypeSet(UnsignedByteType)
        LoopBuilder.setImages(imgMinMax, aimg).forEachPixel(copier)

        img = imgI = imgA = imgMinMax = imgT = None
        return aimg
Example #10
 def prepare(index):
     # Prepare the img for deconvolution:
     # 0. Transform in one step.
     # 1. Ensure its pixel values conform to expectations (no zeros inside)
     # 2. Copy it into an ArrayImg for faster recurrent retrieval of same pixels
     syncPrint("Preparing %s CM0%i for deconvolution" % (tm_dirname, index))
     img = klb_loader.get(filepaths[index])  # of UnsignedShortType
     imgP = prepareImgForDeconvolution(
         img, transforms[index], target_interval)  # returns of FloatType
     # Copy transformed view into ArrayImg for best performance in deconvolution
     imgA = ArrayImgs.floats(Intervals.dimensionsAsLongArray(imgP))
     #ImgUtil.copy(ImgView.wrap(imgP, imgA.factory()), imgA)
     ImgUtil.copy(imgP, imgA, n_threads / 2)  # parallel copying
     syncPrint("--Completed preparing %s CM0%i for deconvolution" %
               (tm_dirname, index))
     imgP = None
     img = None
     return (index, imgA)
Example #11
def makeInterpolatedImage(img1, search1, img2, search2, weight):
  """ weight: float between 0 and 1 """
  img3 = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img1))
  c1 = img1.cursor()
  c2 = img2.cursor()
  c3 = img3.cursor()
  while c3.hasNext():
    t1 = c1.next()
    t2 = c2.next()
    t3 = c3.next()
    sign1 = -1 if 0 == t1.get() else 1
    sign2 = -1 if 0 == t2.get() else 1
    search1.search(c1)
    search2.search(c2)
    value1 = sign1 * search1.getDistance() * (1 - weight)
    value2 = sign2 * search2.getDistance() * weight
    if value1 + value2 > 0:
      t3.setOne()
  return img3
Example #12
def oneStep(index=0):
    # Combining transforms into one, via a translation to account of the ROI crop
    img = klb.readFull(filepaths[index])  # klb_loader.get(filepaths[index])
    t1 = cmIsotropicTransforms[index]
    t2 = affine3D(
        [1, 0, 0, -roi[0][0], 0, 1, 0, -roi[0][1], 0, 0, 1, -roi[0][2]])
    t3 = affine3D(fineTransformsPostROICrop[index]).inverse()
    aff = AffineTransform3D()
    aff.set(t1)
    aff.preConcatenate(t2)
    aff.preConcatenate(t3)
    # Final interval is now rooted at 0,0,0 given that the transform includes the translation
    imgP = prepareImgForDeconvolution(
        img, aff,
        FinalInterval([0, 0, 0],
                      [maxC - minC for minC, maxC in izip(roi[0], roi[1])]))
    # Copy transformed view into ArrayImg for best performance in deconvolution
    imgA = ArrayImgs.floats(Intervals.dimensionsAsLongArray(imgP))
    ImgUtil.copy(ImgView.wrap(imgP, imgA.factory()), imgA)
    IL.wrap(imgA, "one step index %i" % index).show()
Example #13
fi.nImages = 128

baseDir = "/home/albert/lab/scripts/data/cim.mcgill.ca-shape-benchmark/"

bird = IL.wrap(Raw.open(baseDir + "/birdsIm/b21.im", fi))
airplane = IL.wrap(Raw.open(baseDir + "/airplanesIm/b14.im", fi))

# Rotate bird
# Starts with posterior view
# Rotate 180 degrees around Y axis
# Set to dorsal up: 180 degrees
birdY90 = Views.rotate(bird, 2, 0)  # 90
birdY180 = Views.rotate(birdY90, 2, 0)  # 90 again: 180

c1 = Views.iterable(birdY180).cursor()
img1 = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(birdY180))
c2 = img1.cursor()
while c2.hasNext():
    c2.next().set(c1.next())

# Rotate airplane
# Starts with dorsal view, anterior down
# Set to: coronal view, but dorsal is down
airplaneC = Views.rotate(airplane, 2, 1)
# Set to dorsal up: 180 degrees
airplaneC90 = Views.rotate(airplaneC, 0, 1)  # 90
airplaneC180 = Views.rotate(airplaneC90, 0, 1)  # 90 again: 180

c1 = Views.iterable(airplaneC180).cursor()
img2 = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(airplaneC180))
c2 = img2.cursor()
Example #14
    print median, max_sum

    # Turns out the maximum is infinity.
    # Therefore, discard all infinity values, and also any above 1.5 * median
    threshold = median * 1.5

    filtered = [
        filename for filename, pixel_sum in sums if pixel_sum < threshold
    ]

    n_threads = Runtime.getRuntime().availableProcessors()
    threads = []
    chunk_size = len(filtered) / n_threads
    aimgs = []
    first = klb.readFull(os.path.join(srcDir, filtered[0]))
    dimensions = Intervals.dimensionsAsLongArray(first)

    for i in xrange(n_threads):
        m = Max(dimensions, filtered[i * chunk_size:(i + 1) * chunk_size])
        m.start()
        threads.append(m)

    # Await completion of all
    for m in threads:
        m.join()

    # Merge all results into a single maximum projection
    max_projection = computeInto(maximum([m.aimg for m in threads]),
                                 ArrayImgs.floats(dimensions))

    max3D = writeZip(max_projection,
Example #15
IL.wrap(img_sub, "LoopBuilder").show()
"""

# Example 2b: with ImgLib2 LoopBuilder using a clojure-defined TriConsumer
from net.imglib2.img.display.imagej import ImageJFunctions as IL
from net.imglib2.converter import Converters
from net.imglib2.img.array import ArrayImgs
from net.imglib2.util import Intervals
from net.imglib2.loops import LoopBuilder
from org.scijava.plugins.scripting.clojure import ClojureScriptEngine

img = IL.wrap(imp_rgb) # an ARGBType Img
red   = Converters.argbChannel(img, 1) # a view of the ARGB red channel
green = Converters.argbChannel(img, 2) # a view of the ARGB green channel
img_sub = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img))    # the img to store the result

code = """
(deftype Consumer [^long threshold]
  %s
  (accept [self red green result] ; can't type-hint, doesn't find matching method
    (let [^%s r red
          ^%s g green
          ^%s s result]
      (.setInteger s (if (>= (.getInteger r) threshold)
                       (.getInteger g)
                       0)))))
""" % ((LoopBuilder.TriConsumer.getName(),) \
      + tuple(a.randomAccess().get().getClass().getName() for a in [red, green, img_sub]))

clj = ClojureScriptEngine()
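# Hedged continuation (not in the original excerpt): evaluate the Clojure code,
# instantiate the generated TriConsumer with an arbitrary threshold, and run it
# over the three images. Assumes the deftype form evaluates to the generated class,
# as it does at a Clojure REPL.
consumer_class = clj.eval(code)                                  # the deftype'd class
consumer = consumer_class.getConstructors()[0].newInstance(100)  # threshold = 100 (arbitrary)
LoopBuilder.setImages(red, green, img_sub).forEachPixel(consumer)
IL.wrap(img_sub, "LoopBuilder with Clojure TriConsumer").show()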
Example #16
def maxProjectLastDimension(img, strategy="1by1", chunk_size=0):
  last_dimension = img.numDimensions() -1

  if "1by1" == strategy:
    exe = newFixedThreadPool()
    try:
      n_threads = exe.getCorePoolSize()
      imgTs = [ArrayImgs.unsignedShorts(list(Intervals.dimensionsAsLongArray(img))[:-1]) for i in xrange(n_threads)]
      
      def mergeMax(img1, img2, imgT):
        return compute(maximum(img1, img2)).into(imgT)

      def hyperSlice(index):
        return Views.hyperSlice(img, last_dimension, index)

      # The first n_threads mergeMax:
      futures = [exe.submit(Task(mergeMax, hyperSlice(i*2), hyperSlice(i*2 +1), imgTs[i]))
                 for i in xrange(n_threads)]
      # As soon as one finishes, merge it with the next available hyperSlice
      next = n_threads
      while len(futures) > 0: # i.e. not empty
        imgT = futures.pop(0).get()
        if next < img.dimension(last_dimension):
          futures.append(exe.submit(Task(mergeMax, imgT, hyperSlice(next), imgT)))
          next += 1
        else:
          # Run out of hyperSlices to merge
          if 0 == len(futures):
            return imgT # done
          # Merge imgT to each other until none remain
          futures.append(exe.submit(Task(mergeMax, imgT, futures.pop(0).get(), imgT)))
    finally:
      exe.shutdownNow()
  else:
    # By chunks
    imglibtype =  img.randomAccess().get().getClass()
    # The Converter class
    reduce_max = makeCompositeToRealConverter(reducer_class=Math,
                                              reducer_method="max",
                                              reducer_method_signature="(DD)D")
    if chunk_size > 0:
      # map reduce approach
      exe = newFixedThreadPool()
      try:
        def projectMax(img, minC, maxC, reduce_max):
          imgC = convert(Views.collapseReal(Views.interval(img, minC, maxC)),
                         reduce_max.newInstance(), imglibtype)
          imgA = ArrayImgs.unsignedShorts(Intervals.dimensionsAsLongArray(imgC))
          ImgUtil.copy(ImgView.wrap(imgC, img.factory()), imgA)
          return imgA
        
        # The min and max coordinates of all dimensions except the last one
        minCS = [0 for d in xrange(last_dimension)]
        maxCS = [img.dimension(d) -1 for d in xrange(last_dimension)]

        # Process every chunk in parallel
        futures = [exe.submit(Task(projectMax, img, minCS + [offset],
                                   maxCS + [min(offset + chunk_size, img.dimension(last_dimension)) -1],
                                   reduce_max))
                   for offset in xrange(0, img.dimension(last_dimension), chunk_size)]

        # Wait for all chunk projections, then merge them pairwise into a single image
        imgs = [f.get() for f in futures]
        return reduce(lambda img1, img2: compute(maximum(img1, img2)).into(img1), imgs)
      finally:
        exe.shutdownNow()
    else:
      # One chunk: all at once
      # Each sample of img3DV is a virtual vector over all time frames at that 3D coordinate
      # Reduce each vector to a single scalar, using a Converter
      img3DC = convert(Views.collapseReal(img), reduce_max.newInstance(), imglibtype)
      imgA = ArrayImgs.unsignedShorts([img.dimension(d) for d in xrange(last_dimension)])
      ImgUtil.copy(ImgView.wrap(img3DC, img.factory()), imgA)
      return imgA
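# Hedged usage sketch (not part of the original): project a 4D (x, y, z, t) image
# over its last (time) dimension. 'img4d' is a hypothetical input; the helpers used
# inside maxProjectLastDimension (Task, newFixedThreadPool, compute, maximum,
# makeCompositeToRealConverter, convert) come from the same script as the function.
from net.imglib2.img.display.imagej import ImageJFunctions as IL

projected = maxProjectLastDimension(img4d, strategy="1by1")
IL.wrap(projected, "max projection over time").show()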
Example #17
def makeCropUI(imp, images, tgtDir, panel=None, cropContinuationFn=None):
  """ imp: the ImagePlus to work on.
      images: the list of ImgLib2 images, one per frame, not original but already isotropic.
              (These are views that use a nearest neighbor interpolation using the calibration to scale to isotropy.)
      tgtDir: the target directory where e.g. CSV files will be stored, for ROI, features, pointmatches.
      panel: optional, a JPanel controlled by a GridBagLayout.
      cropContinuationFn: optional, a function to execute after cropping,
                          which is given as arguments the original images,
                          minC, maxC (both define a ROI), and the cropped images. """
  independent = None == panel
  if not panel:
    panel = JPanel()
    panel.setBorder(BorderFactory.createEmptyBorder(10,10,10,10))
    gb = GridBagLayout()
    gc = GBC()
  else:
    gb = panel.getLayout()
    # Constraints of the last component
    gc = gb.getConstraints(panel.getComponent(panel.getComponentCount() - 1))
    
    # Horizontal line to separate prior UI components from crop UI
    gc.gridx = 0
    gc.gridy += 1
    gc.gridwidth = 4
    gc.anchor = GBC.WEST
    gc.fill = GBC.HORIZONTAL
    sep = JSeparator()
    sep.setMinimumSize(Dimension(200, 10))
    gb.setConstraints(sep, gc)
    panel.add(sep)

  # ROI UI header
  title = JLabel("ROI controls:")
  gc.gridy +=1
  gc.anchor = GBC.WEST
  gc.gridwidth = 4
  gb.setConstraints(title, gc)
  panel.add(title)

  # Column labels for the min and max coordinates
  gc.gridy += 1
  gc.gridwidth = 1
  for i, title in enumerate(["", "X", "Y", "Z"]):
    gc.gridx = i
    gc.anchor = GBC.CENTER
    label = JLabel(title)
    gb.setConstraints(label, gc)
    panel.add(label)

  textfields = []
  rms = []

  # Load stored ROI if any
  roi_path = path = os.path.join(tgtDir, "crop-roi.csv")
  if os.path.exists(roi_path):
    with open(roi_path, 'r') as csvfile:
      reader = csv.reader(csvfile, delimiter=',', quotechar="\"")
      reader.next() # header
      minC = map(int, reader.next()[1:])
      maxC = map(int, reader.next()[1:])
      # Place the ROI over the ImagePlus
      imp.setRoi(Roi(minC[0], minC[1], maxC[0] + 1 - minC[0], maxC[1] + 1 - minC[1]))
  else:
    # Use whole image dimensions
    minC = [0, 0, 0]
    maxC = [v -1 for v in Intervals.dimensionsAsLongArray(images[0])]

  # Text fields for the min and max coordinates
  for rowLabel, coords in izip(["min coords: ", "max coords: "],
                               [minC, maxC]):
    gc.gridx = 0
    gc.gridy += 1
    label = JLabel(rowLabel)
    gb.setConstraints(label, gc)
    panel.add(label)
    for i in xrange(3):
      gc.gridx += 1
      tf = JTextField(str(coords[i]), 10)
      gb.setConstraints(tf, gc)
      panel.add(tf)
      textfields.append(tf)
      listener = RoiMaker(imp, textfields, len(textfields) -1)
      rms.append(listener)
      tf.addKeyListener(listener)
      tf.addMouseWheelListener(listener)

  # Listen to changes in the ROI of imp
  rfl = RoiFieldListener(imp, textfields)
  Roi.addRoiListener(rfl)
  # ... and enable cleanup
  ImagePlus.addImageListener(FieldDisabler(rfl, rms))

  # Functions for cropping images
  cropped = None
  cropped_imp = None

  def storeRoi(minC, maxC):
    if os.path.exists(roi_path):
      # Load ROI
      with open(path, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',', quotechar="\"")
        reader.next() # header
        same = True
        for a, b in izip(minC + maxC, map(int, reader.next()[1:] + reader.next()[1:])):
          if a != b:
            same = False
            # Invalidate any CSV files for features and pointmatches: different cropping
            for filename in os.listdir(tgtDir):
              if filename.endswith("features.csv") or filename.endswith("pointmatches.csv"):
                os.remove(os.path.join(tgtDir, filename))
            break
        if same:
          return
    # Store the ROI as crop-roi.csv
    with open(roi_path, 'w') as csvfile:
      w = csv.writer(csvfile, delimiter=',', quotechar="\"", quoting=csv.QUOTE_NONNUMERIC)
      w.writerow(["coords", "x", "y", "z"])
      w.writerow(["min"] + map(int, minC))
      w.writerow(["max"] + map(int, maxC))
  
  def crop(event):
    global cropped, cropped_imp
    coords = [int(float(tf.getText())) for tf in textfields]
    minC = [max(0, c) for c in coords[0:3]]
    maxC = [min(d -1, c) for d, c in izip(Intervals.dimensionsAsLongArray(images[0]), coords[3:6])]
    storeRoi(minC, maxC)
    print "ROI min and max coordinates"
    print minC
    print maxC
    cropped = [Views.zeroMin(Views.interval(img, minC, maxC)) for img in images]
    cropped_imp = showAsStack(cropped, title="cropped")
    cropped_imp.setDisplayRange(imp.getDisplayRangeMin(), imp.getDisplayRangeMax())
    if cropContinuationFn:
      cropContinuationFn(images, minC, maxC, cropped, cropped_imp)

  # Buttons to create a ROI and to crop to ROI,
  # which when activated enables the fine registration buttons
  crop_button = JButton("Crop to ROI")
  crop_button.addActionListener(crop)
  gc.gridx = 0
  gc.gridy += 1
  gc.gridwidth = 4
  gc.anchor = GBC.WEST
  buttons_panel = JPanel()
  buttons_panel.add(crop_button)
  gb.setConstraints(buttons_panel, gc)
  panel.add(buttons_panel)

  if independent:
    frame = JFrame("Crop by ROI")
    frame.getContentPane().add(panel)
    frame.pack()
    frame.setDefaultCloseOperation(JFrame.DO_NOTHING_ON_CLOSE)
    frame.addWindowListener(CloseControl(destroyables=rms + [rfl]))
    frame.setVisible(True)
  else:
    # Re-pack the JFrame
    parent = panel.getParent()
    while not isinstance(parent, JFrame) and parent is not None:
      parent = parent.getParent()

    if parent:
      frame = parent
      frame.pack()
      found = False
      for wl in frame.getWindowListeners():
        if isinstance(wl, CloseControl):
          wl.addDestroyables(rms + [rfl])
          found = True
          break
      if not found:
        frame.addWindowListener(CloseControl(destroyables=rms + [rfl]))
      frame.setVisible(True)

  return panel
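# Hedged usage sketch (not part of the original): launch the crop UI as a standalone
# frame. imp and images are assumed to exist as described in the docstring above;
# the target directory path is hypothetical.
makeCropUI(imp, images, "/path/to/registration-data")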
Example #18
from net.imglib2.algorithm.math.ImgMath import compute, gen
from net.imglib2.img.display.imagej import ImageJFunctions as IL
from net.imglib2.type.numeric.integer import UnsignedByteType
from net.imglib2.img.array import ArrayImgs
from net.imglib2.util import Intervals
from net.imglib2 import KDTree, Point

locations = [(10, 15), (25, 40), (30, 75), (80, 60)]

points = [Point.wrap([x, y]) for x, y in locations]
values = [UnsignedByteType(v) for v in [128, 164, 200, 255]]

kt = KDTree(values, points)
dimensions = [100, 100]

op = gen(kt, 10)
target = ArrayImgs.unsignedBytes(
    Intervals.dimensionsAsLongArray(op.getInterval()))
compute(op).into(target)

IL.wrap(target, "KDTree").show()
Example #19
 def get(self, index):
     img = self.asArrayImg(index, self.loadFn(self.filepaths[index]))
     dims = Intervals.dimensionsAsLongArray(img)
     return Cell(
         list(dims) + [1], [0] * img.numDimensions() + [index],
         img.update(None))
Example #20
    bytes = zeros(width * height * depth, 'b')
    ra.read(bytes)
    return ArrayImgs.unsignedBytes(bytes, [width, height, depth])
  finally:
    ra.close()

bird = readBinaryMaskImg(os.path.join(baseDir, "birdsIm/b21.im"), 128, 128, 128, 1024)
airplane = readBinaryMaskImg(os.path.join(baseDir, "airplanesIm/b14.im"), 128, 128, 128, 1024)

# Rotate bird: starts with posterior view, dorsal down
# Rotate 180 degrees around Y axis
birdY90 = Views.rotate(bird, 2, 0) # 90
birdY180 = Views.rotate(birdY90, 2, 0) # 90 again: 180

# Copy rotated bird into ArrayImg
dims = Intervals.dimensionsAsLongArray(birdY90)
img1 = compute(ImgSource(birdY180)).into(ArrayImgs.unsignedBytes(dims))

# Rotate airplane: starts with dorsal view, anterior down
# Set to: coronal view, but dorsal is still down
airplaneC = Views.rotate(airplane, 2, 1)
# Set to dorsal up: rotate 180 degrees
airplaneC90 = Views.rotate(airplaneC, 0, 1) # 90
airplaneC180 = Views.rotate(airplaneC90, 0, 1) # 90 again: 180

# Copy rotated airplace into ArrayImg
img2 = compute(ImgSource(airplaneC180)).into(ArrayImgs.unsignedBytes(dims))


# Find edges
def findEdgePixels(img):
Example #21
 def get(self, index):
   img = self.asArrayImg(index, self.loadFn(self.filepaths[index]))
   dims = Intervals.dimensionsAsLongArray(img)
   return Cell(list(dims) + [1], # cell dimensions
               [0] * img.numDimensions() + [index], # position in the grid: 0, 0, 0, Z-index
               img.update(None)) # get the underlying DataAccess
Example #22
def export8bitN5(filepaths,
                 img_dimensions,
                 matrices,
                 name,
                 exportDir,
                 interval,
                 gzip_compression=6,
                 block_size=[128, 128, 128]):
    """
  Export into an N5 volume, in parallel, in 8-bit.

  name: name to assign to the N5 volume.
  filepaths: the file paths of the serial sections to export, in order.
  exportDir: the directory into which to save the N5 volume.
  interval: for cropping.
  gzip_compression: defaults to 6 as suggested by Saalfeld.
  block_size: defaults to 128x128x128 px.
  """

    dims = Intervals.dimensionsAsLongArray(interval)
    voldims = [dims[0], dims[1], len(filepaths)]
    cell_dimensions = [dims[0], dims[1], 1]

    def asNormalizedUnsignedByteArrayImg(blockRadius, stds, center, stretch,
                                         imp):
        sp = imp.getProcessor()  # ShortProcessor
        NormalizeLocalContrast().run(sp, blockRadius, blockRadius, stds,
                                     center, stretch)
        return ArrayImgs.unsignedBytes(
            sp.convertToByte(True).getPixels(),
            [sp.getWidth(), sp.getHeight()])

    loader = SectionCellLoader(filepaths,
                               asArrayImg=partial(
                                   asNormalizedUnsignedByteArrayImg, 400, 3,
                                   True, True))

    # TODO: how to preload 128 at a time? Or at least as many as numCPUs()?
    # One possibility is to query the SoftRefLoaderCache.map for its entries, using a ScheduledExecutorService,
    # and preload sections ahead for the whole blockSize[2] dimension.

    cachedCellImg = lazyCachedCellImg(loader,
                                      voldims,
                                      cell_dimensions,
                                      UnsignedByteType,
                                      BYTE,
                                      returnCache=True)

    def preload(cachedCellImg, loader):
        """
    Find which is the last cell index in the cache, identify to which block
    (given the blockSize[2] AKA Z dimension) that index belongs to,
    and concurrently load all cells (sections) that the Z dimension of the blockSize will need.
    If they are already loaded, these operations are insignificant.
    """
        # The SoftRefLoaderCache.map is a ConcurrentHashMap with Long keys, aka numbers
        cache = cachedCellImg.getCache()
        f1 = cache.getClass().getDeclaredField(
            "cache")  # LoaderCacheAsCacheAdapter.cache
        f1.setAccessible(True)
        softCache = f1.get(cache)
        f2 = softCache.getClass().getDeclaredField(
            "map")  # SoftRefLoaderCache.map
        f2.setAccessible(True)
        keys = sorted(f2.get(softCache).keySet())
        if 0 == len(keys):
            return
        first = keys[-1] - (keys[-1] % block_size[2])
        syncPrint("Preloading %i-%i" % (first, first + block_size[2] - 1))
        exe = newFixedThreadPool(n_threads=-1, name="preloader")
        try:
            for index in xrange(first, first + block_size[2]):
                exe.submit(Task(softCache.get, index, loader))
        except:
            syncPrint(sys.exc_info())
        finally:
            exe.shutdown()

    preloader = Executors.newSingleThreadScheduledExecutor()
    preloader.scheduleWithFixedDelay(partial(preload, cachedCellImg, loader),
                                     0, 60, TimeUnit.SECONDS)

    writeN5(cachedCellImg,
            exportDir,
            name,
            block_size,
            gzip_compression=gzip_compression,
            n_threads=0)
    preloader.shutdown()
Example #23
img1 = IL.wrap(IJ.openImage("/home/albert/Desktop/bat-cochlea-volume.zip"))
img2 = IL.wrap(IJ.openImage("/home/albert/Desktop/mri-stack-mask.zip"))

# Make any non-zero pixel be 1
for t in img1:
    if 0 != t.getIntegerLong():
        t.setOne()

for t in img2:
    if 0 != t.getIntegerLong():
        t.setOne()

# Make both fit within the same window, centered

dims1 = Intervals.dimensionsAsLongArray(img1)
dims2 = Intervals.dimensionsAsLongArray(img2)
dims3 = [max(a, b) for a, b in izip(dims1, dims2)]

zero = UnsignedByteType(0)
img1E = Views.extendValue(img1, zero)
img2E = Views.extendValue(img2, zero)

img1M = Views.interval(
    img1E, [(dim1 - dim3) / 2 for dim1, dim3 in izip(dims1, dims3)],
    [dim1 + (dim3 - dim1) / 2 - 1 for dim1, dim3 in izip(dims1, dims3)])

img2M = Views.interval(
    img2E, [(dim2 - dim3) / 2 for dim2, dim3 in izip(dims2, dims3)],
    [dim2 + (dim3 - dim2) / 2 - 1 for dim2, dim3 in izip(dims2, dims3)])
Example #24
        Views.interpolate(
            Views.extendZero(Converters.argbChannel(imgSlice1, i)),
            NLinearInterpolatorFactory()) for i in [1, 2, 3]
    ]
    # ARGBType 2D view of the transformed color channels
    imgSlice2 = Converters.mergeARGB(
        Views.stack(
            Views.interval(RealViews.transform(channel, transform),
                           sliceInterval)
            for channel in channels), ColorChannelOrder.RGB)
    slices2.append(imgSlice2)

# Transformed view
viewImg2 = Views.stack(slices2)
# Materialized image
img2 = ArrayImgs.argbs(Intervals.dimensionsAsLongArray(interval2))
ImgUtil.copy(viewImg2, img2)

imp4 = IL.wrap(img2, "imglib2-transformed RGB (pull)")
imp4.show()

# Fourth approach: pull (CORRECT!), and much faster (delegates pixel-wise operations
# to java libraries and delegates RGB color handling altogether)
# Defines a list of views (recipes, really) for transforming every stack slice
# and then materializes the view by copying it in a multi-threaded way into an ArrayImg.
# Now without separating the color channels: will use the NLinearInterpolatorARGB
# In practice, it's a tad slower than the third approach: also processes the alpha channel in ARGB
# even though we know it is empty. Its conciseness adds clarity and is a win.
"""
# Procedural code:
slices3 = []
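# Hedged sketch (not in the original excerpt) of the fourth approach described above:
# transform each ARGB slice directly, without separating color channels;
# NLinearInterpolatorFactory returns an ARGB-aware interpolator for ARGBType images.
# 'slices1' stands for the list of source 2D ARGB slices, and 'transform' and
# 'sliceInterval' for the same per-slice transform and interval used above
# (these names are assumptions about the surrounding script).
slices3 = [Views.interval(
               RealViews.transform(
                   Views.interpolate(Views.extendZero(imgSlice1),
                                     NLinearInterpolatorFactory()),
                   transform),
               sliceInterval)
           for imgSlice1 in slices1]
viewImg3 = Views.stack(slices3)
img3 = ArrayImgs.argbs(Intervals.dimensionsAsLongArray(interval2))
ImgUtil.copy(viewImg3, img3)
IL.wrap(img3, "imglib2-transformed RGB (pull, ARGB)").show()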
Example #25
ui.show(normalized)

# convolution
kernel = ops.run("create.img", [3, 3], FloatType())
for p in kernel:
    p.set(1.0 / kernel.size())
convoluted = ops.create().img(normalized)
ops.filter().convolve(convoluted, Views.extendMirrorSingle(normalized), kernel)
ui.show(convoluted)

# min filter
minfiltered = ops.create().img(convoluted)
ops.filter().min(minfiltered, input, RectangleShape(3, False))
ui.show(minfiltered)

# threshold
thresholded = ops.create().img(minfiltered, BitType())
ops.threshold().apply(thresholded, minfiltered, UnsignedByteType(128))
ui.show(thresholded)

# project
dims = Intervals.dimensionsAsLongArray(thresholded)
projected_dims = dims[:-1]
projected = ops.create().img(projected_dims)
ops.transform().project(IterableRandomAccessibleInterval(projected),
                        thresholded, ops.op(IterableMax, input),
                        len(projected_dims))
ui.show(projected)

scifio.datasetIO().save(datasets.create(projected), output_path)
print("OK")
Example #26
def export8bitN5(
        filepaths,
        img_dimensions,
        matrices,
        name,
        exportDir,
        interval,
        gzip_compression=6,
        invert=True,
        CLAHE_params=[400, 256, 3.0],
        copy_threads=2,
        n5_threads=0,  # 0 means as many as CPU cores
        block_size=[128, 128, 128]):
    """
  Export into an N5 volume, in parallel, in 8-bit.

  name: name to assign to the N5 volume.
  filepaths: the file paths of the serial sections to export, in order.
  exportDir: the directory into which to save the N5 volume.
  interval: for cropping.
  gzip_compression: defaults to 6 as suggested by Saalfeld.
  block_size: defaults to 128x128x128 px.
  """

    dims = Intervals.dimensionsAsLongArray(interval)
    voldims = [dims[0], dims[1], len(filepaths)]
    cell_dimensions = [dims[0], dims[1], 1]

    def asNormalizedUnsignedByteArrayImg(interval, invert, blockRadius, n_bins,
                                         slope, matrices, copy_threads, index,
                                         imp):
        sp = imp.getProcessor()  # ShortProcessor
        sp.setRoi(interval.min(0), interval.min(1),
                  interval.max(0) - interval.min(0) + 1,
                  interval.max(1) - interval.min(1) + 1)
        sp = sp.crop()
        if invert:
            sp.invert()
        CLAHE.run(
            ImagePlus("", sp), blockRadius, n_bins, slope, None
        )  # far less memory requirements than NormalizeLocalContrast, and faster.
        minimum, maximum = autoAdjust(sp)

        # Transform and convert image to 8-bit, mapping to display range
        img = ArrayImgs.unsignedShorts(
            sp.getPixels(), [sp.getWidth(), sp.getHeight()])
        sp = None
        affine = AffineTransform2D()
        affine.set(matrices[index])
        imgI = Views.interpolate(Views.extendZero(img),
                                 NLinearInterpolatorFactory())
        imgA = RealViews.transform(imgI, affine)
        imgT = Views.zeroMin(Views.interval(imgA, img))
        imgMinMax = convert(imgT, RealUnsignedByteConverter(minimum, maximum),
                            UnsignedByteType)
        aimg = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img))
        ImgUtil.copy(ImgView.wrap(imgMinMax, aimg.factory()), aimg,
                     copy_threads)
        img = imgI = imgA = imgT = imgMinMax = None
        return aimg

    blockRadius, n_bins, slope = CLAHE_params

    loader = SectionCellLoader(
        filepaths,
        asArrayImg=partial(asNormalizedUnsignedByteArrayImg, interval, invert,
                           blockRadius, n_bins, slope, matrices, copy_threads))

    # How to preload block_size[2] files at a time? Or at least as many as numCPUs()?
    # One possibility is to query the SoftRefLoaderCache.map for its entries, using a ScheduledExecutorService,
    # and preload sections ahead for the whole blockSize[2] dimension.

    cachedCellImg = lazyCachedCellImg(loader, voldims, cell_dimensions,
                                      UnsignedByteType, BYTE)

    def preload(cachedCellImg, loader, block_size, filepaths):
        """
    Find which is the last cell index in the cache, identify to which block
    (given the blockSize[2] AKA Z dimension) that index belongs to,
    and concurrently load all cells (sections) that the Z dimension of the blockSize will need.
    If they are already loaded, these operations are insignificant.
    """
        exe = newFixedThreadPool(n_threads=min(block_size[2], numCPUs()),
                                 name="preloader")
        try:
            # The SoftRefLoaderCache.map is a ConcurrentHashMap with Long keys, aka numbers
            cache = cachedCellImg.getCache()
            f1 = cache.getClass().getDeclaredField(
                "cache")  # LoaderCacheAsCacheAdapter.cache
            f1.setAccessible(True)
            softCache = f1.get(cache)
            cache = None
            f2 = softCache.getClass().getDeclaredField(
                "map")  # SoftRefLoaderCache.map
            f2.setAccessible(True)
            keys = sorted(f2.get(softCache).keySet())
            if 0 == len(keys):
                return
            first = keys[-1] - (keys[-1] % block_size[2])
            last = min(len(filepaths), first + block_size[2]) - 1
            keys = None
            msg = "Preloading %i-%i" % (first, last)
            futures = []
            for index in xrange(first, last + 1):
                futures.append(
                    exe.submit(TimeItTask(softCache.get, index, loader)))
            softCache = None
            # Wait for all
            count = 0
            while len(futures) > 0:
                r, t = futures.pop(0).get()
                # t in miliseconds
                if t > 500:
                    if msg:
                        syncPrint(msg)
                        msg = None
                    syncPrint("preloaded index %i in %f ms" %
                              (first + count, t))
                count += 1
            if not msg:  # msg was printed
                syncPrint("Completed preloading %i-%i" %
                          (first, first + block_size[2] - 1))
        except:
            syncPrint(sys.exc_info())
        finally:
            exe.shutdown()

    preloader = Executors.newSingleThreadScheduledExecutor()
    preloader.scheduleWithFixedDelay(
        RunTask(preload, cachedCellImg, loader, block_size, filepaths), 10, 60,
        TimeUnit.SECONDS)

    try:
        syncPrint("N5 directory: " + exportDir + "\nN5 dataset name: " + name +
                  "\nN5 blockSize: " + str(block_size))
        writeN5(cachedCellImg,
                exportDir,
                name,
                block_size,
                gzip_compression_level=gzip_compression,
                n_threads=n5_threads)
    finally:
        preloader.shutdown()
Example #27
def export8bitN5(
        filepaths,
        loadFn,
        img_dimensions,
        matrices,
        name,
        exportDir,
        interval,
        gzip_compression=6,
        invert=True,
        CLAHE_params=[400, 256, 3.0],
        n5_threads=0,  # 0 means as many as CPU cores
        block_size=[128, 128, 128]):
    """
  Export into an N5 volume, in parallel, in 8-bit.

  filepaths: the ordered list of filepaths, one per serial section.
  loadFn: a function to load a filepath into an ImagePlus.
  name: name to assign to the N5 volume.
  matrices: the list of transformation matrices (each one is an array), one per section
  exportDir: the directory into which to save the N5 volume.
  interval: for cropping.
  gzip_compression: defaults to 6 as suggested by Saalfeld. 0 means no compression.
  invert:  Defaults to True (necessary for FIBSEM). Whether to invert the images upon loading.
  CLAHE_params: defaults to [400, 256, 3.0]. If not None, a list of the 3 parameters needed for the CLAHE filter applied to each image.
  n5_threads: defaults to 0, meaning as many as CPU cores.
  block_size: defaults to 128x128x128 px. A list of 3 integer numbers, the dimensions of each individual block.
  """

    dims = Intervals.dimensionsAsLongArray(interval)
    voldims = [dims[0], dims[1], len(filepaths)]
    cell_dimensions = [dims[0], dims[1], 1]

    def asNormalizedUnsignedByteArrayImg(interval, invert, blockRadius, n_bins,
                                         slope, matrices, index, imp):
        sp = imp.getProcessor()  # ShortProcessor
        # Crop to interval if needed
        x = interval.min(0)
        y = interval.min(1)
        width = interval.max(0) - interval.min(0) + 1
        height = interval.max(1) - interval.min(1) + 1
        if 0 != x or 0 != y or sp.getWidth() != width or sp.getHeight(
        ) != height:
            sp.setRoi(x, y, width, height)
            sp = sp.crop()

        if invert:
            sp.invert()

        CLAHE.run(
            ImagePlus("", sp), blockRadius, n_bins, slope, None
        )  # far less memory requirements than NormalizeLocalContrast, and faster.
        minimum, maximum = autoAdjust(sp)

        # Transform and convert image to 8-bit, mapping to display range
        img = ArrayImgs.unsignedShorts(
            sp.getPixels(), [sp.getWidth(), sp.getHeight()])
        sp = None
        imp = None
        # Must use linear interpolation for subpixel precision
        affine = AffineTransform2D()
        affine.set(matrices[index])
        imgI = Views.interpolate(Views.extendZero(img),
                                 NLinearInterpolatorFactory())
        imgA = RealViews.transform(imgI, affine)
        imgT = Views.zeroMin(Views.interval(imgA, img))
        # Convert to 8-bit
        imgMinMax = convert2(imgT,
                             RealUnsignedByteConverter(minimum, maximum),
                             UnsignedByteType,
                             randomAccessible=False)  # use IterableInterval
        aimg = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img))
        # ImgUtil copies multi-threaded, which is not appropriate here as there are many other images being copied too
        #ImgUtil.copy(ImgView.wrap(imgMinMax, aimg.factory()), aimg)

        # Single-threaded copy
        copier = createBiConsumerTypeSet(UnsignedByteType)
        LoopBuilder.setImages(imgMinMax, aimg).forEachPixel(copier)

        img = imgI = imgA = imgMinMax = imgT = None
        return aimg

    blockRadius, n_bins, slope = CLAHE_params

    # A CacheLoader that interprets the list of filepaths as a 3D volume: a stack of 2D slices
    loader = SectionCellLoader(
        filepaths,
        asArrayImg=partial(asNormalizedUnsignedByteArrayImg, interval, invert,
                           blockRadius, n_bins, slope, matrices),
        loadFn=loadFn)

    # How to preload block_size[2] files at a time? Or at least as many as numCPUs()?
    # One possibility is to query the SoftRefLoaderCache.map for its entries, using a ScheduledExecutorService,
    # and preload sections ahead for the whole blockSize[2] dimension.

    cachedCellImg = lazyCachedCellImg(loader, voldims, cell_dimensions,
                                      UnsignedByteType, BYTE)

    exe_preloader = newFixedThreadPool(n_threads=min(
        block_size[2], n5_threads if n5_threads > 0 else numCPUs()),
                                       name="preloader")

    def preload(cachedCellImg, loader, block_size, filepaths, exe):
        """
    Find which is the last cell index in the cache, identify to which block
    (given the blockSize[2] AKA Z dimension) that index belongs to,
    and concurrently load all cells (sections) that the Z dimension of the blockSize will need.
    If they are already loaded, these operations are insignificant.
    """
        try:
            # The SoftRefLoaderCache.map is a ConcurrentHashMap with Long keys, aka numbers
            cache = cachedCellImg.getCache()
            f1 = cache.getClass().getDeclaredField(
                "cache")  # LoaderCacheAsCacheAdapter.cache
            f1.setAccessible(True)
            softCache = f1.get(cache)
            cache = None
            f2 = softCache.getClass().getDeclaredField(
                "map")  # SoftRefLoaderCache.map
            f2.setAccessible(True)
            keys = sorted(f2.get(softCache).keySet())
            if 0 == len(keys):
                return
            first = max(0, keys[-1] - (keys[-1] % block_size[2]))
            last = min(len(filepaths), first + block_size[2]) - 1
            keys = None
            syncPrintQ("### Preloading %i-%i ###" % (first, last))
            futures = []
            for index in xrange(first, last + 1):
                futures.append(
                    exe.submit(TimeItTask(softCache.get, index, loader)))
            softCache = None
            # Wait for all
            loaded_any = False
            count = 0
            while len(futures) > 0:
                r, t = futures.pop(0).get()  # waits for the image to load
                if t > 1000:  # in miliseconds. Less than this is for sure a cache hit, more a cache miss and reload
                    loaded_any = True
                r = None
                # t in miliseconds
                syncPrintQ("preloaded index %i in %f ms" % (first + count, t))
                count += 1
            if not loaded_any:
                syncPrintQ("Completed preloading %i-%i" %
                           (first, first + block_size[2] - 1))
        except:
            syncPrintQ(sys.exc_info())

    preloader = Executors.newSingleThreadScheduledExecutor()
    preloader.scheduleWithFixedDelay(
        RunTask(preload, cachedCellImg, loader, block_size, filepaths,
                exe_preloader), 10, 60, TimeUnit.SECONDS)

    try:
        syncPrint("N5 directory: " + exportDir + "\nN5 dataset name: " + name +
                  "\nN5 blockSize: " + str(block_size))
        writeN5(cachedCellImg,
                exportDir,
                name,
                block_size,
                gzip_compression_level=gzip_compression,
                n_threads=n5_threads)
    finally:
        preloader.shutdown()
        exe_preloader.shutdown()
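# Hedged usage sketch (not part of the original): the paths, dimensions and matrices
# below are hypothetical placeholders; loadFn is assumed to be IJ.openImage, which
# loads a file path into an ImagePlus as the docstring requires.
import os
from ij import IJ
from net.imglib2 import FinalInterval

srcDir = "/path/to/sections/"                                    # hypothetical
filepaths = [os.path.join(srcDir, name) for name in sorted(os.listdir(srcDir))]
img_dimensions = [2048, 2048]                                    # hypothetical section size
matrices = [[1.0, 0.0, 0.0, 0.0, 1.0, 0.0]] * len(filepaths)     # identity 2D affines
interval = FinalInterval([0, 0], [img_dimensions[0] - 1, img_dimensions[1] - 1])
export8bitN5(filepaths, IJ.openImage, img_dimensions, matrices,
             "sections-8bit", "/path/to/export.n5", interval,
             gzip_compression=6, invert=True, CLAHE_params=[400, 256, 3.0],
             n5_threads=0, block_size=[128, 128, 128])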
Example #28
from net.imglib2.img.array import ArrayImgFactory
from net.imglib2.type.numeric.integer import UnsignedByteType, UnsignedShortType
from net.imglib2.util import Intervals

# An 8-bit 256x256x256 volume
img = ArrayImgFactory(UnsignedByteType()).create([256, 256, 256])

# Another image of the same type and dimensions, but empty
img2 = img.factory().create(
    [img.dimension(d) for d in xrange(img.numDimensions())])

# Same, but easier reading of the image dimensions
img3 = img.factory().create(Intervals.dimensionsAsLongArray(img))

# Same, but use an existing img as an Interval from which to read out the dimensions
img4 = img.factory().create(img)

# Now we change the type: same kind of image and same dimensions,
# but crucially a different pixel type (16-bit) via a new ImgFactory
imgShorts = img.factory().imgFactory(UnsignedShortType()).create(img)
Example #29
#@ OpService ops
#@ SCIFIO scifio
#@ DatasetService datasetService
#@ String input_path
#@ String output_path
#@ UIService ui

import os
from net.imglib2.util import Intervals
from net.imglib2.view import IterableRandomAccessibleInterval

try:
    os.unlink(output_path)
except OSError:
    pass

input = scifio.datasetIO().open(input_path)
dims = Intervals.dimensionsAsLongArray(input)
output_dims = dims[:-1]
output = ops.create().img(output_dims)
ops.transform().project(IterableRandomAccessibleInterval(output), input,
                        ops.op('stats.max', input.getImgPlus()),
                        len(output_dims))

scifio.datasetIO().save(datasetService.create(output), output_path)
ui.show(output)
print("OK")
Example #30
from net.imglib2.algorithm.math.ImgMath import compute, block, div, offset, add
from net.imglib2.img.display.imagej import ImageJFunctions as IL
from net.imglib2.img.array import ArrayImgs
from net.imglib2.type.numeric.real import FloatType
from net.imglib2.view import Views
from net.imglib2.util import Intervals
from ij import IJ

imp = IJ.getImage()  # an 8-bit image
img = IL.wrap(imp)

# Create the integral image of an 8-bit input, stored as 64-bit
target = ArrayImgs.unsignedLongs(Intervals.dimensionsAsLongArray(img))
# Copy input onto the target image
compute(img).into(target)
# Extend target with zeros, so that we can read at coordinate -1
imgE = Views.extendZero(target)
# Integrate every dimension, cummulatively by writing into
# a target image that is also the input
for d in xrange(img.numDimensions()):
    coord = [0] * img.numDimensions()  # array of zeros
    coord[d] = -1
    # Cummulative sum along the current dimension
    # Note that instead of the ImgMath offset op,
    # we could have used Views.translate(Views.extendZero(target), [1, 0]))
    # (Notice though the sign change in the translation)
    integral = add(target, offset(imgE, coord))
    compute(integral).into(target)

# The target is the integral image
integralImg = target
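# Hedged follow-up sketch (not part of the original): read the sum over an axis-aligned
# box from a 2D integral image via inclusion-exclusion. Assumes img is 2D; the box
# coordinates below are arbitrary examples.
ra = Views.extendZero(integralImg).randomAccess()

def readIntegral(x, y):
    ra.setPosition(x, 0)
    ra.setPosition(y, 1)
    return ra.get().getIntegerLong()

def boxSum(x0, y0, x1, y1):
    # sum of pixel values over [x0..x1] x [y0..y1], inclusive
    return (readIntegral(x1, y1) - readIntegral(x0 - 1, y1)
            - readIntegral(x1, y0 - 1) + readIntegral(x0 - 1, y0 - 1))

print boxSum(10, 10, 19, 19)  # sum over a 10x10 box with min corner at (10, 10)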