def translate_single_stack_using_imglib2(imp, dx, dy, dz):
  # wrap into a float imglib2 and translate
  #   conversion into float is necessary due to "overflow of n-linear interpolation due to accuracy limits of unsigned bytes"
  #   see: https://github.com/fiji/fiji/issues/136#issuecomment-173831951
  img = ImagePlusImgs.from(imp.duplicate())
  extended = Views.extendBorder(img)
  converted = Converters.convert(extended, RealFloatSamplerConverter())
  interpolant = Views.interpolate(converted, NLinearInterpolatorFactory())
  
  # translate
  if imp.getNDimensions()==3:
    transformed = RealViews.affine(interpolant, Translation3D(dx, dy, dz))
  elif imp.getNDimensions()==2:
    transformed = RealViews.affine(interpolant, Translation2D(dx, dy))
  else:
    IJ.log("Can only work on 2D or 3D stacks")
    return None
  
  cropped = Views.interval(transformed, img)
  # wrap back into bit depth of input image and return
  bd = imp.getBitDepth()
  if bd==8:
    return(ImageJFunctions.wrapUnsignedByte(cropped,"imglib2"))
  elif bd == 16:
    return(ImageJFunctions.wrapUnsignedShort(cropped,"imglib2"))
  elif bd == 32:
    return(ImageJFunctions.wrapFloat(cropped,"imglib2"))
  else:
    return None    
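A minimal usage sketch for the function above, assuming it runs as a Fiji/Jython script; the import paths and the half-pixel shift are illustrative assumptions rather than part of the original example:

# Imports this snippet appears to rely on, plus a call on the active image.
from ij import IJ
from net.imglib2.img.imageplus import ImagePlusImgs
from net.imglib2.view import Views
from net.imglib2.converter import Converters
from net.imglib2.converter.readwrite import RealFloatSamplerConverter
from net.imglib2.interpolation.randomaccess import NLinearInterpolatorFactory
from net.imglib2.realtransform import RealViews, Translation2D, Translation3D
from net.imglib2.img.display.imagej import ImageJFunctions

imp = IJ.getImage()  # the active 2D image or 3D stack
shifted = translate_single_stack_using_imglib2(imp, 0.5, 0.5, 0.0)
if shifted is not None:
  shifted.show()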
Example #2
def translate_single_stack_using_imglib2(imp, dx, dy, dz):
  # wrap into a float imglib2 and translate
  #   conversion into float is necessary due to "overflow of n-linear interpolation due to accuracy limits of unsigned bytes"
  #   see: https://github.com/fiji/fiji/issues/136#issuecomment-173831951
  img = ImagePlusImgs.from(imp.duplicate())
  extended = Views.extendZero(img)
  converted = Converters.convert(extended, RealFloatSamplerConverter())
  interpolant = Views.interpolate(converted, NLinearInterpolatorFactory())
  
  # translate
  if imp.getNDimensions()==3:
    transformed = RealViews.affine(interpolant, Translation3D(dx, dy, dz))
  elif imp.getNDimensions()==2:
    transformed = RealViews.affine(interpolant, Translation2D(dx, dy))
  else:
    IJ.log("Can only work on 2D or 3D stacks")
    return None
  
  cropped = Views.interval(transformed, img)
  # wrap back into bit depth of input image and return
  bd = imp.getBitDepth()
  if bd==8:
    return(ImageJFunctions.wrapUnsignedByte(cropped,"imglib2"))
  elif bd == 16:
    return(ImageJFunctions.wrapUnsignedShort(cropped,"imglib2"))
  elif bd == 32:
    return(ImageJFunctions.wrapFloat(cropped,"imglib2"))
  else:
    return None    
Example #3
def rotatedView(img, angle, enlarge=True, extend=Views.extendBorder):
  """ Return a rotated view of the image, around the Z axis,
      with an expanded (or reduced) interval view so that all pixels are exactly included.

      img: a RandomAccessibleInterval
      angle: in degrees
  """
  cx = img.dimension(0) / 2.0
  cy = img.dimension(1) / 2.0
  toCenter = AffineTransform2D()
  toCenter.translate(-cx, -cy)
  rotation = AffineTransform2D()
  # Step 1: place origin of rotation at the center of the image
  rotation.preConcatenate(toCenter)
  # Step 2: rotate around the Z axis
  rotation.rotate(radians(angle))
  # Step 3: undo translation to the center
  rotation.preConcatenate(toCenter.inverse())
  rotated = RV.transform(Views.interpolate(extend(img),
                                           NLinearInterpolatorFactory()), rotation)
  if enlarge:
    # Bounds:
    bounds = repeat((sys.maxint, 0)) # initial upper- and lower-bound values  
                                     # for min, max to compare against  
    transformed = zeros(2, 'f')
    for corner in product(*zip(repeat(0), Intervals.maxAsLongArray(img))):
      rotation.apply(corner, transformed)
      bounds = [(min(vmin, int(floor(v))), max(vmax, int(ceil(v))))
                for (vmin, vmax), v in zip(bounds, transformed)]
    minC, maxC = map(list, zip(*bounds)) # transpose list of 2 pairs
                                         # into 2 lists of 2 values
    imgRot = Views.zeroMin(Views.interval(rotated, minC, maxC))
  else:
    imgRot = Views.interval(rotated, img)
  return imgRot
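A usage sketch for rotatedView, listing the imports it appears to depend on; the 30-degree angle and the display step are illustrative assumptions:

import sys
from math import radians, floor, ceil
from itertools import product, repeat
from jarray import zeros
from ij import IJ
from net.imglib2.view import Views
from net.imglib2.util import Intervals
from net.imglib2.realtransform import RealViews as RV, AffineTransform2D
from net.imglib2.interpolation.randomaccess import NLinearInterpolatorFactory
from net.imglib2.img.display.imagej import ImageJFunctions as IL

img = IL.wrap(IJ.getImage())    # assumes a single-channel 2D image
rotated = rotatedView(img, 30)  # 30 degrees around Z, canvas enlarged to fit
IL.wrap(rotated, "rotated 30 deg").show()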
def translatedView(img, matrix):
  imgE = Views.extendZero(img)
  imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
  # In negative: the inverse
  t = Translation3D(-matrix[3], -matrix[7], -matrix[11])
  imgT = RealViews.transform(imgI, t)
  return Views.interval(imgT, [0, 0, 0], [img.dimension(d) - 1 for d in xrange(3)])  # max is inclusive
Example #5
    def asNormalizedUnsignedByteArrayImg(interval, invert, blockRadius, n_bins,
                                         slope, matrices, copy_threads, index,
                                         imp):
        sp = imp.getProcessor()  # ShortProcessor
        sp.setRoi(interval.min(0), interval.min(1),
                  interval.max(0) - interval.min(0) + 1,
                  interval.max(1) - interval.min(1) + 1)
        sp = sp.crop()
        if invert:
            sp.invert()
        CLAHE.run(
            ImagePlus("", sp), blockRadius, n_bins, slope, None
        )  # needs far less memory than NormalizeLocalContrast, and is faster.
        minimum, maximum = autoAdjust(sp)

        # Transform and convert image to 8-bit, mapping to display range
        img = ArrayImgs.unsignedShorts(
            sp.getPixels(), [sp.getWidth(), sp.getHeight()])
        sp = None
        affine = AffineTransform2D()
        affine.set(matrices[index])
        imgI = Views.interpolate(Views.extendZero(img),
                                 NLinearInterpolatorFactory())
        imgA = RealViews.transform(imgI, affine)
        imgT = Views.zeroMin(Views.interval(imgA, img))
        imgMinMax = convert(imgT, RealUnsignedByteConverter(minimum, maximum),
                            UnsignedByteType)
        aimg = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img))
        ImgUtil.copy(ImgView.wrap(imgMinMax, aimg.factory()), aimg,
                     copy_threads)
        img = imgI = imgA = imgT = imgMinMax = None
        return aimg
def transformedView(img, matrix):
  imgE = Views.extendZero(img)
  imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
  aff = AffineTransform3D()
  aff.set(*matrix)
  aff = aff.inverse()
  imgT = RealViews.transform(imgI, aff)
  return Views.interval(imgT, [0, 0, 0], [img.dimension(d) - 1 for d in xrange(3)])  # max is inclusive
Example #7
def viewTransformed(img, affine):
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    imgT = RealViews.transform(imgI, affine)
    minC = [0, 0, 0]
    maxC = [img.dimension(d) - 1 for d in xrange(img.numDimensions())]
    imgB = Views.interval(imgT, minC, maxC)
    return imgB
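A usage sketch for the viewTransformed above, assuming img is a 3D RandomAccessibleInterval; the translation values and the display step are illustrative:

from net.imglib2.realtransform import AffineTransform3D
from net.imglib2.img.display.imagej import ImageJFunctions as IL

affine = AffineTransform3D()
affine.translate(10.0, -5.0, 0.0)  # shift by +10 px in X and -5 px in Y
shifted = viewTransformed(img, affine)
IL.wrap(shifted, "translated view").show()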
Example #8
 def get(self, path):
   transform = self.transformsDict[path]
   img = self.loader.get(path)
   imgE = Views.extendZero(img)
   imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
   imgT = RealViews.transform(imgI, transform)
   minC = self.roi[0] if self.roi else [0] * img.numDimensions()
   maxC = self.roi[1] if self.roi else [img.dimension(d) -1 for d in xrange(img.numDimensions())]
   imgO = Views.zeroMin(Views.interval(imgT, minC, maxC))
   return ImgView.wrap(imgO, img.factory()) if self.asImg else imgO
Example #9
def transformedView(img, transform, interval=None):
    """ """
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    imgT = RealViews.transform(imgI, transform)
    if interval:
        return Views.interval(imgT, interval)
    else:
        return Views.interval(
            imgT, [0, 0, 0],
            [img.dimension(d) - 1 for d in xrange(img.numDimensions())])
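A usage sketch for the optional interval argument, cropping the transformed view to a 100x100x10 region; the shift and the region size are illustrative assumptions:

from net.imglib2 import FinalInterval
from net.imglib2.realtransform import Translation3D

t = Translation3D(4.5, 0.0, 0.0)              # sub-pixel shift in X
crop = FinalInterval([0, 0, 0], [99, 99, 9])  # min and max coordinates, both inclusive
small = transformedView(img, t, interval=crop)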
Example #10
def viewTransformed(img, matrix):
    affine = AffineTransform3D()
    affine.set(*matrix)
    # It's a forward transform: invert
    affine = affine.inverse()
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    imgT = RealViews.transform(imgI, affine)
    # Same dimensions
    imgB = Views.interval(imgT, img)
    return imgB
Example #11
 def makeCell(self, index):
     self.preloadCells(index)  # preload others in the background
     img = self.loadImg(self.filepaths[index])
     affine = AffineTransform2D()
     affine.set(self.matrices[index])
     imgI = Views.interpolate(Views.extendZero(img),
                              NLinearInterpolatorFactory())
     imgA = RealViews.transform(imgI, affine)
     imgT = Views.zeroMin(Views.interval(imgA, self.interval))
     aimg = img.factory().create(self.interval)
     ImgUtil.copy(ImgView.wrap(imgT, aimg.factory()), aimg)
     return Cell(self.cell_dimensions, [0, 0, index], aimg.update(None))
Example #12
def scale3D(img, x=1.0, y=1.0, z=1.0):
  scale3d = AffineTransform3D()
  scale3d.set(x, 0, 0, 0,
              0, y, 0, 0,
              0, 0, z, 0)
  imgE = Views.extendZero(img)
  imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
  imgT = RealViews.transform(imgI, scale3d)
  # dimensions
  minC = [0, 0, 0]
  maxC = [int(img.dimension(d) * k + 0.5) -1 for d, k in enumerate([x, y, z])]
  imgB = Views.interval(imgT, minC, maxC)
  return imgB
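For example, to view a 3D stack with X and Y doubled and Z halved (the factors are illustrative):

scaled = scale3D(img, x=2.0, y=2.0, z=0.5)
# 'scaled' is a lazy view; copy it into an ArrayImg (e.g. with ImgUtil.copy,
# as done in other examples in this section) if it will be read repeatedly.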
Example #13
def viewTransformed(img, calibration, transform):
    """ View img transformed to isotropy (via the calibration)
      and transformed by the affine. """
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    if type(transform) == AffineTransform3D:
        scale3d = AffineTransform3D()
        scale3d.set(calibration[0], 0, 0, 0, 0, calibration[1], 0, 0, 0, 0,
                    calibration[2], 0)
        affine = transform.copy()
        affine.concatenate(scale3d)
        imgT = RealViews.transform(imgI, affine)
    else:
        imgT = RealViews.transform(imgI, Scale3D(*calibration))
        imgT = RealViews.transform(imgT, transform)
    # dimensions
    minC = [0, 0, 0]
    maxC = [
        int(img.dimension(d) * cal) - 1 for d, cal in enumerate(calibration)
    ]
    imgB = Views.interval(imgT, minC, maxC)
    return imgB
def scale(img, calibration):
    scale3d = AffineTransform3D()
    scale3d.set(calibration[0], 0, 0, 0, 0, calibration[1], 0, 0, 0, 0,
                calibration[2], 0)
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    imgT = RealViews.transform(imgI, scale3d)
    # dimensions
    minC = [0, 0, 0]
    maxC = [
        int(img.dimension(d) * cal) - 1 for d, cal in enumerate(calibration)
    ]
    imgB = Views.interval(imgT, minC, maxC)
    return imgB
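A usage sketch showing where the calibration triple typically comes from; reading it off the active ImagePlus is an assumption of this sketch, not part of the example:

from ij import IJ
from net.imglib2.img.display.imagej import ImageJFunctions as IL

imp = IJ.getImage()
cal = imp.getCalibration()
calibration = [cal.pixelWidth, cal.pixelHeight, cal.pixelDepth]  # e.g. [1.0, 1.0, 5.0]
img = IL.wrap(imp)                   # single-channel 3D stack as an Img
isotropic = scale(img, calibration)  # lazily resampled to isotropic voxels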
Example #15
    def asNormalizedUnsignedByteArrayImg(interval, invert, blockRadius, n_bins,
                                         slope, matrices, index, imp):
        sp = imp.getProcessor()  # ShortProcessor
        # Crop to interval if needed
        x = interval.min(0)
        y = interval.min(1)
        width = interval.max(0) - interval.min(0) + 1
        height = interval.max(1) - interval.min(1) + 1
        if 0 != x or 0 != y or sp.getWidth() != width or sp.getHeight() != height:
            sp.setRoi(x, y, width, height)
            sp = sp.crop()

        if invert:
            sp.invert()

        CLAHE.run(
            ImagePlus("", sp), blockRadius, n_bins, slope, None
        )  # needs far less memory than NormalizeLocalContrast, and is faster.
        minimum, maximum = autoAdjust(sp)

        # Transform and convert image to 8-bit, mapping to display range
        img = ArrayImgs.unsignedShorts(
            sp.getPixels(), [sp.getWidth(), sp.getHeight()])
        sp = None
        imp = None
        # Must use linear interpolation for subpixel precision
        affine = AffineTransform2D()
        affine.set(matrices[index])
        imgI = Views.interpolate(Views.extendZero(img),
                                 NLinearInterpolatorFactory())
        imgA = RealViews.transform(imgI, affine)
        imgT = Views.zeroMin(Views.interval(imgA, img))
        # Convert to 8-bit
        imgMinMax = convert2(imgT,
                             RealUnsignedByteConverter(minimum, maximum),
                             UnsignedByteType,
                             randomAccessible=False)  # use IterableInterval
        aimg = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img))
        # ImgUtil copies multi-threaded, which is not appropriate here as there are many other images being copied too
        #ImgUtil.copy(ImgView.wrap(imgMinMax, aimg.factory()), aimg)

        # Single-threaded copy
        copier = createBiConsumerTypeSet(UnsignedByteType)
        LoopBuilder.setImages(imgMinMax, aimg).forEachPixel(copier)

        img = imgI = imgA = imgMinMax = imgT = None
        return aimg
Example #16
def viewTransformed(img, calibration, affine):
    # Correct calibration
    scale3d = AffineTransform3D()
    scale3d.set(calibration[0], 0, 0, 0, 0, calibration[1], 0, 0, 0, 0,
                calibration[2], 0)
    transform = affine.copy()
    transform.concatenate(scale3d)
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    imgT = RealViews.transform(imgI, transform)
    # dimensions
    minC = [0, 0, 0]
    maxC = [
        int(img.dimension(d) * cal) - 1 for d, cal in enumerate(calibration)
    ]
    imgB = Views.interval(imgT, minC, maxC)
    return imgB
 def get(self, path):
     img = self.klb.readFull(path)
     imgE = Views.extendZero(img)
     imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
     affine = AffineTransform3D()
     affine.set(self.transforms[path])
     affine = affine.inverse()  # it's a forward transform: must invert
     affine.concatenate(scale3d)  # calibrated space: isotropic
     imgT = RealViews.transform(imgI, affine)
     minC = [0, 0, 0]
     maxC = [
         int(img.dimension(d) * cal) - 1
         for d, cal in enumerate(calibration)
     ]
     imgB = Views.interval(imgT, minC, maxC)
     # View a RandomAccessibleInterval as an Img, required by Load.lazyStack
     return ImgView.wrap(imgB, img.factory())
def twoStep(index=0):
    # The current way:
    img = klb.readFull(filepaths[index])  # klb_loader.get(filepaths[index])
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    imgT = RealViews.transform(imgI, cmIsotropicTransforms[index])
    imgB = Views.zeroMin(Views.interval(imgT, roi[0],
                                        roi[1]))  # bounded: crop with ROI
    imgBA = ArrayImgs.unsignedShorts(Intervals.dimensionsAsLongArray(imgB))
    ImgUtil.copy(ImgView.wrap(imgB, imgBA.factory()), imgBA)
    imgP = prepareImgForDeconvolution(
        imgBA,
        affine3D(fineTransformsPostROICrop[index]).inverse(),
        FinalInterval([0, 0, 0], [imgB.dimension(d) - 1 for d in xrange(3)]))
    # Copy transformed view into ArrayImg for best performance in deconvolution
    imgA = ArrayImgs.floats(Intervals.dimensionsAsLongArray(imgP))
    ImgUtil.copy(ImgView.wrap(imgP, imgA.factory()), imgA)
    IL.wrap(imgA, "two step").show()
Example #19
def viewTransformed(img, calibration, matrix):
  affine = AffineTransform3D()
  affine.set(*matrix)
  # It's a forward transform: invert
  affine = affine.inverse()
  # Correct calibration
  scale3d = AffineTransform3D()
  scale3d.set(calibration[0], 0, 0, 0,
              0, calibration[1], 0, 0,
              0, 0, calibration[2], 0)
  affine.concatenate(scale3d)
  imgE = Views.extendZero(img)
  imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
  imgT = RealViews.transform(imgI, affine)
  # dimensions
  minC = [0, 0, 0]
  maxC = [int(img.dimension(d) * cal) -1 for d, cal in enumerate(calibration)]
  imgB = Views.interval(imgT, minC, maxC)
  return imgB
def translate_using_imglib2(imp, dx, dy, dz):
  print "imp channels",imp.getNChannels()
  # todo:
  # if multiple channels use Duplicator to translate each channel individually
  ## wrap
  # http://javadoc.imagej.net/ImgLib2/net/imglib2/img/imageplus/ImagePlusImg.html
  img = ImagePlusImgs.from(imp.duplicate())
  print "dimensions:",img.numDimensions()
  print img.getChannels()
  ## prepare image
  print "img",img
  extended = Views.extendBorder(img)
  #print "extended",extended
  #print "extended",extended.dimension(1)
  dims = zeros(4, 'l')
  img.dimensions(dims)
  print "dims",dims
  converted = Converters.convert(extended, RealFloatSamplerConverter())
  composite = Views.collapseReal(converted, imp.getNChannels())
  print "composite",composite
  interpolant = Views.interpolate(composite, NLinearInterpolatorFactory())
  #print "interpolant",interpolant
  transformed = RealViews.affine(interpolant, Translation3D(dx, dy, dz))
  print "transformed", transformed
  cropped = Views.interval(transformed, img)
  print "cropped.numDimensions()", cropped.numDimensions()
  print "cropped",cropped
  ## wrap back and return
  bd = imp.getBitDepth()
  # maybe simply wrap works?
  if bd==8:
    return(ImageJFunctions.wrapUnsignedByte(cropped,"imglib2"))
  elif bd == 16:
    return(ImageJFunctions.wrapUnsignedShort(cropped,"imglib2"))
  elif bd == 32:
    return(ImageJFunctions.wrapFloat(cropped,"imglib2"))
  else:
    return None    
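A sketch of the per-channel idea from the todo above: split the channels, translate each one with translate_single_stack_using_imglib2 (defined in the first example), and merge them back. The use of ChannelSplitter and RGBStackMerge here is an assumption of this sketch, not part of the original example:

from ij.plugin import ChannelSplitter, RGBStackMerge

def translate_multichannel(imp, dx, dy, dz):
  channels = ChannelSplitter.split(imp)  # one ImagePlus per channel
  shifted = [translate_single_stack_using_imglib2(ch, dx, dy, dz) for ch in channels]
  if any(ch is None for ch in shifted):
    return None
  return RGBStackMerge.mergeChannels(shifted, False)  # False: do not keep the sources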
Example #21
def viewTransformed(image,
                    transformation,
                    title=None,
                    interval=None,
                    show=True):
    if isinstance(image, ImagePlus):
        # ImagePlus to ImgLib2 RandomAccessibleInterval & IterableInterval, aka Img
        img = IL.wrap(image)
    elif isinstance(image, RandomAccessibleInterval):
        img = image
    else:
        return None
    # Make the image be defined anywhere by infinitely padding with zeros.
    imgInfinite = Views.extendZero(img)
    # Make the image be defined at arbitrarily precise subpixel coordinates
    # by using n-dimensional linear interpolation
    imgInterpolated = Views.interpolate(imgInfinite,
                                        NLinearInterpolatorFactory())
    # Make the image be seen as a transformed view of the source image
    imgTransformed = RealViews.transform(imgInterpolated, transformation)
    # Define an interval within which we want the transformed image to be defined
    # (such as that of the source img itself; an img in ImgLib2 also happens to be an Interval
    # and can therefore be used as an interval, which is convenient here because we
    # expect the original field of view--the interval--to be where image data can still be found)
    interval = interval if interval else img  # every Img is also an Interval because each Img is bounded
    # Make the image finite by defining it as the content within the interval
    imgBounded = Views.interval(imgTransformed, interval)  # same as original
    # Optionally show the transformed, bounded image in an ImageJ VirtualStack
    # (Note that any time one of the VirtualStack's ImageProcessors has to
    # update its pixel data, the transformation is executed again;
    # no pixel data is cached or copied anywhere other than for display purposes.)
    if show:
        title = title if title else (image.getTitle() if isinstance(image, ImagePlus) else "transformed")
        imp = IL.wrap(imgBounded, title)  # as an ImagePlus
        imp.show()  # in an ImageJ ImageWindow
    return imgBounded
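A usage sketch, rotating the active image by 15 degrees about its center; the angle and the center-shift bookkeeping are illustrative assumptions (this mirrors the toCenter idiom of rotatedView earlier in this section):

from math import radians
from ij import IJ
from net.imglib2.realtransform import AffineTransform2D

imp = IJ.getImage()                     # assumes a single-channel 2D image
cx = imp.getWidth() / 2.0
cy = imp.getHeight() / 2.0
toCenter = AffineTransform2D()
toCenter.translate(-cx, -cy)
rot = AffineTransform2D()
rot.preConcatenate(toCenter)            # place the rotation origin at the center
rot.rotate(radians(15))
rot.preConcatenate(toCenter.inverse())  # move the origin back
viewTransformed(imp, rot, title=imp.getTitle() + " - rot15")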
img1M = Views.interval(
    img1E, [(dim1 - dim3) / 2 for dim1, dim3 in izip(dims1, dims3)],
    [dim1 + (dim3 - dim1) / 2 - 1 for dim1, dim3 in izip(dims1, dims3)])

img2M = Views.interval(
    img2E, [(dim2 - dim3) / 2 for dim2, dim3 in izip(dims2, dims3)],
    [dim2 + (dim3 - dim2) / 2 - 1 for dim2, dim3 in izip(dims2, dims3)])

IL.show(img1M, "img1M")
IL.show(img2M, "img2M")

# Scale by half (too slow otherwise)  -- ERROR: the smaller one (img1) doesn't remain centered.
s = [0.5 for d in xrange(img1.numDimensions())]
img1s = Views.interval(
    RealViews.transform(
        Views.interpolate(Views.extendValue(img1M, zero),
                          NLinearInterpolatorFactory()), Scale(s)),
    [0 for d in xrange(img1M.numDimensions())], [
        int(img1M.dimension(d) / 2.0 + 0.5) - 1
        for d in xrange(img1M.numDimensions())
    ])
img2s = Views.interval(
    RealViews.transform(
        Views.interpolate(Views.extendValue(img2M, zero),
                          NLinearInterpolatorFactory()), Scale(s)),
    [0 for d in xrange(img2M.numDimensions())], [
        int(img2M.dimension(d) / 2.0 + 0.5) - 1
        for d in xrange(img2M.numDimensions())
    ])

# simplify var names
Example #23
File: pyramid.py  Project: mwinding/scripts
def pyramid(img,
            top_level,
            min_width=32,
            ViewOutOfBounds=Views.extendBorder,
            interpolation_factory=NLinearInterpolatorFactory()):
  """
  Create an image pyramid as interpolated scaled views of the provided img.
  """
  imgR = Views.interpolate(ViewOutOfBounds(img), interpolation_factory)

  # Create levels of a pyramid as interpolated views
  width = img.dimension(0)
  pyramid = [img]
  scale = 1.0
  level_index = 1
  while width > min_width and level_index <= top_level:
    scale /= 2.0
    width /= 2
    s = [scale for d in xrange(img.numDimensions())]
    scaled = Views.interval(RealViews.transform(imgR, Scale(s)),
                            FinalInterval([int(img.dimension(d) * scale)
                                           for d in xrange(img.numDimensions())]))
    pyramid.append(scaled)
    level_index += 1 # for next iteration
  
  return pyramid


# TODO pyramidGauss a la Saalfeld
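A usage sketch building a four-level pyramid from the active image, with the imports the function above appears to rely on; the level count and the display loop are illustrative assumptions:

from ij import IJ
from net.imglib2 import FinalInterval
from net.imglib2.view import Views
from net.imglib2.realtransform import RealViews, Scale
from net.imglib2.interpolation.randomaccess import NLinearInterpolatorFactory
from net.imglib2.img.display.imagej import ImageJFunctions as IL

img = IL.wrap(IJ.getImage())
levels = pyramid(img, top_level=3)  # the original plus up to 3 halved levels
for i, level in enumerate(levels):
  IL.wrap(level, "level %i" % i).show()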
Example #24
sliceInterval = FinalInterval([interval2.dimension(0), interval2.dimension(1)])

slices2 = []
for index in xrange(img1.dimension(2)):
    # One single 2D RGB slice
    imgSlice1 = Views.hyperSlice(img1, 2, index)
    # Views of the 3 color channels, as extended and interpolatable
    channels = [
        Views.interpolate(
            Views.extendZero(Converters.argbChannel(imgSlice1, i)),
            NLinearInterpolatorFactory()) for i in [1, 2, 3]
    ]
    # ARGBType 2D view of the transformed color channels
    imgSlice2 = Converters.mergeARGB(
        Views.stack([Views.interval(RealViews.transform(channel, transform),
                                    sliceInterval)
                     for channel in channels]), ColorChannelOrder.RGB)
    slices2.append(imgSlice2)

# Transformed view
viewImg2 = Views.stack(slices2)
# Materialized image
img2 = ArrayImgs.argbs(Intervals.dimensionsAsLongArray(interval2))
ImgUtil.copy(viewImg2, img2)

imp4 = IL.wrap(img2, "imglib2-transformed RGB (pull)")
imp4.show()

# Fourth approach: pull (CORRECT!), and much faster (delegates pixel-wise operations
# to java libraries and delegates RGB color handling altogether)
Example #25
def maxCoords(img):
    return [
        int(img.dimension(d) * calibration[d] - 1)
        for d in xrange(img.numDimensions())
    ]


# Identity transform for CM00, scaled to isotropy
affine0 = AffineTransform3D()
affine0.identity()
affine0.concatenate(scale3D)

# Expand camera CM00 to isotropy
imgE = Views.extendZero(img0)
imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
imgT = RealViews.transform(imgI, affine0)
imgB0 = Views.interval(imgT, [0, 0, 0], maxCoords(img0))

# Transform camera CM01 to CM00: 180 degrees on Y axis, plus a translation in X
affine1 = AffineTransform3D()
affine1.set(-1.0, 0.0, 0.0, img1.dimension(0), 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,
            1.0, 0.0)
affine1.concatenate(scale3D)
imgE = Views.extendZero(img1)
imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
imgT = RealViews.transform(imgI, affine1)
imgB1 = Views.interval(imgT, [0, 0, 0], maxCoords(img1))

# Transform camera CM02 to CM00: 90 degrees on Y axis, plus a translation in Z
affine2 = AffineTransform3D()
affine2.set(0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, -1.0, 0.0, 0.0,
        resultFileName = '%s/result.tif' % home.rstrip('/')
        imp = ImageJFunctions.wrap( result, 'result' )
        IJ.saveAsTiff(imp.duplicate(), resultFileName)

        relativeResult = result.copy()
        c = relativeResult.cursor()
        while c.hasNext():
            c.fwd()
            cur = c.get()
            val = cur.get()
            cur.set( val - c.getDoublePosition( 2 ) )

        relativeResultFileName = '%s/relativeResult.tif' % home.rstrip('/')
        imp = ImageJFunctions.wrap( relativeResult, 'relative result' )
        IJ.saveAsTiff(imp.duplicate(), relativeResultFileName)

        ratio = [ wrappedImage.dimension( 0 )*1.0/result.dimension( 0 ), wrappedImage.dimension( 1 )*1.0/result.dimension( 1 ) ]
        shift = [ 0.0, 0.0 ]
        lutField = SingleDimensionLUTGrid(3, 3, result, 2, ratio, shift )

        transformed = Views.interval( Views.raster( RealViews.transformReal( Views.interpolate( Views.extendBorder( wrappedImage ), NLinearInterpolatorFactory() ), lutField ) ), wrappedImage )
        imp = ImageJFunctions.wrap( transformed, 'transformed' )
        transformedFileName = '%s/transformed.tif' % home.rstrip('/')
        IJ.saveAsTiff( imp.duplicate(), transformedFileName )
        
        # result = inference.estimateZCoordinates( 0, 0, startingCoordinates, matrixTracker, options )
Example #27
img0 = klb.readFull(paths[0])
img1 = klb.readFull(paths[1])
img2 = klb.readFull(paths[2])
img3 = klb.readFull(paths[3])

# Calibration: [1.0, 1.0, 5.0]
scale3D = AffineTransform3D()
scale3D.set(1.0, 0.0, 0.0, 0.0,
            0.0, 1.0, 0.0, 0.0,
            0.0, 0.0, 5.0, 0.0)

# Expand camera CM00 to isotropy
imgE = Views.extendZero(img0)
imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
imgT = RealViews.transform(imgI, scale3D)
imgB0 = Views.interval(imgT, [0, 0, 0], [img0.dimension(0) -1, img0.dimension(1) -1, img0.dimension(2) * 5 - 1])


# Transform camera CM01 to CM00: 180 degrees on Y axis, plus a translation
dx = -195
dy = 54
dz = 8
affine = AffineTransform3D()
affine.set(-1.0, 0.0, 0.0, img1.dimension(0) + dx,
            0.0, 1.0, 0.0, 0.0 + dy,
            0.0, 0.0, 1.0, 0.0 + dz)
affine.concatenate(scale3D)
imgE = Views.extendZero(img1)
imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
imgT = RealViews.transform(imgI, affine)
Example #28
affine.rotate(angle_rad)
affine.translate(center)
 
# Get the interpolator
interpolator = LanczosInterpolatorFactory()
 
# Iterate over all frames in the stack
axis = Axes.TIME
output = []
for d in range(data.dimension(axis)):
 
    # Get the current frame
    frame = crop_along_one_axis(ops, data, [d, d], "TIME")
 
    # Get the interpolate view of the frame
    extended = ops.run("transform.extendZeroView", frame)
    interpolant = ops.run("transform.interpolateView", extended, interpolator)
 
    # Apply the transformation to it
    rotated = RealViews.affine(interpolant, affine)
     
    # Set the intervals
    rotated = ops.transform().offset(rotated, frame)
 
    output.append(rotated)
 
output = Views.stack(output)

# Create output Dataset
output = ds.create(output)
Example #29
# imp = IJ.getImage()

# Access its pixel data as an ImgLib2 RandomAccessibleInterval
img = IL.wrapReal(imp2)

# View as an infinite image, with a value of zero beyond the image edges

imgE = Views.extendZero(img)

# View the pixel data as a RealRandomAccessible
# (that is, accessible with sub-pixel precision)
# by using an interpolator
imgR = Views.interpolate(imgE, NLinearInterpolatorFactory())

# Obtain a view of the 2D image twice as big
s = [2.0 for d in range(img.numDimensions())]  # as many 2.0 as image dimensions
bigger = RV.transform(imgR, Scale(s))

# Define the interval we want to see: the original image, enlarged by 2X
# E.g. from 0 to 2*width-1, from 0 to 2*height-1, etc. for every dimension
minC = [0 for d in range(img.numDimensions())]
maxC = [int(img.dimension(i) * scale) - 1 for i, scale in enumerate(s)]  # -1: the max coordinate is inclusive
imgI = Views.interval(bigger, minC, maxC)

# Visualize the bigger view
imp2x = IL.wrap(imgI, imp.getTitle() + " - 2X")  # an ImagePlus
imp2x.show()

FileSaver(imp2x).saveAsPng(str_out_png)
# create an empty image
phantom = ops.create().img([xSize, ySize, zSize])

# make phantom an ImgPlus
phantom = ops.create().imgPlus(phantom)

location = Point(phantom.numDimensions())
location.setPosition([xSize / 2, ySize / 2, zSize / 2])

hyperSphere = HyperSphere(phantom, location, 10)

for value in hyperSphere:
    value.setReal(100)

phantom.setName("phantom")

affine = AffineTransform3D()
affine.scale(1, 1, 0.4)

interpolatedImg = Views.interpolate(Views.extendZero(phantom),
                                    NLinearInterpolatorFactory())

phantom = Views.interval(
    Views.raster(RealViews.affine(interpolatedImg, affine)),
    Intervals.createMinMax(0, 0, 18, 255, 255, 82))

# make phantom an ImgPlus
phantom = ops.create().imgPlus(ops.copy().iterableInterval(
    Views.zeroMin(phantom)))
phantom.setName('phantom')
Example #31
affine.rotate(angle_rad)
affine.translate(center)

# Get the interpolator
interpolator = LanczosInterpolatorFactory()

# Iterate over all frames in the stack
axis = Axes.TIME
output = []
for d in range(data.dimension(axis)):

    # Get the current frame
    frame = crop_along_one_axis(ops, data, [d, d], "TIME")

    # Get the interpolate view of the frame
    extended = ops.run("transform.extendZeroView", frame)
    interpolant = ops.run("transform.interpolateView", extended, interpolator)

    # Apply the transformation to it
    rotated = RealViews.affine(interpolant, affine)

    # Set the intervals
    rotated = ops.transform().offsetView(rotated, frame)

    output.append(rotated)

output = Views.stack(output)

# Create output Dataset
output = ds.create(output)
Example #32
# Define a rotation by +30 degrees relative to the image center in the XY axes
angle = radians(30)
toCenter = AffineTransform3D()
cx = img.dimension(0) / 2.0  # X axis
cy = img.dimension(1) / 2.0  # Y axis
toCenter.setTranslation(-cx, -cy, 0.0)  # no translation in the Z axis
rotation = AffineTransform3D()
# Step 1: place origin of rotation at the center of the image
rotation.preConcatenate(toCenter)
# Step 2: rotate around the Z axis
rotation.rotate(2, angle)  # 2 is the Z axis, or 3rd dimension
# Step 3: undo translation to the center
rotation.preConcatenate(toCenter.inverse())  # undo translation to the center

# Define a rotated view of the image
rotated = RV.transform(imgR, rotation)

# View the image rotated, without enlarging the canvas
# so we define the interval (here, the field of view of an otherwise infinite image)
# as the original image dimensions by using "img", which in itself is an Interval.
imgRot2d = IL.wrap(Views.interval(rotated, img), imp.getTitle() + " - rot2d")
imgRot2d.show()

# View the image rotated, enlarging the interval to fit it.
# (This is akin to enlarging the canvas.)

# We define each corner of the nth-dimensional volume as a combination,
# namely the 'product' (think nested loop) of the pairs of possible values
# that each dimension can take in every corner coordinate, zipping each
# with the value zero (hence the repeat(0) to provide as many as necessary),
# and then unpacking the list of pairs by using the * in front of 'zip'
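# (A sketch of this corner computation appears at the end of this example.)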
#imgE = Views.extendValue(img, t)
# Easier:
imgE = Views.extendZero(img)

# Or view mirroring the data beyond the edges
#imgE = Views.extendMirrorSingle(img)

# View the pixel data as a RealRandomAccessible with the help of an interpolator
imgR = Views.interpolate(imgE, NLinearInterpolatorFactory())

print type(imgR)
print dir(imgR)

# Obtain a view of the 2D image twice as big
s = [2.0 for d in range(img.numDimensions())] # as many 2.0 as dimensions the image has
bigger = RV.transform(imgR, Scale(s))

# Obtain a rasterized view (with integer coordinates for its pixels)
# NOT NEEDED
#imgRA = Views.raster(bigger)

# Define the interval we want to see: the original image, enlarged by 2X
# E.g. from 0 to 2*width, from 0 to 2*height, etc. for every dimension
# Notice the -1 in maxC: the interval is inclusive of the largest coordinate.
minC = [0 for d in range(img.numDimensions())]
maxC = [int(img.dimension(i) * scale) -1 for i, scale in enumerate(s)]
imgI = Views.interval(bigger, minC, maxC)

# Visualize the bigger view
imp2x = IL.wrap(imgI, imp.getTitle() + " - 2X") # an ImagePlus
imp2x.show()
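A sketch of the enlarged-canvas interval described in the comments above, following the same corner-product bounds computation as rotatedView earlier in this section; it reuses img, imp, rotation and rotated from this example, and the extra imports are assumptions:

import sys
from math import floor, ceil
from itertools import product, repeat
from jarray import zeros
from net.imglib2.util import Intervals

n = img.numDimensions()
bounds = repeat((sys.maxint, 0))  # initial (min, max) seeds to compare against
transformed = zeros(n, 'f')
# Visit every corner of the original interval and track the rotated extremes
for corner in product(*zip(repeat(0), Intervals.maxAsLongArray(img))):
  rotation.apply(corner, transformed)
  bounds = [(min(vmin, int(floor(v))), max(vmax, int(ceil(v))))
            for (vmin, vmax), v in zip(bounds, transformed)]
minC, maxC = map(list, zip(*bounds))  # transpose into min and max coordinate lists
imgRot2dEnlarged = IL.wrap(Views.zeroMin(Views.interval(rotated, minC, maxC)),
                           imp.getTitle() + " - rot2d enlarged")
imgRot2dEnlarged.show()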