Example #1
    def asNormalizedUnsignedByteArrayImg(interval, invert, blockRadius, n_bins,
                                         slope, matrices, copy_threads, index,
                                         imp):
        sp = imp.getProcessor()  # ShortProcessor
        sp.setRoi(interval.min(0), interval.min(1),
                  interval.max(0) - interval.min(0) + 1,
                  interval.max(1) - interval.min(1) + 1)
        sp = sp.crop()
        if invert:
            sp.invert()
        CLAHE.run(
            ImagePlus("", sp), blockRadius, n_bins, slope, None
        )  # needs far less memory than NormalizeLocalContrast, and is faster.
        minimum, maximum = autoAdjust(sp)

        # Transform and convert image to 8-bit, mapping to display range
        img = ArrayImgs.unsignedShorts(
            sp.getPixels(), [sp.getWidth(), sp.getHeight()])
        sp = None
        affine = AffineTransform2D()
        affine.set(matrices[index])
        imgI = Views.interpolate(Views.extendZero(img),
                                 NLinearInterpolatorFactory())
        imgA = RealViews.transform(imgI, affine)
        imgT = Views.zeroMin(Views.interval(imgA, img))
        imgMinMax = convert(imgT, RealUnsignedByteConverter(minimum, maximum),
                            UnsignedByteType)
        aimg = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img))
        ImgUtil.copy(ImgView.wrap(imgMinMax, aimg.factory()), aimg,
                     copy_threads)
        img = imgI = imgA = imgT = imgMinMax = None
        return aimg
def translatedView(img, matrix):
  imgE = Views.extendZero(img)
  imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
  # In negative: the inverse
  t = Translation3D(-matrix[3], -matrix[7], -matrix[11])
  imgT = RealViews.transform(imgI, t)
  return Views.interval(imgT, [0, 0, 0], [img.dimension(d) for d in xrange(3)])
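A minimal usage sketch for translatedView (hypothetical values, assuming an open 3D stack and the ImgLib2 imports used throughout these snippets):

from ij import IJ
from net.imglib2.img.display.imagej import ImageJFunctions as IL

img = IL.wrap(IJ.getImage())  # the current 3D stack as a RandomAccessibleInterval
# Flat, row-major 3x4 affine matrix; translatedView only reads the
# translation entries at indices 3, 7 and 11.
matrix = [1.0, 0.0, 0.0,  5.0,
          0.0, 1.0, 0.0, -3.0,
          0.0, 0.0, 1.0,  0.0]
IL.wrap(translatedView(img, matrix), "translated").show()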
Example #3
def translate_single_stack_using_imglib2(imp, dx, dy, dz):
  # wrap into a float imglib2 and translate
  #   conversion into float is necessary due to "overflow of n-linear interpolation due to accuracy limits of unsigned bytes"
  #   see: https://github.com/fiji/fiji/issues/136#issuecomment-173831951
  img = ImagePlusImgs.from(imp.duplicate())
  extended = Views.extendZero(img)
  converted = Converters.convert(extended, RealFloatSamplerConverter())
  interpolant = Views.interpolate(converted, NLinearInterpolatorFactory())
  
  # translate
  if imp.getNDimensions()==3:
    transformed = RealViews.affine(interpolant, Translation3D(dx, dy, dz))
  elif imp.getNDimensions()==2:
    transformed = RealViews.affine(interpolant, Translation2D(dx, dy))
  else:
    IJ.log("Can only work on 2D or 3D stacks")
    return None
  
  cropped = Views.interval(transformed, img)
  # wrap back into bit depth of input image and return
  bd = imp.getBitDepth()
  if bd==8:
    return(ImageJFunctions.wrapUnsignedByte(cropped,"imglib2"))
  elif bd == 16:
    return(ImageJFunctions.wrapUnsignedShort(cropped,"imglib2"))
  elif bd == 32:
    return(ImageJFunctions.wrapFloat(cropped,"imglib2"))
  else:
    return None    
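A hypothetical usage sketch, assuming an open 2D or 3D image; the function returns an ImagePlus wrapper around the translated view, or None for an unsupported dimensionality or bit depth:

from ij import IJ

shifted = translate_single_stack_using_imglib2(IJ.getImage(), 2.5, -1.0, 0.5)
if shifted is not None:
    shifted.show()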
Example #4
def viewTransformed(img, affine):
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    imgT = RealViews.transform(imgI, affine)
    minC = [0, 0, 0]
    maxC = [img.dimension(d) - 1 for d in xrange(img.numDimensions())]
    imgB = Views.interval(imgT, minC, maxC)
    return imgB
def transformedView(img, matrix):
  imgE = Views.extendZero(img)
  imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
  aff = AffineTransform3D()
  aff.set(*matrix)
  aff = aff.inverse()
  imgT = RealViews.transform(imgI, aff)
  return Views.interval(imgT, [0, 0, 0], [img.dimension(d) for d in xrange(3)])
Example #6
 def updatePixels(self):
     # Copy interval into pixels
     view = Views.interval(
         Views.extendZero(Views.hyperSlice(self.img3D, 2, self.indexZ)),
         self.interval2D)
     aimg = ArrayImgs.floats(
         self.getPixels(),
         [self.interval2D.dimension(0),
          self.interval2D.dimension(1)])
     ImgUtil.copy(view, aimg)
Example #7
 def get(self, path):
   transform = self.transformsDict[path]
   img = self.loader.get(path)
   imgE = Views.extendZero(img)
   imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
   imgT = RealViews.transform(imgI, transform)
   minC = self.roi[0] if self.roi else [0] * img.numDimensions()
   maxC = self.roi[1] if self.roi else [img.dimension(d) -1 for d in xrange(img.numDimensions())]
   imgO = Views.zeroMin(Views.interval(imgT, minC, maxC))
   return ImgView.wrap(imgO, img.factory()) if self.asImg else imgO
Example #8
def transformedView(img, transform, interval=None):
    """ """
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    imgT = RealViews.transform(imgI, transform)
    if interval:
        return Views.interval(imgT, interval)
    else:
        return Views.interval(
            imgT, [0, 0, 0],
            [img.dimension(d) - 1 for d in xrange(img.numDimensions())])
Example #9
def viewTransformed(img, matrix):
    affine = AffineTransform3D()
    affine.set(*matrix)
    # It's a forward transform: invert
    affine = affine.inverse()
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    imgT = RealViews.transform(imgI, affine)
    # Same dimensions
    imgB = Views.interval(imgT, img)
    return imgB
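A usage sketch with a hypothetical forward matrix (a 2x scaling in X, given as a flat, row-major 3x4 array) that viewTransformed inverts internally; assumes an open 3D stack:

from ij import IJ
from net.imglib2.img.display.imagej import ImageJFunctions as IL

img = IL.wrap(IJ.getImage())
forward = [2.0, 0.0, 0.0, 0.0,
           0.0, 1.0, 0.0, 0.0,
           0.0, 0.0, 1.0, 0.0]
IL.wrap(viewTransformed(img, forward), "transformed").show()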
Example #10
 def makeCell(self, index):
     self.preloadCells(index)  # preload others in the background
     img = self.loadImg(self.filepaths[index])
     affine = AffineTransform2D()
     affine.set(self.matrices[index])
     imgI = Views.interpolate(Views.extendZero(img),
                              NLinearInterpolatorFactory())
     imgA = RealViews.transform(imgI, affine)
     imgT = Views.zeroMin(Views.interval(imgA, self.interval))
     aimg = img.factory().create(self.interval)
     ImgUtil.copy(ImgView.wrap(imgT, aimg.factory()), aimg)
     return Cell(self.cell_dimensions, [0, 0, index], aimg.update(None))
Example #11
def scale3D(img, x=1.0, y=1.0, z=1.0):
  scale3d = AffineTransform3D()
  scale3d.set(x, 0, 0, 0,
              0, y, 0, 0,
              0, 0, z, 0)
  imgE = Views.extendZero(img)
  imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
  imgT = RealViews.transform(imgI, scale3d)
  # dimensions
  minC = [0, 0, 0]
  maxC = [int(img.dimension(d) * k + 0.5) -1 for d, k in enumerate([x, y, z])]
  imgB = Views.interval(imgT, minC, maxC)
  return imgB
def scale(img, calibration):
    scale3d = AffineTransform3D()
    scale3d.set(calibration[0], 0, 0, 0, 0, calibration[1], 0, 0, 0, 0,
                calibration[2], 0)
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    imgT = RealViews.transform(imgI, scale3d)
    # dimensions
    minC = [0, 0, 0]
    maxC = [
        int(img.dimension(d) * cal) - 1 for d, cal in enumerate(calibration)
    ]
    imgB = Views.interval(imgT, minC, maxC)
    return imgB
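For example, a stack acquired at 0.5 µm in XY and 2 µm in Z could be viewed in isotropic space by passing the relative calibration (hypothetical values, assuming an open 3D stack):

from ij import IJ
from net.imglib2.img.display.imagej import ImageJFunctions as IL

img = IL.wrap(IJ.getImage())
isotropic = scale(img, [1.0, 1.0, 4.0])  # stretch Z by 4x (2 µm / 0.5 µm)
IL.wrap(isotropic, "isotropic").show()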
Example #13
    def asNormalizedUnsignedByteArrayImg(interval, invert, blockRadius, n_bins,
                                         slope, matrices, index, imp):
        sp = imp.getProcessor()  # ShortProcessor
        # Crop to interval if needed
        x = interval.min(0)
        y = interval.min(1)
        width = interval.max(0) - interval.min(0) + 1
        height = interval.max(1) - interval.min(1) + 1
        if 0 != x or 0 != y or sp.getWidth() != width or sp.getHeight() != height:
            sp.setRoi(x, y, width, height)
            sp = sp.crop()

        if invert:
            sp.invert()

        CLAHE.run(
            ImagePlus("", sp), blockRadius, n_bins, slope, None
        )  # needs far less memory than NormalizeLocalContrast, and is faster.
        minimum, maximum = autoAdjust(sp)

        # Transform and convert image to 8-bit, mapping to display range
        img = ArrayImgs.unsignedShorts(
            sp.getPixels(), [sp.getWidth(), sp.getHeight()])
        sp = None
        imp = None
        # Must use linear interpolation for subpixel precision
        affine = AffineTransform2D()
        affine.set(matrices[index])
        imgI = Views.interpolate(Views.extendZero(img),
                                 NLinearInterpolatorFactory())
        imgA = RealViews.transform(imgI, affine)
        imgT = Views.zeroMin(Views.interval(imgA, img))
        # Convert to 8-bit
        imgMinMax = convert2(imgT,
                             RealUnsignedByteConverter(minimum, maximum),
                             UnsignedByteType,
                             randomAccessible=False)  # use IterableInterval
        aimg = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img))
        # ImgUtil copies multi-threaded, which is not appropriate here as there are many other images being copied too
        #ImgUtil.copy(ImgView.wrap(imgMinMax, aimg.factory()), aimg)

        # Single-threaded copy
        copier = createBiConsumerTypeSet(UnsignedByteType)
        LoopBuilder.setImages(imgMinMax, aimg).forEachPixel(copier)

        img = imgI = imgA = imgMinMax = imgT = None
        return aimg
 def get(self, path):
     img = self.klb.readFull(path)
     imgE = Views.extendZero(img)
     imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
     affine = AffineTransform3D()
     affine.set(self.transforms[path])
     affine = affine.inverse()  # it's a forward transform: must invert
     affine.concatenate(scale3d)  # calibrated space: isotropic
     imgT = RealViews.transform(imgI, affine)
     minC = [0, 0, 0]
     maxC = [
         int(img.dimension(d) * cal) - 1
         for d, cal in enumerate(calibration)
     ]
     imgB = Views.interval(imgT, minC, maxC)
     # View a RandomAccessibleInterval as an Img, required by Load.lazyStack
     return ImgView.wrap(imgB, img.factory())
def viewTransformed(img, calibration, affine):
  """ View img transformed to isotropy (via the calibration)
      and transformed by the affine. """
  scale3d = AffineTransform3D()
  scale3d.set(calibration[0], 0, 0, 0,
              0, calibration[1], 0, 0,
              0, 0, calibration[2], 0)
  transform = affine.copy()
  transform.concatenate(scale3d)
  imgE = Views.extendZero(img)
  imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
  imgT = RealViews.transform(imgI, transform)
  # dimensions
  minC = [0, 0, 0]
  maxC = [int(img.dimension(d) * cal) -1 for d, cal in enumerate(calibration)]
  imgB = Views.interval(imgT, minC, maxC)
  return imgB
def twoStep(index=0):
    # The current way:
    img = klb.readFull(filepaths[index])  # klb_loader.get(filepaths[index])
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    imgT = RealViews.transform(imgI, cmIsotropicTransforms[index])
    imgB = Views.zeroMin(Views.interval(imgT, roi[0],
                                        roi[1]))  # bounded: crop with ROI
    imgBA = ArrayImgs.unsignedShorts(Intervals.dimensionsAsLongArray(imgB))
    ImgUtil.copy(ImgView.wrap(imgB, imgBA.factory()), imgBA)
    imgP = prepareImgForDeconvolution(
        imgBA,
        affine3D(fineTransformsPostROICrop[index]).inverse(),
        FinalInterval([0, 0, 0], [imgB.dimension(d) - 1 for d in xrange(3)]))
    # Copy transformed view into ArrayImg for best performance in deconvolution
    imgA = ArrayImgs.floats(Intervals.dimensionsAsLongArray(imgP))
    ImgUtil.copy(ImgView.wrap(imgP, imgA.factory()), imgA)
    IL.wrap(imgA, "two step").show()
Example #17
def viewTransformed(img, calibration, matrix):
  affine = AffineTransform3D()
  affine.set(*matrix)
  # It's a forward transform: invert
  affine = affine.inverse()
  # Correct calibration
  scale3d = AffineTransform3D()
  scale3d.set(calibration[0], 0, 0, 0,
              0, calibration[1], 0, 0,
              0, 0, calibration[2], 0)
  affine.concatenate(scale3d)
  imgE = Views.extendZero(img)
  imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
  imgT = RealViews.transform(imgI, affine)
  # dimensions
  minC = [0, 0, 0]
  maxC = [int(img.dimension(d) * cal) -1 for d, cal in enumerate(calibration)]
  imgB = Views.interval(imgT, minC, maxC)
  return imgB
Example #18
def viewTransformed(image,
                    transformation,
                    title=None,
                    interval=None,
                    show=True):
    if isinstance(image, ImagePlus):
        # Wrap the ImagePlus as an ImgLib2 RandomAccessibleInterval & IterableInterval, aka Img
        img = IL.wrap(image)
    elif isinstance(image, RandomAccessibleInterval):
        img = image
    else:
        return None
    # Make the image be defined anywhere by infinitely padding with zeros.
    imgInfinite = Views.extendZero(img)
    # Make the image be defined at arbitrarily precise subpixel coordinates
    # by using n-dimensional linear interpolation
    imgInterpolated = Views.interpolate(imgInfinite,
                                        NLinearInterpolatorFactory())
    # Make the image be seen as a transformed view of the source image
    imgTransformed = RealViews.transform(imgInterpolated, transformation)
    # Define an interval within which we want the transformed image to be defined
    # (such as that of the source img itself; an img in ImgLib2 also happens to be an Interval
    # and can therefore be used as an interval, which is convenient here because we
    # expect the original field of view--the interval--to be where image data can still be found)
    interval = interval if interval else img  # every Img is also an Interval because each Img is bounded
    # Make the image finite by defining it as the content within the interval
    imgBounded = Views.interval(imgTransformed, interval)  # same as original
    # Optionally show the transformed, bounded image in an ImageJ VirtualStack
    # (Note that whenever one of the VirtualStack's ImageProcessors has to
    # update its pixel data, the transformation is executed again;
    # no pixel data is cached or copied anywhere other than for display purposes)
    if show:
        # Use the source ImagePlus title when available, otherwise a default
        title = title if title else (image.getTitle()
                                     if isinstance(image, ImagePlus) else "transformed")
        imp = IL.wrap(imgBounded, title)  # as an ImagePlus
        imp.show()  # in an ImageJ ImageWindow
    return imgBounded
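A hypothetical call that shifts the current 2D image by (20, 10) pixels and shows the result (Translation2D is only one possible RealTransform to pass in):

from ij import IJ
from net.imglib2.realtransform import Translation2D

viewTransformed(IJ.getImage(), Translation2D(20.0, 10.0), title="shifted")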
Example #19
def transformedView(img, matrix):
  imgE = Views.extendZero(img)
Example #20
def run():
    t_start = datetime.now()
    image_paths = glob(os.path.join(str(import_dir.getPath()), '*tif'))

    print '\tread image metadata'
    reader = ImageReader()
    in_meta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(in_meta)

    x_dims = []
    y_dims = []
    z_dims = []
    c_dims = []
    t_dims = []
    eff = []
    spp = []

    for image_path in image_paths:
        print '\t  parse %s' % (image_path)
        reader.setId(image_path)
        x_dims.append(reader.getSizeX())
        y_dims.append(reader.getSizeY())
        z_dims.append(reader.getSizeZ())
        c_dims.append(reader.getSizeC())
        t_dims.append(reader.getSizeT())
        eff.append(reader.imageCount / z_dims[-1] / t_dims[-1])
        spp.append(reader.getSizeC() / eff[-1])

    format = FormatTools.getPixelTypeString(reader.getPixelType())
    series = reader.getSeries()
    big_endian = Boolean.FALSE
    order = reader.getDimensionOrder()
    reader.close()

    # Compute the dimensions of the output file
    x_dim = max(x_dims)
    y_dim = max(y_dims)
    z_dim = max(z_dims)
    c_dim = max(c_dims)
    t_dim = max(t_dims)

    print '\t  series: %i' % series
    print '\t  format: %s' % format
    print '\t  dimension order: %s' % order
    print '\t  x: %s -> %i' % (x_dims, x_dim)
    print '\t  y: %s -> %i' % (y_dims, y_dim)
    print '\t  z: %s -> %i' % (z_dims, z_dim)
    print '\t  c: %s -> %i' % (c_dims, c_dim)
    print '\t  t: %s -> %i' % (t_dims, t_dim)
    print '\t  effective size c: %s' % eff
    print '\t  samples per pixel: %s' % spp

    # Get the time dimension from the number of input files
    t_dim = len(image_paths)

    # TODO: Tried to work out the order with Axes class, got something weird though.
    dimensions = [Short(x_dim), Short(y_dim), Short(c_dim), Short(z_dim)]

    pixels_per_plane = x_dim * y_dim

    # Assemble the metadata for the output file
    out_meta = MetadataTools.createOMEXMLMetadata()
    out_meta.setImageID(MetadataTools.createLSID('Image', series), series)
    out_meta.setPixelsID(MetadataTools.createLSID('Pixels', series), series)
    out_meta.setPixelsBinDataBigEndian(Boolean.TRUE, 0, 0)
    out_meta.setPixelsDimensionOrder(DimensionOrder.fromString(order), series)
    out_meta.setPixelsType(PixelType.fromString(format), series)
    out_meta.setPixelsSizeX(PositiveInteger(x_dim), series)
    out_meta.setPixelsSizeY(PositiveInteger(y_dim), series)
    out_meta.setPixelsSizeZ(PositiveInteger(z_dim), series)
    out_meta.setPixelsSizeC(PositiveInteger(c_dim), series)
    out_meta.setPixelsSizeT(PositiveInteger(t_dim), series)

    for c in range(c_dim):
        out_meta.setChannelID(MetadataTools.createLSID('Channel', series, c),
                              series, c)
        out_meta.setChannelSamplesPerPixel(PositiveInteger(1), series, c)

    # Initialize the BF writer
    result_path = os.path.join(result_dir.getPath(), result_name)
    writer = ImageWriter()
    writer.setMetadataRetrieve(out_meta)
    writer.setId(result_path)
    print '\tcreated %s' % (result_path)

    # Write the stacks into the output file
    N = len(image_paths)
    for i, image_path in enumerate(image_paths):
        status.showStatus(i, N, "catenating %i of %i time-points" % (i, N))
        print '\t  processing %s' % (image_path)
        ds = io.open(image_path)
        xi = ds.dimensionIndex(Axes.X)
        xv = ds.dimension(xi)
        yi = ds.dimensionIndex(Axes.Y)
        yv = ds.dimension(yi)
        zi = ds.dimensionIndex(Axes.Z)
        zv = ds.dimension(zi)
        ti = ds.dimensionIndex(Axes.TIME)
        tv = ds.dimension(ti)
        ci = ds.dimensionIndex(Axes.CHANNEL)
        cv = ds.dimension(ci)

        dx = float(x_dim - xv) / 2.0
        dy = float(y_dim - yv) / 2.0
        dz = float(z_dim - zv) / 2.0
        print '\t     translation vector (dx, dy, dz) = (%f, %f, %f)' % (
            dx, dy, dz)

        if (dx != 0) or (dy != 0) or (dz != 0):
            stk = Views.translate(ds, long(dx), long(dy), long(0), long(dz))
            stk = Views.extendZero(stk)
        else:
            stk = Views.extendZero(ds.getImgPlus().getImg())

        print '\t     writing planes ',
        n = 0
        plane = 1
        byte_array = []
        interval_view = Views.interval(stk, \
                                       [Long(0), Long(0), Long(0), Long(0)], \
                                       [Long(x_dim - 1), Long(y_dim - 1), Long(c_dim - 1), Long(z_dim - 1)])
        cursor = interval_view.cursor()
        while cursor.hasNext():
            n += 1
            cursor.fwd()
            value = cursor.get().getInteger()
            bytes = DataTools.shortToBytes(value, big_endian)
            byte_array.extend(bytes)

            if n == pixels_per_plane:
                writer.saveBytes(plane - 1, byte_array)

                print '.',
                if ((plane) % 10) == 0:
                    print '\n\t                    ',

                byte_array = []
                plane += 1
                n = 0

        print ' '

    writer.close()
    t = datetime.now() - t_start
    print '\twrote %i planes to %s in %i sec.' % (plane - 1, result_path,
                                                  t.total_seconds())
    print '... done.'
Example #21
from net.imglib2.img.display.imagej import ImageJFunctions as IL
from net.imglib2.img.array import ArrayImgs
from net.imglib2.type.numeric.real import FloatType
from net.imglib2.view import Views
from net.imglib2.util import Intervals
from net.imglib2.algorithm.math.ImgMath import compute, add, offset
from ij import IJ

imp = IJ.getImage()  # an 8-bit image
img = IL.wrap(imp)

# Create the integral image of an 8-bit input, stored as 64-bit
target = ArrayImgs.unsignedLongs(Intervals.dimensionsAsLongArray(img))
# Copy input onto the target image
compute(img).into(target)
# Extend target with zeros, so that we can read at coordinate -1
imgE = Views.extendZero(target)
# Integrate every dimension, cumulatively, by writing into
# a target image that is also the input
for d in xrange(img.numDimensions()):
    coord = [0] * img.numDimensions()  # array of zeros
    coord[d] = -1
    # Cumulative sum along the current dimension
    # Note that instead of the ImgMath offset op,
    # we could have used Views.translate(Views.extendZero(target), [1, 0])
    # (Notice though the sign change in the translation)
    integral = add(target, offset(imgE, coord))
    compute(integral).into(target)

# The target is the integral image
integralImg = target
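Once built, the integral image gives the sum over any box in constant time by inclusion-exclusion; a minimal 2D sketch, assuming the 8-bit input above is a 2D image:

def boxSum(integralE, x0, y0, x1, y1):
    # Sum over the box from (x0, y0) to (x1, y1), both inclusive, read from
    # the zero-extended integral image:
    # S = I(x1,y1) - I(x0-1,y1) - I(x1,y0-1) + I(x0-1,y0-1)
    ra = integralE.randomAccess()
    def read(x, y):
        ra.setPosition(x, 0)
        ra.setPosition(y, 1)
        return ra.get().get()
    return (read(x1, y1) - read(x0 - 1, y1)
            - read(x1, y0 - 1) + read(x0 - 1, y0 - 1))

print boxSum(Views.extendZero(integralImg), 10, 10, 19, 19)  # sum of a 10x10 box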
Example #22
width = img.dimension(0)  # same as imp.getWidth()
height = img.dimension(1)  # same as imp.getHeight()

# from half an image beyond 0,0 (to the left and up) to half an image beyond width,height
imgExtended = Views.interval(extendedView,
                             [-width / 2, -height / 2],
                             [width + width / 2 - 1, height + height / 2 - 1])  # RandomAccessibleInterval

IL.show(imgExtended, "enlarged canvas with extended mirror symmetry")

# The viewing interval doesn't have to overlap with the interval where the original image is defined
# For example:

imgSomewhere = Views.interval(extendedView, [41000, 60000], [42000, 61000])

IL.show(imgSomewhere, "Arbitrary interval somewhere")

# Other forms of extended views:
extendedEmptyView = Views.extendZero(img)
extendedValueView = Views.extendValue(img, 50)

# Find out the pixel type and its min and max values
t = img.randomAccess().get()
min_value = t.getMinValue()  # minimum value for the pixel type
max_value = t.getMaxValue()  # maximum
extendedRandomView = Views.extendRandom(img, min_value, max_value)
imgExtRandom = Views.interval(extendedRandomView, [-width / 2, -height / 2],
                              [width + width / 2, height + height / 2])

IL.show(imgExtRandom, "extended with random noise")
# create an empty image
phantom = ops.create().img([xSize, ySize, zSize])

# make phantom an ImgPlus
phantom = ops.create().imgPlus(phantom)

location = Point(phantom.numDimensions())
location.setPosition([xSize / 2, ySize / 2, zSize / 2])

hyperSphere = HyperSphere(phantom, location, 10)

for value in hyperSphere:
    value.setReal(100)

phantom.setName("phantom")

affine = AffineTransform3D()
affine.scale(1, 1, 0.4)

interpolatedImg = Views.interpolate(Views.extendZero(phantom),
                                    NLinearInterpolatorFactory())

phantom = Views.interval(
    Views.raster(RealViews.affine(interpolatedImg, affine)),
    Intervals.createMinMax(0, 0, 18, 255, 255, 82))

# make phantom an ImgPlus
phantom = ops.create().imgPlus(ops.copy().iterableInterval(
    Views.zeroMin(phantom)))
phantom.setName('phantom')
Example #24

def maxCoords(img):
    return [
        int(img.dimension(d) * calibration[d] - 1)
        for d in xrange(img.numDimensions())
    ]


# Identity transform for CM00, scaled to isotropy
affine0 = AffineTransform3D()
affine0.identity()
affine0.concatenate(scale3D)

# Expand camera CM00 to isotropy
imgE = Views.extendZero(img0)
imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
imgT = RealViews.transform(imgI, affine0)
imgB0 = Views.interval(imgT, [0, 0, 0], maxCoords(img0))

# Transform camera CM01 to CM00: 180 degrees on Y axis, plus a translation in X
affine1 = AffineTransform3D()
affine1.set(-1.0, 0.0, 0.0, img1.dimension(0), 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,
            1.0, 0.0)
affine1.concatenate(scale3D)
imgE = Views.extendZero(img1)
imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
imgT = RealViews.transform(imgI, affine1)
imgB1 = Views.interval(imgT, [0, 0, 0], maxCoords(img1))

# Transform camera CM02 to CM00: 90 degrees on Y axis, plus a translation in Z
    ReadOnlyCachedCellImgOptions.options().volatileAccesses(
        True).cellDimensions(cell_dimensions))

# View the image as an ImageJ ImagePlus with an underlying VirtualStack
IL.wrap(cachedCellImg, "sections").show()

# Now show a UI that enables moving a window around a data set
from net.imglib2.view import Views
from net.imglib2 import FinalInterval
from jarray import array
from java.awt.event import KeyAdapter, KeyEvent
from net.imglib2.img.display.imagej import ImageJVirtualStack

mins = array([1307, 448, 0], 'l')
maxs = array([1307 + 976 - 1, 448 + 732 - 1, len(filepaths) - 1], 'l')
imgE = Views.extendZero(cachedCellImg)
crop = Views.interval(imgE, FinalInterval(mins, maxs))
imp = IL.wrap(crop, "sections crop")  # ImagePlus
imp.show()

# Once shown, a reference to the ij.gui.ImageWindow exists
win = imp.getWindow()

# Remove and store key listeners from the ImageCanvas
kls = win.getCanvas().getKeyListeners()
for kl in kls:
    win.getCanvas().removeKeyListener(kl)

stack = imp.getStack()  # a net.imglib2.img.display.imagej.ImageJVirtualStackUnsignedByte
fsource = ImageJVirtualStack.getDeclaredField("source")
Example #26
interval2 = FinalInterval([
    int(img1.dimension(0) * scale),
    int(img1.dimension(1) * scale),
    img1.dimension(2)
])
# Interval of a single stack slice of the target image
sliceInterval = FinalInterval([interval2.dimension(0), interval2.dimension(1)])

slices2 = []
for index in xrange(img1.dimension(2)):
    # One single 2D RGB slice
    imgSlice1 = Views.hyperSlice(img1, 2, index)
    # Views of the 3 color channels, as extended and interpolatable
    channels = [
        Views.interpolate(
            Views.extendZero(Converters.argbChannel(imgSlice1, i)),
            NLinearInterpolatorFactory()) for i in [1, 2, 3]
    ]
    # ARGBType 2D view of the transformed color channels
    imgSlice2 = Converters.mergeARGB(
        Views.stack(
            Views.interval(RealViews.transform(channel, transform),
                           sliceInterval)
            for channel in channels), ColorChannelOrder.RGB)
    slices2.append(imgSlice2)

# Transformed view
viewImg2 = Views.stack(slices2)
# Materialized image
img2 = ArrayImgs.argbs(Intervals.dimensionsAsLongArray(interval2))
ImgUtil.copy(viewImg2, img2)
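The snippet assumes img1 (an ARGB image), a scale factor and a 2D transform defined earlier; for illustration, the latter two could be, for instance (hypothetical values):

from net.imglib2.realtransform import AffineTransform2D

scale = 0.5  # e.g. downscale to half size, matching interval2 above
transform = AffineTransform2D()
transform.set(scale, 0.0, 0.0,
              0.0, scale, 0.0)  # uniform 2D scaling, row-major 2x3 matrix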
Example #27
from ij import IJ
from net.imglib2.img.display.imagej import ImageJFunctions as IL
from net.imglib2.view import Views
from net.imglib2.interpolation.randomaccess import NLinearInterpolatorFactory
from net.imglib2.realtransform import AffineTransform3D
from net.imglib2.util import Intervals
from math import radians, floor, ceil
from jarray import zeros
from pprint import pprint

# Load an image (of any dimensions)
imp = IJ.getImage()

# Access its pixel data as an ImgLib2 RandomAccessibleInterval
img = IL.wrapReal(imp)

# View as an infinite image, with value zero beyond the image edges
imgE = Views.extendZero(img)

# View the pixel data as a RealRandomAccessible
# (that is, accessible with sub-pixel precision)
# by using an interpolator
imgR = Views.interpolate(imgE, NLinearInterpolatorFactory())

# Define a rotation by +30 degrees relative to the image center in the XY axes
angle = radians(30)
toCenter = AffineTransform3D()
cx = img.dimension(0) / 2.0  # X axis
cy = img.dimension(1) / 2.0  # Y axis
toCenter.setTranslation(-cx, -cy, 0.0)  # no translation in the Z axis
rotation = AffineTransform3D()
# Step 1: place origin of rotation at the center of the image
rotation.preConcatenate(toCenter)
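A sketch of how this rotate-about-the-center pattern typically continues (not part of the snippet above; assumes a 3D stack, with RealViews from net.imglib2.realtransform):

from net.imglib2.realtransform import RealViews

# Step 2: rotate around the Z axis (axis index 2)
rotation.rotate(2, angle)
# Step 3: undo the translation to the center
rotation.preConcatenate(toCenter.inverse())
# Apply to the interpolated view and bound it to the original interval
rotated = Views.interval(RealViews.transform(imgR, rotation),
                         [0, 0, 0],
                         [img.dimension(d) - 1 for d in xrange(3)])
IL.wrap(rotated, imp.getTitle() + " rotated").show()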
Example #28
from ij import IJ
from net.imglib2.img.display.imagej import ImageJFunctions as IJF
from net.imglib2.view import Views

from jarray import zeros

from net.imglib2 import FinalInterval
imp = IJ.getImage()
image = IJF.wrap(imp)

min = zeros(image.numDimensions(), 'l')
max = zeros(image.numDimensions(), 'l')
min[image.numDimensions() - 1] = 0
max[image.numDimensions() - 1] = image.dimension(image.numDimensions() - 1) - 1
min[image.numDimensions() - 2] = 0
max[image.numDimensions() - 2] = image.dimension(image.numDimensions() - 2) - 1
for d in range(0, image.numDimensions() - 2):

    min[d] = -10
    max[d] = image.dimension(d) + 10

interval = FinalInterval(min, max)
print(interval)
infinite = Views.extendZero(image)
IJF.show(Views.interval(infinite, interval))