def rotatedView(img, angle, enlarge=True, extend=Views.extendBorder):
    """ Return a rotated view of the image, around the Z axis,
        with an expanded (or reduced) interval view so that all pixels are exactly included.

        img: a RandomAccessibleInterval
        angle: in degrees
        enlarge: when True, grow the interval to the bounding box of the
                 rotated corners; when False, keep the original interval.
        extend: out-of-bounds strategy used before interpolating
                (defaults to repeating the border pixel).
    """
    # Center of rotation: the middle of the XY plane
    cx = img.dimension(0) / 2.0
    cy = img.dimension(1) / 2.0
    toCenter = AffineTransform2D()
    toCenter.translate(-cx, -cy)
    rotation = AffineTransform2D()
    # Step 1: place origin of rotation at the center of the image
    rotation.preConcatenate(toCenter)
    # Step 2: rotate around the Z axis
    rotation.rotate(radians(angle))
    # Step 3: undo translation to the center
    rotation.preConcatenate(toCenter.inverse())
    # Extended + interpolated source viewed through the rotation
    rotated = RV.transform(Views.interpolate(extend(img),
                                             NLinearInterpolatorFactory()),
                           rotation)
    if enlarge:
        # Bounds:
        bounds = repeat((sys.maxint, 0))  # initial upper- and lower-bound values
                                          # for min, max to compare against
        transformed = zeros(2, 'f')  # scratch array receiving each rotated corner
        # Visit every corner of the image interval: the cartesian product of
        # (0, max) per dimension
        for corner in product(*zip(repeat(0), Intervals.maxAsLongArray(img))):
            rotation.apply(corner, transformed)
            # Track floor of the smallest and ceil of the largest coordinate per axis
            bounds = [(min(vmin, int(floor(v))), max(vmax, int(ceil(v))))
                      for (vmin, vmax), v in zip(bounds, transformed)]
        minC, maxC = map(list, zip(*bounds))  # transpose list of 2 pairs
                                              # into 2 lists of 2 values
        imgRot = Views.zeroMin(Views.interval(rotated, minC, maxC))
    else:
        # Keep the original field of view: an Img is also an Interval
        imgRot = Views.interval(rotated, img)
    return imgRot
def asNormalizedUnsignedByteArrayImg(interval, invert, blockRadius, n_bins, slope, matrices, copy_threads, index, imp): sp = imp.getProcessor() # ShortProcessor sp.setRoi(interval.min(0), interval.min(1), interval.max(0) - interval.min(0) + 1, interval.max(1) - interval.min(1) + 1) sp = sp.crop() if invert: sp.invert() CLAHE.run( ImagePlus("", sp), blockRadius, n_bins, slope, None ) # far less memory requirements than NormalizeLocalContrast, and faster. minimum, maximum = autoAdjust(sp) # Transform and convert image to 8-bit, mapping to display range img = ArrayImgs.unsignedShorts( sp.getPixels(), [sp.getWidth(), sp.getHeight()]) sp = None affine = AffineTransform2D() affine.set(matrices[index]) imgI = Views.interpolate(Views.extendZero(img), NLinearInterpolatorFactory()) imgA = RealViews.transform(imgI, affine) imgT = Views.zeroMin(Views.interval(imgA, img)) imgMinMax = convert(imgT, RealUnsignedByteConverter(minimum, maximum), UnsignedByteType) aimg = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img)) ImgUtil.copy(ImgView.wrap(imgMinMax, aimg.factory()), aimg, copy_threads) img = imgI = imgA = imgT = imgMinMax = None return aimg
def translatedView(img, matrix):
    """ Return a view of the 3D img translated by the inverse of the
        translation components of the given flat 3D affine matrix.

        img: a 3D RandomAccessibleInterval.
        matrix: flat 3x4 affine matrix; entries 3, 7, 11 are the
                x, y, z translation.
    """
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    # In negative: the inverse
    t = Translation3D(-matrix[3], -matrix[7], -matrix[11])
    imgT = RealViews.transform(imgI, t)
    # BUG FIX: the interval max is inclusive in ImgLib2, so the max bound is
    # dimension - 1; without the -1 the view gains one extra zero-padded
    # plane per dimension.
    return Views.interval(imgT, [0, 0, 0],
                          [img.dimension(d) - 1 for d in xrange(3)])
def transformedView(img, matrix):
    """ Return a view of the 3D img under the inverse of the given forward
        affine transform.

        img: a 3D RandomAccessibleInterval.
        matrix: flat 3x4 affine matrix describing a forward transform.
    """
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    aff = AffineTransform3D()
    aff.set(*matrix)
    # It's a forward transform: invert for use with RealViews.transform
    aff = aff.inverse()
    imgT = RealViews.transform(imgI, aff)
    # BUG FIX: the interval max is inclusive in ImgLib2, so the max bound is
    # dimension - 1; without the -1 the view gains one extra zero-padded
    # plane per dimension.
    return Views.interval(imgT, [0, 0, 0],
                          [img.dimension(d) - 1 for d in xrange(3)])
def viewTransformed(img, affine):
    """ View img through the given affine transform, bounded to the
        original image dimensions. """
    # Pad with zeros beyond the edges, then interpolate for subpixel access
    interpolated = Views.interpolate(Views.extendZero(img),
                                     NLinearInterpolatorFactory())
    transformed = RealViews.transform(interpolated, affine)
    # Bound to the source dimensions; the max coordinate is inclusive
    n_dims = img.numDimensions()
    return Views.interval(transformed,
                          [0, 0, 0],
                          [img.dimension(d) - 1 for d in xrange(n_dims)])
def get(self, path):
    """ Load the image at path, view it under its associated transform,
        and crop to the ROI (or to the image's own bounds when no ROI is
        set), returning an Img when self.asImg is True. """
    transform = self.transformsDict[path]
    img = self.loader.get(path)
    # Zero-padded, interpolated, transformed view
    viewT = RealViews.transform(
        Views.interpolate(Views.extendZero(img), NLinearInterpolatorFactory()),
        transform)
    if self.roi:
        minC, maxC = self.roi[0], self.roi[1]
    else:
        minC = [0] * img.numDimensions()
        maxC = [img.dimension(d) - 1 for d in xrange(img.numDimensions())]
    bounded = Views.zeroMin(Views.interval(viewT, minC, maxC))
    return ImgView.wrap(bounded, img.factory()) if self.asImg else bounded
def transformedView(img, transform, interval=None):
    """ Return a view of img under the given transform.

        img: a RandomAccessibleInterval.
        transform: a transform accepted by RealViews.transform.
        interval: optional interval bounding the returned view; defaults
                  to the bounds of img itself.
    """
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    imgT = RealViews.transform(imgI, transform)
    if interval:
        return Views.interval(imgT, interval)
    # Generalized: the min bound was hard-coded to 3D ([0, 0, 0]) while the
    # max used numDimensions(); build both from numDimensions() so the
    # min/max ranks always agree (identical behavior for 3D images).
    n = img.numDimensions()
    return Views.interval(imgT,
                          [0] * n,
                          [img.dimension(d) - 1 for d in xrange(n)])
def viewTransformed(img, matrix):
    """ View img under the inverse of the given forward affine matrix,
        bounded to the same dimensions as img. """
    forward = AffineTransform3D()
    forward.set(*matrix)
    # It's a forward transform: invert
    inverse = forward.inverse()
    source = Views.interpolate(Views.extendZero(img),
                               NLinearInterpolatorFactory())
    # Same dimensions: an Img is also an Interval
    return Views.interval(RealViews.transform(source, inverse), img)
def makeCell(self, index):
    """ Build the Cell at the given index: load its image, apply the
        matching 2D affine, and copy the bounded view into a fresh image. """
    self.preloadCells(index)  # preload others in the background
    img = self.loadImg(self.filepaths[index])
    transform = AffineTransform2D()
    transform.set(self.matrices[index])
    # Zero-padded, linearly interpolated, transformed, zero-based bounded view
    view = Views.zeroMin(
        Views.interval(
            RealViews.transform(
                Views.interpolate(Views.extendZero(img),
                                  NLinearInterpolatorFactory()),
                transform),
            self.interval))
    target = img.factory().create(self.interval)
    ImgUtil.copy(ImgView.wrap(view, target.factory()), target)
    return Cell(self.cell_dimensions, [0, 0, index], target.update(None))
def viewTransformed(img, calibration, transform):
    """ View img transformed to isotropy (via the calibration) and
        transformed by the affine.

        img: a 3D RandomAccessibleInterval.
        calibration: per-axis scaling factors [sx, sy, sz].
        transform: an AffineTransform3D (concatenated with the scaling) or
                   any other transform (applied after a Scale3D).
    """
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    # IDIOM FIX: use isinstance rather than an exact type() comparison, so
    # subclasses of AffineTransform3D also take the single concatenated
    # transform path.
    if isinstance(transform, AffineTransform3D):
        scale3d = AffineTransform3D()
        scale3d.set(calibration[0], 0, 0, 0,
                    0, calibration[1], 0, 0,
                    0, 0, calibration[2], 0)
        affine = transform.copy()
        affine.concatenate(scale3d)
        imgT = RealViews.transform(imgI, affine)
    else:
        # Two chained transforms: scale to isotropy, then the given transform
        imgT = RealViews.transform(imgI, Scale3D(*calibration))
        imgT = RealViews.transform(imgT, transform)
    # dimensions: the interval max is inclusive, hence the -1
    minC = [0, 0, 0]
    maxC = [int(img.dimension(d) * cal) - 1
            for d, cal in enumerate(calibration)]
    return Views.interval(imgT, minC, maxC)
def scale3D(img, x=1.0, y=1.0, z=1.0):
    """ Return a view of the 3D img scaled by the given per-axis factors,
        bounded to the rounded scaled dimensions. """
    factors = [x, y, z]
    affine = AffineTransform3D()
    affine.set(x, 0, 0, 0,
               0, y, 0, 0,
               0, 0, z, 0)
    scaled = RealViews.transform(
        Views.interpolate(Views.extendZero(img), NLinearInterpolatorFactory()),
        affine)
    # Round each scaled dimension to nearest integer; max bound is inclusive
    top = [int(img.dimension(d) * k + 0.5) - 1
           for d, k in enumerate(factors)]
    return Views.interval(scaled, [0, 0, 0], top)
def scale(img, calibration):
    """ Return a view of the 3D img scaled per-axis by the calibration
        factors, bounded to the truncated scaled dimensions. """
    transform = AffineTransform3D()
    transform.set(calibration[0], 0, 0, 0,
                  0, calibration[1], 0, 0,
                  0, 0, calibration[2], 0)
    interpolated = Views.interpolate(Views.extendZero(img),
                                     NLinearInterpolatorFactory())
    scaled = RealViews.transform(interpolated, transform)
    # Truncate each scaled dimension; the max bound is inclusive
    top = [int(img.dimension(d) * cal) - 1
           for d, cal in enumerate(calibration)]
    return Views.interval(scaled, [0, 0, 0], top)
def asNormalizedUnsignedByteArrayImg(interval, invert, blockRadius, n_bins, slope, matrices, index, imp): sp = imp.getProcessor() # ShortProcessor # Crop to interval if needed x = interval.min(0) y = interval.min(1) width = interval.max(0) - interval.min(0) + 1 height = interval.max(1) - interval.min(1) + 1 if 0 != x or 0 != y or sp.getWidth() != width or sp.getHeight( ) != height: sp.setRoi(x, y, width, height) sp = sp.crop() if invert: sp.invert() CLAHE.run( ImagePlus("", sp), blockRadius, n_bins, slope, None ) # far less memory requirements than NormalizeLocalContrast, and faster. minimum, maximum = autoAdjust(sp) # Transform and convert image to 8-bit, mapping to display range img = ArrayImgs.unsignedShorts( sp.getPixels(), [sp.getWidth(), sp.getHeight()]) sp = None imp = None # Must use linear interpolation for subpixel precision affine = AffineTransform2D() affine.set(matrices[index]) imgI = Views.interpolate(Views.extendZero(img), NLinearInterpolatorFactory()) imgA = RealViews.transform(imgI, affine) imgT = Views.zeroMin(Views.interval(imgA, img)) # Convert to 8-bit imgMinMax = convert2(imgT, RealUnsignedByteConverter(minimum, maximum), UnsignedByteType, randomAccessible=False) # use IterableInterval aimg = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img)) # ImgUtil copies multi-threaded, which is not appropriate here as there are many other images being copied too #ImgUtil.copy(ImgView.wrap(imgMinMax, aimg.factory()), aimg) # Single-threaded copy copier = createBiConsumerTypeSet(UnsignedByteType) LoopBuilder.setImages(imgMinMax, aimg).forEachPixel(copier) img = imgI = imgA = imgMinMax = imgT = None return aimg
def viewTransformed(img, calibration, affine):
    """ View img corrected to isotropy via the calibration and then
        transformed by the given affine. """
    # Correct calibration
    toIsotropy = AffineTransform3D()
    toIsotropy.set(calibration[0], 0, 0, 0,
                   0, calibration[1], 0, 0,
                   0, 0, calibration[2], 0)
    combined = affine.copy()
    combined.concatenate(toIsotropy)
    view = RealViews.transform(
        Views.interpolate(Views.extendZero(img), NLinearInterpolatorFactory()),
        combined)
    # Calibrated dimensions; the max bound is inclusive
    top = [int(img.dimension(d) * cal) - 1
           for d, cal in enumerate(calibration)]
    return Views.interval(view, [0, 0, 0], top)
def get(self, path):
    """ Load the full KLB volume at path and return it viewed under the
        inverse of its per-path forward transform, concatenated with the
        scaling to calibrated, isotropic space.

        NOTE(review): relies on 'scale3d' and 'calibration' from an
        enclosing scope -- confirm both are defined where this class lives.
    """
    img = self.klb.readFull(path)
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    affine = AffineTransform3D()
    affine.set(self.transforms[path])
    affine = affine.inverse()  # it's a forward transform: must invert
    affine.concatenate(scale3d)  # calibrated space: isotropic
    imgT = RealViews.transform(imgI, affine)
    minC = [0, 0, 0]
    # Calibrated dimensions; the interval max is inclusive, hence the -1
    maxC = [int(img.dimension(d) * cal) - 1
            for d, cal in enumerate(calibration)]
    imgB = Views.interval(imgT, minC, maxC)
    # View a RandomAccessibleInterval as an Img, required by Load.lazyStack
    return ImgView.wrap(imgB, img.factory())
def twoStep(index=0):
    """ Transform the image at filepaths[index] in two materialized steps:
        first the coarse isotropic transform cropped to the ROI, then the
        fine post-crop transform prepared for deconvolution; shows the result.

        NOTE(review): relies on script globals (klb, filepaths,
        cmIsotropicTransforms, roi, fineTransformsPostROICrop, affine3D,
        prepareImgForDeconvolution) -- confirm they are defined in this script.
    """
    # The current way:
    img = klb.readFull(filepaths[index])  # klb_loader.get(filepaths[index])
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    imgT = RealViews.transform(imgI, cmIsotropicTransforms[index])
    imgB = Views.zeroMin(Views.interval(imgT, roi[0], roi[1]))  # bounded: crop with ROI
    # Materialize step 1 into an ArrayImg
    imgBA = ArrayImgs.unsignedShorts(Intervals.dimensionsAsLongArray(imgB))
    ImgUtil.copy(ImgView.wrap(imgB, imgBA.factory()), imgBA)
    # Step 2: fine transform (inverted: it's a forward transform)
    imgP = prepareImgForDeconvolution(
        imgBA,
        affine3D(fineTransformsPostROICrop[index]).inverse(),
        FinalInterval([0, 0, 0], [imgB.dimension(d) - 1 for d in xrange(3)]))
    # Copy transformed view into ArrayImg for best performance in deconvolution
    imgA = ArrayImgs.floats(Intervals.dimensionsAsLongArray(imgP))
    ImgUtil.copy(ImgView.wrap(imgP, imgA.factory()), imgA)
    IL.wrap(imgA, "two step").show()
def viewTransformed(img, calibration, matrix):
    """ View img under the inverse of the given forward affine matrix,
        after scaling to isotropy with the calibration. """
    forward = AffineTransform3D()
    forward.set(*matrix)
    # It's a forward transform: invert
    transform = forward.inverse()
    # Correct calibration
    toIsotropy = AffineTransform3D()
    toIsotropy.set(calibration[0], 0, 0, 0,
                   0, calibration[1], 0, 0,
                   0, 0, calibration[2], 0)
    transform.concatenate(toIsotropy)
    view = RealViews.transform(
        Views.interpolate(Views.extendZero(img), NLinearInterpolatorFactory()),
        transform)
    # Calibrated dimensions; the max bound is inclusive
    top = [int(img.dimension(d) * cal) - 1
           for d, cal in enumerate(calibration)]
    return Views.interval(view, [0, 0, 0], top)
def viewTransformed(image, transformation, title=None, interval=None, show=True):
    """ View image (an ImagePlus or a RandomAccessibleInterval) under the
        given transformation, bounded to interval (defaults to the image's
        own bounds), optionally showing it in an ImageJ window.

        Returns the bounded transformed view, or None for unsupported inputs.
    """
    if isinstance(image, ImagePlus):
        img = IL.wrap(image)  # ImagePlus to ImgLib2 RandomAccessibleInterva & IterableInterval aka Img
    elif isinstance(image, RandomAccessibleInterval):
        img = image
    else:
        return None
    # Make the image be defined anywhere by infinitely padding with zeros.
    imgInfinite = Views.extendZero(img)
    # Make the image be defined at arbitrarily precise subpixel coordinates
    # by using n-dimensional linear interpolation
    imgInterpolated = Views.interpolate(imgInfinite, NLinearInterpolatorFactory())
    # Make the image be seen as a transformed view of the source image
    imgTransformed = RealViews.transform(imgInterpolated, transformation)
    # Define an interval within which we want the transformed image to be defined
    # (such as that of the source img itself; an img in ImgLib2 also happens to be an Interval
    # and can therefore be used as an interval, which is convenient here because we
    # expect the original field of view--the interval--to be where image data can still be found)
    interval = interval if interval else img  # every Img is also an Interval because each Img is bounded
    # Make the image finite by defining it as the content within the interval
    imgBounded = Views.interval(imgTransformed, interval)  # same as original
    # Optionally show the transformed, bounded image in an ImageJ VirtualStack
    # (Note that anytime one of the VirtualStack's ImageProcessor will have to
    # update its pixel data, it will incur in executing the transformation again;
    # no pixel data is cached or copied anywhere other than for display purposes)
    if show:
        # BUG FIX: the original read 'imp.getTitle()' before 'imp' was
        # assigned on the following line, raising UnboundLocalError whenever
        # title was None. Derive the fallback title from the input instead.
        if not title:
            title = image.getTitle() if isinstance(image, ImagePlus) else "transformed"
        imp = IL.wrap(imgBounded, title)  # as an ImagePlus
        imp.show()  # in an ImageJ ImageWindow
    return imgBounded
def pyramid(img, top_level, min_width=32,
            ViewOutOfBounds=Views.extendBorder,
            interpolation_factory=NLinearInterpolatorFactory()):
    """ Create an image pyramid as interpolated scaled views of the provided img.

        img: the level-0 image.
        top_level: maximum number of downscaled levels to create.
        min_width: stop once a level's width would fall at or below this.
        ViewOutOfBounds: out-of-bounds strategy applied before interpolating.
        interpolation_factory: interpolation used for the scaled views.

        Returns the list of levels, starting with img itself.
    """
    # BUG FIX: a comma was missing between the ViewOutOfBounds and
    # interpolation_factory keyword parameters, which is a SyntaxError.
    imgR = Views.interpolate(ViewOutOfBounds(img), interpolation_factory)
    # Create levels of a pyramid as interpolated views
    width = img.dimension(0)
    pyramid = [img]
    scale = 1.0
    level_index = 1
    while width > min_width and level_index <= top_level:
        scale /= 2.0
        width /= 2
        s = [scale for d in xrange(img.numDimensions())]
        scaled = Views.interval(
            RealViews.transform(imgR, Scale(s)),
            FinalInterval([int(img.dimension(d) * scale)
                           for d in xrange(img.numDimensions())]))
        pyramid.append(scaled)
        level_index += 1  # for next iteration
    return pyramid

# TODO pyramidGauss a la Saalfeld
# Per-slice 2D interval shared by every transformed slice
sliceInterval = FinalInterval([interval2.dimension(0), interval2.dimension(1)])
slices2 = []
for index in xrange(img1.dimension(2)):
    # One single 2D RGB slice
    imgSlice1 = Views.hyperSlice(img1, 2, index)
    # Views of the 3 color channels, as extended and interpolatable
    channels = [
        Views.interpolate(
            Views.extendZero(Converters.argbChannel(imgSlice1, i)),
            NLinearInterpolatorFactory()) for i in [1, 2, 3]
    ]
    # ARGBType 2D view of the transformed color channels
    # NOTE(review): a generator expression is handed to Views.stack --
    # confirm the Jython/ImgLib2 overload accepts an iterable here.
    imgSlice2 = Converters.mergeARGB(
        Views.stack(
            Views.interval(RealViews.transform(channel, transform), sliceInterval)
            for channel in channels), ColorChannelOrder.RGB)
    slices2.append(imgSlice2)

# Transformed view
viewImg2 = Views.stack(slices2)
# Materialized image
img2 = ArrayImgs.argbs(Intervals.dimensionsAsLongArray(interval2))
ImgUtil.copy(viewImg2, img2)
imp4 = IL.wrap(img2, "imglib2-transformed RGB (pull)")
imp4.show()

# Fourth approach: pull (CORRECT!), and much faster (delegates pixel-wise operations
# to java libraries and delegates RGB color handling altogether)
def maxCoords(img): return [ int(img.dimension(d) * calibration[d] - 1) for d in xrange(img.numDimensions()) ] # Identity transform for CM00, scaled to isotropy affine0 = AffineTransform3D() affine0.identity() affine0.concatenate(scale3D) # Expand camera CM00 to isotropy imgE = Views.extendZero(img0) imgI = Views.interpolate(imgE, NLinearInterpolatorFactory()) imgT = RealViews.transform(imgI, affine0) imgB0 = Views.interval(imgT, [0, 0, 0], maxCoords(img0)) # Transform camera CM01 to CM00: 180 degrees on Y axis, plus a translation in X affine1 = AffineTransform3D() affine1.set(-1.0, 0.0, 0.0, img1.dimension(0), 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0) affine1.concatenate(scale3D) imgE = Views.extendZero(img1) imgI = Views.interpolate(imgE, NLinearInterpolatorFactory()) imgT = RealViews.transform(imgI, affine1) imgB1 = Views.interval(imgT, [0, 0, 0], maxCoords(img1)) # Transform camera CM02 to CM00: 90 degrees on Y axis, plus a translation in Z affine2 = AffineTransform3D() affine2.set(0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, -1.0, 0.0, 0.0,
img = IL.wrap(IJ.getImage())  # wrap the active ImageJ image as an ImgLib2 Img

pyramid = [img]  # level 0 is the image itself

# Create levels of a pyramid with interpolation
width = img.dimension(0)
min_width = 32
# Scale by half; the -0.25 translation keeps pixel centers aligned when halving
s = [0.5 for d in xrange(img.numDimensions())]
t = [-0.25 for d in xrange(img.numDimensions())]
while width > min_width:
    width /= 2
    imgE = Views.interpolate(Views.extendBorder(img),
                             NLinearInterpolatorFactory())
    # A scaled-down view of the imgR
    level = Views.interval(
        RealViews.transform(imgE, ScaleAndTranslation(s, t)),
        FinalInterval([
            int(img.dimension(d) * 0.5) for d in xrange(img.numDimensions())
        ]))
    # Create a new image for this level
    scaledImg = img.factory().create(level)  # of dimensions as of level
    ImgUtil.copy(level, scaledImg)  # copy the scaled down view into scaledImg
    pyramid.append(scaledImg)
    # Prepare for next iteration
    img = scaledImg  # for the dimensions of the level in the next iteration

for i, imgScaled in enumerate(pyramid):
    IL.wrap(imgScaled, str(i + 1)).show()
# Load the four camera volumes
img0 = klb.readFull(paths[0])
img1 = klb.readFull(paths[1])
img2 = klb.readFull(paths[2])
img3 = klb.readFull(paths[3])

# Calibration: [1.0, 1.0, 5.0]
scale3D = AffineTransform3D()
scale3D.set(1.0, 0.0, 0.0, 0.0,
            0.0, 1.0, 0.0, 0.0,
            0.0, 0.0, 5.0, 0.0)

# Expand camera CM00 to isotropy
imgE = Views.extendZero(img0)
imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
imgT = RealViews.transform(imgI, scale3D)
# Z dimension grows by the calibration factor of 5; max bounds are inclusive
imgB0 = Views.interval(imgT, [0, 0, 0],
                       [img0.dimension(0) - 1,
                        img0.dimension(1) - 1,
                        img0.dimension(2) * 5 - 1])

# Transform camera CM01 to CM00: 180 degrees on Y axis, plus a translation
dx = -195
dy = 54
dz = 8
affine = AffineTransform3D()
affine.set(-1.0, 0.0, 0.0, img1.dimension(0) + dx,
           0.0, 1.0, 0.0, 0.0 + dy,
           0.0, 0.0, 1.0, 0.0 + dz)
affine.concatenate(scale3D)  # apply calibration first, then the mirror+shift
imgE = Views.extendZero(img1)
imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
imgT = RealViews.transform(imgI, affine)
# Center both extended images within the common dims3 interval
img1M = Views.interval(
    img1E, [(dim1 - dim3) / 2 for dim1, dim3 in izip(dims1, dims3)],
    [dim1 + (dim3 - dim1) / 2 - 1 for dim1, dim3 in izip(dims1, dims3)])
img2M = Views.interval(
    img2E, [(dim2 - dim3) / 2 for dim2, dim3 in izip(dims2, dims3)],
    [dim2 + (dim3 - dim2) / 2 - 1 for dim2, dim3 in izip(dims2, dims3)])
IL.show(img1M, "img1M")
IL.show(img2M, "img2M")

# Scale by half (too slow otherwise) -- ERROR: the smaller one (img1) doesn't remain centered.
s = [0.5 for d in xrange(img1.numDimensions())]
img1s = Views.interval(
    RealViews.transform(
        Views.interpolate(Views.extendValue(img1M, zero),
                          NLinearInterpolatorFactory()), Scale(s)),
    [0 for d in xrange(img1M.numDimensions())], [
        int(img1M.dimension(d) / 2.0 + 0.5) - 1
        for d in xrange(img1M.numDimensions())
    ])
img2s = Views.interval(
    RealViews.transform(
        Views.interpolate(Views.extendValue(img2M, zero),
                          NLinearInterpolatorFactory()), Scale(s)),
    [0 for d in xrange(img2M.numDimensions())], [
        int(img2M.dimension(d) / 2.0 + 0.5) - 1
        for d in xrange(img2M.numDimensions())
    ])
# simplify var names
# imp = IJ.getImage()
# Access its pixel data as an ImgLib2 RandomAccessibleInterval
img = IL.wrapReal(imp2)

# View as an infinite image, with a value of zero beyond the image edges
imgE = Views.extendZero(img)

# View the pixel data as a RealRandomAccessible
# (that is, accessible with sub-pixel precision)
# by using an interpolator
imgR = Views.interpolate(imgE, NLinearInterpolatorFactory())

# Obtain a view of the 2D image twice as big
s = [2.0 for d in range(img.numDimensions())]  # as many 2.0 as image dimensions
bigger = RV.transform(imgR, Scale(s))

# Define the interval we want to see: the original image, enlarged by 2X
# E.g. from 0 to 2*width, from 0 to 2*height, etc. for every dimension
minC = [0 for d in range(img.numDimensions())]
# BUG FIX: the interval max is inclusive in ImgLib2, so subtract 1; without
# it the view includes one extra row/column of padded zeros.
maxC = [int(img.dimension(i) * scale) - 1 for i, scale in enumerate(s)]
imgI = Views.interval(bigger, minC, maxC)

# Visualize the bigger view
imp2x = IL.wrap(imgI, imp.getTitle() + " - 2X")  # an ImagePlus
imp2x.show()
FileSaver(imp2x).saveAsPng(str_out_png)
# Define a rotation by +30 degrees relative to the image center in the XY axes angle = radians(30) toCenter = AffineTransform3D() cx = img.dimension(0) / 2.0 # X axis cy = img.dimension(1) / 2.0 # Y axis toCenter.setTranslation(-cx, -cy, 0.0) # no translation in the Z axis rotation = AffineTransform3D() # Step 1: place origin of rotation at the center of the image rotation.preConcatenate(toCenter) # Step 2: rotate around the Z axis rotation.rotate(2, angle) # 2 is the Z axis, or 3rd dimension # Step 3: undo translation to the center rotation.preConcatenate(toCenter.inverse()) # undo translation to the center # Define a rotated view of the image rotated = RV.transform(imgR, rotation) # View the image rotated, without enlarging the canvas # so we define the interval (here, the field of view of an otherwise infinite image) # as the original image dimensions by using "img", which in itself is an Interval. imgRot2d = IL.wrap(Views.interval(rotated, img), imp.getTitle() + " - rot2d") imgRot2d.show() # View the image rotated, enlarging the interval to fit it. # (This is akin to enlarging the canvas.) # We define each corner of the nth-dimensional volume as a combination, # namely the 'product' (think nested loop) of the pairs of possible values # that each dimension can take in every corner coordinate, zipping each # with the value zero (hence the repeat(0) to provide as many as necessary), # and then unpacking the list of pairs by using the * in front of 'zip'
#imgE = Views.extendValue(img, t) # Easier: imgE = Views.extendZero(img) # Or view mirroring the data beyond the edges #imgE = Views.extendMirrorSingle(img) # View the pixel data as a RealRandomAccessible with the help of an interpolator imgR = Views.interpolate(imgE, NLinearInterpolatorFactory()) print type(imgR) print dir(imgR) # Obtain a view of the 2D image twice as big s = [2.0 for d in range(img.numDimensions())] # as many 2.0 as dimensions the image has bigger = RV.transform(imgR, Scale(s)) # Obtain a rasterized view (with integer coordinates for its pixels) # NOT NEEDED #imgRA = Views.raster(bigger) # Define the interval we want to see: the original image, enlarged by 2X # E.g. from 0 to 2*width, from 0 to 2*height, etc. for every dimension # Notice the -1 in maxC: the interval is inclusive of the largest coordinate. minC = [0 for d in range(img.numDimensions())] maxC = [int(img.dimension(i) * scale) -1 for i, scale in enumerate(s)] imgI = Views.interval(bigger, minC, maxC) # Visualize the bigger view imp2x = IL.wrap(imgI, imp.getTitle() + " - 2X") # an ImagePlus imp2x.show()