def mergeTransforms(calibration, matrices1, roi, matrices2, invert2=False):
  """
  calibration: a sequence like e.g. [1.0, 1.0, 5.0].
  matrices1: sequence of one-dimensional arrays of 12 numbers, each describing
             a 3D affine that was computed from the scaled images
             (according to the calibration).
  roi: a two-dimensional sequence, with the minimum coordinates at index 0
       and the maximum coordinates at index 1.
  matrices2: sequence of one-dimensional arrays of 12 numbers, each describing
             a 3D affine that applies after the translation introduced
             by the ROI is accounted for.
  Returns a list of AffineTransform3D, each expressing the combined
  scaling (by calibration) + transform + translation + transform.
  """
  # Scale to isotropy
  scale3D = AffineTransform3D()
  scale3D.set(calibration[0], 0.0, 0.0, 0.0,
              0.0, calibration[1], 0.0, 0.0,
              0.0, 0.0, calibration[2], 0.0)
  # Translate to the ROI origin of coordinates
  roi_translation = affine3D([1, 0, 0, -roi[0][0],
                              0, 1, 0, -roi[0][1],
                              0, 0, 1, -roi[0][2]])
  transforms = []
  for m1, m2 in izip(matrices1, matrices2):
    aff = AffineTransform3D()
    aff.set(*m1)
    aff.concatenate(scale3D)
    aff.preConcatenate(roi_translation)
    aff.preConcatenate(affine3D(m2).inverse() if invert2 else affine3D(m2))
    transforms.append(aff)
  return transforms

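# A minimal usage sketch for mergeTransforms (hypothetical values; assumes
# AffineTransform3D, affine3D and izip are available as in the snippets here).
# Two views, each with a coarse affine estimated on the calibrated images plus
# a fine affine that applies after cropping to the ROI.
identity = [1, 0, 0, 0,
            0, 1, 0, 0,
            0, 0, 1, 0]
example_calibration = [1.0, 1.0, 5.0]
example_roi = ([10, 20, 0], [409, 519, 99])  # min and max coordinates
combined = mergeTransforms(example_calibration,
                           [identity, identity],  # matrices1: one per view
                           example_roi,
                           [identity, identity],  # matrices2: one per view
                           invert2=True)
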
def asBackwardConcatTransforms(matrices, transformclass=AffineTransform3D):
  """ Transforms are img1 -> img2, and we want the opposite: so invert each.
      Also, each image was registered to the previous, so must concatenate
      all previous transforms. """
  # Special case for speed
  if transformclass == Translation3D:
    tx, ty, tz = 0.0, 0.0, 0.0
    translations = []
    for matrix in matrices:
      # Subtract: for a pure translation, same as inverting
      tx -= matrix[3]
      ty -= matrix[7]
      tz -= matrix[11]
      translations.append(Translation3D(tx, ty, tz))
    return translations
  # Else, use AffineTransform3D
  aff_previous = transformclass()
  # It's puzzling that AffineTransform3D is not initialized to identity
  aff_previous.identity()  # set to identity
  affines = [aff_previous]  # first image at index 0 gets the identity
  for matrix in matrices[1:]:  # skip index zero
    aff = AffineTransform3D()
    aff.set(*matrix)
    aff = aff.inverse()  # the transform defines img1 -> img2; we want the opposite
    aff.preConcatenate(aff_previous)  # make relative to the prior image
    affines.append(aff)  # store
    aff_previous = aff  # for the next iteration
  return affines

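# A worked sketch of the Translation3D fast path above: for pure translations
# the inverse is a sign flip and concatenation is a running sum. Hypothetical
# row-packed matrices with translations (0,0,0), (2,0,0) and (1,3,0):
matrices = [[1, 0, 0, 0,  0, 1, 0, 0,  0, 0, 1, 0],
            [1, 0, 0, 2,  0, 1, 0, 0,  0, 0, 1, 0],
            [1, 0, 0, 1,  0, 1, 0, 3,  0, 0, 1, 0]]
translations = asBackwardConcatTransforms(matrices, transformclass=Translation3D)
# translations[2] holds (-3.0, -3.0, 0.0): the accumulated, negated shifts.
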
def asBackwardAffineTransforms(matrices):
  """ Transforms are img1 -> img2, and we want the opposite: so invert each.
      Also, each image was registered to the previous, so must concatenate
      all previous transforms. """
  aff_previous = AffineTransform3D()
  aff_previous.identity()  # set to identity
  affines = [aff_previous]  # first image at index 0
  for matrix in matrices[1:]:  # skip zero
    aff = AffineTransform3D()
    aff.set(*matrix)
    aff = aff.inverse()  # the transform defines img1 -> img2; we want the opposite
    aff.preConcatenate(aff_previous)  # make relative to the prior image
    affines.append(aff)  # store
    aff_previous = aff  # for the next iteration
  return affines

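# The same hypothetical matrices through the general AffineTransform3D path:
# each affine in the result maps image i back into the space of image 0.
# With the translations used above, affines[2] amounts to a shift of (-3, -3, 0):
# the inverse of (2,0,0) composed with the inverse of (1,3,0).
affines = asBackwardAffineTransforms(matrices)
print affines[2]  # expect a translation column of -3.0, -3.0, 0.0
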
def scopeMove(button, zoomFactor):
  transform = AffineTransform3D()
  # Get the current transform
  v.getState().getViewerTransform(transform)
  # Get the canvas and its size
  canvas = v.getDisplay()
  width = canvas.getWidth()
  height = canvas.getHeight()
  # Calculate the center of the view (in data coordinates)
  centerX = (transform.get(0, 3) - width/2) / float(transform.get(0, 0))
  centerY = (transform.get(1, 3) - height/2) / float(transform.get(1, 1))
  centerZ = transform.get(2, 3)
  # Zoom in
  if button == "y":
    n = 1 + zoomFactor / float(100)
  # Zoom out
  elif button == "x":
    n = 1 - zoomFactor / float(100)
  # Scale the transform
  transform.scale(n)
  transform.set(transform.get(0, 0), 0, 0, centerX * float(transform.get(0, 0)) + width/2,
                0, transform.get(1, 1), 0, centerY * float(transform.get(1, 1)) + height/2,
                0, 0, 1, centerZ)
  # Apply the transform
  v.setCurrentViewerTransform(transform)

def move2D(axisval, dir):
  transform = AffineTransform3D()
  # Get the current transform
  v.getState().getViewerTransform(transform)  # global transform
  # Get the canvas and its size
  canvas = v.getDisplay()
  width = canvas.getWidth()
  height = canvas.getHeight()
  # Translation deltas
  dx = 0
  dy = 0
  # Horizontal
  if dir == 'h':
    dx = axisval
  # Vertical
  if dir == 'v':
    dy = axisval
  # 'increment' is a global step size defined elsewhere in the script
  transform.set(transform.get(0, 0), 0, 0, transform.get(0, 3) - increment * dx,
                0, transform.get(1, 1), 0, transform.get(1, 3) - increment * dy,
                0, 0, 1, transform.get(2, 3))
  v.setCurrentViewerTransform(transform)

def scopeMoveChair(axisval, zoomFactor):  # NOT USED IN THIS VERSION
  transform = AffineTransform3D()
  # Get the current transform
  v.getState().getViewerTransform(transform)
  # Get the canvas and its size
  canvas = v.getDisplay()
  width = canvas.getWidth()
  height = canvas.getHeight()
  # Calculate the center of the view (in data coordinates)
  centerX = (transform.get(0, 3) - width/2) / float(transform.get(0, 0))
  centerY = (transform.get(1, 3) - height/2) / float(transform.get(1, 1))
  centerZ = transform.get(2, 3)
  # Zoom in/out
  n = 1 + axisval * zoomFactor / float(100)
  # Scale the transform
  transform.scale(n)
  transform.set(transform.get(0, 0), 0, 0, centerX * float(transform.get(0, 0)) + width/2,
                0, transform.get(1, 1), 0, centerY * float(transform.get(1, 1)) + height/2,
                0, 0, 1, centerZ)
  # Apply the transform
  v.setCurrentViewerTransform(transform)

def transformedView(img, matrix):
  imgE = Views.extendZero(img)
  imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
  aff = AffineTransform3D()
  aff.set(*matrix)
  aff = aff.inverse()
  imgT = RealViews.transform(imgI, aff)
  # Interval bounds are inclusive, hence the -1
  return Views.interval(imgT, [0, 0, 0],
                        [img.dimension(d) - 1 for d in xrange(3)])

def viewTransformed(img, calibration, matrix):
  affine = AffineTransform3D()
  affine.set(*matrix)
  # It's a forward transform: invert
  affine = affine.inverse()
  # Correct for the calibration
  scale3d = AffineTransform3D()
  scale3d.set(calibration[0], 0, 0, 0,
              0, calibration[1], 0, 0,
              0, 0, calibration[2], 0)
  affine.concatenate(scale3d)
  imgE = Views.extendZero(img)
  imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
  imgT = RealViews.transform(imgI, affine)
  # Dimensions of the calibrated view
  minC = [0, 0, 0]
  maxC = [int(img.dimension(d) * cal) - 1 for d, cal in enumerate(calibration)]
  imgB = Views.interval(imgT, minC, maxC)
  return imgB

def viewTransformed(img, matrix):
  affine = AffineTransform3D()
  affine.set(*matrix)
  # It's a forward transform: invert
  affine = affine.inverse()
  imgE = Views.extendZero(img)
  imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
  imgT = RealViews.transform(imgI, affine)
  # Same dimensions as the source image
  imgB = Views.interval(imgT, img)
  return imgB

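# Usage sketch for viewTransformed (hypothetical matrix; assumes the active
# image is a 3D stack; imports shown for self-containedness, matching those
# used elsewhere in these snippets).
from ij import IJ
from net.imglib2.img.display.imagej import ImageJFunctions as IL

img = IL.wrap(IJ.getImage())
matrix = [1, 0, 0, 10,
          0, 1, 0, -5,
          0, 0, 1, 0]  # a pure translation by (10, -5, 0)
# The returned view is lazy: wrap it to display, or copy into an Img to materialize
IL.wrap(viewTransformed(img, matrix), "translated view").show()
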
def swim3D(butval, factorSwim):
  transform = AffineTransform3D()
  # Get the current transform
  v.getState().getViewerTransform(transform)
  dz = butval * factorSwim
  transform.set(transform.get(0, 0), 0, 0, transform.get(0, 3),
                0, transform.get(1, 1), 0, transform.get(1, 3),
                0, 0, 1, transform.get(2, 3) - dz)
  v.setCurrentViewerTransform(transform)

def scale3D(img, x=1.0, y=1.0, z=1.0):
  scale3d = AffineTransform3D()
  scale3d.set(x, 0, 0, 0,
              0, y, 0, 0,
              0, 0, z, 0)
  imgE = Views.extendZero(img)
  imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
  imgT = RealViews.transform(imgI, scale3d)
  # Dimensions of the scaled view, rounded to the nearest integer
  minC = [0, 0, 0]
  maxC = [int(img.dimension(d) * k + 0.5) - 1 for d, k in enumerate([x, y, z])]
  imgB = Views.interval(imgT, minC, maxC)
  return imgB

def scale(img, calibration):
  scale3d = AffineTransform3D()
  scale3d.set(calibration[0], 0, 0, 0,
              0, calibration[1], 0, 0,
              0, 0, calibration[2], 0)
  imgE = Views.extendZero(img)
  imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
  imgT = RealViews.transform(imgI, scale3d)
  # Dimensions of the calibrated view
  minC = [0, 0, 0]
  maxC = [int(img.dimension(d) * cal) - 1 for d, cal in enumerate(calibration)]
  imgB = Views.interval(imgT, minC, maxC)
  return imgB

def get(self, path):
  img = self.klb.readFull(path)
  imgE = Views.extendZero(img)
  imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
  affine = AffineTransform3D()
  affine.set(self.transforms[path])
  affine = affine.inverse()  # it's a forward transform: must invert
  affine.concatenate(scale3d)  # calibrated space: isotropic (scale3d is a global defined elsewhere)
  imgT = RealViews.transform(imgI, affine)
  minC = [0, 0, 0]
  maxC = [int(img.dimension(d) * cal) - 1 for d, cal in enumerate(calibration)]
  imgB = Views.interval(imgT, minC, maxC)
  # View the RandomAccessibleInterval as an Img, as required by Load.lazyStack
  return ImgView.wrap(imgB, img.factory())

def viewTransformed(img, calibration, affine):
  """ View img transformed to isotropy (via the calibration)
      and transformed by the affine. """
  scale3d = AffineTransform3D()
  scale3d.set(calibration[0], 0, 0, 0,
              0, calibration[1], 0, 0,
              0, 0, calibration[2], 0)
  transform = affine.copy()
  transform.concatenate(scale3d)
  imgE = Views.extendZero(img)
  imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
  imgT = RealViews.transform(imgI, transform)
  # Dimensions of the calibrated view
  minC = [0, 0, 0]
  maxC = [int(img.dimension(d) * cal) - 1 for d, cal in enumerate(calibration)]
  imgB = Views.interval(imgT, minC, maxC)
  return imgB

def oneStep(index=0):
  # Combine all transforms into one, with a translation to account for the ROI crop
  img = klb.readFull(filepaths[index])  # klb_loader.get(filepaths[index])
  t1 = cmIsotropicTransforms[index]
  t2 = affine3D([1, 0, 0, -roi[0][0],
                 0, 1, 0, -roi[0][1],
                 0, 0, 1, -roi[0][2]])
  t3 = affine3D(fineTransformsPostROICrop[index]).inverse()
  aff = AffineTransform3D()
  aff.set(t1)
  aff.preConcatenate(t2)
  aff.preConcatenate(t3)
  # The final interval is now rooted at 0,0,0, given that the transform includes the translation
  imgP = prepareImgForDeconvolution(
           img, aff,
           FinalInterval([0, 0, 0],
                         [maxC - minC for minC, maxC in izip(roi[0], roi[1])]))
  # Copy the transformed view into an ArrayImg for best performance in deconvolution
  imgA = ArrayImgs.floats(Intervals.dimensionsAsLongArray(imgP))
  ImgUtil.copy(ImgView.wrap(imgP, imgA.factory()), imgA)
  IL.wrap(imgA, "one step index %i" % index).show()

def rotate3D(button, turnFactor, axis):
  transform = AffineTransform3D()
  # Get the current transform
  v.getState().getViewerTransform(transform)
  # Get the canvas and its size
  canvas = v.getDisplay()
  width = canvas.getWidth()
  height = canvas.getHeight()
  # How much to turn: turnFactor is in degrees, so convert to radians,
  # and halve it to reduce the speed
  n = turnFactor * float(math.pi / 180) * 1/2
  # Rotate the other way around
  if button == "l":
    n = float(2 * math.pi) - n
  # Rotate the transform
  transform.rotate(axis, n)
  # Apply the transform
  v.setCurrentViewerTransform(transform)

paths = []
timepointDir = srcDir + "TM000000/"
for camera_index, channel_index in zip(xrange(4), [1, 1, 0, 0]):
  paths.append(timepointDir + "SPM00_TM000000_CM0" + str(camera_index) +
               "_CHN0" + str(channel_index) + ".klb")

for path in paths:
  print basename(path)

img0 = klb.readFull(paths[0])
img1 = klb.readFull(paths[1])
img2 = klb.readFull(paths[2])
img3 = klb.readFull(paths[3])

# Make all isotropic (virtually, as a view)
scale3D = AffineTransform3D()
scale3D.set(calibration[0], 0.0, 0.0, 0.0,
            0.0, calibration[1], 0.0, 0.0,
            0.0, 0.0, calibration[2], 0.0)

def maxCoords(img):
  return [int(img.dimension(d) * calibration[d] - 1) for d in xrange(img.numDimensions())]

# Identity transform for CM00, scaled to isotropy
affine0 = AffineTransform3D()
affine0.identity()
affine0.concatenate(scale3D)

for frame in Frame.getFrames():
  if frame.getTitle() == "BigDataViewer":
    root = frame.getComponent(0)
    jlayeredpane = root.getComponents()[1]
    jpanel = jlayeredpane.getComponent(0)
    bdv_viewerpanel = jpanel.getComponent(0)
    #print bdv_viewerpanel
    # See: https://github.com/bigdataviewer/bigdataviewer-core/blob/master/src/main/java/bdv/viewer/ViewerPanel.java
    viewerstate = bdv_viewerpanel.getState()  # a copy of the ViewerState instance that wraps the sources
    sources_and_converters = viewerstate.getSources()  # a list of SourceState instances wrapping the sources
    for sc in sources_and_converters:
      source = sc.getSpimSource()
      print source  # bdv.tools.transformation.TransformedSource
      # Print the transform
      transform = AffineTransform3D()
      timepoint = 0
      mipmap_level = 0
      source.getSourceTransform(timepoint, mipmap_level, transform)
      print transform
      # Grab the RandomAccessible
      print source.getType().getClass()
      rai = source.getSource(timepoint, mipmap_level)
      print rai  # an imglib2 PlanarImg that wraps an ij.ImagePlus

# TODO:
# * demonstrate adding another source
# * demonstrate editing a source
# * demonstrate copying a source as transformed

# Materialized target image
img3 = ArrayImgs.argbs(Intervals.dimensionsAsLongArray(interval2))
ImgUtil.copy(viewImg3, img3)  # multi-threaded copy
imp5 = IL.wrap(img3, "imglib2-transformed ARGB (pull)")
imp5.show()

# Fifth approach: volume-wise transform with a pull (correct, but not always)
# Fast, yet the interpolator has no way to know that it should restrict
# the inputs of the interpolation operation to pixels in the 2D plane,
# as generally in image stacks the Z resolution is much worse than that of XY.
from net.imglib2.realtransform import AffineTransform3D

transform3D = AffineTransform3D()  # all matrix values are zero
transform3D.identity()  # diagonal of 1.0
transform3D.scale(scale, scale, 1.0)  # only X and Y
viewImg4 = Views.interval(
             RealViews.transform(
               Views.interpolate(Views.extendZero(img1), NLinearInterpolatorFactory()),
               transform3D),
             interval2)

# Materialized target image
img4 = ArrayImgs.argbs(Intervals.dimensionsAsLongArray(interval2))
ImgUtil.copy(viewImg4, img4)  # multi-threaded copy
imp6 = IL.wrap(img4, "imglib2-transformed ARGB (pull) volume-wise")
imp6.show()

              0, 1, 0])
finally:
  exe.shutdown()

# Open the 4D series again, this time virtually registered
from net.imglib2.realtransform import RealViews, AffineTransform3D
from net.imglib2.interpolation.randomaccess import NLinearInterpolatorFactory
from net.imglib2.util import Intervals
from net.imglib2.img import ImgView

# A scaling transform to visualize the volume in calibrated units
scale3d = AffineTransform3D()
scale3d.set(calibration[0], 0, 0, 0,
            0, calibration[1], 0, 0,
            0, 0, calibration[2], 0)

class KLBTransformLoader(CacheLoader):
  def __init__(self, transforms, calibration):
    self.transforms = transforms
    self.klb = KLB.newInstance()
  def get(self, path):
    img = self.klb.readFull(path)
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    affine = AffineTransform3D()
    affine.set(self.transforms[path])

      formerTolerance == tolerance and formerThreshold == threshold and
      formerDoSpotDetection == doSpotDetection):
    # Sleep some msec
    Thread.sleep(100)
    continue
  # Measure the start time for benchmarking
  timeStamp = System.currentTimeMillis()
  if (formerT != imp.getFrame()):
    formerT = imp.getFrame()
    # Push the image to the GPU
    pushed = clijx.pushCurrentZStack(imp)
    # Scale it initially; depends on the zoom factor and voxel size
    scaleTransform = AffineTransform3D()
    scaleTransform.scale(scaleX, scaleY, scaleZ)
    clijx.affineTransform3D(pushed, input, scaleTransform)
    pushed.close()
    stillValid = False
  # Noise/background removal
  if (formerDoNoiseAndBackgroundRemoval != doNoiseAndBackgroundRemoval or
      formerSigma1 != sigma1 or formerSigma2 != sigma2):
    formerDoNoiseAndBackgroundRemoval = doNoiseAndBackgroundRemoval
    formerSigma1 = sigma1
    formerSigma2 = sigma2
    stillValid = False
  if (not stillValid):
    if (doNoiseAndBackgroundRemoval):

      and rotationZ == formerRotationZ and formerT == imp.getFrame()
      and formerTolerance == tolerance and formerThreshold == threshold
      and formerDoSpotDetection == doSpotDetection):
    # Sleep some msec
    Thread.sleep(100)
    continue
  # Measure the start time for benchmarking
  timeStamp = System.currentTimeMillis()
  if (formerT != imp.getFrame()):
    formerT = imp.getFrame()
    # Push the image to the GPU
    pushed = clij2.pushCurrentZStack(imp)
    # Scale it initially; depends on the zoom factor and voxel size
    scaleTransform = AffineTransform3D()
    scaleTransform.scale(1.0 / scaleX, 1.0 / scaleY, 1.0 / scaleZ)
    clij2.affineTransform3D(pushed, input, scaleTransform)
    pushed.close()
    stillValid = False
  # Noise/background removal
  if (formerDoNoiseAndBackgroundRemoval != doNoiseAndBackgroundRemoval or
      formerSigma1 != sigma1 or formerSigma2 != sigma2):
    formerDoNoiseAndBackgroundRemoval = doNoiseAndBackgroundRemoval
    formerSigma1 = sigma1
    formerSigma2 = sigma2
    stillValid = False
  if (not stillValid):
    if (doNoiseAndBackgroundRemoval):

def affine3D(matrix):
  aff = AffineTransform3D()
  aff.set(*matrix)
  return aff

# Paths for the same timepoint, 4 different cameras
paths = []
timepointDir = srcDir + "TM000000/"
for camera_index, channel_index in zip(xrange(4), [1, 1, 0, 0]):
  paths.append(timepointDir + "SPM00_TM000000_CM0" + str(camera_index) +
               "_CHN0" + str(channel_index) + ".klb")

for path in paths:
  print basename(path)

img0 = klb.readFull(paths[0])
img1 = klb.readFull(paths[1])
img2 = klb.readFull(paths[2])
img3 = klb.readFull(paths[3])

# Calibration: [1.0, 1.0, 5.0]
scale3D = AffineTransform3D()
scale3D.set(1.0, 0.0, 0.0, 0.0,
            0.0, 1.0, 0.0, 0.0,
            0.0, 0.0, 5.0, 0.0)

# Expand camera CM00 to isotropy
imgE = Views.extendZero(img0)
imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
imgT = RealViews.transform(imgI, scale3D)
imgB0 = Views.interval(imgT, [0, 0, 0],
                       [img0.dimension(0) - 1,
                        img0.dimension(1) - 1,
                        img0.dimension(2) * 5 - 1])

# Transform camera CM01 to CM00: 180 degrees on the Y axis, plus a translation
dx = -195
dy = 54
dz = 8

              1, 0, 0,
              0, 0, 1, 0]
    # Store
    matrices.append(matrix)
finally:
  exe.shutdown()

# Invert and concatenate the transforms
aff_previous = AffineTransform3D()
aff_previous.identity()  # set to identity
affines = [aff_previous]  # first image at index 0
for matrix in matrices[1:]:  # skip zero, which is the identity
  aff = AffineTransform3D()
  aff.set(*matrix)
  aff = aff.inverse()  # the matrix describes the img1 -> img2 transform; we want the opposite
  aff.preConcatenate(aff_previous)  # make relative to the prior image
  affines.append(aff)  # store
  aff_previous = aff  # for the next iteration

def viewTransformed(img, calibration, affine):
  # Correct for the calibration

roi = ([1, 228, 0],  # top-left coordinates
       [1 + 406 - 1, 228 + 465 - 1, 0 + 325 - 1])  # bottom-right coordinates (inclusive, hence the -1)
dimensions = [maxC - minC + 1 for minC, maxC in zip(roi[0], roi[1])]

imgU = ArrayImgs.unsignedShorts(dimensions)
imgF = ArrayImgs.floats(dimensions)
#c = imgF.cursor()
#while c.hasNext():
#  c.next().set(random() * 65535)
ImgMath.compute(ImgMath.number(17)).into(imgF)
ImgMath.compute(ImgMath.img(imgF)).into(imgU)

aff = AffineTransform3D()
# Identity, commented out for testing:
#aff.set(1, 0, 0, 0,
#        0, 1, 0, 0,
#        0, 0, 1, 0)
aff.set(*[0.9999949529841275, -0.0031770224721305684, 2.3118912942710207e-05, -1.6032353998500826,
          0.003177032139125933, 0.999994860398559, -0.00043086338151948394, -0.4401520585103873,
          -2.1749931475206362e-05, 0.0004309346564745992, 0.9999999069111268, 6.543187040788581])

interval = FinalInterval([0, 0, 0], [d - 1 for d in dimensions])

def test(img):

imp = IJ.getImage()
# Access its pixel data as an ImgLib2 RandomAccessibleInterval
img = IL.wrapReal(imp)
# View as an infinite image, with value zero beyond the image edges
imgE = Views.extendZero(img)
# View the pixel data as a RealRandomAccessible
# (that is, accessible with sub-pixel precision)
# by using an interpolator
imgR = Views.interpolate(imgE, NLinearInterpolatorFactory())
# Define a rotation by +30 degrees relative to the image center in the XY axes
angle = radians(30)
toCenter = AffineTransform3D()
cx = img.dimension(0) / 2.0  # X axis
cy = img.dimension(1) / 2.0  # Y axis
toCenter.setTranslation(-cx, -cy, 0.0)  # no translation in the Z axis
rotation = AffineTransform3D()
# Step 1: place the origin of rotation at the center of the image
rotation.preConcatenate(toCenter)
# Step 2: rotate around the Z axis
rotation.rotate(2, angle)  # 2 is the Z axis, or 3rd dimension
# Step 3: undo the translation to the center
rotation.preConcatenate(toCenter.inverse())
# Define a rotated view of the image
rotated = RV.transform(imgR, rotation)
# View the image rotated, without enlarging the canvas

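# A plausible completion (a sketch, not part of the original snippet): bound
# the rotated view to the original interval so the canvas is not enlarged,
# then wrap and show it.
rotatedInterval = Views.interval(rotated, img)  # same min/max as the source
IL.wrap(rotatedInterval, "rotated 30 degrees").show()
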
def registerDeconvolvedTimePoints(targetDir,
                                  params,
                                  modelclass,
                                  exe=None,
                                  verbose=True,
                                  subrange=None):
  """ Can only be run after running deconvolveTimePoints, because it expects
      deconvolved images to exist under <targetDir>/deconvolved/,
      with a name pattern like: TM\d+_CM0\d-CM0\d-deconvolved.zip
      Tests if files exist first; if they don't, execution stops.
      Will write the features, pointmatches and registration affine matrices
      into a csv folder under targetDir.
      If a CSV file with the affine transform matrices exists, it will read
      them out and provide the 4D img right away.
      Else, it will check which files are missing their features and
      pointmatches as CSV files, create them, then create the CSV file with
      the affine transform matrices, and finally provide the 4D img.

      targetDir: the directory containing the deconvolved images.
      params: for feature extraction and registration.
      modelclass: the model to use, e.g. Translation3D, AffineTransform3D.
      exe: the ExecutorService to use (optional).
      subrange: the range of time point indices to process, as enumerated
                by the folder name, i.e. the number captured by /TM(\d+)/

      Returns an imglib2 4D img with the registered deconvolved 3D stacks. """
  deconvolvedDir = os.path.join(targetDir, "deconvolved")
  # A folder for features, pointmatches and matrices in CSV format
  csv_dir = os.path.join(deconvolvedDir, "csvs")
  if not os.path.exists(csv_dir):
    os.mkdir(csv_dir)
  # A data structure to represent the timepoints, each with two filenames
  timepoint_views = defaultdict(defaultdict)
  pattern = re.compile("^TM(\d+)_(CM0\d-CM0\d)-deconvolved.zip$")
  for filename in sorted(os.listdir(deconvolvedDir)):
    m = re.match(pattern, filename)
    if m:
      stime, view = m.groups()
      timepoint_views[int(stime)][view] = filename
  # Filter by the specified subrange, if any
  if subrange:
    subrange = set(subrange)
    for time in timepoint_views.keys():  # a list copy of the keys, so timepoints can be deleted
      if time not in subrange:
        del timepoint_views[time]
  # Register only the view CM00-CM01, given that CM02-CM03 has the same transform
  matrices_name = "matrices-%s" % modelclass.getSimpleName()
  matrices = None
  if os.path.exists(os.path.join(csv_dir, matrices_name + ".csv")):
    matrices = loadMatrices(matrices_name, csv_dir)
    if len(matrices) != len(timepoint_views):
      syncPrint("Ignoring existing matrices CSV file: its length (%i) doesn't match the expected number of timepoints (%i)" % (len(matrices), len(timepoint_views)))
      matrices = None
  if not matrices:
    original_exe = exe
    if not exe:
      exe = newFixedThreadPool()
    try:
      # Deconvolved images are isotropic
      def getCalibration(img_filepath):
        return [1, 1, 1]
      timepoints = []  # sorted
      filepaths = []  # sorted
      for timepoint, views in sorted(timepoint_views.iteritems(), key=itemgetter(0)):
        timepoints.append(timepoint)
        filepaths.append(os.path.join(deconvolvedDir, views["CM00-CM01"]))
      #
      #matrices_fwd = computeForwardTransforms(filepaths, ImageJLoader(), getCalibration,
      #                                        csv_dir, exe, modelclass, params, exe_shutdown=False)
      #matrices = [affine.getRowPackedCopy() for affine in asBackwardConcatTransforms(matrices_fwd)]
      matrices = computeOptimizedTransforms(filepaths, ImageJLoader(), getCalibration,
                                            csv_dir, exe, modelclass, params,
                                            verbose=verbose)
      saveMatrices(matrices_name, matrices, csv_dir)
    finally:
      if not original_exe:
        exe.shutdownNow()  # was created anew here
  # Convert the matrices into twice as many affine transforms
  affines = []
  for matrix in matrices:
    aff = AffineTransform3D()
    aff.set(*matrix)
    affines.append(aff)
    affines.append(aff)  # twice: also for the CM02-CM03 view
  # Show the registered deconvolved series as a 4D volume
  filepaths = []
  for timepoint in sorted(timepoint_views.iterkeys()):
    views = timepoint_views.get(timepoint)
    for view_name in sorted(views.keys()):  # ["CM00-CM01", "CM02-CM03"]
      filepaths.append(os.path.join(deconvolvedDir, views[view_name]))
  img = Load.lazyStack(filepaths,
                       TransformedLoader(ImageJLoader(),
                                         dict(izip(filepaths, affines)),
                                         asImg=True))
  return img

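# A hypothetical invocation (a sketch): the path is a placeholder, and 'params'
# must carry the feature-extraction and registration parameters that
# computeOptimizedTransforms expects.
params = {}  # placeholder: fill in with the expected registration parameters
img4d = registerDeconvolvedTimePoints("/path/to/targetDir", params, Translation3D)
IL.wrap(img4d, "registered deconvolved 4D series").show()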