def runTest(self):
    TPool = pools.GetGlobalMultithreadingPool()
    self.assertIsNotNone(TPool)

    runFunctionOnPool(self, TPool, Func=SquareTheNumberWithDelay)
    runFileIOOnPool(self, TPool, CreateFunc=CreateFileWithDelay, ReadFunc=ReadFileWithDelay)
    runEvenDistributionOfWorkTestOnThePool(self, TPool)
def Clear(self):
    '''Sets attributes to None to encourage garbage collection'''
    self._image = None
    self._centerDistanceImage = None
    self._source_space_scale = None
    self._target_space_scale = None
    self._transform = None

    if self._centerDistanceImage_path is not None or self._image_path is not None:
        pool = nornir_pools.GetGlobalMultithreadingPool()
        pool.add_task(self._image_path,
                      TransformedImageData._RemoveTempFiles,
                      self._centerDistanceImage_path,
                      self._image_path,
                      self._tempdir)
def ScoreMosaicQuality(transforms, imagepaths, imageScale=None):
    '''
    Walk each overlapping region between tiles.  Subtract the overlapping regions from
    each other and average the resulting scores to estimate mosaic quality.
    '''

    tiles = nornir_imageregistration.tile.CreateTiles(transforms, imagepaths)

    if imageScale is None:
        imageScale = tileset.MostCommonScalar(transforms, imagepaths)

    list_tiles = list(tiles.values())
    total_score = 0
    total_pixels = 0

    pool = nornir_pools.GetGlobalMultithreadingPool()
    tasks = list()

    for tile_overlap in nornir_imageregistration.tile_overlap.IterateTileOverlaps(list_tiles, imageScale=imageScale):
        # (downsampled_overlapping_rect_A, downsampled_overlapping_rect_B, OffsetAdjustment) = nornir_imageregistration.tile.Tile.Calculate_Overlapping_Regions(tile_overlap.A, tile_overlap.B, imageScale)
        # __AlignmentScoreRemote(A.ImagePath, B.ImagePath, downsampled_overlapping_rect_A, downsampled_overlapping_rect_B)
        t = pool.add_task("Score %d -> %d" % (tile_overlap.ID[0], tile_overlap.ID[1]),
                          __AlignmentScoreRemote,
                          tile_overlap.A.ImagePath,
                          tile_overlap.B.ImagePath,
                          tile_overlap.scaled_overlapping_source_rect_A,
                          tile_overlap.scaled_overlapping_source_rect_B)
        tasks.append(t)

        # OverlappingRegionA = __get_overlapping_image(A.Image, downsampled_overlapping_rect_A, excess_scalar=1.0)
        # OverlappingRegionB = __get_overlapping_image(B.Image, downsampled_overlapping_rect_B, excess_scalar=1.0)
        #
        # OverlappingRegionA -= OverlappingRegionB
        # absoluteDiff = np.fabs(OverlappingRegionA)
        # score = np.sum(absoluteDiff.flat)

    pool.wait_completion()

    for t in tasks:
        # (score, num_pixels) = t.wait_return()
        score = t.wait_return()
        total_score += score
        # total_pixels += np.prod(num_pixels)

    # return total_score / total_pixels
    return total_score / len(tasks)
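# ScoreMosaicQuality follows the fan-out/fan-in pattern used throughout these modules:
# queue one task per tile overlap, wait for the pool to drain, then collect each task's
# return value.  Below is a minimal, self-contained sketch of that pattern using the same
# nornir_pools task API shown above (add_task / wait_completion / wait_return);
# _example_score_pair is a hypothetical stand-in for __AlignmentScoreRemote.


def _example_score_pair(path_a, path_b):
    # Trivial placeholder worker; the real worker differences the overlapping image regions.
    return float(len(path_a) + len(path_b))


def _example_score_pairs(pairs):
    import nornir_pools

    pool = nornir_pools.GetGlobalMultithreadingPool()
    tasks = [pool.add_task("Score %s -> %s" % (a, b), _example_score_pair, a, b)
             for (a, b) in pairs]

    pool.wait_completion()
    scores = [t.wait_return() for t in tasks]
    return sum(scores) / len(scores) if scores else 0.0

# Usage (hypothetical paths): _example_score_pairs([("tile0.png", "tile1.png")])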
def AssembleImage(self, tilesPath, FixedRegion=None, usecluster=False, target_space_scale=None, source_space_scale=None):
    '''Create a single image of the mosaic for the requested region.

    :param str tilesPath: Directory containing tiles referenced in our transform
    :param array FixedRegion: Rectangle object or [MinY MinX MaxY MaxX] boundary of image to assemble
    :param boolean usecluster: Offload work to other threads or nodes if true
    :param float target_space_scale: Scalar for target space, used to adjust size of assembled image
    :param float source_space_scale: Optimization parameter, eliminates need for function to compare input images with transform boundaries to determine scale
    '''

    # Left off here, I need to split this function so that FixedRegion has a consistent meaning

    # Ensure that all transforms map to positive values
    # self.TranslateToZeroOrigin()

    if FixedRegion is not None:
        spatial.RaiseValueErrorOnInvalidBounds(FixedRegion)

    # Allocate a buffer for the tiles
    tilesPathList = self.CreateTilesPathList(tilesPath)

    if usecluster and len(tilesPathList) > 1:
        cpool = nornir_pools.GetGlobalMultithreadingPool()
        return at.TilesToImageParallel(self._TransformsSortedByKey(),
                                       tilesPathList,
                                       pool=cpool,
                                       TargetRegion=FixedRegion,
                                       target_space_scale=target_space_scale,
                                       source_space_scale=source_space_scale)
    else:
        # return at.TilesToImageParallel(self.ImageToTransform.values(), tilesPathList)
        return at.TilesToImage(self._TransformsSortedByKey(),
                               tilesPathList,
                               TargetRegion=FixedRegion,
                               target_space_scale=target_space_scale,
                               source_space_scale=source_space_scale)
def __InvokeFunctionOnImageList__(listfilenames, Function=None, Pool=None, **kwargs):
    '''Invoke the given function on every image in the list using a pool and return a
    dictionary mapping each filename to the function's result.'''
    if Pool is None:
        TPool = nornir_pools.GetGlobalMultithreadingPool()
    else:
        TPool = Pool

    TileToScore = dict()
    tasklist = []
    for filename in listfilenames:
        task = TPool.add_task('Calc Feature Score: ' + os.path.basename(filename), Function, filename, **kwargs)
        task.filename = filename
        tasklist.append(task)

    TPool.wait_completion()

    numTasks = len(tasklist)
    iTask = 0
    for task in tasklist:
        Result = task.wait_return()
        iTask = iTask + 1
        if Result is None:
            PrettyOutput.LogErr('No return value for ' + task.filename)
            continue

        # if Result[0] is None:
        #    PrettyOutput.LogErr('No filename for ' + task.name)
        #    continue

        PrettyOutput.CurseProgress("ImageStats", iTask, numTasks)

        filename = task.filename
        TileToScore[filename] = Result

    return TileToScore
def runTest(self):
    numThreadsInTest = 100

    CreateFile(self.TestOutputPath, 0)
    ExpectedPath = os.path.join(self.TestOutputPath, TestThreadPool.FilenameTemplate % 0)
    self.assertTrue(os.path.exists(ExpectedPath),
                    "Function we are testing threads with does not seem to work")
    os.remove(ExpectedPath)

    # Create 100 threads and have them create files
    TPool = pools.GetGlobalMultithreadingPool()
    self.assertIsNotNone(TPool)

    VerifyExceptionBehaviour(self, TPool)
    runFunctionOnPool(self, TPool)
    runFileIOOnPool(self, TPool)
    runEvenDistributionOfWorkTestOnThePool(self, TPool)

    TPool = pools.GetMultithreadingPool("Test multithreading pool")
    self.assertIsNotNone(TPool)
    runFileIOOnPool(self, TPool)
def _FindTileOffsets(tile_overlaps, excess_scalar, imageScale=None, existing_layout=None):
    '''Populates the OffsetToTile dictionary for tiles

    :param list tile_overlaps: List of all tile overlaps or dictionary whose values are tile overlaps
    :param float imageScale: downsample level if known.  None causes it to be calculated.
    :param float excess_scalar: How much additional area should we pad the overlapping rectangles with.
    :return: A layout object describing the optimal adjustment for each tile to align with each neighboring tile
    '''

    if imageScale is None:
        imageScale = 1.0

    downsample = 1.0 / imageScale

    # idx = tileset.CreateSpatialMap([t.FixedBoundingBox for t in tiles], tiles)

    CalculationCount = 0

    # _CalculateImageFFTs(tiles)

    # pool = nornir_pools.GetGlobalSerialPool()
    pool = nornir_pools.GetGlobalMultithreadingPool()
    tasks = list()

    layout = existing_layout
    if layout is None:
        layout = nornir_imageregistration.layout.Layout()

    list_tile_overlaps = tile_overlaps
    if isinstance(tile_overlaps, dict):
        list_tile_overlaps = list(tile_overlaps.values())

    assert (isinstance(list_tile_overlaps, list))

    for t in list_tile_overlaps:
        if not layout.Contains(t.A.ID):
            layout.CreateNode(t.A.ID, t.A.FixedBoundingBox.Center)

        if not layout.Contains(t.B.ID):
            layout.CreateNode(t.B.ID, t.B.FixedBoundingBox.Center)

    print("Starting tile alignment")
    for tile_overlap in list_tile_overlaps:
        t = pool.add_task("Align %d -> %d" % (tile_overlap.ID[0], tile_overlap.ID[1]),
                          __tile_offset_remote,
                          tile_overlap.A.ImagePath,
                          tile_overlap.B.ImagePath,
                          tile_overlap.scaled_overlapping_source_rect_A,
                          tile_overlap.scaled_overlapping_source_rect_B,
                          tile_overlap.scaled_offset,
                          excess_scalar)

        t.tile_overlap = tile_overlap
        tasks.append(t)
        CalculationCount += 1
        # print("Start alignment %d -> %d" % (A.ID, B.ID))

    for t in tasks:
        try:
            offset = t.wait_return()
        except FloatingPointError as e:
            # Very rarely the overlapping region is entirely one color and this error is thrown.
            print("FloatingPointError: %d -> %d = %s -> Using stage coordinates." % (t.tile_overlap.A.ID, t.tile_overlap.B.ID, str(e)))

            # Create an alignment record using only stage position and a weight of zero
            offset = nornir_imageregistration.AlignmentRecord(peak=t.tile_overlap.scaled_offset, weight=0)

        tile_overlap = t.tile_overlap

        # Figure out what offset we found vs. what offset we expected
        PredictedOffset = tile_overlap.B.FixedBoundingBox.Center - tile_overlap.A.FixedBoundingBox.Center
        ActualOffset = offset.peak * downsample

        diff = ActualOffset - PredictedOffset
        distance = np.sqrt(np.sum(diff ** 2))
        f_score = min(tile_overlap.feature_scores)
        final_weight = offset.weight * f_score
        print("%d -> %d = feature score: %.04g align score: %.04g Final Weight: %.04g Dist: %.04g" %
              (tile_overlap.A.ID, tile_overlap.B.ID, f_score, offset.weight, final_weight, distance))

        layout.SetOffset(tile_overlap.A.ID, tile_overlap.B.ID, ActualOffset, final_weight)

    pool.wait_completion()

    print(("Total offset calculations: " + str(CalculationCount)))

    return layout
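# For reference, the weighting above multiplies the phase-correlation weight by the weaker
# of the two tiles' overlap feature scores, and "Dist" is simply the disagreement between
# the measured offset and the offset predicted from stage positions.  A small worked
# example with made-up numbers (not from a real mosaic):


def _example_offset_weighting():
    import numpy as np

    downsample = 4.0                                    # 1.0 / imageScale for downsample-by-4 input images
    predicted_offset = np.array([0.0, 1024.0])          # B.Center - A.Center, full-resolution pixels
    peak = np.array([1.5, 254.0])                       # offset.peak measured on the downsampled overlap

    actual_offset = peak * downsample                   # (6.0, 1016.0) full-resolution pixels
    diff = actual_offset - predicted_offset             # (6.0, -8.0)
    distance = np.sqrt(np.sum(diff ** 2))               # 10.0 pixels of disagreement with the stage

    feature_scores = (0.8, 0.6)                         # tile_overlap.feature_scores for A and B
    align_weight = 0.9                                  # offset.weight from the remote alignment
    final_weight = align_weight * min(feature_scores)   # 0.54, the edge weight stored in the layout

    return distance, final_weight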
def TilesToImageParallel(transforms, imagepaths, TargetRegion=None, target_space_scale=None, source_space_scale=None, pool=None):
    '''Assembles a set of transforms and imagepaths to a single image using parallel techniques.

    :param tuple TargetRegion: (MinY, MinX, MaxY, MaxX) or Rectangle class.  Specifies the target space region to render.
    :param float target_space_scale: Scalar for the target space coordinates.  Used to downsample or upsample the output image.  Changes the coordinates of the target space control points of the transform.
    :param float source_space_scale: Scalar for the source space coordinates.  Must match the change in scale of input images relative to the transform source space coordinates.  So if downsampled by 4 images are used, this value should be 0.25.  Calculated to be correct if None.  Specifying is an optimization to reduce I/O of reading image files to calculate.
    '''

    assert (len(transforms) == len(imagepaths))

    logger = logging.getLogger('TilesToImageParallel')

    if pool is None:
        pool = nornir_pools.GetGlobalMultithreadingPool()
        # pool = nornir_pools.GetGlobalSerialPool()

    tasks = []
    if source_space_scale is None:
        source_space_scale = tiles.MostCommonScalar(transforms, imagepaths)

    if target_space_scale is None:
        target_space_scale = source_space_scale

    original_fixed_rect_floats = None

    if TargetRegion is not None:
        if isinstance(TargetRegion, spatial.Rectangle):
            original_fixed_rect_floats = TargetRegion
        else:
            original_fixed_rect_floats = spatial.Rectangle.CreateFromPointAndArea(
                (TargetRegion[0], TargetRegion[1]),
                (TargetRegion[2] - TargetRegion[0], TargetRegion[3] - TargetRegion[1]))
    else:
        original_fixed_rect_floats = tutils.FixedBoundingBox(transforms)

    scaled_targetRect = nornir_imageregistration.Rectangle.scale_on_origin(original_fixed_rect_floats, target_space_scale)
    scaled_targetRect = nornir_imageregistration.Rectangle.SafeRound(scaled_targetRect)
    targetRect = nornir_imageregistration.Rectangle.scale_on_origin(scaled_targetRect, 1.0 / target_space_scale)

    (fullImage, fullImageZbuffer) = __CreateOutputBufferForArea(scaled_targetRect.Height, scaled_targetRect.Width, target_space_scale)

    CheckTaskInterval = 16

    for i, transform in enumerate(transforms):
        regionToRender = None
        original_transform_target_rect = spatial.Rectangle(transform.FixedBoundingBox)
        transform_target_rect = nornir_imageregistration.Rectangle.SafeRound(original_transform_target_rect)

        regionToRender = nornir_imageregistration.Rectangle.Intersect(targetRect, transform_target_rect)
        if regionToRender is None:
            continue

        if regionToRender.Area == 0:
            continue

        scaled_region_rendered = nornir_imageregistration.Rectangle.scale_on_origin(regionToRender, target_space_scale)
        scaled_region_rendered = nornir_imageregistration.Rectangle.SafeRound(scaled_region_rendered)

        imagefullpath = imagepaths[i]

        task = pool.add_task("TransformTile" + imagefullpath,
                             TransformTile,
                             transform=transform,
                             imagefullpath=imagefullpath,
                             distanceImage=None,
                             target_space_scale=target_space_scale,
                             TargetRegion=regionToRender,
                             SingleThreadedInvoke=False)
        task.transform = transform
        task.regionToRender = regionToRender
        task.scaled_region_rendered = scaled_region_rendered
        task.transform_fixed_rect = transform_target_rect
        tasks.append(task)

        if not i % CheckTaskInterval == 0:
            continue

        if len(tasks) > multiprocessing.cpu_count():
            iTask = len(tasks) - 1
            while iTask >= 0:
                t = tasks[iTask]
                if t.iscompleted:
                    transformedImageData = t.wait_return()
                    __AddTransformedTileTaskToComposite(t, transformedImageData, fullImage, fullImageZbuffer, scaled_targetRect)
                    del transformedImageData
                    del tasks[iTask]

                iTask -= 1

    logger.info('All warps queued, integrating results into final image')

    while len(tasks) > 0:
        t = tasks.pop(0)
        transformedImageData = t.wait_return()
        __AddTransformedTileTaskToComposite(t, transformedImageData, fullImage, fullImageZbuffer, scaled_targetRect)
        del transformedImageData
        del t

        # Pass through the entire loop and eliminate completed tasks in case any finished out of order
        iTask = len(tasks) - 1
        while iTask >= 0:
            t = tasks[iTask]
            if t.iscompleted:
                transformedImageData = t.wait_return()
                __AddTransformedTileTaskToComposite(t, transformedImageData, fullImage, fullImageZbuffer, scaled_targetRect)
                del transformedImageData
                del tasks[iTask]

            iTask -= 1

    logger.info('Final image complete, building mask')

    mask = fullImageZbuffer < __MaxZBufferValue(fullImageZbuffer.dtype)
    del fullImageZbuffer

    fullImage[fullImage < 0] = 0
    # Checking for > 1.0 makes sense for floating point images.  During the DM4 migration
    # I was getting images which used 0-255 values, and the 1.0 check set them to entirely black
    # fullImage[fullImage > 1.0] = 1.0

    logger.info('Assemble complete')

    if isinstance(fullImage, np.memmap):
        fullImage.flush()

    return (fullImage, mask)
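# TilesToImageParallel keeps memory bounded by periodically draining completed tasks while
# it is still queueing new ones.  A minimal sketch of just that back-pressure loop, assuming
# the same task attributes shown above (iscompleted, wait_return); _example_render_tile is
# a hypothetical stand-in for TransformTile.


def _example_render_tile(i):
    # Cheap placeholder result to "composite"
    return i * i


def _example_run_with_backpressure(num_tiles, check_task_interval=16):
    import multiprocessing

    import nornir_pools

    pool = nornir_pools.GetGlobalMultithreadingPool()
    tasks = []
    total = 0

    for i in range(num_tiles):
        tasks.append(pool.add_task("tile %d" % i, _example_render_tile, i))

        if i % check_task_interval != 0:
            continue

        # Only drain once the backlog exceeds the number of workers
        if len(tasks) > multiprocessing.cpu_count():
            for iTask in range(len(tasks) - 1, -1, -1):
                if tasks[iTask].iscompleted:
                    total += tasks[iTask].wait_return()
                    del tasks[iTask]

    # Consume whatever is left, blocking on each remaining task
    while tasks:
        total += tasks.pop(0).wait_return()

    return total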
def _RunRefineTwoImagesIteration(Transform, target_image, source_image, target_mask=None,
                                 source_mask=None, cell_size=(256, 256), grid_spacing=(256, 256),
                                 finalized=None, angles_to_search=None, min_alignment_overlap=0.5):
    '''
    Places a regular grid of control points across the target image.  The corresponding points on the
    source image are then adjusted to create a mapping from Source To Fixed Space for the source image.

    :param transform Transform: Transform that maps from source to target space
    :param ndarray target_image: Target image to serve as reference
    :param ndarray source_image: Source image to be transformed
    :param ndarray target_mask: Target image mask, True where valid pixels exist, can be None
    :param ndarray source_mask: Source image mask, True where valid pixels exist, can be None
    :param tuple cell_size: (width, height) area of image around control points to use for registration
    :param tuple grid_spacing: (width, height) of separation between control points on the grid
    :param dict finalized: A dictionary of points, indexed by Target Space Coordinates, that are finalized and do not need to be checked
    :param array angles_to_search: An array of floats or None.  Images are rotated by the degrees indicated in the array.  The single best alignment across all angles is selected.
    :param float min_alignment_overlap: Limits how far control points can be translated.  The cells from fixed and target space must overlap by this minimum amount.
    '''

    if isinstance(Transform, str):
        Transform = nornir_imageregistration.transforms.factory.LoadTransform(Transform, 1)

    grid_spacing = np.asarray(grid_spacing, np.int32)
    cell_size = np.asarray(cell_size, np.int32)

    target_image = nornir_imageregistration.ImageParamToImageArray(target_image, dtype=np.float32)
    source_image = nornir_imageregistration.ImageParamToImageArray(source_image, dtype=np.float32)

    if target_mask is not None:
        target_mask = nornir_imageregistration.ImageParamToImageArray(target_mask, dtype=np.bool)
        target_image = nornir_imageregistration.RandomNoiseMask(target_image, target_mask)

    if source_mask is not None:
        source_mask = nornir_imageregistration.ImageParamToImageArray(source_mask, dtype=np.bool)
        source_image = nornir_imageregistration.RandomNoiseMask(source_image, source_mask)

    # shared_fixed_image = nornir_imageregistration.npArrayToReadOnlySharedArray(target_image)
    # shared_fixed_image.mode = 'r'
    # shared_warped_image = nornir_imageregistration.npArrayToReadOnlySharedArray(source_image)
    # shared_warped_image.mode = 'r'

    # Mark a grid along the fixed image, then find the points on the warped image
    # grid_data = nornir_imageregistration.grid_subdivision.CenteredGridRefinementCells(target_image.shape, cell_size)
    grid_data = nornir_imageregistration.ITKGridDivision(source_image.shape,
                                                         cell_size=cell_size,
                                                         grid_spacing=grid_spacing)

    # grid_dims = nornir_imageregistration.TileGridShape(target_image.shape, grid_spacing)

    if angles_to_search is None:
        angles_to_search = [0]
    # angles_to_search = np.linspace(-7.5, 7.5, 11)

    # Create the grid coordinates
    # coords = [np.asarray((iRow, iCol), dtype=np.int32) for iRow in range(grid_data.grid_dims[0]) for iCol in range(grid_data.grid_dims[1])]
    # coords = np.vstack(coords)

    # Create
    # TargetPoints = coords * grid_spacing  # [np.asarray((iCol * grid_spacing[0], iRow * grid_spacing[1]), dtype=np.int32) for (iRow, iCol) in coords]

    # Grid dimensions round up, so if we are larger than image find out by how much and adjust the points so they are centered on the image
    # overage = ((grid_dims * grid_spacing) - target_image.shape) / 2.0
    # TargetPoints = np.round(TargetPoints - overage).astype(np.int64)

    # TODO, ensure fixedPoints are within the bounds of target_image
    grid_data.FilterOutofBoundsSourcePoints(source_image.shape)
    grid_data.RemoveCellsUsingSourceImageMask(source_mask, 0.45)
    # nornir_imageregistration.views.grid_data.PlotGridPositionsAndMask(grid_data.SourcePoints, source_mask, OutputFilename=None)

    grid_data.PopulateTargetPoints(Transform)
    grid_data.RemoveCellsUsingTargetImageMask(target_mask, 0.45)
    # nornir_imageregistration.views.grid_data.PlotGridPositionsAndMask(grid_data.TargetPoints, target_mask, OutputFilename=None)

    # grid_data.ApplyWarpedImageMask(source_mask)
    # valid_inbounds = np.logical_and(np.all(FixedPoints >= np.asarray((0, 0)), 1), np.all(TargetPoints < target_mask.shape, 1))
    # TargetPoints = TargetPoints[valid_inbounds, :]
    # coords = coords[valid_inbounds, :]
    #
    if finalized is not None:
        found = [tuple(grid_data.SourcePoints[i, :]) not in finalized
                 for i in range(grid_data.coords.shape[0])]
        valid = np.asarray(found, np.bool)
        grid_data.RemoveMaskedPoints(valid)
        # TargetPoints = TargetPoints[valid, :]
        # coords = coords[valid, :]

    # # Filter Fixed Points falling outside the mask
    # if target_mask is not None:
    #     valid = nornir_imageregistration.index_with_array(target_mask, TargetPoints)
    #     TargetPoints = TargetPoints[valid, :]
    #     coords = coords[valid, :]
    #
    # SourcePoints = Transform.InverseTransform(TargetPoints).astype(np.int32)
    # if source_mask is not None:
    #     valid = np.logical_and(np.all(SourcePoints >= np.asarray((0, 0)), 1), np.all(SourcePoints < source_mask.shape, 1))
    #     SourcePoints = SourcePoints[valid, :]
    #     TargetPoints = TargetPoints[valid, :]
    #     coords = coords[valid, :]
    #
    #     valid = nornir_imageregistration.index_with_array(source_mask, SourcePoints)
    #     SourcePoints = SourcePoints[valid, :]
    #     TargetPoints = TargetPoints[valid, :]
    #     coords = coords[valid, :]

    pool = nornir_pools.GetGlobalMultithreadingPool()
    # pool = nornir_pools.GetGlobalThreadPool()
    tasks = list()
    alignment_records = list()

    rigid_transforms = ApproximateRigidTransform(input_transform=Transform,
                                                 target_points=grid_data.TargetPoints)

    for (i, coord) in enumerate(grid_data.coords):

        AlignTask = StartAttemptAlignPoint(pool,
                                           "Align %d,%d" % (coord[0], coord[1]),
                                           rigid_transforms[i],  # Transform,
                                           target_image,
                                           source_image,
                                           grid_data.TargetPoints[i, :],
                                           cell_size,
                                           anglesToSearch=angles_to_search,
                                           min_alignment_overlap=min_alignment_overlap)

        if AlignTask is None:
            continue

        # AlignTask = pool.add_task("Align %d,%d" % (coord[0], coord[1]),
        #                           AttemptAlignPoint,
        #                           Transform,
        #                           shared_fixed_image,
        #                           shared_warped_image,
        #                           TargetPoints[i, :],
        #                           cell_size,
        #                           anglesToSearch=AnglesToSearch)

        AlignTask.ID = i
        AlignTask.coord = coord
        tasks.append(AlignTask)

    # arecord.iRow = iRow
    # arecord.iCol = iCol
    # arecord.TargetPoint = TargetPoint
    # arecord.WarpedPoint = WarpedPoint
    # arecord.AdjustedWarpedPoint = WarpedPoint + arecord.peak
    #
    # alignment_records.append(arecord)

    for t in tasks:
        arecord = t.wait_return()

        erec = nornir_imageregistration.EnhancedAlignmentRecord(ID=t.coord,
                                                                TargetPoint=grid_data.TargetPoints[t.ID],
                                                                SourcePoint=grid_data.SourcePoints[t.ID],
                                                                peak=arecord.peak,
                                                                weight=arecord.weight,
                                                                angle=arecord.angle,
                                                                flipped_ud=arecord.flippedud)

        erec.TargetROI = t.TargetROI
        erec.SourceROI = t.SourceROI

        # erec.TargetPSDScore = nornir_imageregistration.image_stats.ScoreImageWithPowerSpectralDensity(t.TargetROI)
        # erec.SourcePSDScore = nornir_imageregistration.image_stats.ScoreImageWithPowerSpectralDensity(t.SourceROI)
        # erec.PSDDelta = abs(erec.TargetPSDScore - erec.SourcePSDScore)
        erec.PSDDelta = (erec.TargetROI - np.mean(erec.TargetROI.flat)) - (erec.SourceROI - np.mean(erec.SourceROI.flat))
        erec.PSDDelta = np.sum(np.abs(erec.PSDDelta))

        # erec.CalculatedWarpedPoint = Transform.InverseTransform(erec.AdjustedTargetPoint).reshape(2)
        # arecord.ID = (iRow, iCol)
        # arecord.TargetPoint = t.TargetPoint
        # arecord.WarpedPoint = t.WarpedPoint
        # arecord.AdjustedWarpedPoint = t.WarpedPoint + arecord.peak

        alignment_records.append(erec)

    # del shared_warped_image
    # del shared_fixed_image

    return alignment_records
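# The PSDDelta assigned above is the summed absolute difference of the two mean-subtracted
# ROIs, so a constant brightness offset between the cells contributes nothing.  A tiny
# self-contained numpy check of that behavior (illustrative only):


def _example_psd_delta(target_roi, source_roi):
    import numpy as np

    t = target_roi - np.mean(target_roi.flat)
    s = source_roi - np.mean(source_roi.flat)
    return np.sum(np.abs(t - s))

# a = np.arange(16, dtype=np.float32).reshape(4, 4)
# _example_psd_delta(a, a + 10.0)  -> 0.0, the brightness offset is removed
# _example_psd_delta(a, a[::-1])   -> > 0 for structurally different cells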
def RefineMosaic(transforms, imagepaths, imageScale=None, subregion_shape=None):
    '''
    Locate overlapping regions between tiles in a mosaic and align multiple small subregions within them.
    This generates a set of control points.

    More than one tile may overlap a region, for example at corners.  To solve this the set of control
    points is merged into a KD tree.  Points closer than a set distance (less than the subregion size)
    are averaged to create a single offset.

    Using the remaining points a mesh transform is generated for the tile.
    '''
    if imageScale is None:
        imageScale = 1.0

    if subregion_shape is None:
        subregion_shape = np.array([128, 128])

    # downsample = 1.0 / imageScale

    tiles = nornir_imageregistration.tile.CreateTiles(transforms, imagepaths)
    list_tiles = list(tiles.values())
    pool = nornir_pools.GetGlobalMultithreadingPool()
    tasks = list()

    if imageScale is None:
        imageScale = nornir_imageregistration.tileset.MostCommonScalar(transforms, imagepaths)

    layout = nornir_imageregistration.layout.Layout()
    for t in list_tiles:
        layout.CreateNode(t.ID, t.FixedBoundingBox.Center)

    for tile_overlap in nornir_imageregistration.IterateTileOverlaps(list_tiles, minOverlap=0.03):
        # OK... add some small neighborhoods and register those...
        # (downsampled_overlapping_rect_A, downsampled_overlapping_rect_B, OffsetAdjustment) = nornir_imageregistration.tile.Tile.Calculate_Overlapping_Regions(A, B, imageScale)
        #
        task = pool.add_task("Align %d -> %d" % (tile_overlap.A.ID, tile_overlap.B.ID),
                             __RefineTileAlignmentRemote,
                             tile_overlap.A,
                             tile_overlap.B,
                             tile_overlap.scaled_overlapping_source_rect_A,
                             tile_overlap.scaled_overlapping_source_rect_B,
                             tile_overlap.scaled_offset,
                             imageScale,
                             subregion_shape)
        task.A = tile_overlap.A
        task.B = tile_overlap.B
        task.OffsetAdjustment = tile_overlap.scaled_offset
        tasks.append(task)

        # (point_pairs, net_offset) = __RefineTileAlignmentRemote(A, B, downsampled_overlapping_rect_A, downsampled_overlapping_rect_B, OffsetAdjustment, imageScale)
        # offset = net_offset[0:2] + OffsetAdjustment
        # weight = net_offset[2]
        #
        # print("%d -> %d : %s" % (A.ID, B.ID, str(net_offset)))
        #
        # layout.SetOffset(A.ID, B.ID, offset, weight)

        # print(str(net_offset))

    for t in tasks:
        try:
            (point_pairs, _) = t.wait_return()
        except Exception as e:
            print("Could not register %d -> %d" % (t.A.ID, t.B.ID))
            print("%s" % str(e))
            continue

        SplitDisplacements(t.A, t.B, point_pairs)
        # offset = net_offset[0:2] + (t.OffsetAdjustment * downsample)
        # weight = net_offset[2]
        # layout.SetOffset(t.A.ID, t.B.ID, offset, weight)

        # Figure out what offset we found vs. what offset we expected
        # PredictedOffset = t.B.FixedBoundingBox.Center - t.A.ControlBoundingBox.Center
        # diff = offset - PredictedOffset
        # distance = np.sqrt(np.sum(diff ** 2))

        # print("%d -> %d = %g" % (t.A.ID, t.B.ID, distance))

    pool.wait_completion()

    return (layout, tiles)
def TransformImage(transform, fixedImageShape, warpedImage, CropUndefined):
    '''Cut image into tiles, assemble small chunks

    :param transform transform: Transform to apply to point to map from warped image to fixed space
    :param ndarray fixedImageShape: Width and Height of the image to create
    :param ndarray warpedImage: Image to transform to fixed space
    :param bool CropUndefined: If true exclude areas outside the convex hull of the transform, if it exists
    :return: An ndimage array of the transformed image
    '''

    if CropUndefined:
        transform = triangulation.Triangulation(pointpairs=transform.points)

    tilesize = [2048, 2048]

    fixedImageShape = fixedImageShape.astype(dtype=np.int64)
    height = int(fixedImageShape[0])
    width = int(fixedImageShape[1])

    # print('\nConverting image to ' + str(self.NumCols) + "x" + str(self.NumRows) + ' grid of OpenGL textures')

    tasks = []
    grid_shape = nornir_imageregistration.TileGridShape(warpedImage.shape, tilesize)

    if np.all(grid_shape == np.array([1, 1])):
        # Single threaded
        return WarpedImageToFixedSpace(transform,
                                       fixedImageShape,
                                       warpedImage,
                                       botleft=np.array([0, 0]),
                                       area=fixedImageShape,
                                       extrapolate=not CropUndefined)
    else:
        outputImage = np.zeros(fixedImageShape, dtype=np.float32)
        sharedWarpedImage = nornir_imageregistration.npArrayToReadOnlySharedArray(warpedImage)
        mpool = nornir_pools.GetGlobalMultithreadingPool()

        for iY in range(0, height, int(tilesize[0])):

            end_iY = iY + tilesize[0]
            if end_iY > height:
                end_iY = height

            for iX in range(0, width, int(tilesize[1])):

                end_iX = iX + tilesize[1]
                if end_iX > width:
                    end_iX = width

                task = mpool.add_task(str(iX) + "x_" + str(iY) + "y",
                                      WarpedImageToFixedSpace,
                                      transform,
                                      fixedImageShape,
                                      sharedWarpedImage,
                                      botleft=[iY, iX],
                                      area=[end_iY - iY, end_iX - iX],
                                      extrapolate=not CropUndefined)
                task.iY = iY
                task.end_iY = end_iY
                task.iX = iX
                task.end_iX = end_iX

                tasks.append(task)

                # registeredTile = WarpedImageToFixedSpace(transform, fixedImageShape, warpedImage, botleft=[iY, iX], area=[end_iY - iY, end_iX - iX])
                # outputImage[iY:end_iY, iX:end_iX] = registeredTile

        mpool.wait_completion()

        for task in tasks:
            registeredTile = task.wait_return()
            outputImage[task.iY:task.end_iY, task.iX:task.end_iX] = registeredTile

    del sharedWarpedImage
    return outputImage
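# TransformImage splits the output into 2048 x 2048 chunks and clamps the last row and
# column of chunks at the image border.  A small sketch of just that bounds arithmetic
# (no transforms involved), handy for predicting how many tasks a given output produces:


def _example_tile_bounds(image_shape, tilesize=(2048, 2048)):
    height, width = int(image_shape[0]), int(image_shape[1])
    bounds = []
    for iY in range(0, height, int(tilesize[0])):
        end_iY = min(iY + int(tilesize[0]), height)
        for iX in range(0, width, int(tilesize[1])):
            end_iX = min(iX + int(tilesize[1]), width)
            bounds.append((iY, end_iY, iX, end_iX))
    return bounds

# _example_tile_bounds((3000, 5000)) yields a 2 x 3 grid of chunks,
# with the edge chunks clamped to 3000 and 5000 respectively.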
def RelaxLayout(layout_obj, max_tension_cutoff=None, max_iter=None, vector_scale=None,
                plotting_output_path=None, plotting_interval=None):
    '''
    :param layout_obj: Layout to refine
    :param float max_tension_cutoff: Stop iteration after the maximum tension vector has a magnitude below this value
    :param int max_iter: Maximum number of iterations
    '''

    max_tension = layout_obj.MaxWeightedNetTensionMagnitude[1]

    if max_tension_cutoff is None:
        max_tension_cutoff = 0.1

    if max_iter is None:
        max_iter = 500

    if plotting_interval is None:
        plotting_interval = 10

    i = 0

    min_plotting_tension = max_tension_cutoff * 20
    plotting_max_tension = max(min_plotting_tension, max_tension)

    # MovieImageDir = os.path.join(self.TestOutputPath, "relax_movie")
    # if not os.path.exists(MovieImageDir):
    #     os.makedirs(MovieImageDir)

    pool = None
    if plotting_output_path is not None:
        os.makedirs(plotting_output_path, exist_ok=True)
        pool = nornir_pools.GetGlobalMultithreadingPool()

    print("Relax Layout")

    while max_tension > max_tension_cutoff and i < max_iter:
        print("\t%d %g" % (i, max_tension))
        Layout.RelaxNodes(layout_obj, vector_scalar=vector_scale)
        max_tension = layout_obj.MaxWeightedNetTensionMagnitude[1]
        plotting_max_tension = max(min_plotting_tension, max_tension)

        if plotting_output_path is not None and (i % plotting_interval == 0 or i < 10):
            filename = os.path.join(plotting_output_path, "%d.svg" % i)
            # nornir_imageregistration.views.plot_layout(
            #     layout_obj=layout_obj.copy(),
            #     OutputFilename=filename,
            #     max_tension=plotting_max_tension)
            pool.add_task("Plot step #%d" % (i),
                          nornir_imageregistration.views.plot_layout,
                          layout_obj=layout_obj.copy(),
                          OutputFilename=filename,
                          max_tension=plotting_max_tension)

        # node_distance = setup_imagetest.array_distance(node_movement[:, 1:3])
        # max_distance = np.max(node_distance, 0)
        i += 1

    # nornir_shared.plot.VectorField(layout_obj.GetPositions(), layout_obj.NetTensionVectors(), OutputFilename=filename)
    # pool.add_task("Plot step #%d" % (i), nornir_shared.plot.VectorField, layout_obj.GetPositions(), layout_obj.WeightedNetTensionVectors(), OutputFilename=filename)

    return layout_obj
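# Stripped of plotting and pool bookkeeping, RelaxLayout is a plain iterate-until-converged
# loop: step the layout, re-measure the maximum weighted tension, and stop at either the
# cutoff or the iteration cap.  A toy sketch of that control flow with a made-up "system"
# whose tension halves every step:


def _example_relax(step, measure_tension, max_tension_cutoff=0.1, max_iter=500):
    i = 0
    max_tension = measure_tension()
    while max_tension > max_tension_cutoff and i < max_iter:
        step()
        max_tension = measure_tension()
        i += 1
    return i, max_tension

# state = {"tension": 10.0}
# _example_relax(lambda: state.update(tension=state["tension"] * 0.5),
#                lambda: state["tension"])
# -> (7, 0.078125): seven halvings bring the toy tension under the 0.1 cutoff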
def FindBestAngle(imFixed, imWarped, AngleList, MinOverlap=0.75, SingleThread=False, Cluster=False):
    '''Find the best angle to align two images.  This function can be very memory intensive.
    Setting SingleThread=True makes debugging easier'''

    Debug = False
    pool = None

    # Temporarily disabled until we have the cluster pool working again.  Leaving it enabled
    # eliminates shared memory, which is a big optimization.
    Cluster = False

    if len(AngleList) <= 1:
        SingleThread = True

    if not SingleThread:
        if Debug:
            pool = nornir_pools.GetThreadPool(Poolname=None, num_threads=3)
        elif Cluster:
            pool = nornir_pools.GetGlobalClusterPool()
        else:
            pool = nornir_pools.GetGlobalMultithreadingPool()

    AngleMatchValues = list()
    taskList = list()

    (fixedStats, warpedStats) = GetFixedAndWarpedImageStats(imFixed, imWarped)

    # MaxRotatedDimension = max([max(imFixed), max(imWarped)]) * 1.4143
    # MinRotatedDimension = max(min(imFixed), min(imWarped))
    #
    # SmallPaddedFixed = PadImageForPhaseCorrelation(imFixed, MaxOffset=0.1)
    # LargePaddedFixed = PadImageForPhaseCorrelation(imFixed, MaxOffset=0.1)

    PaddedFixed = nornir_imageregistration.PadImageForPhaseCorrelation(imFixed,
                                                                       MinOverlap=MinOverlap,
                                                                       ImageMedian=fixedStats.median,
                                                                       ImageStdDev=fixedStats.std)

    # Create a shared read-only memory map for the padded fixed image
    if not (Cluster or SingleThread):
        temp_padded_fixed_memmap = nornir_imageregistration.CreateTemporaryReadonlyMemmapFile(PaddedFixed)
        temp_shared_warp_memmap = nornir_imageregistration.CreateTemporaryReadonlyMemmapFile(imWarped)

        temp_padded_fixed_memmap.mode = 'r'  # We do not want functions we pass the memmap to modifying the original data
        temp_shared_warp_memmap.mode = 'r'  # We do not want functions we pass the memmap to modifying the original data

        # SharedPaddedFixed = nornir_imageregistration.npArrayToReadOnlySharedArray(PaddedFixed)
        # SharedWarped = nornir_imageregistration.npArrayToReadOnlySharedArray(imWarped)
        # SharedPaddedFixed = np.save(PaddedFixed, )
    else:
        SharedPaddedFixed = PaddedFixed
        SharedWarped = imWarped

    CheckTaskInterval = 16

    fixed_shape = imFixed.shape
    warped_shape = imWarped.shape

    for i, theta in enumerate(AngleList):

        if SingleThread:
            record = ScoreOneAngle(SharedPaddedFixed, SharedWarped, fixed_shape, warped_shape, theta,
                                   fixedStats=fixedStats, warpedStats=warpedStats, MinOverlap=MinOverlap)
            AngleMatchValues.append(record)
        else:
            task = pool.add_task(str(theta), ScoreOneAngle,
                                 temp_padded_fixed_memmap,
                                 temp_shared_warp_memmap,
                                 fixed_shape, warped_shape, theta,
                                 fixedStats=fixedStats, warpedStats=warpedStats, MinOverlap=MinOverlap)
            taskList.append(task)

        if not i % CheckTaskInterval == 0:
            continue

        # I don't like this, but it lets me delete tasks before filling the queue, which may save some memory.
        # No sense checking unless we've already filled the queue though.
        if len(taskList) > multiprocessing.cpu_count() * 1.5:
            for iTask in range(len(taskList) - 1, -1, -1):
                if taskList[iTask].iscompleted:
                    record = taskList[iTask].wait_return()
                    AngleMatchValues.append(record)
                    del taskList[iTask]

        # TestOneAngle(SharedPaddedFixed, SharedWarped, angle, None, MinOverlap)

    # taskList.sort(key=tpool.Task.name)

    while len(taskList) > 0:
        for iTask in range(len(taskList) - 1, -1, -1):
            if taskList[iTask].iscompleted:
                record = taskList[iTask].wait_return()
                AngleMatchValues.append(record)
                del taskList[iTask]

        if len(taskList) > 0:
            # Wait a bit before checking the task list
            sleep(0.5)

    # print(str(record.angle) + ' = ' + str(record.peak) + ' weight: ' + str(record.weight) + '\n')
    # ShowGrayscale(NormCorrelationImage)

    # print str(AngleMatchValues)

    # Delete the pool to ensure extra python threads do not stick around
    if pool is not None:
        pool.wait_completion()

    del PaddedFixed

    if not (Cluster or SingleThread):
        os.remove(temp_shared_warp_memmap.path)
        os.remove(temp_padded_fixed_memmap.path)
        # del SharedPaddedFixed
        # del SharedWarped

    BestMatch = max(AngleMatchValues, key=nornir_imageregistration.AlignmentRecord.WeightKey)
    return BestMatch
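# FindBestAngle shares the padded fixed image with workers through a temporary read-only
# memmap (CreateTemporaryReadonlyMemmapFile) instead of pickling the array into every task.
# The sketch below illustrates the underlying idea with plain numpy memmaps; it is an
# assumption about intent, not the helper's actual implementation.


def _example_memmap_roundtrip(shape=(512, 512)):
    import os
    import tempfile

    import numpy as np

    padded = np.random.rand(*shape).astype(np.float32)

    fd, path = tempfile.mkstemp(suffix=".memmap")
    os.close(fd)

    # Write once...
    writable = np.memmap(path, dtype=padded.dtype, mode="w+", shape=padded.shape)
    writable[:] = padded[:]
    writable.flush()
    del writable

    # ...then hand workers a read-only view backed by the same file
    readonly = np.memmap(path, dtype=np.float32, mode="r", shape=shape)
    assert np.array_equal(readonly, padded)

    del readonly
    os.remove(path)  # FindBestAngle likewise removes its temp files when finished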
def HTMLFromLogDataNode(DataNode, htmlpaths, MaxImageWidth=None, MaxImageHeight=None, **kwargs):

    if MaxImageWidth is None:
        MaxImageWidth = 1024

    if MaxImageHeight is None:
        MaxImageHeight = 1024

    if not DataNode.Name == 'Log':
        return None

    TableEntries = {}

    logFilePath = DataNode.FullPath
    if os.path.exists(logFilePath):
        Data = serialemlog.SerialEMLog.Load(logFilePath)
        RelPath = htmlpaths.GetSubNodeRelativePath(DataNode)

        TableEntries["2"] = __ExtractLogDataText(Data)

        TPool = nornir_pools.GetGlobalMultithreadingPool()

        LogSrcFullPath = os.path.join(RelPath, DataNode.Path)

        DriftSettleThumbnailFilename = GetTempFileSaltString() + "DriftSettle.png"
        DriftSettleImgSrcPath = os.path.join(htmlpaths.ThumbnailRelative, DriftSettleThumbnailFilename)
        DriftSettleThumbnailOutputFullPath = os.path.join(htmlpaths.ThumbnailDir, DriftSettleThumbnailFilename)

        # nfiles.RemoveOutdatedFile(logFilePath, DriftSettleThumbnailOutputFullPath)
        # if not os.path.exists(DriftSettleThumbnailOutputFullPath):
        TPool.add_task(DriftSettleThumbnailFilename, serialemlog.PlotDriftSettleTime, logFilePath, DriftSettleThumbnailOutputFullPath)

        DriftGridThumbnailFilename = GetTempFileSaltString() + "DriftGrid.png"
        DriftGridImgSrcPath = os.path.join(htmlpaths.ThumbnailRelative, DriftGridThumbnailFilename)
        DriftGridThumbnailOutputFullPath = os.path.join(htmlpaths.ThumbnailDir, DriftGridThumbnailFilename)

        # nfiles.RemoveOutdatedFile(logFilePath, DriftGridThumbnailFilename)
        # if not os.path.exists(DriftGridThumbnailFilename):
        TPool.add_task(DriftGridThumbnailFilename, serialemlog.PlotDriftGrid, logFilePath, DriftGridThumbnailOutputFullPath)

        # Build a histogram of drift settings
        # x = []
        # y = []
        # for t in Data.tileData.values():
        #     if not (t.dwellTime is None or t.drift is None):
        #         x.append(t.dwellTime)
        #         y.append(t.drift)
        #
        # ThumbnailFilename = GetTempFileSaltString() + "Drift.png"
        # ImgSrcPath = os.path.join(ThumbnailDirectoryRelPath, ThumbnailFilename)
        # ThumbnailOutputFullPath = os.path.join(ThumbnailDirectory, ThumbnailFilename)
        # PlotHistogram.PolyLinePlot(lines, Title="Stage settle time, max drift %g" % maxdrift, XAxisLabel='Dwell time (sec)', YAxisLabel="Drift (nm/sec)", OutputFilename=ThumbnailOutputFullPath)

        HTMLDriftSettleImage = HTMLImageTemplate % {'src': DriftSettleImgSrcPath,
                                                    'AltText': 'Drift scatterplot',
                                                    'ImageWidth': MaxImageWidth,
                                                    'ImageHeight': MaxImageHeight}
        HTMLDriftSettleAnchor = HTMLAnchorTemplate % {'href': DriftSettleImgSrcPath,
                                                      'body': HTMLDriftSettleImage}

        HTMLDriftGridImage = HTMLImageTemplate % {'src': DriftGridImgSrcPath,
                                                  'AltText': 'Drift scatterplot',
                                                  'ImageWidth': MaxImageWidth,
                                                  'ImageHeight': MaxImageHeight}
        HTMLDriftGridAnchor = HTMLAnchorTemplate % {'href': DriftGridImgSrcPath,
                                                    'body': HTMLDriftGridImage}

        TableEntries["1"] = HTMLAnchorTemplate % {'href': LogSrcFullPath, 'body': "Log File"}
        TableEntries["3"] = ColumnList([HTMLDriftSettleAnchor, HTMLDriftGridAnchor])
    else:
        TableEntries = []

        if 'AverageTileDrift' in DataNode.attrib:
            TableEntries.append(['Average tile drift:', '%.3g nm/sec' % float(DataNode.AverageTileDrift)])

        if 'MinTileDrift' in DataNode.attrib:
            TableEntries.append(['Min tile drift:', '%.3g nm/sec' % float(DataNode.MinTileDrift)])

        if 'MaxTileDrift' in DataNode.attrib:
            TableEntries.append(['Max tile drift:', '%.3g nm/sec' % float(DataNode.MaxTileDrift)])

        if 'AverageTileTime' in DataNode.attrib:
            TableEntries.append(['Average tile time:', '%.3g' % float(DataNode.AverageTileTime)])

        if 'FastestTileTime' in DataNode.attrib:
            dtime = datetime.timedelta(seconds=float(DataNode.FastestTileTime))
            TableEntries.append(['Fastest tile time:', str(dtime)])

        if 'CaptureTime' in DataNode.attrib:
            dtime = datetime.timedelta(seconds=float(DataNode.CaptureTime))
            TableEntries.append(['Total capture time:', str(dtime)])

    if len(TableEntries) == 0:
        return None

    # HTML = MatrixToTable(TableEntries)
    return TableEntries
def _Relax_Layout(self, layout_obj, max_tension_cutoff=None, max_iter=None, dirname_postfix=None):

    if max_tension_cutoff is None:
        max_tension_cutoff = 1.0

    if max_iter is None:
        max_iter = 100

    if dirname_postfix is None:
        dirname_postfix = ""

    max_tension = layout_obj.MaxWeightedNetTensionMagnitude[1]
    min_plotting_tension = max_tension_cutoff * 20
    plotting_max_tension = max(min_plotting_tension, max_tension)

    i = 0

    pool = nornir_pools.GetGlobalMultithreadingPool()

    MovieImageDir = os.path.join(self.TestOutputPath, "relax_movie" + dirname_postfix)
    if not os.path.exists(MovieImageDir):
        os.makedirs(MovieImageDir)

    while max_tension > max_tension_cutoff and i < max_iter:
        print("%d %g" % (i, max_tension))
        node_movement = nornir_imageregistration.layout.Layout.RelaxNodes(layout_obj)
        max_tension = layout_obj.MaxWeightedNetTensionMagnitude[1]
        plotting_max_tension = max(min_plotting_tension, max_tension)
        # node_distance = setup_imagetest.array_distance(node_movement[:, 1:3])
        # max_distance = np.max(node_distance, 0)

        filename = os.path.join(MovieImageDir, "%d.png" % i)

        # pool.add_task("Plot step #%d" % (i), nornir_imageregistration.views.plot_layout,
        #               layout_obj=layout_obj,
        #               OutputFilename=filename,
        #               max_tension=plotting_max_tension)

        if i % 10 == 0:
            # nornir_imageregistration.views.plot_layout(
            #     layout_obj=layout_obj,
            #     OutputFilename=filename,
            #     max_tension=plotting_max_tension)
            pool.add_task("Plot step #%d" % (i),
                          nornir_imageregistration.views.plot_layout,
                          layout_obj=layout_obj.copy(),
                          OutputFilename=filename,
                          max_tension=plotting_max_tension)

        # nornir_shared.plot.VectorField(Points=layout_obj.GetPositions(),
        #                                Offsets=layout_obj.WeightedNetTensionVectors(),
        #                                weights=nornir_imageregistration.array_distance(layout_obj.WeightedNetTensionVectors()) / layout_obj.MaxWeightedNetTensionMagnitude,
        #                                OutputFilename=filename)

        i += 1

    # return layout_obj