Code example #1
def plot_aligned_images(alignment_record, image_A, image_B):
    '''
    Plot the two images in the same axis using the provided alignment record    
    '''
    
    image_A = nornir_imageregistration.ImageParamToImageArray(image_A)
    image_B = nornir_imageregistration.ImageParamToImageArray(image_B)
    
    plt.clf()
    
    a = _gray_to_rgba(image_A, alpha=128)
    b = _gray_to_rgba(image_B, alpha=128)
    
    a[:,:,3] = a[:,:,0] #Scale alpha by luminosity
    b[:,:,3] = b[:,:,0] #Scale alpha by luminosity
    a[:,:,1] = 0
    
    b[:,:,0] = 0
    b[:,:,2] = 0
    
    a_extent = (0, image_A.shape[1],  0, image_A.shape[0])
    b_extent = (alignment_record.peak[1], alignment_record.peak[1] + image_B.shape[1], alignment_record.peak[0], alignment_record.peak[0] + image_B.shape[0])
    plt.imshow(a, origin='lower', extent=a_extent)
    plt.imshow(b, origin='lower', extent=b_extent)
    plt.axis('equal')
    plt.tight_layout()
    plt.show()
         
    return
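
A minimal usage sketch for the example above, assuming plot_aligned_images (and the private _gray_to_rgba helper it relies on) is importable from its defining module. The tile paths are hypothetical, and the AlignmentRecord is built with the positional (peak, weight, angle) arguments seen in code example #15; only its peak offset is used by the plot.

import nornir_imageregistration

# Hypothetical tile paths; any two overlapping grayscale images will do.
image_A = 'tile_000.png'
image_B = 'tile_001.png'

# peak is the (Y, X) offset of image_B relative to image_A, in pixels.
record = nornir_imageregistration.AlignmentRecord((12.5, -3.0), 1.0, 0)

# Overlays the two tiles in contrasting colors at the recorded offset.
plot_aligned_images(record, image_A, image_B)
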
Code example #2
def _ReplaceFilesWithImages(listImages):
    '''Replace any filepath strings in the passed parameter with loaded images.'''

    if isinstance(listImages, list):
        for i, value in enumerate(listImages):
            listImages[i] = nornir_imageregistration.ImageParamToImageArray(
                value)
    else:
        listImages = nornir_imageregistration.ImageParamToImageArray(
            listImages)

    return listImages
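
A short sketch of how this helper might be called; the file path is hypothetical, and the function accepts either a single image parameter or a list of them.

import numpy as np

# A mixed list: one path on disk (hypothetical) and one in-memory array.
params = ['tile_000.png', np.zeros((64, 64), dtype=np.float32)]

# Every entry comes back as an ndarray; path strings are loaded via ImageParamToImageArray.
images = _ReplaceFilesWithImages(params)
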
Code example #3
def __AlignmentScoreRemote(A_Filename, B_Filename,
                           scaled_overlapping_source_rect_A,
                           scaled_overlapping_source_rect_B):
    '''Returns the difference between the images'''

    try:
        OverlappingRegionA = __get_overlapping_image(
            nornir_imageregistration.ImageParamToImageArray(A_Filename,
                                                            dtype=np.float16),
            scaled_overlapping_source_rect_A,
            excess_scalar=1.0)
        OverlappingRegionB = __get_overlapping_image(
            nornir_imageregistration.ImageParamToImageArray(B_Filename,
                                                            dtype=np.float16),
            scaled_overlapping_source_rect_B,
            excess_scalar=1.0)

        # If the entire region is a solid color, then return the maximum score possible
        if (OverlappingRegionA.min() == OverlappingRegionA.max()) or \
            (OverlappingRegionA.max() == 0) or \
            (OverlappingRegionB.min() == OverlappingRegionB.max()) or \
            (OverlappingRegionB.max() == 0):
            return 1.0

        OverlappingRegionA -= OverlappingRegionA.min()
        OverlappingRegionA /= OverlappingRegionA.max()

        OverlappingRegionB -= OverlappingRegionB.min()
        OverlappingRegionB /= OverlappingRegionB.max()

        ignoreIndicies = OverlappingRegionA == OverlappingRegionA.max()
        ignoreIndicies |= OverlappingRegionA == OverlappingRegionA.min()
        ignoreIndicies |= OverlappingRegionB == OverlappingRegionB.max()
        ignoreIndicies |= OverlappingRegionB == OverlappingRegionB.min()

        # There was data in the aligned images, but not overlapping.  So we return the maximum value
        if np.all(ignoreIndicies):
            return 1.0

        validIndicies = np.invert(ignoreIndicies)

        OverlappingRegionA -= OverlappingRegionB
        absoluteDiff = np.fabs(OverlappingRegionA)

        # nornir_imageregistration.ShowGrayscale([OverlappingRegionA, OverlappingRegionB, absoluteDiff])
        return np.mean(absoluteDiff[validIndicies])
    except FloatingPointError as e:
        print("FloatingPointError: {0} for images\n\t{1}\n\t{2}".format(
            str(e), A_Filename, B_Filename))
        raise e
Code example #4
    def Create(cls, image):
        '''Returns an object with the mean,median,std.dev of an image,
           this object is attached to the image object and only calculated once'''

#        I removed this cache in the image object of the statistics.  I believe 
#        Python 3 had issues with it.  If there are performance problems we 
#        should add it back
#         try:
#             cachedVal = image.__IrToolsImageStats__
#             if cachedVal is not None:
#                 return cachedVal
#         except AttributeError:
#             pass
        
        obj = ImageStats()
        image = nornir_imageregistration.ImageParamToImageArray(image, dtype=numpy.float64)
        #if image.dtype is not numpy.float64:  # Use float 64 to ensure accurate statistical results
        #    image = image.astype(dtype=numpy.float64)
            
        flatImage = image.flat 
        obj._median = numpy.median(flatImage)
        obj._mean = numpy.mean(flatImage)
        obj._std = numpy.std(flatImage)
        obj._max = numpy.max(flatImage)
        obj._min = numpy.min(flatImage)
        
        del flatImage
         
#        image.__IrtoolsImageStats__ = obj
        return obj
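
A brief sketch of calling this factory, assuming Create is exposed as a classmethod of ImageStats; the private fields read below are the ones assigned above, and the real class presumably exposes them through properties as well.

import numpy as np

# Any grayscale ndarray (or image path) works; random noise is used purely for illustration.
image = np.random.rand(256, 256)

stats = ImageStats.Create(image)

# Inspect the statistics computed in Create.
print(stats._mean, stats._median, stats._std, stats._min, stats._max)
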
Code example #5
def _ConvertParamsToImageList(param):
    output = None
    if isinstance(param, str):
        loaded_image = nornir_imageregistration.ImageParamToImageArray(param)
        output = nornir_imageregistration.core._Image_To_Uint8(loaded_image)
    elif isinstance(param, np.ndarray):
        output = nornir_imageregistration.core._Image_To_Uint8(param)
    elif isinstance(param, collections.abc.Iterable):
        output = [_ConvertParamsToImageList(item) for item in param]
        if len(output) == 1:
            output = output[0]

    return output
Code example #6
    def RunSaveLoadImageTest(self, input_image_fullpath,
                             expected_input_properties,
                             expected_output_properties):
        self.assertTrue(isinstance(expected_input_properties, ImageProperties))
        self.assertTrue(isinstance(expected_output_properties,
                                   ImageProperties))

        self.assertTrue(os.path.exists(input_image_fullpath),
                        "Missing test input: {0}".format(input_image_fullpath))
        input_image = nornir_imageregistration.ImageParamToImageArray(
            input_image_fullpath)

        output_path = os.path.join(
            self.TestOutputPath,
            expected_output_properties.GenFilename(input_image_fullpath))

        wrong_input_bpp_error_msg = "Expected {0}-bit image".format(
            expected_input_properties.bpp)
        wrong_output_bpp_error_msg = "Expected {0}-bit image".format(
            expected_output_properties.bpp)

        bpp = nornir_imageregistration.ImageBpp(input_image)
        self.assertEqual(bpp, expected_input_properties.bpp,
                         wrong_input_bpp_error_msg)

        nornir_imageregistration.SaveImage(output_path,
                                           input_image,
                                           bpp=expected_output_properties.bpp)
        reloaded_image = nornir_imageregistration.ImageParamToImageArray(
            output_path)
        MagickBpp = nornir_shared.images.GetImageBpp(output_path)
        self.assertEqual(MagickBpp, expected_output_properties.bpp,
                         wrong_output_bpp_error_msg)

        reloaded_bpp = nornir_imageregistration.ImageBpp(reloaded_image)
        self.assertEqual(reloaded_bpp, expected_output_properties.bpp,
                         wrong_output_bpp_error_msg)
Code example #7
def TransformStos(transformData,
                  OutputFilename=None,
                  fixedImage=None,
                  warpedImage=None,
                  scalar=1.0,
                  CropUndefined=False):
    '''Assembles an image based on the passed transform.
    :param str fixedImage: Image describing the size we want the warped image to fill, either a string or ndarray
    :param str warpedImage: Image we will warp into fixed space, either a string or ndarray
    :param float scalar: Amount to scale the transform before passing the image through
    :param bool CropUndefined: If true exclude areas outside the convex hull of the transform, if it exists
    :return: transformed image
    '''

    stos = None
    stostransform = ParameterToStosTransform(transformData)

    if fixedImage is None:
        if stos is None:
            return None

        fixedImage = stos.ControlImageFullPath

    if warpedImage is None:
        if stos is None:
            return None

        warpedImage = stos.MappedImageFullPath

    fixedImageSize = nornir_imageregistration.GetImageSize(fixedImage)
    fixedImageShape = np.array(fixedImageSize) * scalar
    warpedImage = nornir_imageregistration.ImageParamToImageArray(warpedImage)

    stostransform.points = stostransform.points * scalar

    warpedImage = TransformImage(stostransform, fixedImageShape, warpedImage,
                                 CropUndefined)

    if OutputFilename is not None:
        imsave(OutputFilename, warpedImage, cmap='gray')

    return warpedImage
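
A usage sketch with hypothetical paths. Note that, as written above, the function returns None when fixedImage or warpedImage is omitted because the stos file itself is never loaded, so both images are passed explicitly here.

# Warp the moving image into fixed space at full resolution and save the result.
warped = TransformStos('0164-0162_brute.stos',               # hypothetical .stos transform
                       OutputFilename='warped_into_fixed.png',
                       fixedImage='fixed.png',                # defines the output size
                       warpedImage='moving.png',              # image to warp into fixed space
                       scalar=1.0)
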
Code example #8
def ShadeCorrect(imagepaths,
                 shadeimagepath,
                 outputpath,
                 correction_type=None,
                 bpp=None):

    shadeimage = nornir_imageregistration.ImageParamToImageArray(
        shadeimagepath)

    if correction_type == ShadeCorrectionTypes.BRIGHTFIELD:
        return __CorrectBrightfieldShading(imagepaths,
                                           shadeimage,
                                           outputpath,
                                           bpp=bpp)
    elif correction_type == ShadeCorrectionTypes.DARKFIELD:
        return __CorrectDarkfieldShading(imagepaths,
                                         shadeimage,
                                         outputpath,
                                         bpp=bpp)
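
A sketch of a call into this wrapper, assuming ShadeCorrectionTypes is importable from the same module; every path below is hypothetical.

import glob

# Hypothetical capture directory and a previously acquired brightfield shade image.
image_paths = sorted(glob.glob('capture/*.png'))

corrected = ShadeCorrect(image_paths,
                         'shade_reference.png',
                         'corrected/',
                         correction_type=ShadeCorrectionTypes.BRIGHTFIELD,
                         bpp=8)
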
Code example #9
    def RunSaveLoadImageTest_BppOnly(self, input_image, output_fullpath,
                                     expected_bpp):

        wrong_input_bpp_error_msg = "Expected {0}-bit image".format(
            expected_bpp)
        wrong_output_bpp_error_msg = "Expected {0}-bit image".format(
            expected_bpp)

        bpp = nornir_imageregistration.ImageBpp(input_image)
        self.assertEqual(bpp, expected_bpp, wrong_input_bpp_error_msg)

        nornir_imageregistration.SaveImage(output_fullpath,
                                           input_image,
                                           bpp=expected_bpp)
        reloaded_image = nornir_imageregistration.ImageParamToImageArray(
            output_fullpath)
        MagickBpp = nornir_shared.images.GetImageBpp(output_fullpath)
        self.assertEqual(MagickBpp, expected_bpp, wrong_output_bpp_error_msg)

        reloaded_bpp = nornir_imageregistration.ImageBpp(reloaded_image)
        self.assertEqual(reloaded_bpp, expected_bpp,
                         wrong_output_bpp_error_msg)
Code example #10
def __PruneFileSciPy__(filename, MaxOverlap=0.15, **kwargs):
    '''Returns a prune score for a single file
        Args:
           MaxOverlap = 0 to 1'''

    # TODO: This function should be updated to use the grid_subdivision module to create cells.  It should be used in the mosaic tile translation code to eliminate featureless 
    # overlap regions of adjacent tiles

    # logger = logging.getLogger('irtools.prune')
    # logger = multiprocessing.log_to_stderr()

    if MaxOverlap > 0.5:
        MaxOverlap = 0.5
# 
#     if not os.path.exists(filename):
#         # logger.error(filename + ' not found when attempting prune')
#         # PrettyOutput.LogErr(filename + ' not found when attempting prune')
#         return None

    Im = nornir_imageregistration.ImageParamToImageArray(filename)
    # Imb = nornir_imageregistration.LoadImage(filename)
    (Height, Width) = Im.shape

    StdDevList = []
    # MeanList = []

    MaxDim = Height
    if Width > Height:
        MaxDim = Width

    SampleSize = int(ceil(MaxDim / 32))

    VertOverlapPixelRange = int(MaxOverlap * float(Height))
    HorzOverlapPixelRange = int(MaxOverlap * float(Width))

    MaskTopBorder = VertOverlapPixelRange + (SampleSize - (mod(VertOverlapPixelRange, 32)))
    MaskBottomBorder = Height - VertOverlapPixelRange - (SampleSize - (mod(VertOverlapPixelRange, 32)))

    MaskLeftBorder = HorzOverlapPixelRange + (SampleSize - (mod(HorzOverlapPixelRange, 32)))
    MaskRightBorder = Width - HorzOverlapPixelRange - (SampleSize - (mod(HorzOverlapPixelRange, 32)))

    # Calculate the top
    for iHeight in range(0, MaskTopBorder - (SampleSize - 1), SampleSize):
        for iWidth in range(0, Width - 1, SampleSize):
            StdDev = numpy.std(Im[iHeight:iHeight + SampleSize, iWidth:iWidth + SampleSize])
            StdDevList.append(StdDev)
            # Im[iHeight:iHeight+SampleSize,iWidth:iWidth+SampleSize] = 0

    # Calculate the sides
    for iHeight in range(MaskTopBorder, MaskBottomBorder, SampleSize):
        for iWidth in range(0, MaskLeftBorder - (SampleSize - 1), SampleSize):
            StdDev = numpy.std(Im[iHeight:iHeight + SampleSize, iWidth:iWidth + SampleSize])
            StdDevList.append(StdDev)
            # Im[iHeight:iHeight+SampleSize,iWidth:iWidth+SampleSize] = 0.25

        for iWidth in range(MaskRightBorder, Width - SampleSize, SampleSize):
            StdDev = numpy.std(Im[iHeight:iHeight + SampleSize, iWidth:iWidth + SampleSize])
            StdDevList.append(StdDev)
            # Im[iHeight:iHeight+SampleSize,iWidth:iWidth+SampleSize] = 0.5

    # Calculate the bottom
    for iHeight in range(MaskBottomBorder, Height - SampleSize, SampleSize):
        for iWidth in range(0, Width - 1, SampleSize):
            StdDev = numpy.std(Im[iHeight:iHeight + SampleSize, iWidth:iWidth + SampleSize])
            StdDevList.append(StdDev)
            # Im[iHeight:iHeight+SampleSize,iWidth:iWidth+SampleSize] = 0.75

    del Im
    # nornir_imageregistration.ShowGrayscale(Im)
    return sum(StdDevList)
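
A sketch of ranking tiles by their prune score, assuming the call is made from within the defining module; the tile directory is hypothetical, and low scores indicate featureless tiles that are candidates for pruning.

import glob

tile_paths = sorted(glob.glob('tiles/*.png'))

# Score every tile and order them from most featureless to most textured.
scores = {path: __PruneFileSciPy__(path, MaxOverlap=0.15) for path in tile_paths}
featureless_first = sorted(scores, key=scores.get)
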
Code example #11
def __CalculateFeatureScoreSciPy__(image, cell_size=None, feature_coverage_percent=None, **kwargs):
    '''
    Calculates a score indicating the amount of texture available for our phase correlation algorithm to use for alignment
    :param image: The image to score, either an ndarray or filename
    :param tuple cell_size: The dimensions of the subregions that will be evaluated across the image.
    :param float feature_coverage_percent: A value from 0 - 100 indicating what percentage of the image should contain textures scoring at or above the returned value.
     
    '''
    
    if feature_coverage_percent is None:
        feature_coverage_percent = 75
    else:
        feature_coverage_percent = 100 - feature_coverage_percent
        assert 0 <= feature_coverage_percent <= 100
         
    Im = nornir_imageregistration.ImageParamToImageArray(image, dtype=numpy.float32)
# #     Im_filtered = scipy.ndimage.filters.median_filter(Im, size=3)
# #     sx = scipy.ndimage.sobel(Im_filtered, axis=0, mode='nearest')
# #     sy = scipy.ndimage.sobel(Im_filtered, axis=1, mode='nearest')
# #     sob = numpy.hypot(sx,sy)
# #
      
#     
# #     
#     logamp = numpy.log(amp) ** 2 
#     logampflat = numpy.asarray(logamp.flat)
#     aboveMedian = numpy.median(logampflat)
#     score = numpy.mean(logampflat[logampflat > aboveMedian])

    # score = numpy.max(Im_filtered.flat) - numpy.min(Im_filtered.flat)
    
    # score = numpy.var(Im_filtered.flat)
    # score = numpy.percentile(sob.flat, 90)
    # score = numpy.max(sob.flat)
# #    score = numpy.mean(sob.flat)
    # score = numpy.median(sob.flat) - numpy.percentile(sob.flat, 10)
    # mode = numpy.stats.mode(sob.flat)
    
    # p10 = numpy.percentile(Im_filtered.flat, 10)
    # p90 = numpy.percentile(Im_filtered.flat, 90)
    # med = numpy.median(sob.flat)
    
    # score = (p90 - p10)
    
    # score = numpy.median(sob.flat) - numpy.percentile(sob.flat, 10))
    
#     if score < .025:
#         nornir_imageregistration.ShowGrayscale([Im, Im_filtered, sob], title=str(score))
#         plt.figure()
#         plt.hist(sob.flat, bins=100)
#         a = 4
# #     return score

#     finite_subset = numpy.asarray(Im[numpy.isfinite(Im)].flat, dtype=numpy.float32)
#     if len(finite_subset) < 3:
#         return 0
# 
#     return numpy.std(finite_subset)
         
    if cell_size is None:
        # cell_size = numpy.max(numpy.vstack((numpy.asarray(numpy.asarray(Im.shape) / 64, dtype=numpy.int32), numpy.asarray((64,64),dtype=numpy.int32))),0) 
        cell_size = numpy.asarray((64, 64), dtype=numpy.int32)
     
    grid = nornir_imageregistration.CenteredGridDivision(Im.shape, cell_size=cell_size)
    
    cell_area = numpy.prod(cell_size)
     
    score_list = []
     
    for iPoint in range(0, grid.num_points):
        rect = nornir_imageregistration.Rectangle.CreateFromCenterPointAndArea(grid.SourcePoints[iPoint, :], grid.cell_size)
        subset = nornir_imageregistration.CropImageRect(Im, rect, cval=numpy.nan)
        finite_subset = subset[numpy.isfinite(subset)].flat
        if len(finite_subset) < (cell_area / 2.0):
            continue
         
        std_val = ScoreImageWithPowerSpectralDensity(subset)
         
        # std_val = numpy.var(numpy.asarray(finite_subset, dtype=numpy.float32))
        # std_val = numpy.percentile(finite_subset, q=90) - numpy.percentile(finite_subset, q=10) 
        score_list.append(std_val)
        
        del subset
        
    del Im

    if len(score_list) == 0:
        return 0
    elif len(score_list) == 1:
        return score_list[0]
    else:
        val = numpy.percentile(score_list, q=feature_coverage_percent)
     
        # val = numpy.max(score_list)
        # val = numpy.mean(score_list) #Median was less reliable when using the range of intensity values as a measure
        return val
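
A hedged sketch of filtering low-texture tiles with this score, again assuming the call is made from within the defining module; the cutoff value is illustrative, not a library default.

import glob

tile_paths = sorted(glob.glob('tiles/*.png'))

# Drop tiles whose default 64x64 cells lack enough texture for phase correlation.
min_score = 0.01  # illustrative cutoff, not taken from the library
textured = [p for p in tile_paths
            if __CalculateFeatureScoreSciPy__(p) >= min_score]
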
Code example #12
    def RunStosRefinement(self,
                          stosFilePath,
                          ImageDir=None,
                          SaveImages=False,
                          SavePlots=True):
        '''
        This is a test for the refine mosaic feature which is not fully implemented
        '''

        #stosFile = self.GetStosFile("0164-0162_brute_32")
        #stosFile = self.GetStosFile("0617-0618_brute_64")
        stosObj = nornir_imageregistration.files.StosFile.Load(stosFilePath)
        #stosObj.Downsample = 64.0
        #stosObj.Scale(2.0)
        #stosObj.Save(os.path.join(self.TestOutputPath, "0617-0618_brute_32.stos"))

        fixedImage = stosObj.ControlImageFullPath
        warpedImage = stosObj.MappedImageFullPath

        if ImageDir is not None:
            fixedImage = os.path.join(ImageDir, stosObj.ControlImageFullPath)
            warpedImage = os.path.join(ImageDir, stosObj.MappedImageFullPath)

        fixedImageData = nornir_imageregistration.ImageParamToImageArray(
            fixedImage, dtype=np.float16)
        warpedImageData = nornir_imageregistration.ImageParamToImageArray(
            warpedImage, dtype=np.float16)

        stosTransform = nornir_imageregistration.transforms.factory.LoadTransform(
            stosObj.Transform, 1)

        unrefined_image_path = os.path.join(self.TestOutputPath,
                                            'unrefined_transform.png')

        #        if not os.path.exists(unrefined_image_path):
        #            unrefined_warped_image = nornir_imageregistration.assemble.TransformStos(stosTransform,
        #                                                                                     fixedImage=fixedImage,
        #                                                                                     warpedImage=warpedImage)
        #            nornir_imageregistration.SaveImage(unrefined_image_path, unrefined_warped_image, bpp=8)
        #        else:
        #            unrefined_warped_image = nornir_imageregistration.LoadImage(unrefined_image_path)

        num_iterations = 10

        cell_size = np.asarray((128, 128), dtype=np.int32) * 2.0
        grid_spacing = (256, 256)

        i = 1

        finalized_points = {}

        min_percentile_included = 5.0

        final_pass = False
        final_pass_angles = np.linspace(-7.5, 7.5, 11)

        CutoffPercentilePerIteration = 10.0

        angles_to_search = None

        pool = nornir_pools.GetGlobalThreadPool()

        while i <= num_iterations:

            cachedFileName = '{5}_pass{0}_alignment_Cell_{2}x{1}_Grid_{4}x{3}'.format(
                i, cell_size[0], cell_size[1], grid_spacing[0],
                grid_spacing[1], self.TestName)
            alignment_points = self.ReadOrCreateVariable(cachedFileName)

            if alignment_points is None:
                alignment_points = _RunRefineTwoImagesIteration(
                    stosTransform,
                    fixedImageData,
                    warpedImageData,
                    os.path.join(ImageDir, stosObj.ControlMaskFullPath),
                    os.path.join(ImageDir, stosObj.MappedMaskFullPath),
                    cell_size=cell_size,
                    grid_spacing=grid_spacing,
                    finalized=finalized_points,
                    angles_to_search=angles_to_search,
                    min_alignment_overlap=None)
                self.SaveVariable(alignment_points, cachedFileName)

            print("Pass {0} aligned {1} points".format(i,
                                                       len(alignment_points)))

            if i == 1:
                cell_size = cell_size / 2.0

            combined_alignment_points = alignment_points + list(
                finalized_points.values())

            percentile = 100.0 - (CutoffPercentilePerIteration * i)
            if percentile < 10.0:
                percentile = 10.0
            elif percentile > 100:
                percentile = 100

#         if final_pass:
#             percentile = 0

            if SavePlots:
                histogram_filename = os.path.join(
                    self.TestOutputPath,
                    'weight_histogram_pass{0}.png'.format(i))
                nornir_imageregistration.views.PlotWeightHistogram(
                    alignment_points,
                    histogram_filename,
                    cutoff=percentile / 100.0)
                vector_field_filename = os.path.join(
                    self.TestOutputPath, 'Vector_field_pass{0}.png'.format(i))
                nornir_imageregistration.views.PlotPeakList(
                    alignment_points,
                    list(finalized_points.values()),
                    vector_field_filename,
                    ylim=(0, fixedImageData.shape[1]),
                    xlim=(0, fixedImageData.shape[0]))

            updatedTransform = local_distortion_correction._PeakListToTransform(
                combined_alignment_points, percentile)

            new_finalized_points = local_distortion_correction.CalculateFinalizedAlignmentPointsMask(
                combined_alignment_points,
                percentile=percentile,
                min_travel_distance=0.333)

            new_finalizations = 0
            for (ir, record) in enumerate(alignment_points):
                if not new_finalized_points[ir]:
                    continue

                key = tuple(record.SourcePoint)
                if key in finalized_points:
                    continue

                #See if we can improve the final alignment
                refined_align_record = nornir_imageregistration.stos_brute.SliceToSliceBruteForce(
                    record.TargetROI,
                    record.SourceROI,
                    AngleSearchRange=final_pass_angles,
                    MinOverlap=0.25,
                    SingleThread=True,
                    Cluster=False,
                    TestFlip=False)

                if refined_align_record.weight > record.weight:
                    record = nornir_imageregistration.alignment_record.EnhancedAlignmentRecord(
                        ID=record.ID,
                        TargetPoint=record.TargetPoint,
                        SourcePoint=record.SourcePoint,
                        peak=refined_align_record.peak,
                        weight=refined_align_record.weight,
                        angle=refined_align_record.angle,
                        flipped_ud=refined_align_record.flippedud)

                #Create a record that is unmoving
                finalized_points[key] = EnhancedAlignmentRecord(
                    record.ID,
                    TargetPoint=record.AdjustedTargetPoint,
                    SourcePoint=record.SourcePoint,
                    peak=np.asarray((0, 0), dtype=np.float32),
                    weight=record.weight,
                    angle=0,
                    flipped_ud=record.flippedud)

                new_finalizations += 1

            print("Pass {0} has locked {1} new points, {2} of {3} are locked".
                  format(i, new_finalizations, len(finalized_points),
                         len(combined_alignment_points)))

            stosObj.Transform = updatedTransform
            stosObj.Save(
                os.path.join(self.TestOutputPath,
                             "UpdatedTransform_pass{0}.stos".format(i)))

            if SaveImages:
                warpedToFixedImage = nornir_imageregistration.assemble.TransformStos(
                    updatedTransform,
                    fixedImage=fixedImageData,
                    warpedImage=warpedImageData)

                Delta = warpedToFixedImage - fixedImageData
                ComparisonImage = np.abs(Delta)
                ComparisonImage = ComparisonImage / ComparisonImage.max()

                #nornir_imageregistration.SaveImage(os.path.join(self.TestOutputPath, 'delta_pass{0}.png'.format(i)), ComparisonImage, bpp=8)
                #nornir_imageregistration.SaveImage(os.path.join(self.TestOutputPath, 'image_pass{0}.png'.format(i)), warpedToFixedImage, bpp=8)

                pool.add_task('delta_pass{0}.png'.format(i),
                              nornir_imageregistration.SaveImage,
                              os.path.join(self.TestOutputPath,
                                           'delta_pass{0}.png'.format(i)),
                              np.copy(ComparisonImage),
                              bpp=8)
                pool.add_task('image_pass{0}.png'.format(i),
                              nornir_imageregistration.SaveImage,
                              os.path.join(self.TestOutputPath,
                                           'image_pass{0}.png'.format(i)),
                              np.copy(warpedToFixedImage),
                              bpp=8)

            #nornir_imageregistration.core.ShowGrayscale([fixedImageData, unrefined_warped_image, warpedToFixedImage, ComparisonImage])

            i = i + 1

            stosTransform = updatedTransform

            if final_pass:
                break

            if i == num_iterations:
                final_pass = True
                angles_to_search = final_pass_angles

            #If we've locked 10% of the points and have not locked any new ones we are done
            if len(finalized_points) > len(combined_alignment_points
                                           ) * 0.1 and new_finalizations == 0:
                final_pass = True
                angles_to_search = final_pass_angles

            #If we've locked 90% of the points we are done
            if len(finalized_points) > len(combined_alignment_points) * 0.9:
                final_pass = True
                angles_to_search = final_pass_angles

        #Convert the transform to a grid transform and persist to disk
        stosObj.Transform = local_distortion_correction.ConvertTransformToGridTransform(
            stosObj.Transform,
            source_image_shape=warpedImageData.shape,
            cell_size=cell_size,
            grid_spacing=grid_spacing)
        stosObj.Save(os.path.join(self.TestOutputPath, "Final_Transform.stos"))
        return
Code example #13
def TransformTile(transform,
                  imagefullpath,
                  distanceImage=None,
                  target_space_scale=None,
                  TargetRegion=None,
                  SingleThreadedInvoke=False):
    '''Transform the passed image.  DistanceImage is an existing image recording the distance to the center of the
       image for each pixel.  target_space_scale is used when the image size does not match the image size encoded in the
       transform.  A scale will be calculated in this case and if it does not match the required scale the tile will 
       not be transformed.
       :param transform transform: Transformation used to map pixels from source image to output image
       :param str imagefullpath: Full path to the image on disk
       :param ndarray distanceImage: Optional pre-allocated array to contain the distance of each pixel from the center for use as a depth mask
       :param float target_space_scale: Optional pre-calculated scalar to apply to the transforms target space control points.  If None the scale is calculated based on the difference
                                   between input image size and the image size of the transform. i.e.  If the source_space is downsampled by 4 then the target_space will be downsampled to match
       :param array TargetRegion: [MinY MinX MaxY MaxX] If specified only the specified region is populated.  Otherwise transform the entire image.'''

    TargetRegionRect = None
    if TargetRegion is not None:
        if isinstance(TargetRegion, nornir_imageregistration.Rectangle):
            TargetRegionRect = TargetRegion.copy()
            TargetRegion = list(TargetRegion.ToTuple())
        else:
            TargetRegionRect = nornir_imageregistration.Rectangle.CreateFromBounds(
                TargetRegion)

        spatial.RaiseValueErrorOnInvalidBounds(TargetRegion)

    if not os.path.exists(imagefullpath):
        return nornir_imageregistration.transformed_image_data.TransformedImageData(
            errorMsg='Tile does not exist ' + imagefullpath)

    # if isinstance(transform, meshwithrbffallback.MeshWithRBFFallback):
    # Don't bother mapping points falling outside the defined boundaries because we won't have image data for it
    #   transform = triangulation.Triangulation(transform.points)

    warpedImage = nornir_imageregistration.ImageParamToImageArray(
        imagefullpath, dtype=np.float32)
    warpedImage = nornir_imageregistration.ForceGrayscale(warpedImage)

    # Automatically scale the transform if the input image shape does not match the transform bounds
    source_space_scale = tiles.__DetermineTransformScale(
        transform, warpedImage.shape)

    if target_space_scale is None:
        target_space_scale = source_space_scale

    Scaled_TargetRegionRect = TargetRegionRect
    if source_space_scale == target_space_scale:
        if source_space_scale != 1.0:
            scaledTransform = copy.deepcopy(transform)
            scaledTransform.Scale(source_space_scale)
            transform = scaledTransform

    else:
        if source_space_scale != 1.0:
            scaledTransform = copy.deepcopy(transform)
            scaledTransform.ScaleWarped(source_space_scale)
            transform = scaledTransform

        if target_space_scale != 1.0:
            scaledTransform = copy.deepcopy(transform)
            scaledTransform.ScaleFixed(target_space_scale)
            transform = scaledTransform

    if TargetRegion is not None and target_space_scale != 1.0:
        TargetRegion = np.array(TargetRegion) * target_space_scale
        Scaled_TargetRegionRect = nornir_imageregistration.Rectangle.scale_on_origin(
            TargetRegionRect, target_space_scale)
        Scaled_TargetRegionRect = nornir_imageregistration.Rectangle.SafeRound(
            Scaled_TargetRegionRect)

    (width, height, minX, minY) = (0, 0, 0, 0)

    if TargetRegion is None:
        if hasattr(transform, 'FixedBoundingBox'):
            width = transform.FixedBoundingBox.Width
            height = transform.FixedBoundingBox.Height

            TargetRegionRect = transform.FixedBoundingBox
            TargetRegionRect = nornir_imageregistration.Rectangle.SafeRound(
                TargetRegionRect)
            Scaled_TargetRegionRect = TargetRegionRect

            (minY, minX, maxY, maxX) = TargetRegionRect.ToTuple()
        else:
            width = warpedImage.shape[1]
            height = warpedImage.shape[0]
            TargetRegionRect = nornir_imageregistration.Rectangle.CreateFromPointAndArea(
                (0, 0), warpedImage.shape)
            Scaled_TargetRegionRect = TargetRegionRect
    else:
        assert (len(TargetRegion) == 4)
        (minY, minX, maxY, maxX) = Scaled_TargetRegionRect.ToTuple()
        height = maxY - minY
        width = maxX - minX

    height = np.ceil(height)
    width = np.ceil(width)

    distanceImage = __GetOrCreateDistanceImage(distanceImage,
                                               warpedImage.shape[0:2])

    (fixedImage, centerDistanceImage) = assemble.WarpedImageToFixedSpace(
        transform, (height, width), [warpedImage, distanceImage],
        botleft=(minY, minX),
        area=(height, width),
        cval=[0, __MaxZBufferValue(np.float16)])

    del warpedImage
    del distanceImage

    return nornir_imageregistration.transformed_image_data.TransformedImageData.Create(
        fixedImage.astype(np.float16),
        centerDistanceImage.astype(np.float16),
        transform,
        source_space_scale,
        target_space_scale,
        SingleThreadedInvoke=SingleThreadedInvoke)
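
A sketch of transforming one tile, assuming the .stos file and tile path below exist; the transform is loaded the same way code example #12 does, and the returned TransformedImageData carries the warped image and center-distance image produced above.

import nornir_imageregistration

# Hypothetical transform source and tile path.
stosObj = nornir_imageregistration.files.StosFile.Load('example.stos')
transform = nornir_imageregistration.transforms.factory.LoadTransform(stosObj.Transform, 1)

# Assemble the entire tile into target space at the inferred scale.
result = TransformTile(transform, 'tiles/tile_000.png')
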
Code example #14
    def RunConvertImageTest(self, input_image_fullpath):
        '''Tests converting an image using the min/max/gamma parameters.  Displays to user
        to ensure valid results'''
        original_image = nornir_imageregistration.ImageParamToImageArray(
            input_image_fullpath)

        Bpp = nornir_shared.images.GetImageBpp(input_image_fullpath)
        hist = nornir_imageregistration.Histogram(input_image_fullpath)
        hist = nornir_shared.histogram.Histogram.TryRemoveMaxValueOutlier(hist)
        hist = nornir_shared.histogram.Histogram.TryRemoveMinValueOutlier(hist)
        iMinCutoff = hist.MinNonEmptyBin()
        iMaxCutoff = hist.MaxNonEmptyBin()

        min_val = hist.BinValue(iMinCutoff)
        max_val = hist.BinValue(iMaxCutoff, 1.0)

        leveled_image = nornir_imageregistration.core._ConvertSingleImage(
            input_image_fullpath, MinMax=(min_val, max_val), Bpp=Bpp)

        #         self.assertTrue(nornir_imageregistration.ShowGrayscale([original_image,leveled_image],
        #                                                                PassFail=True,
        #                                                                title="The original image"))

        inverted_leveled_image = nornir_imageregistration.core._ConvertSingleImage(
            input_image_fullpath,
            MinMax=(min_val, max_val),
            Bpp=Bpp,
            Invert=True)
        gamma_lowered_image = nornir_imageregistration.core._ConvertSingleImage(
            input_image_fullpath,
            MinMax=(min_val, max_val),
            Bpp=Bpp,
            Gamma=0.7)
        gamma_raised_image = nornir_imageregistration.core._ConvertSingleImage(
            input_image_fullpath,
            MinMax=(min_val, max_val),
            Bpp=Bpp,
            Gamma=1.3)

        self.assertTrue(
            nornir_imageregistration.ShowGrayscale(
                [[original_image, leveled_image, inverted_leveled_image],
                 [gamma_lowered_image, gamma_raised_image]],
                PassFail=True,
                title=
                "First Row: The original image, the leveled image, the inverted image\nSecond Row: Lower gamma, Raised Gamma"
            ))

        self.RunSaveLoadImageTest_BppOnly(
            leveled_image, os.path.join(self.TestOutputPath, 'leveled.png'),
            Bpp)
        self.RunSaveLoadImageTest_BppOnly(
            inverted_leveled_image,
            os.path.join(self.TestOutputPath, 'inverted_leveled.png'), Bpp)
        self.RunSaveLoadImageTest_BppOnly(
            gamma_lowered_image,
            os.path.join(self.TestOutputPath, 'gamma_lowered.png'), Bpp)
        self.RunSaveLoadImageTest_BppOnly(
            gamma_raised_image,
            os.path.join(self.TestOutputPath, 'gamma_raised.png'), Bpp)

        return leveled_image
Code example #15
def ScoreOneAngle(imFixed,
                  imWarped,
                  FixedImageShape,
                  WarpedImageShape,
                  angle,
                  fixedStats=None,
                  warpedStats=None,
                  FixedImagePrePadded=True,
                  MinOverlap=0.75):
    '''Returns an alignment score for a fixed image and an image rotated at a specified angle'''

    imFixed = nornir_imageregistration.ImageParamToImageArray(imFixed,
                                                              dtype=np.float32)
    imWarped = nornir_imageregistration.ImageParamToImageArray(
        imWarped, dtype=np.float32)

    # gc.set_debug(gc.DEBUG_LEAK)
    if fixedStats is None:
        fixedStats = nornir_imageregistration.ImageStats.CalcStats(imFixed)

    if warpedStats is None:
        warpedStats = nornir_imageregistration.ImageStats.CalcStats(imWarped)

    OKToDelimWarped = False
    if angle != 0:
        imWarped = interpolation.rotate(imWarped,
                                        axes=(1, 0),
                                        angle=angle,
                                        cval=np.nan)
        imWarpedEmptyIndicies = np.isnan(imWarped)
        imWarped[imWarpedEmptyIndicies] = warpedStats.GenerateNoise(
            np.sum(imWarpedEmptyIndicies))
        OKToDelimWarped = True

    RotatedWarped = nornir_imageregistration.PadImageForPhaseCorrelation(
        imWarped,
        ImageMedian=warpedStats.median,
        ImageStdDev=warpedStats.std,
        MinOverlap=MinOverlap)

    assert (RotatedWarped.shape[0] > 0)
    assert (RotatedWarped.shape[1] > 0)

    if not FixedImagePrePadded:
        PaddedFixed = nornir_imageregistration.PadImageForPhaseCorrelation(
            imFixed,
            ImageMedian=fixedStats.median,
            ImageStdDev=fixedStats.std,
            MinOverlap=MinOverlap)
    else:
        PaddedFixed = imFixed

    # print str(PaddedFixed.shape) + ' ' +  str(RotatedPaddedWarped.shape)

    TargetHeight = max([PaddedFixed.shape[0], RotatedWarped.shape[0]])
    TargetWidth = max([PaddedFixed.shape[1], RotatedWarped.shape[1]])

    PaddedFixed = nornir_imageregistration.PadImageForPhaseCorrelation(
        imFixed,
        NewWidth=TargetWidth,
        NewHeight=TargetHeight,
        ImageMedian=fixedStats.median,
        ImageStdDev=fixedStats.std,
        MinOverlap=1.0)
    RotatedPaddedWarped = nornir_imageregistration.PadImageForPhaseCorrelation(
        RotatedWarped,
        NewWidth=TargetWidth,
        NewHeight=TargetHeight,
        ImageMedian=warpedStats.median,
        ImageStdDev=warpedStats.std,
        MinOverlap=1.0)

    #if OKToDelimWarped:
    del imWarped

    del imFixed

    del RotatedWarped

    assert (PaddedFixed.shape == RotatedPaddedWarped.shape)

    CorrelationImage = nornir_imageregistration.ImagePhaseCorrelation(
        PaddedFixed, RotatedPaddedWarped)

    del PaddedFixed
    del RotatedPaddedWarped

    CorrelationImage = fftshift(CorrelationImage)
    CorrelationImage -= CorrelationImage.min()
    CorrelationImage /= CorrelationImage.max()

    # Timer.Start('Find Peak')

    OverlapMask = nornir_imageregistration.overlapmasking.GetOverlapMask(
        FixedImageShape,
        WarpedImageShape,
        CorrelationImage.shape,
        MinOverlap,
        MaxOverlap=1.0)
    (peak, weight) = nornir_imageregistration.FindPeak(CorrelationImage,
                                                       OverlapMask)
    del OverlapMask

    del CorrelationImage

    record = nornir_imageregistration.AlignmentRecord(peak, weight, angle)

    return record
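
A sketch of sweeping a small range of angles with ScoreOneAngle and keeping the strongest record; the images are synthetic stand-ins, and FixedImagePrePadded is left False because the fixed image has not been padded beforehand.

import numpy as np

# Synthetic stand-ins for two overlapping grayscale tiles.
imFixed = np.random.rand(256, 256).astype(np.float32)
imWarped = np.random.rand(256, 256).astype(np.float32)

records = [ScoreOneAngle(imFixed, imWarped,
                         imFixed.shape, imWarped.shape,
                         angle,
                         FixedImagePrePadded=False,
                         MinOverlap=0.75)
           for angle in np.linspace(-7.5, 7.5, 11)]

best = max(records, key=lambda r: r.weight)
print(best.angle, best.peak, best.weight)
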
Code example #16
def _RunRefineTwoImagesIteration(Transform,
                                 target_image,
                                 source_image,
                                 target_mask=None,
                                 source_mask=None,
                                 cell_size=(256, 256),
                                 grid_spacing=(256, 256),
                                 finalized=None,
                                 angles_to_search=None,
                                 min_alignment_overlap=0.5):
    '''
    Places a regular grid of control points across the target image.  These corresponding points on the
    source image are then adjusted to create a mapping from Source To Fixed Space for the source image. 
    :param transform transform: Transform that maps from source to target space
    :param ndarray target_image: Target image to serve as reference
    :param ndarray source_image: Source image to be transformed
    :param ndarray target_mask: Target image mask, True where valid pixels exist, can be None
    :param ndarray source_mask: Source image mask, True where valid pixels exist, can be None
    :param tuple cell_size: (width, height) area of image around control points to use for registration
    :param tuple grid_spacing: (width, height) of separation between control points on the grid
    :param dict finalized: A dictionary of points, indexed by Target Space Coordinates, that are finalized and do not need to be checked
    :param array angles_to_search: An array of floats or None.  Images are rotated by the degrees indicated in the array.  The single best alignment across all angles is selected.
    :param float min_alignment_overlap: Limits how far control points can be translated.  The cells from target and source space must overlap by this minimum amount.
    '''

    if isinstance(Transform, str):
        Transform = nornir_imageregistration.transforms.factory.LoadTransform(
            Transform, 1)

    grid_spacing = np.asarray(grid_spacing, np.int32)
    cell_size = np.asarray(cell_size, np.int32)

    target_image = nornir_imageregistration.ImageParamToImageArray(
        target_image, dtype=np.float32)
    source_image = nornir_imageregistration.ImageParamToImageArray(
        source_image, dtype=np.float32)

    if target_mask is not None:
        target_mask = nornir_imageregistration.ImageParamToImageArray(
            target_mask, dtype=bool)
        target_image = nornir_imageregistration.RandomNoiseMask(
            target_image, target_mask)

    if source_mask is not None:
        source_mask = nornir_imageregistration.ImageParamToImageArray(
            source_mask, dtype=bool)
        source_image = nornir_imageregistration.RandomNoiseMask(
            source_image, source_mask)

#     shared_fixed_image  = nornir_imageregistration.npArrayToReadOnlySharedArray(target_image)
#     shared_fixed_image.mode = 'r'
#     shared_warped_image = nornir_imageregistration.npArrayToReadOnlySharedArray(source_image)
#     shared_warped_image.mode = 'r'

# Mark a grid along the fixed image, then find the points on the warped image

# grid_data = nornir_imageregistration.grid_subdivision.CenteredGridRefinementCells(target_image.shape, cell_size)
    grid_data = nornir_imageregistration.ITKGridDivision(
        source_image.shape, cell_size=cell_size, grid_spacing=grid_spacing)

    # grid_dims = nornir_imageregistration.TileGridShape(target_image.shape, grid_spacing)

    if angles_to_search is None:
        angles_to_search = [0]
    # angles_to_search = np.linspace(-7.5, 7.5, 11)

    # Create the grid coordinates
#    coords = [np.asarray((iRow, iCol), dtype=np.int32) for iRow in range(grid_data.grid_dims[0]) for iCol in range(grid_data.grid_dims[1])]
#    coords = np.vstack(coords)

# Create
#    TargetPoints = coords * grid_spacing  # [np.asarray((iCol * grid_spacing[0], iRow * grid_spacing[1]), dtype=np.int32) for (iRow, iCol) in coords]

# Grid dimensions round up, so if we are larger than image find out by how much and adjust the points so they are centered on the image
#    overage = ((grid_dims * grid_spacing) - target_image.shape) / 2.0
#    TargetPoints = np.round(TargetPoints - overage).astype(np.int64)
# TODO, ensure fixedPoints are within the bounds of target_image
    grid_data.FilterOutofBoundsSourcePoints(source_image.shape)
    grid_data.RemoveCellsUsingSourceImageMask(source_mask, 0.45)
    #nornir_imageregistration.views.grid_data.PlotGridPositionsAndMask(grid_data.SourcePoints, source_mask, OutputFilename=None)

    grid_data.PopulateTargetPoints(Transform)
    grid_data.RemoveCellsUsingTargetImageMask(target_mask, 0.45)

    #nornir_imageregistration.views.grid_data.PlotGridPositionsAndMask(grid_data.TargetPoints, target_mask, OutputFilename=None)
    # grid_data.ApplyWarpedImageMask(source_mask)
    #     valid_inbounds = np.logical_and(np.all(FixedPoi4nts >= np.asarray((0, 0)), 1), np.all(TargetPoints < target_mask.shape, 1))
    #     TargetPoints = TargetPoints[valid_inbounds, :]
    #     coords = coords[valid_inbounds, :]
    #
    if finalized is not None:
        found = [
            tuple(grid_data.SourcePoints[i, :]) not in finalized
            for i in range(grid_data.coords.shape[0])
        ]
        valid = np.asarray(found, bool)
        grid_data.RemoveMaskedPoints(valid)

#         TargetPoints = TargetPoints[valid, :]
#         coords = coords[valid, :]
#
#     # Filter Fixed Points falling outside the mask
#     if target_mask is not None:
#         valid = nornir_imageregistration.index_with_array(target_mask, TargetPoints)
#         TargetPoints = TargetPoints[valid, :]
#         coords = coords[valid, :]
#
#     SourcePoints = Transform.InverseTransform(TargetPoints).astype(np.int32)
#     if source_mask is not None:
#         valid = np.logical_and(np.all(SourcePoints >= np.asarray((0, 0)), 1), np.all(SourcePoints < source_mask.shape, 1))
#         SourcePoints = SourcePoints[valid, :]
#         TargetPoints = TargetPoints[valid, :]
#         coords = coords[valid, :]
#
#         valid = nornir_imageregistration.index_with_array(source_mask, SourcePoints)
#         SourcePoints = SourcePoints[valid, :]
#         TargetPoints = TargetPoints[valid, :]
#         coords = coords[valid, :]

    pool = nornir_pools.GetGlobalMultithreadingPool()
    #pool = nornir_pools.GetGlobalThreadPool()
    tasks = list()
    alignment_records = list()

    rigid_transforms = ApproximateRigidTransform(
        input_transform=Transform, target_points=grid_data.TargetPoints)

    for (i, coord) in enumerate(grid_data.coords):

        AlignTask = StartAttemptAlignPoint(
            pool,
            "Align %d,%d" % (coord[0], coord[1]),
            rigid_transforms[i],
            #Transform,
            target_image,
            source_image,
            grid_data.TargetPoints[i, :],
            cell_size,
            anglesToSearch=angles_to_search,
            min_alignment_overlap=min_alignment_overlap)

        if AlignTask is None:
            continue

#         AlignTask = pool.add_task("Align %d,%d" % (coord[0], coord[1]),
#                                   AttemptAlignPoint,
#                                   Transform,
#                                   shared_fixed_image,
#                                   shared_warped_image,
#                                   TargetPoints[i,:],
#                                   cell_size,
#                                   anglesToSearch=AnglesToSearch)

        AlignTask.ID = i
        AlignTask.coord = coord
        tasks.append(AlignTask)

    #             arecord.iRow = iRow
#             arecord.iCol = iCol
#             arecord.TargetPoint = TargetPoint
#             arecord.WarpedPoint = WarpedPoint
#             arecord.AdjustedWarpedPoint = WarpedPoint + arecord.peak
#
#             alignment_records.append(arecord)

    for t in tasks:
        arecord = t.wait_return()

        erec = nornir_imageregistration.EnhancedAlignmentRecord(
            ID=t.coord,
            TargetPoint=grid_data.TargetPoints[t.ID],
            SourcePoint=grid_data.SourcePoints[t.ID],
            peak=arecord.peak,
            weight=arecord.weight,
            angle=arecord.angle,
            flipped_ud=arecord.flippedud)

        erec.TargetROI = t.TargetROI
        erec.SourceROI = t.SourceROI

        #erec.TargetPSDScore = nornir_imageregistration.image_stats.ScoreImageWithPowerSpectralDensity(t.TargetROI)
        #erec.SourcePSDScore = nornir_imageregistration.image_stats.ScoreImageWithPowerSpectralDensity(t.SourceROI)

        #erec.PSDDelta = abs(erec.TargetPSDScore - erec.SourcePSDScore)
        erec.PSDDelta = (erec.TargetROI - np.mean(erec.TargetROI.flat)) - (
            erec.SourceROI - np.mean(erec.SourceROI.flat))
        erec.PSDDelta = np.sum(np.abs(erec.PSDDelta))
        # erec.CalculatedWarpedPoint = Transform.InverseTransform(erec.AdjustedTargetPoint).reshape(2)
        # arecord.ID = (iRow, iCol)
        # arecord.TargetPoint = t.TargetPoint
        # arecord.WarpedPoint = t.WarpedPoint
        # arecord.AdjustedWarpedPoint = t.WarpedPoint + arecord.peak

        alignment_records.append(erec)


#
#     del shared_warped_image
#     del shared_fixed_image

    return alignment_records
Code example #17
def RefineTransform(stosTransform,
                    target_image,
                    source_image,
                    target_mask=None,
                    source_mask=None,
                    num_iterations=None,
                    cell_size=None,
                    grid_spacing=None,
                    angles_to_search=None,
                    min_travel_for_finalization=None,
                    min_alignment_overlap=None,
                    SaveImages=False,
                    SavePlots=False,
                    outputDir=None):
    '''
    Refines a transform and returns a grid transform produced by the refinement algorithm.
    
    Places a regular grid of control points across the target image.  These corresponding points on the
    source image are then adjusted to create a mapping from Source To Fixed Space for the source image. 
    :param stosTransform: The transform to refine
    :param target_image: ndarray or path to file, fixed space image
    :param source_image: ndarray or path to file, source space image
    :param target_mask: ndarray or path to file, fixed space image mask
    :param source_mask: ndarray or path to file, source space image mask
    :param int num_iterations: The maximum number of iterations to perform
    :param tuple cell_size: (width, height) area of image around control points to use for registration
    :param tuple grid_spacing: (width, height) of separation between control points on the grid
    :param array angles_to_search: An array of floats or None.  Images are rotated by the degrees indicated in the array.  The single best alignment across all angles is selected.
    :param float min_alignment_overlap: Limits how far control points can be translated.  The cells from target and source space must overlap by this minimum amount.
    :param bool SaveImages: Saves registered images of each iteration in the output path for debugging purposes
    :param bool SavePlots: Saves histograms and vector plots of each iteration in the output path for debugging purposes     
    :param str outputDir: Directory to save images and plots if requested.  Must not be null if SaveImages or SavePlots are true   
    '''

    if cell_size is None:
        cell_size = (256, 256)

    if grid_spacing is None:
        grid_spacing = (256, 256)

    if angles_to_search is None:
        angles_to_search = [0]

    if num_iterations is None:
        num_iterations = 10

    if min_travel_for_finalization is None:
        min_travel_for_finalization = 0.333

    if min_alignment_overlap is None:
        min_alignment_overlap = 0.5

    if SavePlots or SaveImages:
        assert (outputDir is not None)

    # Convert inputs to numpy arrays

    cell_size = np.asarray(
        cell_size,
        dtype=np.int32) * 2.0  # Double size of cell area for first pass only
    grid_spacing = np.asarray(grid_spacing, dtype=np.int32)

    target_image = nornir_imageregistration.ImageParamToImageArray(
        target_image, dtype=np.float32)
    source_image = nornir_imageregistration.ImageParamToImageArray(
        source_image, dtype=np.float32)

    if target_mask is not None:
        target_mask = nornir_imageregistration.ImageParamToImageArray(
            target_mask, dtype=bool)

    if source_mask is not None:
        source_mask = nornir_imageregistration.ImageParamToImageArray(
            source_mask, dtype=bool)

    final_pass = False  # True if this is the last iteration the loop will perform
    final_pass_angles = np.linspace(
        -7.5, 7.5, 11
    )  # The last registration we perform on a cell is a bit more thorough

    finalized_points = {}

    CutoffPercentilePerIteration = 10.0

    i = 1

    while i <= num_iterations:
        alignment_points = _RunRefineTwoImagesIteration(
            stosTransform,
            target_image,
            source_image,
            target_mask,
            source_mask,
            cell_size=cell_size,
            grid_spacing=grid_spacing,
            finalized=finalized_points,
            angles_to_search=angles_to_search,
            min_alignment_overlap=min_alignment_overlap)

        print("Pass {0} aligned {1} points".format(i, len(alignment_points)))

        # For the first pass we use a larger cell to help get some initial registration points
        if i == 1:
            cell_size = cell_size / 2.0

        combined_alignment_points = alignment_points + list(
            finalized_points.values())

        percentile = 100.0 - (CutoffPercentilePerIteration * i)
        if percentile < 10.0:
            percentile = 10.0
        elif percentile > 100:
            percentile = 100


#         if final_pass:
#             percentile = 0

        if SavePlots:
            histogram_filename = os.path.join(
                outputDir, 'weight_histogram_pass{0}.png'.format(i))
            nornir_imageregistration.views.PlotWeightHistogram(
                alignment_points,
                histogram_filename,
                cutoff=percentile / 100.0)
            vector_field_filename = os.path.join(
                outputDir, 'Vector_field_pass{0}.png'.format(i))
            nornir_imageregistration.views.PlotPeakList(
                alignment_points,
                list(finalized_points.values()),
                vector_field_filename,
                ylim=(0, target_image.shape[1]),
                xlim=(0, target_image.shape[0]))
            vector_field_filename = os.path.join(
                outputDir, 'Vector_field_pass_delta{0}.png'.format(i))
            nornir_imageregistration.views.PlotPeakList(
                alignment_points,
                list(finalized_points.values()),
                vector_field_filename,
                ylim=(0, target_image.shape[1]),
                xlim=(0, target_image.shape[0]),
                attrib='PSDDelta')

        updatedTransform = _PeakListToTransform(combined_alignment_points,
                                                percentile)

        new_finalized_points = CalculateFinalizedAlignmentPointsMask(
            combined_alignment_points,
            percentile=percentile,
            min_travel_distance=min_travel_for_finalization)

        new_finalizations = 0
        for (ir, record) in enumerate(alignment_points):
            if not new_finalized_points[ir]:
                continue

            key = tuple(record.SourcePoint)
            if key in finalized_points:
                continue

            # See if we can improve the final alignment
            refined_align_record = nornir_imageregistration.stos_brute.SliceToSliceBruteForce(
                record.TargetROI,
                record.SourceROI,
                AngleSearchRange=final_pass_angles,
                MinOverlap=min_alignment_overlap,
                SingleThread=True,
                Cluster=False,
                TestFlip=False)

            if refined_align_record.weight > record.weight:
                oldPSDDelta = record.PSDDelta
                record = nornir_imageregistration.EnhancedAlignmentRecord(
                    ID=record.ID,
                    TargetPoint=record.TargetPoint,
                    SourcePoint=record.SourcePoint,
                    peak=refined_align_record.peak,
                    weight=refined_align_record.weight,
                    angle=refined_align_record.angle,
                    flipped_ud=refined_align_record.flippedud)
                record.PSDDelta = oldPSDDelta

            # Create a record that is unmoving
            finalized_points[
                key] = nornir_imageregistration.EnhancedAlignmentRecord(
                    record.ID,
                    TargetPoint=record.AdjustedTargetPoint,
                    SourcePoint=record.SourcePoint,
                    peak=np.asarray((0, 0), dtype=np.float32),
                    weight=record.weight,
                    angle=0,
                    flipped_ud=record.flippedud)

            finalized_points[key].PSDDelta = record.PSDDelta

            new_finalizations += 1

        print(
            "Pass {0} has locked {1} new points, {2} of {3} are locked".format(
                i, new_finalizations, len(finalized_points),
                len(combined_alignment_points)))
        stosTransform = updatedTransform

        if SaveImages:
            #InputStos.Save(os.path.join(outputDir, "UpdatedTransform_pass{0}.stos".format(i)))

            warpedToFixedImage = nornir_imageregistration.assemble.TransformStos(
                updatedTransform,
                fixedImage=target_image,
                warpedImage=source_image)

            Delta = warpedToFixedImage - target_image
            ComparisonImage = np.abs(Delta)
            ComparisonImage = ComparisonImage / ComparisonImage.max()

            nornir_imageregistration.SaveImage(os.path.join(
                outputDir, 'delta_pass{0}.png'.format(i)),
                                               ComparisonImage,
                                               bpp=8)
            nornir_imageregistration.SaveImage(os.path.join(
                outputDir, 'image_pass{0}.png'.format(i)),
                                               warpedToFixedImage,
                                               bpp=8)

        i = i + 1

        if final_pass:
            break

        if i == num_iterations:
            final_pass = True
            angles_to_search = final_pass_angles

        # If we've locked 10% of the points and have not locked any new ones we are done
        if len(finalized_points) > len(
                combined_alignment_points) * 0.1 and new_finalizations == 0:
            final_pass = True
            angles_to_search = final_pass_angles

        # If we've locked 90% of the points we are done
        if len(finalized_points) > len(combined_alignment_points) * 0.9:
            final_pass = True
            angles_to_search = final_pass_angles

    # Convert the transform to a grid transform and persist to disk

    return stosTransform
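
A sketch of driving RefineTransform directly, mirroring the way RefineStosFile (code example #18) calls it; the .stos path is hypothetical and the masks may be None.

import nornir_imageregistration

# Hypothetical stos file providing the starting transform and the image/mask paths.
stosObj = nornir_imageregistration.files.StosFile.Load('input.stos')
stosTransform = nornir_imageregistration.transforms.factory.LoadTransform(stosObj.Transform, 1)

refined = RefineTransform(stosTransform,
                          target_image=stosObj.ControlImageFullPath,
                          source_image=stosObj.MappedImageFullPath,
                          target_mask=stosObj.ControlMaskFullPath,
                          source_mask=stosObj.MappedMaskFullPath,
                          num_iterations=5,
                          cell_size=(256, 256),
                          grid_spacing=(256, 256))
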
Code example #18
def RefineStosFile(InputStos,
                   OutputStosPath,
                   num_iterations=None,
                   cell_size=None,
                   grid_spacing=None,
                   angles_to_search=None,
                   min_travel_for_finalization=None,
                   min_alignment_overlap=None,
                   SaveImages=False,
                   SavePlots=False):
    '''
    Refines an inputStos file and produces the OutputStos file.
    
    Places a regular grid of control points across the target image.  These corresponding points on the
    source image are then adjusted to create a mapping from Source To Fixed Space for the source image. 
    :param StosFile InputStos: Either a file path or StosFile object.  This is the stosfile to be refined.
    :param OutputStosPath: Path to save the refined stos file at.
    :param int num_iterations: The maximum number of iterations to perform
    :param tuple cell_size: (width, height) area of image around control points to use for registration
    :param tuple grid_spacing: (width, height) of separation between control points on the grid
    :param array angles_to_search: An array of floats or None.  Images are rotated by the degrees indicated in the array.  The single best alignment across all angles is selected.
    :param float min_alignment_overlap: Limits how far control points can be translated.  The cells from target and source space must overlap by this minimum amount.
    :param bool SaveImages: Saves registered images of each iteration in the output path for debugging purposes
    :param bool SavePlots: Saves histograms and vector plots of each iteration in the output path for debugging purposes        
    '''

    outputDir = os.path.dirname(OutputStosPath)

    # Load the input stos file if it is not already loaded
    if not isinstance(InputStos, nornir_imageregistration.files.StosFile):
        stosDir = os.path.dirname(InputStos)
        InputStos = nornir_imageregistration.files.StosFile.Load(InputStos)
        InputStos.TryConvertRelativePathsToAbsolutePaths(stosDir)

    stosTransform = nornir_imageregistration.transforms.factory.LoadTransform(
        InputStos.Transform, 1)

    target_image = nornir_imageregistration.ImageParamToImageArray(
        InputStos.ControlImageFullPath, dtype=np.float32)
    source_image = nornir_imageregistration.ImageParamToImageArray(
        InputStos.MappedImageFullPath, dtype=np.float32)
    target_mask = None
    source_mask = None

    if InputStos.ControlMaskFullPath is not None:
        target_mask = nornir_imageregistration.ImageParamToImageArray(
            InputStos.ControlMaskFullPath, dtype=bool)

    if InputStos.MappedMaskFullPath is not None:
        source_mask = nornir_imageregistration.ImageParamToImageArray(
            InputStos.MappedMaskFullPath, dtype=bool)

    output_transform = RefineTransform(
        stosTransform,
        target_image,
        source_image,
        target_mask,
        source_mask,
        num_iterations=num_iterations,
        cell_size=cell_size,
        grid_spacing=grid_spacing,
        angles_to_search=angles_to_search,
        min_travel_for_finalization=min_travel_for_finalization,
        min_alignment_overlap=min_alignment_overlap,
        SaveImages=SaveImages,
        SavePlots=SavePlots,
        outputDir=outputDir)

    InputStos.Transform = ConvertTransformToGridTransform(
        output_transform,
        source_image_shape=source_image.shape,
        cell_size=cell_size,
        grid_spacing=grid_spacing)
    InputStos.Save(OutputStosPath)
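
A usage sketch for this end-to-end entry point; the paths are hypothetical, and the plots produced when SavePlots is True land in the output directory derived from the output .stos path.

RefineStosFile('0164-0162_brute.stos',              # hypothetical input .stos
               'output/0164-0162_refined.stos',     # refined grid transform written here
               num_iterations=10,
               cell_size=(256, 256),
               grid_spacing=(256, 256),
               SaveImages=False,
               SavePlots=True)
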