Example #1
def OpenAndPreProcessImage(path,
                           norm=True,
                           blur=None,
                           copyOrig=False,
                           quantize=None):
    im = Image.open(path).convert('L')
    im = np.asarray(im)  #[125:375,125:375] #Take a smaller region for speed

    # Also return an unprocessed copy of original image, if required
    im_orig = None
    if copyOrig:
        im_orig = im.copy()

    if blur and blur > 0:
        im = filters.gaussian_filter(im, blur)
    # Normalize
    if norm:
        im = filters.normalize(im, 0.0, None)
    else:
        im = im / 255.  #convert to floats without normalizing

    #Quantize into a number of intensity bins
    if quantize:
        im = np.rint(im * (quantize - 1)) / (quantize - 1)

    return im, im_orig
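
A minimal usage sketch for the function above; the file path and parameter values are hypothetical, not from the source:

im, im_orig = OpenAndPreProcessImage('input.png',   # hypothetical path
                                     blur=1.5,       # Gaussian smoothing with sigma 1.5
                                     copyOrig=True,  # also return the untouched uint8 image
                                     quantize=16)    # snap intensities to 16 levels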
Example #2
    def test_normalize(self):
        normalizer = filters.normalize(10)

        expected = np.array([.0, .1, .2, .3, .4, .5, .6, .7, .8, .9])
        actual = normalizer([i for i in range(10)])

        self.assertTrue(np.array_equal(actual, expected))
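
Note that this test exercises a factory-style normalize (normalize(10) returns a callable), unlike the array-based filters.normalize(im, 0.0, None) calls in the other examples. A hypothetical sketch of such a factory, consistent with the expected output above (not the project's actual implementation):

def normalize(bins):                                   # hypothetical sketch only
    def _apply(values):
        # 0..9 with bins=10 -> 0.0, 0.1, ..., 0.9
        return np.asarray(values, dtype=float) / bins
    return _apply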
Example #3
def main():
    if len(sys.argv) < 2:
        print("Usage: %s <franko string>" % sys.argv[0])
        sys.exit(1)

    word = sys.argv[1]
    normalizedWord = filters.normalize(word)
    print(filters.generatePlainReplacement(normalizedWord))
Example #4
def preProcessImage(im, norm=True, blur=None, equalize=False, quantize=None):
    """Take a uint8 ndarray representing an image and preprocess it according to arguments.
    Note: returns a floating point (0-1) image.
    """

    #Convert to float to avoid any overflow or rounding issues
    im = np.array(im, dtype='float64')
    if blur and blur > 0:
        im = filters.gaussian_filter(im, blur)

    if norm:
        im = filters.normalize(im, 0.0, None)
    else:
        im = im/255. #convert to floats between 0 and 1 without normalizing

    if equalize:        
        im = filters.image_histogram_equalization(im)

    if quantize:
        im = np.rint(im * (quantize-1))/(quantize-1)
    
    return im
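
A worked example of the quantization step used above (assumes numpy is imported as np): with quantize=4, values in [0, 1] snap to the four levels 0, 1/3, 2/3 and 1.

x = np.array([0.10, 0.40, 0.60, 0.95])
print(np.rint(x * 3) / 3)  # -> 0, 1/3, 2/3 and 1 respectively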
Example #5
def preProcessImageAndCopy(path, norm=True, blur=None):
    im = Image.open(path).convert('L')
    im = np.asarray(im)  #[125:375,125:375] #Take a smaller region for speed

    # Keep unmodified copies of original images
    im_orig = im.copy()

    if blur and blur > 0:
        im = filters.gaussian_filter(im, blur)
    # Normalize
    if norm:
        im = filters.normalize(im, 0.0, None)
    else:
        im = im / 255.  #convert to floats without normalizing

    # Normalizing gives us values between 0 and 1. Now quantize into 4 bits
    # to reduce the amount of noise in the mutual information calculation and
    # reduce the time taken to calculate it, too.
    # Instead, do this later on in the distance measure.


#    im = np.rint(im*63).astype('uint8')

    return im, im_orig
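
A sketch of the quantization the comment above defers to the distance measure, given a normalized float image im as returned by the function (assumes numpy as np). Note that 4 bits correspond to 16 levels, whereas the commented-out line used 64 levels (6 bits):

im_q = np.rint(im * 15).astype('uint8')  # 4-bit quantization: 16 intensity levels in 0..15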
Example #6
    def initialize(self, pyramid_images_output_path=None):
        if len(self.pyramid_factors) == 0:
            self.add_pyramid_level(1, 0.0)
        if len(self.initial_transforms) == 0:
            self.add_initial_transform(AffineTransform(self.dim))

        ### Preprocessing

        pyramid_levels = len(self.pyramid_factors)

        for i in range(pyramid_levels):
            factor = self.pyramid_factors[i]

            ref_resampled = filters.downsample(
                filters.gaussian_filter(self.ref_im, self.pyramid_sigmas[i]),
                factor)
            flo_resampled = filters.downsample(
                filters.gaussian_filter(self.flo_im, self.pyramid_sigmas[i]),
                factor)

            ref_mask_resampled = filters.downsample(self.ref_mask, factor)
            flo_mask_resampled = filters.downsample(self.flo_mask, factor)

            ref_resampled = filters.normalize(ref_resampled, 0.0,
                                              ref_mask_resampled)
            flo_resampled = filters.normalize(flo_resampled, 0.0,
                                              flo_mask_resampled)

            if pyramid_images_output_path is not None and ref_resampled.ndim == 2:
                scipy.misc.imsave(
                    '%sref_resampled_%d.png' %
                    (pyramid_images_output_path, i + 1), ref_resampled)
                scipy.misc.imsave(
                    '%sflo_resampled_%d.png' %
                    (pyramid_images_output_path, i + 1), flo_resampled)

            if self.ref_weights is None:
                ref_weights = np.zeros(ref_resampled.shape)
                ref_weights[ref_mask_resampled] = 1.0
            else:
                ref_weights = filters.downsample(self.ref_weights, factor)
            if self.flo_weights is None:
                flo_weights = np.zeros(flo_resampled.shape)
                flo_weights[flo_mask_resampled] = 1.0
            else:
                flo_weights = filters.downsample(self.flo_weights, factor)

            ref_diag = np.sqrt(
                np.square(np.array(ref_resampled.shape) *
                          self.ref_spacing).sum())
            flo_diag = np.sqrt(
                np.square(np.array(flo_resampled.shape) *
                          self.flo_spacing).sum())

            q_ref = QuantizedImage(ref_resampled,
                                   self.alpha_levels,
                                   ref_weights,
                                   self.ref_spacing * factor,
                                   remove_zero_weight_pnts=True)
            q_flo = QuantizedImage(flo_resampled,
                                   self.alpha_levels,
                                   flo_weights,
                                   self.flo_spacing * factor,
                                   remove_zero_weight_pnts=True)

            tf_ref = alpha_amd.AlphaAMD(q_ref,
                                        self.alpha_levels,
                                        ref_diag,
                                        self.ref_spacing * factor,
                                        ref_mask_resampled,
                                        ref_mask_resampled,
                                        interpolator_mode='linear',
                                        dt_fun=None,
                                        mask_out_edges=True)
            tf_flo = alpha_amd.AlphaAMD(q_flo,
                                        self.alpha_levels,
                                        flo_diag,
                                        self.flo_spacing * factor,
                                        flo_mask_resampled,
                                        flo_mask_resampled,
                                        interpolator_mode='linear',
                                        dt_fun=None,
                                        mask_out_edges=True)

            symmetric_measure = True
            squared_measure = False

            sym_dist = symmetric_amd_distance.SymmetricAMDDistance(
                symmetric_measure=symmetric_measure,
                squared_measure=squared_measure)

            sym_dist.set_ref_image_source(q_ref)
            sym_dist.set_ref_image_target(tf_ref)

            sym_dist.set_flo_image_source(q_flo)
            sym_dist.set_flo_image_target(tf_flo)

            sym_dist.set_sampling_fraction(self.sampling_fraction)

            sym_dist.initialize()

            self.distances.append(sym_dist)
Example #7
def main():
    np.random.seed(1000)
    
    if len(sys.argv) < 3:
        print('register_example.py: Too few parameters. Give the path to two gray-scale image files.')
        print('Example: python2 register_example.py reference_image floating_image')
        return False

    ref_im_path = sys.argv[1]
    flo_im_path = sys.argv[2]

    ref_im = scipy.misc.imread(ref_im_path, 'L')
    flo_im = scipy.misc.imread(flo_im_path, 'L')

    # Save copies of original images
    ref_im_orig = ref_im.copy()
    flo_im_orig = flo_im.copy()

    ref_im = filters.normalize(ref_im, 0.0, None)
    flo_im = filters.normalize(flo_im, 0.0, None)
    
    diag = 0.5 * (transforms.image_diagonal(ref_im, spacing) + transforms.image_diagonal(flo_im, spacing))

    weights1 = np.ones(ref_im.shape)
    mask1 = np.ones(ref_im.shape, 'bool')
    weights2 = np.ones(flo_im.shape)
    mask2 = np.ones(flo_im.shape, 'bool')

    # Initialize registration framework for 2d images
    reg = Register(2)

    reg.set_report_freq(param_report_freq)
    reg.set_alpha_levels(alpha_levels)

    reg.set_reference_image(ref_im)
    reg.set_reference_mask(mask1)
    reg.set_reference_weights(weights1)

    reg.set_floating_image(flo_im)
    reg.set_floating_mask(mask2)
    reg.set_floating_weights(weights2)

    # Setup the Gaussian pyramid resolution levels
    
    reg.add_pyramid_level(4, 5.0)
    reg.add_pyramid_level(2, 3.0)
    reg.add_pyramid_level(1, 0.0)
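    # Each add_pyramid_level(factor, sigma) call above registers one resolution level:
    # the images are smoothed with a Gaussian of the given sigma and then downsampled
    # by 'factor' (see the loop over pyramid_factors / pyramid_sigmas in initialize()).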

    # Learning-rate / Step lengths [[start1, end1], [start2, end2] ...] (for each pyramid level)
    step_lengths = np.array([[1.0, 1.0], [1.0, 0.5], [0.5, 0.1]])

    # Create the transform and add it to the registration framework (switch between affine/rigid transforms by commenting/uncommenting)
    # Affine
    reg.add_initial_transform(AffineTransform(2), np.array([1.0/diag, 1.0/diag, 1.0/diag, 1.0/diag, 1.0, 1.0]))
    # Rigid 2D
    #reg.add_initial_transform(Rigid2DTransform(2), np.array([1.0/diag, 1.0, 1.0]))

    # Set the parameters
    reg.set_iterations(param_iterations)
    reg.set_gradient_magnitude_threshold(0.001)
    reg.set_sampling_fraction(param_sampling_fraction)
    reg.set_step_lengths(step_lengths)

    # Create output directory
    directory = os.path.dirname('./test_images/output/')
    if not os.path.exists(directory):
        os.makedirs(directory)

    # Start the pre-processing
    reg.initialize('./test_images/output/')
    
    # Control the formatting of numpy
    np.set_printoptions(suppress=True, linewidth=200)

    # Start the registration
    reg.run()

    (transform, value) = reg.get_output(0)

    ### Warp final image
    c = transforms.make_image_centered_transform(transform, ref_im, flo_im, spacing, spacing)

    # Print out transformation parameters
    print('Transformation parameters: %s.' % str(transform.get_params()))

    # Create the output image
    ref_im_warped = np.zeros(ref_im.shape)

    # Transform the floating image into the reference image space by applying transformation 'c'
    c.warp(In = flo_im_orig, Out = ref_im_warped, in_spacing=spacing, out_spacing=spacing, mode='spline', bg_value = 0.0)

    # Save the registered image
    scipy.misc.imsave('./test_images/output/registered.png', ref_im_warped)

    # Compute the absolute difference image between the reference and registered images
    D1 = np.abs(ref_im_orig-ref_im_warped)
    err = np.sum(D1)
    print("Err: %f" % err)

    scipy.misc.imsave('./test_images/output/diff.png', D1)

    return True
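
Note that scipy.misc.imread and scipy.misc.imsave, used in the example above, are no longer available in current SciPy releases (they were removed in SciPy 1.2). A rough equivalent using PIL, as the later examples already do (ref_im_path and ref_im_warped as in the example; the float image is cast back to uint8 before saving):

ref_im = np.asarray(Image.open(ref_im_path).convert('L'))
Image.fromarray(ref_im_warped.astype('uint8')).save('./test_images/output/registered.png')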
Example #8
def get_filters(digit_width,
                digit_max_value,
                digits_len,
                spacing_range,
                image_width,
                evenly=False,
                fltrs=None):
    """
    Getting complete list of filters to process a digit images: resizing, spacing, etc.

    Parameters
    ----------
    digit_width: int
        The standard width of an image stored in MNIST DB.

    digit_max_value: int
        The max (white) value of the image array.

    digits_len: int
        A count of a digit of the generated sequence.

    spacing_range: tuple
        A (minimum, maximum) pair (tuple), representing the min and max spacing between digits.
        A unit should be a pixel.

    evenly: boolean    Default: False
        A mode of generating an image.
        If False - Randomly choosing a spacing in the spacing_range.
        If True - evenly interval for each image and spacing.

    fltrs: list of functions
        A list-like containing functions. Each of them will apply on a digit image and modify it
        before adding to sequence.

    Return
    ------
    A list-like containing filter functions.
    The result has to contain a default filters like as invert, resize and spacing
    and might be extending a custom list of filters.
    Each of them will apply on a digit image and modify it before adding to sequence.
    """
    # image parameters
    digit_count = digits_len
    digit_width = digit_width
    # default filter - invert
    processing_filters = [
        filters.invert(digit_max_value),
        filters.normalize(digit_max_value)
    ]
    if evenly and fltrs:
        processing_filters += fltrs
    # calc image and spacing parameters
    creating_interval = helper.randomly_image_interval if not evenly else helper.evenly_image_interval

    digit_width_seq, spacing_width_seq = creating_interval(
        digit_width=digit_width,
        digit_count=digit_count,
        image_width=image_width,
        spacing=spacing_range)
    # add default post process filters
    processing_filters.append(
        filters.resize_seq(digit_width_seq, default=digit_width))
    processing_filters.append(
        filters.spacing_seq(spacing_width_seq, digit_max_value))

    postprocessing_filters = []
    if not evenly:
        if fltrs:
            postprocessing_filters += fltrs
        postprocessing_filters.append(filters.resize(image_width))

    return processing_filters, postprocessing_filters
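
A hypothetical usage sketch for get_filters; the parameter values below (28-pixel MNIST digits, a 5-digit sequence) are illustrative and not taken from the source:

processing, postprocessing = get_filters(digit_width=28,
                                         digit_max_value=255,
                                         digits_len=5,
                                         spacing_range=(2, 10),
                                         image_width=160,
                                         evenly=False)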
Example #9
def main():
    np.random.seed(1000)

    if len(sys.argv) < 3:
        print(
            f'{sys.argv[0]}: Too few parameters. Give the path to two gray-scale image files.'
        )
        print(f'Example: python {sys.argv[0]} reference_image floating_image')
        return False

    ref_im_path = sys.argv[1]
    flo_im_path = sys.argv[2]

    ref_im = Image.open(ref_im_path).convert('L')
    flo_im = Image.open(flo_im_path).convert('L')
    ref_im = np.asarray(ref_im) / 255.
    flo_im = np.asarray(flo_im) / 255.

    # Make copies of original images
    ref_im_orig = ref_im.copy()
    flo_im_orig = flo_im.copy()

    # Preprocess images
    ref_im = filters.normalize(ref_im, 0.0, None)
    flo_im = filters.normalize(flo_im, 0.0, None)

    weights1 = np.ones(ref_im.shape)
    mask1 = np.ones(ref_im.shape, 'bool')
    weights2 = np.ones(flo_im.shape)
    mask2 = np.ones(flo_im.shape, 'bool')

    # Initialize registration framework for 2d images
    reg = Register(2)
    reg.set_image_data(ref_im, flo_im, mask1, mask2, weights1, weights2)

    # Choose a registration model
    reg.set_model('alphaAMD', alpha_levels=alpha_levels, \
                  symmetric_measure=symmetric_measure, \
                  squared_measure=squared_measure)

    # Setup the Gaussian pyramid resolution levels
    reg.add_pyramid_level(4, 5.0)
    reg.add_pyramid_level(2, 3.0)
    reg.add_pyramid_level(1, 0.0)

    # Choose an optimizer and set optimizer-specific parameters
    # For GD and adam, learning-rate / Step lengths given by [[start1, end1], [start2, end2] ...] (for each pyramid level)
    reg.set_optimizer('adam', \
                      gradient_magnitude_threshold=0.01, \
                      iterations=param_iterations
                      )
    #    reg.set_optimizer('gd', \
    #                      step_length=np.array([1., 0.5, 0.25]), \
    #                      end_step_length=np.array([0.4, 0.2, 0.01]), \
    #                      gradient_magnitude_threshold=0.01, \
    #                      iterations=param_iterations
    #                      )
    #    reg.set_optimizer('scipy', \
    #                      iterations=param_iterations, \
    #                      epsilon=0.001 \
    #                      )

    # Scale all transform parameters to approximately the same order of magnitude, based on sizes of images
    diag = 0.5 * (transforms.image_diagonal(ref_im, spacing) +
                  transforms.image_diagonal(flo_im, spacing))

    # Create the initial transform and add it to the registration framework
    # (switch between affine/rigid transforms by commenting/uncommenting)
    #    # Affine
    #    initial_transform = transforms.AffineTransform(2)
    #    param_scaling = np.array([1.0/diag, 1.0/diag, 1.0/diag, 1.0/diag, 1.0, 1.0])
    #    reg.add_initial_transform(initial_transform, param_scaling=param_scaling)
    #    # Rigid 2D
    #    initial_transform = transforms.Rigid2DTransform()
    #    param_scaling = np.array([1.0/diag, 1.0, 1.0])
    #    reg.add_initial_transform(initial_transform, param_scaling=param_scaling)
    # Composite scale + rigid
    param_scaling = np.array([1.0 / diag, 1.0 / diag, 1.0, 1.0])
    initial_transform = transforms.CompositeTransform(2, [transforms.ScalingTransform(2, uniform=True), \
                                                transforms.Rigid2DTransform()])
    reg.add_initial_transform(initial_transform, param_scaling=param_scaling)

    # Set up other registration framework parameters
    reg.set_report_freq(param_report_freq)
    reg.set_sampling_fraction(param_sampling_fraction)

    # Create output directory
    directory = os.path.dirname(outdir)
    if not os.path.exists(directory):
        os.makedirs(directory)

    # Start the pre-processing
    reg.initialize(outdir)

    # Control the formatting of numpy
    np.set_printoptions(suppress=True, linewidth=200)

    # Start the registration
    reg.run()

    (transform, value) = reg.get_output(0)

    ### Warp final image
    c = transforms.make_image_centered_transform(transform, ref_im, flo_im,
                                                 spacing, spacing)

    # Print out transformation parameters and status
    print('Starting from %s, optimizer terminated with message: %s'%(str(initial_transform.get_params()), \
                                                                    reg.get_output_messages()[0]))
    print('Final transformation parameters: %s.' % str(transform.get_params()))

    # Create the output image
    ref_im_warped = np.zeros(ref_im.shape)
    mask = np.ones(flo_im_orig.shape, dtype='bool')
    warped_mask = np.zeros(ref_im.shape, dtype='bool')

    # Transform the floating image into the reference image space by applying transformation 'c'
    c.warp(In=flo_im_orig,
           Out=ref_im_warped,
           in_spacing=spacing,
           out_spacing=spacing,
           mode='spline',
           bg_value=0.0)
    c.warp(In=mask,
           Out=warped_mask,
           in_spacing=spacing,
           out_spacing=spacing,
           mode='spline',
           bg_value=0.0)

    # Save the registered image
    Image.fromarray(ref_im_warped).convert('RGB').save(outdir +
                                                       'registered.png')

    # Compute the absolute difference image between the reference and registered images
    D1 = np.abs(ref_im_orig - ref_im_warped)
    err = np.mean(D1[warped_mask])
    print("Err: %f" % err)

    Image.fromarray(D1).convert('RGB').save(outdir + 'diff.png')

    return True
Example #10
if __name__ == '__main__':
    from skimage.measure import EllipseModel, CircleModel

    ellipse, circle = EllipseModel(), CircleModel()

    from filters import normalize
    import matplotlib
    #matplotlib.use('AGG')
    import matplotlib.pyplot as plt

    import skimage.io as io

    rpath = '/home/mirok/Downloads/MIRO_TSeries-01302019-0918-028_cycle_001_ch02_short_video-1.tif'
    red_seq = io.imread(rpath)

    red_nseq = normalize(red_seq)

    gpath = '/home/mirok/Downloads/MIRO_TSeries-01302019-0918-028_cycle_001_ch01_short_video-1.tif'
    green_seq = io.imread(gpath)

    green_nseq_bk = normalize(green_seq)

    from filters import time_smooth

    bar, _ = time_smooth(green_nseq_bk, width=3)
    foo, _ = time_smooth(red_nseq, width=3)
    # bar, xx = time_smooth(green_nseq, width=5, arrow=np.max)

    idx = 1
    nrows, ncols = foo[0].shape
    ss = SegmentSampler(foo[idx], (nrows - 1, 0), (0, ncols - 1))
Example #11
def main():
    #np.random.seed(1000)

    if len(sys.argv) > 1:
        ref_im_path = sys.argv[1]
    else:
        ref_im_path = example_ref_im
    if len(sys.argv) > 2:
        flo_im_path = sys.argv[2]
    else:
        flo_im_path = example_flo_im

    print('Registering floating image %s with reference image %s' %
          (flo_im_path, ref_im_path))
    print('Similarity measure %s, optimizer %s' %
          (param_method, param_optimizer))

    ref_im = Image.open(ref_im_path).convert('L')
    flo_im = Image.open(flo_im_path).convert('L')
    ref_im = np.asarray(ref_im)
    flo_im = np.asarray(flo_im)

    # Save copies of original images
    ref_im_orig = ref_im.copy()
    flo_im_orig = flo_im.copy()

    # Initialize registration model for 2d images and do specific preprocessing and setup for that model
    if param_method.lower() == 'alphaamd':
        reg = models.RegisterAlphaAMD(2)
        reg.set_alpha_levels(alpha_levels)
        ref_im = filters.normalize(ref_im, 0.0, None)
        flo_im = filters.normalize(flo_im, 0.0, None)
    elif param_method.lower() == 'mi':
        ref_im = filters.normalize(ref_im, 0.0, None)
        flo_im = filters.normalize(flo_im, 0.0, None)
        reg = models.RegisterMI(2)
    else:
        raise NotImplementedError('Method must be one of alphaAMD, MI')
    reg.set_report_freq(param_report_freq)

    # Generic initialization steps required for every registration model
    weights1 = np.ones(ref_im.shape)
    mask1 = np.ones(ref_im.shape, 'bool')
    weights2 = np.ones(flo_im.shape)
    mask2 = np.ones(flo_im.shape, 'bool')

    reg.set_reference_image(ref_im)
    reg.set_reference_mask(mask1)
    reg.set_reference_weights(weights1)

    reg.set_floating_image(flo_im)
    reg.set_floating_mask(mask2)
    reg.set_floating_weights(weights2)

    # Setup the Gaussian pyramid resolution levels
    reg.add_pyramid_level(4, 5.0)
    reg.add_pyramid_level(2, 3.0)
    reg.add_pyramid_level(1, 0.0)

    # Learning-rate / Step lengths [[start1, end1], [start2, end2] ...] (for each pyramid level)
    step_lengths = np.array([[1., 1.], [1., 0.5], [0.5, 0.1]])
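    # With the values above, the step length stays at 1.0 on the coarsest level and
    # decays from 0.5 to 0.1 on the finest (full-resolution) level.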

    # Estimate an appropriate parameter scaling based on the sizes of the images.
    diag = transforms.image_diagonal(
        ref_im, spacing) + transforms.image_diagonal(flo_im, spacing)
    diag = 2.0 / diag
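    # diag is now 1 / (mean image diagonal); scaling the linear transform parameters by it
    # keeps all parameters at a comparable order of magnitude during optimization.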

    # Create the transform and add it to the registration framework (switch between affine/rigid transforms by commenting/uncommenting)
    # Affine
    #    reg.add_initial_transform(transforms.AffineTransform(2), param_scaling=np.array([diag, diag, diag, diag, 1.0, 1.0]))
    # Rigid 2D
    #reg.add_initial_transform(transforms.Rigid2DTransform(), param_scaling=np.array([diag, 1.0, 1.0]))
    # Uniform scale, rotate and translate
    t = transforms.CompositeTransform(2, [transforms.ScalingTransform(2, uniform=True), \
                                          transforms.Rigid2DTransform()])
    reg.add_initial_transform(t,
                              param_scaling=np.array([diag, diag, 1.0, 1.0]))

    # Set the parameters
    reg.set_iterations(param_iterations)
    reg.set_gradient_magnitude_threshold(1e-6)
    reg.set_sampling_fraction(param_sampling_fraction)
    reg.set_step_lengths(step_lengths)
    reg.set_optimizer(param_optimizer)

    # Create output directory
    directory = os.path.dirname(param_outdir)
    if not os.path.exists(directory):
        os.makedirs(directory)

    # Start the pre-processing
    reg.initialize(param_outdir)

    # Control the formatting of numpy
    np.set_printoptions(suppress=True, linewidth=200)

    # Start the registration
    reg.run()

    (transform, value) = reg.get_output(0)

    ### Warp final image
    c = transforms.make_image_centered_transform(transform, ref_im, flo_im,
                                                 spacing, spacing)

    # Print out transformation parameters
    print('Transformation parameters: %s.' % str(transform.get_params()))

    # Create the output image
    ref_im_warped = np.zeros(ref_im.shape)

    # Transform the floating image into the reference image space by applying transformation 'c'
    c.warp(In=flo_im_orig,
           Out=ref_im_warped,
           in_spacing=spacing,
           out_spacing=spacing,
           mode='spline',
           bg_value=0.0)
    # Cast back to integer values for mutual information comparison
    ref_im_warped = np.rint(ref_im_warped).astype('uint8')
    mask = np.ones(flo_im.shape)
    warped_mask = np.zeros(ref_im.shape)
    c.warp(In=mask,
           Out=warped_mask,
           in_spacing=spacing,
           out_spacing=spacing,
           mode='nearest',
           bg_value=0.0)
    value1 = mutual_info_score(ref_im[warped_mask > 0],
                               ref_im_warped[warped_mask > 0])
    print("Mutual info at estimated transform:", value1)

    #    plt.figure()
    #    plt.subplot(121)
    #    plt.imshow(ref_im_warped, vmin=0, vmax=255, cmap='gray')
    #    plt.title("Registered image")
    #    plt.subplot(122)
    #    plt.imshow(warped_mask, vmin=0, vmax=1, cmap='gray')
    #    plt.show()
    # Save the registered image
    Image.fromarray(ref_im_warped).convert('RGB').save(param_outdir +
                                                       'registered.png')

    ### Compare with ground truth
    scaling_trans = transforms.ScalingTransform(2, uniform=True)
    scaling_trans.set_params([
        1,
    ])
    rigid_trans = transforms.Rigid2DTransform()
    rigid_trans.set_params([0.35, 0.5, 0.5])
    gt_transform = transforms.CompositeTransform(2,
                                                 [scaling_trans, rigid_trans])

    c2 = transforms.make_image_centered_transform(gt_transform, ref_im, flo_im,
                                                  spacing, spacing)

    # Print out ground truth transformation parameters
    print('Ground Truth transformation parameters: %s.' %
          str(gt_transform.get_params()))

    # Create the output image
    warped_image = np.zeros(ref_im.shape)
    mask = np.ones(flo_im_orig.shape)
    warped_mask = np.zeros(ref_im.shape)

    # Transform the floating image into the reference image space by applying transformation defined above
    c2.warp(In=flo_im_orig, Out=warped_image, mode='spline', bg_value=0)
    # Apply the same transform to the mask to determine which pixels are within the original image
    c2.warp(In=mask, Out=warped_mask, mode='nearest', bg_value=0)

    # Cast back to integer values for mutual information comparison
    warped_image = np.rint(warped_image).astype('uint8')

    value2 = mutual_info_score(ref_im[warped_mask > 0],
                               warped_image[warped_mask > 0])
    print("Mutual info at ground truth:", value2)

    plt.figure()
    plt.subplot(131)
    plt.imshow(ref_im_warped, vmin=0, vmax=255, cmap='gray')
    plt.title("Registered image")
    plt.subplot(132)
    plt.imshow(warped_image, vmin=0, vmax=255, cmap='gray')
    plt.title("Ground truth")
    plt.subplot(133)
    plt.imshow(abs(ref_im_warped.astype(float) - warped_image.astype(float)),
               vmin=0,
               vmax=255,
               cmap='gray')
    plt.title("Difference")
    plt.show()

    # Compute the absolute difference image between the reference and registered images
    D1 = np.abs(ref_im_orig - ref_im_warped)
    err = np.mean(D1)
    print("Err: %f" % err)

    Image.fromarray(D1).convert('RGB').save(param_outdir + 'diff.png')

    return True
Example #12
    def initialize(self, pyramid_images_output_path=None):
        if len(self.pyramid_factors) == 0:
            self.add_pyramid_level(1, 0.0)
        if len(self.initial_transforms) == 0:
            self.add_initial_transform(AffineTransform(self.dim))

        ch = len(self.ref_im)
        #        print(ch)
        # require same number of channels
        assert (ch == len(self.flo_im))

        ref_input = self.ref_im
        flo_input = self.flo_im
        if self.channel_mode == 'decompose_pre':
            lev = None
            #lev = self.alpha_levels
            ref_input = filters.fidt(ref_input, lev)  #self.alpha_levels)
            flo_input = filters.fidt(flo_input, lev)  #self.alpha_levels)
            ch = len(ref_input)
        ### Preprocessing

        pyramid_levels = len(self.pyramid_factors)
        percentile = 0.01

        for i in range(pyramid_levels):
            factor = self.pyramid_factors[i]

            ref_mask_resampled = filters.downsample(self.ref_mask, factor)
            flo_mask_resampled = filters.downsample(self.flo_mask, factor)

            ref_resampled = []
            flo_resampled = []

            for k in range(ch):
                ref_k = filters.downsample(
                    filters.gaussian_filter(ref_input[k],
                                            self.pyramid_sigmas[i]), factor)
                flo_k = filters.downsample(
                    filters.gaussian_filter(flo_input[k],
                                            self.pyramid_sigmas[i]), factor)
                #if self.channel_mode == 'sum':
                #ref_k = filters.normalize(ref_k, percentile, ref_mask_resampled)
                #flo_k = filters.normalize(flo_k, percentile, flo_mask_resampled)
                ref_resampled.append(ref_k)
                flo_resampled.append(flo_k)

            if self.channel_mode == 'sum' or self.channel_mode == 'decompose_pre':
                pass
            elif self.channel_mode == 'decompose':
                ref_resampled = filters.fidt(ref_resampled, self.alpha_levels)
                flo_resampled = filters.fidt(flo_resampled, self.alpha_levels)
                for k in range(len(ref_resampled)):
                    ref_resampled[k] = filters.normalize(
                        ref_resampled[k], percentile, ref_mask_resampled)
                    flo_resampled[k] = filters.normalize(
                        flo_resampled[k], percentile, flo_mask_resampled)

            #if pyramid_images_output_path is not None and ref_resampled[0].ndim == 2:
            #    scipy.misc.imsave('%sref_resampled_%d.png' % (pyramid_images_output_path, i+1), ref_resampled)
            #    scipy.misc.imsave('%sflo_resampled_%d.png' % (pyramid_images_output_path, i+1), flo_resampled)

            if self.ref_weights is None:
                ref_weights = np.zeros(ref_resampled[0].shape)
                ref_weights[ref_mask_resampled] = 1.0
            else:
                ref_weights = filters.downsample(self.ref_weights, factor)
            if self.flo_weights is None:
                flo_weights = np.zeros(flo_resampled[0].shape)
                flo_weights[flo_mask_resampled] = 1.0
            else:
                flo_weights = filters.downsample(self.flo_weights, factor)

            ref_diag = np.sqrt(
                np.square(np.array(ref_resampled[0].shape) *
                          self.ref_spacing).sum())
            flo_diag = np.sqrt(
                np.square(np.array(flo_resampled[0].shape) *
                          self.flo_spacing).sum())

            dists = []

            for k in range(len(ref_resampled)):
                q_ref = QuantizedImage(ref_resampled[k],
                                       self.alpha_levels,
                                       ref_weights,
                                       self.ref_spacing * factor,
                                       remove_zero_weight_pnts=True)
                q_flo = QuantizedImage(flo_resampled[k],
                                       self.alpha_levels,
                                       flo_weights,
                                       self.flo_spacing * factor,
                                       remove_zero_weight_pnts=True)

                if self.squared_measure:
                    dt_fun = alpha_amd.edt_sq
                else:
                    dt_fun = None

                tf_ref = alpha_amd.AlphaAMD(q_ref,
                                            self.alpha_levels,
                                            ref_diag,
                                            self.ref_spacing * factor,
                                            ref_mask_resampled,
                                            ref_mask_resampled,
                                            interpolator_mode='linear',
                                            dt_fun=dt_fun,
                                            mask_out_edges=True)
                tf_flo = alpha_amd.AlphaAMD(q_flo,
                                            self.alpha_levels,
                                            flo_diag,
                                            self.flo_spacing * factor,
                                            flo_mask_resampled,
                                            flo_mask_resampled,
                                            interpolator_mode='linear',
                                            dt_fun=dt_fun,
                                            mask_out_edges=True)

                symmetric_measure = True
                squared_measure = False  #self.squared_measure

                sym_dist = symmetric_amd_distance.SymmetricAMDDistance(
                    symmetric_measure=symmetric_measure,
                    squared_measure=squared_measure)

                sym_dist.set_ref_image_source(q_ref)
                sym_dist.set_ref_image_target(tf_ref)

                sym_dist.set_flo_image_source(q_flo)
                sym_dist.set_flo_image_target(tf_flo)

                sym_dist.set_sampling_fraction(self.sampling_fraction)

                sym_dist.initialize()

                dists.append(sym_dist)

            self.distances.append(dists)
Example #13
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-smoothed',
                        type=int,
                        default=0,
                        help='Time smoothing')
    args = parser.parse_args()

    rpath = '/home/mirok/Downloads/MIRO_TSeries-01302019-0918-028_cycle_001_ch02_short_video-1.tif'
    red_seq = io.imread(rpath)

    gpath = '/home/mirok/Downloads/MIRO_TSeries-01302019-0918-028_cycle_001_ch01_short_video-1.tif'
    green_seq = io.imread(gpath)

    smoothed = bool(args.smoothed)

    green_nseq = normalize(green_seq)
    green_nseq_bk = 1 * green_nseq

    red_nseq = normalize(red_seq)
    red_nseq_bk = 1 * red_nseq

    if smoothed:
        green_nseq, (shift, _) = time_smooth(green_nseq, width=3)
        red_nseq, (shift, _) = time_smooth(red_nseq, width=3)
    else:
        shift = 0

    r = red_nseq[0]
    # Remove frame from the blood series
    lft, rght, tp, btm = [
        f(r, 25) for f in (left_edge, right_edge, top_edge, bottom_edge)
Example #14
    def initialize(self, pyramid_images_output_path=None, norm=True):
        """Initialize the registration framework: must be called before run().
        
        Prepare pyramid scheme by creating and saving downsampled versions of the images 
        for each pyramid level. Set up a distance measure (separate instance for each pyramid
        level, with the corresponding version of the images).
        
        Args:
            pyramid_images_output_path: slash-terminated string specifying the folder in which
                to save the downsampled images. Default None. If None, images are not saved.
                Only applicable to 2D images.
            norm: if True (default), normalize each resampled image with filters.normalize
                before building the distance measure.

        Other running parameters are set in __init__().
        """
        if len(self.pyramid_factors) == 0:
            self.add_pyramid_level(1, 0.0)
        if len(self.initial_transforms) == 0:
            self.add_initial_transform(transforms.AffineTransform(self.dim))
#        while len(self.opt_opts['step_length']) < len(self.pyramid_factors):
#            self.opt_opts['step_length'] = np.concatenate((self.opt_opts['step_length'], np.array([[0,1.0]])))

        ### Preprocessing

        pyramid_levels = len(self.pyramid_factors)

        for i in range(pyramid_levels):
            factor = self.pyramid_factors[i]

            ref_resampled = filters.downsample(
                filters.gaussian_filter(self.ref_im, self.pyramid_sigmas[i]),
                factor)
            flo_resampled = filters.downsample(
                filters.gaussian_filter(self.flo_im, self.pyramid_sigmas[i]),
                factor)

            ref_mask_resampled = filters.downsample(self.ref_mask, factor)
            flo_mask_resampled = filters.downsample(self.flo_mask, factor)

            if norm:
                ref_resampled = filters.normalize(ref_resampled, 0.0,
                                                  ref_mask_resampled)
                flo_resampled = filters.normalize(flo_resampled, 0.0,
                                                  flo_mask_resampled)

            if pyramid_images_output_path is not None and ref_resampled.ndim == 2:
                Image.fromarray(ref_resampled).convert(
                    'RGB').save(pyramid_images_output_path +
                                'ref_resampled_%d.png' % (i + 1))
                Image.fromarray(flo_resampled).convert(
                    'RGB').save(pyramid_images_output_path +
                                'flo_resampled_%d.png' % (i + 1))

            if self.ref_weights is None:
                ref_weights = np.zeros(ref_resampled.shape)
                ref_weights[ref_mask_resampled] = 1.0
            else:
                ref_weights = filters.downsample(self.ref_weights, factor)
            if self.flo_weights is None:
                flo_weights = np.zeros(flo_resampled.shape)
                flo_weights[flo_mask_resampled] = 1.0
            else:
                flo_weights = filters.downsample(self.flo_weights, factor)

#            #add default step length in case it wasn't specified,
#            #TODO can't the optimizer do this?
#            self.optimizer_opts['step_length'] = self.optimizer_opts.get('step_length',np.array([[0.1]*pyramid_levels]))

#            #DEBUG
#            if(False):
#                #Display each image with its mask to check all is ok
#                plt.subplot(121)
#                plt.imshow(np.hstack((ref_resampled, ref_mask_resampled)), cmap='gray')
#                plt.subplot(122)
#                plt.imshow(np.hstack((flo_resampled, flo_mask_resampled)), cmap='gray')
#                plt.show()
#            #END DEBUG


            dist_measure = self._make_dist_measure(ref_resampled, ref_mask_resampled, ref_weights, \
                                                  flo_resampled, flo_mask_resampled, flo_weights, factor)

            self.distances.append(dist_measure)
Example #15
def register_aamd(ref_im,
                  flo_im,
                  iterations=1.0,
                  param_sampling_fraction=0.01,
                  param_multi_start=True,
                  do_sigmoid=False):
    #    def _no_report_callback(opt):
    #        pass
    np.random.seed(1000)
    # The number of iterations

    param_iterations = [
        int(iterations * 3000),
        int(iterations * 1000),
        int(iterations * 200)
    ]  # 500 -> 200?
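    # 'iterations' acts as a global multiplier on the per-pyramid-level iteration counts
    # (3000, 1000 and 200 at full scale, i.e. iterations=1.0).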

    #    do_grayscale = False
    #
    #    if do_grayscale == True:
    #        ref_im = io.imread(ref_im_path, as_gray=True)
    #        flo_im = io.imread(flo_im_path, as_gray=True)
    #        ref_im = np.squeeze(ref_im)
    #        flo_im = np.squeeze(flo_im)
    #        ref_im = ref_im.reshape(ref_im.shape + (1,))
    #        flo_im = flo_im.reshape(flo_im.shape + (1,))
    #    else:
    #        ref_im = io.imread(ref_im_path, as_gray=False)
    #        flo_im = io.imread(flo_im_path, as_gray=False)
    #        ref_im = np.squeeze(ref_im)
    #        flo_im = np.squeeze(flo_im)
    #        if ref_im.ndim == 2:
    #            ref_im = ref_im.reshape(ref_im.shape + (1,))
    #        if flo_im.ndim == 2:
    #            flo_im = flo_im.reshape(flo_im.shape + (1,))
    #
    #    print(ref_im.shape)
    #    print(flo_im.shape)
    if ref_im.ndim == 2:
        ref_im = np.expand_dims(ref_im, axis=-1)
    if flo_im.ndim == 2:
        flo_im = np.expand_dims(flo_im, axis=-1)

    flo_mask = None

    ch = ref_im.shape[-1]

    #    ref_im_orig = ref_im.copy()

    ref_im = filters.channels_to_list(ref_im)
    flo_im = filters.channels_to_list(flo_im)

    #weights1 = generators.make_circular_hann_window_like_image(ref_im[0], rad_factor = 1.0, spacing=None, p=0.25)
    weights1 = np.ones(ref_im[0].shape)
    mask1 = np.ones(ref_im[0].shape, 'bool')
    #weights2 = generators.make_circular_hann_window_like_image(flo_im[0], rad_factor = 1.0, spacing=None, p=0.25)
    weights2 = np.ones(flo_im[0].shape)
    if flo_mask is None:
        mask2 = np.ones(flo_im[0].shape, 'bool')
    else:
        mask2 = (flo_mask >= 0.5)

    # Save copies of original images
#    flo_im_orig = flo_im.copy()

    def inv_sigmoid(x):
        return np.log((x + 1e-7) / (1 - x + 1e-7))

    def sigmoid(x):
        # logistic sigmoid, the inverse of inv_sigmoid above
        return 1.0 / (1.0 + np.exp(-x))

    if do_sigmoid:
        for k in range(ch):
            ref_im[k] = sigmoid(ref_im[k])
            flo_im[k] = sigmoid(flo_im[k])
    else:
        for k in range(ch):
            ref_im[k] = filters.normalize(ref_im[k], 0.0, None)
            flo_im[k] = filters.normalize(flo_im[k], 0.0, mask2)

    diag = 0.5 * (transforms.image_diagonal(ref_im[0], spacing) +
                  transforms.image_diagonal(flo_im[0], spacing))

    # Initialize registration framework for 2d images
    reg = RegisterMultiChannel(2)

    reg.set_report_freq(param_report_freq)
    #    reg.set_report_func(_no_report_callback)
    reg.set_alpha_levels(alpha_levels)
    reg.set_channel_mode(param_channel_mode)

    reg.set_reference_image(ref_im, spacing)
    reg.set_reference_mask(mask1)
    reg.set_reference_weights(weights1)

    reg.set_floating_image(flo_im, spacing)
    reg.set_floating_mask(mask2)
    reg.set_floating_weights(weights2)

    reg.set_squared_measure(squared_measure)

    # Setup the Gaussian pyramid resolution levels
    if iterations < 1:
        reg.add_pyramid_level(4, 7.0)
        reg.add_pyramid_level(2, 3.0)
        reg.add_pyramid_level(1, 1.0)
    else:
        reg.add_pyramid_level(4, 12.0)
        reg.add_pyramid_level(2, 5.0)
        reg.add_pyramid_level(1, 1.0)

    # Learning-rate / Step lengths [[start1, end1], [start2, end2] ...] (for each pyramid level)
    step_lengths = np.array([[1.0, 1.0], [1.0, 1.0], [1.0, 0.1]
                             ]) * 2.0  #* 5e-2

    scale = 0.1 / diag
    tscale = 5.0

    transform_count = 1

    t = Rigid2DTransform()
    reg.add_initial_transform(t, np.array([scale, tscale, tscale]))

    if param_multi_start:
        t = Rigid2DTransform()
        t.set_param(0, -0.4)
        reg.add_initial_transform(t, np.array([scale, tscale, tscale]))
        t = Rigid2DTransform()
        t.set_param(0, 0.4)
        reg.add_initial_transform(t, np.array([scale, tscale, tscale]))
        transform_count += 2

    # Set the parameters
    reg.set_iterations(param_iterations)
    reg.set_gradient_magnitude_threshold(0.0001)
    reg.set_sampling_fraction(param_sampling_fraction)
    reg.set_step_lengths(step_lengths)
    reg.set_optimizer('sgd')

    # Start the pre-processing
    reg.initialize('./test_images/output/')

    # Control the formatting of numpy
    np.set_printoptions(suppress=True, linewidth=200)

    # Start the registration
    reg.run()

    ts = []
    tval = []
    for i in range(transform_count):
        ti, vi = reg.get_output(i)
        ts.append(ti)
        tval.append(vi)
    transform, value = find_best_transform(ts, tval)

    c = transforms.make_image_centered_transform(transform, ref_im[0],
                                                 flo_im[0], spacing, spacing)

    # Create the output image
    ref_im_warped = [np.zeros(ref_im[i].shape) for i in range(ch)]
    ref_im_copied = [np.zeros(ref_im[i].shape) for i in range(ch)]

    # Transform the floating image into the reference image space by applying transformation 'c'
    for k in range(ch):
        ref_im_copied[k] = ref_im[k]
        c.warp(In=flo_im[k],
               Out=ref_im_warped[k],
               in_spacing=spacing,
               out_spacing=spacing,
               mode='linear',
               bg_value=0.0)

    ref_im_copied = np.squeeze(filters.list_to_channels(ref_im_copied))
    ref_im_warped = np.squeeze(filters.list_to_channels(ref_im_warped))

    return ref_im_warped, c
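
A hypothetical usage sketch for register_aamd; the input file names are placeholders, PIL's Image and numpy are assumed imported as in the earlier examples, and module-level settings such as spacing, alpha_levels, param_report_freq, param_channel_mode and squared_measure are assumed to be defined as in the example above:

ref = np.asarray(Image.open('reference.png').convert('L')) / 255.  # hypothetical paths
flo = np.asarray(Image.open('floating.png').convert('L')) / 255.
warped, transform = register_aamd(ref, flo, iterations=0.5, param_multi_start=False)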