Example #1
File: composite.py  Project: zfphil/llops
    def _inverse(x, y):
        """Inverse using phase correlation."""
        # Extract two arrays to correlate
        xf_1 = array_to_register_to_f
        xf_2 = x

        # Compute normalized cross-correlation
        phasor = (conj(xf_1) * xf_2) / abs(conj(xf_1) * xf_2)
        phasor[isnan(phasor)] = 0

        # Convert phasor to delta function
        delta = F.H * phasor

        # If only a subset of axes is used, sum over and squeeze out the unused axes
        if len(axes) != ndim(x) or any(
            [ax != index for (ax, index) in zip(axes, range(len(axes)))]):
            axes_not_used = [
                index for index in range(ndim(x)) if index not in axes
            ]

            delta = squeeze(sum(delta, axes=axes_not_used))

        if debug:
            import matplotlib.pyplot as plt
            plt.figure(figsize=(12, 3))
            plt.subplot(131)
            plt.imshow(abs(F.H * xf_1))
            plt.subplot(132)
            plt.imshow(abs(F.H * xf_2))
            plt.subplot(133)
            if ndim(delta) > 1:
                plt.imshow(abs(delta))
            else:
                plt.plot(abs(delta))

        # Calculate maximum and return
        if not center:
            y[:] = reshape(asarray(argmax(delta)), shape(y))
        else:
            y[:] = reshape(
                asarray(argmax(delta)) - asarray(delta.shape) / 2, shape(y))

        # Deal with negative values
        sizes = reshape(asarray([_shape[ax] for ax in axes]), shape(y))
        mask = y[:] > sizes / 2
        y[:] -= mask * sizes

        if debug:
            plt.title(
                str(np.real(np.asarray(argmax(delta))).tolist()) + ' ' +
                str(np.abs(np.asarray(y).ravel())))
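
The _inverse above implements phase correlation: for two arrays that differ by a translation, the normalized cross-power spectrum is a pure phase ramp, and its inverse Fourier transform is a delta function located at the shift. A minimal NumPy sketch of that idea (independent of llops; the names below are illustrative only):

import numpy as np

# Reference image and a circularly shifted copy of it.
rng = np.random.default_rng(0)
reference = rng.standard_normal((64, 64))
shifted = np.roll(reference, (5, -3), axis=(0, 1))

# Normalized cross-power spectrum (the "phasor" in the code above).
f1, f2 = np.fft.fft2(reference), np.fft.fft2(shifted)
phasor = np.conj(f1) * f2
phasor /= np.abs(phasor)

# Its inverse FFT is (approximately) a delta at the shift, modulo the array size.
delta = np.fft.ifft2(phasor)
peak = np.unravel_index(np.argmax(np.abs(delta)), delta.shape)

# Wrap indices above N/2 back to negative shifts, as the operator does.
shift = [p - n if p > n // 2 else p for p, n in zip(peak, delta.shape)]
print(shift)  # [5, -3]
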
Example #2
def affineHomographyBlocks(coordinate_list):
    """Generate affine homography blocks which can be used to solve for homography coordinates."""
    # Store dtype and backend
    dtype = yp.getDatatype(coordinate_list)
    backend = yp.getBackend(coordinate_list)

    # Convert coordinates to numpy array
    coordinate_list = yp.asarray(yp.real(coordinate_list),
                                 dtype='float32',
                                 backend='numpy')

    # Ensure coordinate_list is a list of lists
    if yp.ndim(coordinate_list) == 1:
        coordinate_list = np.asarray([coordinate_list])

    # Determine the number of elements in a coordinate
    axis_count = yp.shape(coordinate_list)[1]

    # Loop over positions and concatenate to array
    coordinate_blocks = []
    for coordinate in coordinate_list:
        block = np.append(coordinate, 1)
        coordinate_blocks.append(np.kron(np.eye(len(coordinate)), block))

    # Convert to initial datatype and backend
    coordinate_blocks = yp.asarray(np.concatenate(coordinate_blocks), dtype,
                                   backend)

    return coordinate_blocks
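
Each 2-D coordinate (r, c) contributes a block kron(eye(2), [r, c, 1]), a 2 x 6 matrix; stacking one block per point gives a linear system A @ h = b whose solution h is the flattened 2 x 3 affine matrix. A small self-contained NumPy check of that structure (the transform and points below are made up for illustration):

import numpy as np

# One coordinate -> one 2 x 6 block.
point = np.array([2.0, 3.0])
print(np.kron(np.eye(2), np.append(point, 1)))
# [[2. 3. 1. 0. 0. 0.]
#  [0. 0. 0. 2. 3. 1.]]

# Applying a known affine transform and solving the stacked system
# recovers its six parameters.
affine = np.array([[1.0, 0.2, 5.0],
                   [-0.1, 0.9, -2.0]])              # hypothetical transform
points = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
A = np.concatenate([np.kron(np.eye(2), np.append(p, 1)) for p in points])
b = np.concatenate([affine @ np.append(p, 1) for p in points])
h, *_ = np.linalg.lstsq(A, b, rcond=None)
print(h.reshape(2, 3))                              # matches the affine matrix
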
Example #3
def _loadImage(image_label, shape, dtype=None, backend=None, **kwargs):

    # Determine backend and dtype
    backend = backend if backend is not None else yp.config.default_backend
    dtype = dtype if dtype is not None else yp.config.default_dtype

    # Load image
    image = np.asarray(
        imageio.imread(test_images_directory + '/' +
                       _image_dict[image_label]['filename']))

    # Process color channel
    if yp.ndim(image) > 2:
        color_processing_mode = kwargs.get('color_channel', 'average')
        if color_processing_mode == 'average':
            image = np.mean(image, 2)
        elif color_processing_mode is None:
            pass
        else:
            assert isinstance(color_processing_mode, (int, np.integer))
            image = image[:, :, int(color_processing_mode)]

    # Resize image if requested
    if shape is not None:

        # Warn if the measurement will be band-limited in the frequency domain
        if any([image.shape[i] < shape[i] for i in range(len(shape))]):
            print(
                'WARNING: Raw image size (%d x %d) is smaller than requested size (%d x %d); the resized image will be band-limited (no additional resolution is gained).'
                % (image.shape[0], image.shape[1], shape[0], shape[1]))

        # Perform resize operation
        image = resize(image,
                       shape,
                       mode=kwargs.get('reshape_mode', 'constant'),
                       preserve_range=True,
                       anti_aliasing=kwargs.get('anti_aliasing',
                                                False)).astype(float)

    return yp.cast(image, dtype, backend)
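
A typical call requests a named test image at a fixed size; the label below is hypothetical and must be a key of the module-level _image_dict for this sketch to run:

# Hypothetical usage sketch; 'cameraman' must exist in _image_dict.
image = _loadImage('cameraman', shape=(256, 256),
                   dtype='float32', backend='numpy',
                   color_channel='average',   # average the RGB channels
                   anti_aliasing=True)        # smooth before downsampling
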
Example #4
def registerImage(image0,
                  image1,
                  method='xc',
                  axis=None,
                  preprocess_methods=['reflect'],
                  debug=False,
                  **kwargs):

    # Perform preprocessing
    if len(preprocess_methods) > 0:
        image0, image1 = _preprocessForRegistration(image0, image1,
                                                    preprocess_methods,
                                                    **kwargs)

    # Ratio used to decide whether we can trust this registration
    trust_ratio = 1.0

    if method in ['xc', 'cross_correlation']:

        # Get energy ratio threshold
        trust_threshold = kwargs.get('energy_ratio_threshold', 1.5)

        # Pad arrays for optimal speed
        pad_size = tuple(
            [sp.fftpack.next_fast_len(s) for s in yp.shape(image0)])

        # Perform padding
        if pad_size != tuple(yp.shape(image0)):
            image0 = yp.pad(image0, pad_size, pad_value='edge', center=True)
            image1 = yp.pad(image1, pad_size, pad_value='edge', center=True)

        # Take F.T. of measurements
        src_freq, target_freq = yp.Ft(image0, axes=axis), yp.Ft(image1,
                                                                axes=axis)

        # Whole-pixel shift - Compute cross-correlation by an IFFT
        image_product = src_freq * yp.conj(target_freq)
        # image_product /= abs(src_freq * yp.conj(target_freq))
        cross_correlation = yp.iFt(image_product, center=False, axes=axis)

        # Take sum along axis if we're doing 1D
        if axis is not None:
            axis_to_sum = list(range(yp.ndim(image1)))
            del axis_to_sum[axis]
            cross_correlation = yp.sum(cross_correlation, axis=axis_to_sum)

        # Locate maximum
        shape = yp.shape(src_freq)
        maxima = yp.argmax(yp.abs(cross_correlation))
        midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape])

        shifts = np.array(maxima, dtype=np.float64)
        shifts[shifts > midpoints] -= np.array(shape)[shifts > midpoints]

        # If it's only one row or column, the shift along that dimension has
        # no effect. We set it to zero.
        for dim in range(yp.ndim(src_freq)):
            if shape[dim] == 1:
                shifts[dim] = 0

        # If energy ratio is too small, set all shifts to zero
        trust_metric = yp.scalar(
            yp.max(yp.abs(cross_correlation)**2) /
            yp.mean(yp.abs(cross_correlation)**2))

        # Determine if this registration can be trusted
        trust_ratio = trust_metric / trust_threshold

    elif method == 'orb':

        # Get user-defined mean_residual_threshold if given
        trust_threshold = kwargs.get('mean_residual_threshold', 40.0)

        # Get user-defined orb_feature_threshold if given
        orb_feature_threshold = kwargs.get('orb_feature_threshold', 25)

        match_count = 0
        fast_threshold = 0.05
        while match_count < orb_feature_threshold:
            descriptor_extractor = ORB(n_keypoints=500,
                                       fast_n=9,
                                       harris_k=0.1,
                                       fast_threshold=fast_threshold)

            # Extract keypoints from first frame
            descriptor_extractor.detect_and_extract(
                np.asarray(image0).astype(np.double))
            keypoints0 = descriptor_extractor.keypoints
            descriptors0 = descriptor_extractor.descriptors

            # Extract keypoints from second frame
            descriptor_extractor.detect_and_extract(
                np.asarray(image1).astype(np.double))
            keypoints1 = descriptor_extractor.keypoints
            descriptors1 = descriptor_extractor.descriptors

            # Set match count
            match_count = min(len(keypoints0), len(keypoints1))
            fast_threshold -= 0.01

            if fast_threshold <= 0:
                raise RuntimeError(
                    'Could not find any keypoints (even after shrinking fast threshold).'
                )

        # Match descriptors
        matches = match_descriptors(descriptors0,
                                    descriptors1,
                                    cross_check=True)

        # Filter descriptors to axes (if provided)
        if axis is not None:
            matches_filtered = []
            for (index_0, index_1) in matches:
                point_0 = keypoints0[index_0, :]
                point_1 = keypoints1[index_1, :]
                unit_vec = point_0 - point_1
                unit_vec /= np.linalg.norm(unit_vec)

                if yp.abs(unit_vec[axis]) > 0.99:
                    matches_filtered.append((index_0, index_1))

            matches_filtered = np.asarray(matches_filtered)
        else:
            matches_filtered = matches

        # Robustly estimate affine transform model with RANSAC
        model_robust, inliers = ransac((keypoints0[matches_filtered[:, 0]],
                                        keypoints1[matches_filtered[:, 1]]),
                                       EuclideanTransform,
                                       min_samples=3,
                                       residual_threshold=2,
                                       max_trials=100)

        # Note that model_robust has a translation property, but this doesn't
        # seem to be as numerically stable as simply averaging the difference
        # between the coordinates along the desired axis.

        # Apply match filter
        matches_filtered = matches_filtered[inliers, :]

        # Process keypoints
        if yp.shape(matches_filtered)[0] > 0:

            # Compute shifts
            difference = keypoints0[matches_filtered[:, 0]] - keypoints1[
                matches_filtered[:, 1]]
            shifts = (yp.sum(difference, axis=0) / yp.shape(difference)[0])
            shifts = np.round(shifts[0])

            # Filter to axis mask
            if axis is not None:
                _shifts = [0, 0]
                _shifts[axis] = shifts[axis]
                shifts = _shifts

            # Calculate residuals
            residuals = yp.sqrt(
                yp.sum(
                    yp.abs(keypoints0[matches_filtered[:, 0]] +
                           np.asarray(shifts) -
                           keypoints1[matches_filtered[:, 1]])**2))

            # Define a trust metric
            trust_metric = residuals / yp.shape(
                keypoints0[matches_filtered[:, 0]])[0]

            # Determine if this registration can be trusted
            trust_ratio = 1 / (trust_metric / trust_threshold)
            print('===')
            print(trust_ratio)
            print(trust_threshold)
            print(trust_metric)
            print(shifts)
        else:
            trust_metric = 1e10
            trust_ratio = 0.0
            shifts = np.asarray([0, 0])

    elif method == 'optimize':

        # Create Operators
        L2 = ops.L2Norm(yp.shape(image0), dtype='complex64')
        R = ops.PhaseRamp(yp.shape(image0), dtype='complex64')
        REAL = ops.RealFilter((2, 1), dtype='complex64')

        # Take Fourier Transforms of images
        image0_f, image1_f = yp.astype(yp.Ft(image0), 'complex64'), yp.astype(
            yp.Ft(image1), 'complex64')

        # Diagonalize one of the images
        D = ops.Diagonalize(image0_f)

        # Form objective
        objective = L2 * (D * R * REAL - image1_f)

        # Solve objective
        solver = ops.solvers.GradientDescent(objective)
        shifts = solver.solve(iteration_count=1000, step_size=1e-8)

        # Convert to numpy array, take real part, and round.
        shifts = yp.round(yp.real(yp.asbackend(shifts, 'numpy')))

        # Flip shift axes (x,y to y, x)
        shifts = np.fliplr(shifts)

        # TODO: Trust metric and trust_threshold
        trust_threshold = 1
        trust_ratio = 1.0

    else:
        raise ValueError('Invalid Registration Method %s' % method)

    # Mark whether or not this measurement is of good quality
    if not trust_ratio > 1:
        if debug:
            print('Ignoring shift with trust metric %g (threshold is %g)' %
                  (trust_metric, trust_threshold))
        shifts = yp.zeros_like(np.asarray(shifts)).tolist()

    # Show debugging figures if requested
    if debug:
        import matplotlib.pyplot as plt
        plt.figure(figsize=(6, 5))
        plt.subplot(131)
        plt.imshow(yp.abs(image0))
        plt.axis('off')
        plt.subplot(132)
        plt.imshow(yp.abs(image1))
        plt.title('Trust ratio: %g' % (trust_ratio))
        plt.axis('off')
        plt.subplot(133)
        if method in ['xc', 'cross_correlation']:
            if axis is not None:
                plt.plot(yp.abs(yp.squeeze(cross_correlation)))
            else:
                plt.imshow(yp.abs(yp.fftshift(cross_correlation)))
        else:
            plot_matches(plt.gca(), yp.real(image0), yp.real(image1),
                         keypoints0, keypoints1, matches_filtered)
        plt.title(str(shifts))
        plt.axis('off')

    # Return
    return shifts, trust_ratio
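
The 'xc' branch above gates its result on a peak-to-mean energy ratio of the cross-correlation: a sharp, isolated peak yields a large ratio, while a flat correlation (no reliable shift) yields a ratio near one. A toy NumPy illustration of that metric, using the same 1.5 default threshold:

import numpy as np

def trust_ratio(cross_correlation, threshold=1.5):
    # Peak-to-mean energy ratio of the cross-correlation, divided by the threshold.
    energy = np.abs(cross_correlation) ** 2
    return (energy.max() / energy.mean()) / threshold

sharp = np.zeros((32, 32)); sharp[5, 7] = 1.0      # isolated peak
flat = np.ones((32, 32))                           # no structure
print(trust_ratio(sharp) > 1)   # True  -> shift is kept
print(trust_ratio(flat) > 1)    # False -> shift is zeroed
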
Example #5
def register_translation(src_image,
                         target_image,
                         upsample_factor=1,
                         energy_ratio_threshold=2,
                         space="real"):
    """
    Efficient subpixel image translation registration by cross-correlation.
    This code gives the same precision as the FFT upsampled cross-correlation
    in a fraction of the computation time and with reduced memory requirements.
    It obtains an initial estimate of the cross-correlation peak by an FFT and
    then refines the shift estimation by upsampling the DFT only in a small
    neighborhood of that estimate by means of a matrix-multiply DFT.
    Parameters
    ----------
    src_image : ndarray
        Reference image.
    target_image : ndarray
        Image to register.  Must be same dimensionality as ``src_image``.
    upsample_factor : int, optional
        Upsampling factor. Images will be registered to within
        ``1 / upsample_factor`` of a pixel. For example
        ``upsample_factor == 20`` means the images will be registered
        within 1/20th of a pixel.  Default is 1 (no upsampling).  Note that
        the subpixel refinement step is currently commented out below, so
        values above 1 have no effect in this implementation.
    space : string, one of "real" or "fourier", optional
        Defines how the algorithm interprets input data.  "real" means data
        will be FFT'd to compute the correlation, while "fourier" data will
        bypass FFT of input data.  Case insensitive.
    energy_ratio_threshold : float, optional
        Minimum peak-to-mean energy ratio of the cross-correlation required
        to trust the estimated shift; below this threshold the returned
        shifts are set to zero.  Default is 2.
    Returns
    -------
    shifts : ndarray
        Shift vector (in pixels) required to register ``target_image`` with
        ``src_image``.  Axis ordering is consistent with numpy (e.g. Z, Y, X)
    References
    ----------
    .. [1] Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
           "Efficient subpixel image registration algorithms,"
           Optics Letters 33, 156-158 (2008). :DOI:`10.1364/OL.33.000156`
    .. [2] James R. Fienup, "Invariant error metrics for image reconstruction,"
           Applied Optics 36, 8352-8357 (1997). :DOI:`10.1364/AO.36.008352`
    """
    # images must be the same shape
    if yp.shape(src_image) != yp.shape(target_image):
        raise ValueError("Error: images must be same size for "
                         "register_translation")

    # subpixel registration only supports 2D and 3D data
    if yp.ndim(src_image) > 3 and upsample_factor > 1:
        raise NotImplementedError("Error: register_translation only supports "
                                  "subpixel registration for 2D and 3D images")

    # assume complex data is already in Fourier space
    if space.lower() == 'fourier':
        src_freq = src_image
        target_freq = target_image
    # real data needs to be fft'd.
    elif space.lower() == 'real':
        src_freq = yp.Ft(src_image)
        target_freq = yp.Ft(target_image)
    else:
        raise ValueError("Error: register_translation only knows the \"real\" "
                         "and \"fourier\" values for the ``space`` argument.")

    # Whole-pixel shift - Compute cross-correlation by an IFFT
    shape = yp.shape(src_freq)
    image_product = src_freq * yp.conj(target_freq)
    cross_correlation = yp.iFt(image_product, center=False)

    # Locate maximum
    maxima = yp.argmax(yp.abs(cross_correlation))
    midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape])

    shifts = np.array(maxima, dtype=np.float64)
    shifts[shifts > midpoints] -= np.array(shape)[shifts > midpoints]

    # if upsample_factor > 1:
    #     # Initial shift estimate in upsampled grid
    #     shifts = np.round(shifts * upsample_factor) / upsample_factor
    #     upsampled_region_size = np.ceil(upsample_factor * 1.5)
    #     # Center of output array at dftshift + 1
    #     dftshift = np.fix(upsampled_region_size / 2.0)
    #     upsample_factor = np.array(upsample_factor, dtype=np.float64)
    #     normalization = (src_freq.size * upsample_factor ** 2)
    #     # Matrix multiply DFT around the current shift estimate
    #     sample_region_offset = dftshift - shifts*upsample_factor
    #     cross_correlation = _upsampled_dft(image_product.conj(),
    #                                        upsampled_region_size,
    #                                        upsample_factor,
    #                                        sample_region_offset).conj()
    #     cross_correlation /= normalization
    #     # Locate maximum and map back to original pixel grid
    #     maxima = np.array(np.unravel_index(
    #                           np.argmax(np.abs(cross_correlation)),
    #                           cross_correlation.shape),
    #                       dtype=np.float64)
    #     maxima -= dftshift
    #
    #     shifts = shifts + maxima / upsample_factor

    # If it's only one row or column, the shift along that dimension has
    # no effect. We set it to zero.
    for dim in range(yp.ndim(src_freq)):
        if shape[dim] == 1:
            shifts[dim] = 0

    # If energy ratio is too small, set all shifts to zero
    energy_ratio = yp.max(yp.abs(cross_correlation)**2) / yp.sum(
        yp.abs(cross_correlation)**2) * yp.prod(yp.shape(cross_correlation))
    if energy_ratio < energy_ratio_threshold:
        print('Ignoring shift with energy ratio %g (threshold is %g)' %
              (energy_ratio, energy_ratio_threshold))
        shifts = yp.zeros_like(shifts)

    return shifts
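
A quick way to sanity-check the whole-pixel path is to register an image against a circularly shifted copy of itself; the returned vector is the shift needed to map the copy back onto the original, i.e. the negative of the applied roll. A minimal sketch, assuming NumPy arrays are a valid backend for yp.Ft:

import numpy as np

# Hypothetical sanity check for register_translation.
rng = np.random.default_rng(0)
image = rng.standard_normal((64, 64))
rolled = np.roll(image, (5, -3), axis=(0, 1))

shifts = register_translation(image, rolled, energy_ratio_threshold=2)
print(shifts)   # expected: approximately [-5, 3]
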
Example #6
File: composite.py  Project: zfphil/llops
def ConvolutionOld(kernel,
                   dtype=None,
                   backend=None,
                   normalize=False,
                   mode='circular',
                   label='C',
                   pad_value='mean',
                   pad_size=None,
                   fft_backend=None,
                   inverse_regularizer=0,
                   center=False,
                   inner_operator=None,
                   force_full_convolution=False):
    """Convolution linear operator"""

    # Get temporary kernel to account for inner_operator size
    _kernel = kernel if inner_operator is None else inner_operator * kernel

    # Check number of dimensions
    N = _kernel.shape
    dim_count = len(N)
    assert dim_count == ndim(_kernel)

    # Get kernel backend
    if backend is None:
        backend = getBackend(kernel)
    else:
        # Convert kernel to provided backend
        kernel = asbackend(kernel, backend)

    # Get kernel dtype
    if dtype is None:
        dtype = getDatatype(kernel)
    else:
        kernel = astype(kernel, dtype)

    # Determine if the kernel is a shifted delta function - if so, return a
    # shift operator masked as a convolution
    position_list = tuple([
        tuple(np.asarray(pos) - np.asarray(shape(_kernel)) // 2)
        for pos in where(_kernel != 0.0)
    ])
    mode = 'discrete' if len(
        position_list) == 1 and not force_full_convolution else mode

    # Discrete convolution
    if mode == 'discrete':
        # Create shift operator, or identity operator if there is no shift.
        if all([pos == 0.0 for pos in position_list[0]]):
            op = Identity(N)
        else:
            op = Shift(N, position_list[0])

        loc = where(_kernel != 0)[0]

        # If the kernel is not binary, normalize to the correct value
        if scalar(_kernel[loc[0], loc[1]]) != 1.0:
            op *= scalar(_kernel[loc[0], loc[1]])

        # Update label to indicate this is a shift-based convolution
        label += '_{shift}'

        # Normalize if desired
        if normalize:
            op *= 1 / np.sqrt(np.size(kernel))

    elif mode in ['windowed', 'circular']:
        # The only difference between circular and non-circular convolution is
        # the pad size. We'll define this first, then define the convolution in
        # a common framework.

        if mode == 'circular':

            # Pad kernel to an efficient FFT size
            N_pad = list(N)
            for ind, d in enumerate(N):
                if next_fast_len(d) != d:
                    N_pad[ind] = next_fast_len(d)

            crop_start = [0] * len(N_pad)

        elif mode == 'windowed':
            if pad_size is None:
                # Determine support of kernel
                kernel_support_roi = boundingBox(kernel, return_roi=True)
                N_pad_raw = (np.asarray(N) +
                             np.asarray(kernel_support_roi.size).tolist())
                N_pad = [next_fast_len(sz) for sz in N_pad_raw]

                # Create pad and crop operator
                crop_start = [(N_pad[dim] - N[dim]) // 2
                              for dim in range(len(N))]
            else:
                if type(pad_size) not in (list, tuple, np.ndarray):
                    pad_size = (pad_size, pad_size)
                N_pad = (pad_size[0] + N[0], pad_size[1] + N[1])

                crop_start = None

        # Create pad operator
        P = Pad(N,
                N_pad,
                pad_value=pad_value,
                pad_start=crop_start,
                backend=backend,
                dtype=dtype)

        # Create F.T. operator
        F = FourierTransform(N_pad,
                             dtype=dtype,
                             backend=backend,
                             normalize=normalize,
                             fft_backend=fft_backend,
                             pad=False,
                             center=center)

        # Optionally create FFTShift operator
        if not center:
            FFTS = FFTShift(N_pad, dtype=dtype, backend=backend)
        else:
            FFTS = Identity(N_pad, dtype=dtype, backend=backend)

        # Diagonalize kernel
        K = Diagonalize(kernel,
                        inner_operator=F * FFTS * P,
                        inverse_regularizer=inverse_regularizer,
                        label=label)

        # Generate composite op
        op = P.H * F.H * K * F * P

        # Define inversion function
        def _inverse(self, x, y):
            # Get current kernel
            kernel_f = F * FFTS * P * kernel

            # Invert and create operator
            kernel_f_inv = conj(kernel_f) / (abs(kernel_f)**2 +
                                             self.inverse_regularizer)
            K_inverse = Diagonalize(kernel_f_inv,
                                    backend=backend,
                                    dtype=dtype,
                                    label=label)

            # Set output
            y[:] = P.H * F.H * K_inverse * F * P * x

        # Set inverse function
        op._inverse = types.MethodType(_inverse, op)

    else:
        raise ValueError(
            'Convolution mode %s is not defined! Valid options are "circular" and "windowed"'
            % mode)

    # Append type to label
    if '_' not in label:
        label += '_{' + mode + '}'

    # Set label
    op.label = label

    # Set inverse_regularizer
    op.inverse_regularizer = inverse_regularizer

    # Set latex to be just label
    def repr_latex(latex_input=None):
        if latex_input is None:
            return op.label
        else:
            return op.label + ' \\times ' + latex_input

    op.repr_latex = repr_latex

    return op
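
In the 'circular' branch the operator is assembled as P.H * F.H * K * F * P, i.e. the convolution is diagonalized in the Fourier domain, and _inverse simply divides by the (regularized) kernel spectrum. The underlying identities in plain NumPy:

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((8, 8))
k = rng.standard_normal((8, 8))

# Circular convolution == elementwise multiplication of the spectra.
conv_fourier = np.fft.ifft2(np.fft.fft2(k) * np.fft.fft2(x)).real

# Direct circular convolution for comparison.
conv_direct = np.zeros_like(x)
for i in range(8):
    for j in range(8):
        conv_direct += k[i, j] * np.roll(x, (i, j), axis=(0, 1))
print(np.allclose(conv_fourier, conv_direct))       # True

# Regularized (Tikhonov-style) inverse, as in _inverse above: divide by the
# kernel spectrum, damped so near-zero frequencies are not amplified.
eps = 1e-6
K = np.fft.fft2(k)
x_recovered = np.fft.ifft2(np.fft.fft2(conv_fourier) * np.conj(K) /
                           (np.abs(K) ** 2 + eps)).real
print(np.abs(x_recovered - x).max())                # small: x is recovered
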
Example #7
File: composite.py  Project: zfphil/llops
def Registration(array_to_register_to,
                 dtype=None,
                 backend=None,
                 label='R',
                 inner_operator=None,
                 center=False,
                 axes=None,
                 debug=False):
    """Registeration operator for input x and operator input."""
    # Configure backend and datatype
    backend = backend if backend is not None else config.default_backend
    dtype = dtype if dtype is not None else config.default_dtype
    _shape = shape(
        array_to_register_to) if inner_operator is None else inner_operator.M
    axes = axes if axes is not None else tuple(
        range(ndim(array_to_register_to)))

    # Create sub-operators
    PR = PhaseRamp(_shape, dtype, backend, center=center, axes=axes)
    F = FourierTransform(_shape, dtype, backend, center=center, axes=axes)
    X = Diagonalize(-1 * array_to_register_to,
                    dtype,
                    backend,
                    inner_operator=F * inner_operator,
                    label='x')
    _R = X * PR

    # Compute Fourier Transform of array to register to
    array_to_register_to_f = F * array_to_register_to

    # Define inverse
    def _inverse(x, y):
        """Inverse using phase correlation."""
        # Extract two arrays to correlate
        xf_1 = array_to_register_to_f
        xf_2 = x

        # Compute normalized cross-correlation
        phasor = (conj(xf_1) * xf_2) / abs(conj(xf_1) * xf_2)
        phasor[isnan(phasor)] = 0

        # Convert phasor to delta function
        delta = F.H * phasor

        # If only a subset of axes is used, sum over and squeeze out the unused axes
        if len(axes) != ndim(x) or any(
            [ax != index for (ax, index) in zip(axes, range(len(axes)))]):
            axes_not_used = [
                index for index in range(ndim(x)) if index not in axes
            ]

            delta = squeeze(sum(delta, axes=axes_not_used))

        if debug:
            import matplotlib.pyplot as plt
            plt.figure(figsize=(12, 3))
            plt.subplot(131)
            plt.imshow(abs(F.H * xf_1))
            plt.subplot(132)
            plt.imshow(abs(F.H * xf_2))
            plt.subplot(133)
            if ndim(delta) > 1:
                plt.imshow(abs(delta))
            else:
                plt.plot(abs(delta))

        # Calculate maximum and return
        if not center:
            y[:] = reshape(asarray(argmax(delta)), shape(y))
        else:
            y[:] = reshape(
                asarray(argmax(delta)) - asarray(delta.shape) / 2, shape(y))

        # Deal with negative values
        sizes = reshape(asarray([_shape[ax] for ax in axes]), shape(y))
        mask = y[:] > sizes / 2
        y[:] -= mask * sizes

        if debug:
            plt.title(
                str(np.real(np.asarray(argmax(delta))).tolist()) + ' ' +
                str(np.abs(np.asarray(y).ravel())))

    # Define operator name
    repr_str = 'Registration'

    # Create a new operator from phase ramp
    R = Operator(
        _R.shape,
        _R.dtype,
        _R.backend,
        repr_str=repr_str,
        label=label,
        forward=_R.forward,  # Don't provide adjoint, implies nonlinear
        gradient=_R._gradient,
        inverse=_inverse,
        cost=_R.cost,
        convex=_R.convex,
        smooth=True,
        set_arguments_function=X._set_argument_function,
        get_arguments_function=X._get_argument_function,
        inverse_regularizer=_R.inverse_regularizer,
        repr_latex=_R.repr_latex)

    def show_xc(x, figsize=(11, 3)):
        xf_1 = F * _R.arguments[0]
        xf_2 = F * x

        # Compute normalized cross-correlation
        phasor = (conj(xf_1) * xf_2) / abs(conj(xf_1) * xf_2)

        import matplotlib.pyplot as plt
        import llops as yp

        plt.figure(figsize=figsize)
        plt.subplot(121)
        plt.imshow(yp.angle(phasor))
        plt.title('Phase of Frequency domain')
        plt.subplot(122)
        plt.imshow(yp.abs(yp.iFt(phasor)))
        plt.title('Amplitude of Object domain')

    # Return operator with prepended inverse Fourier Transform
    op = F.H * R

    # Set the show_xc in this operator, since it is removed by multiplication
    op.show_xc = show_xc

    # Set the set and get argument functions
    op._setArgumentsFunction = R._set_argument_function
    op._getArgumentsFunction = R._get_argument_function

    # Return result
    return op