Example #1
    def test_base(self):
        assert np.abs(bops.norm(self.x_np_ones) -
                      bops.norm(self.x_ocl_ones)) < 1e-8
        assert np.abs(
            np.linalg.norm(self.x_np_ones) - bops.norm(self.x_ocl_ones)) < 1e-8
        assert np.sum(
            np.abs(
                bops.angle(self.x_np_randn) -
                np.asarray(bops.angle(self.x_ocl_randn)))) < 1e-4
        assert np.sum(
            np.abs(
                bops.abs(self.x_np_randn) -
                np.asarray(bops.abs(self.x_ocl_randn)))) < 1e-4
        assert np.sum(
            np.abs(
                bops.exp(self.x_np_randn) -
                np.asarray(bops.exp(self.x_ocl_randn)))) < 1e-4
        assert np.sum(
            np.abs(
                bops.conj(self.x_np_randn) -
                np.asarray(bops.conj(self.x_ocl_randn)))) < 1e-4
        assert np.sum(
            np.abs(
                bops.flip(self.x_np_randn) -
                np.asarray(bops.flip(self.x_ocl_randn)))) < 1e-4
        assert np.sum(
            np.abs(
                bops.transpose(self.x_np_randn) -
                np.asarray(bops.transpose(self.x_ocl_randn)))) < 1e-4

        assert np.sum(np.abs(self.A_np - np.asarray(self.A_ocl))) < 1e-4
        assert np.sum(
            np.abs(self.x_np_randn - np.asarray(self.x_ocl_randn))) < 1e-4
Example #2
    def test_operator_fourier_transform(self):
        # Define "true" FFTs
        Ft = lambda x: np.fft.fftshift(np.fft.fft2(np.fft.fftshift(x, axes=(0, 1)), axes=(0, 1), norm='ortho'), axes=(0, 1))
        iFt = lambda x: np.fft.fftshift(np.fft.ifft2(np.fft.fftshift(x, axes=(0, 1)), axes=(0, 1), norm='ortho'), axes=(0, 1))

        eps_fft = yp.precision(self.x, for_sum=True)

        if global_backend == 'numpy':
            fft_backends = ['scipy', 'numpy']
        else:
            fft_backends = ['af']

        for fft_backend in fft_backends:

            # Create Operator
            F = ops.FourierTransform(image_size, dtype=global_dtype, axes=(0, 1), fft_backend=fft_backend, center=True, backend=global_backend)

            # Check forward model
            assert yp.sumb(yp.abs(Ft(self.x).reshape(image_size) - yp.changeBackend(F * self.x, 'numpy').reshape(image_size))) < eps_fft, '%f' % yp.sumb(yp.abs(Ft(self.x).reshape(image_size) - yp.changeBackend(F * self.x, 'numpy').reshape(image_size)))
            assert yp.sumb(yp.abs(iFt(self.x).reshape(image_size) - yp.changeBackend((F.H * self.x), 'numpy').reshape(image_size))) < eps_fft

            # Check reciprocity
            assert yp.sumb(yp.abs(F * F.H * self.x - self.x)) < eps_fft, "%.4e" % yp.sumb(yp.abs(F * F.H * self.x - self.x))

            # Check Gradient
            F.gradient_check()
Example #3
    def test_intensity(self):
        I = ops.Intensity(image_size)

        # Check forward operator
        assert yp.sumb(yp.abs((yp.abs(yp.changeBackend(self.x, 'numpy')) ** 2) - yp.changeBackend(I * self.x, 'numpy'))) < eps

        # Check gradient
        I.gradient_check()
Example #4
def psnr(signal, noise_roi=None, signal_roi=None):
    """ Calculate the peak signal to noise ratio (SNR) of a signal """
    # Reference: https://en.wikipedia.org/wiki/Signal-to-noise_ratio

    # Calculate full power of signal
    signal_power = yp.sum(yp.abs(signal)**2) if signal_roi is None else yp.sum(
        yp.abs(signal[signal_roi.slice])**2)

    # Calculate noise standard deviation, using ROI if provided
    signal_var = yp.std(signal)**2 if noise_roi is None else yp.std(
        signal[noise_roi.slice])**2

    return yp.log10(signal_power / signal_var)
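
A minimal usage sketch for psnr() follows (hypothetical names; assumes llops is importable as yp with a NumPy backend and a yp.rand(shape) signature, and that ROI arguments expose a .slice attribute as used above):

import numpy as np
import llops as yp

measurement = yp.rand((64, 64))    # synthetic measurement (assumed yp.rand signature)
print(psnr(measurement))           # log10 of signal power over noise variance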
Example #5
    def _inverse(x, y):
        """Inverse using phase correlation."""
        # Extract two arrays to correlate
        xf_1 = array_to_register_to_f
        xf_2 = x

        # Compute normalized cross-correlation
        phasor = (conj(xf_1) * xf_2) / abs(conj(xf_1) * xf_2)
        phasor[isnan(phasor)] = 0

        # Convert phasor to delta function
        delta = F.H * phasor

        # If axes is defined, return only one axis
        if len(axes) != ndim(x) or any(
            [ax != index for (ax, index) in zip(axes, range(len(axes)))]):
            axes_not_used = [
                index for index in range(ndim(x)) if index not in axes
            ]

            delta = squeeze(sum(delta, axes=axes_not_used))

        if debug:
            import matplotlib.pyplot as plt
            plt.figure(figsize=(12, 3))
            plt.subplot(131)
            plt.imshow(abs(F.H * xf_1))
            plt.subplot(132)
            plt.imshow(abs(F.H * xf_2))
            plt.subplot(133)
            if ndim(delta) > 1:
                plt.imshow(abs(delta))
            else:
                plt.plot(abs(delta))

        # Calculate maximum and return
        if not center:
            y[:] = reshape(asarray(argmax(delta)), shape(y))
        else:
            y[:] = reshape(
                asarray(argmax(delta)) - asarray(delta.shape) / 2, shape(y))

        # Deal with negative values
        sizes = reshape(asarray([_shape[ax] for ax in axes]), shape(y))
        mask = y[:] > sizes / 2
        y[:] -= mask * sizes

        if debug:
            plt.title(
                str(np.real(np.asarray(argmax(delta))).tolist()) + ' ' +
                str(np.abs(np.asarray(y).ravel())))
Example #6
    def test_operator_matrix_multiply(self):
        matrix_size = (10,10)
        m = yp.rand(matrix_size, global_dtype, global_backend)
        xm = yp.rand(matrix_size[1], global_dtype, global_backend)
        M = ops.MatrixMultiply(m)

        # Check Forward operator
        assert yp.sumb(yp.abs(yp.vec(yp.changeBackend(M * xm, 'numpy')) - yp.vec(yp.changeBackend(m, 'numpy').dot(yp.changeBackend(xm, 'numpy'))))) < eps, "%f" % yp.sumb(yp.abs(yp.changeBackend(M * xm, 'numpy') - yp.changeBackend(m, 'numpy').dot(yp.changeBackend(xm, 'numpy'))[:, np.newaxis]))

        # Check Adjoint
        assert yp.sumb(yp.abs(yp.vec(yp.changeBackend(M.H * xm, 'numpy')) - yp.vec(np.conj(yp.changeBackend(m, 'numpy').T).dot(yp.changeBackend(xm, 'numpy'))))) < eps, "%f" % yp.sumb(yp.abs(yp.changeBackend(M.H * xm, 'numpy') - np.conj(yp.changeBackend(m, 'numpy').T).dot(yp.changeBackend(xm, 'numpy'))[:, np.newaxis]))

        # Check gradient
        M.gradient_check()
Example #7
def cnr(signal, noise_roi=None, signal_roi=None):
    """ Calculate the imaging contrast to noise ratio (CNR) of an image """
    # Reference: https://en.wikipedia.org/wiki/Contrast-to-noise_ratio

    # Calculate signal mean, using ROI if provided
    signal_contrast = yp.abs(yp.max(signal) -
                             yp.min(signal)) if signal_roi is None else yp.abs(
                                 yp.max(signal[signal_roi.slice]) -
                                 yp.min(signal[signal_roi.slice]))

    # Calculate noise standard deviation, using ROI if provided
    signal_std = yp.std(signal) if noise_roi is None else yp.std(
        signal[noise_roi.slice])

    return (signal_contrast / signal_std)
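
As with psnr(), a hedged usage sketch; a larger CNR means the image's contrast range sits further above its noise floor:

import llops as yp

image = yp.rand((64, 64))          # synthetic image
print(cnr(image))                  # CNR over the full image, no ROIs given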
Example #8
def otf(shape,
        camera_pixel_size,
        illumination_wavelength,
        objective_numerical_aperture,
        center=True,
        dtype=None,
        backend=None):

    # Generate pupil
    p = pupil(shape,
              camera_pixel_size,
              illumination_wavelength,
              objective_numerical_aperture,
              center,
              dtype=dtype,
              backend=backend)

    # Generate OTF
    otf = iFt(Ft(p) * yp.conj(Ft(p)))

    # Normalize
    otf /= yp.max(yp.abs(otf))

    # Center
    if center:
        return otf
    else:
        return yp.fft.ifftshift(otf)
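
The identity used above is that the incoherent OTF is the autocorrelation of the pupil. A NumPy-only sketch of the same computation (illustrative names, plain FFT convention rather than the centered one used by Ft/iFt):

import numpy as np

pupil_np = np.ones((8, 8))                         # stand-in pupil mask
pupil_f = np.fft.fft2(pupil_np)
otf_np = np.fft.ifft2(pupil_f * np.conj(pupil_f))  # autocorrelation via FFT
otf_np /= np.abs(otf_np).max()                     # normalize peak to 1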
Example #9
    def show_xc(x, figsize=(11, 3)):
        xf_1 = F * _R.arguments[0]
        xf_2 = F * x

        # Compute normalized cross-correlation
        phasor = (conj(xf_1) * xf_2) / abs(conj(xf_1) * xf_2)

        import matplotlib.pyplot as plt
        import llops as yp

        plt.figure(figsize=figsize)
        plt.subplot(121)
        plt.imshow(yp.angle(phasor))
        plt.title('Phase of Frequency domain')
        plt.subplot(122)
        plt.imshow(yp.abs(yp.iFt(phasor)))
        plt.title('Amplitude of Object domain')
Example #10
    def test_operator_flip(self):
        ''' Flip Operator '''

        flip_axis = 0
        L = ops.Flip(image_size, axis=flip_axis)

        # Check forward operator
        assert yp.sumb(yp.abs(L * self.x - yp.flip(self.x, flip_axis))) < eps, "%f" % yp.sumb(yp.abs(L * self.x - yp.flip(self.x, flip_axis)))

        # Check gradient
        L.gradient_check()
Example #11
    def test_operator_crop_non_centered(self):
        ''' Non-centered Crop Operator '''
        # Generate Crop Operator
        crop_size = (image_size[0] // 2, image_size[1] // 2)
        crop_start = (6, 6)
        CR = ops.Crop(image_size, crop_size, pad_value=0,  dtype=global_dtype, backend=global_backend, crop_start=crop_start)

        # Check forward operator
        y_1 = yp.changeBackend(CR * self.x, 'numpy')
        y_2 = yp.changeBackend(yp.crop(self.x, crop_size, crop_start), 'numpy')
        assert yp.sumb(yp.abs(y_1 - y_2)) < eps

        # Check Adjoint Operator
        pad_size = [int((image_size[i] - crop_size[i]) / 2) for i in range(len(image_size))]
        y_3 = yp.pad(yp.crop(self.x, crop_size, crop_start), image_size, crop_start, pad_value=0)
        y_4 = yp.reshape(CR.H * CR * self.x, image_size)
        assert yp.sumb(yp.abs(y_3 - y_4)) < eps

        # Check gradient
        CR.gradient_check()
Example #12
    def test_operator_crop(self):
        ''' Crop Operator '''
        # Generate Crop Operator
        crop_size = (image_size[0] // 2, image_size[1] // 2)
        CR = ops.Crop(image_size, crop_size, pad_value=0,  dtype=global_dtype, backend=global_backend)

        # Check forward operator
        crop_start = tuple(np.asarray(image_size) // 2 - np.asarray(crop_size) // 2)
        y_1 = yp.changeBackend(CR * self.x, 'numpy')
        y_2 = yp.changeBackend(yp.crop(self.x, crop_size, crop_start), 'numpy')
        assert yp.sumb(yp.abs(y_1 - y_2)) < eps

        # Check Adjoint Operator
        pad_size = [int((image_size[i] - crop_size[i]) / 2) for i in range(len(image_size))]
        y_3 = yp.pad(yp.crop(self.x, crop_size, crop_start), image_size, crop_start, pad_value=0)
        y_4 = CR.H * CR * self.x
        assert yp.sumb(yp.abs(y_3 - y_4)) < eps

        # Check gradient
        CR.gradient_check()
Example #13
def propKernelFresnelFourier(shape,
                             pixel_size,
                             wavelength,
                             prop_distance,
                             angle_deg=None,
                             RI=1.0):
    '''
    Creates a Fresnel propagation kernel in the Fourier domain
    :param shape: :class:`list, tuple, np.array`
        Shape of sensor plane (pixels)
    :param pixel_size: :class:`float`
        Pixel size of sensor in spatial units
    :param wavelength: :class:`float`
        Detection wavelength in spatial units
    :param prop_distance: :class:`float`
        Propagation distance in spatial units
    :param angle_deg: :class:`tuple, list, np.array`
        Propagation angle, degrees
    :param RI: :class:`float`
        Refractive index of medium
    '''
    assert len(
        shape) == 2, "Propagation kernel size should be two dimensional!"

    # Determine propagation angle and spatial frequency
    angle = len(shape) * [0.0] if angle_deg is None else np.deg2rad(angle_deg)
    fy_illu, fx_illu = [RI * np.sin(a) / wavelength for a in angle]

    # Generate coordinate system
    fylin = _genLin(shape[0], 1 / pixel_size / shape[0])
    fxlin = _genLin(shape[1], 1 / pixel_size / shape[1])

    # Calculate wavenumber
    k = (2.0 * np.pi / wavelength) * RI

    prop_kernel = yp.exp(1j * k * yp.abs(prop_distance)) * yp.exp(
        -1j * np.pi * wavelength * yp.abs(prop_distance) *
        ((fxlin[np.newaxis, :] - fx_illu)**2 +
         (fylin[:, np.newaxis] - fy_illu)**2))

    return prop_kernel if prop_distance >= 0 else prop_kernel.conj()
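
Since the kernel lives in the Fourier domain, propagation reduces to a spectral multiply. A hedged sketch (assumes yp.Ft/yp.iFt are the FFT pair used elsewhere in these examples, and that pixel_size, wavelength, and prop_distance share the same spatial units, e.g. microns):

import llops as yp

field = yp.rand((256, 256))
kernel = propKernelFresnelFourier((256, 256), pixel_size=6.5,
                                  wavelength=0.53, prop_distance=100.0)
field_propagated = yp.iFt(yp.Ft(field) * kernel)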
Example #14
    def test_operator_wavelet(self):
        ''' Wavelet Transform Operator '''
        import pywt
        wavelet_list = ['db1', 'haar', 'rbio1.1', 'bior1.1', 'bior4.4', 'sym12']
        for wavelet_test in wavelet_list:
            # Wavelet Transform
            W = ops.WaveletTransform(image_size, wavelet_type=wavelet_test, use_cycle_spinning=False)

            # Check forward operation
            coeffs = pywt.wavedecn(self.x, wavelet=wavelet_test)
            x_wavelet, coeff_slices = pywt.coeffs_to_array(coeffs)
            assert yp.sumb(yp.abs(yp.changeBackend(W * self.x, 'numpy') - x_wavelet)) < eps, "Difference %.6e" % yp.sumb(yp.abs(yp.changeBackend(W * self.x, 'numpy') - x_wavelet))

            # Check inverse operation
            coeffs_from_arr = pywt.array_to_coeffs(x_wavelet, coeff_slices)
            cam_recon = pywt.waverecn(coeffs_from_arr, wavelet=wavelet_test)
            assert yp.sumb(yp.abs(W.H * W * self.x - self.x)) < 1e-2

            # Ensure that the wavelet transform isn't just identity (weird bug)
            if W.shape[1] == yp.size(self.x):
                assert yp.sumb(yp.abs(W * yp.vec(self.x) - yp.vec(self.x))) > 1e-2, "%s" % wavelet_test
Example #15
    def _forward(self, x, y):
        fill(self._forward_y, 0)

        # Take finite differences
        for dim in range(len(self.shape)):
            self._forward_y += abs(roll(x, 1, axis=dim) - x)

        # Normalize
        self._forward_y[:] = sqrt(self._forward_y)

        # Return sum
        y[:] = sum(self._forward_y)
Example #16
def propKernelRayleighSpatial(shape, pixel_size, wavelength, prop_distance):

    # Generate coordinate system
    ylin, xlin = yp.grid(shape, pixel_size)

    kb = 2 * np.pi / wavelength
    R2 = prop_distance**2 + xlin[np.newaxis, :]**2 + ylin[:, np.newaxis]**2
    R = yp.sqrt(R2)
    prop_kernel = yp.abs(prop_distance) * kb / (2j * np.pi) * yp.exp(
        1j * kb * R) * (1 + 1j / (kb * R)) / R2

    return prop_kernel if prop_distance >= 0 else prop_kernel.conj()
Example #17
    def test_operator_shift(self):
        ''' Shift Operator '''
        # Normal shift
        shift = (0, 10) # should be y, x
        T = ops.Shift(image_size, shift)

        def shift_func(x, shift):
            # Roll the input along each axis by the requested shift
            for ax, sh in enumerate(shift):
                x = np.roll(x, int(sh), axis=ax)
            return x

        # Check Forward Operator
        y_1 = yp.changeBackend(T * self.x, 'numpy')
        y_2 = shift_func(yp.changeBackend(self.x, 'numpy'), shift)
        assert yp.sumb(yp.abs(y_1 - y_2)) < eps

        # Check Adjoint Operator
        assert yp.sumb(yp.abs(T.H * T * self.x - self.x)) < eps

        # Check gradient
        T.gradient_check()
Example #18
def calcDnfFromKernel(x):
    if len(x) == 0:
        return 0
    else:
        # Normalize
        x = x / yp.scalar(yp.sum(x))

        # Take the Fourier transform intensity
        x_fft = yp.Ft(x)
        sigma_x = yp.abs(x_fft) ** 2

        # Calculate DNF
        return np.sqrt(1 / len(x) * np.sum(1 / sigma_x))
Example #19
    def test_operator_stacking_linear(self):
        # Create list of operators
        op_list_linear = [
            ops.FourierTransform(image_size, dtype=global_dtype, backend=global_backend),
            ops.Identity(image_size, dtype=global_dtype, backend=global_backend),
            ops.Exponential(image_size, dtype=global_dtype, backend=global_backend)
        ]

        # Horizontally stacked operators
        H_l = ops.Hstack(op_list_linear)

        # Vertically stack x for forward operator
        x_np = yp.changeBackend(self.x, 'numpy')
        x3 = yp.changeBackend(np.vstack((x_np, x_np, x_np)), global_backend)

        # Check forward operation
        y2 = yp.zeros(op_list_linear[0].N, op_list_linear[0].dtype, op_list_linear[0].backend)

        for op in op_list_linear:
            y2 = y2 + op * self.x

        assert yp.sumb(yp.abs(yp.changeBackend(H_l(x3) - y2, 'numpy'))) < eps, "%.4e" % yp.sumb(yp.abs(H_l(x3) - y2))

        # Check gradient
        H_l.gradient_check()

        # Create vertically stacked operator
        V_l = ops.Vstack(op_list_linear)

        # Check forward operator
        y3 = np.empty((0,image_size[1]), dtype=yp.getNativeDatatype(global_dtype, 'numpy'))
        for index, op in enumerate(op_list_linear):
            y3 = np.append(y3, (op * self.x), axis=0)

        y3 = yp.changeBackend(y3, global_backend)
        assert yp.sumb(yp.abs(V_l * self.x - y3)) < eps, "%.4e" % yp.sumb(yp.abs(V_l * self.x - y3))

        # Check gradient
        V_l.gradient_check()
Example #20
    def test_operator_sum(self):
        ''' Element-wise Sum Operator '''
        axis_to_sum = (0,1)
        Σ = ops.Sum(image_size, axes=axis_to_sum)

        # Check forward operator
        y_1 = yp.changeBackend(Σ * self.x, 'numpy')
        y_2 = yp.sumb(yp.changeBackend(self.x, 'numpy'), axes=axis_to_sum)
        assert yp.abs(yp.sumb(y_1 - y_2)) < eps

        # Check adjoint operator
        y_3 = yp.changeBackend(Σ.H * Σ * self.x, 'numpy')
        reps = [1, ] * len(image_size)
        axes = list(range(len(image_size))) if axis_to_sum == 'all' else axis_to_sum
        scale = 1
        for axis in axes:
            reps[axis] = image_size[axis]
            scale *= 1 / image_size[axis]
        y_4 = yp.tile(y_2, reps) * scale
        assert yp.sumb(yp.abs(y_3 - y_4)) < eps

        # Check gradient
        Σ.gradient_check(eps=1)
Example #21
        def _inverse(self, x, y):
            # Get current kernel
            kernel_f = F * FFTS * P * kernel

            # Invert and create operator
            kernel_f_inv = conj(kernel_f) / (abs(kernel_f)**2 +
                                             self.inverse_regularizer)
            K_inverse = Diagonalize(kernel_f_inv,
                                    backend=backend,
                                    dtype=dtype,
                                    label=label)

            # Set output
            y[:] = P.H * F.H * K_inverse * F * P * x
Example #22
def propKernelRayleightFourier(shape, pixel_size, wavelength, prop_distance):

    # Generate coordinate system
    fylin, fxlin = yp.grid(shape, 1 / pixel_size / np.asarray(shape))

    # Generate Squared Coordinate System
    fy2 = (fylin**2)[:, np.newaxis]
    fx2 = (fxlin**2)[np.newaxis, :]
    fz2 = 1 / wavelength**2 - fy2 - fx2

    fz = np.lib.scimath.sqrt(fz2)
    prop_kernel = yp.exp(2j * np.pi * fz * yp.abs(prop_distance))

    return prop_kernel if prop_distance >= 0 else np.conj(prop_kernel)
Example #23
def dnfFromConvolutionKernel(h):
    """Calculate the deconvolution noise factor (DNF) of N-dimensional
       convolution operator, given it's kernel."""
    if len(h) == 0:
        return 0
    else:
        # Normalize
        h = copy.deepcopy(h) / yp.scalar(yp.sum(h))

        # Take the Fourier transform intensity
        h_fft = yp.Ft(h)
        sigma_h = yp.abs(h_fft)**2

        # Calculate DNF
        return np.sqrt(1 / len(h) * np.sum(1 / sigma_h))
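
Sanity check for the formula above: a delta-function kernel has a flat spectrum, so deconvolving it amplifies no noise and the DNF is exactly 1. A NumPy-only version (yp.Ft may use a different FFT normalization, which would rescale the intermediate spectrum):

import numpy as np

h = np.zeros(16)
h[0] = 1.0                                   # delta kernel, already normalized
sigma_h = np.abs(np.fft.fft(h)) ** 2         # flat spectrum of ones
print(np.sqrt(1 / len(h) * np.sum(1 / sigma_h)))   # -> 1.0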
Example #24
    def test_operator_exponential(self):
        L2 = ops.L2Norm(image_size)
        F = ops.FourierTransform(image_size)
        EXP = ops.Exponential(image_size)

        # Forward model
        assert yp.sumb(yp.abs(yp.changeBackend(EXP * self.x, 'numpy') - np.exp(yp.changeBackend(self.x, 'numpy')))) < eps

        # Check gradient
        EXP.gradient_check()

        # Generate composite operator
        D = ops.Diagonalize(self.h)
        L2 = ops.L2Norm(image_size)

        EXP_COMP = L2 * F * EXP
        EXP_COMP.gradient_check()

        EXP_COMP_2 = L2 * F * EXP * D
        EXP_COMP_2.gradient_check()
Example #25
def add(signal, type='gaussian', **kwargs):
    """ Add noise to a measurement"""
    if type == 'gaussian':
        snr = kwargs.get('snr', 1.0)
        signal_mean = yp.abs(yp.mean(signal))
        noise_gaussian = np.random.normal(0, signal_mean / snr,
                                          yp.shape(signal))
        return signal + noise_gaussian

    elif type == 'poisson':
        # Subtract the mean so the result is the signal plus zero-mean shot noise
        noise_poisson = np.random.poisson(np.real(signal)) - np.real(signal)
        return signal + noise_poisson

    elif type == 'saltpepper' or type == 'saltandpepper':
        salt_pepper_ratio = kwargs.get('ratio', 0.5)
        salt_pepper_saturation = kwargs.get('saturation', 0.004)
        output = yp.changeBackend(signal, 'numpy')

        # Salt mode
        num_salt = np.ceil(salt_pepper_saturation * signal.size *
                           salt_pepper_ratio)
        coords = [
            np.random.randint(0, i - 1, int(num_salt)) for i in signal.shape
        ]
        output[tuple(coords)] = 1

        # Pepper mode
        num_pepper = np.ceil(salt_pepper_saturation * signal.size *
                             (1. - salt_pepper_ratio))
        coords = [
            np.random.randint(0, i - 1, int(num_pepper)) for i in signal.shape
        ]
        output[tuple(coords)] = 0
        return yp.cast_like(output, signal)

    elif type == 'speckle':
        noise_speckle = yp.randn(signal.shape)
        return signal + signal * noise_speckle
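
A hedged usage sketch for add(), with keyword defaults as read from the code above (hypothetical data):

import numpy as np

clean = 100 * np.ones((32, 32))
noisy = add(clean, type='gaussian', snr=10.0)                   # zero-mean Gaussian noise
corrupted = add(clean, type='saltpepper', ratio=0.5, saturation=0.01)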
Example #26
    def test_operator_phase_ramp(self):
        eps_phase_ramp = 1e-4
        shift = yp.changeBackend(np.asarray((-5,3)).astype(yp.getNativeDatatype(global_dtype, 'numpy')), global_backend)

        # Generate phase ramp
        R = ops.PhaseRamp(image_size)
        r = R * shift

        F = ops.FourierTransform(image_size, dtype=global_dtype, normalize=False, backend=global_backend)
        D_R = ops.Diagonalize(r, dtype=global_dtype)
        S_R = F.H * D_R * F

        # Pixel-wise shift operator
        S = ops.Shift(image_size, shift)

        # Check that phase ramp is shifting by correct amount
        assert yp.sumb(yp.abs(yp.changeBackend(S_R * self.x, 'numpy') - yp.changeBackend(S * self.x, 'numpy'))) < 1e-3

        # Check gradient of phase ramp convolution
        S_R.gradient_check()

        # Check gradient of phase ramp
        R.gradient_check(eps=1e-1)
Example #27
    def blur_vectors(self, dtype=None, backend=None, debug=False,
                     use_phase_ramp=False, corrections={}):
        """
        This function generates the object size, image size, and blur kernels from
        a libwallerlab dataset object.

            Args:
                dataset: An io.Dataset object
                dtype [np.float32]: Which datatype to use for kernel generation (All numpy datatypes supported)
            Returns:
                object_size: The object size this dataset can recover
                image_size: The computed image size of the dataset
                blur_kernel_list: A dictionary of blur kernel lists, one key per color channel.

        """
        # Assign dataset
        dataset = self

        # Get corrections from metadata
        if len(corrections) == 0 and 'blur_vector' in self.metadata.calibration:
            corrections = dataset.metadata.calibration['blur_vector']

        # Get datatype and backends
        dtype = dtype if dtype is not None else yp.config.default_dtype
        backend = backend if backend is not None else yp.config.default_backend

        # Calculate effective pixel size if necessary
        if dataset.metadata.system.eff_pixel_size_um is None:
            dataset.metadata.system.eff_pixel_size_um = dataset.metadata.camera.pixel_size_um / \
                (dataset.metadata.objective.mag * dataset.metadata.system.mag)

        # Recover and store position and illumination list
        blur_vector_roi_list = []
        position_list, illumination_list = [], []
        frame_segment_map = []

        for frame_index in range(dataset.shape[0]):
            frame_state = dataset.frame_state_list[frame_index]

            # Store which segment this measurement uses
            frame_segment_map.append(frame_state['position']['common']['linear_segment_index'])

            # Extract list of illumination values for each time point
            if 'illumination' in frame_state:
                illumination_list_frame = []
                if type(frame_state['illumination']) is str:
                    illum_state_list = self._frame_state_list[0]['illumination']['states']
                else:
                    illum_state_list = frame_state['illumination']['states']
                for time_point in illum_state_list:
                    illumination_list_time_point = []
                    for illumination in time_point:
                        illumination_list_time_point.append(
                            {'index': illumination['index'], 'value': illumination['value']})
                    illumination_list_frame.append(illumination_list_time_point)

            else:
                raise ValueError('Frame %d does not contain illumination information' % frame_index)

            # Extract list of positions for each time point
            if 'position' in frame_state:
                position_list_frame = []
                for time_point in frame_state['position']['states']:
                    position_list_time_point = []
                    for position in time_point:
                        if 'units' in position['value']:
                            if position['value']['units'] == 'mm':
                                ps_um = dataset.metadata.system.eff_pixel_size_um
                                position_list_time_point.append(
                                    [1000 * position['value']['y'] / ps_um, 1000 * position['value']['x'] / ps_um])
                            elif position['value']['units'] == 'um':
                                position_list_time_point.append(
                                    [position['value']['y'] / ps_um, position['value']['x'] / ps_um])
                            elif position['value']['units'] == 'pixels':
                                position_list_time_point.append([position['value']['y'], position['value']['x']])
                            else:
                                raise ValueError('Invalid units %s for position in frame %d' %
                                                 (position['value']['units'], frame_index))
                        else:
                            # print('WARNING: Could not find position units in metadata, assuming mm')
                            ps_um = dataset.metadata.system.eff_pixel_size_um
                            position_list_time_point.append(
                                [1000 * position['value']['y'] / ps_um, 1000 * position['value']['x'] / ps_um])

                    position_list_frame.append(position_list_time_point[0])  # Assuming single time point for now.

                # Define positions and position indices used
                positions_used, position_indicies_used = [], []
                for index, pos in enumerate(position_list_frame):
                    # Keep this position if any color channel is illuminated
                    if any([illumination_list_frame[index][0]['value'][color] > 0 for color in illumination_list_frame[index][0]['value']]):
                        position_indicies_used.append(index)
                        positions_used.append(pos)

                # Generate ROI for this blur vector
                from htdeblur.blurkernel import getPositionListBoundingBox
                blur_vector_roi = getPositionListBoundingBox(positions_used)

                # Append to list
                blur_vector_roi_list.append(blur_vector_roi)

                # Crop illumination list to values within the support used
                illumination_list.append([illumination_list_frame[index] for index in range(min(position_indicies_used), max(position_indicies_used) + 1)])

                # Store corresponding positions
                position_list.append(positions_used)

        # Apply kernel scaling or compression if necessary
        if 'scale' in corrections:

            # We need to use phase-ramp based kernel generation if we modify the positions
            use_phase_ramp = True

            # Modify position list
            for index in range(len(position_list)):
                _positions = np.asarray(position_list[index])
                for scale_correction in corrections['scale']:
                    factor, axis = scale_correction['factor'], scale_correction['axis']
                    _positions[:, axis] = ((_positions[:, axis] - yp.min(_positions[:, axis])) * factor + yp.min(_positions[:, axis]))
                position_list[index] = _positions.tolist()

        # Synthesize blur vectors
        blur_vector_list = []
        for frame_index in range(dataset.shape[0]):
            #  Generate blur vectors
            if use_phase_ramp:
                from llops.operators import PhaseRamp
                kernel_shape = [yp.fft.next_fast_len(max(sh, 1)) for sh in blur_vector_roi_list[frame_index].shape]
                offset = yp.cast([sh // 2 + st for (sh, st) in zip(kernel_shape, blur_vector_roi_list[frame_index].start)], 'complex32', dataset.backend)

                # Create phase ramp and calculate offset
                R = PhaseRamp(kernel_shape, dtype='complex32', backend=dataset.backend)

                # Generate blur vector
                blur_vector = yp.zeros(R.M, dtype='complex32', backend=dataset.backend)
                for pos, illum in zip(position_list[frame_index], illumination_list[frame_index]):
                    pos = yp.cast(pos, dtype=dataset.dtype, backend=dataset.backend)
                    blur_vector += (R * (yp.cast(pos - offset, 'complex32')))

                # Take inverse Fourier Transform
                blur_vector = yp.abs(yp.iFt(blur_vector))

                if position_list[frame_index][0][-1] > position_list[frame_index][0][0]:
                    blur_vector = yp.flip(blur_vector)

            else:
                blur_vector = yp.asarray([illum[0]['value']['w'] for illum in illumination_list[frame_index]],
                                         dtype=dtype, backend=backend)

            # Normalize illumination vectors
            blur_vector /= yp.scalar(yp.sum(blur_vector))

            # Append to list
            blur_vector_list.append(blur_vector)

        # Return
        return blur_vector_list, blur_vector_roi_list
Example #28
def registerImage(image0,
                  image1,
                  method='xc',
                  axis=None,
                  preprocess_methods=['reflect'],
                  debug=False,
                  **kwargs):

    # Perform preprocessing
    if len(preprocess_methods) > 0:
        image0, image1 = _preprocessForRegistration(image0, image1,
                                                    preprocess_methods,
                                                    **kwargs)

    # Parameter on whether we can trust our registration
    trust_ratio = 1.0

    if method in ['xc', 'cross_correlation']:

        # Get energy ratio threshold
        trust_threshold = kwargs.get('energy_ratio_threshold', 1.5)

        # Pad arrays for optimal speed
        pad_size = tuple(
            [sp.fftpack.next_fast_len(s) for s in yp.shape(image0)])

        # Perform padding
        if pad_size != yp.shape(image0):
            image0 = yp.pad(image0, pad_size, pad_value='edge', center=True)
            image1 = yp.pad(image1, pad_size, pad_value='edge', center=True)

        # Take F.T. of measurements
        src_freq, target_freq = yp.Ft(image0, axes=axis), yp.Ft(image1,
                                                                axes=axis)

        # Whole-pixel shift - Compute cross-correlation by an IFFT
        image_product = src_freq * yp.conj(target_freq)
        # image_product /= abs(src_freq * yp.conj(target_freq))
        cross_correlation = yp.iFt(image_product, center=False, axes=axis)

        # Take sum along axis if we're doing 1D
        if axis is not None:
            axis_to_sum = list(range(yp.ndim(image1)))
            del axis_to_sum[axis]
            cross_correlation = yp.sum(cross_correlation, axis=axis_to_sum)

        # Locate maximum
        shape = yp.shape(src_freq)
        maxima = yp.argmax(yp.abs(cross_correlation))
        midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape])

        shifts = np.array(maxima, dtype=np.float64)
        shifts[shifts > midpoints] -= np.array(shape)[shifts > midpoints]

        # If its only one row or column the shift along that dimension has no
        # effect. We set to zero.
        for dim in range(yp.ndim(src_freq)):
            if shape[dim] == 1:
                shifts[dim] = 0

        # If energy ratio is too small, set all shifts to zero
        trust_metric = yp.scalar(
            yp.max(yp.abs(cross_correlation)**2) /
            yp.mean(yp.abs(cross_correlation)**2))

        # Determine if this registration can be trusted
        trust_ratio = trust_metric / trust_threshold

    elif method == 'orb':

        # Get user-defined mean_residual_threshold if given
        trust_threshold = kwargs.get('mean_residual_threshold', 40.0)

        # Get user-defined mean_residual_threshold if given
        orb_feature_threshold = kwargs.get('orb_feature_threshold', 25)

        match_count = 0
        fast_threshold = 0.05
        while match_count < orb_feature_threshold:
            descriptor_extractor = ORB(n_keypoints=500,
                                       fast_n=9,
                                       harris_k=0.1,
                                       fast_threshold=fast_threshold)

            # Extract keypoints from first frame
            descriptor_extractor.detect_and_extract(
                np.asarray(image0).astype(np.double))
            keypoints0 = descriptor_extractor.keypoints
            descriptors0 = descriptor_extractor.descriptors

            # Extract keypoints from second frame
            descriptor_extractor.detect_and_extract(
                np.asarray(image1).astype(np.double))
            keypoints1 = descriptor_extractor.keypoints
            descriptors1 = descriptor_extractor.descriptors

            # Set match count
            match_count = min(len(keypoints0), len(keypoints1))
            fast_threshold -= 0.01

            if fast_threshold <= 0:
                raise RuntimeError(
                    'Could not find any keypoints (even after shrinking fast threshold).'
                )

        # Match descriptors
        matches = match_descriptors(descriptors0,
                                    descriptors1,
                                    cross_check=True)

        # Filter descriptors to axes (if provided)
        if axis is not None:
            matches_filtered = []
            for (index_0, index_1) in matches:
                point_0 = keypoints0[index_0, :]
                point_1 = keypoints1[index_1, :]
                unit_vec = point_0 - point_1
                unit_vec /= np.linalg.norm(unit_vec)

                if yp.abs(unit_vec[axis]) > 0.99:
                    matches_filtered.append((index_0, index_1))

            matches_filtered = np.asarray(matches_filtered)
        else:
            matches_filtered = matches

        # Robustly estimate affine transform model with RANSAC
        model_robust, inliers = ransac((keypoints0[matches_filtered[:, 0]],
                                        keypoints1[matches_filtered[:, 1]]),
                                       EuclideanTransform,
                                       min_samples=3,
                                       residual_threshold=2,
                                       max_trials=100)

        # Note that model_robust has a translation property, but this doesn't
        # seem to be as numerically stable as simply averaging the difference
        # between the coordinates along the desired axis.

        # Apply match filter
        matches_filtered = matches_filtered[inliers, :]

        # Process keypoints
        if yp.shape(matches_filtered)[0] > 0:

            # Compute shifts
            difference = keypoints0[matches_filtered[:, 0]] - keypoints1[
                matches_filtered[:, 1]]
            shifts = (yp.sum(difference, axis=0) / yp.shape(difference)[0])
            shifts = np.round(shifts)

            # Filter to axis mask
            if axis is not None:
                _shifts = [0, 0]
                _shifts[axis] = shifts[axis]
                shifts = _shifts

            # Calculate residuals
            residuals = yp.sqrt(
                yp.sum(
                    yp.abs(keypoints0[matches_filtered[:, 0]] +
                           np.asarray(shifts) -
                           keypoints1[matches_filtered[:, 1]])**2))

            # Define a trust metric
            trust_metric = residuals / yp.shape(
                keypoints0[matches_filtered[:, 0]])[0]

            # Determine if this registration can be trusted
            trust_ratio = 1 / (trust_metric / trust_threshold)
            if debug:
                print('===')
                print(trust_ratio)
                print(trust_threshold)
                print(trust_metric)
                print(shifts)
        else:
            trust_metric = 1e10
            trust_ratio = 0.0
            shifts = np.asarray([0, 0])

    elif method == 'optimize':

        # Create Operators
        L2 = ops.L2Norm(yp.shape(image0), dtype='complex64')
        R = ops.PhaseRamp(yp.shape(image0), dtype='complex64')
        REAL = ops.RealFilter((2, 1), dtype='complex64')

        # Take Fourier Transforms of images
        image0_f, image1_f = yp.astype(yp.Ft(image0), 'complex64'), yp.astype(
            yp.Ft(image1), 'complex64')

        # Diagonalize one of the images
        D = ops.Diagonalize(image0_f)

        # Form objective
        objective = L2 * (D * R * REAL - image1_f)

        # Solve objective
        solver = ops.solvers.GradientDescent(objective)
        shifts = solver.solve(iteration_count=1000, step_size=1e-8)

        # Convert to numpy array, take real part, and round.
        shifts = yp.round(yp.real(yp.asbackend(shifts, 'numpy')))

        # Flip shift axes (x,y to y, x)
        shifts = np.fliplr(shifts)

        # TODO: Trust metric and trust_threshold
        trust_threshold = 1
        trust_ratio = 1.0

    else:
        raise ValueError('Invalid Registration Method %s' % method)

    # Mark whether or not this measurement is of good quality
    if not trust_ratio > 1:
        if debug:
            print('Ignoring shift with trust metric %g (threshold is %g)' %
                  (trust_metric, trust_threshold))
        shifts = yp.zeros_like(np.asarray(shifts)).tolist()

    # Show debugging figures if requested
    if debug:
        import matplotlib.pyplot as plt
        plt.figure(figsize=(6, 5))
        plt.subplot(131)
        plt.imshow(yp.abs(image0))
        plt.axis('off')
        plt.subplot(132)
        plt.imshow(yp.abs(image1))
        plt.title('Trust ratio: %g' % (trust_ratio))
        plt.axis('off')
        plt.subplot(133)
        if method in ['xc', 'cross_correlation']:
            if axis is not None:
                plt.plot(yp.abs(yp.squeeze(cross_correlation)))
            else:
                plt.imshow(yp.abs(yp.fftshift(cross_correlation)))
        else:
            plot_matches(plt.gca(), yp.real(image0), yp.real(image1),
                         keypoints0, keypoints1, matches_filtered)
        plt.title(str(shifts))
        plt.axis('off')

    # Return
    return shifts, trust_ratio
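
A hedged end-to-end sketch of registerImage() on NumPy arrays, exercising the 'xc' path (assumes the module's own imports, e.g. scipy as sp and llops as yp, are available and operate on NumPy-backed arrays):

import numpy as np

image_a = np.random.rand(64, 64)
image_b = np.roll(image_a, 4, axis=0)                 # known 4-pixel shift
shifts, trust_ratio = registerImage(image_a, image_b, method='xc')
print(shifts, trust_ratio)                            # shift recovered up to sign convention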
Example #29
def register_translation(src_image,
                         target_image,
                         upsample_factor=1,
                         energy_ratio_threshold=2,
                         space="real"):
    """
    Efficient subpixel image translation registration by cross-correlation.
    This code gives the same precision as the FFT upsampled cross-correlation
    in a fraction of the computation time and with reduced memory requirements.
    It obtains an initial estimate of the cross-correlation peak by an FFT and
    then refines the shift estimation by upsampling the DFT only in a small
    neighborhood of that estimate by means of a matrix-multiply DFT.
    Parameters
    ----------
    src_image : ndarray
        Reference image.
    target_image : ndarray
        Image to register.  Must be same dimensionality as ``src_image``.
    upsample_factor : int, optional
        Upsampling factor. Images will be registered to within
        ``1 / upsample_factor`` of a pixel. For example
        ``upsample_factor == 20`` means the images will be registered
        within 1/20th of a pixel.  Default is 1 (no upsampling)
    space : string, one of "real" or "fourier", optional
        Defines how the algorithm interprets input data.  "real" means data
        will be FFT'd to compute the correlation, while "fourier" data will
        bypass FFT of input data.  Case insensitive.
    energy_ratio_threshold : float, optional
        Minimum ratio of the cross-correlation's peak energy to its mean
        energy; below this threshold the shift is considered untrustworthy
        and zeros are returned.  Default is 2.
    Returns
    -------
    shifts : ndarray
        Shift vector (in pixels) required to register ``target_image`` with
        ``src_image``.  Axis ordering is consistent with numpy (e.g. Z, Y, X)
    References
    ----------
    .. [1] Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
           "Efficient subpixel image registration algorithms,"
           Optics Letters 33, 156-158 (2008). :DOI:`10.1364/OL.33.000156`
    .. [2] James R. Fienup, "Invariant error metrics for image reconstruction"
           Applied Optics 36, 8352-8357 (1997). :DOI:`10.1364/AO.36.008352`
    """
    # images must be the same shape
    if yp.shape(src_image) != yp.shape(target_image):
        raise ValueError("Error: images must be same size for "
                         "register_translation")

    # only 2D and 3D data make sense right now
    if yp.ndim(src_image) > 3 and upsample_factor > 1:
        raise NotImplementedError("Error: register_translation only supports "
                                  "subpixel registration for 2D and 3D images")

    # assume complex data is already in Fourier space
    if space.lower() == 'fourier':
        src_freq = src_image
        target_freq = target_image
    # real data needs to be fft'd.
    elif space.lower() == 'real':
        src_freq = yp.Ft(src_image)
        target_freq = yp.Ft(target_image)
    else:
        raise ValueError("Error: register_translation only knows the \"real\" "
                         "and \"fourier\" values for the ``space`` argument.")

    # Whole-pixel shift - Compute cross-correlation by an IFFT
    shape = yp.shape(src_freq)
    image_product = src_freq * yp.conj(target_freq)
    cross_correlation = yp.iFt(image_product, center=False)

    # Locate maximum
    maxima = yp.argmax(yp.abs(cross_correlation))
    midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape])

    shifts = np.array(maxima, dtype=np.float64)
    shifts[shifts > midpoints] -= np.array(shape)[shifts > midpoints]

    # if upsample_factor > 1:
    #     # Initial shift estimate in upsampled grid
    #     shifts = np.round(shifts * upsample_factor) / upsample_factor
    #     upsampled_region_size = np.ceil(upsample_factor * 1.5)
    #     # Center of output array at dftshift + 1
    #     dftshift = np.fix(upsampled_region_size / 2.0)
    #     upsample_factor = np.array(upsample_factor, dtype=np.float64)
    #     normalization = (src_freq.size * upsample_factor ** 2)
    #     # Matrix multiply DFT around the current shift estimate
    #     sample_region_offset = dftshift - shifts*upsample_factor
    #     cross_correlation = _upsampled_dft(image_product.conj(),
    #                                        upsampled_region_size,
    #                                        upsample_factor,
    #                                        sample_region_offset).conj()
    #     cross_correlation /= normalization
    #     # Locate maximum and map back to original pixel grid
    #     maxima = np.array(np.unravel_index(
    #                           np.argmax(np.abs(cross_correlation)),
    #                           cross_correlation.shape),
    #                       dtype=np.float64)
    #     maxima -= dftshift
    #
    #     shifts = shifts + maxima / upsample_factor

    # If its only one row or column the shift along that dimension has no
    # effect. We set to zero.
    for dim in range(yp.ndim(src_freq)):
        if shape[dim] == 1:
            shifts[dim] = 0

    # If energy ratio is too small, set all shifts to zero
    energy_ratio = yp.max(yp.abs(cross_correlation)**2) / yp.sum(
        yp.abs(cross_correlation)**2) * yp.prod(yp.shape(cross_correlation))
    if energy_ratio < energy_ratio_threshold:
        print('Ignoring shift with energy ratio %g (threshold is %g)' %
              (energy_ratio, energy_ratio_threshold))
        shifts = yp.zeros_like(shifts)

    return shifts
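
A hedged usage sketch for register_translation(): recover a known integer shift between two NumPy images (assumes yp operates on NumPy-backed arrays; the recovered sign follows the cross-correlation convention above):

import numpy as np

reference = np.random.rand(64, 64)
shifted = np.roll(reference, (3, -5), axis=(0, 1))    # known shift
print(register_translation(reference, shifted))       # expect (3, -5) up to sign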