Example #1
def testObject(absorption,
               shape=None,
               phase=None,
               invert=False,
               invert_phase=False,
               dtype=None,
               backend=None,
               **kwargs):

    # Load absorption image
    test_object = _loadImage(absorption, shape, dtype, backend, **kwargs)

    # Normalize
    test_object -= yp.min(test_object)
    test_object /= yp.max(test_object)

    # invert if requested
    if invert:
        test_object = 1 - test_object

    # Apply correct range to absorption
    absorption_max = kwargs.get('max_value', 1.1)
    absorption_min = kwargs.get('min_value', 0.9)
    test_object *= (absorption_max - absorption_min)
    test_object += absorption_min

    # Add phase if label is provided
    if phase:
        # Load phase image
        phase = _loadImage(phase, shape, **kwargs)

        # invert if requested
        if invert_phase:
            phase = 1 - phase

        # Normalize
        phase -= yp.min(phase)
        phase /= yp.max(phase)

        # Apply correct range to phase
        phase_max = kwargs.get('max_value_phase', 1)
        phase_min = kwargs.get('min_value_phase', 0)
        phase *= (phase_max - phase_min)
        phase += phase_min

        # Add phase to test_object
        test_object = yp.astype(test_object, 'complex32')
        test_object *= yp.exp(
            1j * yp.astype(yp.real(phase), yp.getDatatype(test_object)))

    # Cast to correct dtype and backend
    return yp.cast(test_object, dtype, backend)
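
A minimal standalone sketch of the same construction in plain NumPy (the `yp` calls above come from the llops package; `make_test_object` and the random stand-in images below are hypothetical, for illustration only):

import numpy as np

def make_test_object(absorption, phase=None,
                     absorption_range=(0.9, 1.1), phase_range=(0.0, 1.0)):
    # Normalize absorption to [0, 1]
    t = absorption.astype(np.float64)
    t = (t - t.min()) / (t.max() - t.min())

    # Rescale to the requested absorption range
    lo, hi = absorption_range
    t = t * (hi - lo) + lo

    # Optionally apply a phase term: t <- t * exp(1j * phi)
    if phase is not None:
        phi = phase.astype(np.float64)
        phi = (phi - phi.min()) / (phi.max() - phi.min())
        p_lo, p_hi = phase_range
        phi = phi * (p_hi - p_lo) + p_lo
        t = t.astype(np.complex64) * np.exp(1j * phi)

    return t

obj = make_test_object(np.random.rand(64, 64), phase=np.random.rand(64, 64))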
Example #2
    # Wrapper body of a decorator; the enclosing `def decorator(func):` line is
    # omitted from this excerpt, which is where `func` comes from.
    def as_integer(*args, **kwargs):

        # Store original datatype
        original_dtype = getDatatype(args[0])

        # Store original range
        extent = min(args[0]), max(args[0])

        # Rescale the first argument (x) onto the full uint16 range
        args = list(args)
        args[0] = astype(65535.0 * (args[0] - extent[0]) / (extent[1] - extent[0]), 'uint16')
        args = tuple(args)

        # Call the function, then map the result back to the original range and dtype
        return astype(func(*args, **kwargs) / 65535.0 * (extent[1] - extent[0]) + extent[0], original_dtype)
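
A self-contained NumPy sketch of the same round-trip (float → uint16 → float) with the rescaling written out explicitly; `apply_as_uint16` is an illustrative name, not part of the source library:

import numpy as np

def apply_as_uint16(func, x):
    lo, hi = float(x.min()), float(x.max())

    # Map x onto the full uint16 range [0, 65535]
    x_int = (65535.0 * (x - lo) / (hi - lo)).astype(np.uint16)

    # Run the integer-only function, then map the result back to [lo, hi]
    result = func(x_int).astype(np.float64)
    return result / 65535.0 * (hi - lo) + lo

# Identity round-trip recovers x up to quantization error
x = np.random.rand(32)
assert np.allclose(x, apply_as_uint16(lambda v: v, x),
                   atol=2 * (x.max() - x.min()) / 65535)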
Example #3
    def register(self, force=False, segment_offset=(0, 550), frame_offset=-26,
                 blur_axis=1, frame_registration_mode=None, debug=False,
                 segment_registration_mode=None, write_file=True):

        if 'registration' not in self.metadata.calibration or force:

            # Assign all segments
            self.frame_segment_list = self.frame_segment_list_full

            # Pre-compute indices for speed
            frame_segment_map = self.frame_segment_map
            frame_segment_direction_list = self.frame_segment_direction_list

            # Apply pre-computed offset
            frame_offset_list = []
            for frame_index in range(len(self.frame_mask)):

                # Determine whether this segment scans left-to-right
                segment_direction_is_left_right = frame_segment_direction_list[frame_index][1] > 0

                # Get index of frame in this segment
                frame_segment_index = len([segment for segment in frame_segment_map[:frame_index] if segment == frame_segment_map[frame_index]])

                # Get index of current segment
                segment_index = frame_segment_map[frame_index]

                # Apply frame dependent offset
                _offset_frame = [0, 0]
                _offset_frame[blur_axis] = frame_segment_index * frame_offset
                if not segment_direction_is_left_right:
                    _offset_frame[blur_axis] *= -1

                # Apply segment dependent offset
                _offset_segment = list(segment_offset)
                if segment_direction_is_left_right:
                    for ax in range(len(_offset_segment)):
                        if ax == blur_axis:
                            _offset_segment[ax] *= -1
                        else:
                            _offset_segment[ax] *= segment_index

                # Combine offsets
                offset = [_offset_frame[i] + _offset_segment[i] for i in range(2)]

                # Append to list
                frame_offset_list.append(offset)

            # Apply registration
            if frame_registration_mode is not None:

                # Register frames within segments
                for segment_index in self.frame_segment_list_full:
                    self.frame_segment_list = [segment_index]

                    # Get frame ROI list
                    roi_list = self.roi_list

                    # Get offsets for this segment
                    frame_offset_list_segment = [frame_offset_list[index] for index in self.frame_mask]

                    # Apply frame offsets from previous steps
                    for roi, offset in zip(roi_list, frame_offset_list_segment):
                        roi += offset

                    # Perform registration
                    from comptic.registration import register_roi_list
                    frame_offset_list_segment = register_roi_list(self.frame_list,
                                                                  roi_list,
                                                                  debug=debug,
                                                                  tolerance=(1000, 1000),
                                                                  method=frame_registration_mode,
                                                                  force_2d=False,
                                                                  axis=1)

                    # Apply correction to frame list
                    for index, frame_index in enumerate(self.frame_mask):
                        for i in range(len(frame_offset_list[frame_index])):
                            frame_offset_list[frame_index][i] += frame_offset_list_segment[index][i]

            if segment_registration_mode is not None:
                from llops.operators import VecStack, Segmentation
                from htdeblur.recon import alignRoiListToOrigin, register_roi_list
                stitched_segment_list, stitched_segment_roi_list = [], []
                # Stitch segments
                for segment_index in self.frame_segment_list_full:
                    self.frame_segment_list = [segment_index]

                    # Get frame ROI list
                    roi_list = self.roi_list

                    # Get offsets for this segment
                    frame_offset_list_segment = [frame_offset_list[index] for index in self.frame_mask]

                    # Apply frame offsets from previous steps
                    for roi, offset in zip(roi_list, frame_offset_list_segment):
                        roi += offset

                    # Determine segment ROI
                    stitched_segment_roi_list.append(sum(roi_list))

                    # Align ROI list to origin
                    alignRoiListToOrigin(roi_list)

                    # Create segmentation operator
                    G = Segmentation(roi_list)

                    # Get measurement list
                    y = yp.astype(VecStack(self.frame_list), G.dtype)

                    # Append to list
                    stitched_segment_list.append(G.inv * y)

                # Register stitched segments
                frame_offset_list_segment = register_roi_list(stitched_segment_list,
                                                              stitched_segment_roi_list,
                                                              debug=debug,
                                                              tolerance=(200, 200),
                                                              method=segment_registration_mode)

                # Apply registration to all frames
                self.frame_segment_list = self.frame_segment_list_full

                # Apply offset to frames
                for frame_index in range(self.shape[0]):

                    # Get segment index
                    segment_index = self.frame_segment_map[frame_index]

                    # Apply offset
                    for i in range(len(frame_offset_list[frame_index])):
                        frame_offset_list[frame_index][i] += frame_offset_list_segment[segment_index][i]

            # Set updated values in metadata
            self.metadata.calibration['registration'] = {'frame_offsets': frame_offset_list,
                                                         'segment_offset': segment_offset,  # For debugging
                                                         'frame_offset': frame_offset}      # For debugging

            # Save calibration file
            if write_file:
                self.saveCalibration()
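
The offset bookkeeping in the first loop can be distilled into a dependency-free function; the segment map and direction flags below are stand-in data (the real method reads them from the dataset object):

def precompute_frame_offsets(frame_segment_map, segment_is_left_right,
                             segment_offset=(0, 550), frame_offset=-26,
                             blur_axis=1):
    offsets, seen_per_segment = [], {}
    for segment_index in frame_segment_map:
        # Index of this frame within its segment
        k = seen_per_segment.get(segment_index, 0)
        seen_per_segment[segment_index] = k + 1

        # Frame-dependent offset along the blur axis, sign set by scan direction
        off_frame = [0, 0]
        off_frame[blur_axis] = k * frame_offset
        if not segment_is_left_right[segment_index]:
            off_frame[blur_axis] *= -1

        # Segment-dependent offset (only applied on left-to-right segments,
        # mirroring the original logic)
        off_segment = list(segment_offset)
        if segment_is_left_right[segment_index]:
            for ax in range(len(off_segment)):
                if ax == blur_axis:
                    off_segment[ax] *= -1
                else:
                    off_segment[ax] *= segment_index

        offsets.append([off_frame[i] + off_segment[i] for i in range(2)])
    return offsets

# Two segments of three frames each, alternating scan direction
print(precompute_frame_offsets([0, 0, 0, 1, 1, 1], {0: True, 1: False}))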
Example #4
# Likely imports for this excerpt (not shown in the source): numpy as np,
# scipy as sp, llops as yp, llops.operators as ops, and from scikit-image:
# skimage.feature.ORB, skimage.feature.match_descriptors,
# skimage.feature.plot_matches, skimage.measure.ransac,
# skimage.transform.EuclideanTransform.
def registerImage(image0,
                  image1,
                  method='xc',
                  axis=None,
                  preprocess_methods=['reflect'],
                  debug=False,
                  **kwargs):

    # Perform preprocessing
    if len(preprocess_methods) > 0:
        image0, image1 = _preprocessForRegistration(image0, image1,
                                                    preprocess_methods,
                                                    **kwargs)

    # Ratio used to decide whether this registration can be trusted (set per-method below)
    trust_ratio = 1.0

    if method in ('xc', 'cross_correlation'):

        # Get energy ratio threshold
        trust_threshold = kwargs.get('energy_ratio_threshold', 1.5)

        # Pad arrays for optimal speed
        pad_size = tuple(
            [sp.fftpack.next_fast_len(s) for s in yp.shape(image0)])

        # Perform padding
        if pad_size != yp.shape(image0):
            image0 = yp.pad(image0, pad_size, pad_value='edge', center=True)
            image1 = yp.pad(image1, pad_size, pad_value='edge', center=True)

        # Take F.T. of measurements
        src_freq, target_freq = yp.Ft(image0, axes=axis), yp.Ft(image1,
                                                                axes=axis)

        # Whole-pixel shift - Compute cross-correlation by an IFFT
        image_product = src_freq * yp.conj(target_freq)
        # image_product /= abs(src_freq * yp.conj(target_freq))
        cross_correlation = yp.iFt(image_product, center=False, axes=axis)

        # Take sum along axis if we're doing 1D
        if axis is not None:
            axis_to_sum = list(range(yp.ndim(image1)))
            del axis_to_sum[axis]
            cross_correlation = yp.sum(cross_correlation, axis=axis_to_sum)

        # Locate maximum
        shape = yp.shape(src_freq)
        maxima = yp.argmax(yp.abs(cross_correlation))
        midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape])

        shifts = np.array(maxima, dtype=np.float64)
        shifts[shifts > midpoints] -= np.array(shape)[shifts > midpoints]

        # If there is only one row or column, the shift along that dimension
        # has no effect; set it to zero.
        for dim in range(yp.ndim(src_freq)):
            if shape[dim] == 1:
                shifts[dim] = 0

        # Compute a trust metric: peak power relative to mean power of the cross-correlation
        trust_metric = yp.scalar(
            yp.max(yp.abs(cross_correlation)**2) /
            yp.mean(yp.abs(cross_correlation)**2))

        # Determine if this registration can be trusted
        trust_ratio = trust_metric / trust_threshold

    elif method == 'orb':

        # Get user-defined mean_residual_threshold if given
        trust_threshold = kwargs.get('mean_residual_threshold', 40.0)

        # Get user-defined orb_feature_threshold if given
        orb_feature_threshold = kwargs.get('orb_feature_threshold', 25)

        match_count = 0
        fast_threshold = 0.05
        while match_count < orb_feature_threshold:
            descriptor_extractor = ORB(n_keypoints=500,
                                       fast_n=9,
                                       harris_k=0.1,
                                       fast_threshold=fast_threshold)

            # Extract keypoints from first frame
            descriptor_extractor.detect_and_extract(
                np.asarray(image0).astype(np.double))
            keypoints0 = descriptor_extractor.keypoints
            descriptors0 = descriptor_extractor.descriptors

            # Extract keypoints from second frame
            descriptor_extractor.detect_and_extract(
                np.asarray(image1).astype(np.double))
            keypoints1 = descriptor_extractor.keypoints
            descriptors1 = descriptor_extractor.descriptors

            # Set match count
            match_count = min(len(keypoints0), len(keypoints1))
            fast_threshold -= 0.01

            # Floating-point decrements never hit exactly zero, so guard with <=
            if fast_threshold <= 0:
                raise RuntimeError(
                    'Could not find any keypoints (even after shrinking fast threshold).'
                )

        # Match descriptors
        matches = match_descriptors(descriptors0,
                                    descriptors1,
                                    cross_check=True)

        # Filter descriptors to axes (if provided)
        if axis is not None:
            matches_filtered = []
            for (index_0, index_1) in matches:
                point_0 = keypoints0[index_0, :]
                point_1 = keypoints1[index_1, :]
                unit_vec = point_0 - point_1
                unit_vec /= np.linalg.norm(unit_vec)

                if yp.abs(unit_vec[axis]) > 0.99:
                    matches_filtered.append((index_0, index_1))

            matches_filtered = np.asarray(matches_filtered)
        else:
            matches_filtered = matches

        # Robustly estimate affine transform model with RANSAC
        model_robust, inliers = ransac((keypoints0[matches_filtered[:, 0]],
                                        keypoints1[matches_filtered[:, 1]]),
                                       EuclideanTransform,
                                       min_samples=3,
                                       residual_threshold=2,
                                       max_trials=100)

        # Note that model_robust has a translation property, but this doesn't
        # seem to be as numerically stable as simply averaging the difference
        # between the coordinates along the desired axis.

        # Apply match filter
        matches_filtered = matches_filtered[inliers, :]

        # Process keypoints
        if yp.shape(matches_filtered)[0] > 0:

            # Compute mean shift across matched keypoints
            difference = keypoints0[matches_filtered[:, 0]] - keypoints1[
                matches_filtered[:, 1]]
            shifts = yp.sum(difference, axis=0) / yp.shape(difference)[0]
            shifts = np.round(shifts)

            # Filter to axis mask
            if axis is not None:
                _shifts = [0, 0]
                _shifts[axis] = shifts[axis]
                shifts = _shifts

            # Calculate residuals
            residuals = yp.sqrt(
                yp.sum(
                    yp.abs(keypoints0[matches_filtered[:, 0]] +
                           np.asarray(shifts) -
                           keypoints1[matches_filtered[:, 1]])**2))

            # Define a trust metric
            trust_metric = residuals / yp.shape(
                keypoints0[matches_filtered[:, 0]])[0]

            # Determine if this registration can be trusted
            trust_ratio = 1 / (trust_metric / trust_threshold)
            if debug:
                print('===')
                print(trust_ratio)
                print(trust_threshold)
                print(trust_metric)
                print(shifts)
        else:
            trust_metric = 1e10
            trust_ratio = 0.0
            shifts = np.asarray([0, 0])

    elif method == 'optimize':

        # Create Operators
        L2 = ops.L2Norm(yp.shape(image0), dtype='complex64')
        R = ops.PhaseRamp(yp.shape(image0), dtype='complex64')
        REAL = ops.RealFilter((2, 1), dtype='complex64')

        # Take Fourier Transforms of images
        image0_f, image1_f = yp.astype(yp.Ft(image0), 'complex64'), yp.astype(
            yp.Ft(image1), 'complex64')

        # Diagonalize one of the images
        D = ops.Diagonalize(image0_f)

        # Form objective
        objective = L2 * (D * R * REAL - image1_f)

        # Solve objective
        solver = ops.solvers.GradientDescent(objective)
        shifts = solver.solve(iteration_count=1000, step_size=1e-8)

        # Convert to numpy array, take real part, and round.
        shifts = yp.round(yp.real(yp.asbackend(shifts, 'numpy')))

        # Flip shift axes from (x, y) to (y, x)
        shifts = np.fliplr(shifts)

        # TODO: Trust metric and trust_threshold
        trust_threshold = 1
        trust_ratio = 1.0

    else:
        raise ValueError('Invalid Registration Method %s' % method)

    # Zero out the shift if this measurement cannot be trusted
    if trust_ratio <= 1:
        if debug:
            print('Ignoring shift with trust metric %g (threshold is %g)' %
                  (trust_metric, trust_threshold))
        shifts = yp.zeros_like(np.asarray(shifts)).tolist()

    # Show debugging figures if requested
    if debug:
        import matplotlib.pyplot as plt
        plt.figure(figsize=(6, 5))
        plt.subplot(131)
        plt.imshow(yp.abs(image0))
        plt.axis('off')
        plt.subplot(132)
        plt.imshow(yp.abs(image1))
        plt.title('Trust ratio: %g' % (trust_ratio))
        plt.axis('off')
        plt.subplot(133)
        if method in ('xc', 'cross_correlation'):
            if axis is not None:
                plt.plot(yp.abs(yp.squeeze(cross_correlation)))
            else:
                plt.imshow(yp.abs(yp.fftshift(cross_correlation)))
        else:
            plot_matches(plt.gca(), yp.real(image0), yp.real(image1),
                         keypoints0, keypoints1, matches_filtered)
        plt.title(str(shifts))
        plt.axis('off')

    # Return
    return shifts, trust_ratio
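
The cross-correlation branch above reduces to a few lines of plain NumPy; a minimal sketch without the padding, axis handling, and trust logic (`register_xc` is an illustrative name):

import numpy as np

def register_xc(image0, image1):
    # Cross-correlation computed via the Fourier transform
    f0, f1 = np.fft.fft2(image0), np.fft.fft2(image1)
    xc = np.fft.ifft2(f0 * np.conj(f1))

    # Locate the correlation peak and unwrap it to signed shifts
    shifts = np.array(np.unravel_index(np.argmax(np.abs(xc)), xc.shape), dtype=float)
    midpoints = np.fix(np.array(xc.shape) / 2)
    shifts[shifts > midpoints] -= np.array(xc.shape)[shifts > midpoints]
    return shifts

# Shift a random image by (5, -3) and recover the shift
img = np.random.rand(64, 64)
print(register_xc(np.roll(img, (5, -3), axis=(0, 1)), img))  # [ 5. -3.]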
Example #5
 def _adjoint(self, x, y):
     y[:] = yp.astype(yp.real(x), self.dtype)
Example #6
 def _forward(self, x, y):
     y[:] = yp.astype(yp.real(x), self.dtype)
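
These two one-line methods define the forward and adjoint of a real-part filter; taking the real part is self-adjoint with respect to the real inner product, so the pair is identical. A quick NumPy check of the adjoint identity <Re(x), y> = Re<x, y> for real y (illustrative only):

import numpy as np

x = np.random.rand(8) + 1j * np.random.rand(8)
y = np.random.rand(8)  # real test vector

lhs = np.vdot(np.real(x), y)           # <A x, y>
rhs = np.real(np.vdot(x, np.real(y)))  # <x, A^H y>, with A^H y = Re(y) = y
print(np.isclose(lhs, rhs))  # True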
Example #7
# Assumed context for this excerpt: numpy as np and the stdlib `types` module
# are imported, next_fast_len comes from scipy.fftpack, and the helpers
# (ndim, shape, where, scalar, astype, getBackend, asbackend, getDatatype,
# boundingBox) and operator classes (Identity, Shift, Pad, FourierTransform,
# FFTShift, Diagonalize) come from the llops / llops.operators package.
def ConvolutionOld(kernel,
                   dtype=None,
                   backend=None,
                   normalize=False,
                   mode='circular',
                   label='C',
                   pad_value='mean',
                   pad_size=None,
                   fft_backend=None,
                   inverse_regularizer=0,
                   center=False,
                   inner_operator=None,
                   force_full_convolution=False):
    """Convolution linear operator"""

    # Get temporary kernel to account for inner_operator size
    _kernel = kernel if inner_operator is None else inner_operator * kernel

    # Check number of dimensions
    N = _kernel.shape
    dim_count = len(N)
    assert dim_count == ndim(_kernel)

    # Get kernel backend
    if backend is None:
        backend = getBackend(kernel)
    else:
        # Convert kernel to provided backend
        kernel = asbackend(kernel, backend)

    # Get kernel dtype
    if dtype is None:
        dtype = getDatatype(kernel)
    else:
        kernel = astype(kernel, dtype)

    # Determine if the kernel is a shifted delta function - if so, return a
    # shift operator masked as a convolution
    position_list = tuple([
        tuple(np.asarray(pos) - np.asarray(shape(_kernel)) // 2)
        for pos in where(_kernel != 0.0)
    ])
    mode = 'discrete' if len(position_list) == 1 and not force_full_convolution else mode

    # Discrete convolution
    if mode == 'discrete':
        # Create shift operator, or identity operator if there is no shift.
        if all([pos == 0.0 for pos in position_list[0]]):
            op = Identity(N)
        else:
            op = Shift(N, position_list[0])

        loc = where(_kernel != 0)[0]

        # If the kernel is not binary, normalize to the correct value
        if scalar(_kernel[loc[0], loc[1]]) != 1.0:
            op *= scalar(_kernel[loc[0], loc[1]])

        # Update label to indicate this is a shift-based convolution
        label += '_{shift}'

        # Normalize if desired
        if normalize:
            op *= 1 / np.sqrt(np.size(kernel))

    elif mode in ['windowed', 'circular']:
        # The only difference between circular and non-circular convolution is
        # the pad size. We'll define this first, then define the convolution in
        # a common framework.

        if mode == 'circular':

            # Pad to an FFT-efficient size
            N_pad = list(N)
            for ind, d in enumerate(N):
                if next_fast_len(d) != d:
                    N_pad[ind] = next_fast_len(d)

            crop_start = [0] * len(N_pad)

        elif mode == 'windowed':
            if pad_size is None:
                # Determine support of kernel
                kernel_support_roi = boundingBox(kernel, return_roi=True)
                N_pad_raw = np.asarray(N) + np.asarray(kernel_support_roi.size)
                N_pad = [next_fast_len(sz) for sz in N_pad_raw]

                # Compute the pad start so the original array stays centered
                crop_start = [(N_pad[dim] - N[dim]) // 2
                              for dim in range(len(N))]
            else:
                if not isinstance(pad_size, (list, tuple, np.ndarray)):
                    pad_size = (pad_size, pad_size)
                N_pad = (pad_size[0] + N[0], pad_size[1] + N[1])

                crop_start = None

        # Create pad operator
        P = Pad(N,
                N_pad,
                pad_value=pad_value,
                pad_start=crop_start,
                backend=backend,
                dtype=dtype)

        # Create F.T. operator
        F = FourierTransform(N_pad,
                             dtype=dtype,
                             backend=backend,
                             normalize=normalize,
                             fft_backend=fft_backend,
                             pad=False,
                             center=center)

        # Optionally create FFTShift operator
        if not center:
            FFTS = FFTShift(N_pad, dtype=dtype, backend=backend)
        else:
            FFTS = Identity(N_pad, dtype=dtype, backend=backend)

        # Diagonalize kernel
        K = Diagonalize(kernel,
                        inner_operator=F * FFTS * P,
                        inverse_regularizer=inverse_regularizer,
                        label=label)

        # Generate composite op
        op = P.H * F.H * K * F * P

        # Define inversion function
        def _inverse(self, x, y):
            # Get current kernel
            kernel_f = F * FFTS * P * kernel

            # Invert and create operator
            kernel_f_inv = conj(kernel_f) / (abs(kernel_f)**2 +
                                             self.inverse_regularizer)
            K_inverse = Diagonalize(kernel_f_inv,
                                    backend=backend,
                                    dtype=dtype,
                                    label=label)

            # Set output
            y[:] = P.H * F.H * K_inverse * F * P * x

        # Set inverse function
        op._inverse = types.MethodType(_inverse, op)

    else:
        raise ValueError(
            'Convolution mode %s is not defined! Valid options are "circular" and "windowed"'
            % mode)

    # Append type to label
    if '_' not in label:
        label += '_{' + mode + '}'

    # Set label
    op.label = label

    # Set inverse_regularizer
    op.inverse_regularizer = inverse_regularizer

    # Set latex to be just label
    def repr_latex(latex_input=None):
        if latex_input is None:
            return op.label
        else:
            return op.label + ' \\times ' + latex_input

    op.repr_latex = repr_latex

    return op
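
The 'circular' branch amounts to padding to an FFT-friendly size and diagonalizing the kernel in the Fourier domain; a minimal NumPy/SciPy sketch of that path, without the operator algebra (`circular_convolve` is an illustrative name):

import numpy as np
from scipy.fftpack import next_fast_len

def circular_convolve(image, kernel):
    # Pad to an FFT-efficient size
    shape = [next_fast_len(s) for s in image.shape]

    # Zero-pad the kernel, then roll its center to the origin
    kernel_padded = np.zeros(shape)
    kernel_padded[:kernel.shape[0], :kernel.shape[1]] = kernel
    kernel_padded = np.roll(kernel_padded,
                            (-(kernel.shape[0] // 2), -(kernel.shape[1] // 2)),
                            axis=(0, 1))

    # Diagonalize the kernel in the Fourier domain and apply it
    result = np.fft.ifft2(np.fft.fft2(image, s=shape) * np.fft.fft2(kernel_padded))

    # Crop back to the original size
    return np.real(result[:image.shape[0], :image.shape[1]])

img = np.random.rand(100, 100)
psf = np.zeros((100, 100)); psf[50, 50] = 1.0  # centered delta kernel
print(np.allclose(circular_convolve(img, psf), img))  # delta kernel acts as identity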