Example #1
File: iterative.py  Project: zfphil/llops
    def _iteration_function(self, x, iteration_number, step_size):
        if iteration_number == 0:
            self.r = self.y - self.A * x
            self.p = self.r
        else:

            # Helper variables
            Ap = self.A * self.p
            pAp = yp.sum(yp.real((yp.conj(self.p) * Ap)))
            r2 = yp.sum(yp.real((yp.conj(self.r) * self.r)))

            # Update alpha
            alpha = r2 / pAp

            # Update x
            x += alpha * self.p

            # Update r
            self.r -= alpha * Ap

            # Update beta
            beta = yp.sum(yp.real((yp.conj(self.r) * self.r))) / r2

            # Update p
            self.p = self.r + beta * self.p

        return x
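This `_iteration_function` implements one iteration of the conjugate-gradient (CG) method for solving A * x = y: `r` is the residual, `p` the search direction, and `alpha`/`beta` the standard CG coefficients. Below is a minimal self-contained NumPy sketch of the same update loop, assuming a symmetric positive-definite A (all names here are illustrative, not part of llops):

import numpy as np

def conjugate_gradient(A, y, iteration_count=50):
    """Sketch: solve A x = y by CG for symmetric positive-definite A."""
    x = np.zeros_like(y)
    r = y - A @ x                  # Initial residual
    p = r.copy()                   # Initial search direction
    for _ in range(iteration_count):
        r2 = r @ r
        if np.sqrt(r2) < 1e-12:    # Converged; avoid dividing by ~0
            break
        Ap = A @ p
        alpha = r2 / (p @ Ap)      # Step size along p
        x += alpha * p             # Update solution
        r -= alpha * Ap            # Update residual
        beta = (r @ r) / r2        # Ratio of new to old residual energy
        p = r + beta * p           # Update search direction
    return x

# Quick check against a direct solve
A = np.array([[4.0, 1.0], [1.0, 3.0]])
y = np.array([1.0, 2.0])
assert np.allclose(conjugate_gradient(A, y), np.linalg.solve(A, y))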
Example #2
def affineHomographyBlocks(coordinate_list):
    """Generate affine homography blocks which can be used to solve for homography coordinates."""
    # Store dtype and backend
    dtype = yp.getDatatype(coordinate_list)
    backend = yp.getBackend(coordinate_list)

    # Convert coordinates to numpy array
    coordinate_list = yp.asarray(yp.real(coordinate_list),
                                 dtype='float32',
                                 backend='numpy')

    # Ensure coordinate_list is a list of lists
    if yp.ndim(coordinate_list) == 1:
        coordinate_list = np.asarray([coordinate_list])


    # Loop over positions and concatenate to array
    coordinate_blocks = []
    for coordinate in coordinate_list:
        block = np.append(coordinate, 1)
        coordinate_blocks.append(np.kron(np.eye(len(coordinate)), block))

    # Convert to initial datatype and backend
    coordinate_blocks = yp.asarray(np.concatenate(coordinate_blocks), dtype,
                                   backend)

    return coordinate_blocks
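Each block above is kron(I, [x, y, 1]), so stacking one block per point gives a linear system whose unknown vector holds the rows of an affine transform; solving it by least squares recovers the transform. A minimal NumPy sketch of that use for 2D points (the helper names are illustrative, not the llops API):

import numpy as np

def affine_blocks(points):
    """Stack one kron(eye(2), [x, y, 1]) block per 2D point."""
    return np.concatenate([np.kron(np.eye(len(p)), np.append(p, 1))
                           for p in points])

def solve_affine(src, dst):
    """Least-squares fit of a 2x3 affine map sending src points to dst."""
    A = affine_blocks(src)           # (2N, 6) design matrix
    b = np.asarray(dst).ravel()      # (2N,) stacked target coordinates
    params, *_ = np.linalg.lstsq(A, b, rcond=None)
    return params.reshape(2, 3)      # [[a, b, tx], [c, d, ty]]

# Recover a pure translation by (3, -2) from three point pairs
src = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
dst = src + np.array([3.0, -2.0])
assert np.allclose(solve_affine(src, dst), [[1, 0, 3], [0, 1, -2]])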
Example #3
def testObject(absorption,
               shape=None,
               phase=None,
               invert=False,
               invert_phase=False,
               dtype=None,
               backend=None,
               **kwargs):

    # Load absorption image
    test_object = _loadImage(absorption, shape, dtype, backend, **kwargs)

    # Normalize
    test_object -= yp.min(test_object)
    test_object /= yp.max(test_object)

    # invert if requested
    if invert:
        test_object = 1 - test_object

    # Apply correct range to absorption
    absorption_max = kwargs.get('max_value', 1.1)
    absorption_min = kwargs.get('min_value', 0.9)
    test_object *= (absorption_max - absorption_min)
    test_object += absorption_min

    # Add phase if label is provided
    if phase is not None:
        # Load phase image
        phase = _loadImage(phase, shape, **kwargs)

        # invert if requested
        if invert_phase:
            phase = 1 - phase

        # Normalize
        phase -= yp.min(phase)
        phase /= yp.max(phase)

        # Apply correct range to phase
        phase_max = kwargs.get('max_value_phase', 1)
        phase_min = kwargs.get('min_value_phase', 0)
        phase *= (phase_max - phase_min)
        phase += phase_min

        # Add phase to test_object
        test_object = yp.astype(test_object, 'complex32')
        test_object *= yp.exp(
            1j * yp.astype(yp.real(phase), yp.getDatatype(test_object)))

    # Cast to correct dtype and backend
    return yp.cast(test_object, dtype, backend)
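`testObject` builds a complex transmittance t = a * exp(1j * phi): the absorption image is normalized to [0, 1], rescaled into [min_value, max_value], and optionally multiplied by a unit-magnitude phase factor. A minimal NumPy sketch of that model, taking plain arrays in place of image labels (all names are illustrative):

import numpy as np

def make_test_object(absorption, phase=None,
                     a_range=(0.9, 1.1), phi_range=(0.0, 1.0)):
    """Sketch: build a complex transmittance a * exp(1j * phi)."""
    a = np.asarray(absorption, dtype=np.float64)
    a = (a - a.min()) / (a.max() - a.min())         # Normalize to [0, 1]
    a = a * (a_range[1] - a_range[0]) + a_range[0]  # Rescale to a_range

    if phase is None:
        return a.astype(np.complex64)

    phi = np.asarray(phase, dtype=np.float64)
    phi = (phi - phi.min()) / (phi.max() - phi.min())
    phi = phi * (phi_range[1] - phi_range[0]) + phi_range[0]
    return (a * np.exp(1j * phi)).astype(np.complex64)

obj = make_test_object(np.random.rand(64, 64), np.random.rand(64, 64))
assert obj.dtype == np.complex64 and obj.shape == (64, 64)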
Example #4
def registerImage(image0,
                  image1,
                  method='xc',
                  axis=None,
                  preprocess_methods=['reflect'],
                  debug=False,
                  **kwargs):

    # Perform preprocessing
    if len(preprocess_methods) > 0:
        image0, image1 = _preprocessForRegistration(image0, image1,
                                                    preprocess_methods,
                                                    **kwargs)

    # Parameter on whether we can trust our registration
    trust_ratio = 1.0

    if method in ['xc', 'cross_correlation']:

        # Get energy ratio threshold
        trust_threshold = kwargs.get('energy_ratio_threshold', 1.5)

        # Pad arrays for optimal speed
        pad_size = tuple(
            [sp.fftpack.next_fast_len(s) for s in yp.shape(image0)])

        # Perform padding
        if pad_size != yp.shape(image0):
            image0 = yp.pad(image0, pad_size, pad_value='edge', center=True)
            image1 = yp.pad(image1, pad_size, pad_value='edge', center=True)

        # Take F.T. of measurements
        src_freq = yp.Ft(image0, axes=axis)
        target_freq = yp.Ft(image1, axes=axis)

        # Whole-pixel shift - Compute cross-correlation by an IFFT
        image_product = src_freq * yp.conj(target_freq)
        # image_product /= abs(src_freq * yp.conj(target_freq))
        cross_correlation = yp.iFt(image_product, center=False, axes=axis)

        # Take sum along axis if we're doing 1D
        if axis is not None:
            axis_to_sum = list(range(yp.ndim(image1)))
            del axis_to_sum[axis]
            cross_correlation = yp.sum(cross_correlation, axis=axis_to_sum)

        # Locate maximum
        shape = yp.shape(src_freq)
        maxima = yp.argmax(yp.abs(cross_correlation))
        midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape])

        shifts = np.array(maxima, dtype=np.float64)
        shifts[shifts > midpoints] -= np.array(shape)[shifts > midpoints]

        # If a dimension has only one row or column, a shift along it has no
        # effect; set it to zero.
        for dim in range(yp.ndim(src_freq)):
            if shape[dim] == 1:
                shifts[dim] = 0

        # If energy ratio is too small, set all shifts to zero
        trust_metric = yp.scalar(
            yp.max(yp.abs(cross_correlation)**2) /
            yp.mean(yp.abs(cross_correlation)**2))

        # Determine if this registration can be trusted
        trust_ratio = trust_metric / trust_threshold

    elif method == 'orb':

        # Get user-defined mean_residual_threshold if given
        trust_threshold = kwargs.get('mean_residual_threshold', 40.0)

        # Get user-defined orb_feature_threshold if given
        orb_feature_threshold = kwargs.get('orb_feature_threshold', 25)

        match_count = 0
        fast_threshold = 0.05
        while match_count < orb_feature_threshold:
            descriptor_extractor = ORB(n_keypoints=500,
                                       fast_n=9,
                                       harris_k=0.1,
                                       fast_threshold=fast_threshold)

            # Extract keypoints from first frame
            descriptor_extractor.detect_and_extract(
                np.asarray(image0).astype(np.double))
            keypoints0 = descriptor_extractor.keypoints
            descriptors0 = descriptor_extractor.descriptors

            # Extract keypoints from second frame
            descriptor_extractor.detect_and_extract(
                np.asarray(image1).astype(np.double))
            keypoints1 = descriptor_extractor.keypoints
            descriptors1 = descriptor_extractor.descriptors

            # Set match count
            match_count = min(len(keypoints0), len(keypoints1))
            fast_threshold -= 0.01

            if fast_threshold <= 0:
                raise RuntimeError(
                    'Could not find any keypoints (even after shrinking fast threshold).'
                )

        # Match descriptors
        matches = match_descriptors(descriptors0,
                                    descriptors1,
                                    cross_check=True)

        # Filter descriptors to axes (if provided)
        if axis is not None:
            matches_filtered = []
            for (index_0, index_1) in matches:
                point_0 = keypoints0[index_0, :]
                point_1 = keypoints1[index_1, :]
                unit_vec = point_0 - point_1
                unit_vec /= np.linalg.norm(unit_vec)

                if yp.abs(unit_vec[axis]) > 0.99:
                    matches_filtered.append((index_0, index_1))

            matches_filtered = np.asarray(matches_filtered)
        else:
            matches_filtered = matches

        # Robustly estimate affine transform model with RANSAC
        model_robust, inliers = ransac((keypoints0[matches_filtered[:, 0]],
                                        keypoints1[matches_filtered[:, 1]]),
                                       EuclideanTransform,
                                       min_samples=3,
                                       residual_threshold=2,
                                       max_trials=100)

        # Note that model_robust has a translation property, but this doesn't
        # seem to be as numerically stable as simply averaging the difference
        # between the coordinates along the desired axis.

        # Apply match filter
        matches_filtered = matches_filtered[inliers, :]

        # Process keypoints
        if yp.shape(matches_filtered)[0] > 0:

            # Compute shifts
            difference = keypoints0[matches_filtered[:, 0]] - keypoints1[
                matches_filtered[:, 1]]
            shifts = (yp.sum(difference, axis=0) / yp.shape(difference)[0])
            shifts = np.round(shifts[0])

            # Filter to axis mask
            if axis is not None:
                _shifts = [0, 0]
                _shifts[axis] = shifts[axis]
                shifts = _shifts

            # Calculate residuals
            residuals = yp.sqrt(
                yp.sum(
                    yp.abs(keypoints0[matches_filtered[:, 0]] +
                           np.asarray(shifts) -
                           keypoints1[matches_filtered[:, 1]])**2))

            # Define a trust metric
            trust_metric = residuals / yp.shape(
                keypoints0[matches_filtered[:, 0]])[0]

            # Determine if this registration can be trusted
            trust_ratio = 1 / (trust_metric / trust_threshold)
            if debug:
                print('===')
                print(trust_ratio)
                print(trust_threshold)
                print(trust_metric)
                print(shifts)
        else:
            trust_metric = 1e10
            trust_ratio = 0.0
            shifts = np.asarray([0, 0])

    elif method == 'optimize':

        # Create Operators
        L2 = ops.L2Norm(yp.shape(image0), dtype='complex64')
        R = ops.PhaseRamp(yp.shape(image0), dtype='complex64')
        REAL = ops.RealFilter((2, 1), dtype='complex64')

        # Take Fourier Transforms of images
        image0_f = yp.astype(yp.Ft(image0), 'complex64')
        image1_f = yp.astype(yp.Ft(image1), 'complex64')

        # Diagonalize one of the images
        D = ops.Diagonalize(image0_f)

        # Form objective
        objective = L2 * (D * R * REAL - image1_f)

        # Solve objective
        solver = ops.solvers.GradientDescent(objective)
        shifts = solver.solve(iteration_count=1000, step_size=1e-8)

        # Convert to numpy array, take real part, and round.
        shifts = yp.round(yp.real(yp.asbackend(shifts, 'numpy')))

        # Flip shift axes (x,y to y, x)
        shifts = np.fliplr(shifts)

        # TODO: Trust metric and trust_threshold
        trust_threshold = 1
        trust_ratio = 1.0

    else:
        raise ValueError('Invalid Registration Method %s' % method)

    # Mark whether or not this measurement is of good quality
    if trust_ratio <= 1:
        if debug:
            print('Ignoring shift with trust metric %g (threshold is %g)' %
                  (trust_metric, trust_threshold))
        shifts = yp.zeros_like(np.asarray(shifts)).tolist()

    # Show debugging figures if requested
    if debug:
        import matplotlib.pyplot as plt
        plt.figure(figsize=(6, 5))
        plt.subplot(131)
        plt.imshow(yp.abs(image0))
        plt.axis('off')
        plt.subplot(132)
        plt.imshow(yp.abs(image1))
        plt.title('Trust ratio: %g' % (trust_ratio))
        plt.axis('off')
        plt.subplot(133)
        if method in ['xc', 'cross_correlation']:
            if axis is not None:
                plt.plot(yp.abs(yp.squeeze(cross_correlation)))
            else:
                plt.imshow(yp.abs(yp.fftshift(cross_correlation)))
        else:
            plot_matches(plt.gca(), yp.real(image0), yp.real(image1),
                         keypoints0, keypoints1, matches_filtered)
        plt.title(str(shifts))
        plt.axis('off')

    # Return
    return shifts, trust_ratio
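The 'xc' branch is standard Fourier cross-correlation registration: the shift shows up as the argmax of iFFT{F(image0) * conj(F(image1))}, with indices beyond the array midpoint wrapped around to negative shifts. A minimal NumPy-only sketch of that core, omitting the padding, preprocessing, and trust metric (it returns the shift to apply to image1 to align it with image0):

import numpy as np

def xc_shift(image0, image1):
    """Sketch: integer (row, col) shift aligning image1 onto image0."""
    product = np.fft.fft2(image0) * np.conj(np.fft.fft2(image1))
    cross_correlation = np.fft.ifft2(product)
    shape = np.array(cross_correlation.shape)
    maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)), shape)
    shifts = np.array(maxima, dtype=np.float64)
    midpoints = np.fix(shape / 2)
    shifts[shifts > midpoints] -= shape[shifts > midpoints]  # Wrap to negative
    return shifts

rng = np.random.default_rng(0)
image0 = rng.random((64, 64))
image1 = np.roll(image0, shift=(5, -3), axis=(0, 1))
assert np.array_equal(xc_shift(image0, image1), [-5.0, 3.0])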
Example #5
def _adjoint(self, x, y):
    y[:] = yp.astype(yp.real(x), self.dtype)
Example #6
def _forward(self, x, y):
    y[:] = yp.astype(yp.real(x), self.dtype)
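Examples #5 and #6 are identical because, under the real inner product used by such operator frameworks, taking the real part is self-adjoint: Re<Re(x), y> = Re<x, Re(y)>. A quick NumPy check of that identity (a sketch, not the llops operator API):

import numpy as np

rng = np.random.default_rng(0)
x = rng.random(8) + 1j * rng.random(8)
y = rng.random(8) + 1j * rng.random(8)

# <u, v> = Re(sum(conj(u) * v)); np.vdot conjugates its first argument
forward = np.real(np.vdot(np.real(x), y))   # <F x, y>
adjoint = np.real(np.vdot(x, np.real(y)))   # <x, F* y>
assert np.isclose(forward, adjoint)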
Example #7
def _proximal(self, x, alpha):
    x[real(x) < 0] = 0.0
    return x
Example #8
def _forward(self, x, y):
    if any(real(x) < 0):
        y[:] = np.inf
    else:
        y[:] = 0
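Taken together, Examples #7 and #8 implement the indicator function of the non-negative orthant: `_forward` returns 0 on feasible points and +inf otherwise, and `_proximal` is the corresponding proximal operator, which reduces to projection (clipping negative entries to zero). A minimal NumPy sketch of the pair (illustrative names, not the llops API):

import numpy as np

def nonnegative_indicator(x):
    """0 if x >= 0 elementwise, +inf otherwise."""
    return 0.0 if np.all(np.real(x) >= 0) else np.inf

def nonnegative_prox(x, alpha=None):
    """The prox of an indicator is projection; alpha has no effect."""
    x = np.array(x, dtype=float)
    x[x < 0] = 0.0
    return x

assert nonnegative_indicator([1.0, 2.0]) == 0.0
assert nonnegative_indicator([1.0, -2.0]) == np.inf
assert np.array_equal(nonnegative_prox([1.0, -2.0]), [1.0, 0.0])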
Example #9
File: camera.py  Project: zfphil/comptic
def demosaic(frame,
             order='grbg',
             bayer_coupling_matrix=None,
             debug=False,
             white_balance=False):

    """Demosaic a Bayer-patterned frame into a half-resolution RGB image.

    Notes on channel orders: 'bgrg' renders very green; 'rggb' has a
    slight green tint.
    """
    frame_out = yp.zeros((int(yp.shape(frame)[0] / 2), int(yp.shape(frame)[1] / 2), 3),
                         yp.getDatatype(frame), yp.getBackend(frame))

    if bayer_coupling_matrix is not None:
        frame_vec = yp.zeros((4, int(yp.shape(frame)[0] * yp.shape(frame)[1] / 4)),
                             yp.getDatatype(frame), yp.getBackend(frame))

        # Cast bayer coupling matrix
        bayer_coupling_matrix = yp.cast(bayer_coupling_matrix,
                                        yp.getDatatype(frame),
                                        yp.getBackend(frame))

        # Define frame vector
        for bayer_pattern_index in range(4):
            pixel_offsets = (0, 0)
            if bayer_pattern_index == 3:
                img_sub = frame[pixel_offsets[0]::2, pixel_offsets[1]::2]
            elif bayer_pattern_index == 1:
                img_sub = frame[pixel_offsets[0]::2, pixel_offsets[1] + 1::2]
            elif bayer_pattern_index == 2:
                img_sub = frame[pixel_offsets[0] + 1::2, pixel_offsets[1]::2]
            elif bayer_pattern_index == 0:
                img_sub = frame[pixel_offsets[0] + 1::2, pixel_offsets[1] + 1::2]
            frame_vec[bayer_pattern_index, :] = yp.dcopy(yp.vec(img_sub))
            if debug:
                print("Channel %d mean is %g" % (bayer_pattern_index, yp.scalar(yp.real(yp.sum(img_sub)))))

        # Perform demosaic using least squares
        result = yp.linalg.lstsq(bayer_coupling_matrix, frame_vec)

        result -= yp.amin(result)
        result /= yp.amax(result)
        for channel in range(3):
            values = result[channel]
            frame_out[:, :, channel] = yp.reshape(values, ((yp.shape(frame_out)[0], yp.shape(frame_out)[1])))
            if white_balance:
                frame_out[:, :, channel] -= yp.amin(frame_out[:, :, channel])
                frame_out[:, :, channel] /= yp.amax(frame_out[:, :, channel])
        return frame_out
    else:
        # frame_out was already allocated with the correct shape above

        # Get color order from order variable
        b_index = order.find('b')
        r_index = order.find('r')
        g1_index = order.find('g')

        # Get g2 from intersection of sets
        g2_index = set(list(range(4))).difference({b_index, r_index, g1_index}).pop()
        #  +-----+-----+
        #  |  0  |  1  |
        #  +-----+-----+
        #  |  2  |  3  |
        #  +-----+-----+

        if debug:
            import matplotlib.pyplot as plt
            plt.figure()
            plt.imshow(frame[:12, :12])

        r_start = (int(r_index in [2, 3]), int(r_index in [1, 3]))
        g1_start = (int(g1_index in [2, 3]), int(g1_index in [1, 3]))
        g2_start = (int(g2_index in [2, 3]), int(g2_index in [1, 3]))
        b_start = (int(b_index in [2, 3]), int(b_index in [1, 3]))

        frame_out[:, :, 0] = frame[r_start[0]::2, r_start[1]::2]
        frame_out[:, :, 1] = (frame[g1_start[0]::2, g1_start[1]::2] + frame[g2_start[0]::2, g2_start[1]::2]) / 2.0
        frame_out[:, :, 2] = frame[b_start[0]::2, b_start[1]::2]

        # normalize
        frame_out /= yp.max(frame_out)

        # Perform white balancing if desired
        if white_balance:
            clims = []
            for channel in range(3):
                clims.append(yp.max(frame_out[:, :, channel]))
                frame_out[:, :, channel] /= yp.max(frame_out[:, :, channel])

        # Return frame
        return frame_out
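For order='grbg', the start-index arithmetic above reduces to fixed subsampling offsets: R from rows 0::2 and columns 1::2, G as the average of the two green sites, and B from rows 1::2 and columns 0::2. A plain-NumPy sketch of this fallback path, without the coupling-matrix branch or white balancing (names are illustrative):

import numpy as np

def demosaic_grbg(frame):
    """Sketch: half-resolution RGB from a 'grbg' Bayer mosaic."""
    rgb = np.empty((frame.shape[0] // 2, frame.shape[1] // 2, 3))
    rgb[:, :, 0] = frame[0::2, 1::2]                              # R
    rgb[:, :, 1] = (frame[0::2, 0::2] + frame[1::2, 1::2]) / 2.0  # G1, G2
    rgb[:, :, 2] = frame[1::2, 0::2]                              # B
    return rgb / rgb.max()                                        # Normalize

frame = np.arange(16.0).reshape(4, 4)
assert demosaic_grbg(frame).shape == (2, 2, 3)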