Example #1
File: iterative.py Project: zfphil/llops
    def _check_communitivity(self):
        """This function simply checks whether a list of objective functions is communative"""
        # Generate test arrays
        test_arrays = []
        for objective in self.objective_list:
            test_arrays.append(yp.rand(objective.N))

        # Loop over each objective, setting arguments and operating on missing one
        objective_value_list = []
        for index, objective in enumerate(self.objective_list):

            # Get sublist with all arguments EXCEPT the index. These will be used to set arguments.
            arguments = [None] * len(objective.arguments)
            for (_index, replacement) in zip(
                    self.argument_mask,
                    test_arrays[:index] + test_arrays[index + 1:]):
                arguments[_index] = replacement
            objective.arguments = arguments

            # Determine value of objective function and append to list
            objective_value_list.append(
                yp.scalar(objective * test_arrays[index]))

        # Ensure all the objective values are the same
        assert all([
            value == objective_value_list[0] for value in objective_value_list
        ]), "Objective functions are not commutative"
Example #2
File: noise.py Project: zfphil/comptic
def snr(signal, noise_roi=None, signal_roi=None, debug=False):
    """ Calculate the imaging signal to noise ratio (SNR) of a signal """
    # Reference: https://en.wikipedia.org/wiki/Signal-to-noise_ratio_(imaging)

    # Calculate signal mean, using ROI if provided
    signal_mean = yp.mean(signal) if signal_roi is None else yp.mean(
        signal[signal_roi.slice])

    # Calculate noise standard deviation, using ROI if provided
    noise_std = yp.std(signal) if noise_roi is None else yp.std(
        signal[noise_roi.slice])

    if debug:
        print('Mean is %g, std is %g' %
              (yp.scalar(signal_mean), yp.scalar(noise_std)))

    return yp.scalar(signal_mean / noise_std)
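The imaging SNR used here is simply the signal mean divided by the noise standard deviation. A minimal NumPy sketch of the same computation, assuming plain arrays and a boolean mask in place of the `yp` types and ROI objects above (`snr_numpy` and `noise_mask` are hypothetical names):

import numpy as np

def snr_numpy(signal, noise_mask=None):
    # Imaging SNR: mean(signal) / std(noise); noise_mask selects the
    # noise region (stand-in for noise_roi.slice above)
    noise = signal if noise_mask is None else signal[noise_mask]
    return float(np.mean(signal) / np.std(noise))

# A constant signal with additive Gaussian noise of known sigma
rng = np.random.default_rng(0)
image = 10.0 + rng.normal(scale=0.5, size=(64, 64))
print(snr_numpy(image))  # roughly 10 / 0.5 = 20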
Example #3
File: iterative.py Project: zfphil/llops
    def iterate(self, y):
        """Nesterov iteration term"""

        # Update step size t (FISTA: t_{k+1} = (1 + sqrt(1 + 4 t_k^2)) / 2)
        _t = self.t
        self.t = (1 + math.sqrt(1 + 4 * self.t_prev**2)) / 2
        self.t_prev = _t

        # Update x
        if self.y_prev is not None:
            if type(self.y_prev) not in (list, tuple):
                x = (1 - self.beta) * y + self.beta * self.y_prev
            else:
                x = [(1 - self.beta) * _y + self.beta * _y_prev
                     for _y, _y_prev in zip(y, self.y_prev)]

            # Update momentum term
            if type(self.objective) in (list, tuple):
                if self.restart_enabled and abs(
                        yp.scalar(self.objective[0](self.x_prev[0]))) < abs(
                            yp.scalar(self.objective[0](x[0]))):
                    self.beta = 0
                    x = y
                else:
                    self.beta = (1 - self.t_prev) / self.t
            else:
                if self.restart_enabled and abs(
                        yp.scalar(self.objective(self.x_prev))) < abs(
                            yp.scalar(self.objective(x))):
                    self.beta = 0
                    x = y
                else:
                    self.beta = (1 - self.t_prev) / self.t
        else:
            x = y

        # Store x, y and t values from previous iteration
        self.x_prev = x
        self.y_prev = y
        self.t_prev = self.t

        return (x)
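A standalone sketch of the t and beta sequence generated by the FISTA update above, assuming t starts at 1 (the usual initialization); beta is negative in this sign convention because the momentum term is written x = (1 - beta) * y + beta * y_prev:

import math

t = 1.0
for k in range(5):
    t_next = (1 + math.sqrt(1 + 4 * t**2)) / 2
    beta = (1 - t) / t_next
    print(k, round(t_next, 4), round(beta, 4))
    t = t_next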
Example #4
def calcDnfFromKernel(x):
    """Calculate the deconvolution noise factor (DNF) of a convolution kernel."""
    if len(x) == 0:
        return 0
    else:
        # Normalize
        x = x / yp.scalar(yp.sum(x))

        # Take fourier transform intensity
        x_fft = yp.Ft(x)
        sigma_x = yp.abs(x_fft) ** 2

        # Calculate DNF
        return np.sqrt(1 / len(x) * np.sum(1 / sigma_x))
Example #5
File: noise.py Project: zfphil/comptic
def dnfFromConvolutionKernel(h):
    """Calculate the deconvolution noise factor (DNF) of N-dimensional
       convolution operator, given it's kernel."""
    if len(h) == 0:
        return 0
    else:
        # Normalize
        h = copy.deepcopy(h) / yp.scalar(yp.sum(h))

        # Take fourier transform intensity
        h_fft = yp.Ft(h)
        sigma_h = yp.abs(h_fft)**2

        # Calculate DNF
        return np.sqrt(1 / len(h) * np.sum(1 / sigma_h))
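A NumPy sketch of the same formula: for a normalized kernel h, DNF = sqrt(mean(1 / |FFT(h)|^2)), since (1/len) * sum is a mean. A delta kernel (no blur) should give a DNF of exactly 1, i.e. no noise amplification under deconvolution:

import numpy as np

h = np.zeros(16)
h[0] = 1.0          # delta kernel: identity convolution
h /= h.sum()        # normalize, as above

sigma_h = np.abs(np.fft.fft(h))**2
dnf = np.sqrt(np.mean(1 / sigma_h))
print(dnf)  # 1.0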
Example #6
File: test_llops.py Project: zfphil/llops
    def test_indexing(self):
        q = bops.randn((10, 10), backend='numpy', dtype='complex32')
        q_ocl = bops.changeBackend(q, 'arrayfire')
        q_ocl_np = bops.changeBackend(q_ocl, 'numpy')
        assert np.sum(np.abs(q - q_ocl_np)) < eps

        m = bops.rand((10, 20), dtype, "numpy")
        m_ocl = bops.changeBackend(m, 'arrayfire')

        assert abs(m[0, 1] - bops.scalar(m_ocl[0, 1])) < eps
        assert abs(m[1, 1] - bops.scalar(m_ocl[1, 1])) < eps
        assert abs(bops.scalar(m_ocl[4, 1]) - m[4, 1]) < eps

        assert bops.scalar(self.x_np_rect[5, 15]) == bops.scalar(
            bops.changeBackend(self.x_np_rect, 'arrayfire')[5, 15])
        assert bops.scalar(self.x_ocl_rect[5, 15]) == bops.scalar(
            bops.changeBackend(self.x_ocl_rect, 'numpy')[5, 15])
Example #7
File: iterative.py Project: zfphil/llops
    def _iteration_function(self, x, iteration_number, step_size):
        # Perform gradient step
        # TODO check gradient shape
        if step_size is not None:
            if callable(step_size):
                x[:] -= step_size(iteration_number) * self.objective.gradient(
                    x)
            else:
                # Explicit step size is provided
                x[:] -= step_size * self.objective.gradient(x)
        else:
            # No explicit step size is provided
            # If no step size provided, use optimal step size if objective is convex,
            # or backtracking linesearch if not.
            if self.objective.convex:
                g = self.objective.grad(x)
                step_size = yp.norm(g)**2 / (yp.norm(g)**2 + eps)
                x[:] -= step_size * g
            else:
                x[:], _ = backTrackingStep(
                    x.reshape(-1), lambda x: yp.scalar(self.objective(x)),
                    self.objective.grad(x).reshape(-1))

        return (x)
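The explicit path above accepts either a scalar step size or a callable of the iteration number, e.g. a decay schedule. A minimal sketch of the same dispatch on a quadratic objective; the schedule itself is hypothetical:

import numpy as np

def gradient(x):
    return x  # gradient of f(x) = 0.5 * ||x||^2

step_size = lambda iteration: 0.5 / (1 + iteration)  # hypothetical decay schedule

x = np.ones(4)
for iteration in range(3):
    if callable(step_size):
        x -= step_size(iteration) * gradient(x)
    else:
        x -= step_size * gradient(x)
print(x)  # shrinks toward the minimizer at zero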
Example #8
    def blur_vectors(self, dtype=None, backend=None, debug=False,
                     use_phase_ramp=False, corrections=None):
        """
        This function generates the object size, image size, and blur kernels from
        a libwallerlab dataset object.

            Args:
                dtype [np.float32]: Which datatype to use for kernel generation (all numpy datatypes supported)
                backend: Which llops backend to use (defaults to yp.config.default_backend)
            Returns:
                object_size: The object size this dataset can recover
                image_size: The computed image size of the dataset
                blur_kernel_list: A dictionary of blur kernels lists, one key per color channel.

        """
        # Assign dataset
        dataset = self

        # Get corrections from metadata
        corrections = corrections if corrections is not None else {}
        if len(corrections) == 0 and 'blur_vector' in self.metadata.calibration:
            corrections = dataset.metadata.calibration['blur_vector']

        # Get datatype and backends
        dtype = dtype if dtype is not None else yp.config.default_dtype
        backend = backend if backend is not None else yp.config.default_backend

        # Calculate effective pixel size if necessary
        if dataset.metadata.system.eff_pixel_size_um is None:
            dataset.metadata.system.eff_pixel_size_um = dataset.metadata.camera.pixel_size_um / \
                (dataset.metadata.objective.mag * dataset.metadata.system.mag)

        # Recover and store position and illumination list
        blur_vector_roi_list = []
        position_list, illumination_list = [], []
        frame_segment_map = []

        for frame_index in range(dataset.shape[0]):
            frame_state = dataset.frame_state_list[frame_index]

            # Store which segment this measurement uses
            frame_segment_map.append(frame_state['position']['common']['linear_segment_index'])

            # Extract list of illumination values for each time point
            if 'illumination' in frame_state:
                illumination_list_frame = []
                if type(frame_state['illumination']) is str:
                    illum_state_list = self._frame_state_list[0]['illumination']['states']
                else:
                    illum_state_list = frame_state['illumination']['states']
                for time_point in illum_state_list:
                    illumination_list_time_point = []
                    for illumination in time_point:
                        illumination_list_time_point.append(
                            {'index': illumination['index'], 'value': illumination['value']})
                    illumination_list_frame.append(illumination_list_time_point)

            else:
                raise ValueError('Frame %d does not contain illumination information' % frame_index)

            # Extract list of positions for each time point
            if 'position' in frame_state:
                position_list_frame = []
                for time_point in frame_state['position']['states']:
                    position_list_time_point = []
                    for position in time_point:
                        # Effective pixel size, used to convert physical units to pixels
                        ps_um = dataset.metadata.system.eff_pixel_size_um
                        if 'units' in position['value']:
                            if position['value']['units'] == 'mm':
                                position_list_time_point.append(
                                    [1000 * position['value']['y'] / ps_um, 1000 * position['value']['x'] / ps_um])
                            elif position['value']['units'] == 'um':
                                position_list_time_point.append(
                                    [position['value']['y'] / ps_um, position['value']['x'] / ps_um])
                            elif position['value']['units'] == 'pixels':
                                position_list_time_point.append([position['value']['y'], position['value']['x']])
                            else:
                                raise ValueError('Invalid units %s for position in frame %d' %
                                                 (position['value']['units'], frame_index))
                        else:
                            # Position units not found in metadata; assume mm
                            position_list_time_point.append(
                                [1000 * position['value']['y'] / ps_um, 1000 * position['value']['x'] / ps_um])

                    position_list_frame.append(position_list_time_point[0])  # Assuming single time point for now.

                # Define positions and position indices used
                positions_used, position_indicies_used = [], []
                for index, pos in enumerate(position_list_frame):
                    if any([illumination_list_frame[index][0]['value'][color] > 0 for color in illumination_list_frame[index][0]['value']]):
                        position_indicies_used.append(index)
                        positions_used.append(pos)

                # Generate ROI for this blur vector
                from htdeblur.blurkernel import getPositionListBoundingBox
                blur_vector_roi = getPositionListBoundingBox(positions_used)

                # Append to list
                blur_vector_roi_list.append(blur_vector_roi)

                # Crop illumination list to values within the support used
                illumination_list.append([illumination_list_frame[index] for index in range(min(position_indicies_used), max(position_indicies_used) + 1)])

                # Store corresponding positions
                position_list.append(positions_used)

        # Apply kernel scaling or compression if necessary
        if 'scale' in corrections:

            # We need to use phase-ramp based kernel generation if we modify the positions
            use_phase_ramp = True

            # Modify position list
            for index in range(len(position_list)):
                _positions = np.asarray(position_list[index])
                factor, axis = corrections['scale']['factor'], corrections['scale']['axis']
                _positions[:, axis] = ((_positions[:, axis] - yp.min(_positions[:, axis])) * factor + yp.min(_positions[:, axis]))
                position_list[index] = _positions.tolist()

        # Synthesize blur vectors
        blur_vector_list = []
        for frame_index in range(dataset.shape[0]):
            #  Generate blur vectors
            if use_phase_ramp:
                from llops.operators import PhaseRamp
                kernel_shape = [yp.fft.next_fast_len(max(sh, 1)) for sh in blur_vector_roi_list[frame_index].shape]
                offset = yp.cast([sh // 2 + st for (sh, st) in zip(kernel_shape, blur_vector_roi_list[frame_index].start)], 'complex32', dataset.backend)

                # Create phase ramp and calculate offset
                R = PhaseRamp(kernel_shape, dtype='complex32', backend=dataset.backend)

                # Generate blur vector
                blur_vector = yp.zeros(R.M, dtype='complex32', backend=dataset.backend)
                for pos, illum in zip(position_list[frame_index], illumination_list[frame_index]):
                    pos = yp.cast(pos, dtype=dataset.dtype, backend=dataset.backend)
                    blur_vector += (R * (yp.cast(pos - offset, 'complex32')))

                # Take inverse Fourier transform and keep the magnitude
                blur_vector = yp.abs(yp.iFt(blur_vector))

                if position_list[frame_index][0][-1] > position_list[frame_index][0][0]:
                    blur_vector = yp.flip(blur_vector)

            else:
                blur_vector = yp.asarray([illum[0]['value']['w'] for illum in illumination_list[frame_index]],
                                         dtype=dtype, backend=backend)

            # Normalize illumination vectors
            blur_vector /= yp.scalar(yp.sum(blur_vector))

            # Append to list
            blur_vector_list.append(blur_vector)

        # Return
        return blur_vector_list, blur_vector_roi_list
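The phase-ramp branch builds each blur vector as a sum of (possibly sub-pixel) shifted delta functions in Fourier space, then inverse-transforms. A NumPy sketch of that idea via the Fourier shift theorem; the positions and weights here are made up for illustration:

import numpy as np

shape = (32, 32)
positions = [(3.0, 5.5), (4.0, 6.5), (5.0, 7.5)]  # (y, x), may be sub-pixel
weights = [1.0, 1.0, 1.0]                          # illumination values

# A delta at position p has Fourier transform exp(-2j * pi * k . p)
ky = np.fft.fftfreq(shape[0])[:, None]
kx = np.fft.fftfreq(shape[1])[None, :]
kernel_f = np.zeros(shape, dtype=complex)
for (y, x), w in zip(positions, weights):
    kernel_f += w * np.exp(-2j * np.pi * (ky * y + kx * x))

kernel = np.abs(np.fft.ifft2(kernel_f))  # magnitude, as in the code above
kernel /= kernel.sum()                   # normalize, as in the code above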
Example #9
def registerImage(image0,
                  image1,
                  method='xc',
                  axis=None,
                  preprocess_methods=['reflect'],
                  debug=False,
                  **kwargs):

    # Perform preprocessing
    if len(preprocess_methods) > 0:
        image0, image1 = _preprocessForRegistration(image0, image1,
                                                    preprocess_methods,
                                                    **kwargs)

    # Parameter on whether we can trust our registration
    trust_ratio = 1.0

    if method in ['xc', 'cross_correlation']:

        # Get energy ratio threshold
        trust_threshold = kwargs.get('energy_ratio_threshold', 1.5)

        # Pad arrays for optimal speed
        pad_size = tuple(
            [sp.fftpack.next_fast_len(s) for s in yp.shape(image0)])

        # Perform padding
        if pad_size != yp.shape(image0):
            image0 = yp.pad(image0, pad_size, pad_value='edge', center=True)
            image1 = yp.pad(image1, pad_size, pad_value='edge', center=True)

        # Take F.T. of measurements
        src_freq, target_freq = yp.Ft(image0, axes=axis), yp.Ft(image1,
                                                                axes=axis)

        # Whole-pixel shift - Compute cross-correlation by an IFFT
        image_product = src_freq * yp.conj(target_freq)
        # image_product /= abs(src_freq * yp.conj(target_freq))
        cross_correlation = yp.iFt(image_product, center=False, axes=axis)

        # Take sum along axis if we're doing 1D
        if axis is not None:
            axis_to_sum = list(range(yp.ndim(image1)))
            del axis_to_sum[axis]
            cross_correlation = yp.sum(cross_correlation, axis=axis_to_sum)

        # Locate maximum
        shape = yp.shape(src_freq)
        maxima = yp.argmax(yp.abs(cross_correlation))
        midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape])

        shifts = np.array(maxima, dtype=np.float64)
        shifts[shifts > midpoints] -= np.array(shape)[shifts > midpoints]

        # If it's only one row or column, the shift along that dimension has
        # no effect; set it to zero.
        for dim in range(yp.ndim(src_freq)):
            if shape[dim] == 1:
                shifts[dim] = 0

        # If energy ratio is too small, set all shifts to zero
        trust_metric = yp.scalar(
            yp.max(yp.abs(cross_correlation)**2) /
            yp.mean(yp.abs(cross_correlation)**2))

        # Determine if this registration can be trusted
        trust_ratio = trust_metric / trust_threshold

    elif method == 'orb':

        # Get user-defined mean_residual_threshold if given
        trust_threshold = kwargs.get('mean_residual_threshold', 40.0)

        # Get user-defined mean_residual_threshold if given
        orb_feature_threshold = kwargs.get('orb_feature_threshold', 25)

        match_count = 0
        fast_threshold = 0.05
        while match_count < orb_feature_threshold:
            descriptor_extractor = ORB(n_keypoints=500,
                                       fast_n=9,
                                       harris_k=0.1,
                                       fast_threshold=fast_threshold)

            # Extract keypoints from first frame
            descriptor_extractor.detect_and_extract(
                np.asarray(image0).astype(np.double))
            keypoints0 = descriptor_extractor.keypoints
            descriptors0 = descriptor_extractor.descriptors

            # Extract keypoints from second frame
            descriptor_extractor.detect_and_extract(
                np.asarray(image1).astype(np.double))
            keypoints1 = descriptor_extractor.keypoints
            descriptors1 = descriptor_extractor.descriptors

            # Set match count
            match_count = min(len(keypoints0), len(keypoints1))
            fast_threshold -= 0.01

            # Use <= 0 rather than == 0: repeated float subtraction never
            # lands exactly on zero
            if fast_threshold <= 0:
                raise RuntimeError(
                    'Could not find any keypoints (even after shrinking fast threshold).'
                )

        # Match descriptors
        matches = match_descriptors(descriptors0,
                                    descriptors1,
                                    cross_check=True)

        # Filter descriptors to axes (if provided)
        if axis is not None:
            matches_filtered = []
            for (index_0, index_1) in matches:
                point_0 = keypoints0[index_0, :]
                point_1 = keypoints1[index_1, :]
                unit_vec = point_0 - point_1
                unit_vec /= np.linalg.norm(unit_vec)

                if yp.abs(unit_vec[axis]) > 0.99:
                    matches_filtered.append((index_0, index_1))

            matches_filtered = np.asarray(matches_filtered)
        else:
            matches_filtered = matches

        # Robustly estimate affine transform model with RANSAC
        model_robust, inliers = ransac((keypoints0[matches_filtered[:, 0]],
                                        keypoints1[matches_filtered[:, 1]]),
                                       EuclideanTransform,
                                       min_samples=3,
                                       residual_threshold=2,
                                       max_trials=100)

        # Note that model_robust has a translation property, but this doesn't
        # seem to be as numerically stable as simply averaging the difference
        # between the coordinates along the desired axis.

        # Apply match filter
        matches_filtered = matches_filtered[inliers, :]

        # Process keypoints
        if yp.shape(matches_filtered)[0] > 0:

            # Compute shifts
            difference = keypoints0[matches_filtered[:, 0]] - keypoints1[
                matches_filtered[:, 1]]
            shifts = (yp.sum(difference, axis=0) / yp.shape(difference)[0])
            shifts = np.round(shifts[0])

            # Filter to axis mask
            if axis is not None:
                _shifts = [0, 0]
                _shifts[axis] = shifts[axis]
                shifts = _shifts

            # Calculate residuals
            residuals = yp.sqrt(
                yp.sum(
                    yp.abs(keypoints0[matches_filtered[:, 0]] +
                           np.asarray(shifts) -
                           keypoints1[matches_filtered[:, 1]])**2))

            # Define a trust metric
            trust_metric = residuals / yp.shape(
                keypoints0[matches_filtered[:, 0]])[0]

            # Determine if this registration can be trusted
            trust_ratio = 1 / (trust_metric / trust_threshold)
            if debug:
                print('Trust ratio %g (metric %g, threshold %g), shifts %s' %
                      (trust_ratio, trust_metric, trust_threshold, str(shifts)))
        else:
            trust_metric = 1e10
            trust_ratio = 0.0
            shifts = np.asarray([0, 0])

    elif method == 'optimize':

        # Create Operators
        L2 = ops.L2Norm(yp.shape(image0), dtype='complex64')
        R = ops.PhaseRamp(yp.shape(image0), dtype='complex64')
        REAL = ops.RealFilter((2, 1), dtype='complex64')

        # Take Fourier Transforms of images
        image0_f, image1_f = yp.astype(yp.Ft(image0), 'complex64'), yp.astype(
            yp.Ft(image1), 'complex64')

        # Diagonalize one of the images
        D = ops.Diagonalize(image0_f)

        # Form objective
        objective = L2 * (D * R * REAL - image1_f)

        # Solve objective
        solver = ops.solvers.GradientDescent(objective)
        shifts = solver.solve(iteration_count=1000, step_size=1e-8)

        # Convert to numpy array, take real part, and round.
        shifts = yp.round(yp.real(yp.asbackend(shifts, 'numpy')))

        # Flip shift axes (x,y to y, x)
        shifts = np.fliplr(shifts)

        # TODO: Trust metric and trust_threshold
        trust_threshold = 1
        trust_ratio = 1.0

    else:
        raise ValueError('Invalid Registration Method %s' % method)

    # Mark whether or not this measurement is of good quality
    if not trust_ratio > 1:
        if debug:
            print('Ignoring shift with trust metric %g (threshold is %g)' %
                  (trust_metric, trust_threshold))
        shifts = yp.zeros_like(np.asarray(shifts)).tolist()

    # Show debugging figures if requested
    if debug:
        import matplotlib.pyplot as plt
        plt.figure(figsize=(6, 5))
        plt.subplot(131)
        plt.imshow(yp.abs(image0))
        plt.axis('off')
        plt.subplot(132)
        plt.imshow(yp.abs(image1))
        plt.title('Trust ratio: %g' % (trust_ratio))
        plt.axis('off')
        plt.subplot(133)
        if method in ['xc', 'cross_correlation']:
            if axis is not None:
                plt.plot(yp.abs(yp.squeeze(cross_correlation)))
            else:
                plt.imshow(yp.abs(yp.fftshift(cross_correlation)))
        else:
            plot_matches(plt.gca(), yp.real(image0), yp.real(image1),
                         keypoints0, keypoints1, matches_filtered)
        plt.title(str(shifts))
        plt.axis('off')

    # Return
    return shifts, trust_ratio
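A standalone NumPy sketch of the whole-pixel 'xc' branch above (FFT cross-correlation, peak location, wrap-around correction), verified on a known shift:

import numpy as np

def register_xc(image0, image1):
    # Cross-correlate via FFT, locate the peak, unwrap negative shifts
    f0, f1 = np.fft.fft2(image0), np.fft.fft2(image1)
    xc = np.fft.ifft2(f0 * np.conj(f1))
    maxima = np.unravel_index(np.argmax(np.abs(xc)), xc.shape)
    shifts = np.array(maxima, dtype=np.float64)
    midpoints = np.fix(np.array(xc.shape) / 2)
    shifts[shifts > midpoints] -= np.array(xc.shape)[shifts > midpoints]
    return shifts

rng = np.random.default_rng(0)
img = rng.random((64, 64))
shifted = np.roll(img, (2, -3), axis=(0, 1))
print(register_xc(shifted, img))  # [ 2. -3.]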
Example #10
File: iterative.py Project: zfphil/llops
    def _initialize(self, initialization=None, **kwargs):

        for keyword in kwargs:
            if hasattr(self, keyword):
                setattr(self, keyword, kwargs[keyword])
            else:
                print('Ignoring keyword %s' % keyword)

        # Show objective function(s)
        if self.display_type is not None:
            if type(self.objective) in (list, tuple):
                print('Minimizing functions:')
                for objective in self.objective:
                    objective.latex()
            else:
                print('Minimizing function:')
                self.objective.latex()

        # Generate initialization
        if type(self.objective) not in (list, tuple):
            if initialization is None:
                self.x = yp.zeros(self.objective.N,
                                  dtype=self.objective.dtype,
                                  backend=self.objective.backend)
            else:
                self.x = yp.dcopy(initialization)
        else:
            # Create random initializations for all variables
            self.x = []
            for index, objective in enumerate(self.objective_list):
                if initialization is None:
                    self.x.append(
                        yp.zeros(objective.N, objective.dtype,
                                 objective.backend))
                else:
                    assert type(initialization) in (list, tuple)
                    self.x.append(initialization[index])

        # Generate plotting interface
        if self.display_type == 'text':
            # Create text plot object
            self.plot = display.IterationText()

        elif self.display_type == 'plot':
            # Create figure if axis was not provided
            if 'ax' in kwargs:
                ax = kwargs['ax']
                self.fig = ax.get_figure()
            else:
                self.fig = plt.figure(figsize=kwargs.pop('figsize', (5, 3)))
                ax = plt.gca()

            # Create iteration plot object
            use_log = (kwargs.pop('use_log_x',
                                  False), kwargs.pop('use_log_y', False))
            max_iter = kwargs.pop('max_iter_plot',
                                  self._default_iteration_count)
            self.plot = display.IterationPlot(ax, max_iter, use_log=use_log)
            self.fig.canvas.draw()
            plt.tight_layout()

        # Update plot with first value
        if self.plot is not None:
            objective_value = self.objective(
                self.x) if not self.multi_objective else self.objective[0](
                    self.x[0])

            self.plot.update(0,
                             abs(yp.scalar(objective_value)),
                             new_time=0,
                             step_norm=0)
            self.t0 = time.time()

        # If using Nesterov acceleration, initialize NesterovAccelerator class
        if self.use_nesterov_acceleration:
            self.nesterov = NesterovAccelerator(self.objective,
                                                self.nesterov_restart_enabled)

            # Enable restarting if desired
            if self.nesterov_restart_enabled:
                self.let_diverge = True  # We need to let nesterov diverge a bit

        self.initialized = True
Example #11
File: iterative.py Project: zfphil/llops
    def solve(self,
              initialization=None,
              iteration_count=10,
              display_iteration_delta=None,
              **kwargs):

        # Process display iteration delta
        if display_iteration_delta is None:
            display_iteration_delta = max(iteration_count // 10, 1)

        # Try to import arrayfire and call garbage collection to free memory
        try:
            import arrayfire
            arrayfire.device_gc()
        except ImportError:
            pass

        # Initialize solver if it hasn't been already
        if not self.initialized:
            self._initialize(initialization, **kwargs)

        cost = []
        # Run Algorithm
        for iteration in range(iteration_count):

            # Determine step norm
            if self.multi_objective:
                x_prev_norm = sum([yp.norm(x) for x in self.x])
            else:
                x_prev_norm = yp.norm(self.x)

            # Perform iteration
            self.x = self._iteration_function(self.x, iteration,
                                              self.step_size)

            # Apply nesterov acceleration if desired
            if self.use_nesterov_acceleration:
                self.x = self.nesterov.iterate(self.x)

            # Store cost
            objective_value = self.objective(
                self.x) if not self.multi_objective else self.objective[0](
                    self.x[0])
            cost.append(abs(yp.scalar(objective_value)))

            # Determine step norm
            if self.multi_objective:
                step_norm = abs(
                    sum([yp.norm(x) for x in self.x]) - x_prev_norm)
            else:
                step_norm = abs(yp.norm(self.x) - x_prev_norm)

            # Show update
            if self.display_type == 'text':
                if (iteration + 1) % display_iteration_delta == 0:
                    self.plot.update(iteration + 1, cost[-1],
                                     time.time() - self.t0, step_norm)
            elif self.display_type == 'plot':
                self.plot.update(iteration, new_cost=cost[-1])
                self.fig.canvas.draw()
            elif self.display_type is not None:
                raise ValueError('display_type %s is not defined!' %
                                 self.display_type)

            # Check if converged or diverged
            if len(cost) > 2:
                if self.convergence_tol is not None and (
                        abs(cost[-1] - cost[-2]) / max(cost[-1], 1e-10) <
                        self.convergence_tol or cost[-1] < 1e-20):
                    print(
                        "Met convergence requirement (delta < %.2E) at iteration %d"
                        % (self.convergence_tol, iteration + 1))
                    return (self.x)
                elif cost[-1] > cost[-2] and not self.let_diverge:
                    print("Diverged at iteration %d" % (iteration + 1))
                    return (self.x)
        return (self.x)
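The stopping rule above compares the relative change in cost between iterations against convergence_tol. A minimal sketch of the same test on a made-up cost history:

# Relative-change convergence test used in solve() above
cost = [10.0, 5.0, 4.999]
convergence_tol = 1e-3

delta = abs(cost[-1] - cost[-2]) / max(cost[-1], 1e-10)
print(delta < convergence_tol or cost[-1] < 1e-20)  # True: converged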
Example #12
File: camera.py Project: zfphil/comptic
def demosaic(frame,
             order='grbg',
             bayer_coupling_matrix=None,
             debug=False,
             white_balance=False):

    """Demosaic a Bayer-patterned frame."""
    # bayer_coupling_matrix = None
    # bgrg: cells very green
    # rggb: slight green tint

    frame_out = yp.zeros((int(yp.shape(frame)[0] / 2), int(yp.shape(frame)[1] / 2), 3), yp.getDatatype(frame), yp.getBackend(frame))

    if bayer_coupling_matrix is not None:
        frame_vec = yp.zeros((4, int(yp.shape(frame)[0] * yp.shape(frame)[1] / 4)), yp.getDatatype(frame), yp.getBackend(frame))

        # Cast bayer coupling matrix
        bayer_coupling_matrix = yp.cast(bayer_coupling_matrix,
                                        yp.getDatatype(frame),
                                        yp.getBackend(frame))

        # Define frame vector
        for bayer_pattern_index in range(4):
            pixel_offsets = (0, 0)
            if bayer_pattern_index == 3:
                img_sub = frame[pixel_offsets[0]::2, pixel_offsets[1]::2]
            elif bayer_pattern_index == 1:
                img_sub = frame[pixel_offsets[0]::2, pixel_offsets[1] + 1::2]
            elif bayer_pattern_index == 2:
                img_sub = frame[pixel_offsets[0] + 1::2, pixel_offsets[1]::2]
            elif bayer_pattern_index == 0:
                img_sub = frame[pixel_offsets[0] + 1::2, pixel_offsets[1] + 1::2]
            frame_vec[bayer_pattern_index, :] = yp.dcopy(yp.vec(img_sub))
            if debug:
                print("Channel %d mean is %g" % (bayer_pattern_index, yp.scalar(yp.real(yp.sum(img_sub)))))

        # Perform demosaic using least squares
        result = yp.linalg.lstsq(bayer_coupling_matrix, frame_vec)

        result -= yp.amin(result)
        result /= yp.amax(result)
        for channel in range(3):
            values = result[channel]
            frame_out[:, :, channel] = yp.reshape(values, ((yp.shape(frame_out)[0], yp.shape(frame_out)[1])))
            if white_balance:
                frame_out[:, :, channel] -= yp.amin(frame_out[:, :, channel])
                frame_out[:, :, channel] /= yp.amax(frame_out[:, :, channel])
        return frame_out
    else:
        frame_out = yp.zeros((int(yp.shape(frame)[0] / 2), int(yp.shape(frame)[1] / 2), 3),
                             dtype=yp.getDatatype(frame), backend=yp.getBackend(frame))

        # Get color order from order variable
        b_index = order.find('b')
        r_index = order.find('r')
        g1_index = order.find('g')

        # Get g2 from intersection of sets
        g2_index = set(list(range(4))).difference({b_index, r_index, g1_index}).pop()
        #  +-----+-----+
        #  |  0  |  1  |
        #  +-----+-----+
        #  |  2  |  3  |
        #  +-----+-----+

        if debug:
            import matplotlib.pyplot as plt
            plt.figure()
            plt.imshow(frame[:12, :12])

        r_start = (int(r_index in [2, 3]), int(r_index in [1, 3]))
        g1_start = (int(g1_index in [2, 3]), int(g1_index in [1, 3]))
        g2_start = (int(g2_index in [2, 3]), int(g2_index in [1, 3]))
        b_start = (int(b_index in [2, 3]), int(b_index in [1, 3]))

        frame_out[:, :, 0] = frame[r_start[0]::2, r_start[1]::2]
        frame_out[:, :, 1] = (frame[g1_start[0]::2, g1_start[1]::2] + frame[g2_start[0]::2, g2_start[1]::2]) / 2.0
        frame_out[:, :, 2] = frame[b_start[0]::2, b_start[1]::2]

        # normalize
        frame_out /= yp.max(frame_out)

        # Perform white balancing if desired
        if white_balance:
            for channel in range(3):
                frame_out[:, :, channel] /= yp.max(frame_out[:, :, channel])

        # Return frame
        return frame_out
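In the no-coupling-matrix path, each color channel is a 2x subsampling of the raw frame at its Bayer offset, and the two green sites are averaged. A NumPy sketch for the default 'grbg' order, using the same quadrant numbering as the diagram above (0 top-left, 1 top-right, 2 bottom-left, 3 bottom-right):

import numpy as np

raw = np.arange(16.0).reshape(4, 4)  # stand-in for a raw Bayer frame
# 'grbg': g1 top-left, r top-right, b bottom-left, g2 bottom-right
g1 = raw[0::2, 0::2]
r = raw[0::2, 1::2]
b = raw[1::2, 0::2]
g2 = raw[1::2, 1::2]

rgb = np.dstack([r, (g1 + g2) / 2.0, b])
print(rgb.shape)  # (2, 2, 3): half resolution per channel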
Example #13
File: composite.py Project: zfphil/llops
def ConvolutionOld(kernel,
                   dtype=None,
                   backend=None,
                   normalize=False,
                   mode='circular',
                   label='C',
                   pad_value='mean',
                   pad_size=None,
                   fft_backend=None,
                   inverse_regularizer=0,
                   center=False,
                   inner_operator=None,
                   force_full_convolution=False):
    """Convolution linear operator"""

    # Get temporary kernel to account for inner_operator size
    _kernel = kernel if inner_operator is None else inner_operator * kernel

    # Check number of dimensions
    N = _kernel.shape
    dim_count = len(N)
    assert dim_count == ndim(_kernel)

    # Get kernel backend
    if backend is None:
        backend = getBackend(kernel)
    else:
        # Convert kernel to provided backend
        kernel = asbackend(kernel, backend)

    # Get kernel dtype
    if dtype is None:
        dtype = getDatatype(kernel)
    else:
        kernel = astype(kernel, dtype)

    # Determine if the kernel is a shifted delta function - if so, return a
    # shift operator masked as a convolution
    position_list = tuple([
        tuple(np.asarray(pos) - np.asarray(shape(_kernel)) // 2)
        for pos in where(_kernel != 0.0)
    ])
    mode = 'discrete' if len(
        position_list) == 1 and not force_full_convolution else mode

    # Discrete convolution
    if mode == 'discrete':
        # Create shift operator, or identity operator if there is no shift.
        if all([pos == 0.0 for pos in position_list[0]]):
            op = Identity(N)
        else:
            op = Shift(N, position_list[0])

        loc = where(_kernel != 0)[0]

        # If the kernel is not binary, normalize to the correct value
        if scalar(_kernel[loc[0], loc[1]]) != 1.0:
            op *= scalar(_kernel[loc[0], loc[1]])

        # Update label to indicate this is a shift-based convolution
        label += '_{shift}'

        # Normalize if desired
        if normalize:
            op *= 1 / np.sqrt(np.size(kernel))

    elif mode in ['windowed', 'circular']:
        # The only difference between circular and non-circular convolution is
        # the pad size. We'll define this first, then define the convolution in
        # a common framework.

        if mode == 'circular':

            # Pad kernel to efficient size
            N_pad = list(N)
            for ind, d in enumerate(N):
                if next_fast_len(d) != d:
                    N_pad[ind] = next_fast_len(d)

            crop_start = [0] * len(N_pad)

        elif mode == 'windowed':
            if pad_size is None:
                # Determine support of kernel
                kernel_support_roi = boundingBox(kernel, return_roi=True)
                N_pad_raw = (np.asarray(N) +
                             np.asarray(kernel_support_roi.size).tolist())
                N_pad = [next_fast_len(sz) for sz in N_pad_raw]

                # Create pad and crop operator
                crop_start = [(N_pad[dim] - N[dim]) // 2
                              for dim in range(len(N))]
            else:
                if type(pad_size) not in (list, tuple, np.ndarray):
                    pad_size = (pad_size, pad_size)
                N_pad = (pad_size[0] + N[0], pad_size[1] + N[1])

                crop_start = None

        # Create pad operator
        P = Pad(N,
                N_pad,
                pad_value=pad_value,
                pad_start=crop_start,
                backend=backend,
                dtype=dtype)

        # Create F.T. operator
        F = FourierTransform(N_pad,
                             dtype=dtype,
                             backend=backend,
                             normalize=normalize,
                             fft_backend=fft_backend,
                             pad=False,
                             center=center)

        # Optionally create FFTShift operator
        if not center:
            FFTS = FFTShift(N_pad, dtype=dtype, backend=backend)
        else:
            FFTS = Identity(N_pad, dtype=dtype, backend=backend)

        # Diagonalize kernel
        K = Diagonalize(kernel,
                        inner_operator=F * FFTS * P,
                        inverse_regularizer=inverse_regularizer,
                        label=label)

        # Generate composite op
        op = P.H * F.H * K * F * P

        # Define inversion function
        def _inverse(self, x, y):
            # Get current kernel
            kernel_f = F * FFTS * P * kernel

            # Invert and create operator
            kernel_f_inv = conj(kernel_f) / (abs(kernel_f)**2 +
                                             self.inverse_regularizer)
            K_inverse = Diagonalize(kernel_f_inv,
                                    backend=backend,
                                    dtype=dtype,
                                    label=label)

            # Set output
            y[:] = P.H * F.H * K_inverse * F * P * x

        # Set inverse function
        op._inverse = types.MethodType(_inverse, op)

    else:
        raise ValueError(
            'Convolution mode %s is not defined! Valid options are "circular" and "windowed"'
            % mode)

    # Append type to label
    if '_' not in label:
        label += '_{' + mode + '}'

    # Set label
    op.label = label

    # Set inverse_regularizer
    op.inverse_regularizer = inverse_regularizer

    # Set latex to be just label
    def repr_latex(latex_input=None):
        if latex_input is None:
            return op.label
        else:
            return op.label + ' \\times ' + latex_input

    op.repr_latex = repr_latex

    return op
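The circular branch above factors the operator as op = P.H * F.H * K * F * P, i.e. convolution diagonalized by the Fourier transform. A NumPy sketch checking that identity against a direct circular convolution:

import numpy as np

rng = np.random.default_rng(0)
x = rng.random(8)
k = np.array([0.25, 0.5, 0.25, 0, 0, 0, 0, 0])

# F.H * diag(F k) * F applied to x ...
fft_based = np.real(np.fft.ifft(np.fft.fft(k) * np.fft.fft(x)))

# ... equals direct circular convolution
direct = np.array([sum(k[j] * x[(i - j) % 8] for j in range(8)) for i in range(8)])
print(np.allclose(fft_based, direct))  # True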