Example #1
    def __init__(self, shape, label='TV', dtype=None, backend=None):

        # Configure backend and datatype
        self.backend = backend if backend is not None else config.default_backend
        self.dtype = dtype if dtype is not None else config.default_dtype

        self.D = len(shape)
        self.threshold = 1.41 * 2 * self.D  # approximately sqrt(2) * 2D

        # Pre-allocate intermediate arrays before calculation for efficiency
        self._forward_y = zeros(shape, self.dtype, self.backend)
        self._prox_y = zeros(shape, self.dtype, self.backend)
        self._haar_w = zeros(shape, self.dtype, self.backend)
        self._haar_y = zeros(shape, self.dtype, self.backend)

        # Initialize parent Operator class
        super().__init__(((1, 1), shape),
                         self.dtype,
                         self.backend,
                         label=label,
                         forward=self._forward,
                         proximal=self._proximal,
                         repr_latex=self._latex,
                         smooth=False)
Example #2
    def setup_method(self, test_method):
        # Generate a random test object
        x_0 = yp.rand(image_size)

        # Convert object to desired backend
        self.x = yp.changeBackend(x_0, global_backend)

        # Generate convolution kernel h
        h_size = np.array([4, 4])
        self.h = yp.zeros(image_size, global_dtype, global_backend)
        self.h[image_size[0] // 2 - h_size[0] // 2:image_size[0] // 2 + h_size[0] // 2,
               image_size[1] // 2 - h_size[1] // 2:image_size[1] // 2 + h_size[1] // 2] = \
            yp.randn((h_size[0], h_size[1]), global_dtype, global_backend)

        # A = ops.Convolution(image_size, h, dtype=global_dtype, fft_backend='numpy', backend=global_backend)
        self.A = ops.FourierTransform(image_size, dtype=global_dtype, backend=global_backend, center=True)
        self.y = self.A(yp.vectorize(self.x))
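
The commented-out `ops.Convolution` line above presumably applies the kernel `h` through the FFT (its `fft_backend` argument suggests as much). A minimal numpy sketch of that forward model, assuming a centered kernel and circular boundary conditions:

import numpy as np

def fft_convolve(x, h):
    # Circular convolution via the FFT; ifftshift undoes the centered kernel layout
    return np.real(np.fft.ifft2(np.fft.fft2(x) * np.fft.fft2(np.fft.ifftshift(h))))

x = np.random.rand(16, 16)
h = np.zeros((16, 16))
h[7:9, 7:9] = 0.25  # small centered box kernel
y = fft_convolve(x, h)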
Example #3
    def test_operator_stacking_linear(self):
        # Create list of operators
        op_list_linear = [
            ops.FourierTransform(image_size, dtype=global_dtype, backend=global_backend),
            ops.Identity(image_size, dtype=global_dtype, backend=global_backend),
            ops.Exponential(image_size, dtype=global_dtype, backend=global_backend)
        ]

        # Horizontally stacked operators
        H_l = ops.Hstack(op_list_linear)

        # Vertically stack x for forward operator
        x_np = yp.changeBackend(self.x, 'numpy')
        x3 = yp.changeBackend(np.vstack((x_np, x_np, x_np)), global_backend)

        # Check forward operation
        y2 = yp.zeros(op_list_linear[0].N, op_list_linear[0].dtype, op_list_linear[0].backend)

        for op in op_list_linear:
            y2 = y2 + op * self.x

        assert yp.sumb(yp.abs(yp.changeBackend(H_l(x3) - y2, 'numpy'))) < eps, "%.4e" % yp.sumb(yp.abs(H_l(x3) - y2))

        # Check gradient
        H_l.gradient_check()

        # Create vertically stacked operator
        V_l = ops.Vstack(op_list_linear)

        # Check forward operator
        y3 = np.empty((0, image_size[1]), dtype=yp.getNativeDatatype(global_dtype, 'numpy'))
        for index, op in enumerate(op_list_linear):
            y3 = np.append(y3, (op * self.x), axis=0)

        y3 = yp.changeBackend(y3, global_backend)
        assert yp.sumb(yp.abs(V_l * self.x - y3)) < eps, "%.4e" % yp.sumb(yp.abs(V_l * self.x - y3))

        # Check gradient
        V_l.gradient_check()
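
The semantics this test exercises can be written out with plain numpy block matrices (a sketch of the idea, not the ops implementation): Hstack applies one operator per segment of a partitioned input and sums the results, while Vstack applies every operator to the same input and concatenates the outputs.

import numpy as np

A1, A2 = np.random.randn(4, 3), np.random.randn(4, 3)
x1, x2 = np.random.randn(3), np.random.randn(3)

# Hstack: [A1 A2] @ [x1; x2] == A1 @ x1 + A2 @ x2
assert np.allclose(np.hstack((A1, A2)) @ np.concatenate((x1, x2)),
                   A1 @ x1 + A2 @ x2)

# Vstack: [A1; A2] @ x == concat(A1 @ x, A2 @ x)
assert np.allclose(np.vstack((A1, A2)) @ x1,
                   np.concatenate((A1 @ x1, A2 @ x1)))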
Example #4
def VecStack(vector_list, axis=0):
    """ This is a helper function to stack vectors """

    # Determine output size
    single_vector_shape = [
        max([shape(vector)[0] for vector in vector_list]),
        max([shape(vector)[1] for vector in vector_list])
    ]
    vector_shape = dcopy(single_vector_shape)
    vector_shape[axis] *= len(vector_list)

    # Allocate memory
    vector_full = zeros(vector_shape, getDatatype(vector_list[0]),
                        getBackend(vector_list[0]))

    # Assign vector elements to the output, offsetting along the stack axis
    for index, vector in enumerate(vector_list):
        padded = pad(vector, single_vector_shape)
        if axis == 0:
            vector_full[index * single_vector_shape[0]:(index + 1) * single_vector_shape[0], :] = padded
        else:
            vector_full[:, index * single_vector_shape[1]:(index + 1) * single_vector_shape[1]] = padded

    # Return
    return vector_full
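
A pure-numpy analogue may make the pad-then-stack behavior easier to see; `vec_stack_np` below is an illustrative rewrite, not part of the library:

import numpy as np

def vec_stack_np(vector_list, axis=0):
    # Zero-pad every vector to the largest shape in the list,
    # then concatenate along the requested axis
    rows = max(v.shape[0] for v in vector_list)
    cols = max(v.shape[1] for v in vector_list)
    padded = [np.pad(v, ((0, rows - v.shape[0]), (0, cols - v.shape[1])))
              for v in vector_list]
    return np.concatenate(padded, axis=axis)

stacked = vec_stack_np([np.ones((2, 1)), np.zeros((3, 1))])  # shape (6, 1)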
Example #5
    def blur_vectors(self, dtype=None, backend=None, debug=False,
                     use_phase_ramp=False, corrections=None):
        """
        This function generates the object size, image size, and blur kernels from
        a libwallerlab dataset object.

            Args:
                dataset: An io.Dataset object
                dtype [np.float32]: Which datatype to use for kernel generation (All numpy datatypes supported)
            Returns:
                object_size: The object size this dataset can recover
                image_size: The computed image size of the dataset
                blur_kernel_list: A dictionary of blur kernels lists, one key per color channel.

        """
        # Assign dataset
        dataset = self

        # Get corrections from metadata, unless provided explicitly
        if corrections is None:
            corrections = {}
        if len(corrections) == 0 and 'blur_vector' in self.metadata.calibration:
            corrections = dataset.metadata.calibration['blur_vector']

        # Get datatype and backends
        dtype = dtype if dtype is not None else yp.config.default_dtype
        backend = backend if backend is not None else yp.config.default_backend

        # Calculate effective pixel size if necessary
        if dataset.metadata.system.eff_pixel_size_um is None:
            dataset.metadata.system.eff_pixel_size_um = dataset.metadata.camera.pixel_size_um / \
                (dataset.metadata.objective.mag * dataset.metadata.system.mag)

        # Recover and store position and illumination list
        blur_vector_roi_list = []
        position_list, illumination_list = [], []
        frame_segment_map = []

        for frame_index in range(dataset.shape[0]):
            frame_state = dataset.frame_state_list[frame_index]

            # Store which segment this measurement uses
            frame_segment_map.append(frame_state['position']['common']['linear_segment_index'])

            # Extract list of illumination values for each time point
            if 'illumination' in frame_state:
                illumination_list_frame = []
                if type(frame_state['illumination']) is str:
                    illum_state_list = self._frame_state_list[0]['illumination']['states']
                else:
                    illum_state_list = frame_state['illumination']['states']
                for time_point in illum_state_list:
                    illumination_list_time_point = []
                    for illumination in time_point:
                        illumination_list_time_point.append(
                            {'index': illumination['index'], 'value': illumination['value']})
                    illumination_list_frame.append(illumination_list_time_point)

            else:
                raise ValueError('Frame %d does not contain illumination information' % frame_index)

            # Extract list of positions for each time point
            if 'position' in frame_state:
                position_list_frame = []
                for time_point in frame_state['position']['states']:
                    position_list_time_point = []
                    for position in time_point:
                        # Effective pixel size is needed for unit conversion below
                        ps_um = dataset.metadata.system.eff_pixel_size_um
                        if 'units' in position['value']:
                            if position['value']['units'] == 'mm':
                                position_list_time_point.append(
                                    [1000 * position['value']['y'] / ps_um, 1000 * position['value']['x'] / ps_um])
                            elif position['value']['units'] == 'um':
                                position_list_time_point.append(
                                    [position['value']['y'] / ps_um, position['value']['x'] / ps_um])
                            elif position['value']['units'] == 'pixels':
                                position_list_time_point.append([position['value']['y'], position['value']['x']])
                            else:
                                raise ValueError('Invalid units %s for position in frame %d' %
                                                 (position['value']['units'], frame_index))
                        else:
                            # Could not find position units in metadata; assume mm
                            position_list_time_point.append(
                                [1000 * position['value']['y'] / ps_um, 1000 * position['value']['x'] / ps_um])

                    position_list_frame.append(position_list_time_point[0])  # Assuming single time point for now.

                # Define positions and position indices used
                positions_used, position_indicies_used = [], []
                for index, pos in enumerate(position_list_frame):
                    # Keep this position if any color channel is illuminated
                    if any([illumination_list_frame[index][0]['value'][color] > 0 for color in illumination_list_frame[index][0]['value']]):
                        position_indicies_used.append(index)
                        positions_used.append(pos)

                # Generate ROI for this blur vector
                from htdeblur.blurkernel import getPositionListBoundingBox
                blur_vector_roi = getPositionListBoundingBox(positions_used)

                # Append to list
                blur_vector_roi_list.append(blur_vector_roi)

                # Crop illumination list to values within the support used
                illumination_list.append([illumination_list_frame[index] for index in range(min(position_indicies_used), max(position_indicies_used) + 1)])

                # Store corresponding positions
                position_list.append(positions_used)

        # Apply kernel scaling or compression if necessary
        if 'scale' in corrections:

            # We need to use phase-ramp based kernel generation if we modify the positions
            use_phase_ramp = True

            # Modify position list
            for index in range(len(position_list)):
                _positions = np.asarray(position_list[index])
                factor, axis = corrections['scale']['factor'], corrections['scale']['axis']
                _positions[:, axis] = ((_positions[:, axis] - yp.min(_positions[:, axis])) * factor + yp.min(_positions[:, axis]))
                position_list[index] = _positions.tolist()

        # Synthesize blur vectors
        blur_vector_list = []
        for frame_index in range(dataset.shape[0]):
            #  Generate blur vectors
            if use_phase_ramp:
                from llops.operators import PhaseRamp
                kernel_shape = [yp.fft.next_fast_len(max(sh, 1)) for sh in blur_vector_roi_list[frame_index].shape]
                offset = yp.cast([sh // 2 + st for (sh, st) in zip(kernel_shape, blur_vector_roi_list[frame_index].start)], 'complex32', dataset.backend)

                # Create phase ramp and calculate offset
                R = PhaseRamp(kernel_shape, dtype='complex32', backend=dataset.backend)

                # Generate blur vector
                blur_vector = yp.zeros(R.M, dtype='complex32', backend=dataset.backend)
                for pos, illum in zip(position_list[frame_index], illumination_list[frame_index]):
                    pos = yp.cast(pos, dtype=dataset.dtype, backend=dataset.backend)
                    blur_vector += (R * (yp.cast(pos - offset, 'complex32')))

                # Take the magnitude of the inverse Fourier transform
                blur_vector = yp.abs(yp.iFt(blur_vector))

                if position_list[frame_index][0][-1] > position_list[frame_index][0][0]:
                    blur_vector = yp.flip(blur_vector)

            else:
                blur_vector = yp.asarray([illum[0]['value']['w'] for illum in illumination_list[frame_index]],
                                         dtype=dtype, backend=backend)

            # Normalize illumination vectors
            blur_vector /= yp.scalar(yp.sum(blur_vector))

            # Append to list
            blur_vector_list.append(blur_vector)

        # Return
        return blur_vector_list, blur_vector_roi_list
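
The phase-ramp branch relies on the Fourier shift theorem: a delta at position p has spectrum exp(-2*pi*i*k*p), so summing weighted phase ramps and inverse-transforming yields the blur kernel, with sub-pixel positions handled for free. A 1-D numpy sketch of that idea (illustrative, not the llops PhaseRamp operator):

import numpy as np

def blur_vector_from_positions(n, positions, weights):
    # The spectrum of a train of weighted deltas is a sum of weighted phase ramps
    k = np.fft.fftfreq(n)  # DFT frequencies in cycles per sample
    spectrum = np.zeros(n, dtype=complex)
    for p, w in zip(positions, weights):
        spectrum += w * np.exp(-2j * np.pi * k * p)
    return np.abs(np.fft.ifft(spectrum))  # kernel in the spatial domain

kernel = blur_vector_from_positions(64, positions=[10.0, 10.5, 11.0],
                                    weights=[1.0, 1.0, 1.0])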
Example #6
File: iterative.py  Project: zfphil/llops
    def _initialize(self, initialization=None, **kwargs):

        for keyword in kwargs:
            if hasattr(self, keyword):
                setattr(self, keyword, kwargs[keyword])
            else:
                print('Ignoring keyword %s' % keyword)

        # Show objective function(s)
        if self.display_type is not None:
            if type(self.objective) in (list, tuple):
                print('Minimizing functions:')
                for objective in self.objective:
                    objective.latex()
            else:
                print('Minimizing function:')
                self.objective.latex()

        # Generate initialization
        if type(self.objective) not in (list, tuple):
            if initialization is None:
                self.x = yp.zeros(self.objective.N,
                                  dtype=self.objective.dtype,
                                  backend=self.objective.backend)
            else:
                self.x = yp.dcopy(initialization)
        else:
            # Create random initializations for all variables
            self.x = []
            for index, objective in enumerate(self.objective_list):
                if initialization is None:
                    self.x.append(
                        yp.zeros(objective.N, objective.dtype,
                                 objective.backend))
                else:
                    assert type(initialization) in (list, tuple)
                    self.x.append(initialization[index])

        # Generate plotting interface
        if self.display_type == 'text':
            # Create text plot object
            self.plot = display.IterationText()

        elif self.display_type == 'plot':
            # Create figure if axis was not provided
            if 'ax' in kwargs:
                self.ax = kwargs['ax']
                self.fig = self.ax.figure
            else:
                self.fig = plt.figure(figsize=kwargs.pop('figsize', (5, 3)))
                self.ax = plt.gca()

            # Create iteration plot object
            use_log = (kwargs.pop('use_log_x', False),
                       kwargs.pop('use_log_y', False))
            max_iter = kwargs.pop('max_iter_plot', self._default_iteration_count)
            self.plot = display.IterationPlot(self.ax, max_iter, use_log=use_log)
            self.fig.canvas.draw()
            plt.tight_layout()

        # Update plot with first value
        if self.plot is not None:
            objective_value = self.objective(self.x) if not self.multi_objective \
                else self.objective[0](self.x[0])

            self.plot.update(0,
                             abs(yp.scalar(objective_value)),
                             new_time=0,
                             step_norm=0)
            self.t0 = time.time()

        # If using Nesterov acceleration, initialize NesterovAccelerator class
        if self.use_nesterov_acceleration:
            self.nesterov = NesterovAccelerator(self.objective,
                                                self.nesterov_restart_enabled)

            # Enable restarting if desired
            if self.nesterov_restart_enabled:
                self.let_diverge = True  # We need to let nesterov diverge a bit

        self.initialized = True
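
For reference, the momentum scheme a NesterovAccelerator like the one above typically implements is the FISTA update: take the gradient step at a point extrapolated from the last two iterates. A minimal numpy sketch with illustrative names (not the llops API):

import numpy as np

def nesterov_gradient_descent(grad, x0, step, iterations=100):
    # FISTA-style momentum: evaluate the gradient at an extrapolated point
    x, x_prev, t = x0.copy(), x0.copy(), 1.0
    for _ in range(iterations):
        t_next = (1.0 + np.sqrt(1.0 + 4.0 * t * t)) / 2.0
        z = x + ((t - 1.0) / t_next) * (x - x_prev)  # extrapolation step
        x_prev, x = x, z - step * grad(z)
        t = t_next
    return x

# Minimize 0.5 * ||x - b||^2; the fixed point is x = b
b = np.random.randn(8)
x_star = nesterov_gradient_descent(lambda x: x - b, np.zeros(8), step=0.5)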
Example #7
File: camera.py  Project: zfphil/comptic
def demosaic(frame,
             order='grbg',
             bayer_coupling_matrix=None,
             debug=False,
             white_balance=False):

    """Demosaic a raw Bayer-patterned frame into an RGB image."""
    # Notes on Bayer orderings observed so far:
    #   bgrg: cells very green
    #   rggb: slight green tint

    frame_out = yp.zeros((int(yp.shape(frame)[0] / 2), int(yp.shape(frame)[1] / 2), 3),
                         yp.getDatatype(frame), yp.getBackend(frame))

    if bayer_coupling_matrix is not None:
        frame_vec = yp.zeros((4, int(yp.shape(frame)[0] * yp.shape(frame)[1] / 4)), yp.getDatatype(frame), yp.getBackend(frame))

        # Cast bayer coupling matrix
        bayer_coupling_matrix = yp.cast(bayer_coupling_matrix,
                                        yp.getDatatype(frame),
                                        yp.getBackend(frame))

        # Define frame vector
        for bayer_pattern_index in range(4):
            pixel_offsets = (0, 0)
            if bayer_pattern_index == 3:
                img_sub = frame[pixel_offsets[0]::2, pixel_offsets[1]::2]
            elif bayer_pattern_index == 1:
                img_sub = frame[pixel_offsets[0]::2, pixel_offsets[1] + 1::2]
            elif bayer_pattern_index == 2:
                img_sub = frame[pixel_offsets[0] + 1::2, pixel_offsets[1]::2]
            elif bayer_pattern_index == 0:
                img_sub = frame[pixel_offsets[0] + 1::2, pixel_offsets[1] + 1::2]
            frame_vec[bayer_pattern_index, :] = yp.dcopy(yp.vec(img_sub))
            if debug:
                print("Channel %d mean is %g" % (bayer_pattern_index, yp.scalar(yp.real(yp.sum(img_sub)))))

        # Perform demosaic using least squares
        result = yp.linalg.lstsq(bayer_coupling_matrix, frame_vec)

        result -= yp.amin(result)
        result /= yp.amax(result)
        for channel in range(3):
            values = result[channel]
            frame_out[:, :, channel] = yp.reshape(values, ((yp.shape(frame_out)[0], yp.shape(frame_out)[1])))
            if white_balance:
                frame_out[:, :, channel] -= yp.amin(frame_out[:, :, channel])
                frame_out[:, :, channel] /= yp.amax(frame_out[:, :, channel])
        return frame_out
    else:
        # frame_out is already allocated above with the correct shape

        # Get color order from order variable
        b_index = order.find('b')
        r_index = order.find('r')
        g1_index = order.find('g')

        # Get g2 from intersection of sets
        g2_index = set(list(range(4))).difference({b_index, r_index, g1_index}).pop()
        #  +-----+-----+
        #  |  0  |  1  |
        #  +-----+-----+
        #  |  2  |  3  |
        #  +-----+-----+

        if debug:
            import matplotlib.pyplot as plt
            plt.figure()
            plt.imshow(frame[:12, :12])

        r_start = (int(r_index in [2, 3]), int(r_index in [1, 3]))
        g1_start = (int(g1_index in [2, 3]), int(g1_index in [1, 3]))
        g2_start = (int(g2_index in [2, 3]), int(g2_index in [1, 3]))
        b_start = (int(b_index in [2, 3]), int(b_index in [1, 3]))

        frame_out[:, :, 0] = frame[r_start[0]::2, r_start[1]::2]
        frame_out[:, :, 1] = (frame[g1_start[0]::2, g1_start[1]::2] + frame[g2_start[0]::2, g2_start[1]::2]) / 2.0
        frame_out[:, :, 2] = frame[b_start[0]::2, b_start[1]::2]

        # normalize
        frame_out /= yp.max(frame_out)

        # Perform white balancing if desired
        if white_balance:
            for channel in range(3):
                frame_out[:, :, channel] /= yp.max(frame_out[:, :, channel])

        # Return frame
        return frame_out
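
The coupling-matrix branch solves, for every 2x2 Bayer cell, the same small linear system C @ channels = measurements, batched into one least-squares solve. A toy numpy sketch of that core step (the 4x4 coupling matrix here is made up for illustration; a real one comes from camera calibration):

import numpy as np

# Hypothetical 4x4 matrix coupling the four Bayer cell measurements
C = np.eye(4) + 0.05 * np.random.rand(4, 4)

bayer_vec = np.random.rand(4, 1000)  # one column per 2x2 Bayer cell
channels, *_ = np.linalg.lstsq(C, bayer_vec, rcond=None)

# Normalize to [0, 1], as the demosaic routine above does
channels = (channels - channels.min()) / (channels.max() - channels.min())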