Code example #1
File: simulation.py  Project: zfphil/comptic
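# Note: in this project, `yp` refers to the llops array library (conventionally
# `import llops as yp` in comptic); `_loadImage` is a module-private helper not
# shown in this excerpt.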
def testObject(absorption,
               shape=None,
               phase=None,
               invert=False,
               invert_phase=False,
               dtype=None,
               backend=None,
               **kwargs):

    # Load absorption image
    test_object = _loadImage(absorption, shape, dtype, backend, **kwargs)

    # Normalize
    test_object -= yp.min(test_object)
    test_object /= yp.max(test_object)

    # invert if requested
    if invert:
        test_object = 1 - test_object

    # Apply correct range to absorption
    absorption_max = kwargs.get('max_value', 1.1)
    absorption_min = kwargs.get('min_value', 0.9)
    test_object *= (absorption_max - absorption_min)
    test_object += absorption_min

    # Add phase if label is provided
    if phase:
        # Load phase image
        phase = _loadImage(phase, shape, **kwargs)

        # invert if requested
        if invert_phase:
            phase = 1 - phase

        # Normalize
        phase -= yp.min(phase)
        phase /= yp.max(phase)

        # Apply correct range to phase
        phase_max = kwargs.get('max_value_phase', 1)
        phase_min = kwargs.get('min_value_phase', 0)
        phase *= (phase_max - phase_min)
        phase += phase_min

        # Add phase to test_object
        test_object = yp.astype(test_object, 'complex32')
        test_object *= yp.exp(
            1j * yp.astype(yp.real(phase), yp.getDatatype(test_object)))

    # Cast to correct dtype and backend
    return yp.cast(test_object, dtype, backend)
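
A brief usage sketch (hedged: the import path comptic.simulation and the image label 'brain' are assumptions based on the file and project names above):

from comptic import simulation

test_object = simulation.testObject('brain',           # absorption image label or array
                                    shape=(256, 256),  # resample to this shape
                                    phase='brain',     # optional phase image label
                                    dtype='complex32')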
Code example #2
File: noise.py  Project: zfphil/comptic
def cnr(signal, noise_roi=None, signal_roi=None):
    """ Calculate the imaging contrast to noise ratio (CNR) of an image """
    # Reference: https://en.wikipedia.org/wiki/Contrast-to-noise_ratio

    # Calculate signal contrast, using ROI if provided
    if signal_roi is None:
        signal_contrast = yp.abs(yp.max(signal) - yp.min(signal))
    else:
        signal_contrast = yp.abs(yp.max(signal[signal_roi.slice]) -
                                 yp.min(signal[signal_roi.slice]))

    # Calculate noise standard deviation, using ROI if provided
    signal_std = yp.std(signal) if noise_roi is None else yp.std(signal[noise_roi.slice])

    return (signal_contrast / signal_std)
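
A usage sketch with synthetic data (illustrative; assumes the yp functions accept plain numpy arrays, as llops generally does):

import numpy as np

image = np.ones((128, 128))
image[32:96, 32:96] = 2.0                  # high-contrast square
image += 0.05 * np.random.randn(128, 128)  # additive Gaussian noise

print(cnr(image))  # contrast (max - min) divided by the overall standard deviation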
Code example #3
    # Inner wrapper from a decorator; `func` is the function being wrapped
    # (the enclosing decorator definition is not shown in this excerpt).
    def as_integer(*args, **kwargs):

        # Store original datatype
        original_dtype = getDatatype(args[0])

        # Store original range as (min, max)
        extent = min(args[0]), max(args[0])

        # Rescale the first argument (x) to the full uint16 range [0, 65535]
        args = list(args)
        args[0] = astype(65535.0 * (args[0] - extent[0]) / (extent[1] - extent[0]), 'uint16')
        args = tuple(args)

        # Call the function, then map the result back to the original range and datatype
        return astype(func(*args, **kwargs) / 65535.0 * (extent[1] - extent[0]) + extent[0], original_dtype)
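
For context, a self-contained sketch of the same round-trip pattern using plain NumPy in place of the llops helpers (the decorator name as_integer_operation is hypothetical):

import numpy as np

def as_integer_operation(func):
    """Run `func` on a uint16-quantized copy of x, then map the result
    back to x's original range and datatype."""
    def as_integer(x, *args, **kwargs):
        original_dtype = x.dtype
        extent = x.min(), x.max()

        # Quantize to the full uint16 range [0, 65535]
        x_int = (65535.0 * (x - extent[0]) / (extent[1] - extent[0])).astype(np.uint16)

        # Call the wrapped function, then undo the quantization
        result = func(x_int, *args, **kwargs)
        return (result / 65535.0 * (extent[1] - extent[0]) + extent[0]).astype(original_dtype)
    return as_integer

@as_integer_operation
def identity(x):
    return x

x = np.linspace(-1.0, 1.0, 5, dtype=np.float32)
print(identity(x))  # approximately [-1., -0.5, 0., 0.5, 1.] up to quantization error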
Code example #4
File: mddataset.py  Project: zfphil/htdeblur
    def blur_vectors(self, dtype=None, backend=None, debug=False,
                     use_phase_ramp=False, corrections=None):
        """
        This function generates the object size, image size, and blur kernels from
        a libwallerlab dataset object.

            Args:
                dataset: An io.Dataset object
                dtype [np.float32]: Which datatype to use for kernel generation (All numpy datatypes supported)
            Returns:
                object_size: The object size this dataset can recover
                image_size: The computed image size of the dataset
                blur_kernel_list: A dictionary of blur kernels lists, one key per color channel.

        """
        # Assign dataset
        dataset = self

        # Get corrections from metadata if none were provided
        if corrections is None:
            corrections = {}
        if len(corrections) == 0 and 'blur_vector' in self.metadata.calibration:
            corrections = dataset.metadata.calibration['blur_vector']

        # Get datatype and backends
        dtype = dtype if dtype is not None else yp.config.default_dtype
        backend = backend if backend is not None else yp.config.default_backend

        # Calculate effective pixel size if necessary
        if dataset.metadata.system.eff_pixel_size_um is None:
            dataset.metadata.system.eff_pixel_size_um = dataset.metadata.camera.pixel_size_um / \
                (dataset.metadata.objective.mag * dataset.metadata.system.mag)

        # Recover and store position and illumination list
        blur_vector_roi_list = []
        position_list, illumination_list = [], []
        frame_segment_map = []

        for frame_index in range(dataset.shape[0]):
            frame_state = dataset.frame_state_list[frame_index]

            # Store which segment this measurement uses
            frame_segment_map.append(frame_state['position']['common']['linear_segment_index'])

            # Extract list of illumination values for each time point
            if 'illumination' in frame_state:
                illumination_list_frame = []
                if isinstance(frame_state['illumination'], str):
                    illum_state_list = self._frame_state_list[0]['illumination']['states']
                else:
                    illum_state_list = frame_state['illumination']['states']
                for time_point in illum_state_list:
                    illumination_list_time_point = []
                    for illumination in time_point:
                        illumination_list_time_point.append(
                            {'index': illumination['index'], 'value': illumination['value']})
                    illumination_list_frame.append(illumination_list_time_point)

            else:
                raise ValueError('Frame %d does not contain illumination information' % frame_index)

            # Extract list of positions for each time point
            if 'position' in frame_state:
                position_list_frame = []
                # Effective pixel size (um), used to convert stage positions to pixels
                ps_um = dataset.metadata.system.eff_pixel_size_um
                for time_point in frame_state['position']['states']:
                    position_list_time_point = []
                    for position in time_point:
                        if 'units' in position['value']:
                            if position['value']['units'] == 'mm':
                                position_list_time_point.append(
                                    [1000 * position['value']['y'] / ps_um, 1000 * position['value']['x'] / ps_um])
                            elif position['value']['units'] == 'um':
                                position_list_time_point.append(
                                    [position['value']['y'] / ps_um, position['value']['x'] / ps_um])
                            elif position['value']['units'] == 'pixels':
                                position_list_time_point.append([position['value']['y'], position['value']['x']])
                            else:
                                raise ValueError('Invalid units %s for position in frame %d' %
                                                 (position['value']['units'], frame_index))
                        else:
                            # No position units in metadata; assume mm
                            position_list_time_point.append(
                                [1000 * position['value']['y'] / ps_um, 1000 * position['value']['x'] / ps_um])

                    position_list_frame.append(position_list_time_point[0])  # Assuming single time point for now.

                # Define positions and position indices used (those with nonzero illumination)
                positions_used, position_indicies_used = [], []
                for index, pos in enumerate(position_list_frame):
                    if any([illumination_list_frame[index][0]['value'][color] > 0 for color in illumination_list_frame[index][0]['value']]):
                        position_indicies_used.append(index)
                        positions_used.append(pos)

                # Generate ROI for this blur vector
                from htdeblur.blurkernel import getPositionListBoundingBox
                blur_vector_roi = getPositionListBoundingBox(positions_used)

                # Append to list
                blur_vector_roi_list.append(blur_vector_roi)

                # Crop illumination list to values within the support used
                illumination_list.append([illumination_list_frame[index] for index in range(min(position_indicies_used), max(position_indicies_used) + 1)])

                # Store corresponding positions
                position_list.append(positions_used)

        # Apply kernel scaling or compression if necessary
        if 'scale' in corrections:

            # We need to use phase-ramp based kernel generation if we modify the positions
            use_phase_ramp = True

            # Modify position list
            for index in range(len(position_list)):
                _positions = np.asarray(position_list[index])
                for scale_correction in corrections['scale']:
                    factor, axis = scale_correction['factor'], scale_correction['axis']
                    _positions[:, axis] = ((_positions[:, axis] - yp.min(_positions[:, axis])) * factor + yp.min(_positions[:, axis]))
                position_list[index] = _positions.tolist()

        # Synthesize blur vectors
        blur_vector_list = []
        for frame_index in range(dataset.shape[0]):
            #  Generate blur vectors
            if use_phase_ramp:
                from llops.operators import PhaseRamp
                kernel_shape = [yp.fft.next_fast_len(max(sh, 1)) for sh in blur_vector_roi_list[frame_index].shape]
                offset = yp.cast([sh // 2 + st for (sh, st) in zip(kernel_shape, blur_vector_roi_list[frame_index].start)], 'complex32', dataset.backend)

                # Create phase ramp and calculate offset
                R = PhaseRamp(kernel_shape, dtype='complex32', backend=dataset.backend)
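                # By the Fourier shift theorem, a point source at position p corresponds to
                # a phase ramp exp(-2*pi*i * k . p) in the frequency domain; summing the
                # ramps for all positions and inverse-transforming yields the blur kernel.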

                # Generate blur vector
                blur_vector = yp.zeros(R.M, dtype='complex32', backend=dataset.backend)
                for pos, illum in zip(position_list[frame_index], illumination_list[frame_index]):
                    pos = yp.cast(pos, dtype=dataset.dtype, backend=dataset.backend)
                    blur_vector += (R * (yp.cast(pos - offset, 'complex32')))

                # Take the inverse Fourier transform; the magnitude is the blur kernel
                blur_vector = yp.abs(yp.iFt(blur_vector))

                if position_list[frame_index][0][-1] > position_list[frame_index][0][0]:
                    blur_vector = yp.flip(blur_vector)

            else:
                blur_vector = yp.asarray([illum[0]['value']['w'] for illum in illumination_list[frame_index]],
                                         dtype=dtype, backend=backend)

            # Normalize illumination vectors
            blur_vector /= yp.scalar(yp.sum(blur_vector))

            # Append to list
            blur_vector_list.append(blur_vector)

        # Return
        return blur_vector_list, blur_vector_roi_list
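
For intuition, a minimal NumPy sketch of the phase-ramp kernel synthesis used in the use_phase_ramp branch above: each source position contributes a frequency-domain phase ramp, and the inverse FFT places a delta at that position (function and variable names here are illustrative, not the llops API):

import numpy as np

def synthesize_blur_kernel(positions, weights, shape):
    # Spatial frequency grids (cycles per sample)
    ky = np.fft.fftfreq(shape[0])[:, None]
    kx = np.fft.fftfreq(shape[1])[None, :]

    # Sum one weighted phase ramp per source position (Fourier shift theorem)
    spectrum = np.zeros(shape, dtype=np.complex128)
    for (y, x), w in zip(positions, weights):
        spectrum += w * np.exp(-2j * np.pi * (ky * y + kx * x))

    # Inverse transform yields (approximate) deltas at the positions
    kernel = np.abs(np.fft.ifft2(spectrum))
    return kernel / kernel.sum()  # normalize to unit sum, as above

kernel = synthesize_blur_kernel([(8.0, 8.0), (8.0, 9.0), (8.0, 10.0)],
                                [1.0, 1.0, 1.0], (32, 32))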