Code Example #1
def dogonvole(image, psf, gpu_algorithm, kernel=(2., 2., 0.), blur=(0.9, 0.9, 0.), niter=10, use_gpu=1):
    """
    Perform deconvolution and difference of gaussian processing.
    Parameters
    ----------
    image : ndarray
    psf : ndarray
    kernel : tuple
    blur : tuple
    niter : int
    Returns
    -------
    image : ndarray
        Processed image same shape as image input.
    """
    global hot_pixels

    if not psf.sum() == 1.:
        raise ValueError("psf must be normalized so it sums to 1")
    image = image.astype('float32')
    imin = image.min()
    border = 1
    for y, x in hot_pixels:
        y_min = y - border
        x_min = x - border
        y_max = y + border
        x_max = x + border
        if y - 1 <= border:
            y_min = 0
        if x - 1 <= border:
            x_min = 0
        if y + 1 >= 2048 - border:
            y_max = 2048
        if x + 1 >= 2048 - border:
            x_max = 2048
        image[y, x] = np.average(image[y_min:y_max, x_min:x_max])

    img_bg = gaussian(image, kernel[:len(image.shape)], preserve_range=True)
    image = numpy.subtract(image, img_bg)
    numpy.place(image, image<0, 1./2**16)
    image = image.astype('uint16')
    if len(image.shape)==3:
        for i in range(image.shape[2]):
            if use_gpu==1:
                image[:,:,i] = gpu_algorithm.run(fd_data.Acquisition(data=image[:,:,i], kernel=psf), niter=niter).data
            else:
                image[:,:,i] = restoration.richardson_lucy(image[:,:,i], psf, niter, clip=False)
    elif len(image.shape)==2:
        if use_gpu==1:
            image = gpu_algorithm.run(fd_data.Acquisition(data=image, kernel=psf), niter=niter).data
        else:
            image = restoration.richardson_lucy(image, psf, niter, clip=False)
    else:
        raise ValueError('image is not a supported dimensionality.')
    image = gaussian(image, blur[:len(image.shape)], preserve_range=True)
    return image
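
Note: dogonvole above expects a pre-initialized gpu_algorithm and a module-level hot_pixels list. A minimal sketch of how a caller might set these up with flowdec follows; the hot-pixel coordinates and the synthetic image/PSF are placeholders, not part of the original code.

import numpy as np
from flowdec import restoration as fd_restoration

# Hypothetical coordinates (y, x) of known bad camera pixels
hot_pixels = [(120, 515), (1987, 42)]

# Build the deconvolver once; initialize() constructs the TensorFlow graph,
# so the same object should be reused across calls rather than recreated
gpu_algorithm = fd_restoration.RichardsonLucyDeconvolver(n_dims=2).initialize()

# The PSF must be normalized to sum to 1 before being passed to dogonvole
psf = np.ones((7, 7), dtype='float32')
psf /= psf.sum()

image = np.random.poisson(10., (2048, 2048)).astype('uint16')  # placeholder data
processed = dogonvole(image, psf, gpu_algorithm)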
Code Example #2
def dogonvole(image, psf, kernel=(2., 2., 0.), blur=(1.2, 1.2, 0.), niter=20):
    """
    Perform deconvolution and difference of gaussian processing.

    Parameters
    ----------
    image : ndarray
    psf : ndarray
    kernel : tuple
    blur : tuple
    niter : int

    Returns
    -------
    image : ndarray
        Processed image same shape as image input.
    """
    global hot_pixels, use_gpu, gpu_algorithm
    if not psf.sum() == 1.:
        raise ValueError("psf must be normalized so it sums to 1")
    image = image.astype('float32')
    imin = image.min()
    for y, x in hot_pixels:
        image[y, x] = imin

    img_bg = ndimage.gaussian_filter(image, kernel[:len(image.shape)])
    image = numpy.subtract(image, img_bg)
    numpy.place(image, image < 0, 1. / 2**16)
    image = image.astype('uint16')
    if len(image.shape) == 3:
        for i in range(image.shape[2]):
            if use_gpu == 1:
                image[:, :, i] = gpu_algorithm.run(
                    fd_data.Acquisition(data=image[:, :, i], kernel=psf),
                    niter=niter).data
            else:
                image[:, :, i] = restoration.richardson_lucy(image[:, :, i],
                                                             psf,
                                                             niter,
                                                             clip=False)
    elif len(image.shape) == 2:
        if use_gpu == 1:
            image = gpu_algorithm.run(fd_data.Acquisition(data=image,
                                                          kernel=psf),
                                      niter=niter).data
        else:
            image = restoration.richardson_lucy(image, psf, niter, clip=False)
    else:
        raise ValueError('image is not a supported dimensionality.')
    image = ndimage.gaussian_filter(image, blur[:len(image.shape)])
    return image
Code Example #3
File: deconvolution.py  Project: eric-czech/codex
    def run(self, tile):
        if not np.issubdtype(tile.dtype, np.unsignedinteger):
            raise ValueError('Only unsigned integer images supported; '
                             'type given = {}'.format(tile.dtype))
        if tile.min() < 0:
            raise ValueError('Image to deconvolve cannot have negative values')

        # Tile should have shape (cycles, z, channel, height, width)
        ncyc, nz, nch, nh, nw = self.config.tile_dims

        psfs = generate_psfs(self.config)
        img_cyc = []
        for icyc in range(ncyc):
            img_ch = []
            for ich in range(nch):
                acq = fd_data.Acquisition(tile[icyc, :, ich, :, :],
                                          kernel=psfs[ich])
                logger.debug(
                    'Running deconvolution for cycle {}, channel {} [dtype = {}]'
                    .format(icyc, ich, acq.data.dtype))
                res = self.algo.run(acq,
                                    self.n_iter,
                                    session_config=get_tf_config(self)).data

                # Restore mean intensity if a scale factor was given
                if self.scale_factor is not None:
                    res = rescale_stack(acq.data, res, self.scale_factor)

                # Clip float32 and convert to type of original image (i.e. w/ no scaling)
                res = np_utils.arr_to_uint(res, acq.data.dtype)

                img_ch.append(res)
            img_cyc.append(np.stack(img_ch, 1))
        return np.stack(img_cyc, 0)
Code Example #4
def mutate(d, data_fn=None, kern_fn=None):
    """Apply functions data and/or kernel function to acquisition"""
    return fd_data.Acquisition(
        data=data_fn(d.data) if data_fn else d.data,
        actual=data_fn(d.actual) if data_fn else d.actual,
        kernel=kern_fn(d.kernel) if kern_fn else d.kernel,
    )
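
A short usage sketch for mutate: the acquisition values and the particular transforms (cast to float32, renormalize the kernel) are illustrative assumptions only.

import numpy as np
from flowdec import data as fd_data

acq = fd_data.Acquisition(
    data=np.random.poisson(5., (16, 64, 64)).astype('uint16'),
    kernel=np.ones((3, 3, 3), dtype='float32'),
    actual=np.zeros((16, 64, 64), dtype='uint16'))

# Cast data/actual to float32 and normalize the kernel so it sums to 1
acq32 = mutate(acq,
               data_fn=lambda d: d.astype('float32'),
               kern_fn=lambda k: k / k.sum())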
Code Example #5
File: deconvolution.py  Project: rensutheart/MEL
def main():
    parser = get_arg_parser()
    args = parser.parse_args()
    logging.basicConfig(format='%(levelname)s:%(asctime)s:%(message)s',
                        level=logging.getLevelName(args.log_level.upper()))
    logger = logging.getLogger('DeconvolutionCLI')

    acq = fd_data.Acquisition(data=io.imread(args.data_path),
                              kernel=resolve_psf(args, logger))
    logger.debug('Loaded data with shape {} and psf with shape {}'.format(
        acq.data.shape, acq.kernel.shape))

    logger.info('Beginning deconvolution of data file "{}"'.format(
        args.data_path))
    start_time = timer()

    # Initialize deconvolution with a padding minimum of 1, which will force any images with dimensions
    # already equal to powers of 2 (which is common with examples) up to the next power of 2
    algo = fd_restoration.RichardsonLucyDeconvolver(n_dims=acq.data.ndim,
                                                    pad_min=[1, 1,
                                                             1]).initialize()
    res = algo.run(acq, niter=args.n_iter)

    end_time = timer()
    logger.info(
        'Deconvolution complete (in {:.3f} seconds)'.format(end_time -
                                                            start_time))

    io.imsave(args.output_path, res.data)
    logger.info('Result saved to "{}"'.format(args.output_path))
Code Example #6
File: CZI_Processor.py  Project: rensutheart/MEL
    def runDeconvolution(self, position, timePoint, numIterations=25, session_config=None):
        stack = self.getStack(position, timePoint)
        #stack = self.getScaledStack(position, timePoint, 0.5)

        if stack.shape[0] == 1 or len(stack) < 3:
            print("The data does not seem to contain any z information. Currently this can't be deconvolved.")
            return stack

        acquisition = fd_data.Acquisition(data=stack, kernel=self.generatePSF())
        print('Loaded data with shape {} and psf with shape {}'.format(acquisition.data.shape, acquisition.kernel.shape))

        start_time = time.time()

        # Initialize deconvolution with a padding minimum of 1, which will force any images with dimensions
        # already equal to powers of 2 (which is common with examples) up to the next power of 2
        algorithm = fd_restoration.RichardsonLucyDeconvolver(n_dims=acquisition.data.ndim, pad_min=[1, 1, 1]).initialize()  # , device="/GPU:1"
        print('before run')

        res = algorithm.run(acquisition, niter=numIterations, session_config=session_config)

        end_time = time.time()
        print('Deconvolution complete (in {:.3f} seconds)'.format(end_time - start_time))

        print(res.info)

        return res.data
Code Example #7
File: Deconvolution.py  Project: wollmanlab/PySpots
    def deconvolve(self):
        self.load_psf()
        stk = self.stk.astype('float64')

        stk = stk - stk.min()
        stk = stk / stk.max()
        if self.verbose:
            iterable = tqdm(range(int(round(self.deconvolution_batches / 2))),
                            desc='Deconvolving Stack')
        else:
            iterable = range(int(round(self.deconvolution_batches / 2)))
        step = int(stk.shape[0] / (self.deconvolution_batches / 2))
        if self.gpu:
            if self.deconvolution_batches > 1:
                for i in iterable:
                    i0 = int(step * i)
                    for j in iterable:
                        j0 = int(step * j)
                        temp = stk[i0:i0 + step, j0:j0 + step, :]
                        if self.gpu:
                            stk[i0:i0 + step,
                                j0:j0 + step, :] = self.gpu_algorithm.run(
                                    fd_data.Acquisition(data=temp,
                                                        kernel=self.psf),
                                    niter=self.deconvolution_niterations).data
                        else:
                            stk[i0:i0 + step,
                                j0:j0 + step, :] = restoration.richardson_lucy(
                                    temp,
                                    self.psf,
                                    self.deconvolution_niterations,
                                    clip=False)
            else:
                stk = self.gpu_algorithm.run(
                    fd_data.Acquisition(data=stk, kernel=self.psf),
                    niter=self.deconvolution_niterations).data
        else:
            stk = restoration.richardson_lucy(stk,
                                              self.psf,
                                              self.deconvolution_niterations,
                                              clip=False)
        self.stk = stk
        del self.gpu_algorithm
Code Example #8
    def _decon_shape(self, data, kernel):
        # Apply blur to original shape and run restoration on blurred image
        acq = fd_data.Acquisition(data=data, kernel=kernel, actual=data)
        acq = tfv.reblur(acq, scale=.001) # Add low amount of noise
        res = tfv.decon_tf(acq, 10, real_domain_fft=False)

        # Binarize resulting image and validate that pixels match original exactly
        bin_res = (res > res.mean()).astype(np.int64)
        bin_tru = acq.actual.astype(np.int64)
        return bin_res, bin_tru
Code Example #9
    def _run(self, tile, **kwargs):
        if not np.issubdtype(tile.dtype, np.unsignedinteger):
            raise ValueError('Only unsigned integer images supported; '
                             'type given = {}'.format(tile.dtype))

        # Tile should have shape (cycles, z, channel, height, width)
        ncyc, nz, nch, nh, nw = tile.shape

        # Ensure that given tile has same number of channels as required in configuration
        # since PSF generation is specific for each channel
        if nch != self.config.n_channels_per_cycle:
            raise AssertionError(
                'Given tile with shape {} ({} channels) does not have expected number of channels {}'
                .format(tile.shape, nch, self.config.n_channels_per_cycle))

        img_cyc = []
        for icyc in range(ncyc):
            img_ch = []
            for ich in range(nch):
                img = tile[icyc, :, ich, :, :]

                # Skip deconvolution if channel was configured to be ignored
                if self.channels is not None and ich not in self.channels:
                    img_ch.append(img)
                    continue

                acq = fd_data.Acquisition(img, kernel=self.psfs[ich])
                logger.debug(
                    'Running deconvolution for cycle {}, channel {} [dtype = {}]'
                    .format(icyc, ich, acq.data.dtype))
                res = self.algo.run(acq,
                                    self.n_iter,
                                    session_config=get_tf_config(self)).data

                # Restore mean intensity if a scale factor was given
                if self.scale_factor is not None:
                    res, mean_ratio = rescale_stack(acq.data, res,
                                                    self.scale_factor)
                    self.record({
                        'mean_ratio': mean_ratio,
                        'cycle': icyc,
                        'channel': ich
                    })

                # Clip float32 and convert to type of original image (i.e. w/ no scaling)
                res = np_utils.arr_to_uint(res, acq.data.dtype)

                img_ch.append(res)
            img_cyc.append(np.stack(img_ch, 1))
        return np.stack(img_cyc, 0)
Code Example #10
    def _run(self, tile, **kwargs):
        if not np.issubdtype(tile.dtype, np.unsignedinteger):
            raise ValueError('Only unsigned integer images supported; '
                             'type given = {}'.format(tile.dtype))

        # Tile should have shape (cycles, z, channel, height, width)
        dims = self.config.tile_dims
        if dims != tile.shape:
            raise AssertionError(
                'Given tile with shape {} does not match expected shape {}'.
                format(tile.shape, dims))
        ncyc, nz, nch, nh, nw = dims

        img_cyc = []
        for icyc in range(ncyc):
            img_ch = []
            for ich in range(nch):
                acq = fd_data.Acquisition(tile[icyc, :, ich, :, :],
                                          kernel=self.psfs[ich])
                logger.debug(
                    'Running deconvolution for cycle {}, channel {} [dtype = {}]'
                    .format(icyc, ich, acq.data.dtype))
                res = self.algo.run(acq,
                                    self.n_iter,
                                    session_config=get_tf_config(self)).data

                # Restore mean intensity if a scale factor was given
                if self.scale_factor is not None:
                    res, mean_ratio = rescale_stack(acq.data, res,
                                                    self.scale_factor)
                    self.record({
                        'mean_ratio': mean_ratio,
                        'cycle': icyc,
                        'channel': ich
                    })

                # Clip float32 and convert to type of original image (i.e. w/ no scaling)
                res = np_utils.arr_to_uint(res, acq.data.dtype)

                img_ch.append(res)
            img_cyc.append(np.stack(img_ch, 1))
        return np.stack(img_cyc, 0)
Code Example #11
def reblur(acq, scale=.05, seed=1):
    """Apply blurring operation to the ground-truth data in an acquisition

    This operation works by convolving the ground-truth image with the configured kernel and then
    adding poisson noise

    Args:
        acq: Acquisition to blur
        scale: Fraction of min/max value range of acquisition ground-truth image to use as standard deviation in
            poisson noise
        seed: Seed for poisson noise generation
    Returns:
        New acquisition object with same ground-truth and kernel, but newly assigned blurred data
    """
    sd = scale * (acq.actual.max() - acq.actual.min())
    np.random.seed(seed)
    noise = np.random.poisson(sd, size=acq.actual.shape)
    data = fftconvolve(acq.actual, acq.kernel, 'same') + noise
    return fd_data.Acquisition(data=data.astype(acq.data.dtype),
                               kernel=acq.kernel,
                               actual=acq.actual)
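
A hedged usage sketch for reblur with a tiny synthetic ground truth; it assumes fftconvolve is imported from scipy.signal (as the function body requires), and the shapes and values are arbitrary.

import numpy as np
from scipy.signal import fftconvolve  # required by reblur above
from flowdec import data as fd_data

# Synthetic ground truth: one bright voxel in an otherwise empty volume
actual = np.zeros((16, 64, 64), dtype='float32')
actual[8, 32, 32] = 1000.

# Normalized 3D box kernel standing in for a real PSF
kernel = np.ones((3, 3, 3), dtype='float32') / 27.

acq = fd_data.Acquisition(data=actual, kernel=kernel, actual=actual)
blurred = reblur(acq, scale=.05)
# blurred.data holds the convolved-plus-noise image; blurred.actual and
# blurred.kernel are carried over unchanged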
Code Example #12
def deconv_volume(
    vol: np.ndarray,
    psf: np.ndarray,
    deconvolver: tfd_restoration.RichardsonLucyDeconvolver,
    n_iter: int,
    observer: Optional[Callable] = None,
) -> np.ndarray:
    """perform RL deconvolution on volume vol using deconvolver
    
    Parameters
    ----------
    vol : np.ndarray
        input volume
    psf : np.ndarray
        point spread function
    deconvolver : tfd_restoration.RichardsonLucyDeconvolver
        see init_rl_deconvolver
    n_iter : int
        number of RL iterations
    observer : Optional[Callable], optional
        NOT YET IMPLEMENTED
        Observer callback so that progress updates for each iteration
        can be displayed. Could also add an option to save intermediate
        results within a certain range of iterations. (The default is None.)
    
    Returns
    -------
    np.ndarray
        deconvolved volume
    """
    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.85)
    gpu_options = tf.GPUOptions(allow_growth=True)
    config = tf.ConfigProto(log_device_placement=False,
                            gpu_options=gpu_options)
    aq = fd_data.Acquisition(data=vol, kernel=psf)
    if observer is not None:
        warnings.warn("Observer function for iteration not yet implemented.")
    result = deconvolver.run(aq, niter=n_iter, session_config=config)
    logger.debug(f"flowdec info: {result.info}")
    return result.data
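
The docstring above points to init_rl_deconvolver, which is not shown in this snippet. The helper below is only a guess at what it might look like (the name follows the docstring, but the pad settings and default dimensionality are assumptions), followed by an illustrative call with placeholder data.

import numpy as np
from flowdec import restoration as tfd_restoration

def init_rl_deconvolver(n_dims: int = 3) -> tfd_restoration.RichardsonLucyDeconvolver:
    # Build the TensorFlow graph once so it can be reused for many volumes
    return tfd_restoration.RichardsonLucyDeconvolver(n_dims=n_dims,
                                                     pad_min=[1, 1, 1]).initialize()

deconvolver = init_rl_deconvolver()
vol = np.random.poisson(10., (32, 128, 128)).astype('uint16')
psf = np.ones((7, 7, 7), dtype='float32') / 7**3
decon = deconv_volume(vol, psf, deconvolver, n_iter=20)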
Code Example #13
    def deconvolve(self):
        """Deconvolve with the calculated Point Spread Function."""
        self.psf = self.psf_dict[self.channel]
        img = self.img.astype(float)
        # preserve scale
        img_min = img.min()
        img = img - img_min
        img_max = img.max()
        img = img / img_max
        img = img + 10**-4  # Can't have 0
        if self.verbose:
            self.update_user('Deconvolution')
        if self.gpu:
            self.gpu_algorithm = fd_restoration.RichardsonLucyDeconvolver(2).initialize()
            img = self.gpu_algorithm.run(fd_data.Acquisition(data=img, kernel=self.psf), niter=self.deconvolution_niterations).data
            del self.gpu_algorithm
        else:
            img = restoration.richardson_lucy(img, self.psf, self.deconvolution_niterations, clip=False)
        # restore scale
        img = img - 10**-4
        img = img * img_max
        img = img + img_min
        self.img = img
Code Example #14

# Using tqdm to report progress
for filename in tqdm.tqdm(glob.glob(path)):
    # Assume each image loads as [z, channels, x, y] and that there are two
    # channels (first at 561 nm, second at 657 nm)
    img = skimage.io.imread(filename)
    [z, channels, x, y] = img.shape
    # Move channels axis (1) to last axis (-1) to give result shape [z, x, y, channels]
    img_final = np.moveaxis(img, 1, -1)
    # Alternatively this will accomplish the same thing in a more general way:
    # img_final = np.transpose(img, (0, 2, 3, 1))
    assert img_final.ndim == 4
    assert img_final.shape[-1] == 2  # Make sure there are 2 channels

    res = [
        algo.run(fd_data.Acquisition(img_final[..., i], psfs[i]),
                 niter=25).data for i in range(img_final.shape[-1])
    ]

    # Stack along a new last axis so that the result is [z, x, y, channels],
    # not [channels, z, x, y]
    res = np.stack(res, -1)

    # Build an output name template like "<input basename>_D{}.tif"
    final_name_str = os.path.splitext(filename)[0] + "_D{}.tif"

    for i in range(res.shape[-1]):  # Loop over channels
        res_path = final_name_str.format(i + 1)
        skimage.io.imsave(res_path, res[..., i])
Code Example #15
def run_deconvolution(args, psfs, config):
    global IMG_ID
    files = utils.get_files(args.input_dir, r'.*\.tif$')
    times = []
    mean_ratios = []
    scale_factor = float(args.scale_factor)

    # Tone down TF logging, though only the first setting below actually
    # seems to make any difference
    utils.disable_tf_logging()
    session_config = tf.ConfigProto(log_device_placement=False)

    n_iter = int(args.n_iter)
    pad_dims = np.array([int(p) for p in args.pad_dims.split(',')])
    if args.observer_dir is not None and not args.dry_run:
        if args.observer_coords is None:
            raise ValueError(
                'Must set "observer-coords" property when using observer')
        observer_fn = get_iteration_observer_fn(args.observer_dir,
                                                args.observer_coords)
    else:
        observer_fn = None
    algo = fd_restoration.RichardsonLucyDeconvolver(
        n_dims=3,
        pad_mode=args.pad_mode,
        pad_min=pad_dims,
        epsilon=1e-6,
        observer_fn=observer_fn).initialize()

    # Stacks load as (cycles, z, channel, height, width)
    imgs = img_generator(files)
    img_dtypes = set()
    for i, (f, img) in enumerate(imgs):
        logger.debug(
            '{} tile "{}" ({} of {}) --> shape = {}, dtype = {}'.format(
                'Would deconvolve' if args.dry_run else 'Deconvolving', f,
                i + 1, len(files), img.shape, img.dtype))
        img_dtypes.add(img.dtype)
        if len(img_dtypes) > 1:
            raise ValueError('Image has conflicting dtype with prior images; '
                             'all dtypes seen = {}'.format(list(img_dtypes)))
        if not np.issubdtype(img.dtype, np.unsignedinteger):
            raise ValueError('Only unsigned integer images supported; '
                             'type given = {}'.format(img.dtype))
        if img.min() < 0:
            raise ValueError('Image to deconvolve cannot have negative values')

        utils.validate_stack_shape(img, config)
        ncyc, nz, nch, nh, nw = img.shape

        # Loop through each cycle and channel so that for each, a single 3D z-stack
        # can be extracted for deconvolution
        res_stack = []
        for icyc in range(ncyc):
            res_ch = []
            for ich in range(nch):
                acq = fd_data.Acquisition(data=img[icyc, :, ich, :, :],
                                          kernel=psfs[ich])

                if args.dry_run:
                    continue
                IMG_ID = dict(tile=i + 1, channel=ich + 1, cycle=icyc + 1)

                # Results have shape (nz, nh, nw)
                start_time = timer()
                res = algo.run(acq,
                               niter=n_iter,
                               session_config=session_config).data
                end_time = timer()
                times.append({
                    'cycle': icyc + 1,
                    'channel': ich + 1,
                    'time': end_time - start_time
                })

                # This is a transformation used in the Nolanlab code to rescale means
                # of deconvolution results back to the original (they're not usually
                # off by much though).  scale_factor is then a tunable way to lower or
                # raise the intensity values so that when clipping to uint type (with
                # no scaling) there is less saturation
                if args.scale_mode == 'stack':
                    mean_ratio = acq.data.mean() / utils.arr_to_uint(
                        res, img.dtype).mean()
                    mean_ratios.append({
                        'cycle': icyc + 1,
                        'channel': ich + 1,
                        'ratio': mean_ratio
                    })
                    res *= (mean_ratio * scale_factor)
                elif args.scale_mode == 'slice':
                    for iz in range(nz):
                        mean_ratio = acq.data[iz].mean() / utils.arr_to_uint(
                            res[iz], img.dtype).mean()
                        mean_ratios.append({
                            'cycle': icyc + 1,
                            'channel': ich + 1,
                            'ratio': mean_ratio,
                            'z': iz + 1
                        })
                        res[iz] = res[iz] * (mean_ratio * scale_factor)
                else:
                    raise ValueError('Scale mode "{}" not valid'.format(
                        args.scale_mode))

                # Clip float32 and convert to type of original image (i.e. w/ no scaling)
                res = utils.arr_to_uint(res, img.dtype)

                res_ch.append(res)

            if args.dry_run:
                continue

            # Stack (nz, nh, nw) results to (nz, nch, nh, nw)
            res_ch = np.stack(res_ch, 1)

            if list(res_ch.shape) != [nz, nch, nh, nw]:
                raise ValueError(
                    'Stack across channels has wrong shape --> expected = {}, actual = {}'
                    .format([nz, nch, nh, nw], list(res_ch.shape)))
            res_stack.append(res_ch)

        if args.dry_run:
            continue

        # Stack (nz, nch, nh, nw) results along first axis to match input
        # like (ncyc, nz, nch, nh, nw)
        res_stack = np.stack(res_stack, 0)

        # Validate resulting data type
        if res_stack.dtype != img.dtype:
            raise ValueError(
                'Final stack has wrong dtype --> expected = {}, actual = {}'.
                format(img.dtype, res_stack.dtype))

        # Validate resulting shape matches the input
        if list(res_stack.shape) != list(img.shape):
            raise ValueError(
                'Final stack has wrong shape --> expected = {}, actual = {}'.
                format(list(img.shape), list(res_stack.shape)))

        res_file = osp.join(args.output_dir, osp.basename(f))
        logger.debug(
            'Saving deconvolved tile to "{}" --> shape = {}, dtype = {}'.
            format(res_file, res_stack.shape, res_stack.dtype))
        # See tiffwriter docs at http://scikit-image.org/docs/dev/api/skimage.external.tifffile
        # .html#skimage.external.tifffile.TiffWriter for more info on how scikit-image
        # handles imagej formatting -- the docs aren't very explicit but they do mention
        # that with 'imagej=True' it can handle arrays up to 6 dims in TZCYXS order
        imsave(res_file, res_stack, imagej=True)
    return times, mean_ratios
Code Example #16
File: flowdec.py  Project: czbiohub/decon
# See: http://www.cellimagelibrary.org/images/CCDB_2
actual = fd_data.neuron_25pct().data
# actual.shape = (50, 256, 256)

# Create a gaussian kernel that will be used to blur the original acquisition
kernel = np.zeros_like(actual)
for offset in [0, 1]:
    kernel[tuple((np.array(kernel.shape) - offset) // 2)] = 1
kernel = ndimage.gaussian_filter(kernel, sigma=1.)
# kernel.shape = (50, 256, 256)

# Convolve the original image with our fake PSF
data = signal.fftconvolve(actual, kernel, mode='same')
# data.shape = (50, 256, 256)

# Run the deconvolution process and note that deconvolution initialization is best kept separate from
# execution since the "initialize" operation corresponds to creating a TensorFlow graph, which is a
# relatively expensive operation and should not be repeated across multiple executions
algo = fd_restoration.RichardsonLucyDeconvolver(data.ndim).initialize()
res = algo.run(fd_data.Acquisition(data=data, kernel=kernel), niter=5).data

fig, axs = plt.subplots(1, 3)
axs = axs.ravel()
fig.set_size_inches(18, 12)
center = tuple([slice(None), slice(10, -10), slice(10, -10)])
titles = ['Original Image', 'Blurred Image', 'Reconstructed Image']
for i, d in enumerate([actual, data, res]):
    img = exposure.adjust_gamma(d[center].max(axis=0), gamma=.2)
    axs[i].imshow(img, cmap='Spectral_r')
    axs[i].set_title(titles[i])
    axs[i].axis('off')
Code Example #17
# Run the deconvolution in a loop over different iteration counts (multiples of the base value base_iter)
multiRunFactor = 1
timingListIter = []
timingListTime = []

#send std output to a log file
sys.stdout = open(
    'FlowDecLogGold' + str(base_iter) + 'multi' + str(multiRunFactor) +
    'GAUSS.txt', 'w')
#logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)

for i in range(1, multiRunFactor + 1):
    niter = (base_iter * i)
    # start measuring time
    startDec = time.process_time()
    res = algo.run(fd_data.Acquisition(data=raw, kernel=kernel),
                   niter=(niter)).data
    # measure time here includes only the deconvolution, no file saving
    DecTime = (time.process_time() - startDec)
    # save the result # using skimage.external.tifffile.imsave
    resultFileName = ('result' + rawImg + PSF + str(niter) + 'iterations.tif')
    imsave(resultFileName, res)
    # measure time here includes file saving
    #DecTime = (time.process_time() - startDec)
    print('Saved result image TIFF file ' + resultFileName)
    print('{} iterations took {:.3f} sec.'.format(niter, DecTime))
    timingListIter.append(niter)
    timingListTime.append(DecTime)

#benchmarking data output
Code Example #18
# deconvolution by flowdec

from flowdec import data as fd_data
from flowdec import restoration as fd_restoration
from skimage import io
import numpy as np
from tifffile import imsave

img_out = io.imread("flowdec/datasets/bit_5.tif")
img = np.squeeze(img_out)
psf = io.imread("flowdec/datasets/PSF647_38z.tif")
assert img.ndim == 3
assert psf.ndim == 3

algo = fd_restoration.RichardsonLucyDeconvolver(3, pad_mode='none').initialize()
res = algo.run(fd_data.Acquisition(img, psf), niter=25).data  # run deconvolution

imsave('bit_5_D.tif', res)
Code Example #19
def deconvolveTF(img, kernel, iteration):
    imgInit = fd_restoration.RichardsonLucyDeconvolver(img.ndim).initialize()
    deconv = imgInit.run(fd_data.Acquisition(data=img, kernel=kernel),
                         niter=iteration).data
    return deconv
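
Note that deconvolveTF builds and initializes a new deconvolver (and hence a new TensorFlow graph) on every call, which is fine for one-off use but wasteful in a loop; initializing once and reusing it, as in the earlier examples, is cheaper. A minimal hedged usage sketch with placeholder data:

import numpy as np

img = np.random.poisson(20., (32, 128, 128)).astype('float32')
psf = np.ones((5, 5, 5), dtype='float32')
psf /= psf.sum()

decon = deconvolveTF(img, psf, 25)

# For many volumes of the same dimensionality, prefer reusing one deconvolver:
# algo = fd_restoration.RichardsonLucyDeconvolver(img.ndim).initialize()
# decon = algo.run(fd_data.Acquisition(data=img, kernel=psf), niter=25).data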
Code Example #20
def resolve_psf(args):
    # If an explicit PSF image path was given, load it directly
    if args.psf_path:
        return io.imread(args.psf_path)
    # Otherwise, load PSF configuration file and generate a PSF from that
    else:
        psf = fd_psf.GibsonLanni.load(args.psf_config_path)
        logger.info('Loaded psf with configuration: {}'.format(psf.to_json()))
        return psf.generate()


if __name__ == '__main__':
    parser = get_arg_parser()
    args = parser.parse_args()
    logging.basicConfig(format='%(levelname)s:%(asctime)s:%(message)s',
                        level=logging.getLevelName(args.log_level.upper()))
    logger = logging.getLogger('DeconvolutionCLI')

    acq = fd_data.Acquisition(data=io.imread(args.data_path),
                              kernel=resolve_psf(args))
    logger.debug('Loaded data with shape {} and psf with shape {}'.format(
        acq.data.shape, acq.kernel.shape))

    logger.info('Beginning deconvolution of data file "{}"'.format(
        args.data_path))
    start_time = timer()

    # Initialize deconvolution with a padding minimum of 1, which will force any images with dimensions
    # already equal to powers of 2 (which is common with examples) up to the next power of 2
    algo = fd_restoration.RichardsonLucyDeconvolver(n_dims=acq.data.ndim,
                                                    pad_min=[1, 1,
                                                             1]).initialize()
    res = algo.run(acq, niter=args.n_iter)

    end_time = timer()
Code Example #21
parser.add_argument('input', type=str)
parser.add_argument('psf', type=str)
parser.add_argument('output_prefix', type=str)
parser.add_argument('channel', type=int, default=0)

args = parser.parse_args()
print(args)

inputpath = args.input
psfpath = args.psf
outputprefix = args.output_prefix
channel = args.channel

NX, NY, NZ, NC, NT = get_movie_shape(inputpath)
psf = read_psf(psfpath)

# Initialize the deconvolver once outside the loop; initialize() builds the
# TensorFlow graph, which is expensive and can be reused for every frame
algo = fd_restoration.RichardsonLucyDeconvolver(3).initialize()

for t, frame in enumerate(movie_input_generator(inputpath, channel=channel)):
    print('Processing frame {0}'.format(t))
    res = algo.run(fd_data.Acquisition(data=frame, kernel=psf), niter=30).data
    bf.write_image(outputprefix.format(t),
                   res,
                   z=0,
                   t=0,
                   c=0,
                   size_z=NZ,
                   size_t=1,
                   size_c=1,
                   pixel_type=bf.omexml.PT_FLOAT)
javabridge.kill_vm()