Example #1
    def runDeconvolution(self, position, timePoint, numIterations=25, session_config=None):
        stack = self.getStack(position, timePoint)
        #stack = self.getScaledStack(position, timePoint, 0.5)


        if stack.shape[0] == 1 or len(stack) < 3:
            print("The data does not seem to contain any z information. Currently this can't be deconvolved.")
            return stack

        acquisition = fd_data.Acquisition(data = stack, kernel=self.generatePSF())
        print('Loaded data with shape {} and psf with shape {}'.format(acquisition.data.shape, acquisition.kernel.shape))

        start_time = time.time()

        # Initialize deconvolution with a padding minimum of 1, which will force any images with dimensions
        # already equal to powers of 2 (which is common with examples) up to the next power of 2
        algorithm = fd_restoration.RichardsonLucyDeconvolver(n_dims=acquisition.data.ndim, pad_min=[1, 1, 1]).initialize() # , device="/GPU:1"
        print('before run')


        res = algorithm.run(acquisition, niter=numIterations, session_config=session_config)

        end_time = time.time()
        print('Deconvolution complete (in {:.3f} seconds)'.format(end_time - start_time))

        print(res.info)

        return res.data
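The method above calls a self.generatePSF() helper that is not shown. A minimal stand-in, assuming flowdec's Gibson-Lanni PSF generator is acceptable and using placeholder kernel dimensions, might look like this:

from flowdec import psf as fd_psf

def generate_psf(size_z=32, size_y=64, size_x=64):
    # Hypothetical stand-in for generatePSF(); sizes should match the z/y/x
    # extent appropriate for the stack being deconvolved.
    return fd_psf.GibsonLanni(size_x=size_x, size_y=size_y, size_z=size_z).generate()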
Example #2
def main():
    parser = get_arg_parser()
    args = parser.parse_args()
    logging.basicConfig(format='%(levelname)s:%(asctime)s:%(message)s',
                        level=logging.getLevelName(args.log_level.upper()))
    logger = logging.getLogger('DeconvolutionCLI')

    acq = fd_data.Acquisition(data=io.imread(args.data_path),
                              kernel=resolve_psf(args, logger))
    logger.debug('Loaded data with shape {} and psf with shape {}'.format(
        acq.data.shape, acq.kernel.shape))

    logger.info('Beginning deconvolution of data file "{}"'.format(
        args.data_path))
    start_time = timer()

    # Initialize deconvolution with a padding minimum of 1, which will force any images with dimensions
    # already equal to powers of 2 (which is common with examples) up to the next power of 2
    algo = fd_restoration.RichardsonLucyDeconvolver(n_dims=acq.data.ndim,
                                                    pad_min=[1, 1,
                                                             1]).initialize()
    res = algo.run(acq, niter=args.n_iter)

    end_time = timer()
    logger.info(
        'Deconvolution complete (in {:.3f} seconds)'.format(end_time -
                                                            start_time))

    io.imsave(args.output_path, res.data)
    logger.info('Result saved to "{}"'.format(args.output_path))
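The get_arg_parser() used above is not shown. Below is a minimal sketch of what it might define, inferred from the attributes the script reads (data_path, output_path, n_iter, log_level); the flag spellings, the psf-path argument, and the defaults are assumptions, not the original CLI.

import argparse

def get_arg_parser():
    parser = argparse.ArgumentParser(description='Flowdec deconvolution CLI (sketch)')
    parser.add_argument('--data-path', required=True, help='Path to input image volume (TIFF)')
    parser.add_argument('--psf-path', help='Path to PSF image (TIFF); assumed to be read by resolve_psf')
    parser.add_argument('--output-path', required=True, help='Path for the deconvolved result')
    parser.add_argument('--n-iter', type=int, default=25, help='Number of Richardson-Lucy iterations')
    parser.add_argument('--log-level', default='INFO', help='Logging level name')
    return parser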
Example #3
    def test_observer(self):
        acq = fd_data.bars_25pct()
        imgs = []

        def observer(img, *_):
            imgs.append(img)
            self.assertEqual(acq.data.shape, img.shape, msg='Observer image and original shapes not equal')
        algo = fd_restoration.RichardsonLucyDeconvolver(n_dims=3, observer_fn=observer).initialize()
        algo.run(acq, niter=5)
        self.assertEqual(len(imgs), 5)
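Beyond tests, the same observer hook can be used to monitor convergence. A small runnable sketch that records a per-iteration summary statistic (only the first positional argument is relied on, matching the signature used in the test above):

import numpy as np
from flowdec import data as fd_data
from flowdec import restoration as fd_restoration

history = []

def observer(img, *args):
    # record mean intensity of the current estimate at each iteration
    history.append(float(np.mean(img)))

acq = fd_data.bars_25pct()
algo = fd_restoration.RichardsonLucyDeconvolver(n_dims=3, observer_fn=observer).initialize()
algo.run(acq, niter=10)
print('Mean intensity by iteration:', history)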
Example #4
def run_deconvolution(device):
    import time
    import tensorflow as tf
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.visible_device_list = device
    #acq = fd_data.load_celegans_channel('CY3')

    for i in range(10):
        acq = fd_data.bars_25pct()
        algo = fd_restoration.RichardsonLucyDeconvolver(3).initialize()
        res = algo.run(acq, niter=50, session_config=config).data
        time.sleep(5)
    return res
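Not part of the original snippet: one way this per-device function might be dispatched, assuming one process per GPU so that each TensorFlow session only sees its own device; the device ids '0' and '1' are illustrative.

from multiprocessing import Process

if __name__ == '__main__':
    procs = [Process(target=run_deconvolution, args=(dev,)) for dev in ('0', '1')]
    for p in procs:
        p.start()
    for p in procs:
        p.join()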
Example #5
 def deconvolve(self):
     """ Deconvolve with calculated Point Spread Function"""
     self.psf = self.psf_dict[self.channel] 
     img = self.img.astype(float)
     # preserve scale
     img_min = img.min()
     img = img-img_min
     img_max = img.max()
     img = img/img_max
     img = img+10**-4 # Can't have 0
     if self.verbose:
         self.update_user('Deconvolution')
     if self.gpu:
         self.gpu_algorithm = fd_restoration.RichardsonLucyDeconvolver(2).initialize()
         img = self.gpu_algorithm.run(fd_data.Acquisition(data=img, kernel=self.psf), niter=self.deconvolution_niterations).data
         del self.gpu_algorithm
     else:
         img = restoration.richardson_lucy(img, self.psf, self.deconvolution_niterations, clip=False)
     # restore scale
     img = img-10**-4
     img = img*img_max
     img = img+img_min
     self.img = img
Example #6
# deconvolution by flowdec

from flowdec import data as fd_data
from flowdec import restoration as fd_restoration
from skimage import io
import numpy as np
from tifffile import imsave

img_out = io.imread("flowdec/datasets/bit_5.tif")
img = np.squeeze(img_out)
psf = io.imread("flowdec/datasets/PSF647_38z.tif")
assert img.ndim == 3
assert psf.ndim == 3

algo = fd_restoration.RichardsonLucyDeconvolver(3, pad_mode='none').initialize()
res = algo.run(fd_data.Acquisition(img, psf), niter=25).data # run deconvolution

imsave('bit_5_D.tif', res)
Example #7
        
if __name__ == '__main__':
    niter = args.niter
    md_path = args.md_path
    k = args.k
    zstart = args.zstart
    zskip = args.zskip
    zmax = args.zmax
    out_path = args.out_path
    use_gpu = args.gpu
    ncpu = args.ncpu
#    flatfield_path = args.flatfield_path
#    flatfield_dict = pickle.load(open(flatfield_path, 'rb'))
    if use_gpu == 1:
        print(device_lib.list_local_devices())
        gpu_algorithm = fd_restoration.RichardsonLucyDeconvolver(2).initialize()
#         assert(ncpu==1, 'If using GPU only use single-threaded to prevent conflicts with GPU usage.')
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    print(args)
    md = Metadata(md_path)
    seqfish_config = importlib.import_module(args.cword_config)
    bitmap = seqfish_config.bitmap
    pfunc = partial(hdata_multi_z_pseudo_maxprjZ_wrapper, md=md, k=args.k, zstart=args.zstart, zskip=args.zskip, zmax=args.zmax, ndecon_iter=niter)
    good_positions = pickle.load(open(args.tforms_path, 'rb'))['good']
    func_inputs = []
    for p, t in good_positions.items():
        tforms_xyz = {k: (v[0][0], v[0][1], int(np.round(np.mean(v[0][2])))) for k, v in t.items()}
        txy = {k: (v[0], v[1]) for k, v in tforms_xyz.items()}
        tzz = {k: v[2] for k, v in tforms_xyz.items()}
        func_inputs.append((HybeData(os.path.join(out_path, p)), p, txy, tzz))
Example #8
import numpy as np
from scipy.stats import describe
from skimage.exposure import rescale_intensity

from flowdec import data as fd_data

channels = ['CY3', 'FITC', 'DAPI']
acqs = fd_data.load_celegans()
acqs.keys()

acqs['CY3'].shape(), acqs['CY3'].dtype()

for ch in channels:
    print(' Image stats (' + ch + '):', describe(acqs[ch].data.ravel()))

import tensorflow as tf
from flowdec import restoration as tfd_restoration

niter = 500
algo = tfd_restoration.RichardsonLucyDeconvolver(n_dims=3).initialize()
res = {ch: algo.run(acqs[ch], niter=niter) for ch in channels}


# Stack and rescale original image and result to RGB with leading z dimension
def to_rgb(acqs):
    return np.stack([
        rescale_intensity(acqs[ch].data, out_range='uint8').astype('uint8')
        for ch in channels
    ], axis=-1)


img_acq = to_rgb(acqs)
print('Image shape (z,y,x,c):', img_acq.shape)
print('Image dtype:', img_acq.dtype)
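Not in the original snippet: a quick way to inspect the RGB composite built above, using a maximum projection along z (assumes matplotlib is available).

import matplotlib.pyplot as plt

plt.imshow(img_acq.max(axis=0))
plt.title('Max projection of RGB composite')
plt.axis('off')
plt.show()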
Example #9
if __name__ == '__main__':
    parser = get_arg_parser()
    args = parser.parse_args()
    logging.basicConfig(format='%(levelname)s:%(asctime)s:%(message)s',
                        level=logging.getLevelName(args.log_level.upper()))
    logger = logging.getLogger('DeconvolutionCLI')

    acq = fd_data.Acquisition(data=io.imread(args.data_path),
                              kernel=resolve_psf(args))
    logger.debug('Loaded data with shape {} and psf with shape {}'.format(
        acq.data.shape, acq.kernel.shape))

    logger.info('Beginning deconvolution of data file "{}"'.format(
        args.data_path))
    start_time = timer()

    # Initialize deconvolution with a padding minimum of 1, which will force any images with dimensions
    # already equal to powers of 2 (which is common with examples) up to the next power of 2
    algo = fd_restoration.RichardsonLucyDeconvolver(n_dims=acq.data.ndim,
                                                    pad_min=[1, 1,
                                                             1]).initialize()
    res = algo.run(acq, niter=args.n_iter)

    end_time = timer()
    logger.info(
        'Deconvolution complete (in {:.3f} seconds)'.format(end_time -
                                                            start_time))

    io.imsave(args.output_path, res.data)
    logger.info('Result saved to "{}"'.format(args.output_path))
Example #10
"""Export Project TensorFlow Graphs for use in other TF client APIs"""
from flowdec import restoration as fd_restoration
import flowdec
import shutil
import os
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

if __name__ == '__main__':
    for args in [['richardsonlucy', 1, 'complex'],
                 ['richardsonlucy', 2, 'complex'],
                 ['richardsonlucy', 3, 'complex']]:
        algo_name, ndims, domain = args
        logger.info('Building graph export for arguments {}'.format(args))

        graph_dir = '{}-{}-{}d'.format(algo_name, domain, ndims)
        export_dir = os.path.abspath(
            os.path.join(flowdec.tf_graph_dir, graph_dir))

        if os.path.exists(export_dir):
            shutil.rmtree(export_dir)

        algo = fd_restoration.RichardsonLucyDeconvolver(
            ndims, pad_mode='log2',
            real_domain_fft=(domain == 'real')).initialize()

        algo.graph.save(export_dir, save_as_text=False)

# rsync -rP ~/repos/hammer/flowdec/tensorflow/* ~/repos/imagej/ops-experiments/ops-experiments-tensorflow/src/main/resources/tensorflow/graphs/
Example #11
 def initialize(self):
     self.algo = fd_restoration.RichardsonLucyDeconvolver(
         n_dims=3).initialize()
     return self
Example #12
import numpy as np
from scipy import ndimage, signal
import matplotlib.pyplot as plt
from skimage import exposure
from flowdec import data as fd_data
from flowdec import restoration as fd_restoration

# See: http://www.cellimagelibrary.org/images/CCDB_2
actual = fd_data.neuron_25pct().data
# actual.shape = (50, 256, 256)

# Create a gaussian kernel that will be used to blur the original acquisition
kernel = np.zeros_like(actual)
for offset in [0, 1]:
    kernel[tuple((np.array(kernel.shape) - offset) // 2)] = 1
kernel = ndimage.gaussian_filter(kernel, sigma=1.)
# kernel.shape = (50, 256, 256)

# Convolve the original image with our fake PSF
data = signal.fftconvolve(actual, kernel, mode='same')
# data.shape = (50, 256, 256)

# Run the deconvolution process and note that deconvolution initialization is best kept separate from
# execution since the "initialize" operation corresponds to creating a TensorFlow graph, which is a
# relatively expensive operation and should not be repeated across multiple executions
algo = fd_restoration.RichardsonLucyDeconvolver(data.ndim).initialize()
res = algo.run(fd_data.Acquisition(data=data, kernel=kernel), niter=5).data

fig, axs = plt.subplots(1, 3)
axs = axs.ravel()
fig.set_size_inches(18, 12)
center = tuple([slice(None), slice(10, -10), slice(10, -10)])
titles = ['Original Image', 'Blurred Image', 'Reconstructed Image']
for i, d in enumerate([actual, data, res]):
    img = exposure.adjust_gamma(d[center].max(axis=0), gamma=.2)
    axs[i].imshow(img, cmap='Spectral_r')
    axs[i].set_title(titles[i])
    axs[i].axis('off')
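Not in the original example: the deconvolved volume can also be written to disk, e.g. with tifffile (already used elsewhere on this page); the file name is illustrative.

from tifffile import imsave

imsave('neuron_deconvolved.tif', res.astype('float32'))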
Example #13
# (truncated fragment) tail of a per-iteration observer print reporting: iteration
# index, raw-data and blurred-model sums, decon crop max/std, convergence residual
# and R maxima, structural similarity and Gaussian-kernel maxima


# Run the deconvolution process and note that deconvolution initialization is best kept separate from
# execution since the "initialize" operation corresponds to creating a TensorFlow graph, which is a
# relatively expensive operation and should not be repeated across multiple executions

# initialize the TF graph for the deconvolution settings in use, for a given size of input and PSF image.
# This works for running the same input data multiple times with different iteration counts,
# and should also work for different input data with the same image and PSF sizes,
# e.g. a time series split into one TIFF file per time point.
startAlgoinit = time.process_time()
# Run algorithm with observer function to track convergence
algo = fd_restoration.RichardsonLucyDeconvolver(
    raw.ndim, observer_fn=observer).initialize()
# Run algorithm without observer function - much faster, of course.
#algo = fd_restoration.RichardsonLucyDeconvolver(raw.ndim).initialize()
TFinitTime = (time.process_time() - startAlgoinit)

# run the deconvolution itself
# in a loop making different numbers of iterations, multiples of base value of n_iter
multiRunFactor = 1
timingListIter = []
timingListTime = []

#send std output to a log file
sys.stdout = open(
    'FlowDecLogGold' + str(base_iter) + 'multi' + str(multiRunFactor) +
    'GAUSS.txt', 'w')
#logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
Example #14
def init_rl_deconvolver(**kwargs):
    """initializes the tensorflow-based Richardson Lucy Deconvolver """
    return tfd_restoration.RichardsonLucyDeconvolver(n_dims=3,
                                                     start_mode="input",
                                                     **kwargs).initialize()
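Illustrative usage of the helper above with flowdec's bundled test volume; the iteration count is arbitrary and any extra kwargs would simply be forwarded to the deconvolver.

from flowdec import data as fd_data

acq = fd_data.bars_25pct()
deconvolver = init_rl_deconvolver()
result = deconvolver.run(acq, niter=25).data
print(result.shape)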
Example #15
def deconvolveTF(img, kernel, iteration):
    imgInit = fd_restoration.RichardsonLucyDeconvolver(img.ndim).initialize()
    deconv = imgInit.run(fd_data.Acquisition(data=img, kernel=kernel),
                         niter=iteration).data
    return deconv
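A runnable example of calling deconvolveTF(), using flowdec's bundled test volume and a simple Gaussian blur kernel as a stand-in for a measured PSF:

import numpy as np
from scipy import ndimage
from flowdec import data as fd_data

acq = fd_data.bars_25pct()
kernel = np.zeros_like(acq.data, dtype=float)
kernel[tuple(np.array(kernel.shape) // 2)] = 1
kernel = ndimage.gaussian_filter(kernel, sigma=1.0)
result = deconvolveTF(acq.data, kernel, 10)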
Example #16
 def initialize(self):
     self.algo = fd_restoration.RichardsonLucyDeconvolver(
         n_dims=3, pad_mode=self.pad_mode,
         pad_min=self.pad_min).initialize()
     self.psfs = generate_psfs(self.config)
     return self
Example #17
def run_deconvolution(args, psfs, config):
    global IMG_ID
    files = utils.get_files(args.input_dir, r'.*\.tif$')
    times = []
    mean_ratios = []
    scale_factor = float(args.scale_factor)

    # Tone down TF logging, though only the first setting below actually
    # seems to make any difference
    utils.disable_tf_logging()
    session_config = tf.ConfigProto(log_device_placement=False)

    n_iter = int(args.n_iter)
    pad_dims = np.array([int(p) for p in args.pad_dims.split(',')])
    if args.observer_dir is not None and not args.dry_run:
        if args.observer_coords is None:
            raise ValueError(
                'Must set "observer-coords" property when using observer')
        observer_fn = get_iteration_observer_fn(args.observer_dir,
                                                args.observer_coords)
    else:
        observer_fn = None
    algo = fd_restoration.RichardsonLucyDeconvolver(
        n_dims=3,
        pad_mode=args.pad_mode,
        pad_min=pad_dims,
        epsilon=1e-6,
        observer_fn=observer_fn).initialize()

    # Stacks load as (cycles, z, channel, height, width)
    imgs = img_generator(files)
    img_dtypes = set()
    for i, (f, img) in enumerate(imgs):
        logger.debug(
            '{} tile "{}" ({} of {}) --> shape = {}, dtype = {}'.format(
                'Would deconvolve' if args.dry_run else 'Deconvolving', f,
                i + 1, len(files), img.shape, img.dtype))
        img_dtypes.add(img.dtype)
        if len(img_dtypes) > 1:
            raise ValueError('Image has conflicting dtype with prior images; '
                             'all dtypes seen = {}'.format(list(img_dtypes)))
        if not np.issubdtype(img.dtype, np.unsignedinteger):
            raise ValueError('Only unsigned integer images supported; '
                             'type given = {}'.format(img.dtype))
        if img.min() < 0:
            raise ValueError('Image to deconvolve cannot have negative values')

        utils.validate_stack_shape(img, config)
        ncyc, nz, nch, nh, nw = img.shape

        # Loop through each cycle and channel so that for each, a single 3D z-stack
        # can be extracted for deconvolution
        res_stack = []
        for icyc in range(ncyc):
            res_ch = []
            for ich in range(nch):
                acq = fd_data.Acquisition(data=img[icyc, :, ich, :, :],
                                          kernel=psfs[ich])

                if args.dry_run:
                    continue
                IMG_ID = dict(tile=i + 1, channel=ich + 1, cycle=icyc + 1)

                # Results have shape (nz, nh, nw)
                start_time = timer()
                res = algo.run(acq,
                               niter=n_iter,
                               session_config=session_config).data
                end_time = timer()
                times.append({
                    'cycle': icyc + 1,
                    'channel': ich + 1,
                    'time': end_time - start_time
                })

                # This is a transformation used in the Nolanlab code to rescale means
                # of deconvolution results back to the original (they're not usually
                # off by much though).  scale_factor is then a tunable way to lower or
                # raise the intensity values so that when clipping to uint type (with
                # no scaling) there is less saturation
                if args.scale_mode == 'stack':
                    mean_ratio = acq.data.mean() / utils.arr_to_uint(
                        res, img.dtype).mean()
                    mean_ratios.append({
                        'cycle': icyc + 1,
                        'channel': ich + 1,
                        'ratio': mean_ratio
                    })
                    res *= (mean_ratio * scale_factor)
                elif args.scale_mode == 'slice':
                    for iz in range(nz):
                        mean_ratio = acq.data[iz].mean() / utils.arr_to_uint(
                            res[iz], img.dtype).mean()
                        mean_ratios.append({
                            'cycle': icyc + 1,
                            'channel': ich + 1,
                            'ratio': mean_ratio,
                            'z': iz + 1
                        })
                        res[iz] = res[iz] * (mean_ratio * scale_factor)
                else:
                    raise ValueError('Scale mode "{}" not valid'.format(
                        args.scale_mode))

                # Clip float32 and convert to type of original image (i.e. w/ no scaling)
                res = utils.arr_to_uint(res, img.dtype)

                res_ch.append(res)

            if args.dry_run:
                continue

            # Stack (nz, nh, nw) results to (nz, nch, nh, nw)
            res_ch = np.stack(res_ch, 1)

            if list(res_ch.shape) != [nz, nch, nh, nw]:
                raise ValueError(
                    'Stack across channels has wrong shape --> expected = {}, actual = {}'
                    .format([nz, nch, nh, nw], list(res_ch.shape)))
            res_stack.append(res_ch)

        if args.dry_run:
            continue

        # Stack (nz, nch, nh, nw) results along first axis to match input
        # like (ncyc, nz, nch, nh, nw)
        res_stack = np.stack(res_stack, 0)

        # Validate resulting data type
        if res_stack.dtype != img.dtype:
            raise ValueError(
                'Final stack has wrong dtype --> expected = {}, actual = {}'.
                format(img.dtype, res_stack.dtype))

        # Validate resulting shape matches the input
        if list(res_stack.shape) != list(img.shape):
            raise ValueError(
                'Final stack has wrong shape --> expected = {}, actual = {}'.
                format(list(img.shape), list(res_stack.shape)))

        res_file = osp.join(args.output_dir, osp.basename(f))
        logger.debug(
            'Saving deconvolved tile to "{}" --> shape = {}, dtype = {}'.
            format(res_file, res_stack.shape, res_stack.dtype))
        # See tiffwriter docs at http://scikit-image.org/docs/dev/api/skimage.external.tifffile
        # .html#skimage.external.tifffile.TiffWriter for more info on how scikit-image
        # handles imagej formatting -- the docs aren't very explicit but they do mention
        # that with 'imagej=True' it can handle arrays up to 6 dims in TZCYXS order
        imsave(res_file, res_stack, imagej=True)
    return times, mean_ratios
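The utils.arr_to_uint() helper referenced throughout is project-specific; below is a plausible stand-in (an assumption, not the project's actual implementation) that clips to the target unsigned-integer range and casts without rescaling.

import numpy as np

def arr_to_uint(arr, dtype):
    # clip to the representable range of the target unsigned type, then cast
    info = np.iinfo(dtype)
    return np.clip(arr, info.min, info.max).astype(dtype)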