Example #1
def writeN5(img, path, dataset_name, blockSize, gzip_compression_level=4, n_threads=0):
  """ img: the RandomAccessibleInterval to store in N5 format.
      path: the directory to store the N5 data.
      dataset_name: the name of the img data.
      blockSize: an array or list with one element per dimension of the img,
                 specifying how to chop up the img into pieces.
      gzip_compression_level: defaults to 4; ranges from 0 (no compression) to 9 (maximum);
                              see java.util.zip.Deflater for details.
      n_threads: defaults to as many as CPU cores, for parallel writing. """
  N5Utils.save(img, N5FSWriter(path, GsonBuilder()),
               dataset_name, blockSize,
               GzipCompression(gzip_compression_level) if gzip_compression_level > 0 else RawCompression(),
               newFixedThreadPool(n_threads=n_threads, name="jython-n5writer"))
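A minimal call sketch (assuming the N5/imglib2 classes and the newFixedThreadPool helper referenced above are importable in Fiji's Jython interpreter; the path and dataset name are placeholders):

from net.imglib2.img.array import ArrayImgs

# An empty 8-bit test volume; any RandomAccessibleInterval works.
img = ArrayImgs.unsignedBytes(512, 512, 512)
writeN5(img, "/tmp/test.n5", "/volume", [128, 128, 64],
        gzip_compression_level=4, n_threads=0)  # 0: one thread per CPU core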
Example #2
def ensurePointMatches(filepaths, csvDir, params, n_adjacent):
    """ If a pointmatches csv file doesn't exist, will create it. """
    w = ParallelTasks("ensurePointMatches", exe=newFixedThreadPool(numCPUs()))
    exeload = newFixedThreadPool()
    try:
        count = 1
        for result in w.chunkConsume(
                numCPUs() * 2,
                pointmatchingTasks(filepaths, csvDir, params, n_adjacent,
                                   exeload)):
            if result:  # is False when CSV file already exists
                syncPrint("Completed %i/%i" %
                          (count, len(filepaths) * n_adjacent))
            count += 1
        syncPrint("Awaiting all remaining pointmatching tasks to finish.")
        w.awaitAll()
        syncPrint("Finished all pointmatching tasks.")
    except:
        print sys.exc_info()
    finally:
        exeload.shutdown()
        w.destroy()
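ParallelTasks and Task are the author's helpers; the chunkConsume call above submits a bounded number of tasks before starting to wait on their futures. A rough, self-contained sketch of that pattern with plain java.util.concurrent (illustrative only; the task here is a stand-in for a pointmatching task):

from java.util.concurrent import Executors, Callable

class SquareTask(Callable):
    """ Stand-in for a pointmatching task. """
    def __init__(self, i):
        self.i = i
    def call(self):
        return self.i * self.i

exe = Executors.newFixedThreadPool(4)
try:
    pending = []
    for i in xrange(100):
        pending.append(exe.submit(SquareTask(i)))
        if len(pending) >= 8:            # chunk size: at most 8 futures in flight
            print pending.pop(0).get()   # block on the oldest future
    for f in pending:                    # drain whatever remains
        print f.get()
finally:
    exe.shutdown()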
Example #3
 def preload(cachedCellImg, loader, block_size, filepaths):
     """
 Find which is the last cell index in the cache, identify to which block
 (given the blockSize[2] AKA Z dimension) that index belongs to,
 and concurrently load all cells (sections) that the Z dimension of the blockSize will need.
 If they are already loaded, these operations are insignificant.
 """
     exe = newFixedThreadPool(n_threads=min(block_size[2], numCPUs()),
                              name="preloader")
     try:
         # The SoftRefLoaderCache.map is a ConcurrentHashMap with Long keys, aka numbers
         cache = cachedCellImg.getCache()
         f1 = cache.getClass().getDeclaredField(
             "cache")  # LoaderCacheAsCacheAdapter.cache
         f1.setAccessible(True)
         softCache = f1.get(cache)
         cache = None
         f2 = softCache.getClass().getDeclaredField(
             "map")  # SoftRefLoaderCache.map
         f2.setAccessible(True)
         keys = sorted(f2.get(softCache).keySet())
         if 0 == len(keys):
             return
         first = keys[-1] - (keys[-1] % block_size[2])
         last = min(len(filepaths), first + block_size[2]) - 1
         keys = None
         msg = "Preloading %i-%i" % (first, last)
         futures = []
         for index in xrange(first, last + 1):
             futures.append(
                 exe.submit(TimeItTask(softCache.get, index, loader)))
         softCache = None
         # Wait for all
         count = 1
         while len(futures) > 0:
             r, t = futures.pop(0).get()
             # t in milliseconds
             if t > 500:
                 if msg:
                     syncPrint(msg)
                     msg = None
                 syncPrint("preloaded index %i in %f ms" %
                           (first + count, t))
             count += 1
         if not msg:  # msg was printed
             syncPrint("Completed preloading %i-%i" %
                       (first, first + block_size[2] - 1))
     except:
         syncPrint(sys.exc_info())
     finally:
         exe.shutdown()
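The two getDeclaredField calls above reach into private fields of the imglib2-cache classes. The same reflection idiom, reduced to a self-contained sketch on a standard JDK class (the field name "size" belongs to java.util.ArrayList, not to the cache; this works as-is on the Java 8 that Fiji ships, newer JVMs may need the module opened):

from java.util import ArrayList

lst = ArrayList()
lst.add("a")
f = lst.getClass().getDeclaredField("size")  # a private int field of ArrayList
f.setAccessible(True)                        # bypass the access check
print f.get(lst)                             # prints 1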
Example #4
 def __init__(self,
              filepaths,
              loadImg,
              matrices,
              img_dimensions,
              cell_dimensions,
              interval,
              preload=None):
     self.filepaths = filepaths
     self.loadImg = loadImg  # function to load images
     self.matrices = matrices
     self.img_dimensions = img_dimensions
     self.cell_dimensions = cell_dimensions  # x,y must match dims of interval
     self.interval = interval  # when smaller than the image, will crop
     self.cache = SoftMemoize(partial(TranslatedSectionGet.makeCell, self),
                              maxsize=256)
     # BEWARE: native memory leak if the thread pool is not shut down
     self.exe = newFixedThreadPool(-1) if preload is not None else None
     self.preload = preload
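Given the BEWARE note on the executor, an instance of this class needs an explicit tear-down; an illustrative companion method (not part of the original snippet, shown only to make the ownership of self.exe explicit):

 def destroy(self):
     # Release the thread pool explicitly: Java executors keep native threads
     # alive and are not reclaimed by garbage collection alone.
     if self.exe is not None:
         self.exe.shutdownNow()
         self.exe = None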
Example #5
def registeredView(img_filenames,
                   img_loader,
                   getCalibration,
                   csv_dir,
                   modelclass,
                   params,
                   exe=None):
    """ img_filenames: a list of file names
      csv_dir: directory for CSV files
      exe: an ExecutorService for concurrent execution of tasks
      params: dictionary of parameters
      returns a stack view of all registered images, e.g. 3D volumes stacked into a 4D volume. """
    original_exe = exe
    if not exe:
        exe = newFixedThreadPool()
    try:
        matrices = computeForwardTransforms(img_filenames, img_loader,
                                            getCalibration, csv_dir, exe,
                                            modelclass, params)
        affines = asBackwardConcatTransforms(matrices)
        #
        for i, affine in enumerate(affines):
            matrix = affine.getRowPackedCopy()
            print i, "matrix: [", matrix[0:4]
            print "           ", matrix[4:8]
            print "           ", matrix[8:12], "]"
        #
        # TODO replace with a lazy loader
        images = [
            img_loader.load(img_filename) for img_filename in img_filenames
        ]
        registered = Views.stack([
            viewTransformed(img, getCalibration(img_filename),
                            affine) for img, img_filename, affine in izip(
                                images, img_filenames, affines)
        ])
        return registered
    finally:
        if not original_exe:
            exe.shutdownNow()
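The loop above prints each affine as three rows of four values, i.e. the row-packed layout of a 3D affine. A self-contained sketch of that layout with imglib2's AffineTransform3D, which these snippets already rely on (the values are arbitrary):

from net.imglib2.realtransform import AffineTransform3D
from jarray import array, zeros

affine = AffineTransform3D()
affine.set(1.0, 0.0, 0.0, 10.0,   # x' = x + 10
           0.0, 1.0, 0.0, 20.0,   # y' = y + 20
           0.0, 0.0, 1.0, 30.0)   # z' = z + 30
print affine.getRowPackedCopy()    # the 12 row-packed values, as printed in the loop above
source = array([5.0, 5.0, 5.0], 'd')
target = zeros(3, 'd')
affine.apply(source, target)
print list(target)                 # [15.0, 25.0, 35.0]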
Example #6
def export8bitN5(
        filepaths,
        loadFn,
        img_dimensions,
        matrices,
        name,
        exportDir,
        interval,
        gzip_compression=6,
        invert=True,
        CLAHE_params=[400, 256, 3.0],
        n5_threads=0,  # 0 means as many as CPU cores
        block_size=[128, 128, 128]):
    """
  Export into an N5 volume, in parallel, in 8-bit.

  filepaths: the ordered list of filepaths, one per serial section.
  loadFn: a function to load a filepath into an ImagePlus.
  name: name to assign to the N5 volume.
  matrices: the list of transformation matrices (each one is an array), one per section
  exportDir: the directory into which to save the N5 volume.
  interval: for cropping.
  gzip_compression: defaults to 6 as suggested by Saalfeld. 0 means no compression.
  invert: defaults to True (necessary for FIBSEM); whether to invert the images upon loading.
  CLAHE_params: defaults to [400, 256, 3.0]. If not None, a list of the 3 parameters (blockRadius, n_bins, slope) for the CLAHE filter applied to each image.
  n5_threads: defaults to 0, meaning as many as CPU cores.
  block_size: defaults to 128x128x128 px. A list of 3 integer numbers, the dimensions of each individual block.
  """

    dims = Intervals.dimensionsAsLongArray(interval)
    voldims = [dims[0], dims[1], len(filepaths)]
    cell_dimensions = [dims[0], dims[1], 1]

    def asNormalizedUnsignedByteArrayImg(interval, invert, blockRadius, n_bins,
                                         slope, matrices, index, imp):
        sp = imp.getProcessor()  # ShortProcessor
        # Crop to interval if needed
        x = interval.min(0)
        y = interval.min(1)
        width = interval.max(0) - interval.min(0) + 1
        height = interval.max(1) - interval.min(1) + 1
        if 0 != x or 0 != y or sp.getWidth() != width or sp.getHeight() != height:
            sp.setRoi(x, y, width, height)
            sp = sp.crop()

        if invert:
            sp.invert()

        # Far less memory required than NormalizeLocalContrast, and faster.
        CLAHE.run(ImagePlus("", sp), blockRadius, n_bins, slope, None)
        minimum, maximum = autoAdjust(sp)

        # Transform and convert image to 8-bit, mapping to display range
        img = ArrayImgs.unsignedShorts(
            sp.getPixels(), [sp.getWidth(), sp.getHeight()])
        sp = None
        imp = None
        # Must use linear interpolation for subpixel precision
        affine = AffineTransform2D()
        affine.set(matrices[index])
        imgI = Views.interpolate(Views.extendZero(img),
                                 NLinearInterpolatorFactory())
        imgA = RealViews.transform(imgI, affine)
        imgT = Views.zeroMin(Views.interval(imgA, img))
        # Convert to 8-bit
        imgMinMax = convert2(imgT,
                             RealUnsignedByteConverter(minimum, maximum),
                             UnsignedByteType,
                             randomAccessible=False)  # use IterableInterval
        aimg = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img))
        # ImgUtil copies multi-threaded, which is not appropriate here as there are many other images being copied too
        #ImgUtil.copy(ImgView.wrap(imgMinMax, aimg.factory()), aimg)

        # Single-threaded copy
        copier = createBiConsumerTypeSet(UnsignedByteType)
        LoopBuilder.setImages(imgMinMax, aimg).forEachPixel(copier)

        img = imgI = imgA = imgMinMax = imgT = None
        return aimg

    blockRadius, n_bins, slope = CLAHE_params

    # A CacheLoader that interprets the list of filepaths as a 3D volume: a stack of 2D slices
    loader = SectionCellLoader(
        filepaths,
        asArrayImg=partial(asNormalizedUnsignedByteArrayImg, interval, invert,
                           blockRadius, n_bins, slope, matrices),
        loadFn=loadFn)

    # How to preload block_size[2] files at a time? Or at least as many as numCPUs()?
    # One possibility is to query the SoftRefLoaderCache.map for its entries, using a ScheduledExecutorService,
    # and preload sections ahead for the whole blockSize[2] dimension.

    cachedCellImg = lazyCachedCellImg(loader, voldims, cell_dimensions,
                                      UnsignedByteType, BYTE)

    exe_preloader = newFixedThreadPool(
        n_threads=min(block_size[2], n5_threads if n5_threads > 0 else numCPUs()),
        name="preloader")

    def preload(cachedCellImg, loader, block_size, filepaths, exe):
        """
    Find which is the last cell index in the cache, identify to which block
    (given the blockSize[2] AKA Z dimension) that index belongs to,
    and concurrently load all cells (sections) that the Z dimension of the blockSize will need.
    If they are already loaded, these operations are insignificant.
    """
        try:
            # The SoftRefLoaderCache.map is a ConcurrentHashMap with Long keys, aka numbers
            cache = cachedCellImg.getCache()
            f1 = cache.getClass().getDeclaredField(
                "cache")  # LoaderCacheAsCacheAdapter.cache
            f1.setAccessible(True)
            softCache = f1.get(cache)
            cache = None
            f2 = softCache.getClass().getDeclaredField(
                "map")  # SoftRefLoaderCache.map
            f2.setAccessible(True)
            keys = sorted(f2.get(softCache).keySet())
            if 0 == len(keys):
                return
            first = max(0, keys[-1] - (keys[-1] % block_size[2]))
            last = min(len(filepaths), first + block_size[2]) - 1
            keys = None
            syncPrintQ("### Preloading %i-%i ###" % (first, last))
            futures = []
            for index in xrange(first, last + 1):
                futures.append(
                    exe.submit(TimeItTask(softCache.get, index, loader)))
            softCache = None
            # Wait for all
            loaded_any = False
            count = 0
            while len(futures) > 0:
                r, t = futures.pop(0).get()  # waits for the image to load
                if t > 1000:  # in milliseconds; less than this is almost surely a cache hit, more a cache miss and reload
                    loaded_any = True
                r = None
                # t in milliseconds
                syncPrintQ("preloaded index %i in %f ms" % (first + count, t))
                count += 1
            if not loaded_any:
                syncPrintQ("Completed preloading %i-%i" %
                           (first, first + block_size[2] - 1))
        except:
            syncPrintQ(sys.exc_info())

    preloader = Executors.newSingleThreadScheduledExecutor()
    preloader.scheduleWithFixedDelay(
        RunTask(preload, cachedCellImg, loader, block_size, filepaths,
                exe_preloader), 10, 60, TimeUnit.SECONDS)

    try:
        syncPrint("N5 directory: " + exportDir + "\nN5 dataset name: " + name +
                  "\nN5 blockSize: " + str(block_size))
        writeN5(cachedCellImg,
                exportDir,
                name,
                block_size,
                gzip_compression_level=gzip_compression,
                n_threads=n5_threads)
    finally:
        preloader.shutdown()
        exe_preloader.shutdown()
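For reference, the conversion above (autoAdjust followed by RealUnsignedByteConverter(minimum, maximum)) amounts, conceptually, to clamping each 16-bit value to the display range and rescaling it linearly into 0..255. A plain-Python sketch of that per-pixel mapping (not the converter's actual code):

def to_8bit(v, minimum, maximum):
    # Clamp to [minimum, maximum], then rescale linearly to [0, 255].
    if v <= minimum:
        return 0
    if v >= maximum:
        return 255
    return int(round((v - minimum) * 255.0 / (maximum - minimum)))

print to_8bit(100, 100, 2000)   # 0
print to_8bit(2000, 100, 2000)  # 255
print to_8bit(480, 100, 2000)   # 51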
Example #7
def ensurePointMatches(filepaths, csvDir, params, paramsSIFT, n_adjacent,
                       properties):
    """ If a pointmatches csv file doesn't exist, will create it. """
    w = ParallelTasks("ensurePointMatches",
                      exe=newFixedThreadPool(properties["n_threads"]))
    exeload = newFixedThreadPool()
    try:
        if properties.get("use_SIFT", False):
            syncPrintQ("use_SIFT is True")
            # Pre-extract SIFT features for all images first
            # ensureSIFTFeatures returns the features list, so each Future would hold it in memory: we can't hold onto all of them,
            # therefore consume the tasks in chunks:
            chunk_size = properties["n_threads"] * 2
            count = 1
            for result in w.chunkConsume(
                    chunk_size,  # tasks to submit before starting to wait for futures
                (Task(ensureSIFTFeatures,
                      filepath,
                      paramsSIFT,
                      properties,
                      csvDir,
                      validateByFileExists=properties.get(
                          "SIFT_validateByFileExists"))
                 for filepath in filepaths)):
                count += 1
                if 0 == count % chunk_size:
                    syncPrintQ(
                        "Completed extracting or validating SIFT features for %i images."
                        % count)
            w.awaitAll()
            syncPrintQ(
                "Completed extracting or validating SIFT features for all images."
            )
            # Compute pointmatches across adjacent sections
            count = 1
            for result in w.chunkConsume(
                    chunk_size,
                    generateSIFTMatches(filepaths, n_adjacent, params,
                                        paramsSIFT, properties, csvDir)):
                count += 1
                syncPrintQ("Completed SIFT pointmatches %i/%i" %
                           (count, len(filepaths) * n_adjacent))
        else:
            # Use blockmatches
            syncPrintQ("using blockmatches")
            loadFPMem = SoftMemoize(
                lambda path: loadFloatProcessor(path, params, paramsSIFT, scale=True),
                maxsize=properties["n_threads"] + n_adjacent)
            count = 1
            for result in w.chunkConsume(
                    properties["n_threads"],
                    pointmatchingTasks(filepaths, csvDir, params, paramsSIFT,
                                       n_adjacent, exeload, properties,
                                       loadFPMem)):
                if result:  # is False when CSV file already exists
                    syncPrintQ("Completed %i/%i" %
                               (count, len(filepaths) * n_adjacent))
                count += 1
            syncPrintQ("Awaiting all remaining pointmatching tasks to finish.")
        w.awaitAll()
        syncPrintQ("Finished all pointmatching tasks.")
    except:
        printException()
    finally:
        exeload.shutdown()
        w.destroy()
Example #8
  def run():
    exe = newFixedThreadPool(min(len(cropped), numCPUs()))
    try:
      # Dummy for in-RAM reading of isotropic images
      img_filenames = [str(i) for i in xrange(len(cropped))]
      loader = InRAMLoader(dict(zip(img_filenames, cropped)))
      getCalibration = params.get("getCalibration", None)
      if not getCalibration:
        getCalibration = lambda img: [1.0] * cropped[0].numDimensions()
      csv_dir = params["csv_dir"]
      modelclass = params["modelclass"]
      # Matrices describing the registration on the basis of the cropped images
      matrices = computeOptimizedTransforms(img_filenames, loader, getCalibration,
                                            csv_dir, exe, modelclass, params)
      # Store outside, so they can be e.g. printed, and used beyond here
      for matrix, affine in zip(matrices, affines):
        affine.set(*matrix)

      # Combine the transforms: scaling (by calibration)
      #                         + the coarse registration (i.e. manual translations)
      #                         + the translation introduced by the ROI cropping
      #                         + the affine matrices computed above over the cropped images.
      coarse_matrices = []
      for coarse_affine in coarse_affines:
        matrix = zeros(12, 'd')
        coarse_affine.toArray(matrix)
        coarse_matrices.append(matrix)

      # NOTE: both coarse_matrices and matrices are from the camera X to camera 0. No need to invert them.
      # NOTE: uses identity calibration because the coarse_matrices already include the calibration scaling to isotropy
      transforms = mergeTransforms([1.0, 1.0, 1.0], coarse_matrices, [minC, maxC], matrices, invert2=False)

      print "calibration:", [1.0, 1.0, 1.0]
      print "cmTransforms:\n    %s\n    %s\n    %s\n    %s" % tuple(str(m) for m in coarse_matrices)
      print "ROI", [minC, maxC]
      print "fineTransformsPostROICrop:\n    %s\n    %s\n    %s\n    %s" % tuple(str(m) for m in matrices)
      print "invert2:", False
      
      # Show registered images
      registered = [transformedView(img, transform, interval=cropped[0])
                    for img, transform in izip(original_images, transforms)]
      registered_imp = showAsStack(registered, title="Registered with %s" % params["modelclass"].getSimpleName())
      registered_imp.setDisplayRange(cropped_imp.getDisplayRangeMin(), cropped_imp.getDisplayRangeMax())

      """
      # TEST: same as above, but without merging the transforms. WORKS, same result
      # Copy into ArrayImg, otherwise they are rather slow to browse
      def copy(img1, affine):
        # Copy in two steps. Otherwise the nearest neighbor interpolation on top of another
        # nearest neighbor interpolation takes a huge amount of time
        dimensions = Intervals.dimensionsAsLongArray(img1)
        aimg1 = ArrayImgs.unsignedShorts(dimensions)
        ImgUtil.copy(ImgView.wrap(img1, aimg1.factory()), aimg1)
        img2 = transformedView(aimg1, affine)
        aimg2 = ArrayImgs.unsignedShorts(dimensions)
        ImgUtil.copy(ImgView.wrap(img2, aimg2.factory()), aimg2)
        return aimg2
      futures = [exe.submit(Task(copy, img, affine)) for img, affine in izip(cropped, affines)]
      aimgs = [f.get() for f in futures]
      showAsStack(aimgs, title="DEBUG Registered with %s" % params["modelclass"].getSimpleName())
      """
    except:
      print sys.exc_info()
    finally:
      exe.shutdown()
      SwingUtilities.invokeLater(lambda: run_button.setEnabled(True))
Example #9
def multiviewDeconvolution(images, blockSizes, PSF_kernels, n_iterations, lambda_val=0.0006, weights=None,
                           filterBlocksForContent=False, PSF_type=PSFTYPE.INDEPENDENT, exe=None, printFn=syncPrint):
  """
  Apply Bayesian-based multi-view deconvolution to the list of images,
  returning the deconvolved image. Uses Stephan Preibisch's library,
  currently available with the BigStitcher Fiji update site.

  images: a list of images, registered and all with the same dimensions.
  blockSizes: how to chop up the volume of each image for parallel processing.
             When None, a single block with the image dimensions is used,
             plus half of the transformed kernel dimensions for that view.
  PSF_kernels: the images containing the point spread function for each input image. Requirement: the dimensions must be an odd number.
  n_iterations: the number of iterations for the deconvolution. A number between 10 and 50 is desirable. The more iterations, the higher the computational cost.
  lambda_val: default is 0.0006 as recommended by Preibisch.
  weights: a list of FloatType images with the weight for every pixel. If None, then all pixels get a value of 1.
  filterBlocksForContent: whether to check before processing a block if the block has any data in it. Default is False.
  PSF_type: defaults to PSFTYPE.INDEPENDENT.
  exe: a thread pool for concurrent execution. If None, a new one is created, using as many threads as CPUs are available.
  printFn: the function to use for printing error messages. Defaults to syncPrint (thread-safe access to the built-in `print` function).

  Returns an imglib2 ArrayImg, or None if something went wrong.
  """

  mvd_exe = exe
  if not exe:
    mvd_exe = newFixedThreadPool() # as many threads as CPUs

  try:
    mvd_weights = weights
    if not weights:
      mvd_weights = repeat(Views.interval(ConstantRandomAccessible(FloatType(1), images[0].numDimensions()), FinalInterval(images[0])))

    for i, PSF_kernel in enumerate(PSF_kernels):
      for d in xrange(PSF_kernel.numDimensions()):
        if 0 == PSF_kernel.dimension(d) % 2:
          printFn("for image at index %i, PSF kernel dimension %i is not odd." % (i, d))
          return None

    if not blockSizes:
      # Whole image dimensions + half of the transformed PSF kernel dimensions
      kernel_max = int(max(PSF_kernel.dimension(d)
                           for PSF_kernel in PSF_kernels
                           for d in xrange(PSF_kernel.numDimensions())) * 2)
      syncPrint("kernel max dimension *2: %i" % kernel_max)
      blockSizes = []
      for image in images:
        blockSizes.append([image.dimension(d) + kernel_max
                           for d in xrange(image.numDimensions())])
        syncPrint("blockSize:" + str(blockSizes[-1]))

    cptf = createFactory(mvd_exe, lambda_val, blockSizes[0]) # TODO which blockSize to give here?
    filterBlocksForContent = False # Run once with True, none were removed
    dviews = [DeconView(mvd_exe, img, weight, PSF_kernel, PSF_type, blockSize, 1, filterBlocksForContent)
              for img, blockSize, weight, PSF_kernel in izip(images, blockSizes, mvd_weights, PSF_kernels)]
    decon = MultiViewDeconvolutionSeq(DeconViews(dviews, mvd_exe), n_iterations, PsiInitBlurredFusedFactory(), cptf, ArrayImgFactory(FloatType()))
    if not decon.initWasSuccessful():
      printFn("Something went wrong initializing MultiViewDeconvolution")
      return None
    else:
      decon.runIterations()
      return decon.getPSI()
  finally:
    # Only shut down the thread pool if it was created here
    if not exe:
      mvd_exe.shutdownNow()
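The loop over PSF_kernels above rejects any kernel with an even dimension. A quick, self-contained illustration of that check (ArrayImgs stands in for real PSF images; [19, 19, 25] matches the kernel dimensions read in the deconvolution example further below):

from net.imglib2.img.array import ArrayImgs

for dims in ([19, 19, 25], [16, 16, 25]):
    k = ArrayImgs.floats(*dims)
    odd = all(1 == k.dimension(d) % 2 for d in xrange(k.numDimensions()))
    print dims, "all dimensions odd:", odd   # True for the first, False for the second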
Example #10
def registerDeconvolvedTimePoints(targetDir,
                                  params,
                                  modelclass,
                                  exe=None,
                                  verbose=True,
                                  subrange=None):
    """ Can only be run after running deconvolveTimePoints, because it
      expects deconvolved images to exist under <targetDir>/deconvolved/,
      with a name pattern like: TM\d+_CM0\d-CM0\d-deconvolved.zip

      Tests first whether the files exist; if not, execution stops.

      Will write the features, pointmatches and registration affine matrices
      into a csv folder under targetDir.

      If a CSV file with the affine transform matrices exists, it will read them out
      and provide the 4D img right away.
      Else, it will check which files are missing their features and pointmatches as CSV files,
      create them, and ultimately create the CSV file with the affine transform matrices,
      and then provide the 4D img.

      targetDir: the directory containing the deconvolved images.
      params: for feature extraction and registration.
      modelclass: the model to use, e.g. Translation3D, AffineTransform3D.
      exe: the ExecutorService to use (optional).
      subrange: the range of time point indices to process, as enumerated
                by the folder name, i.e. the number captured by /TM(\d+)/
      
      Returns an imglib2 4D img with the registered deconvolved 3D stacks."""

    deconvolvedDir = os.path.join(targetDir, "deconvolved")

    # A folder for features, pointmatches and matrices in CSV format
    csv_dir = os.path.join(deconvolvedDir, "csvs")
    if not os.path.exists(csv_dir):
        os.mkdir(csv_dir)

    # A datastructure to represent the timepoints, each with two filenames
    timepoint_views = defaultdict(defaultdict)
    pattern = re.compile("^TM(\d+)_(CM0\d-CM0\d)-deconvolved.zip$")
    for filename in sorted(os.listdir(deconvolvedDir)):
        m = re.match(pattern, filename)
        if m:
            stime, view = m.groups()
            timepoint_views[int(stime)][view] = filename

    # Filter by specified subrange, if any
    if subrange:
        subrange = set(subrange)
        # Iterate over a list copy of the keys, so timepoints can be removed from the dict
        for time in timepoint_views.keys():
            if time not in subrange:
                del timepoint_views[time]

    # Register only the view CM00-CM01, given that CM02-CM03 has the same transform
    matrices_name = "matrices-%s" % modelclass.getSimpleName()
    matrices = None
    if os.path.exists(os.path.join(csv_dir, matrices_name + ".csv")):
        matrices = loadMatrices(matrices_name, csv_dir)
        if len(matrices) != len(timepoint_views):
            syncPrint(
                "Ignoring existing matrices CSV file: length (%i) doesn't match with expected number of timepoints (%i)"
                % (len(matrices), len(timepoint_views)))
            matrices = None
    if not matrices:
        original_exe = exe
        if not exe:
            exe = newFixedThreadPool()
        try:
            # Deconvolved images are isotropic
            def getCalibration(img_filepath):
                return [1, 1, 1]

            timepoints = []  # sorted
            filepaths = []  # sorted
            for timepoint, views in sorted(timepoint_views.iteritems(),
                                           key=itemgetter(0)):
                timepoints.append(timepoint)
                filepaths.append(
                    os.path.join(deconvolvedDir, views["CM00-CM01"]))
            #
            #matrices_fwd = computeForwardTransforms(filepaths, ImageJLoader(), getCalibration,
            #                                        csv_dir, exe, modelclass, params, exe_shutdown=False)
            #matrices = [affine.getRowPackedCopy() for affine in asBackwardConcatTransforms(matrices_fwd)]
            matrices = computeOptimizedTransforms(filepaths,
                                                  ImageJLoader(),
                                                  getCalibration,
                                                  csv_dir,
                                                  exe,
                                                  modelclass,
                                                  params,
                                                  verbose=verbose)
            saveMatrices(matrices_name, matrices, csv_dir)
        finally:
            if not original_exe:
                exe.shutdownNow()  # Was created new

    # Convert matrices into twice as many affine transforms
    affines = []
    for matrix in matrices:
        aff = AffineTransform3D()
        aff.set(*matrix)
        affines.append(aff)
        affines.append(aff)  # twice: also for the CM02-CM03

    # Show the registered deconvolved series as a 4D volume.
    filepaths = []
    for timepoint in sorted(timepoint_views.iterkeys()):
        views = timepoint_views.get(timepoint)
        for view_name in sorted(views.keys()):  # ["CM00-CM01", "CM02-CM03"]
            filepaths.append(os.path.join(deconvolvedDir, views[view_name]))

    img = Load.lazyStack(
        filepaths,
        TransformedLoader(ImageJLoader(),
                          dict(izip(filepaths, affines)),
                          asImg=True))
    return img
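A quick, self-contained check of the filename pattern used above (the filenames are made up):

import re

pattern = re.compile("^TM(\d+)_(CM0\d-CM0\d)-deconvolved.zip$")
for name in ["TM000123_CM00-CM01-deconvolved.zip", "TM000123_CM02-CM03-deconvolved.zip", "notes.txt"]:
    m = re.match(pattern, name)
    print name, "->", m.groups() if m else None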
Example #11
def deconvolveTimePoints(srcDir,
                         targetDir,
                         kernel_filepath,
                         calibration,
                         cameraTransformations,
                         fineTransformsPostROICrop,
                         params,
                         roi,
                         subrange=None,
                         camera_groups=((0, 1), (2, 3)),
                         fine_fwd=False,
                         n_threads=0):  # 0 means all
    """
     Main program entry point.
     For each time point folder TM\d+, find the KLB files of the 4 cameras,
     then register them all onto the camera at index 0 (CM00), deconvolve CM00+CM01 and CM02+CM03,
     and store these two images in corresponding TM\d+ folders under targetDir.

     Assumes that each camera view has the same dimensions in each time point folder.
     A camera view may have dimensions different from those of the other cameras.

     Can be run as many times as necessary. Intermediate computations are saved
     as csv files (features, pointmatches and transformation matrices), and 
     the deconvolved images as well, into folder targetDir/deconvolved/ with
     a name pattern like TM\d+_CM0\d-CM0\d-deconvolved.zip
     
     srcDir: file path to a directory with TM\d+ subdirectories, one per time point.
     targetDir: file path to a directory for storing deconvolved images
                and CSV files with features, point matches and transformation matrices.
     kernel_filepath: file path to the 3D image of the point spread function (PSF),
                      which can be computed from fluorescent beads with the BigStitcher functions
                      and which must have odd dimensions.
     calibration: the array of [x, y, z] dimensions.
     cameraTransformations: a function that returns a map of camera index vs the 12-value 3D affine matrix describing
                            the transform to register the camera view onto the camera at index 0.
     fineTransformsPostROICrop: a list of the transform matrices to be applied after both the coarse transform and the ROI crop.
     params: a dictionary with all the necessary parameters for feature extraction, registration and deconvolution.
     roi: the min and max coordinates for cropping the coarsely registered volumes prior to registration and deconvolution.
     subrange: defaults to None. Can be a list specifying the indices of time points to deconvolve.
     camera_groups: the camera views to fuse and deconvolve together. Defaults to two: ((0, 1), (2, 3))
     fine_fwd: whether the fineTransformsPostROICrop were computed all-to-all, which optimizes the pose and produces direct transforms,
               or, when False, the fineTransformsPostROICrop were computed from 0 to 1, 0 to 2, and 0 to 3, so they are inverted.
     n_threads: number of threads to use. Zero (default) means as many as possible.
  """
    kernel = readFloats(kernel_filepath, [19, 19, 25], header=434)
    klb_loader = KLBLoader()

    def getCalibration(img_filename):
        return calibration

    # Regular expression pattern describing KLB files to include
    pattern = re.compile("^SPM00_TM\d+_CM(\d+)_CHN0[01]\.klb$")

    # Find all time point folders with pattern TM\d{6} (a TM followed by 6 digits)
    def iterTMs():
        """ Return a generator over dicts of 4 KLB file paths for each time point. """
        for dirname in sorted(os.listdir(srcDir)):
            if not dirname.startswith("TM00"):
                continue
            filepaths = {}
            tm_dir = os.path.join(srcDir, dirname)
            for filename in sorted(os.listdir(tm_dir)):
                r = re.match(pattern, filename)
                if r:
                    camera_index = int(r.groups()[0])
                    filepaths[camera_index] = os.path.join(tm_dir, filename)
            yield filepaths

    if subrange:
        indices = set(subrange)
        TMs = [tm for i, tm in enumerate(iterTMs()) if i in indices]
    else:
        TMs = list(iterTMs())

    # Validate folders
    for i, filepaths in enumerate(TMs):
        if 4 != len(filepaths):
            print "Time point %i has problems: found %i KLB files in it instead of 4." % (
                i, len(filepaths))
            print "Address the issues and rerun."
            return

    print "Will process these timepoints:",
    for i, TM in enumerate(TMs):
        print i
        pprint(TM)

    # All OK, submit all timepoint folders for registration and deconvolution

    # dimensions: all images from each camera have the same dimensions
    dimensions = [
        Intervals.dimensionsAsLongArray(klb_loader.get(filepath))
        for index, filepath in sorted(TMs[0].items(), key=itemgetter(0))
    ]

    cmTransforms = cameraTransformations(dimensions[0], dimensions[1],
                                         dimensions[2], dimensions[3],
                                         calibration)

    # Transforms apply to all time points equally
    #   If fine_fwd, the fine transform was forward.
    #   Otherwise, it was from CM00 to e.g. CM01, so backwards for CM01, needing an inversion.
    transforms = mergeTransforms(
        calibration, [cmTransforms[i] for i in sorted(cmTransforms.keys())],
        roi,
        fineTransformsPostROICrop,
        invert2=not fine_fwd)

    # Create target folder for storing deconvolved images
    if not os.path.exists(os.path.join(targetDir, "deconvolved")):
        os.mkdir(os.path.join(targetDir, "deconvolved"))

    # Transform kernel to each view
    matrices = fineTransformsPostROICrop

    # For the PSF kernel, transforms without the scaling up to isotropy
    # No need to account for the translation: the transformPSFKernelToView keeps the center point centered.
    PSF_kernels = [
        transformPSFKernelToView(kernel, affine3D(cmTransforms[i]))
        for i in xrange(4)
    ]
    PSF_kernels = [
        transformPSFKernelToView(k,
                                 affine3D(matrix).inverse())
        for k, matrix in izip(PSF_kernels, matrices)
    ]
    # TODO: if kernels are not ArrayImg, they should be made be.
    print "PSF_kernel[0]:", PSF_kernels[0], type(PSF_kernels[0])

    # DEBUG: write the kernelA
    for index in [0, 1, 2, 3]:
        writeZip(PSF_kernels[index],
                 "/tmp/kernel" + str(index) + ".zip",
                 title="kernel" + str(index)).flush()

    # A converter from FloatType to UnsignedShortType
    output_converter = createConverter(FloatType, UnsignedShortType)

    target_interval = FinalInterval(
        [0, 0, 0], [maxC - minC for minC, maxC in izip(roi[0], roi[1])])

    exe = newFixedThreadPool(n_threads=n_threads)
    try:
        # Submit for registration + deconvolution
        # The registration uses 2 parallel threads, and deconvolution all possible available threads.
        # Cannot invoke more than one time point at a time because the deconvolution requires a lot of memory.
        for i, filepaths in enumerate(TMs):
            if Thread.currentThread().isInterrupted(): break
            syncPrint("Deconvolving time point %i with files:\n  %s" %
                      (i, "\n  ".join(sorted(filepaths.itervalues()))))
            deconvolveTimePoint(filepaths,
                                targetDir,
                                klb_loader,
                                transforms,
                                target_interval,
                                params,
                                PSF_kernels,
                                exe,
                                output_converter,
                                camera_groups=camera_groups)
    finally:
        # Stop accepting new tasks, but let currently executing tasks complete.
        exe.shutdown()
        # Wait until the last task (writing the last file) completes execution.
        exe.awaitTermination(5, TimeUnit.MINUTES)