Example #1
  def run():
    exe = newFixedThreadPool(min(len(cropped), numCPUs()))
    try:
      # Dummy filenames for in-RAM loading of the isotropic images
      img_filenames = [str(i) for i in xrange(len(cropped))]
      loader = InRAMLoader(dict(zip(img_filenames, cropped)))
      getCalibration = params.get("getCalibration", None)
      if not getCalibration:
        getCalibration = lambda img: [1.0] * cropped[0].numDimensions()
      csv_dir = params["csv_dir"]
      modelclass = params["modelclass"]
      # Matrices describing the registration on the basis of the cropped images
      matrices = computeOptimizedTransforms(img_filenames, loader, getCalibration,
                                            csv_dir, exe, modelclass, params)
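      # Each matrix is a 12-element, row-major 3x4 affine, one per cropped view,
      # mapping that view onto the first one (camera 0).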
      # Store the matrices in the outer-scope affines, so they can be printed and reused elsewhere
      for matrix, affine in zip(matrices, affines):
        affine.set(*matrix)

      # Combine the transforms: scaling (by calibration)
      #                         + the coarse registration (i.e. manual translations)
      #                         + the translation introduced by the ROI cropping
      #                         + the affine matrices computed above over the cropped images.
      coarse_matrices = []
      for coarse_affine in coarse_affines:
        matrix = zeros(12, 'd')
        coarse_affine.toArray(matrix)
        coarse_matrices.append(matrix)

      # NOTE: both coarse_matrices and matrices are from the camera X to camera 0. No need to invert them.
      # NOTE: uses identity calibration because the coarse_matrices already include the calibration scaling to isotropy
      transforms = mergeTransforms([1.0, 1.0, 1.0], coarse_matrices, [minC, maxC], matrices, invert2=False)
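      # transforms: one concatenated affine per view, taking each original (uncropped,
      # anisotropic) image directly into the registered, cropped space.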

      print "calibration:", [1.0, 1.0, 1.0]
      print "cmTransforms:\n    %s\n    %s\n    %s\n    %s" % tuple(str(m) for m in coarse_matrices)
      print "ROI", [minC, maxC]
      print "fineTransformsPostROICrop:\n    %s\n    %s\n    %s\n    %s" % tuple(str(m) for m in matrices)
      print "invert2:", False
      
      # Show registered images
      registered = [transformedView(img, transform, interval=cropped[0])
                    for img, transform in izip(original_images, transforms)]
      registered_imp = showAsStack(registered, title="Registered with %s" % params["modelclass"].getSimpleName())
      registered_imp.setDisplayRange(cropped_imp.getDisplayRangeMin(), cropped_imp.getDisplayRangeMax())

      """
      # TEST: same as above, but without merging the transforms. WORKS, same result
      # Copy into ArrayImg, otherwise they are rather slow to browse
      def copy(img1, affine):
        # Copy in two steps. Otherwise the nearest neighbor interpolation on top of another
        # nearest neighbor interpolation takes a huge amount of time
        dimensions = Intervals.dimensionsAsLongArray(img1)
        aimg1 = ArrayImgs.unsignedShorts(dimensions)
        ImgUtil.copy(ImgView.wrap(img1, aimg1.factory()), aimg1)
        img2 = transformedView(aimg1, affine)
        aimg2 = ArrayImgs.unsignedShorts(dimensions)
        ImgUtil.copy(ImgView.wrap(img2, aimg2.factory()), aimg2)
        return aimg2
      futures = [exe.submit(Task(copy, img, affine)) for img, affine in izip(cropped, affines)]
      aimgs = [f.get() for f in futures]
      showAsStack(aimgs, title="DEBUG Registered with %s" % params["modelclass"].getSimpleName())
      """
    except:
      print sys.exc_info()
    finally:
      exe.shutdown()
      SwingUtilities.invokeLater(lambda: run_button.setEnabled(True))
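A minimal sketch, not part of the original script, of how the run() closure above might be wired to the run_button it re-enables: the RunOnClick class and its name are assumptions, run_button is assumed to be a JButton, and run() and run_button must be visible in the scope where this wiring is done. Clicking the button disables it and launches run() on a background thread so the Swing event dispatch thread stays responsive; run()'s finally block re-enables the button when it finishes.

from java.awt.event import ActionListener
from java.lang import Thread

class RunOnClick(ActionListener):
  def actionPerformed(self, event):
    run_button.setEnabled(False)  # prevent re-entry while a registration is running
    # Jython coerces the Python function run to a java.lang.Runnable
    Thread(run, "register-cropped-views").start()

run_button.addActionListener(RunOnClick())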
Example #2
def deconvolveTimePoints(srcDir,
                         targetDir,
                         kernel_filepath,
                         calibration,
                         cameraTransformations,
                         fineTransformsPostROICrop,
                         params,
                         roi,
                         subrange=None,
                         camera_groups=((0, 1), (2, 3)),
                         fine_fwd=False,
                         n_threads=0):  # 0 means all
    """
     Main program entry point.
     For each time point folder TM\d+, find the KLB files of the 4 cameras,
     then register them all to camera CM00, and deconvolve CM00+CM01 and CM02+CM03,
     and store these two images in corresponding TM\d+ folders under targetDir.

     Assumes that each camera view has the same dimensions in each time point folder.
     A camera view may have dimensions different from those of the other cameras.

     Can be run as many times as necessary. Intermediate computations are saved
     as csv files (features, pointmatches and transformation matrices), and 
     the deconvolved images as well, into folder targetDir/deconvolved/ with
     a name pattern like TM\d+_CM0\d_CM0\d-deconvolved.zip
     
     srcDir: file path to a directory with TM\d+ subdirectories, one per time point.
     targetDir: file path to a directory for storing deconvolved images
                and CSV files with features, point matches and transformation matrices.
     kernel_filepath: file path to the 3D image of the point spread function (PSF),
                      which can be computed from fluorescent beads with the BigStitcher functions
                      and which must have odd dimensions.
     calibration: the [x, y, z] voxel calibration (pixel dimensions), used to scale the camera views to isotropy.
     cameraTransformations: a function that returns a map of camera index to the 12-element 3D affine matrix
                            describing the transform that registers that camera view onto the camera at index 0.
     fineTransformsPostROICrop: a list of the transform matrices to be applied after both the coarse transform and the ROI crop.
     params: a dictionary with all the necessary parameters for feature extraction, registration and deconvolution.
     roi: the min and max coordinates for cropping the coarsely registered volumes prior to registration and deconvolution.
     subrange: defaults to None. Can be a list specifying the indices of time points to deconvolve.
     camera_groups: the camera views to fuse and deconvolve together. Defaults to two: ((0, 1), (2, 3))
     fine_fwd: True if the fineTransformsPostROICrop were computed all-to-all (optimizing the pose and
               yielding direct, forward transforms); False if they were computed from view 0 to views 1, 2 and 3,
               in which case they are inverted before use.
     n_threads: number of threads to use. Zero (default) means as many as possible.
    """
    kernel = readFloats(kernel_filepath, [19, 19, 25], header=434)
    klb_loader = KLBLoader()

    def getCalibration(img_filename):
        return calibration

    # Regular expression pattern describing KLB files to include
    pattern = re.compile(r"^SPM00_TM\d+_CM(\d+)_CHN0[01]\.klb$")

    # Find all time point folders with pattern TM\d{6} (a TM followed by 6 digits)
    def iterTMs():
        """ Return a generator over dicts of 4 KLB file paths for each time point. """
        for dirname in sorted(os.listdir(srcDir)):
            if not dirname.startswith("TM00"):
                continue
            filepaths = {}
            tm_dir = os.path.join(srcDir, dirname)
            for filename in sorted(os.listdir(tm_dir)):
                r = re.match(pattern, filename)
                if r:
                    camera_index = int(r.groups()[0])
                    filepaths[camera_index] = os.path.join(tm_dir, filename)
            yield filepaths

    if subrange:
        indices = set(subrange)
        TMs = [tm for i, tm in enumerate(iterTMs()) if i in indices]
    else:
        TMs = list(iterTMs())

    # Validate folders
    for i, filepaths in enumerate(TMs):
        if 4 != len(filepaths):
            # tm_dir is local to iterTMs, so derive the folder name from the files that were found
            folder = os.path.dirname(filepaths.values()[0]) if filepaths else "of time point %i" % i
            print "Folder %s has problems: found %i KLB files in it instead of 4." % (
                folder, len(filepaths))
            print "Address the issues and rerun."
            return

    print "Will process these timepoints:",
    for i, TM in enumerate(TMs):
        print i
        pprint(TM)

    # All OK, submit all timepoint folders for registration and deconvolution

    # dimensions: read from the first time point only, since every camera view
    # has the same dimensions across all time points
    dimensions = [
        Intervals.dimensionsAsLongArray(klb_loader.get(filepath))
        for index, filepath in sorted(TMs[0].items(), key=itemgetter(0))
    ]

    cmTransforms = cameraTransformations(dimensions[0], dimensions[1],
                                         dimensions[2], dimensions[3],
                                         calibration)

    # Transforms apply to all time points equally
    #   If fine_fwd, the fine transform was forward.
    #   Otherwise, it was computed from CM00 to e.g. CM01, i.e. backwards for CM01, and needs to be inverted.
    transforms = mergeTransforms(
        calibration, [cmTransforms[i] for i in sorted(cmTransforms.keys())],
        roi,
        fineTransformsPostROICrop,
        invert2=not fine_fwd)

    # Create target folder for storing deconvolved images
    if not os.path.exists(os.path.join(targetDir, "deconvolved")):
        os.mkdir(os.path.join(targetDir, "deconvolved"))

    # Transform kernel to each view
    matrices = fineTransformsPostROICrop

    # For the PSF kernel, transforms without the scaling up to isotropy
    # No need to account for the translation: the transformPSFKernelToView keeps the center point centered.
    PSF_kernels = [
        transformPSFKernelToView(kernel, affine3D(cmTransforms[i]))
        for i in xrange(4)
    ]
    PSF_kernels = [
        transformPSFKernelToView(k,
                                 affine3D(matrix).inverse())
        for k, matrix in izip(PSF_kernels, matrices)
    ]
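    # The result: one PSF kernel per camera view, transformed by that view's coarse camera
    # transform and then by the inverse of its fine post-ROI-crop transform.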
    # TODO: if the kernels are not ArrayImg instances, they should be converted into ArrayImg.
    print "PSF_kernel[0]:", PSF_kernels[0], type(PSF_kernels[0])

    # DEBUG: write each transformed kernel to /tmp for inspection
    for index in [0, 1, 2, 3]:
        writeZip(PSF_kernels[index],
                 "/tmp/kernel" + str(index) + ".zip",
                 title="kernel" + str(index)).flush()

    # A converter from FloatType to UnsignedShortType
    output_converter = createConverter(FloatType, UnsignedShortType)

    target_interval = FinalInterval(
        [0, 0, 0], [maxC - minC for minC, maxC in izip(roi[0], roi[1])])
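    # target_interval: a zero-min interval spanning the ROI dimensions (maxC - minC per axis)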

    exe = newFixedThreadPool(n_threads=n_threads)
    try:
        # Submit each time point for registration + deconvolution.
        # The registration uses 2 parallel threads, and the deconvolution uses all available threads.
        # Only one time point is processed at a time, because the deconvolution requires a lot of memory.
        for i, filepaths in enumerate(TMs):
            if Thread.currentThread().isInterrupted(): break
            syncPrint("Deconvolving time point %i with files:\n  %s" %
                      (i, "\n  ".join(sorted(filepaths.itervalues()))))
            deconvolveTimePoint(filepaths,
                                targetDir,
                                klb_loader,
                                transforms,
                                target_interval,
                                params,
                                PSF_kernels,
                                exe,
                                output_converter,
                                camera_groups=camera_groups)
    finally:
        # Stop accepting new tasks, but let currently executing tasks complete
        exe.shutdown()
        # Wait until the last task (writing the last file) completes execution
        exe.awaitTermination(5, TimeUnit.MINUTES)
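A hypothetical invocation sketch, not taken from the original script: the file paths, the [x, y, z] calibration, the ROI and the identity matrices below are placeholders, the identityCameraTransformations helper is likewise hypothetical, and the params dictionary would have to be filled with the actual feature extraction, registration and deconvolution parameters before a real run.

# Placeholder cameraTransformations: identity 3x4 affines (12 values, row-major), one per camera index.
# The real function would compute the pose of cameras 1..3 relative to camera 0
# from the four view dimensions and the calibration.
def identityCameraTransformations(dims0, dims1, dims2, dims3, calibration):
    identity = [1.0, 0.0, 0.0, 0.0,
                0.0, 1.0, 0.0, 0.0,
                0.0, 0.0, 1.0, 0.0]
    return dict((i, identity[:]) for i in xrange(4))

deconvolveTimePoints("/path/to/timepoints/",        # srcDir with one TM\d+ folder per time point
                     "/path/to/output/",            # targetDir; deconvolved/ is created inside it
                     "/path/to/psf_kernel",         # 3D PSF image with odd dimensions
                     [0.4, 0.4, 2.0],               # placeholder [x, y, z] voxel calibration
                     identityCameraTransformations,
                     [[1.0, 0.0, 0.0, 0.0,
                       0.0, 1.0, 0.0, 0.0,
                       0.0, 0.0, 1.0, 0.0]] * 4,    # fineTransformsPostROICrop: identity placeholders
                     {},                            # params: fill in before running
                     [[0, 0, 0], [511, 511, 199]],  # roi: [min coords, max coords] of the crop
                     subrange=range(0, 10),         # only the first 10 time points
                     fine_fwd=False,                # fine transforms computed 0->1, 0->2, 0->3: invert them
                     n_threads=0)                   # 0: use all available threads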