Example #1
def mergeTransforms(calibration, matrices1, roi, matrices2, invert2=False):
    """
  calibration: a sequence like e.g. [1.0, 1.0, 5.0].
  matrices1: sequence of one-dimensional arrays with 12 digits, each describing a 3D affine
             that was computed from the scaled images (according to the calibration).
  roi: a two-dimensional sequence, with the minimum coordinates at 0 and the maximum at 1.
  matrices2: sequence of one-dimensional arrays with 12 digits, each describing a 3D affine
             that applies after the translation introduced by the ROI is accounted for.
  
  Returns a list of AffineTransform3D, each expressing the combined
          scaling (by calibration) + tranform + translation + transform.
  """
    # Scale to isotropy
    scale3D = AffineTransform3D()
    scale3D.set(calibration[0], 0.0, 0.0, 0.0, 0.0, calibration[1], 0.0, 0.0,
                0.0, 0.0, calibration[2], 0.0)
    # Translate to ROI origin of coords
    roi_translation = affine3D(
        [1, 0, 0, -roi[0][0], 0, 1, 0, -roi[0][1], 0, 0, 1, -roi[0][2]])

    transforms = []
    for m1, m2 in izip(matrices1, matrices2):
        aff = AffineTransform3D()
        aff.set(*m1)
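        # Composition note (ImgLib2 semantics): concatenate(B) applies B before the
        # current transform, preConcatenate(B) applies B after it. The final transform
        # is therefore m2 (or its inverse) * roi_translation * m1 * scale3D, i.e. the
        # scaling to isotropy is applied first and the post-crop affine last.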
        aff.concatenate(scale3D)
        aff.preConcatenate(roi_translation)
        aff.preConcatenate(affine3D(m2).inverse() if invert2 else affine3D(m2))
        transforms.append(aff)

    return transforms
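
# A minimal usage sketch (not part of the original script): illustrative values only,
# showing how mergeTransforms composes the calibration scaling, a per-image affine,
# the ROI translation and a post-crop affine. The identity matrices and the ROI
# below are placeholders.
example_identity = [1.0, 0.0, 0.0, 0.0,
                    0.0, 1.0, 0.0, 0.0,
                    0.0, 0.0, 1.0, 0.0]
example_transforms = mergeTransforms([1.0, 1.0, 5.0],                    # anisotropic voxel calibration
                                     [example_identity, example_identity],  # one affine per image, pre-crop
                                     [[0, 0, 0], [100, 100, 50]],           # ROI as [minCoords, maxCoords]
                                     [example_identity, example_identity],  # post-crop affines
                                     invert2=False)
# Each returned AffineTransform3D can then be handed to e.g. transformedView to read
# registered pixel data from the original images.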
Example #2
def makeRegistrationUI(original_images, original_calibration, coarse_affines, params, images, minC, maxC, cropped, cropped_imp):
  """
  Register cropped images either all to all or all to the first one,
  and print out a config file with the coarse affines,
  the ROI for cropping, and the refined affines post-crop.

  original_images: the original, raw images, with original dimensions.
  original_calibration: the calibration of the images as they are on disk.
  coarse_affines: list of AffineTransform3D, one per image, that specify the translation between images
                  as manually set using the makeTranslationUI.
  params: dictionary with parameters for registering the given cropped images.
          This includes a calibration that is likely [1.0, 1.0, 1.0] as the cropped images
          are expected to have been scaled to isotropy.
  images: the list of near-original images but scaled (by calibration) to isotropy.
          (These are really views of the original images, using nearest neighbor interpolation
           to scale them to isotropy.)
  minC, maxC: the minimum and maximum coordinates of a ROI with which the cropped images were made.
  cropped: the list of images that have been scaled to isotropy, translated and cropped by the ROI.
           (These are really interval views of the scaled images, which themselves use nearest neighbor interpolation.)
  cropped_imp: the ImagePlus holding the virtual stack of cropped image views.

  The computed registration will merge the scaling to isotropy + the first transform (a translation)
  + the ROI cropping translation + the params["modelclass"] registration transform, so as to read directly
  from the original images using a single nearest-neighbor interpolation, for best performance (piling two
  nearest-neighbor interpolations on top of each other would make access to the pixel data very slow).
  (An illustrative params dictionary follows this function.)
  """

  panel = JPanel()
  panel.setBorder(BorderFactory.createEmptyBorder(10,10,10,10))
  gb = GridBagLayout()
  panel.setLayout(gb)
  gc = GBC()

  calibration = params["calibration"]
  params["cal X"] = calibration[0]
  params["cal Y"] = calibration[1]
  params["cal Z"] = calibration[2]

  # Add a label and a text field for every parameter, with titles for every block
  strings = [["Calibration",
              "cal X", "cal Y", "cal Z"],
             ["Difference of Gaussian",
              "minPeakValue", "sigmaSmaller", "sigmaLarger"],
             ["Feature extraction",
              "radius", "min_angle", "max_per_peak",
              "angle_epsilon", "len_epsilon_sq",
              "pointmatches_nearby", "pointmatches_search_radius"],
             ["RANSAC parameters for the model",
              "maxEpsilon", "minInlierRatio", "minNumInliers",
              "n_iterations", "maxTrust"],
             ["All to all registration",
              "maxAllowedError", "maxPlateauwidth", "maxIterations", "damp"]]
  # Insert all as fields, with values populated from the params dictionary
  # and updating dynamically the params dictionary
  params = dict(params) # copy, will be updated
  insertFloatFields(panel, gb, gc, params, strings)

  # Identity transforms prior to registration
  affines = [affine3D([1, 0, 0, 0,
                       0, 1, 0, 0,
                       0, 0, 1, 0]) for img in cropped]
  
  def run():
    exe = newFixedThreadPool(min(len(cropped), numCPUs()))
    try:
      # Dummy for in-RAM reading of isotropic images
      img_filenames = [str(i) for i in xrange(len(cropped))]
      loader = InRAMLoader(dict(zip(img_filenames, cropped)))
      getCalibration = params.get("getCalibration", None)
      if not getCalibration:
        getCalibration = lambda img: [1.0] * cropped[0].numDimensions()
      csv_dir = params["csv_dir"]
      modelclass = params["modelclass"]
      # Matrices describing the registration on the basis of the cropped images
      matrices = computeOptimizedTransforms(img_filenames, loader, getCalibration,
                                            csv_dir, exe, modelclass, params)
      # Store outside, so they can be e.g. printed, and used beyond here
      for matrix, affine in zip(matrices, affines):
        affine.set(*matrix)

      # Combine the transforms: scaling (by calibration)
      #                         + the coarse registration (i.e. manual translations)
      #                         + the translation introduced by the ROI cropping
      #                         + the affine matrices computed above over the cropped images.
      coarse_matrices = []
      for coarse_affine in coarse_affines:
        matrix = zeros(12, 'd')
        coarse_affine.toArray(matrix)
        coarse_matrices.append(matrix)

      # NOTE: both coarse_matrices and matrices map from camera X to camera 0. No need to invert them.
      # NOTE: uses identity calibration because the coarse_matrices already include the calibration scaling to isotropy
      transforms = mergeTransforms([1.0, 1.0, 1.0], coarse_matrices, [minC, maxC], matrices, invert2=False)

      print "calibration:", [1.0, 1.0, 1.0]
      print "cmTransforms:\n    %s\n    %s\n    %s\n    %s" % tuple(str(m) for m in coarse_matrices)
      print "ROI", [minC, maxC]
      print "fineTransformsPostROICrop:\n    %s\n    %s\n    %s\n    %s" % tuple(str(m) for m in matrices)
      print "invert2:", False
      
      # Show registered images
      registered = [transformedView(img, transform, interval=cropped[0])
                    for img, transform in izip(original_images, transforms)]
      registered_imp = showAsStack(registered, title="Registered with %s" % params["modelclass"].getSimpleName())
      registered_imp.setDisplayRange(cropped_imp.getDisplayRangeMin(), cropped_imp.getDisplayRangeMax())

      """
      # TEST: same as above, but without merging the transforms. WORKS, same result
      # Copy into ArrayImg, otherwise they are rather slow to browse
      def copy(img1, affine):
        # Copy in two steps. Otherwise the nearest neighbor interpolation on top of another
        # nearest neighbor interpolation takes a huge amount of time
        dimensions = Intervals.dimensionsAsLongArray(img1)
        aimg1 = ArrayImgs.unsignedShorts(dimensions)
        ImgUtil.copy(ImgView.wrap(img1, aimg1.factory()), aimg1)
        img2 = transformedView(aimg1, affine)
        aimg2 = ArrayImgs.unsignedShorts(dimensions)
        ImgUtil.copy(ImgView.wrap(img2, aimg2.factory()), aimg2)
        return aimg2
      futures = [exe.submit(Task(copy, img, affine)) for img, affine in izip(cropped, affines)]
      aimgs = [f.get() for f in futures]
      showAsStack(aimgs, title="DEBUG Registered with %s" % params["modelclass"].getSimpleName())
      """
    except:
      print sys.exc_info()
    finally:
      exe.shutdown()
      SwingUtilities.invokeLater(lambda: run_button.setEnabled(True))

  def launchRun(event):
    # Runs on the event dispatch thread
    run_button.setEnabled(False) # will be re-enabled at the end of run()
    # Fork:
    Thread(run).start()


  def printAffines(event):
    for i, affine in enumerate(affines):
      matrix = zeros(12, 'd')
      affine.toArray(matrix)
      msg = "# Refined post-crop affine matrix " + str(i) + ": \n" + \
            "affine" + str(i) + ".set(*[%f, %f, %f, %f,\n %f, %f, %f, %f,\n %f, %f, %f, %f])" % tuple(matrix.tolist())
      # Print everywhere
      print msg
      IJ.log(msg)
      System.out.println(msg)

  def prepareDeconvolutionScriptUI(event):
    """
    # DEBUG generateDeconvolutionScriptUI: generate params as a loadable serialized file
    with open("/tmp/parameters.pickle", 'w') as f:
      import pickle
      def asArrays(affines):
        arrays = []
        for affine in affines:
          matrix = zeros(12, 'd')
          affine.toArray(matrix)
          arrays.append(matrix)
        return arrays
        
      pickle.dump([params["srcDir"], params["tgtDir"], calibration,
                   asArrays(coarse_affines), [minC, maxC], asArrays(affines)], f)
    """
    generateDeconvolutionScriptUI(params["srcDir"], params["tgtDir"], calibration,
                                  coarse_affines, [minC, maxC], affines)
  
  # Buttons
  panel_buttons = JPanel()
  gc.gridx = 0
  gc.gridy += 1
  gc.gridwidth = 2
  gb.setConstraints(panel_buttons, gc)
  panel.add(panel_buttons)
  
  run_button = JButton("Run")
  run_button.addActionListener(launchRun)
  gb.setConstraints(run_button, gc)
  panel_buttons.add(run_button)
  
  print_button = JButton("Print affines")
  print_button.addActionListener(printAffines)
  panel_buttons.add(print_button)

  prepare_button = JButton("Prepare deconvolution script")
  prepare_button.addActionListener(prepareDeconvolutionScriptUI)
  panel_buttons.add(prepare_button)

  frame = JFrame("Registration")
  frame.setDefaultCloseOperation(JFrame.DO_NOTHING_ON_CLOSE)
  frame.addWindowListener(CloseControl())
  frame.getContentPane().add(panel)
  frame.pack()
  frame.setLocationRelativeTo(None) # center in the screen
  frame.setVisible(True)
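
# A hedged sketch (not part of the original script) of the params dictionary that
# makeRegistrationUI expects. The key names are the ones read in the function body
# and listed in the field blocks above; every value below is an illustrative
# placeholder, and TranslationModel3D is just one plausible choice of model class.
from mpicbg.models import TranslationModel3D  # assumes the mpicbg library is on the classpath

example_params = {
  "calibration": [1.0, 1.0, 1.0],    # cropped images are expected to be isotropic already
  "csv_dir": "/tmp/csv/",            # where features, pointmatches and matrices CSVs are cached
  "modelclass": TranslationModel3D,  # model used by computeOptimizedTransforms
  "srcDir": "/tmp/src/",             # used when preparing the deconvolution script
  "tgtDir": "/tmp/tgt/",
  # Difference of Gaussian
  "minPeakValue": 30, "sigmaSmaller": 2.0, "sigmaLarger": 4.0,
  # Feature extraction
  "radius": 5.0, "min_angle": 1.57, "max_per_peak": 3,
  "angle_epsilon": 0.02, "len_epsilon_sq": 1.0,
  "pointmatches_nearby": 1, "pointmatches_search_radius": 10.0,
  # RANSAC parameters for the model
  "maxEpsilon": 5.0, "minInlierRatio": 0.01, "minNumInliers": 5,
  "n_iterations": 1000, "maxTrust": 4,
  # All to all registration
  "maxAllowedError": 0, "maxPlateauwidth": 200, "maxIterations": 2, "damp": 1.0,
}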
Example #3
def deconvolveTimePoints(srcDir,
                         targetDir,
                         kernel_filepath,
                         calibration,
                         cameraTransformations,
                         fineTransformsPostROICrop,
                         params,
                         roi,
                         subrange=None,
                         camera_groups=((0, 1), (2, 3)),
                         fine_fwd=False,
                         n_threads=0):  # 0 means all
    """
     Main program entry point.
     For each time point folder TM\d+, find the KLB files of the 4 cameras,
     then register them all onto the first camera view, deconvolve the views
     pairwise according to camera_groups, and store the resulting images
     under targetDir/deconvolved/.

     Assumes that each camera view has the same dimensions in every time point folder.
     A camera view may have dimensions different from those of the other cameras.

     Can be run as many times as necessary. Intermediate computations are saved
     as CSV files (features, pointmatches and transformation matrices), and
     the deconvolved images as well, into the folder targetDir/deconvolved/ with
     a name pattern like TM\d+_CM0\d_CM0\d-deconvolved.zip

     srcDir: file path to a directory with TM\d+ subdirectories, one per time point.
     targetDir: file path to a directory for storing deconvolved images
                and CSV files with features, point matches and transformation matrices.
     kernel_filepath: file path to the 3D image of the point spread function (PSF),
                      which can be computed from fluorescent beads with the BigStitcher functions
                      and which must have odd dimensions.
     calibration: the [x, y, z] voxel calibration of the images as stored on disk.
     cameraTransformations: a function that returns a map of camera index vs. the 12-number 3D affine matrix
                            describing the transform that registers that camera view onto the camera at index 0.
                            (A sketch of such a function follows this one.)
     fineTransformsPostROICrop: a list of the transform matrices to be applied after both the coarse transform and the ROI crop.
     params: a dictionary with all the necessary parameters for feature extraction, registration and deconvolution.
     roi: the min and max coordinates for cropping the coarsely registered volumes prior to registration and deconvolution.
     subrange: defaults to None. Can be a list specifying the indices of the time points to deconvolve.
     camera_groups: the camera views to fuse and deconvolve together. Defaults to two groups: ((0, 1), (2, 3)).
     fine_fwd: whether the fineTransformsPostROICrop were computed all-to-all, which optimizes the pose
               and produces direct (forward) transforms. When False, they were computed from camera 0 to 1,
               0 to 2, and 0 to 3, and therefore need to be inverted.
     n_threads: number of threads to use. Zero (the default) means as many as possible.
    """
    kernel = readFloats(kernel_filepath, [19, 19, 25], header=434)
    klb_loader = KLBLoader()

    def getCalibration(img_filename):
        return calibration

    # Regular expression pattern describing KLB files to include
    pattern = re.compile("^SPM00_TM\d+_CM(\d+)_CHN0[01]\.klb$")

    # Find all time point folders with pattern TM\d{6} (a TM followed by 6 digits)
    def iterTMs():
        """ Return a generator over dicts of 4 KLB file paths for each time point. """
        for dirname in sorted(os.listdir(srcDir)):
            if not dirname.startswith("TM00"):
                continue
            filepaths = {}
            tm_dir = os.path.join(srcDir, dirname)
            for filename in sorted(os.listdir(tm_dir)):
                r = re.match(pattern, filename)
                if r:
                    camera_index = int(r.groups()[0])
                    filepaths[camera_index] = os.path.join(tm_dir, filename)
            yield filepaths

    if subrange:
        indices = set(subrange)
        TMs = [tm for i, tm in enumerate(iterTMs()) if i in indices]
    else:
        TMs = list(iterTMs())

    # Validate folders
    for i, filepaths in enumerate(TMs):
        if 4 != len(filepaths):
            print "Time point %i has problems: found %i KLB files in it instead of 4." % (
                i, len(filepaths))
            print "Address the issues and rerun."
            return

    print "Will process these timepoints:",
    for i, TM in enumerate(TMs):
        print i
        pprint(TM)

    # All OK, submit all timepoint folders for registration and deconvolution

    # dimensions: all images from each camera have the same dimensions
    dimensions = [
        Intervals.dimensionsAsLongArray(klb_loader.get(filepath))
        for index, filepath in sorted(TMs[0].items(), key=itemgetter(0))
    ]

    cmTransforms = cameraTransformations(dimensions[0], dimensions[1],
                                         dimensions[2], dimensions[3],
                                         calibration)

    # Transforms apply to all time points equally
    #   If fine_fwd, the fine transform was forward.
    #   Otherwise, it was from CM00 to e.g. CM01, so backwards for CM01, needing an inversion.
    transforms = mergeTransforms(
        calibration, [cmTransforms[i] for i in sorted(cmTransforms.keys())],
        roi,
        fineTransformsPostROICrop,
        invert2=not fine_fwd)

    # Create target folder for storing deconvolved images
    if not os.path.exists(os.path.join(targetDir, "deconvolved")):
        os.mkdir(os.path.join(targetDir, "deconvolved"))

    # Transform kernel to each view
    matrices = fineTransformsPostROICrop

    # For the PSF kernel, transforms without the scaling up to isotropy
    # No need to account for the translation: transformPSFKernelToView keeps the center point centered.
    PSF_kernels = [
        transformPSFKernelToView(kernel, affine3D(cmTransforms[i]))
        for i in xrange(4)
    ]
    PSF_kernels = [
        transformPSFKernelToView(k,
                                 affine3D(matrix).inverse())
        for k, matrix in izip(PSF_kernels, matrices)
    ]
    # TODO: if the kernels are not ArrayImg, they should be converted into ArrayImg.
    print "PSF_kernel[0]:", PSF_kernels[0], type(PSF_kernels[0])

    # DEBUG: write each transformed PSF kernel to /tmp for inspection
    for index in [0, 1, 2, 3]:
        writeZip(PSF_kernels[index],
                 "/tmp/kernel" + str(index) + ".zip",
                 title="kernel" + str(index)).flush()

    # A converter from FloatType to UnsignedShortType
    output_converter = createConverter(FloatType, UnsignedShortType)

    target_interval = FinalInterval(
        [0, 0, 0], [maxC - minC for minC, maxC in izip(roi[0], roi[1])])

    exe = newFixedThreadPool(n_threads=n_threads)
    try:
        # Submit for registration + deconvolution
        # The registration uses 2 parallel threads, and deconvolution all possible available threads.
        # Cannot invoke more than one time point at a time because the deconvolution requires a lot of memory.
        for i, filepaths in enumerate(TMs):
            if Thread.currentThread().isInterrupted(): break
            syncPrint("Deconvolving time point %i with files:\n  %s" %
                      (i, "\n  ".join(sorted(filepaths.itervalues()))))
            deconvolveTimePoint(filepaths,
                                targetDir,
                                klb_loader,
                                transforms,
                                target_interval,
                                params,
                                PSF_kernels,
                                exe,
                                output_converter,
                                camera_groups=camera_groups)
    finally:
        exe.shutdown()  # Not accepting any more tasks, but letting currently executing tasks complete.
        # Wait until the last task (writing the last file) completes execution.
        exe.awaitTermination(5, TimeUnit.MINUTES)
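
# A hedged sketch (not part of the original pipeline) of what the cameraTransformations
# argument could look like: per the docstring above, it is a function taking the
# dimensions of the four camera views plus the calibration, and returning a map of
# camera index vs. the 12-number affine that registers that view onto camera 0.
# The identity matrices below are placeholders; a real implementation would encode
# the actual flips, rotations and translations between the camera views.
def exampleCameraTransformations(dims0, dims1, dims2, dims3, calibration):
    identity = [1.0, 0.0, 0.0, 0.0,
                0.0, 1.0, 0.0, 0.0,
                0.0, 0.0, 1.0, 0.0]
    return {0: identity,   # camera 0 maps onto itself
            1: identity,   # placeholder for the camera 1 -> camera 0 transform
            2: identity,
            3: identity}

# It would then be passed in like this (paths and parameters are made up):
# deconvolveTimePoints("/path/to/timepoints/", "/path/to/output/", "/path/to/psf.klb",
#                      [1.0, 1.0, 5.0], exampleCameraTransformations,
#                      fineTransformsPostROICrop, params, roi, fine_fwd=True)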