Example #1
def preloadCells(self, index):
    # Submit jobs to concurrently preload cells ahead into the cache, if not there already
    if self.preload is not None and self.preload > 0 and 0 == index % self.preload:
        # e.g. if index=0 and preload=5, will preload cells [1, 2, 3, 4]
        for i in xrange(index + 1,
                        min(index + self.preload, len(self.filepaths))):
            self.exe.submit(Task(self.cache, i))
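For context, a minimal sketch of what the enclosing class might look like (hypothetical names; assumes Task wraps a callable for a Java ExecutorService, as in the examples below):

class PreloadingCellLoader(object):
    """ Hypothetical container for the preloadCells method above. """
    def __init__(self, filepaths, exe, cache, preload=5):
        self.filepaths = filepaths  # list of image file paths, one per cell
        self.exe = exe              # ExecutorService, e.g. from newFixedThreadPool()
        self.cache = cache          # callable: cell index -> loaded (and cached) cell
        self.preload = preload      # look-ahead window; triggers every `preload` accesses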
Example #2
def pointmatchingTasks(filepaths, csvDir, params, n_adjacent, exeload):
    loadFPMem = SoftMemoize(
        lambda path: loadFloatProcessor(path, params, scale=True), maxsize=64)
    for i in xrange(len(filepaths) - n_adjacent):
        for inc in xrange(1, n_adjacent + 1):
            #syncPrint("Preparing extractBlockMatches for: \n  1: %s\n  2: %s" % (filepaths[i], filepaths[i+inc]))
            yield Task(extractBlockMatches, filepaths[i], filepaths[i + inc],
                       params, csvDir, exeload, loadFPMem)
Example #3
def pointmatchingTasks(filepaths, csvDir, params, paramsSIFT, n_adjacent,
                       exeload, properties, loadFPMem):
    for i in xrange(len(filepaths) - n_adjacent):
        for inc in xrange(1, n_adjacent + 1):
            #syncPrintQ("Preparing extractBlockMatches for: \n  1: %s\n  2: %s" % (filepaths[i], filepaths[i+inc]))
            yield Task(extractBlockMatches, filepaths[i], filepaths[i + inc],
                       params, paramsSIFT, properties, csvDir, exeload,
                       loadFPMem)
Example #4
def generateSIFTMatches(filepaths, n_adjacent, params, paramsSIFT, properties,
                        csvDir):
    paramsRod = {
        "rod": params["rod"]
    }  # only this parameter is needed for SIFT pointmatches
    for i in xrange(max(1, len(filepaths) - n_adjacent)):
        for inc in xrange(1, min(n_adjacent + 1, len(filepaths))):
            yield Task(extractSIFTMatches, filepaths[i], filepaths[i + inc],
                       paramsRod, paramsSIFT, properties, csvDir)
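The max/min clamps above keep both indices in bounds for short stacks. A worked example, assuming len(filepaths) == 3 and n_adjacent == 5:

# outer: xrange(max(1, 3 - 5))    -> i in (0,)
# inner: xrange(1, min(5 + 1, 3)) -> inc in (1, 2)
# yields only the pairs (0, 1) and (0, 2); filepaths[i + inc] never overflows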
Example #5
def ensureFeaturesForAll(img_filenames,
                         img_loader,
                         getCalibration,
                         csv_dir,
                         params,
                         exe,
                         verbose=True):
    """ Ensure features exist in CSV files, or create them, for each image file. """
    futures = [
        exe.submit(
            Task(ensureFeatures,
                 img_filename,
                 img_loader,
                 getCalibration,
                 csv_dir,
                 params,
                 verbose=verbose)) for img_filename in img_filenames
    ]
    # Wait until all complete
    for f in futures:
        f.get()
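A hypothetical call site (the thread pool helper and the argument names are assumptions carried over from the other examples here):

exe = newFixedThreadPool(4)  # as used elsewhere in these examples
try:
    ensureFeaturesForAll(img_filenames, img_loader, getCalibration,
                         csv_dir, params, exe, verbose=False)
finally:
    exe.shutdown()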
Example #6
def computeForwardTransforms(img_filenames,
                             img_loader,
                             getCalibration,
                             csv_dir,
                             exe,
                             modelclass,
                             params,
                             exe_shutdown=True):
    """ Compute transforms from image i to image i+1,
      returning an identity transform for the first image,
      and with each transform being from i to i+1 (forward transforms).
      Returns a list of affine 3D matrices, each a double[] with 12 values.
  """
    try:
        # Ensure features exist in CSV files, or create them
        ensureFeaturesForAll(img_filenames, img_loader, getCalibration,
                             csv_dir, params, exe)

        # Create models: ensures first that pointmatches exist in CSV files, or creates them
        futures = [
            exe.submit(
                Task(fitModel, img1_filename, img2_filename, img_loader,
                     getCalibration, csv_dir, modelclass(), exe, params))
            for img1_filename, img2_filename in izip(img_filenames,
                                                     img_filenames[1:])
        ]
        # Wait until all complete; the first image gets the identity transform
        matrices = [array([1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0], 'd')] + \
                   [f.get() for f in futures]

        return matrices

    finally:
        if exe_shutdown:
            exe.shutdown()
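Note on the return value: judging from the identity array above and the row concatenation in computeOptimizedTransforms below, the 12 values encode the 3x4 affine matrix row by row:

# [m00, m01, m02, m03,   <- row 0 (the identity contributes 1, 0, 0, 0)
#  m10, m11, m12, m13,   <- row 1 (0, 1, 0, 0)
#  m20, m21, m22, m23]   <- row 2 (0, 0, 1, 0)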
Example #7
def extractBlockMatches(filepath1, filepath2, params, paramsSIFT, properties,
                        csvDir, exeload, load):
    """
  filepath1: the file path to an image of a section.
  filepath2: the file path to an image of another section.
  params: dictionary of parameters necessary for BlockMatching.
  exeload: an ExecutorService for parallel loading of image files.
  load: a function that knows how to load the image from the filepath.

  return False if the CSV file already exists, True if it has to be computed.
  """

    # Skip if pointmatches CSV file exists already:
    csvpath = os.path.join(
        csvDir,
        basename(filepath1) + '.' + basename(filepath2) + ".pointmatches.csv")
    if os.path.exists(csvpath):
        return False

    try:

        # Load files in parallel
        futures = [
            exeload.submit(Task(load, filepath1)),
            exeload.submit(Task(load, filepath2))
        ]

        fp1 = futures[0].get()  # FloatProcessor, already Gaussian-blurred, contrast-corrected and scaled!
        fp2 = futures[1].get()  # FloatProcessor, idem

        # Define points from the mesh
        sourcePoints = ArrayList()
        # List to fill with PointMatch instances from filepath1 to filepath2
        sourceMatches = ArrayList()

        # Don't use blockmatching if the dimensions are different
        # (note: this flag is computed but never consulted below)
        use_blockmatching = (fp1.getWidth() == fp2.getWidth()
                             and fp1.getHeight() == fp2.getHeight())

        # Fill the sourcePoints
        mesh = TransformMesh(params["meshResolution"], fp1.width, fp1.height)
        PointMatch.sourcePoints(mesh.getVA().keySet(), sourcePoints)
        syncPrintQ("Extracting block matches for \n S: " + filepath1 +
                   "\n T: " + filepath2 + "\n  with " +
                   str(sourcePoints.size()) + " mesh sourcePoints.")
        # Run
        BlockMatching.matchByMaximalPMCCFromPreScaledImages(
            fp1,
            fp2,
            params["scale"],  # float
            params["blockRadius"],  # X
            params["blockRadius"],  # Y
            params["searchRadius"],  # X
            params["searchRadius"],  # Y
            params["minR"],  # float
            params["rod"],  # float
            params["maxCurvature"],  # float
            sourcePoints,
            sourceMatches)

        # At least some should match to accept the translation
        if len(sourceMatches) < max(20, len(sourcePoints) / 5) / 2:
            syncPrintQ(
                "Found only %i blockmatching pointmatches (from %i source points)"
                % (len(sourceMatches), len(sourcePoints)))
            syncPrintQ(
                "... therefore invoking SIFT pointmatching for:\n  S: " +
                basename(filepath1) + "\n  T: " + basename(filepath2))
            # Can fail if there is a shift larger than the searchRadius
            # Try SIFT features, which are location independent
            #
            # Images are now scaled: load originals
            futures = [
                exeload.submit(
                    Task(loadFloatProcessor,
                         filepath1,
                         params,
                         paramsSIFT,
                         scale=False)),
                exeload.submit(
                    Task(loadFloatProcessor,
                         filepath2,
                         params,
                         paramsSIFT,
                         scale=False))
            ]

            fp1 = futures[0].get()  # FloatProcessor, original
            fp2 = futures[1].get()  # FloatProcessor, original

            # Images can be of different size: scale them the same way
            area1 = fp1.width * fp1.height
            area2 = fp2.width * fp2.height

            if area1 == area2:
                paramsSIFT1 = paramsSIFT.clone()
                paramsSIFT1.maxOctaveSize = int(
                    max(properties.get("SIFT_max_size", 2048),
                        fp1.width * params["scale"]))
                paramsSIFT1.minOctaveSize = int(paramsSIFT1.maxOctaveSize /
                                                pow(2, paramsSIFT1.steps))
                paramsSIFT2 = paramsSIFT1
            else:
                bigger, smaller = (fp1, fp2) if area1 > area2 else (fp2, fp1)
                target_width_bigger = int(
                    max(1024, bigger.width * params["scale"]))
                if 1024 == target_width_bigger:
                    target_width_smaller = int(1024 * float(smaller.width) /
                                               bigger.width)
                else:
                    # int() to ensure an integer octave size (scale is a float)
                    target_width_smaller = int(smaller.width * params["scale"])
                #
                paramsSIFT1 = paramsSIFT.clone()
                paramsSIFT1.maxOctaveSize = target_width_bigger
                paramsSIFT1.minOctaveSize = int(paramsSIFT1.maxOctaveSize /
                                                pow(2, paramsSIFT1.steps))
                paramsSIFT2 = paramsSIFT.clone()
                paramsSIFT2.maxOctaveSize = target_width_smaller
                paramsSIFT2.minOctaveSize = int(paramsSIFT2.maxOctaveSize /
                                                pow(2, paramsSIFT2.steps))

            ijSIFT1 = SIFT(FloatArray2DSIFT(paramsSIFT1))
            features1 = ArrayList()  # of Point instances
            ijSIFT1.extractFeatures(fp1, features1)

            ijSIFT2 = SIFT(FloatArray2DSIFT(paramsSIFT2))
            features2 = ArrayList()  # of Point instances
            ijSIFT2.extractFeatures(fp2, features2)
            # Vector of PointMatch instances
            sourceMatches = FloatArray2DSIFT.createMatches(
                features1,
                features2,
                params.get("max_sd", 1.5),  # max_sd: maximal difference in size (ratio max/min)
                TranslationModel2D(),
                params.get("max_id", Double.MAX_VALUE),  # max_id: maximal distance in image space
                params.get("rod", 0.9))  # rod: ratio of best vs second best

        # Store pointmatches
        savePointMatches(os.path.basename(filepath1),
                         os.path.basename(filepath2), sourceMatches, csvDir,
                         params)

        return True
    except:
        printException()
        # NOTE: on error this falls through and implicitly returns None
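A worked example of the fallback threshold above: with 100 mesh sourcePoints, max(20, 100 / 5) / 2 == 10, so fewer than 10 block matches rejects the blockmatching result and triggers the SIFT path.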
Example #8
def loadPointMatchesTasks(filepaths, csvDir, params, n_adjacent):
    for i in xrange(max(1, len(filepaths) - n_adjacent)):
        for inc in xrange(1, min(n_adjacent + 1, len(filepaths))):
            yield Task(loadPointMatchesPlus, filepaths, i, i + inc, csvDir,
                       params)
Example #9
def ensurePointMatches(filepaths, csvDir, params, paramsSIFT, n_adjacent,
                       properties):
    """ If a pointmatches csv file doesn't exist, will create it. """
    w = ParallelTasks("ensurePointMatches",
                      exe=newFixedThreadPool(properties["n_threads"]))
    exeload = newFixedThreadPool()
    try:
        if properties.get("use_SIFT", False):
            syncPrintQ("use_SIFT is True")
            # Pre-extract SIFT features for all images first
            # ensureSIFTFeatures returns the features list, so each Future holds it in memory:
            # to avoid holding onto all of them at once, consume the tasks in chunks:
            chunk_size = properties["n_threads"] * 2
            count = 0
            for result in w.chunkConsume(
                    chunk_size,  # number of tasks to submit before starting to wait on futures
                    (Task(ensureSIFTFeatures,
                          filepath,
                          paramsSIFT,
                          properties,
                          csvDir,
                          validateByFileExists=properties.get(
                              "SIFT_validateByFileExists"))
                     for filepath in filepaths)):
                count += 1
                if 0 == count % chunk_size:
                    syncPrintQ(
                        "Completed extracting or validating SIFT features for %i images."
                        % count)
            w.awaitAll()
            syncPrintQ(
                "Completed extracting or validating SIFT features for all images."
            )
            # Compute pointmatches across adjacent sections
            count = 0
            for result in w.chunkConsume(
                    chunk_size,
                    generateSIFTMatches(filepaths, n_adjacent, params,
                                        paramsSIFT, properties, csvDir)):
                count += 1
                syncPrintQ("Completed SIFT pointmatches %i/%i" %
                           (count, len(filepaths) * n_adjacent))
        else:
            # Use blockmatches
            syncPrintQ("using blockmatches")
            loadFPMem = SoftMemoize(
                lambda path: loadFloatProcessor(path, params, paramsSIFT, scale=True),
                maxsize=properties["n_threads"] + n_adjacent)
            count = 1
            for result in w.chunkConsume(
                    properties["n_threads"],
                    pointmatchingTasks(filepaths, csvDir, params, paramsSIFT,
                                       n_adjacent, exeload, properties,
                                       loadFPMem)):
                if result:  # is False when CSV file already exists
                    syncPrintQ("Completed %i/%i" %
                               (count, len(filepaths) * n_adjacent))
                count += 1
            syncPrintQ("Awaiting all remaining pointmatching tasks to finish.")
        w.awaitAll()
        syncPrintQ("Finished all pointmatching tasks.")
    except:
        printException()
    finally:
        exeload.shutdown()
        w.destroy()
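A hypothetical configuration, listing only the properties keys read by this function and its helpers (all values are placeholders):

properties = {
    "n_threads": 8,
    "use_SIFT": False,                  # False: blockmatching, with SIFT as fallback
    "SIFT_validateByFileExists": True,  # trust existing feature CSV files
    "SIFT_max_size": 2048,              # read by the SIFT fallback in extractBlockMatches
}
ensurePointMatches(filepaths, csvDir, params, paramsSIFT, 3, properties)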
Example #10
def findPointMatches(img1_filename,
                     img2_filename,
                     img_loader,
                     getCalibration,
                     csv_dir,
                     exe,
                     params,
                     verbose=True):
    """ Attempt to load them from a CSV file, otherwise compute them and save them. """
    names = set(["minPeakValue", "sigmaSmaller", "sigmaLarger",  # DoG peak params
                 "radius", "min_angle", "max_per_peak",          # Constellation params
                 "angle_epsilon", "len_epsilon_sq"])             # pointmatches params
    pm_params = {k: params[k] for k in names}
    # Attempt to load pointmatches from CSV file
    pointmatches = loadPointMatches(img1_filename,
                                    img2_filename,
                                    csv_dir,
                                    pm_params,
                                    verbose=verbose)
    if pointmatches is not None:
        return pointmatches

    # Load features from CSV files
    # otherwise compute them and save them.
    img_filenames = [img1_filename, img2_filename]
    names = set([
        "minPeakValue", "sigmaSmaller", "sigmaLarger", "radius", "min_angle",
        "max_per_peak"
    ])
    feature_params = {k: params[k] for k in names}
    csv_features = [
        loadFeatures(img_filename, csv_dir, feature_params, verbose=verbose)
        for img_filename in img_filenames
    ]
    # If features were loaded, just return them, otherwise compute them (and save them to CSV files)
    futures = [
        Getter(fs) if fs else exe.submit(
            Task(makeFeatures, img_filename, img_loader, getCalibration,
                 csv_dir, feature_params))
        for fs, img_filename in izip(csv_features, img_filenames)
    ]
    features = [f.get() for f in futures]

    if verbose:
        for img_filename, fs in izip(img_filenames, features):
            syncPrint("Found %i constellation features in image %s" %
                      (len(fs), basename(img_filename)))

    # Compare all possible pairs of constellation features: the PointMatches
    pointmatches_nearby = params.get('pointmatches_nearby', 0)
    if 1 == pointmatches_nearby:
        # Use a RadiusNeighborSearchOnKDTree
        pm = PointMatches.fromNearbyFeatures(
            params['pointmatches_search_radius'], features[0], features[1],
            params["angle_epsilon"], params["len_epsilon_sq"])
    else:
        if 2 == pointmatches_nearby:
            method = PointMatches.fromFeaturesScaleInvariant
        else:  # 0
            method = PointMatches.fromFeatures
        # All to all
        pm = method(features[0], features[1], params["angle_epsilon"],
                    params["len_epsilon_sq"])

    if verbose:
        syncPrint("Found %i point matches between:\n    %s\n    %s" % \
                  (len(pm.pointmatches), basename(img1_filename), basename(img2_filename)))

    # Store as CSV file
    savePointMatches(img1_filename, img2_filename, pm.pointmatches, csv_dir,
                     pm_params)
    #
    return pm.pointmatches
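A hypothetical params dictionary covering the keys this function reads (all values are placeholders, not recommendations):

params = {
    "minPeakValue": 30, "sigmaSmaller": 2.0, "sigmaLarger": 4.0,  # DoG peak params
    "radius": 10, "min_angle": 1.57, "max_per_peak": 3,           # Constellation params
    "angle_epsilon": 0.02, "len_epsilon_sq": 100,                 # pointmatches params
    "pointmatches_nearby": 1,  # 1: radius search on a KDTree; 2: scale-invariant; otherwise all-to-all
    "pointmatches_search_radius": 100,  # only read when pointmatches_nearby == 1
}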
Example #11
def computeOptimizedTransforms(img_filenames,
                               img_loader,
                               getCalibration,
                               csv_dir,
                               exe,
                               modelclass,
                               params,
                               verbose=True):
    """ Compute transforms for all images at once,
      simultaneously considering registrations between image i to image i+1, i+2 ... i+n,
      where n is params["n_adjacent"].
      Alternatively, if the params["all_to_all"] exists and is truthy, all tiles will be connected to all tiles.
      Then all matches are optimized together using mpicbg.models.TileConfiguration.
      Fixed tiles are specified in a list of indices with params["fixed_tile_index"].
      Expects, in total:
       * params["n_adjacent"] or params["all_to_all"]
       * params["fixed_tile_index"] (when absent, defaults to [0]: a list with the first tile index in it)
       * params["maxAllowedError"]
       * params["maxPlateauwidth"]
       * params["maxIterations"]
       * params["damp"]
      Returns a list of affine 3D matrices, each a double[] with 12 values, corresponding to the img_filenames.
  """
    # Ensure features exist in CSV files, or create them
    ensureFeaturesForAll(img_filenames,
                         img_loader,
                         getCalibration,
                         csv_dir,
                         params,
                         exe,
                         verbose=verbose)

    # One Tile per time point
    tiles = [Tile(modelclass()) for _ in img_filenames]

    # Extract pointmatches from img_filename i to all in range(i+1, i+n)
    def findPointMatchesProxy(i, j):
        pointmatches = findPointMatches(img_filenames[i],
                                        img_filenames[j],
                                        img_loader,
                                        getCalibration,
                                        csv_dir,
                                        exe,
                                        params,
                                        verbose=verbose)
        return i, j, pointmatches

    #
    futures = []

    if params.get("all_to_all", False):
        for i, j in combinations(xrange(len(img_filenames)), 2):
            futures.append(exe.submit(Task(findPointMatchesProxy, i, j)))
    else:
        n = params["n_adjacent"]
        for i in xrange(len(img_filenames) - n + 1):
            for inc in xrange(1, n):
                # All features were extracted already, so the 'exe' won't be used in findPointMatches
                futures.append(
                    exe.submit(Task(findPointMatchesProxy, i, i + inc)))

    # Join tiles with tiles for which pointmatches were computed
    for f in futures:
        i, j, pointmatches = f.get()
        if 0 == len(pointmatches):
            syncPrint("Zero pointmatches for %i vs %i" % (i, j))
            continue
        syncPrint("connecting tile %i with %i" % (i, j))
        tiles[i].connect(tiles[j], pointmatches)  # reciprocal connection

    # Optimize tile pose
    tc = TileConfiguration()
    tc.addTiles(tiles)
    fixed_tile_indices = params.get("fixed_tile_indices",
                                    [0])  # default: fix first tile
    syncPrint("Fixed tile indices: %s" % str(fixed_tile_indices))
    for index in fixed_tile_indices:
        tc.fixTile(tiles[index])
    #
    if TranslationModel3D != modelclass:
        syncPrint("Running TileConfiguration.preAlign, given %s" %
                  modelclass.getSimpleName())
        tc.preAlign()
    else:
        syncPrint("No prealign, model is %s" % modelclass.getSimpleName())
    #
    maxAllowedError = params["maxAllowedError"]
    maxPlateauwidth = params["maxPlateauwidth"]
    maxIterations = params["maxIterations"]
    damp = params["damp"]
    tc.optimizeSilentlyConcurrent(ErrorStatistic(maxPlateauwidth + 1),
                                  maxAllowedError, maxIterations,
                                  maxPlateauwidth, damp)

    # TODO problem: can fail when there are 0 inliers

    # Return model matrices as double[] arrays with 12 values
    matrices = []
    for tile in tiles:
        a = nativeArray('d', [3, 4])
        tile.getModel().toMatrix(a)  # Can't use model.toArray: different order of elements
        matrices.append(a[0] + a[1] + a[2])  # Concat rows: flatten 3x4 into 12 values

    return matrices
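A hypothetical call, with a params dictionary matching the keys documented above (values are placeholders):

params = {
    "n_adjacent": 3,            # or "all_to_all": True
    "fixed_tile_indices": [0],  # hold the first tile fixed
    "maxAllowedError": 0,
    "maxPlateauwidth": 200,
    "maxIterations": 1000,
    "damp": 1.0,
}
matrices = computeOptimizedTransforms(img_filenames, img_loader, getCalibration,
                                      csv_dir, exe, TranslationModel3D, params)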
Example #12
def deconvolveTimePoint(filepaths,
                        targetDir,
                        klb_loader,
                        transforms,
                        target_interval,
                        params,
                        PSF_kernels,
                        exe,
                        output_converter,
                        camera_groups=((0, 1), (2, 3)),
                        write=writeZip):
    """ filepaths is a dictionary of camera index vs filepath to a KLB file.
      With the default camera_groups=((0, 1), (2, 3)) this function will generate
      two deconvolved views, one for each channel,
      where CHN00 is made of CM00 + CM01, and
            CHNO1 is made of CM02 + CM03.
      Will take the camera registrations (cmIsotropicTransforms), which are coarse,
      apply them to the images, then crop the images, then apply the fine transformations.
      If the deconvolved images exist, it will neither compute it nor write it."""
    tm_dirname = filepaths[0][filepaths[0].rfind("_TM") +
                              1:filepaths[0].rfind("_CM")]

    n_threads = max(1, Runtime.getRuntime().availableProcessors() - 1)

    def prepare(index):
        # Prepare the img for deconvolution:
        # 0. Transform in one step.
        # 1. Ensure its pixel values conform to expectations (no zeros inside)
        # 2. Copy it into an ArrayImg for faster recurrent retrieval of same pixels
        syncPrint("Preparing %s CM0%i for deconvolution" % (tm_dirname, index))
        img = klb_loader.get(filepaths[index])  # of UnsignedShortType
        imgP = prepareImgForDeconvolution(
            img, transforms[index], target_interval)  # returns of FloatType
        # Copy transformed view into ArrayImg for best performance in deconvolution
        imgA = ArrayImgs.floats(Intervals.dimensionsAsLongArray(imgP))
        #ImgUtil.copy(ImgView.wrap(imgP, imgA.factory()), imgA)
        ImgUtil.copy(imgP, imgA, max(1, n_threads / 2))  # parallel copying; at least 1 thread
        syncPrint("--Completed preparing %s CM0%i for deconvolution" %
                  (tm_dirname, index))
        imgP = None
        img = None
        return (index, imgA)

    def strings(indices):
        cameras = "-".join("CM0%i" % i for i in indices)
        name = cameras + "-deconvolved"
        filename = tm_dirname + "_" + name + ".zip"
        path = os.path.join(targetDir, "deconvolved/" + filename)
        return filename, path

    # Find out which pairs haven't been created yet
    futures = []
    todo = []
    for indices in camera_groups:
        filename, path = strings(indices)
        if not os.path.exists(path):
            todo.append(indices)
            for index in indices:
                futures.append(exe.submit(Task(prepare, index)))

    # Dictionary of index vs imgA
    prepared = dict(f.get() for f in futures)

    def writeToDisk(write, img, path, title=''):
        write(img, path, title=title).flush()  # flush the returned ImagePlus

    # Each deconvolution run uses many threads when run with CPU
    # So do one at a time.
    last_future = None
    for indices in todo:
        if Thread.currentThread().isInterrupted(): break
        images = [prepared[index] for index in indices]
        syncPrint("Invoked deconvolution for %s %s" %
                  (tm_dirname, " ".join("%i" % i for i in indices)))
        # Deconvolve: merge two views into a single volume
        n_iterations = params["CM_%s_n_iterations" %
                              "_".join("%i" % i for i in indices)]
        img = multiviewDeconvolution(images,
                                     params["blockSizes"],
                                     PSF_kernels,
                                     n_iterations,
                                     exe=exe)
        # On-the-fly convert to 16-bit: data values are well within the 16-bit range
        imgU = convert(img, output_converter, UnsignedShortType)
        filename, path = strings(indices)
        # Write in a separate thread so as not to wait
        last_future = exe.submit(
            Task(writeToDisk, write, imgU, path, title=filename))
        imgU = None
        img = None
        images = None

    if last_future:
        last_future.get()

    prepared = None
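Note on the params keys assembled above: for the default camera_groups=((0, 1), (2, 3)), the iteration counts are read from params["CM_0_1_n_iterations"] and params["CM_2_3_n_iterations"], since each key is built as "CM_" plus the underscore-joined group indices plus "_n_iterations".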