from java.util import HashSet
from ij import IJ
from mpicbg.ij import SIFT
from mpicbg.imagefeatures import FloatArray2DSIFT

def getFeatures(imPath, p):
    # Extract SIFT features from the image at imPath using the given FloatArray2DSIFT.Param
    features = HashSet()
    im = IJ.openImage(imPath)
    SIFT(FloatArray2DSIFT(p)).extractFeatures(im.getProcessor(), features)
    IJ.log(str(features.size()) + ' features extracted')
    im.close()
    return features
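A minimal usage sketch for the helper above; the image path and the parameter values are placeholders, not from the original script:

# Hypothetical usage: configure a Param instance, then extract features from one image.
p = FloatArray2DSIFT.Param()
p.maxOctaveSize = 1024  # illustrative values only
p.minOctaveSize = 64
features = getFeatures("/path/to/section.tif", p)  # placeholder path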
Example No. 2
from mpicbg.imagefeatures import FloatArray2D, FloatArray2DSIFT

def extractFeatures(ip, params):
    sift = FloatArray2DSIFT(params)
    sift.init(FloatArray2D(ip.convertToFloat().getPixels(),
                           ip.getWidth(), ip.getHeight()))
    features = sift.run()  # instances of mpicbg.imagefeatures.Feature
    return features
Example No. 3
def get_drift(self, index1, index2):
    '''Return the drift between the images at the given indices.

    :param index1: The index of the first image (0-based).
    :param index2: The index of the second image (0-based).
    '''
    if not self.drift_matrix[index1][index2]:
        if index1 == index2:
            self.drift_matrix[index1][index2] = (0, 0)
        else:
            model = TranslationModel2D()
            candidates = FloatArray2DSIFT.createMatches(self.all_features[index1],
                                                        self.all_features[index2],
                                                        1.5,
                                                        None,
                                                        Float.MAX_VALUE,
                                                        self.param.rod)
            # print '%i potentially corresponding features identified' % len(candidates)
            inliers = Vector()
            model.filterRansac(candidates,
                               inliers,
                               1000,
                               self.param.maxEpsilon,
                               self.param.minInlierRatio)
            affine = model.createAffine()
            self.drift_matrix[index1][index2] = (affine.translateX,
                                                 affine.translateY)
    return self.drift_matrix[index1][index2]
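The returned (dx, dy) pair can be applied directly to shift one image onto the other. A minimal sketch, where `aligner` is a hypothetical instance of the class this method belongs to, and the sign convention depends on how the drift was defined:

from ij import IJ

dx, dy = aligner.get_drift(0, 1)            # 'aligner': hypothetical instance of the class above
imp = IJ.openImage("/path/to/image_1.tif")  # placeholder path
ip = imp.getProcessor()
ip.translate(-dx, -dy)                      # ImageProcessor.translate shifts the pixel content in place
imp.updateAndDraw()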
Example No. 4
def ensureSIFTFeatures(filepath,
                       paramsSIFT,
                       properties,
                       csvDir,
                       validateByFileExists=False):
    """
     filepath: to the image from which SIFT features have been or have to be extracted.
     params: dict of registration parameters, including the key "scale".
     paramsSIFT: FloatArray2DSIFT.Params instance.
     csvDir: directory into which serialized features have been or will be saved.
     load: function to load an image as an ImageProcessor from the filepath.
     validateByFileExists: whether to merely check that the .obj file exists as a quick form of validation.
     
     First check if serialized features exist for the image, and if the Params match.
     Otherwise extract the features and store them serialized.
     Returns the ArrayList of Feature instances.
  """
    path = os.path.join(csvDir,
                        os.path.basename(filepath) + ".SIFT-features.obj")
    if validateByFileExists:
        if os.path.exists(path):
            return True
    # An ArrayList whose last element is a mpicbg.imagefeatures.FloatArray2DSIFT.Param
    # and all other elements are mpicbg.imagefeatures.Feature
    features = deserialize(path) if os.path.exists(path) else None
    if features:
        if features.get(features.size() - 1).equals(paramsSIFT):
            features.remove(features.size() - 1)  # removes the Params
            syncPrintQ("Loaded %i SIFT features for %s" %
                       (features.size(), os.path.basename(filepath)))
            return features
        else:
            # Remove the file: paramsSIFT have changed
            os.remove(path)
    # Else, extract de novo:
    try:
        # Extract features
        imp = loadImp(filepath)
        ip = imp.getProcessor()
        paramsSIFT = paramsSIFT.clone()
        ijSIFT = SIFT(FloatArray2DSIFT(paramsSIFT))
        features = ArrayList()  # of Feature instances
        ijSIFT.extractFeatures(ip, features)
        ip = None
        imp.flush()
        imp = None
        features.add(paramsSIFT)  # append the Param instance at the end for future validation
        serialize(features, path)
        features.remove(features.size() - 1)  # return without the Param, for immediate use
        syncPrintQ("Extracted %i SIFT features for %s" %
                   (features.size(), os.path.basename(filepath)))
    except:
        printException()
    return features
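# The serialize/deserialize helpers used above are not shown in this excerpt.
# A minimal sketch of what they could look like, assuming plain Java object
# serialization of the ArrayList; the original project's helpers may differ.
from java.io import FileInputStream, FileOutputStream, ObjectInputStream, ObjectOutputStream

def serialize(obj, path):
    # Write a Serializable object (here: the ArrayList of Feature plus the Param) to disk
    out = ObjectOutputStream(FileOutputStream(path))
    try:
        out.writeObject(obj)
    finally:
        out.close()

def deserialize(path):
    # Read the serialized object back from disk; exceptions propagate to the caller
    inp = ObjectInputStream(FileInputStream(path))
    try:
        return inp.readObject()
    finally:
        inp.close()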
def getSIFTMatchingParameters(steps, initialSigma, minOctaveSize,
                              maxOctaveSize, fdBins, fdSize):
    p = FloatArray2DSIFT.Param().clone()
    p.steps = steps
    p.initialSigma = initialSigma
    p.minOctaveSize = minOctaveSize
    p.maxOctaveSize = maxOctaveSize
    p.fdBins = fdBins
    p.fdSize = fdSize
    return p
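For illustration, a hypothetical call to the helper above, with values mirroring the settings used elsewhere in these examples:

p = getSIFTMatchingParameters(steps=3, initialSigma=1.6, minOctaveSize=256,
                              maxOctaveSize=1024, fdBins=8, fdSize=8)
features = ArrayList()  # java.util.ArrayList, as in the surrounding code
SIFT(FloatArray2DSIFT(p)).extractFeatures(imp.getProcessor(), features)  # 'imp' is a hypothetical ImagePlus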
Example No. 6
def extractSIFTMatches(filepath1, filepath2, params, paramsSIFT, properties,
                       csvDir):
    # Skip if pointmatches CSV file exists already:
    csvpath = os.path.join(
        csvDir,
        basename(filepath1) + '.' + basename(filepath2) + ".pointmatches.csv")
    if os.path.exists(csvpath):
        return False

    try:
        # Load from CSV files or extract features de novo
        features1 = ensureSIFTFeatures(filepath1, paramsSIFT, properties,
                                       csvDir)
        features2 = ensureSIFTFeatures(filepath2, paramsSIFT, properties,
                                       csvDir)
        #syncPrintQ("Loaded %i features for %s\n       %i features for %s" % (features1.size(), os.path.basename(filepath1),
        #                                                                     features2.size(), os.path.basename(filepath2)))
        # Vector of PointMatch instances
        sourceMatches = FloatArray2DSIFT.createMatches(
            features1,
            features2,
            params.get("max_sd", 1.5),  # max_sd: maximal difference in size (ratio max/min)
            TranslationModel2D(),
            params.get("max_id", Double.MAX_VALUE),  # max_id: maximal distance in image space
            params.get("rod", 0.9))  # rod: ratio of best vs second best

        syncPrintQ("Found %i SIFT pointmatches for %s vs %s" %
                   (sourceMatches.size(), os.path.basename(filepath1),
                    os.path.basename(filepath2)))

        # Store pointmatches
        savePointMatches(os.path.basename(filepath1),
                         os.path.basename(filepath2), sourceMatches, csvDir,
                         params)
        return True
    except:
        printException()
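A hypothetical invocation of extractSIFTMatches; the file paths, output directory, and parameter values are placeholders, and paramsSIFT would be a FloatArray2DSIFT.Param configured as in the later examples:

params = {"max_sd": 1.5,               # maximal difference in feature size (ratio max/min)
          "max_id": Double.MAX_VALUE,  # maximal distance in image space
          "rod": 0.9}                  # ratio of distances to best vs. second-best match
ok = extractSIFTMatches("/data/section_0001.tif", "/data/section_0002.tif",
                        params, paramsSIFT, {}, "/data/csvs/")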
Example No. 7
def extractBlockMatches(filepath1, filepath2, params, paramsSIFT, properties,
                        csvDir, exeload, load):
    """
  filepath1: the file path to an image of a section.
  filepath2: the file path to an image of another section.
  params: dictionary of parameters necessary for BlockMatching.
  exeload: an ExecutorService for parallel loading of image files.
  load: a function that knows how to load the image from the filepath.

  return False if the CSV file already exists, True if it has to be computed.
  """

    # Skip if pointmatches CSV file exists already:
    csvpath = os.path.join(
        csvDir,
        basename(filepath1) + '.' + basename(filepath2) + ".pointmatches.csv")
    if os.path.exists(csvpath):
        return False

    try:

        # Load files in parallel
        futures = [
            exeload.submit(Task(load, filepath1)),
            exeload.submit(Task(load, filepath2))
        ]

        # FloatProcessors, already Gaussian-blurred, contrast-corrected and scaled
        fp1 = futures[0].get()
        fp2 = futures[1].get()

        # Define points from the mesh
        sourcePoints = ArrayList()
        # List to fill
        sourceMatches = ArrayList()  # of PointMatch from filepath1 to filepath2

        # Don't use blockmatching if the dimensions are different
        use_blockmatching = (fp1.getWidth() == fp2.getWidth()
                             and fp1.getHeight() == fp2.getHeight())

        # Fill the sourcePoints
        mesh = TransformMesh(params["meshResolution"], fp1.width, fp1.height)
        PointMatch.sourcePoints(mesh.getVA().keySet(), sourcePoints)
        syncPrintQ("Extracting block matches for \n S: " + filepath1 +
                   "\n T: " + filepath2 + "\n  with " +
                   str(sourcePoints.size()) + " mesh sourcePoints.")
        # Run
        BlockMatching.matchByMaximalPMCCFromPreScaledImages(
            fp1,
            fp2,
            params["scale"],  # float
            params["blockRadius"],  # X
            params["blockRadius"],  # Y
            params["searchRadius"],  # X
            params["searchRadius"],  # Y
            params["minR"],  # float
            params["rod"],  # float
            params["maxCurvature"],  # float
            sourcePoints,
            sourceMatches)

        # At least some should match to accept the translation
        if len(sourceMatches) < max(20, len(sourcePoints) / 5) / 2:
            syncPrintQ(
                "Found only %i blockmatching pointmatches (from %i source points)"
                % (len(sourceMatches), len(sourcePoints)))
            syncPrintQ(
                "... therefore invoking SIFT pointmatching for:\n  S: " +
                basename(filepath1) + "\n  T: " + basename(filepath2))
            # Can fail if there is a shift larger than the searchRadius
            # Try SIFT features, which are location independent
            #
            # Images are now scaled: load originals
            futures = [
                exeload.submit(
                    Task(loadFloatProcessor,
                         filepath1,
                         params,
                         paramsSIFT,
                         scale=False)),
                exeload.submit(
                    Task(loadFloatProcessor,
                         filepath2,
                         params,
                         paramsSIFT,
                         scale=False))
            ]

            fp1 = futures[0].get()  # FloatProcessor, original
            fp2 = futures[1].get()  # FloatProcessor, original

            # Images can be of different size: scale them the same way
            area1 = fp1.width * fp1.height
            area2 = fp2.width * fp2.height

            if area1 == area2:
                paramsSIFT1 = paramsSIFT.clone()
                paramsSIFT1.maxOctaveSize = int(
                    max(properties.get("SIFT_max_size", 2048),
                        fp1.width * params["scale"]))
                paramsSIFT1.minOctaveSize = int(paramsSIFT1.maxOctaveSize /
                                                pow(2, paramsSIFT1.steps))
                paramsSIFT2 = paramsSIFT1
            else:
                bigger, smaller = (fp1, fp2) if area1 > area2 else (fp2, fp1)
                target_width_bigger = int(
                    max(1024, bigger.width * params["scale"]))
                if 1024 == target_width_bigger:
                    target_width_smaller = int(1024 * float(smaller.width) /
                                               bigger.width)
                else:
                    target_width_smaller = int(smaller.width * params["scale"])
                #
                paramsSIFT1 = paramsSIFT.clone()
                paramsSIFT1.maxOctaveSize = target_width_bigger
                paramsSIFT1.minOctaveSize = int(paramsSIFT1.maxOctaveSize /
                                                pow(2, paramsSIFT1.steps))
                paramsSIFT2 = paramsSIFT.clone()
                paramsSIFT2.maxOctaveSize = target_width_smaller
                paramsSIFT2.minOctaveSize = int(paramsSIFT2.maxOctaveSize /
                                                pow(2, paramsSIFT2.steps))

            ijSIFT1 = SIFT(FloatArray2DSIFT(paramsSIFT1))
            features1 = ArrayList()  # of Feature instances
            ijSIFT1.extractFeatures(fp1, features1)

            ijSIFT2 = SIFT(FloatArray2DSIFT(paramsSIFT2))
            features2 = ArrayList()  # of Feature instances
            ijSIFT2.extractFeatures(fp2, features2)
            # Vector of PointMatch instances
            sourceMatches = FloatArray2DSIFT.createMatches(
                features1,
                features2,
                params.get("max_sd", 1.5),  # max_sd: maximal difference in size (ratio max/min)
                TranslationModel2D(),
                params.get("max_id", Double.MAX_VALUE),  # max_id: maximal distance in image space
                params.get("rod", 0.9))  # rod: ratio of best vs second best

        # Store pointmatches
        savePointMatches(os.path.basename(filepath1),
                         os.path.basename(filepath2), sourceMatches, csvDir,
                         params)

        return True
    except:
        printException()
Example No. 8
    print("+=============================================+")
    print("| Registration of image pairs {: 4d} / {: 4d}     |".format(
        i + 1, N))
    print("+=============================================+")
    print

    # Loading the pair of images
    print("Loading the images...")
    imp1 = IJ.openImage(list1[i])
    print(imp1)
    imp2 = IJ.openImage(list2[i])
    print(imp2)
    print

    # Parameters for SIFT: NOTE 4 steps, larger maxOctaveSize
    p = FloatArray2DSIFT.Param()
    p.fdSize = 4  # number of samples per row and column
    p.fdBins = 8  # number of bins per local histogram
    p.maxOctaveSize = 1024  # largest scale octave in pixels
    p.minOctaveSize = 128  # smallest scale octave in pixels
    p.steps = 4  # number of steps per scale octave
    p.initialSigma = 1.6

    def extractFeatures(ip, params):
        sift = FloatArray2DSIFT(params)
        sift.init(
            FloatArray2D(ip.convertToFloat().getPixels(), ip.getWidth(),
                         ip.getHeight()))
        features = sift.run()  # instances of mpicbg.imagefeatures.Feature
        return features
    'searchRadius': 100,  # a low value: we expect little translation
    'blockRadius': 200  # small, yet enough
}

# Parameters for computing the transformation models
paramsTileConfiguration = {
    "n_adjacent": 3,  # minimum of 1; Number of adjacent sections to pair up
    "maxAllowedError": 0,  # Saalfeld recommends 0
    "maxPlateauwidth": 200,  # Like in TrakEM2
    "maxIterations":
    2,  # Saalfeld recommends 1000 -- here, 2 iterations (!!) shows the lowest mean and max error for dataset FIBSEM_L1116
    "damp": 1.0,  # Saalfeld recommends 1.0, which means no damp
}

# Parameters for SIFT features, in case blockmatching fails due to large translation
paramsSIFT = FloatArray2DSIFT.Param()
paramsSIFT.fdSize = 8  # default is 4
paramsSIFT.fdBins = 8  # default is 8
paramsSIFT.maxOctaveSize = 1024  # will be changed later to adapt to image size
paramsSIFT.steps = 3
paramsSIFT.minOctaveSize = 256  # will be changed later to adapt to image size
paramsSIFT.initialSigma = 1.6  # default 1.6

params["paramsSIFT"] = paramsSIFT

# Ensure target directories exist
if not os.path.exists(tgtDir):
    os.mkdir(tgtDir)

csvDir = os.path.join(tgtDir, "csvs")
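# A rough sketch of how the paramsTileConfiguration values above could be consumed
# downstream, assuming mpicbg's TileConfiguration is used to solve the registration.
# 'filepaths' and 'pointmatches_for' are hypothetical stand-ins for the list of
# section images and a lookup of the PointMatch collections produced earlier.
from mpicbg.models import Tile, TileConfiguration, TranslationModel2D

tiles = [Tile(TranslationModel2D()) for fp in filepaths]  # one tile per section
for i in xrange(len(tiles) - 1):
    tiles[i].connect(tiles[i + 1], pointmatches_for(i, i + 1))  # adjacent sections only

tc = TileConfiguration()
for t in tiles:
    tc.addTile(t)
tc.fixTile(tiles[0])  # anchor the first section
tc.optimize(paramsTileConfiguration["maxAllowedError"],
            paramsTileConfiguration["maxIterations"],
            paramsTileConfiguration["maxPlateauwidth"])
# "n_adjacent" controls which section pairs get matched; "damp" is consumed by
# other optimizer variants and is not passed to this basic optimize call.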
Example No. 10
def extractBlockMatches(filepath1,
                        filepath2,
                        params,
                        csvDir,
                        exeload,
                        load=loadFPMem):
    """
  filepath1: the file path to an image of a section.
  filepath2: the file path to an image of another section.
  params: dictionary of parameters necessary for BlockMatching.
  exeload: an ExecutorService for parallel loading of image files.
  load: a function that knows how to load the image from the filepath.

  return False if the CSV file already exists, True if it has to be computed.
  """

    # Skip if pointmatches CSV file exists already:
    csvpath = os.path.join(
        csvDir,
        basename(filepath1) + '.' + basename(filepath2) + ".pointmatches.csv")
    if os.path.exists(csvpath):
        return False

    try:

        # Load files in parallel
        futures = [
            exeload.submit(Task(load, filepath1)),
            exeload.submit(Task(load, filepath2))
        ]

        # Define points from the mesh
        sourcePoints = ArrayList()
        mesh = TransformMesh(params["meshResolution"], dimensions[0],
                             dimensions[1])
        PointMatch.sourcePoints(mesh.getVA().keySet(), sourcePoints)
        # List to fill
        sourceMatches = ArrayList()  # of PointMatch from filepath1 to filepath2

        syncPrint("Extracting block matches for \n S: " + filepath1 +
                  "\n T: " + filepath2 + "\n  with " +
                  str(sourcePoints.size()) + " mesh sourcePoints.")

        BlockMatching.matchByMaximalPMCCFromPreScaledImages(
            futures[0].get(),  # FloatProcessor
            futures[1].get(),  # FloatProcessor
            params["scale"],  # float
            params["blockRadius"],  # X
            params["blockRadius"],  # Y
            params["searchRadius"],  # X
            params["searchRadius"],  # Y
            params["minR"],  # float
            params["rod"],  # float
            params["maxCurvature"],  # float
            sourcePoints,
            sourceMatches)

        # At least some should match to accept the translation
        if len(sourceMatches) < max(20, len(sourcePoints) / 5) / 2:
            syncPrint(
                "Found only %i blockmatching pointmatches (from %i source points)"
                % (len(sourceMatches), len(sourcePoints)))
            syncPrint("... therefore invoking SIFT pointmatching for:\n  S: " +
                      basename(filepath1) + "\n  T: " + basename(filepath2))
            # Can fail if there is a shift larger than the searchRadius
            # Try SIFT features, which are location independent
            #
            # Images are now scaled: load originals
            futures = [
                exeload.submit(Task(loadFloatProcessor, filepath1,
                                    scale=False)),
                exeload.submit(Task(loadFloatProcessor, filepath2,
                                    scale=False))
            ]
            ijSIFT = SIFT(FloatArray2DSIFT(paramsSIFT))
            features1 = ArrayList()  # of Feature instances
            ijSIFT.extractFeatures(futures[0].get(), features1)
            features2 = ArrayList()  # of Feature instances
            ijSIFT.extractFeatures(futures[1].get(), features2)
            # Vector of PointMatch instances
            sourceMatches = FloatArray2DSIFT.createMatches(
                features1,
                features2,
                1.5,  # max_sd
                TranslationModel2D(),
                Double.MAX_VALUE,
                params["rod"])  # rod: ratio of best vs second best

        # Store pointmatches
        savePointMatches(os.path.basename(filepath1),
                         os.path.basename(filepath2), sourceMatches, csvDir,
                         params)

        return True
    except:
        syncPrint(sys.exc_info())
        syncPrint("".join(traceback.format_exception()), out="stderr")