Example #1
def ensureSIFTFeatures(filepath,
                       paramsSIFT,
                       properties,
                       csvDir,
                       validateByFileExists=False):
    """
     filepath: to the image from which SIFT features have been or have to be extracted.
     params: dict of registration parameters, including the key "scale".
     paramsSIFT: FloatArray2DSIFT.Params instance.
     csvDir: directory into which serialized features have been or will be saved.
     load: function to load an image as an ImageProcessor from the filepath.
     validateByFileExists: whether to merely check that the .obj file exists as a quick form of validation.
     
     First check if serialized features exist for the image, and if the Params match.
     Otherwise extract the features and store them serialized.
     Returns the ArrayList of Feature instances.
  """
    path = os.path.join(csvDir,
                        os.path.basename(filepath) + ".SIFT-features.obj")
    if validateByFileExists:
        if os.path.exists(path):
            return True
    # An ArrayList whose last element is a mpicbg.imagefeatures.FloatArray2DSIFT.Param
    # and all other elements are mpicbg.imagefeatures.Feature
    features = deserialize(path) if os.path.exists(path) else None
    if features:
        if features.get(features.size() - 1).equals(paramsSIFT):
            features.remove(features.size() - 1)  # removes the Params
            syncPrintQ("Loaded %i SIFT features for %s" %
                       (features.size(), os.path.basename(filepath)))
            return features
        else:
            # Remove the file: paramsSIFT have changed
            os.remove(path)
    # Else, extract de novo:
    try:
        # Extract features
        imp = loadImp(filepath)
        ip = imp.getProcessor()
        paramsSIFT = paramsSIFT.clone()
        ijSIFT = SIFT(FloatArray2DSIFT(paramsSIFT))
        features = ArrayList()  # of Feature instances
        ijSIFT.extractFeatures(ip, features)
        ip = None
        imp.flush()
        imp = None
        features.add(
            paramsSIFT
        )  # append Params instance at the end for future validation
        serialize(features, path)
        features.remove(features.size() -
                        1)  # to return without the Params for immediate use
        syncPrintQ("Extracted %i SIFT features for %s" %
                   (features.size(), os.path.basename(filepath)))
    except:
        printException()
    return features
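A minimal usage sketch for the function above, assuming a Fiji/Jython session where this file's functions are in scope; the paths are hypothetical and the Param values are placeholders:

from mpicbg.imagefeatures import FloatArray2DSIFT

paramsSIFT = FloatArray2DSIFT.Param()
paramsSIFT.fdSize = 8            # feature descriptor size
paramsSIFT.maxOctaveSize = 1024  # largest scale octave, in pixels

# The first call extracts and serializes the features; a second call with an
# equal Param instance deserializes them instead of recomputing.
features = ensureSIFTFeatures("/path/to/section-0001.tif",  # hypothetical
                              paramsSIFT, {}, "/path/to/csvDir/")
print features.size()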
Example #2
def loadFloatProcessor(filepath, params, paramsSIFT, scale=True):
    try:
        fp = loadImp(filepath).getProcessor().convertToFloatProcessor()
        # Preprocess images: Gaussian-blur to scale down, then normalize contrast
        if scale:
            fp = Filter.createDownsampled(fp, params["scale"], 0.5,
                                          paramsSIFT.initialSigma)
            Util.normalizeContrast(fp)  # TODO should be outside the if clause?
        return fp
    except:
        syncPrintQ(sys.exc_info())
Example #3
 def translate(self, dx, dy):
     a = zeros(2, 'l')
     self.interval.min(a)
     width = self.cell_dimensions[0]
     height = self.cell_dimensions[1]
     x0 = max(0, min(a[0] + dx, self.img_dimensions[0] - width))
     y0 = max(0, min(a[1] + dy, self.img_dimensions[1] - height))
     self.interval = FinalInterval([x0, y0],
                                   [x0 + width - 1, y0 + height - 1])
     syncPrintQ(str(Intervals.dimensionsAsLongArray(self.interval)))
     self.cache.clear()
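A worked example of the clamping above (all numbers hypothetical): with img_dimensions = [4096, 4096], cell_dimensions = [1024, 1024], a = [3500, 0] and dx = 800, then x0 = max(0, min(3500 + 800, 4096 - 1024)) = 3072, so the view stops at the right edge of the image instead of running past it.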
Example #4
def qualityControl(filepaths,
                   csvDir,
                   params,
                   properties,
                   paramsTileConfiguration,
                   imp=None):
    """
     Show a 3-column table with the indices of all compared pairs of sections and their pointmatches.
  """

    rows = []
    """
  for task in loadPointMatchesTasks(filepaths, csvDir, params, paramsTileConfiguration["n_adjacent"]):
    i, j, pointmatches = task.call() # pointmatches is a list
    rows.append([i, j, len(pointmatches)])
    syncPrintQ("Counting pointmatches for sections %i::%i = %i" % (i, j, len(pointmatches)))
  """

    # Same, in parallel:
    w = ParallelTasks("loadPointMatches")
    for i, j, pointmatches in w.chunkConsume(
            properties["n_threads"],
            loadPointMatchesTasks(filepaths, csvDir, params,
                                  paramsTileConfiguration["n_adjacent"])):
        rows.append([i, j, len(pointmatches)])
        syncPrintQ("Counting pointmatches for sections %i::%i = %i" %
                   (i, j, len(pointmatches)))
    w.awaitAll()
    w.destroy()

    if imp is None:
        img_title = properties["srcDir"].split('/')[-2]
        imp = WindowManager.getImage(img_title)

    print imp

    ob = None
    exe = None
    if imp:
        ob = SetStackSlice(imp)
        exe = Executors.newSingleThreadScheduledExecutor()
        exe.scheduleWithFixedDelay(ob, 0, 500, TimeUnit.MILLISECONDS)
    else:
        print "image titled %s is not open." % img_title

    table, frame = showTable(
        rows,
        column_names=["section i", "section j", "n_pointmatches"],
        title="Number of pointmatches",
        onCellClickFn=ob.setFromTableCell if ob else None)  # assumes showTable tolerates None
    if exe:
        frame.addWindowListener(ExecutorCloser(exe))

    return table, frame
Example #5
def deserialize(filepath):
  f = None
  o = None
  obj = None
  try:
    f = FileInputStream(filepath)
    o = ObjectInputStream(f)
    obj = o.readObject()
  except:
    syncPrintQ(sys.exc_info())
  finally:
    # Close the ObjectInputStream first: it also closes the underlying file stream
    if o:
      o.close()
    elif f:
      f.close()
  if obj is None:
    syncPrintQ("Failed to deserialize object at " + filepath)
  return obj
Example #6
def serialize(obj, filepath):
  if not Serializable.isAssignableFrom(obj.getClass()):
    syncPrintQ("Object doesn't implement Serializable: " + str(obj))
    return False
  f = None
  o = None
  try:
    f = FileOutputStream(filepath)
    o = ObjectOutputStream(f)
    o.writeObject(obj)
    o.flush()
    f.getFD().sync() # ensure file is written to disk
    return True
  except:
    syncPrintQ(sys.exc_info())
    return False
  finally:
    if o:
      o.close()
    if f:
      f.close()
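A round-trip sketch for the two functions above, assuming a JVM/Jython session; java.util.ArrayList implements Serializable, and the path is hypothetical:

from java.util import ArrayList

al = ArrayList()
al.add("hello")
if serialize(al, "/tmp/list.obj"):
    copy = deserialize("/tmp/list.obj")
    print copy.get(0)  # prints "hello"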
Example #7
def extractSIFTMatches(filepath1, filepath2, params, paramsSIFT, properties,
                       csvDir):
    # Skip if pointmatches CSV file exists already:
    csvpath = os.path.join(
        csvDir,
        basename(filepath1) + '.' + basename(filepath2) + ".pointmatches.csv")
    if os.path.exists(csvpath):
        return False

    try:
        # Load from CSV files or extract features de novo
        features1 = ensureSIFTFeatures(filepath1, paramsSIFT, properties,
                                       csvDir)
        features2 = ensureSIFTFeatures(filepath2, paramsSIFT, properties,
                                       csvDir)
        #syncPrintQ("Loaded %i features for %s\n       %i features for %s" % (features1.size(), os.path.basename(filepath1),
        #                                                                     features2.size(), os.path.basename(filepath2)))
        # Vector of PointMatch instances
        sourceMatches = FloatArray2DSIFT.createMatches(
            features1,
            features2,
            params.get(
                "max_sd",
                1.5),  # max_sd: maximal difference in size (ratio max/min)
            TranslationModel2D(),
            params.get(
                "max_id",
                Double.MAX_VALUE),  # max_id: maximal distance in image space
            params.get("rod", 0.9))  # rod: ratio of best vs second best

        syncPrintQ("Found %i SIFT pointmatches for %s vs %s" %
                   (sourceMatches.size(), os.path.basename(filepath1),
                    os.path.basename(filepath2)))

        # Store pointmatches
        savePointMatches(os.path.basename(filepath1),
                         os.path.basename(filepath2), sourceMatches, csvDir,
                         params)
        return True
    except:
        printException()
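A hypothetical params dict illustrating the three keys read via params.get above; the values shown are just the defaults from those calls, not recommendations:

from java.lang import Double

params = {
    "max_sd": 1.5,               # maximal difference in feature size (ratio max/min)
    "max_id": Double.MAX_VALUE,  # maximal distance in image space
    "rod": 0.9,                  # ratio of distances, best vs second best match
}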
Example #8
def makeLinkedTiles(filepaths, csvDir, params, paramsSIFT, n_adjacent,
                    properties):
    if properties.get("precompute", True):
        ensurePointMatches(filepaths, csvDir, params, paramsSIFT, n_adjacent,
                           properties)
    try:
        i, j = -1, -1  # defined up front so the except clause can report them on early failure
        tiles = [Tile(TranslationModel2D()) for _ in filepaths]
        # FAILS when running in parallel, for mysterious reasons related to jython internals, perhaps syncPrint fails
        #w = ParallelTasks("loadPointMatches")
        #for i, j, pointmatches in w.chunkConsume(properties["n_threads"], loadPointMatchesTasks(filepaths, csvDir, params, n_adjacent)):
        syncPrintQ("Loading all pointmatches.")
        if properties.get("use_SIFT"):
            params = {"rod": params["rod"]}
        for task in loadPointMatchesTasks(filepaths, csvDir, params,
                                          n_adjacent):
            i, j, pointmatches = task.call()
            syncPrintQ("%i, %i : %i" % (i, j, len(pointmatches)))
            tiles[i].connect(tiles[j], pointmatches)  # reciprocal connection
        syncPrintQ("Finished loading all pointmatches.")
        return tiles
    except Exception as e:
        syncPrintQ("Failed at i=%i, j=%i: %s" % (i, j, str(e)))
        printException()
    finally:
        #w.destroy()
        pass
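The linked tiles returned above are typically handed to an mpicbg TileConfiguration for global optimization; a hedged sketch (the error and iteration values are placeholders, not tuned recommendations):

from mpicbg.models import TileConfiguration

tc = TileConfiguration()
tc.addTiles(tiles)
tc.fixTile(tiles[0])  # anchor the first section so the solution is unique
tc.optimize(10, 1000, 200)  # maxAllowedError, maxIterations, maxPlateauwidth
for tile in tc.getTiles():
    print tile.getModel()  # the optimized TranslationModel2D per section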
Example #9
 def preload(cachedCellImg, loader, block_size, filepaths, exe):
     """
 Find which is the last cell index in the cache, identify to which block
 (given the blockSize[2] AKA Z dimension) that index belongs to,
 and concurrently load all cells (sections) that the Z dimension of the blockSize will need.
 If they are already loaded, these operations are insignificant.
 """
     try:
         # The SoftRefLoaderCache.map is a ConcurrentHashMap with Long keys, aka numbers
         cache = cachedCellImg.getCache()
         f1 = cache.getClass().getDeclaredField(
             "cache")  # LoaderCacheAsCacheAdapter.cache
         f1.setAccessible(True)
         softCache = f1.get(cache)
         cache = None
         f2 = softCache.getClass().getDeclaredField(
             "map")  # SoftRefLoaderCache.map
         f2.setAccessible(True)
         keys = sorted(f2.get(softCache).keySet())
         if 0 == len(keys):
             return
         first = max(0, keys[-1] - (keys[-1] % block_size[2]))
         last = min(len(filepaths), first + block_size[2]) - 1
         keys = None
         syncPrintQ("### Preloading %i-%i ###" % (first, last))
         futures = []
         for index in xrange(first, last + 1):
             futures.append(
                 exe.submit(TimeItTask(softCache.get, index, loader)))
         softCache = None
         # Wait for all
         loaded_any = False
         count = 0
         while len(futures) > 0:
             r, t = futures.pop(0).get()  # waits for the image to load
             if t > 1000:  # t is in milliseconds; less than this is surely a cache hit, more a cache miss and reload
                 loaded_any = True
             r = None
             syncPrintQ("preloaded index %i in %f ms" % (first + count, t))
             count += 1
         if not loaded_any:
             syncPrintQ("Completed preloading %i-%i" %
                        (first, first + block_size[2] - 1))
     except:
         syncPrintQ(sys.exc_info())
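A hedged scheduling sketch for the function above: run preload periodically on a single-threaded scheduler while a separate pool loads the cells. cachedCellImg, loader, block_size and filepaths are assumed to exist in the calling scope; the delays and pool size are placeholders:

from java.util.concurrent import Executors, TimeUnit

preloader = Executors.newSingleThreadScheduledExecutor()
loader_exe = Executors.newFixedThreadPool(4)

def run():
    preload(cachedCellImg, loader, block_size, filepaths, loader_exe)

# Jython coerces a Python function into a java.lang.Runnable
preloader.scheduleWithFixedDelay(run, 10, 1000, TimeUnit.MILLISECONDS)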
Example #10
 def keyPressed(self, event):
     try:
         dx, dy = self.delta.get(event.getKeyCode(), (0, 0))
         if 0 == dx and 0 == dy:  # no movement mapped to this key
             return
         syncPrintQ("Translating source")
         if event.isShiftDown():
             dx *= self.shift
             dy *= self.shift
         if event.isAltDown():
             dx *= self.alt
             dy *= self.alt
         syncPrintQ("... by x=%i, y=%i" % (dx, dy))
         self.cellGet.translate(dx, dy)
         self.imp.updateAndDraw()
         event.consume()
     except:
         syncPrintQ(str(sys.exc_info()))
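A hedged sketch of the kind of self.delta mapping the handler above assumes: arrow-key codes mapped to (dx, dy) steps. The KeyEvent constants are real; the step size of 1 is a placeholder:

from java.awt.event import KeyEvent

delta = {KeyEvent.VK_LEFT:  (-1,  0),
         KeyEvent.VK_RIGHT: ( 1,  0),
         KeyEvent.VK_UP:    ( 0, -1),
         KeyEvent.VK_DOWN:  ( 0,  1)}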
Example #11
def extractBlockMatches(filepath1, filepath2, params, paramsSIFT, properties,
                        csvDir, exeload, load):
    """
  filepath1: the file path to an image of a section.
  filepath2: the file path to an image of another section.
  params: dictionary of parameters necessary for BlockMatching.
  exeload: an ExecutorService for parallel loading of image files.
  load: a function that knows how to load the image from the filepath.

  return False if the CSV file already exists, True if it has to be computed.
  """

    # Skip if pointmatches CSV file exists already:
    csvpath = os.path.join(
        csvDir,
        basename(filepath1) + '.' + basename(filepath2) + ".pointmatches.csv")
    if os.path.exists(csvpath):
        return False

    try:

        # Load files in parallel
        futures = [
            exeload.submit(Task(load, filepath1)),
            exeload.submit(Task(load, filepath2))
        ]

        fp1 = futures[0].get(
        )  # FloatProcessor, already Gaussian-blurred, contrast-corrected and scaled!
        fp2 = futures[1].get()  # FloatProcessor, idem

        # Define points from the mesh
        sourcePoints = ArrayList()
        # List to fill
        sourceMatches = ArrayList(
        )  # of PointMatch from filepath1 to filepath2

        # Don't use blockmatching if the dimensions are different:
        # leave sourceMatches empty so that the SIFT fallback below kicks in.
        use_blockmatching = (fp1.getWidth() == fp2.getWidth()
                             and fp1.getHeight() == fp2.getHeight())

        # Fill the sourcePoints
        mesh = TransformMesh(params["meshResolution"], fp1.width, fp1.height)
        PointMatch.sourcePoints(mesh.getVA().keySet(), sourcePoints)
        syncPrintQ("Extracting block matches for \n S: " + filepath1 +
                   "\n T: " + filepath2 + "\n  with " +
                   str(sourcePoints.size()) + " mesh sourcePoints.")
        # Run
        if use_blockmatching:
            BlockMatching.matchByMaximalPMCCFromPreScaledImages(
                fp1,
                fp2,
                params["scale"],  # float
                params["blockRadius"],  # X
                params["blockRadius"],  # Y
                params["searchRadius"],  # X
                params["searchRadius"],  # Y
                params["minR"],  # float
                params["rod"],  # float
                params["maxCurvature"],  # float
                sourcePoints,
                sourceMatches)

        # At least some should match to accept the translation
        if len(sourceMatches) < max(20, len(sourcePoints) / 5) / 2:
            syncPrintQ(
                "Found only %i blockmatching pointmatches (from %i source points)"
                % (len(sourceMatches), len(sourcePoints)))
            syncPrintQ(
                "... therefore invoking SIFT pointmatching for:\n  S: " +
                basename(filepath1) + "\n  T: " + basename(filepath2))
            # Can fail if there is a shift larger than the searchRadius
            # Try SIFT features, which are location independent
            #
            # Images are now scaled: load originals
            futures = [
                exeload.submit(
                    Task(loadFloatProcessor,
                         filepath1,
                         params,
                         paramsSIFT,
                         scale=False)),
                exeload.submit(
                    Task(loadFloatProcessor,
                         filepath2,
                         params,
                         paramsSIFT,
                         scale=False))
            ]

            fp1 = futures[0].get()  # FloatProcessor, original
            fp2 = futures[1].get()  # FloatProcessor, original

            # Images can be of different size: scale them the same way
            area1 = fp1.width * fp1.height
            area2 = fp2.width * fp2.height

            if area1 == area2:
                paramsSIFT1 = paramsSIFT.clone()
                paramsSIFT1.maxOctaveSize = int(
                    max(properties.get("SIFT_max_size", 2048),
                        fp1.width * params["scale"]))
                paramsSIFT1.minOctaveSize = int(paramsSIFT1.maxOctaveSize /
                                                pow(2, paramsSIFT1.steps))
                paramsSIFT2 = paramsSIFT1
            else:
                bigger, smaller = (fp1, fp2) if area1 > area2 else (fp2, fp1)
                target_width_bigger = int(
                    max(1024, bigger.width * params["scale"]))
                if 1024 == target_width_bigger:
                    target_width_smaller = int(1024 * float(smaller.width) /
                                               bigger.width)
                else:
                    target_width_smaller = int(smaller.width * params["scale"])
                #
                paramsSIFT1 = paramsSIFT.clone()
                paramsSIFT1.maxOctaveSize = target_width_bigger
                paramsSIFT1.minOctaveSize = int(paramsSIFT1.maxOctaveSize /
                                                pow(2, paramsSIFT1.steps))
                paramsSIFT2 = paramsSIFT.clone()
                paramsSIFT2.maxOctaveSize = target_width_smaller
                paramsSIFT2.minOctaveSize = int(paramsSIFT2.maxOctaveSize /
                                                pow(2, paramsSIFT2.steps))

            ijSIFT1 = SIFT(FloatArray2DSIFT(paramsSIFT1))
            features1 = ArrayList()  # of Feature instances
            ijSIFT1.extractFeatures(fp1, features1)

            ijSIFT2 = SIFT(FloatArray2DSIFT(paramsSIFT2))
            features2 = ArrayList()  # of Feature instances
            ijSIFT2.extractFeatures(fp2, features2)
            # Vector of PointMatch instances
            sourceMatches = FloatArray2DSIFT.createMatches(
                features1,
                features2,
                params.get(
                    "max_sd",
                    1.5),  # max_sd: maximal difference in size (ratio max/min)
                TranslationModel2D(),
                params.get("max_id", Double.MAX_VALUE
                           ),  # max_id: maximal distance in image space
                params.get("rod", 0.9))  # rod: ratio of best vs second best

        # Store pointmatches
        savePointMatches(os.path.basename(filepath1),
                         os.path.basename(filepath2), sourceMatches, csvDir,
                         params)

        return True
    except:
        printException()
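A hypothetical params dict showing the BlockMatching keys read above; all values are illustrative placeholders, not tuned settings:

params = {
    "scale": 0.25,          # images are pre-scaled by this factor
    "meshResolution": 20,   # number of mesh intervals across the image
    "blockRadius": 50,      # PMCC block half-width, in pixels
    "searchRadius": 25,     # search half-range, in pixels
    "minR": 0.1,            # minimal PMCC correlation to accept a match
    "rod": 0.9,             # ratio of best vs second best correlation
    "maxCurvature": 1000,   # maximal curvature of the correlation landscape
}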
Example #12
def loadImp(filepath):
    """ Returns an ImagePlus """
    syncPrintQ("Loading image " + filepath)
    return IJ.openImage(filepath)
Example #13
def ensurePointMatches(filepaths, csvDir, params, paramsSIFT, n_adjacent,
                       properties):
    """ If a pointmatches csv file doesn't exist, will create it. """
    w = ParallelTasks("ensurePointMatches",
                      exe=newFixedThreadPool(properties["n_threads"]))
    exeload = newFixedThreadPool()
    try:
        if properties.get("use_SIFT", False):
            syncPrintQ("use_SIFT is True")
            # Pre-extract SIFT features for all images first.
            # ensureSIFTFeatures returns the features list, so each Future would keep it in memory:
            # don't hold onto the futures; instead consume the tasks in chunks:
            chunk_size = properties["n_threads"] * 2
            count = 0
            for result in w.chunkConsume(
                    chunk_size,  # tasks to submit before starting to wait for futures
                (Task(ensureSIFTFeatures,
                      filepath,
                      paramsSIFT,
                      properties,
                      csvDir,
                      validateByFileExists=properties.get(
                          "SIFT_validateByFileExists"))
                 for filepath in filepaths)):
                count += 1
                if 0 == count % chunk_size:
                    syncPrintQ(
                        "Completed extracting or validating SIFT features for %i images."
                        % count)
            w.awaitAll()
            syncPrintQ(
                "Completed extracting or validating SIFT features for all images."
            )
            # Compute pointmatches across adjacent sections
            count = 0
            for result in w.chunkConsume(
                    chunk_size,
                    generateSIFTMatches(filepaths, n_adjacent, params,
                                        paramsSIFT, properties, csvDir)):
                count += 1
                syncPrintQ("Completed SIFT pointmatches %i/%i" %
                           (count, len(filepaths) * n_adjacent))
        else:
            # Use blockmatches
            syncPrintQ("using blockmatches")
            loadFPMem = SoftMemoize(lambda path: loadFloatProcessor(
                path, params, paramsSIFT, scale=True),
                                    maxsize=properties["n_threads"] +
                                    n_adjacent)
            count = 1
            for result in w.chunkConsume(
                    properties["n_threads"],
                    pointmatchingTasks(filepaths, csvDir, params, paramsSIFT,
                                       n_adjacent, exeload, properties,
                                       loadFPMem)):
                if result:  # is False when CSV file already exists
                    syncPrintQ("Completed %i/%i" %
                               (count, len(filepaths) * n_adjacent))
                count += 1
            syncPrintQ("Awaiting all remaining pointmatching tasks to finish.")
        w.awaitAll()
        syncPrintQ("Finished all pointmatching tasks.")
    except:
        printException()
    finally:
        exeload.shutdown()
        w.destroy()
Example #14
def savePointMatches(img_filename1,
                     img_filename2,
                     pointmatches,
                     directory,
                     params,
                     coords_header=["x1", "y1", "x2", "y2"]):
    filename = basename(img_filename1) + '.' + basename(
        img_filename2) + ".pointmatches.csv"
    path = os.path.join(directory, filename)
    msg = [str(len(pointmatches))]
    ra = None
    try:
        """
    with open(path, 'w') as csvfile:
      w = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_NONNUMERIC)
      # First two rows: parameter names and values
      keys = params.keys()
      msg.append("keys: " + ",".join(map(str, keys)))
      msg.append("vals: " + ",".join(str(params[key]) for key in keys))
      #for pm in pointmatches:
      #  msg.append(", ".join(map(str, PointMatches.asRow(pm))))
      w.writerow(keys)
      w.writerow(tuple(params[key] for key in keys))
      # PointMatches header
      if 0 == len(pointmatches):
        # Can't know whether there are 2 or 3 dimensions per coordinate
        w.writerow(coords_header)
      else:
        w.writerow(PointMatches.csvHeader(next(iter(pointmatches)))) # support both lists and sets
      # One PointMatch per row
      for pm in pointmatches:
        w.writerow(PointMatches.asRow(pm))
      # Ensure it's written
      csvfile.flush()
      os.fsync(csvfile.fileno())
    """
        # DEBUG write differently, the above FAILS for ~20 out of 130,000 files
        lines = []
        keys = params.keys()
        lines.append(",".join(map(str, keys)))
        lines.append(",".join(map(str, (params[key] for key in keys))))
        header = coords_header if 0 == len(pointmatches) \
                               else PointMatches.csvHeader(next(iter(pointmatches)))
        lines.append(",".join(header))
        for pm in pointmatches:
            p1 = pm.getP1().getW()  # a double[] array
            p2 = pm.getP2().getW()  # a double[] array
            # NOTE: writes 2D coordinates only, matching the default coords_header
            lines.append("%f,%f,%f,%f" % (p1[0], p1[1], p2[0], p2[1]))
        body = "\n".join(lines)
        ra = RandomAccessFile(path, 'rw')
        ra.setLength(0)  # truncate any pre-existing content
        ra.writeBytes(body)
        ra.getFD().sync()  # ensure it's written to disk
    except:
        syncPrintQ("Failed to save pointmatches at %s\n%s" %
                   (path, "\n".join(msg)))
        printException()
        if os.path.exists(path):
            os.remove(path)
    finally:
        if ra is not None:
            ra.close()
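A hedged, hypothetical reader for the CSV layout written above: two parameter rows, one coordinates header, then one pointmatch per row (plain Python 2 / Jython, standard csv module):

import csv

def readPointMatchesCSV(path):
    with open(path, 'r') as f:
        reader = csv.reader(f)
        keys = reader.next()    # parameter names
        vals = reader.next()    # parameter values
        header = reader.next()  # e.g. x1,y1,x2,y2
        rows = [map(float, row) for row in reader]
    return dict(zip(keys, vals)), header, rows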