import sys

def ensurePointMatches(filepaths, csvDir, params, n_adjacent):
    """ If a pointmatches CSV file doesn't exist, create it. """
    w = ParallelTasks("ensurePointMatches", exe=newFixedThreadPool(numCPUs()))
    exeload = newFixedThreadPool()  # uses as many threads as CPUs
    try:
        count = 1
        for result in w.chunkConsume(
                numCPUs() * 2,
                pointmatchingTasks(filepaths, csvDir, params, n_adjacent,
                                   exeload)):
            if result:  # is False when CSV file already exists
                syncPrint("Completed %i/%i" %
                          (count, len(filepaths) * n_adjacent))
            count += 1
        syncPrint("Awaiting all remaining pointmatching tasks to finish.")
        w.awaitAll()
        syncPrint("Finished all pointmatching tasks.")
    except:
        print sys.exc_info()
    finally:
        exeload.shutdown()
        w.destroy()
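# Hypothetical invocation (illustration only; Example #3 below sets up real
# values): each of the len(filepaths) sections is compared against its
# n_adjacent neighbors, hence the len(filepaths) * n_adjacent total above.
# ensurePointMatches(filepaths, csvDir, params, n_adjacent=3)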
def preload(cachedCellImg, loader, block_size, filepaths):
    """
    Find the last cell index present in the cache, identify to which block
    (given block_size[2], i.e. the Z dimension) that index belongs,
    and concurrently load all cells (sections) that the block spans in Z.
    If they are already loaded, these operations are insignificant.
    """
    exe = newFixedThreadPool(n_threads=min(block_size[2], numCPUs()),
                             name="preloader")
    try:
        # The SoftRefLoaderCache.map is a ConcurrentHashMap with Long keys, aka numbers
        cache = cachedCellImg.getCache()
        f1 = cache.getClass().getDeclaredField("cache")  # LoaderCacheAsCacheAdapter.cache
        f1.setAccessible(True)
        softCache = f1.get(cache)
        cache = None
        f2 = softCache.getClass().getDeclaredField("map")  # SoftRefLoaderCache.map
        f2.setAccessible(True)
        keys = sorted(f2.get(softCache).keySet())
        if 0 == len(keys):
            return
        first = keys[-1] - (keys[-1] % block_size[2])
        last = min(len(filepaths) - 1, first + block_size[2] - 1)  # clamp to the last section index
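        # E.g. with block_size[2] = 16 and a last cached key of 53:
        # first = 53 - (53 % 16) = 48, so cells 48..63 (clamped) are loaded.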
        keys = None
        msg = "Preloading %i-%i" % (first, last)
        futures = []
        for index in xrange(first, last + 1):
            futures.append(
                exe.submit(TimeItTask(softCache.get, index, loader)))
        softCache = None
        # Wait for all tasks to complete
        count = 1
        while len(futures) > 0:
            r, t = futures.pop(0).get()
            # t is in milliseconds
            if t > 500:
                if msg:
                    syncPrint(msg)
                    msg = None
                syncPrint("preloaded index %i in %f ms" %
                          (first + count - 1, t))
            count += 1
        if not msg:  # msg was printed
            syncPrint("Completed preloading %i-%i" % (first, last))
    except:
        syncPrint(sys.exc_info())
    finally:
        exe.shutdown()
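# A usage sketch (an assumption, not from the source script): call preload()
# periodically from a scheduled executor so that upcoming sections are already
# cached when the viewer reaches them. cachedCellImg, loader and block_size
# are assumed to be created elsewhere by the script.
from java.util.concurrent import Executors, TimeUnit

preloader = Executors.newSingleThreadScheduledExecutor()
# Jython coerces a zero-argument callable into a java.lang.Runnable:
preloader.scheduleWithFixedDelay(
    lambda: preload(cachedCellImg, loader, block_size, filepaths),
    10, 60, TimeUnit.SECONDS)  # first run after 10 s, then every 60 s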
Example #3
import os

srcDir = "/groups/cardona/cardonalab/FIBSEM_L1116/"  # MUST have an ending slash
tgtDir = "/groups/cardona/cardonalab/Albert/FIBSEM_L1116/"
csvDir = os.path.join(tgtDir, "csvs")

# Recursive search into srcDir for files ending in InLens_raw.tif
filepaths = findFilePaths(srcDir, "InLens_raw.tif")

# Image properties: ASSUMES all images have the same properties
# (While the script can cope with images of different dimensions for registration,
# the visualization and export would need minor adjustments to cope.)
properties = {
    'img_dimensions': [16875, 18125],
    'srcDir': srcDir,
    'pixelType': UnsignedShortType,
    'n_threads': numCPUs(),  # number of parallel threads to use
    'invert': True,  # for viewAligned: FIBSEM images need to be inverted
    'CLAHE_params': [200, 256, 3.0],  # for viewAligned: blockRadius, nBins, slope. None disables CLAHE.
}
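
# A sketch (hypothetical helper, not part of the original script) showing how
# the CLAHE_params triple maps onto Fiji's CLAHE implementation:
from mpicbg.ij.clahe import Flat

def applyCLAHE(imp, clahe_params):
    # imp: an ij.ImagePlus; clahe_params: [blockRadius, nBins, slope]
    blockRadius, nBins, slope = clahe_params
    Flat.getFastInstance().run(imp, blockRadius, nBins, slope, None, False)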

# Parameters for blockmatching
params = {
    'scale': 0.1,  # 10%
    'meshResolution': 10,  # 10 x 10 points = 100 point matches maximum
    'minR': 0.1,  # min PMCC (Pearson product-moment correlation coefficient)
    'rod': 0.9,  # max second best r / best r
    'maxCurvature': 1000.0,  # default is 10
    'searchRadius': 100,  # a low value: we expect little translation
    'blockRadius': 200,  # small, yet enough
}
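
# An illustration (an assumption spelled out from the 'rod' comment above,
# not code from the source): with best correlation r1 and second-best r2,
# a block match is considered unambiguous only when r2 / r1 <= rod.
def passesRod(r1, r2, rod=0.9):
    # e.g. passesRod(0.8, 0.5) -> True; passesRod(0.8, 0.75) -> False
    return r2 / r1 <= rod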
Example #4
# (The beginning of this excerpt is truncated; the assignment below is a
# presumed reconstruction based on how features2 is used afterwards.)
features2 = ConstellationPlus.extractFeatures(peaks2,
                                              makeRadiusSearch(peaks2),
                                              params['radius'],
                                              params['max_per_peak'],
                                              params['min_neighbors'],
                                              params['max_neighbors'])

print "img2: Found %i features" % len(features2)

#pm = PointMatchesPlus.fromFeatures(
pm = PointMatchesPlus.fromNearbyFeatures(params['pointmatches_search_radius'],
                                         features1,
                                         features2,
                                         params['angle_epsilon'],
                                         params['len_epsilon_sq'],
                                         ConstellationPlus.COMPARE_ALL,
                                         numCPUs())

print "Found %i pointmatches" % len(pm.pointmatches)

model = TranslationModel3D()

modelFound, inliers = fit(model, pm.pointmatches,
                          params['n_iterations'], params['maxEpsilon'],
                          params['minInlierRatio'], params['minNumInliers'], params['maxTrust'])

if modelFound:
  print "Model found from %i inliers" % len(inliers)
  a = nativeArray('d', [3, 4])
  model.toMatrix(a) # Can't use model.toArray: different order of elements
  matrix = a[0] + a[1] + a[2] # Concat: flatten to 1-dimensional array
  print matrix
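
  # A follow-up sketch (an assumption, not from the source): the 12-element
  # row-major matrix can seed an ImgLib2 AffineTransform3D, whose set() method
  # takes the 3x4 affine values in exactly this row-major order.
  from net.imglib2.realtransform import AffineTransform3D

  affine = AffineTransform3D()
  affine.set(*matrix)  # unpack the flattened 3x4 matrix into the varargs set()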