Example #1
def preload(cachedCellImg, loader):
    """
    Find which is the last cell index in the cache, identify to which block
    (given the blockSize[2] AKA Z dimension) that index belongs,
    and concurrently load all cells (sections) that the Z dimension of the blockSize will need.
    If they are already loaded, these operations are insignificant.
    """
    # The SoftRefLoaderCache.map is a ConcurrentHashMap with Long keys, aka numbers
    cache = cachedCellImg.getCache()
    f1 = cache.getClass().getDeclaredField("cache")  # LoaderCacheAsCacheAdapter.cache
    f1.setAccessible(True)
    softCache = f1.get(cache)
    f2 = softCache.getClass().getDeclaredField("map")  # SoftRefLoaderCache.map
    f2.setAccessible(True)
    keys = sorted(f2.get(softCache).keySet())  # ConcurrentHashMap has keySet(), not getKeys()
    if 0 == len(keys):
        return
    # blockSize is assumed to be defined in the enclosing scope
    first = keys[-1] - (keys[-1] % blockSize[2])
    syncPrint("Preloading %i-%i" % (first, first + blockSize[2] - 1))
    exe = newFixedThreadPool(n_threads=-1, name="preloader")
    try:
        for index in xrange(first, first + blockSize[2]):
            # Wrap the call in a Task; submit expects a single Callable
            exe.submit(Task(softCache.get, index, loader))
    except:
        syncPrint(sys.exc_info())
    finally:
        exe.shutdown()
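
# Task, used above and throughout these examples, is assumed to adapt a Python
# function plus its arguments to a java.util.concurrent.Callable so it can be
# submitted to an executor; a minimal sketch of such a wrapper:
from java.util.concurrent import Callable

class Task(Callable):
    def __init__(self, fn, *args):
        self.fn = fn
        self.args = args
    def call(self):
        return self.fn(*self.args)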
def ensurePointMatches(filepaths, csvDir, params, n_adjacent):
    """ If a pointmatches csv file doesn't exist, will create it. """
    w = ParallelTasks("ensurePointMatches", exe=newFixedThreadPool(numCPUs()))
    exeload = newFixedThreadPool()  # uses as many threads as CPUs
    try:
        count = 1
        for result in w.chunkConsume(
                numCPUs() * 2,
                pointmatchingTasks(filepaths, csvDir, params, n_adjacent,
                                   exeload)):
            if result:  # is False when CSV file already exists
                syncPrint("Completed %i/%i" %
                          (count, len(filepaths) * n_adjacent))
            count += 1
        syncPrint("Awaiting all remaining pointmatching tasks to finish.")
        w.awaitAll()
        syncPrint("Finished all pointmatching tasks.")
    except:
        syncPrint(sys.exc_info())
    finally:
        exeload.shutdown()
        w.destroy()
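
# pointmatchingTasks is not shown in this excerpt. A hypothetical sketch of its
# shape, assuming it yields one Task per pair of sections within n_adjacent of
# each other (extractAndSavePointMatches is a made-up helper name), each
# returning False when its CSV file already exists:
def pointmatchingTasks(filepaths, csvDir, params, n_adjacent, exeload):
    for i in xrange(len(filepaths) - 1):
        for j in xrange(i + 1, min(i + n_adjacent + 1, len(filepaths))):
            yield Task(extractAndSavePointMatches,  # hypothetical helper
                       filepaths[i], filepaths[j], csvDir, params, exeload)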
def preload(cachedCellImg, loader, block_size, filepaths):
    """
    Find which is the last cell index in the cache, identify to which block
    (given the block_size[2] AKA Z dimension) that index belongs,
    and concurrently load all cells (sections) that the Z dimension of the block_size will need.
    If they are already loaded, these operations are insignificant.
    """
    exe = newFixedThreadPool(n_threads=min(block_size[2], numCPUs()),
                             name="preloader")
    try:
        # The SoftRefLoaderCache.map is a ConcurrentHashMap with Long keys, aka numbers
        cache = cachedCellImg.getCache()
        f1 = cache.getClass().getDeclaredField("cache")  # LoaderCacheAsCacheAdapter.cache
        f1.setAccessible(True)
        softCache = f1.get(cache)
        cache = None
        f2 = softCache.getClass().getDeclaredField("map")  # SoftRefLoaderCache.map
        f2.setAccessible(True)
        keys = sorted(f2.get(softCache).keySet())
        if 0 == len(keys):
            return
        first = keys[-1] - (keys[-1] % block_size[2])
        # Clamp to the last existing section index
        last = min(len(filepaths) - 1, first + block_size[2] - 1)
        keys = None
        msg = "Preloading %i-%i" % (first, last)
        futures = []
        for index in xrange(first, last + 1):
            futures.append(
                exe.submit(TimeItTask(softCache.get, index, loader)))
        softCache = None
        # Wait for all, reporting only the slow loads
        count = 0
        while len(futures) > 0:
            r, t = futures.pop(0).get()
            # t in milliseconds
            if t > 500:
                if msg:
                    syncPrint(msg)
                    msg = None
                syncPrint("preloaded index %i in %f ms" % (first + count, t))
            count += 1
        if not msg:  # msg was printed: at least one load was slow
            syncPrint("Completed preloading %i-%i" % (first, last))
    except:
        syncPrint(sys.exc_info())
    finally:
        exe.shutdown()
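
# TimeItTask is assumed to behave like Task but to also time the call,
# returning the (result, milliseconds) tuple unpacked in the loop above;
# a minimal sketch:
from java.lang import System
from java.util.concurrent import Callable

class TimeItTask(Callable):
    def __init__(self, fn, *args):
        self.fn = fn
        self.args = args
    def call(self):
        t0 = System.nanoTime()
        result = self.fn(*self.args)
        return result, (System.nanoTime() - t0) / 1000000.0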
def register(view_index,
             filepaths,
             modelclass,
             csv_dir,
             params,
             n_threads=0,
             workaround2487=True):
    # Work around Jython bug https://bugs.jython.org/issue2487 , a race condition on type initialization
    if 0 == view_index and workaround2487:
        # Bogus call with the first two filepaths: single-threaded execution to initialize types
        register(view_index, {view_index: filepaths[view_index][0:2]},
                 modelclass,
                 "/tmp/",
                 params,
                 n_threads=1,
                 workaround2487=False)

    exe = newFixedThreadPool(n_threads)
    paths = filepaths[view_index]
    try:
        name = "matrices-view-%i" % view_index
        matrices = loadMatrices(name, csv_dir)
        if not matrices:
            matrices = computeForwardTransforms(paths, klb_loader,
                                                getCalibration, csv_dir, exe,
                                                modelclass, params)
            # Debug: print identity transforms
            identity_indices = [
                i for i, m in enumerate(matrices) if 1.0 == m[0]
                and 0.0 == m[1] and 0.0 == m[2] and 0.0 == m[3] and 0.0 == m[4]
                and 1.0 == m[5] and 0.0 == m[6] and 0.0 == m[7] and 0.0 == m[8]
                and 0.0 == m[9] and 1.0 == m[10] and 0.0 == m[11]
            ]
            syncPrint("View %i: identity matrices at [%s]" %
                      (view_index, ", ".join(
                          str(index) for index in identity_indices)))
            saveMatrices(name, matrices, csv_dir)
    finally:
        exe.shutdown()

    transforms = asBackwardConcatTransforms(matrices,
                                            transformclass=Translation3D)
    path_transforms = dict(izip(paths, transforms))
    registered_loader = RegisteredLoader(klb_loader, path_transforms)

    return Load.lazyStack(paths, registered_loader)
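
# A usage sketch with assumed names: build one registered lazy stack per camera
# view, where filepaths maps view indices to file path lists and
# TranslationModel3D comes from mpicbg.models:
registered = dict((view_index, register(view_index, filepaths,
                                        TranslationModel3D, csv_dir, params))
                  for view_index in sorted(filepaths.iterkeys()))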
def __init__(self,
             filepaths,
             loadImg,
             matrices,
             img_dimensions,
             cell_dimensions,
             interval,
             copy_threads=1,
             preload=None):
    self.filepaths = filepaths
    self.loadImg = loadImg  # function to load images
    self.matrices = matrices
    self.img_dimensions = img_dimensions
    self.cell_dimensions = cell_dimensions  # x,y must match dims of interval
    self.interval = interval  # when smaller than the image, will crop
    self.copy_threads = max(1, copy_threads)
    self.cache = SoftMemoize(partial(TranslatedSectionGet.makeCell, self),
                             maxsize=256)
    self.exe = newFixedThreadPool(-1)  # BEWARE: native memory leak if not closed
    self.preload = preload
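
# SoftMemoize is assumed to cache makeCell results under SoftReferences, so
# cells can be garbage-collected under memory pressure and recomputed on demand.
# A minimal sketch of such a helper (the maxsize bound is ignored here):
from java.lang.ref import SoftReference

class SoftMemoize:
    def __init__(self, fn, maxsize=256):
        self.fn = fn
        self.cache = {}
    def __call__(self, *args):
        ref = self.cache.get(args)
        value = ref.get() if ref is not None else None
        if value is None:
            value = self.fn(*args)
            self.cache[args] = SoftReference(value)
        return value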
Example #6
if os.path.exists(csv_sums_path):
    # Load the cached sums
    with open(csv_sums_path, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',', quotechar="\"")
        header = reader.next()  # skip the header row
        sums = [(filename, float(s)) for filename, s in reader]
else:
    # Compute:
    TMs = []
    for foldername in sorted(os.listdir(srcDir)):
        index = foldername[2:]
        filename = os.path.join(
            foldername,
            "SPM00_TM%s_CM00_CM01_CHN00.weightFused.TimeRegistration.klb" %
            index)
        TMs.append(filename)

    exe = newFixedThreadPool(-1)
    try:

        def computeSum(filename, aimg=None):
            syncPrint(filename)
            img = aimg if aimg is not None else klb.readFull(
                os.path.join(srcDir, filename))
            try:
                return filename, sum(
                    img.getImg().update(None).getCurrentStorageArray())
            except:
                syncPrint("Failed to compute sum: retry")
                return computeSum(filename, aimg=img)

        futures = [exe.submit(Task(computeSum, filename)) for filename in TMs]
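
        # A hedged continuation of this fragment: collect the sums in order and
        # persist them to csv_sums_path, so later runs can take the branch above.
        sums = [f.get() for f in futures]
        with open(csv_sums_path, 'wb') as csvfile:
            w = csv.writer(csvfile, delimiter=',', quotechar="\"",
                           quoting=csv.QUOTE_NONNUMERIC)
            w.writerow(["filename", "sum"])
            for filename, s in sums:
                w.writerow([filename, s])
    finally:
        exe.shutdown()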
def deconvolve(images, kernels, name, n_iterations):
  # Bayesian-based multi-view deconvolution
  exe = newFixedThreadPool(Runtime.getRuntime().availableProcessors() - 2)
# RANSAC parameters: reduce list of pointmatches to a spatially coherent subset
paramsModel = {
  "maxEpsilon": somaDiameter, # max allowed alignment error in calibrated units (a distance)
  "minInlierRatio": 0.0000001, # ratio inliers/candidates
  "minNumInliers": 5, # minimum number of good matches to accept the result
  "n_iterations": 2000, # for estimating the model
  "maxTrust": 4, # for rejecting candidates
}
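
# A hedged sketch of how these parameters are typically consumed, assuming
# mpicbg's AbstractModel.filterRansac and a pre-computed candidates list of
# PointMatch instances (hypothetical in this fragment):
from mpicbg.models import TranslationModel3D
from java.util import ArrayList

model = TranslationModel3D()
inliers = ArrayList()
modelFound = model.filterRansac(candidates, inliers,
                                paramsModel["n_iterations"],
                                paramsModel["maxEpsilon"],
                                paramsModel["minInlierRatio"],
                                paramsModel["minNumInliers"],
                                paramsModel["maxTrust"])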

# Joint dictionary of parameters
params = {}
params.update(paramsDoG)
params.update(paramsFeatures)
params.update(paramsModel)

exe = newFixedThreadPool(2)

#viewInBDV(imgB0, imgB1, imgB2, imgB3)


# Parameter exploration target: dramatically increase the number of inlier point matches,
# in order to reliably estimate a TranslationModel3D (and RigidModel3D) across camera views.
# Which requires extracting features optimized for the overlapping regions.
# Strategy 1: allow more features per peak
params["max_per_peak"] = 4 # was 3
# Did increase the number of features but not by much, and not the number of point matches

# Strategy 2: allow constellations with smaller angles to better capture whatever feature could be available
params["min_angle"] = 0.25 # was: 1.57
# Did increase the number of features by a lot (about double), and the point matches by a tiny bit
Example #9
def maxProjectLastDimension(img, strategy="1by1", chunk_size=0):
  last_dimension = img.numDimensions() - 1

  if "1by1" == strategy:
    exe = newFixedThreadPool()
    try:
      n_threads = exe.getCorePoolSize()
      imgTs = [ArrayImgs.unsignedShorts(list(Intervals.dimensionsAsLongArray(img))[:-1]) for i in xrange(n_threads)]
      
      def mergeMax(img1, img2, imgT):
        return compute(maximum(img1, img2)).into(imgT)

      def hyperSlice(index):
        return Views.hyperSlice(img, last_dimension, index)

      # The first n_threads mergeMax:
      futures = [exe.submit(Task(mergeMax, hyperSlice(i*2), hyperSlice(i*2 +1), imgTs[i]))
                 for i in xrange(n_threads)]
      # As soon as one finishes, merge it with the next available hyperSlice
      next = n_threads
      while len(futures) > 0: # i.e. not empty
        imgT = futures.pop(0).get()
        if next < img.dimension(last_dimension):
          futures.append(exe.submit(Task(mergeMax, imgT, hyperSlice(next), imgT)))
          next += 1
        else:
          # Run out of hyperSlices to merge
          if 0 == len(futures):
            return imgT # done
          # Merge imgT to each other until none remain
          futures.append(exe.submit(Task(mergeMax, imgT, futures.pop(0).get(), imgT)))
    finally:
      exe.shutdownNow()
  else:
    # By chunks
    imglibtype = img.randomAccess().get().getClass()
    # The Converter class
    reduce_max = makeCompositeToRealConverter(reducer_class=Math,
                                              reducer_method="max",
                                              reducer_method_signature="(DD)D")
    if chunk_size > 0:
      # map reduce approach
      exe = newFixedThreadPool()
      try:
        def projectMax(img, minC, maxC, reduce_max):
          imgC = Views.interval(img, minC, maxC)
          # The projection spans all dimensions of the chunk except the last one
          imgA = ArrayImgs.unsignedShorts(list(Intervals.dimensionsAsLongArray(imgC))[:-1])
          ImgUtil.copy(ImgView.wrap(convert(Views.collapseReal(imgC), reduce_max.newInstance(), imglibtype), img.factory()), imgA)
          return imgA

        # The min and max coordinates of all dimensions except the last one
        minCS = [0 for d in xrange(last_dimension)]
        maxCS = [img.dimension(d) - 1 for d in xrange(last_dimension)]

        # Process every chunk in parallel, passing reduce_max explicitly
        futures = [exe.submit(Task(projectMax, img, minCS + [offset],
                                   maxCS + [min(offset + chunk_size, img.dimension(last_dimension)) - 1],
                                   reduce_max))
                   for offset in xrange(0, img.dimension(last_dimension), chunk_size)]

        # Wait for all chunk projections, then merge them pairwise with max
        projected = [f.get() for f in futures]
        return reduce(lambda img1, img2: compute(maximum(img1, img2)).into(img1), projected)
      finally:
        exe.shutdownNow()
    else:
      # One chunk: all at once
      # Each sample of img3DV is a virtual vector over all time frames at that 3D coordinate
      # Reduce each vector to a single scalar, using a Converter
      img3DC = convert(Views.collapseReal(img), reduce_max.newInstance(), imglibtype)
      imgA = ArrayImgs.unsignedShorts([img.dimension(d) for d in xrange(last_dimension)])
      ImgUtil.copy(ImgView.wrap(img3DC, img.factory()), imgA)
      return imgA
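
# A usage sketch, assuming img4d is a 4D (x,y,z,t) unsigned short image; any
# strategy other than "1by1" takes the chunked branch above:
maxT = maxProjectLastDimension(img4d, strategy="chunks", chunk_size=10)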
Example #11

# Ensure target directory exists
if not os.path.exists(targetFolder):
    os.mkdir(targetFolder)


def convert(filepath, targetFolder):
    imp = IJ.openImage(filepath)
    filename = os.path.basename(filepath)  # defined up front so both branches can use it
    if imp.getType() == ImagePlus.GRAY32:
        ic = ImageConverter(imp)
        ic.setDoScaling(False)
        ic.convertToGray16()
        writeZip(imp, os.path.join(targetFolder, filename), title=filename)
    else:
        syncPrint("Not a 32-bit image: " + filename)


exe = newFixedThreadPool()
try:
    futures = []
    for filename in sorted(os.listdir(sourceFolder)):
        filepath = os.path.join(sourceFolder, filename)
        if accept(filepath):
            futures.append(exe.submit(Task(convert, filepath, targetFolder)))
    for f in futures:
        f.get()  # wait
finally:
    exe.shutdownNow()