Example #1
def savePointMatches(img_filename1, img_filename2, pointmatches, directory,
                     params):
    filename = basename(img_filename1) + '.' + basename(
        img_filename2) + ".pointmatches.csv"
    path = os.path.join(directory, filename)
    try:
        with open(path, 'w') as csvfile:
            w = csv.writer(csvfile,
                           delimiter=',',
                           quotechar='"',
                           quoting=csv.QUOTE_NONNUMERIC)
            # First two rows: parameter names and values
            keys = params.keys()
            w.writerow(keys)
            w.writerow(tuple(params[key] for key in keys))
            # PointMatches header
            w.writerow(PointMatches.csvHeader(next(
                iter(pointmatches))))  # support both lists and sets
            # One PointMatch per row
            for pm in pointmatches:
                w.writerow(PointMatches.asRow(pm))
            # Ensure it's written
            csvfile.flush()
            os.fsync(csvfile.fileno())
    except:
        syncPrint("Failed to save pointmatches at %s" % path)
        syncPrint(str(sys.exc_info()))
Example #2
def checkParams(params, names, values, epsilon):
    """ params: the actual parameters to use.
      names: names of parameters in the CSV file.
      values: values (as strings) of parameters in the CSV file. """
    def report(name, value):
        syncPrint("Mismatching parameters: '%s' :: %s != %s" %
                  (name, str(params[name]), str(value)))

    for name, value in izip(names, values):
        value1 = params.get(name, None)
        if value1 is None:  # parameter in the CSV file header does not exist in params
            return report(name, value)
        t1 = type(params[name])
        t1 = types.FloatType if t1 == types.IntType else t1  # Make all numbers look like floats
        t2 = types.ListType if '[' == value[0] else types.FloatType
        if t1 != t2:
            return report(name, value)
        if t1 == types.FloatType:
            if abs(params[name] - float(value)) > epsilon:
                return report(name, value)
        elif t1 == types.ListType:
            # both are lists
            for a, b in izip(params[name], imap(float,
                                                value[1:-1].split(","))):
                if abs(a - b) > epsilon:
                    return report(name, value)
        else:
            syncPrint("Don't know how to compare type %s with %s" %
                      (str(t1), str(t2)))
            return False
    return True
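# Hedged usage sketch (not from the original code): checkParams validating an in-memory
# params dict against the first two rows of a features/pointmatches CSV file, where every
# value arrives as a string and lists are serialized as "[a,b]".
example_params = {"radius": 5.0, "min_angle": 1.57, "sigmaSmaller": [2.0, 3.0]}
names = ["radius", "min_angle", "sigmaSmaller"]
values = ["5.0", "1.57", "[2.0,3.0]"]
ok = checkParams(example_params, names, values, 0.00001)  # True only if all values match within epsilon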
Example #3
 def fetch(url, maximum_try=5):
     # Retry up to maximum_try times; returns None if every attempt fails.
     for _ in range(maximum_try):
         try:
             html = urllib2.urlopen(url).read()
             return html
         except BaseException, e:
             syncPrint('Error: ' + str(e) + '\n' + 'on url: ' + url)
Example #4
def makeFeatures(img_filename, img_loader, getCalibration, csv_dir, params):
    """ Helper function to extract features from an image. """
    img = img_loader.load(img_filename)
    # Find a list of peaks by difference of Gaussian
    peaks = []
    sigmaSmaller = params["sigmaSmaller"]
    sigmaLarger = params["sigmaLarger"]
    if type(sigmaSmaller) == types.FloatType:
        sigmaSmaller = [sigmaSmaller]
        sigmaLarger = [sigmaLarger]
    for ss, sl in izip(sigmaSmaller, sigmaLarger):
        peaks.extend(
            getDoGPeaks(img, getCalibration(img_filename), ss, sl,
                        params['minPeakValue']))
    #
    if 0 == len(peaks):
        features = []
    else:
        # Create a KDTree-based search for nearby peaks
        search = makeRadiusSearch(peaks)
        # Create list of Constellation features
        features = extractFeatures(peaks, search, params['radius'],
                                   params['min_angle'], params['max_per_peak'])
    if 0 == len(features):
        syncPrint("No peaks found for %s" % img_filename)
    # Store features in a CSV file (even if without features)
    saveFeatures(img_filename, csv_dir, features, params)
    return features
Example #5
def plot2DRoiOverZ(imp,
                   roi=None,
                   show=True,
                   XaxisLabel='Z',
                   YaxisLabel='I',
                   Zscale=1.0):
    """
  Take an ImagePlus and a 2D ROI (optional, can be read from the ImagePlus)
  and plot the average value of the 2D ROI in each Z slice.

  Return 4 elements: the two lists of values for the Y (intensity) and X (slice index),
  and the Plot and PlotWindow instances.
  """
    roi = roi if roi else imp.getRoi()
    if not roi:
        syncPrint("Set a ROI first.")
        return
    # List of 2D points from where pixel values are to be read
    points = roiPoints(roi)
    stack = imp.getStack()
    intensity = [
        sum(stack.getProcessor(slice_index).getf(p.x, p.y)
            for p in points) / len(points)
        for slice_index in xrange(1,
                                  imp.getNSlices() + 1)
    ]
    xaxis = [z * Zscale for z in range(1, imp.getNSlices() + 1)]
    plot = Plot("Intensity", XaxisLabel, YaxisLabel, xaxis, intensity)
    if show:
        win = plot.show()
    else:
        win = None
    return intensity, xaxis, plot, win
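# Hedged usage sketch (assumption): plot the mean ROI intensity over Z for the active image;
# IJ.getImage() returns the frontmost ImagePlus, and a 2D ROI must be set beforehand.
from ij import IJ
imp = IJ.getImage()
intensity, xaxis, plot, win = plot2DRoiOverZ(imp, Zscale=1.0)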
Example #6
def loadFloatProcessor(filepath, params, scale=True):
    try:
        fp = loadImp(filepath).getProcessor().convertToFloatProcessor()
        # Preprocess images: Gaussian-blur to scale down, then normalize contrast
        if scale:
            fp = Filter.createDownsampled(fp, params["scale"], 0.5, 1.6)
            Util.normalizeContrast(fp)
        return fp
    except:
        syncPrint(sys.exc_info())
Example #7
 def translate(self, dx, dy):
     a = zeros(2, 'l')
     self.interval.min(a)
     width = self.cell_dimensions[0]
     height = self.cell_dimensions[1]
     x0 = max(0, min(a[0] + dx, self.img_dimensions[0] - width))
     y0 = max(0, min(a[1] + dy, self.img_dimensions[1] - height))
     self.interval = FinalInterval([x0, y0],
                                   [x0 + width - 1, y0 + height - 1])
     syncPrint(str(Intervals.dimensionsAsLongArray(self.interval)))
     self.cache.clear()
Example #8
def fit(model, pointmatches, n_iterations, maxEpsilon, minInlierRatio,
        minNumInliers, maxTrust):
    """ Fit a model to the pointmatches, finding the subset of inlier pointmatches
      that agree with a joint transformation model. """
    inliers = ArrayList()
    try:
        modelFound = model.filterRansac(pointmatches, inliers, n_iterations,
                                        maxEpsilon, minInlierRatio,
                                        minNumInliers, maxTrust)
    except NotEnoughDataPointsException, e:
        syncPrint(str(e))
        return False, inliers
    return modelFound, inliers
Example #9
def setupEngine(use_cuda=True, askForMultipleDevices=False):
  """
  Attempt to load the CUDA libraries. Otherwise use CPU threads.
  Return a function that creates the ComputeBlockSeqThread(CPU|CUDA)Factory.

  For the CUDA version to work, do, in Ubuntu 16.04:
   1. Download the .deb file for CUDA 10.0 from:
     https://developer.nvidia.com/compute/cuda/10.0/Prod/local_installers/cuda-repo-ubuntu1604-10-0-local-10.0.130-410.48_1.0-1_amd64
   2. Install the CUDA deb package and more:
      $ sudo dpkg -i cuda-repo-ubuntu1604-10-0-*deb
      $ sudo apt-key add /var/cuda-repo-10-0-local-10.0.130-410.48/7fa2af80.pub
      $ sudo apt-get update
      $ sudo apt-get install cuda
   3. Download the FourierConvolutionCUDALib from:
     https://github.com/StephanPreibisch/FourierConvolutionCUDALib
   4. Install it:
      $ cd FourierConvolutionCUDALib/
      $ mkdir build
      $ cd build
      $ cmake ..
      $ make
      $ sudo make install
  """
  cuda = None
  devices = []
  idToCudaDevice = {}
  if use_cuda:
    so_paths = ["/usr/local/lib/libFourierConvolutionCUDALib.so",
                "/groups/cardona/home/championa/code/FourierConvolutionCUDALib/build-cuda-8-master/src/libFourierConvolutionCUDALib.so"] 
    for so_path in so_paths:
      if os.path.exists(so_path):
        # Still opens a dialog to ask for the one and only existing library
        #cuda = NativeLibraryTools.loadNativeLibrary(ArrayList(["FourierConvolutionCuda"]), File(so_path), CUDAFourierConvolution)
        cuda = Native.loadLibrary(so_path, CUDAFourierConvolution)
        break
    if not cuda:
      # Fire up file dialogs:
      cuda = NativeLibraryTools.loadNativeLibrary(ArrayList(["fftCUDA", "FourierConvolutionCuda"]), CUDAFourierConvolution)
    if not cuda:
      syncPrint("Could not load CUDA JNA library for FFT convolution.")
    else:
      syncPrint("Will use CUDA for FFT convolution.")
      devices = CUDATools.queryCUDADetails(cuda, askForMultipleDevices)
      idToCudaDevice = {index: device for index, device in enumerate(devices)}
  # Return function
  def createFactoryFn(exe, lambda_val, blockSize):
    if use_cuda and cuda:
      return ComputeBlockSeqThreadCUDAFactory(exe, MultiViewDeconvolution.minValue, lambda_val, blockSize, cuda, HashMap(idToCudaDevice))
    else:
      return ComputeBlockSeqThreadCPUFactory(exe, MultiViewDeconvolution.minValue, lambda_val, blockSize, ArrayImgFactory(FloatType()))

  return createFactoryFn
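# Hedged usage sketch (assumption): obtain the factory-creating function and later build the
# ComputeBlockSeqThread(CPU|CUDA)Factory; exe, lambda_val and blockSize are placeholders defined
# elsewhere (multiviewDeconvolution below calls it as createFactory(mvd_exe, lambda_val, blockSizes[0])).
createFactory = setupEngine(use_cuda=True)
#cptf = createFactory(exe, lambda_val, blockSize)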
Example #10
def writeZip(img, path, title=""):
  if isinstance(img, RandomAccessibleInterval):
    imp = IL.wrap(img, title)
  elif isinstance(img, ImagePlus):
    imp = img
    if title:
      imp.setTitle(title)
  else:
    syncPrint("Cannot writeZip to %s:\n  Unsupported image type %s" % (path, str(type(img))))
    return None
  #
  FileSaver(imp).saveAsZip(path)
  return imp
Example #11
 def getAllSets(self, doc=None):
      '''Retrieve all sets in the sets page; there might be more than one page, but this method does not handle that case.'''
     if doc is None:
         url = r'http://www.flickr.com/photos/{username}/sets/'.format(username=self.username)
         doc = self.fetch(url)
     pattern = r'<a[^>]+href=\"/photos/{username}/sets/(\d+)/[^>]+title=\"(.*?)\"[^>]*>'.format(username=self.username)
     map = {} 
     for match in re.finditer(pattern, doc):
         setId = match.group(1)
         setName = match.group(2)
         map[setId] = setName
         syncPrint('Found set id={setId} name={setName}'.format(setId=setId, setName=setName))
     return map
Example #12
def loadMatrices(name, csv_dir):
    """ Load all matrices as a list of arrays of doubles
      from a CSV file named <name>.csv """
    path = os.path.join(csv_dir, name + ".csv")
    if not os.path.exists(path):
        return None
    try:
        with open(path, 'r') as csvfile:
            reader = csv.reader(csvfile, delimiter=',', quotechar='"')
            reader.next()  # skip header
            matrices = [array(imap(float, row), 'd') for row in reader if row]
            return matrices
    except:
        syncPrint("Could not load matrices from path %s" % path)
        syncPrint(str(sys.exc_info()))
Example #13
def makeLinkedTiles(filepaths, csvDir, params, n_adjacent):
    ensurePointMatches(filepaths, csvDir, params, n_adjacent)
    try:
        tiles = [Tile(TranslationModel2D()) for _ in filepaths]
        # FAILS when running in parallel, for mysterious reasons related to jython internals, perhaps syncPrint fails
        #w = ParallelTasks("loadPointMatches")
        #for i, j, pointmatches in w.chunkConsume(numCPUs() * 2, loadPointMatchesTasks(filepaths, csvDir, params, n_adjacent)):
        syncPrint("Loading all pointmatches.")
        for task in loadPointMatchesTasks(filepaths, csvDir, params,
                                          n_adjacent):
            i, j, pointmatches = task.call()
            tiles[i].connect(tiles[j], pointmatches)  # reciprocal connection
        syncPrint("Finsihed loading all pointmatches.")
        return tiles
    finally:
        #w.destroy()
        pass
Example #14
 def download_and_save_image_file(url, path):
      '''Download an image from an image page (not necessarily a .jpg).'''
     create_path(path)
     if os.path.isfile(path):
         syncPrint('%-64s has been downloaded to %s, skip.' % (url, path))
         return
             
     photoID = BasePhotoCrawler.getPhotoId(url)
     doc = BasePhotoCrawler.fetch(url + 'sizes/')
     pattern = r'<img[^>]+src=\"(http://\w+\.staticflickr\.com/\w+/{id}\w+\.(jpg|png))[^>]*>'.format(id=photoID)
     try:
       m = re.search(pattern, doc).group(1)
       syncPrint('downloading   %-64s  to   %s' % (url, path))
       img = BasePhotoCrawler.fetch(m)
       open(path, "w+").write(img)
     except:
       # (TODO) break down exception handling
       print 'Error: no regex match in %s' % url
Example #15
 def prepare(index):
     # Prepare the img for deconvolution:
     # 0. Transform in one step.
     # 1. Ensure its pixel values conform to expectations (no zeros inside)
     # 2. Copy it into an ArrayImg for faster recurrent retrieval of same pixels
     syncPrint("Preparing %s CM0%i for deconvolution" % (tm_dirname, index))
     img = klb_loader.get(filepaths[index])  # of UnsignedShortType
     imgP = prepareImgForDeconvolution(
         img, transforms[index], target_interval)  # returns of FloatType
     # Copy transformed view into ArrayImg for best performance in deconvolution
     imgA = ArrayImgs.floats(Intervals.dimensionsAsLongArray(imgP))
     #ImgUtil.copy(ImgView.wrap(imgP, imgA.factory()), imgA)
     ImgUtil.copy(imgP, imgA, n_threads / 2)  # parallel copying
     syncPrint("--Completed preparing %s CM0%i for deconvolution" %
               (tm_dirname, index))
     imgP = None
     img = None
     return (index, imgA)
Example #16
 def preload(cachedCellImg, loader, block_size, filepaths):
     """
 Find which is the last cell index in the cache, identify to which block
 (given the blockSize[2] AKA Z dimension) that index belongs to,
 and concurrently load all cells (sections) that the Z dimension of the blockSize will need.
 If they are already loaded, these operations are insignificant.
 """
     exe = newFixedThreadPool(n_threads=min(block_size[2], numCPUs()),
                              name="preloader")
     try:
         # The SoftRefLoaderCache.map is a ConcurrentHashMap with Long keys, aka numbers
         cache = cachedCellImg.getCache()
         f1 = cache.getClass().getDeclaredField(
             "cache")  # LoaderCacheAsCacheAdapter.cache
         f1.setAccessible(True)
         softCache = f1.get(cache)
         cache = None
         f2 = softCache.getClass().getDeclaredField(
             "map")  # SoftRefLoaderCache.map
         f2.setAccessible(True)
         keys = sorted(f2.get(softCache).keySet())
         if 0 == len(keys):
             return
         first = keys[-1] - (keys[-1] % block_size[2])
          last = min(len(filepaths), first + block_size[2]) - 1  # clamp to the number of sections
         keys = None
         msg = "Preloading %i-%i" % (first, first + block_size[2] - 1)
         futures = []
         for index in xrange(first, first + block_size[2]):
             futures.append(
                 exe.submit(TimeItTask(softCache.get, index, loader)))
         softCache = None
         # Wait for all
         count = 1
         while len(futures) > 0:
             r, t = futures.pop(0).get()
              # t in milliseconds
             if t > 500:
                 if msg:
                     syncPrint(msg)
                     msg = None
                 syncPrint("preloaded index %i in %f ms" %
                           (first + count, t))
             count += 1
         if not msg:  # msg was printed
             syncPrint("Completed preloading %i-%i" %
                       (first, first + block_size[2] - 1))
     except:
         syncPrint(sys.exc_info())
     finally:
         exe.shutdown()
Example #17
def fitModel(img1_filename, img2_filename, img_loader, getCalibration, csv_dir,
             model, exe, params):
    """ The model can be any subclass of mpicbg.models.Affine3D, such as:
        TranslationModel3D, RigidModel3D, SimilarityModel3D,
        AffineModel3D, InterpolatedAffineModel3D
      Returns the transformation matrix as a 1-dimensional array of doubles,
      which is the identity when the model cannot be fit. """
    pointmatches = findPointMatches(img1_filename, img2_filename, img_loader,
                                    getCalibration, csv_dir, exe, params)
    if 0 == len(pointmatches):
        modelFound = False
    else:
        modelFound, inliers = fit(model, pointmatches, params["n_iterations"],
                                  params["maxEpsilon"],
                                  params["minInlierRatio"],
                                  params["minNumInliers"], params["maxTrust"])
    if modelFound:
        syncPrint(
            "Found %i inliers for:\n    %s\n    %s" %
            (len(inliers), basename(img1_filename), basename(img2_filename)))
        a = nativeArray('d', [3, 4])
        model.toMatrix(
            a)  # Can't use model.toArray: different order of elements
        matrix = a[0] + a[1] + a[2]  # Concat: flatten to 1-dimensional array:
    else:
        syncPrint("Model not found for:\n    %s\n    %s" %
                  (img1_filename, img2_filename))
        # Return identity
        matrix = array([1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0], 'd')
    syncPrint("found %i pointmatches, with matrix:\n[%s]\nbetween \n    %s\n    %s" % \
              (len(pointmatches), ", ".join("%.2f" % v for v in matrix), basename(img1_filename), basename(img2_filename)))
    return matrix
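# Hedged usage sketch (assumption): estimate the rigid transform between two time points;
# all arguments except the model are placeholders, and params must hold the keys that
# findPointMatches and fit expect (n_iterations, maxEpsilon, minInlierRatio, minNumInliers, maxTrust, ...).
matrix = fitModel(img1_filename, img2_filename, img_loader, getCalibration,
                  csv_dir, RigidModel3D(), exe, params)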
Example #18
def saveMatrices(name, matrices, csv_dir):
    """ Store all matrices in a CSV file named <name>.csv """
    path = os.path.join(csv_dir, name + ".csv")
    try:
        with open(path, 'w') as csvfile:
            w = csv.writer(csvfile,
                           delimiter=',',
                           quotechar='"',
                           quoting=csv.QUOTE_NONNUMERIC)
            # Write header: 12 m<i><j> names
            # handle 2D (length 6) and 3D matrices (length 12)
            if 12 == len(matrices[0]):
                iseq, jseq = (0, 1, 2), (0, 1, 2, 3)
            else:
                iseq, jseq = (0, 1), (0, 1, 2)
            w.writerow(tuple("m%i%i" % (i, j) for i in iseq for j in jseq))
            for matrix in matrices:
                w.writerow(matrix)
            csvfile.flush()
            os.fsync(csvfile.fileno())
    except:
        syncPrint("Failed to save matrices at path %s" % path)
        syncPrint(str(sys.exc_info()))
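# Hedged usage sketch (assumption): round-trip two 3D affine matrices (12 doubles each, as
# produced by fitModel above) through saveMatrices and loadMatrices; csv_dir is a placeholder.
identity = array([1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0], 'd')
saveMatrices("matrices-test", [identity, identity], csv_dir)
reloaded = loadMatrices("matrices-test", csv_dir)  # a list of two double[12] arrays, or None on failure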
Example #19
def saveFeatures(img_filename, directory, features, params):
    path = os.path.join(directory, basename(img_filename)) + ".features.csv"
    try:
        with open(path, 'w') as csvfile:
            w = csv.writer(csvfile,
                           delimiter=',',
                           quotechar='"',
                           quoting=csv.QUOTE_NONNUMERIC)
            # First two rows: parameter names and values
            keys = params.keys()
            w.writerow(keys)
            w.writerow(tuple(params[key] for key in keys))
            # Feature header
            w.writerow(Constellation.csvHeader())
            # One row per Constellation feature
            for feature in features:
                w.writerow(feature.asRow())
            # Ensure it's written
            csvfile.flush()
            os.fsync(csvfile.fileno())
    except:
        syncPrint("Failed to save features at %s" % path)
        syncPrint(str(sys.exc_info()))
Example #20
def loadFeatures(img_filename,
                 directory,
                 params,
                 validateOnly=False,
                 epsilon=0.00001,
                 verbose=True):
    """ Attempts to load features from filename + ".features.csv" if it exists,
      returning a list of Constellation features or None.
      params: dictionary of parameters with which features are wanted now,
              to compare with parameter with which features were extracted.
              In case of mismatch, return None.
      epsilon: allowed error when comparing floating-point values.
      validateOnly: if True, return after checking that parameters match. """
    try:
        csvpath = os.path.join(directory,
                               basename(img_filename) + ".features.csv")
        if os.path.exists(csvpath):
            with open(csvpath, 'r') as csvfile:
                reader = csv.reader(csvfile, delimiter=',', quotechar='"')
                # First line contains parameter names, second line their values
                if not checkParams(params, reader.next(), reader.next(),
                                   epsilon):
                    return None
                if validateOnly:
                    return True  # would return None above, which is falsy
                reader.next()  # skip header with column names
                features = [
                    Constellation.fromRow(map(float, row)) for row in reader
                ]
                if verbose:
                    syncPrint("Loaded %i features for %s" %
                              (len(features), img_filename))
                return features
        else:
            if verbose:
                syncPrint("No stored features found at %s" % csvpath)
            return None
    except:
        syncPrint("Could not load features for %s" % img_filename)
        syncPrint(str(sys.exc_info()))
        return None
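# Hedged usage sketch (assumption): reuse stored features only when they were extracted with
# the same parameters, otherwise recompute them; img_filename, img_loader, getCalibration,
# csv_dir and params are placeholders defined elsewhere.
if loadFeatures(img_filename, csv_dir, params, validateOnly=True):
    features = loadFeatures(img_filename, csv_dir, params)
else:
    features = makeFeatures(img_filename, img_loader, getCalibration, csv_dir, params)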
Example #21
def loadPointMatches(img1_filename,
                     img2_filename,
                     directory,
                     params,
                     epsilon=0.00001,
                     verbose=True):
    """ Attempts to load point matches from filename1 + '.' + filename2 + ".pointmatches.csv" if it exists,
      returning a list of PointMatch instances or None.
      params: dictionary of parameters with which pointmatches are wanted now,
              to compare with parameter with which pointmatches were made.
              In case of mismatch, return None.
      epsilon: allowed error when comparing floating-point values. """
    try:
        csvpath = os.path.join(
            directory,
            basename(img1_filename) + '.' + basename(img2_filename) +
            ".pointmatches.csv")
        if not os.path.exists(csvpath):
            if verbose:
                syncPrint("No stored pointmatches found at %s" % csvpath)
            return None
        with open(csvpath, 'r') as csvfile:
            reader = csv.reader(csvfile, delimiter=',', quotechar='"')
            # First line contains parameter names, second line their values
            if not checkParams(params, reader.next(), reader.next(), epsilon):
                return None
            reader.next()  # skip header with column names
            pointmatches = PointMatches.fromRows(reader).pointmatches
            if verbose:
                syncPrint("Loaded %i pointmatches for %s, %s" %
                          (len(pointmatches), img1_filename, img2_filename))
            return pointmatches
    except:
        syncPrint("Could not load pointmatches for pair %s, %s" %
                  (img1_filename, img2_filename))
        syncPrint(str(sys.exc_info()))
        return None
Example #22
 def keyPressed(self, event):
     try:
         dx, dy = self.delta.get(event.getKeyCode(), (0, 0))
          if 0 == dx and 0 == dy:  # key not mapped to a translation
             return
         syncPrint("Translating source")
         if event.isShiftDown():
             dx *= self.shift
             dy *= self.shift
         if event.isAltDown():
             dx *= self.alt
             dy *= self.alt
         syncPrint("... by x=%i, y=%i" % (dx, dy))
         self.cellGet.translate(dx, dy)
         self.imp.updateAndDraw()
         event.consume()
     except:
         syncPrint(str(sys.exc_info()))
Example #23
def ensurePointMatches(filepaths, csvDir, params, n_adjacent):
    """ If a pointmatches csv file doesn't exist, will create it. """
    w = ParallelTasks("ensurePointMatches", exe=newFixedThreadPool(numCPUs()))
    exeload = newFixedThreadPool()
    try:
        count = 1
        for result in w.chunkConsume(
                numCPUs() * 2,
                pointmatchingTasks(filepaths, csvDir, params, n_adjacent,
                                   exeload)):
            if result:  # is False when CSV file already exists
                syncPrint("Completed %i/%i" %
                          (count, len(filepaths) * n_adjacent))
            count += 1
        syncPrint("Awaiting all remaining pointmatching tasks to finish.")
        w.awaitAll()
        syncPrint("Finished all pointmatching tasks.")
    except:
        print sys.exc_info()
    finally:
        exeload.shutdown()
        w.destroy()
Example #24
def export8bitN5(
        filepaths,
        img_dimensions,
        matrices,
        name,
        exportDir,
        interval,
        gzip_compression=6,
        invert=True,
        CLAHE_params=[400, 256, 3.0],
        copy_threads=2,
        n5_threads=0,  # 0 means as many as CPU cores
        block_size=[128, 128, 128]):
    """
  Export into an N5 volume, in parallel, in 8-bit.

  name: name to assign to the N5 volume.
  filepaths: the ordered list of filepaths, one per serial section.
  exportDir: the directory into which to save the N5 volume.
  interval: for cropping.
  gzip_compression: defaults to 6 as suggested by Saalfeld.
  block_size: defaults to 128x128x128 px.
  """

    dims = Intervals.dimensionsAsLongArray(interval)
    voldims = [dims[0], dims[1], len(filepaths)]
    cell_dimensions = [dims[0], dims[1], 1]

    def asNormalizedUnsignedByteArrayImg(interval, invert, blockRadius, n_bins,
                                         slope, matrices, copy_threads, index,
                                         imp):
        sp = imp.getProcessor()  # ShortProcessor
        sp.setRoi(interval.min(0), interval.min(1),
                  interval.max(0) - interval.min(0) + 1,
                  interval.max(1) - interval.min(1) + 1)
        sp = sp.crop()
        if invert:
            sp.invert()
        CLAHE.run(
            ImagePlus("", sp), blockRadius, n_bins, slope, None
        )  # far less memory requirements than NormalizeLocalContrast, and faster.
        minimum, maximum = autoAdjust(sp)

        # Transform and convert image to 8-bit, mapping to display range
        img = ArrayImgs.unsignedShorts(
            sp.getPixels(), [sp.getWidth(), sp.getHeight()])
        sp = None
        affine = AffineTransform2D()
        affine.set(matrices[index])
        imgI = Views.interpolate(Views.extendZero(img),
                                 NLinearInterpolatorFactory())
        imgA = RealViews.transform(imgI, affine)
        imgT = Views.zeroMin(Views.interval(imgA, img))
        imgMinMax = convert(imgT, RealUnsignedByteConverter(minimum, maximum),
                            UnsignedByteType)
        aimg = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img))
        ImgUtil.copy(ImgView.wrap(imgMinMax, aimg.factory()), aimg,
                     copy_threads)
        img = imgI = imgA = imgT = imgMinMax = None
        return aimg

    blockRadius, n_bins, slope = CLAHE_params

    loader = SectionCellLoader(
        filepaths,
        asArrayImg=partial(asNormalizedUnsignedByteArrayImg, interval, invert,
                           blockRadius, n_bins, slope, matrices, copy_threads))

    # How to preload block_size[2] files at a time? Or at least as many as numCPUs()?
    # One possibility is to query the SoftRefLoaderCache.map for its entries, using a ScheduledExecutorService,
    # and preload sections ahead for the whole blockSize[2] dimension.

    cachedCellImg = lazyCachedCellImg(loader, voldims, cell_dimensions,
                                      UnsignedByteType, BYTE)

    def preload(cachedCellImg, loader, block_size, filepaths):
        """
    Find which is the last cell index in the cache, identify to which block
    (given the blockSize[2] AKA Z dimension) that index belongs to,
    and concurrently load all cells (sections) that the Z dimension of the blockSize will need.
    If they are already loaded, these operations are insignificant.
    """
        exe = newFixedThreadPool(n_threads=min(block_size[2], numCPUs()),
                                 name="preloader")
        try:
            # The SoftRefLoaderCache.map is a ConcurrentHashMap with Long keys, aka numbers
            cache = cachedCellImg.getCache()
            f1 = cache.getClass().getDeclaredField(
                "cache")  # LoaderCacheAsCacheAdapter.cache
            f1.setAccessible(True)
            softCache = f1.get(cache)
            cache = None
            f2 = softCache.getClass().getDeclaredField(
                "map")  # SoftRefLoaderCache.map
            f2.setAccessible(True)
            keys = sorted(f2.get(softCache).keySet())
            if 0 == len(keys):
                return
            first = keys[-1] - (keys[-1] % block_size[2])
            last = min(len(filepaths), first + block_size[2]) - 1  # clamp to the number of sections
            keys = None
            msg = "Preloading %i-%i" % (first, first + block_size[2] - 1)
            futures = []
            for index in xrange(first, first + block_size[2]):
                futures.append(
                    exe.submit(TimeItTask(softCache.get, index, loader)))
            softCache = None
            # Wait for all
            count = 1
            while len(futures) > 0:
                r, t = futures.pop(0).get()
                # t in milliseconds
                if t > 500:
                    if msg:
                        syncPrint(msg)
                        msg = None
                    syncPrint("preloaded index %i in %f ms" %
                              (first + count, t))
                count += 1
            if not msg:  # msg was printed
                syncPrint("Completed preloading %i-%i" %
                          (first, first + block_size[2] - 1))
        except:
            syncPrint(sys.exc_info())
        finally:
            exe.shutdown()

    preloader = Executors.newSingleThreadScheduledExecutor()
    preloader.scheduleWithFixedDelay(
        RunTask(preload, cachedCellImg, loader, block_size, filepaths), 10, 60,
        TimeUnit.SECONDS)

    try:
        syncPrint("N5 directory: " + exportDir + "\nN5 dataset name: " + name +
                  "\nN5 blockSize: " + str(block_size))
        writeN5(cachedCellImg,
                exportDir,
                name,
                block_size,
                gzip_compression_level=gzip_compression,
                n_threads=n5_threads)
    finally:
        preloader.shutdown()
Example #25
 def __init__(self, keys):
     if type(keys).__name__ == 'str':
         key = str(keys).lower()
     elif type(keys).__name__ == 'list':
         # Join the keywords themselves, not the characters of str(keys)
         key = '+'.join(str(k).lower() for k in keys)
     else:
         syncPrint('Unsupported keys type: ' + type(keys).__name__)
         key = str(keys).lower()  # fallback so self.key is always defined
     self.key = key
     super(SearchResultCrawler, self).__init__()
Example #26
def loadImp(filepath):
    # Images are TIFF with bit pack compression: can't byte-read array
    syncPrint("Loading image " + filepath)
    return IJ.openImage(filepath)
Example #27
    if str(op).lower() == 'user':  # user (branch header reconstructed; missing from this fragment)
        if len(sys.argv) == 3:
            username = sys.argv[2]
            ready = True
            crawler = UserCrawler(username)
    elif str(op).lower() == 'tag': # tag
        if len(sys.argv) == 3:
            tag = sys.argv[2]
            ready = True
            crawler = TagCrawler(tag)
    elif str(op).lower() == 'search': # search
        if len(sys.argv) >= 3:
            keywords = sys.argv[2:]
            ready = True
            crawler = SearchResultCrawler(keywords)

    if not ready:
        syncPrint('invalid usage, read README.')
        sys.exit(1)


    # Let the crawler finish after an input.
    # (TODO) need to find a better way to interact with user.
    # Currently the program expect an input to exit, even when crawling finish for all
    # photos(eg. user crawling).
    raw_input()
    syncPrint('Exiting...')
    crawler.finish()



Example #28
def extractBlockMatches(filepath1, filepath2, params, csvDir, exeload, load):
    """
  filepath1: the file path to an image of a section.
  filepath2: the file path to an image of another section.
  params: dictionary of parameters necessary for BlockMatching.
  exeload: an ExecutorService for parallel loading of image files.
  load: a function that knows how to load the image from the filepath.

  return False if the CSV file already exists, True if it has to be computed.
  """

    # Skip if pointmatches CSV file exists already:
    csvpath = os.path.join(
        csvDir,
        basename(filepath1) + '.' + basename(filepath2) + ".pointmatches.csv")
    if os.path.exists(csvpath):
        return False

    try:

        # Load files in parallel
        futures = [
            exeload.submit(Task(load, filepath1)),
            exeload.submit(Task(load, filepath2))
        ]

        fp1 = futures[0].get()  # FloatProcessor, already Gaussian-blurred, contrast-corrected and scaled!
        fp2 = futures[1].get()  # FloatProcessor, idem

        # Define points from the mesh
        sourcePoints = ArrayList()
        # List to fill
        sourceMatches = ArrayList()  # of PointMatch from filepath1 to filepath2

        # Don't use blockmatching if the dimensions are different
        use_blockmatching = (fp1.getWidth() == fp2.getWidth()
                             and fp1.getHeight() == fp2.getHeight())

        if use_blockmatching:
            # Fill the sourcePoints
            mesh = TransformMesh(params["meshResolution"], fp1.width,
                                 fp1.height)
            PointMatch.sourcePoints(mesh.getVA().keySet(), sourcePoints)
            syncPrint("Extracting block matches for \n S: " + filepath1 +
                      "\n T: " + filepath2 + "\n  with " +
                      str(sourcePoints.size()) + " mesh sourcePoints.")
            # Run
            BlockMatching.matchByMaximalPMCCFromPreScaledImages(
                fp1,
                fp2,
                params["scale"],  # float
                params["blockRadius"],  # X
                params["blockRadius"],  # Y
                params["searchRadius"],  # X
                params["searchRadius"],  # Y
                params["minR"],  # float
                params["rod"],  # float
                params["maxCurvature"],  # float
                sourcePoints,
                sourceMatches)

        # At least some should match to accept the translation
        if len(sourceMatches) < max(20, len(sourcePoints) / 5) / 2:
            syncPrint(
                "Found only %i blockmatching pointmatches (from %i source points)"
                % (len(sourceMatches), len(sourcePoints)))
            syncPrint("... therefore invoking SIFT pointmatching for:\n  S: " +
                      basename(filepath1) + "\n  T: " + basename(filepath2))
            # Can fail if there is a shift larger than the searchRadius
            # Try SIFT features, which are location independent
            #
            # Images are now scaled: load originals
            futures = [
                exeload.submit(
                    Task(loadFloatProcessor, filepath1, params, scale=False)),
                exeload.submit(
                    Task(loadFloatProcessor, filepath2, params, scale=False))
            ]

            fp1 = futures[0].get()  # FloatProcessor, original
            fp2 = futures[1].get()  # FloatProcessor, original

            # Images can be of different size: scale them the same way
            area1 = fp1.width * fp1.height
            area2 = fp2.width * fp2.height

            if area1 == area2:
                paramsSIFT1 = params["paramsSIFT"].clone()
                paramsSIFT1.maxOctaveSize = int(
                    max(1024, fp1.width * params["scale"]))
                paramsSIFT1.minOctaveSize = int(paramsSIFT1.maxOctaveSize /
                                                pow(2, paramsSIFT1.steps))
                paramsSIFT2 = paramsSIFT1
            else:
                bigger, smaller = (fp1, fp2) if area1 > area2 else (fp2, fp1)
                target_width_bigger = int(
                    max(1024, bigger.width * params["scale"]))
                if 1024 == target_width_bigger:
                    target_width_smaller = int(1024 * float(smaller.width) /
                                               bigger.width)
                else:
                    target_width_smaller = smaller.width * params["scale"]
                #
                paramsSIFT1 = params["paramsSIFT"].clone()
                paramsSIFT1.maxOctaveSize = target_width_bigger
                paramsSIFT1.minOctaveSize = int(paramsSIFT1.maxOctaveSize /
                                                pow(2, paramsSIFT1.steps))
                paramsSIFT2 = params["paramsSIFT"].clone()
                paramsSIFT2.maxOctaveSize = target_width_smaller
                paramsSIFT2.minOctaveSize = int(paramsSIFT2.maxOctaveSize /
                                                pow(2, paramsSIFT2.steps))

            ijSIFT1 = SIFT(FloatArray2DSIFT(paramsSIFT1))
            features1 = ArrayList()  # of Point instances
            ijSIFT1.extractFeatures(fp1, features1)

            ijSIFT2 = SIFT(FloatArray2DSIFT(paramsSIFT2))
            features2 = ArrayList()  # of Point instances
            ijSIFT2.extractFeatures(fp2, features2)
            # Vector of PointMatch instances
            sourceMatches = FloatArray2DSIFT.createMatches(
                features1,
                features2,
                1.5,  # max_sd
                TranslationModel2D(),
                Double.MAX_VALUE,
                params["rod"])  # rod: ratio of best vs second best

        # Store pointmatches
        savePointMatches(os.path.basename(filepath1),
                         os.path.basename(filepath2), sourceMatches, csvDir,
                         params)

        return True
    except:
        syncPrint(sys.exc_info())
        syncPrint("".join(traceback.format_exception()), out="stderr")
Example #29
def findPointMatches(img1_filename,
                     img2_filename,
                     img_loader,
                     getCalibration,
                     csv_dir,
                     exe,
                     params,
                     verbose=True):
    """ Attempt to load them from a CSV file, otherwise compute them and save them. """
    names = set([
        "minPeakValue",
        "sigmaSmaller",
        "sigmaLarger",  # DoG peak params
        "radius",
        "min_angle",
        "max_per_peak",  # Constellation params
        "angle_epsilon",
        "len_epsilon_sq"
    ])  # pointmatches params
    pm_params = {k: params[k] for k in names}
    # Attempt to load pointmatches from CSV file
    pointmatches = loadPointMatches(img1_filename,
                                    img2_filename,
                                    csv_dir,
                                    pm_params,
                                    verbose=verbose)
    if pointmatches is not None:
        return pointmatches

    # Load features from CSV files
    # otherwise compute them and save them.
    img_filenames = [img1_filename, img2_filename]
    names = set([
        "minPeakValue", "sigmaSmaller", "sigmaLarger", "radius", "min_angle",
        "max_per_peak"
    ])
    feature_params = {k: params[k] for k in names}
    csv_features = [
        loadFeatures(img_filename, csv_dir, feature_params, verbose=verbose)
        for img_filename in img_filenames
    ]
    # If features were loaded, just return them, otherwise compute them (and save them to CSV files)
    futures = [
        Getter(fs) if fs else exe.submit(
            Task(makeFeatures, img_filename, img_loader, getCalibration,
                 csv_dir, feature_params))
        for fs, img_filename in izip(csv_features, img_filenames)
    ]
    features = [f.get() for f in futures]

    if verbose:
        for img_filename, fs in izip(img_filenames, features):
            syncPrint("Found %i constellation features in image %s" %
                      (len(fs), basename(img_filename)))

    # Compare all possible pairs of constellation features: the PointMatches
    pointmatches_nearby = params.get('pointmatches_nearby', 0)
    if 1 == pointmatches_nearby:
        # Use a RadiusNeighborSearchOnKDTree
        pm = PointMatches.fromNearbyFeatures(
            params['pointmatches_search_radius'], features[0], features[1],
            params["angle_epsilon"], params["len_epsilon_sq"])
    else:
        if 2 == pointmatches_nearby:
            method = PointMatches.fromFeaturesScaleInvariant
        else:  # 0
            method = PointMatches.fromFeatures
        # All to all
        pm = method(features[0], features[1], params["angle_epsilon"],
                    params["len_epsilon_sq"])

    if verbose:
        syncPrint("Found %i point matches between:\n    %s\n    %s" % \
                  (len(pm.pointmatches), basename(img1_filename), basename(img2_filename)))

    # Store as CSV file
    savePointMatches(img1_filename, img2_filename, pm.pointmatches, csv_dir,
                     pm_params)
    #
    return pm.pointmatches
Example #30
 def report(name, value):
     syncPrint("Mismatching parameters: '%s' :: %s != %s" %
               (name, str(params[name]), str(value)))
Example #31
def computeOptimizedTransforms(img_filenames,
                               img_loader,
                               getCalibration,
                               csv_dir,
                               exe,
                               modelclass,
                               params,
                               verbose=True):
    """ Compute transforms for all images at once,
      simultaneously considering registrations between image i to image i+1, i+2 ... i+n,
      where n is params["n_adjacent"].
      Alternatively, if params["all_to_all"] exists and is truthy, all tiles will be connected to each other.
      Then all matches are optimized together using mpicbg.models.TileConfiguration.
      Fixed tiles are specified in a list of indices with params["fixed_tile_indices"].
      Expects, in total:
       * params["n_adjacent"] or params["all_to_all"]
       * params["fixed_tile_index"] (when absent, defaults to [0]: a list with the first tile index in it)
       * params["maxAllowedError"]
       * params["maxPlateauwidth"]
       * params["maxIterations"]
       * params["damp"]
      Returns a list of affine 3D matrices, each a double[] with 12 values, corresponding to the img_filenames.
  """
    # Ensure features exist in CSV files, or create them
    ensureFeaturesForAll(img_filenames,
                         img_loader,
                         getCalibration,
                         csv_dir,
                         params,
                         exe,
                         verbose=verbose)

    # One Tile per time point
    tiles = [Tile(modelclass()) for _ in img_filenames]

    # Extract pointmatches from img_filename i to all in range(i+1, i+n)
    def findPointMatchesProxy(i, j):
        pointmatches = findPointMatches(img_filenames[i],
                                        img_filenames[j],
                                        img_loader,
                                        getCalibration,
                                        csv_dir,
                                        exe,
                                        params,
                                        verbose=verbose)
        return i, j, pointmatches

    #
    futures = []

    if params.get("all_to_all", False):
        for i, j in combinations(xrange(len(img_filenames)), 2):
            futures.append(exe.submit(Task(findPointMatchesProxy, i, j)))
    else:
        n = params["n_adjacent"]
        for i in xrange(len(img_filenames) - n + 1):
            for inc in xrange(1, n):
                # All features were extracted already, so the 'exe' won't be used in findPointMatches
                futures.append(
                    exe.submit(Task(findPointMatchesProxy, i, i + inc)))

    # Join tiles with tiles for which pointmatches were computed
    for f in futures:
        i, j, pointmatches = f.get()
        if 0 == len(pointmatches):
            syncPrint("Zero pointmatches for %i vs %i" % (i, j))
            continue
        syncPrint("connecting tile %i with %i" % (i, j))
        tiles[i].connect(tiles[j], pointmatches)  # reciprocal connection

    # Optimize tile pose
    tc = TileConfiguration()
    tc.addTiles(tiles)
    fixed_tile_indices = params.get("fixed_tile_indices",
                                    [0])  # default: fix first tile
    syncPrint("Fixed tile indices: %s" % str(fixed_tile_indices))
    for index in fixed_tile_indices:
        tc.fixTile(tiles[index])
    #
    if TranslationModel3D != modelclass:
        syncPrint("Running TileConfiguration.preAlign, given %s" %
                  modelclass.getSimpleName())
        tc.preAlign()
    else:
        syncPrint("No prealign, model is %s" % modelclass.getSimpleName())
    #
    maxAllowedError = params["maxAllowedError"]
    maxPlateauwidth = params["maxPlateauwidth"]
    maxIterations = params["maxIterations"]
    damp = params["damp"]
    tc.optimizeSilentlyConcurrent(ErrorStatistic(maxPlateauwidth + 1),
                                  maxAllowedError, maxIterations,
                                  maxPlateauwidth, damp)

    # TODO problem: can fail when there are 0 inliers

    # Return model matrices as double[] arrays with 12 values
    matrices = []
    for tile in tiles:
        a = nativeArray('d', [3, 4])
        tile.getModel().toMatrix(
            a)  # Can't use model.toArray: different order of elements
        matrices.append(a[0] + a[1] +
                        a[2])  # Concat: flatten to 1-dimensional array

    return matrices
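# Hedged sketch (assumption) of the parameter dictionary this function expects, combining its
# own keys with those consumed downstream by findPointMatches; values are illustrative only,
# and img_filenames, getCalibration, csv_dir and exe are placeholders defined elsewhere.
example_params = {
    "n_adjacent": 3,  # or "all_to_all": True
    "fixed_tile_indices": [0],
    "maxAllowedError": 0, "maxPlateauwidth": 200, "maxIterations": 1000, "damp": 1.0,
    # DoG peak, Constellation and pointmatching keys used by findPointMatches:
    "minPeakValue": 30, "sigmaSmaller": 2.0, "sigmaLarger": 4.0,
    "radius": 10, "min_angle": 1.57, "max_per_peak": 3,
    "angle_epsilon": 0.02, "len_epsilon_sq": 100,
}
matrices = computeOptimizedTransforms(img_filenames, ImageJLoader(), getCalibration,
                                      csv_dir, exe, RigidModel3D, example_params)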
Example #32
def multiviewDeconvolution(images, blockSizes, PSF_kernels, n_iterations, lambda_val=0.0006, weights=None,
                           filterBlocksForContent=False, PSF_type=PSFTYPE.INDEPENDENT, exe=None, printFn=syncPrint):
  """
  Apply Bayesian-based multi-view deconvolution to the list of images,
  returning the deconvolved image. Uses Stephan Preibisch's library,
  currently available with the BigStitcher Fiji update site.

  images: a list of images, registered and all with the same dimensions.
  blockSizes: how to chop up the volume of each image for parallel processing.
             When None, a single block with the image dimensions is used,
             plus half of the transformed kernel dimensions for that view.
  PSF_kernels: the images containing the point spread function for each input image. Requirement: the dimensions must be an odd number.
  n_iterations: the number of iterations for the deconvolution. A number between 10 and 50 is desirable. The more iterations, the higher the computational cost.
  lambda_val: default is 0.0006 as recommended by Preibisch.
  weights: a list of FloatType images with the weight for every pixel. If None, then all pixels get a value of 1.
  filterBlocksForContent: whether to check before processing a block if the block has any data in it. Default is False.
  PSF_type: defaults to PSFTYPE.INDEPENDENT.
  exe: a thread pool for concurrent execution. If None, a new one is created, using as many threads as CPUs are available.
  printFn: the function to use for printing error messages. Defaults to syncPrint (thread-safe access to the built-in `print` function).

  Returns an imglib2 ArrayImg, or None if something went wrong.
  """

  mvd_exe = exe
  if not exe:
    mvd_exe = newFixedThreadPool() # as many threads as CPUs

  try:
    mvd_weights = weights
    if not weights:
      mvd_weights = repeat(Views.interval(ConstantRandomAccessible(FloatType(1), images[0].numDimensions()), FinalInterval(images[0])))

    for i, PSF_kernel in enumerate(PSF_kernels):
      for d in xrange(PSF_kernel.numDimensions()):
        if 0 == PSF_kernel.dimension(d) % 2:
          printFn("for image at index %i, PSF kernel dimension %i is not odd." % (i, d))
          return None

    if not blockSizes:
      # Whole image dimensions + half of the transformed PSF kernel dimensions
      kernel_max = int(max(PSF_kernel.dimension(d)
                           for PSF_kernel in PSF_kernels
                           for d in xrange(PSF_kernel.numDimensions())) * 2)
      syncPrint("kernel max dimension *2: %i" % kernel_max)
      blockSizes = []
      for image in images:
        blockSizes.append([image.dimension(d) + kernel_max
                           for d in xrange(image.numDimensions())])
        syncPrint("blockSize:" + str(blockSizes[-1]))

    cptf = createFactory(mvd_exe, lambda_val, blockSizes[0]) # TODO which blockSize to give here?
    filterBlocksForContent = False # Run once with True, none were removed
    dviews = [DeconView(mvd_exe, img, weight, PSF_kernel, PSF_type, blockSize, 1, filterBlocksForContent)
              for img, blockSize, weight, PSF_kernel in izip(images, blockSizes, mvd_weights, PSF_kernels)]
    decon = MultiViewDeconvolutionSeq(DeconViews(dviews, mvd_exe), n_iterations, PsiInitBlurredFusedFactory(), cptf, ArrayImgFactory(FloatType()))
    if not decon.initWasSuccessful():
      printFn("Something went wrong initializing MultiViewDeconvolution")
      return None
    else:
      decon.runIterations()
      return decon.getPSI()
  finally:
    # Only shut down the thread pool if it was created here
    if not exe:
      mvd_exe.shutdownNow()
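# Hedged usage sketch (assumption): deconvolve two registered views with their PSF kernels;
# img0, img1, kernel0 and kernel1 are placeholder imglib2 images (e.g. prepared by the
# prepare(index) helper above), and createFactory must have been set up via setupEngine.
deconvolved = multiviewDeconvolution([img0, img1], None, [kernel0, kernel1], 30)
if deconvolved is not None:
    IL.wrap(deconvolved, "deconvolved").show()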
Example #33
 def prepare(index):
   # Prepare the img for deconvolution:
   # 0. Transform in one step.
   # 1. Ensure its pixel values conform to expectations (no zeros inside)
   # 2. Copy it into an ArrayImg for faster recurrent retrieval of same pixels
   syncPrint("Preparing %s CM0%i for deconvolution" % (tm_dirname, index))
Example #34
def export8bitN5(
        filepaths,
        loadFn,
        img_dimensions,
        matrices,
        name,
        exportDir,
        interval,
        gzip_compression=6,
        invert=True,
        CLAHE_params=[400, 256, 3.0],
        n5_threads=0,  # 0 means as many as CPU cores
        block_size=[128, 128, 128]):
    """
  Export into an N5 volume, in parallel, in 8-bit.

  filepaths: the ordered list of filepaths, one per serial section.
  loadFn: a function to load a filepath into an ImagePlus.
  name: name to assign to the N5 volume.
  matrices: the list of transformation matrices (each one is an array), one per section
  exportDir: the directory into which to save the N5 volume.
  interval: for cropping.
  gzip_compression: defaults to 6 as suggested by Saalfeld. 0 means no compression.
  invert: defaults to True (necessary for FIBSEM). Whether to invert the images upon loading.
  CLAHE_params: defaults to [400, 256, 3.0]. If not None, a list of the 3 parameters needed for the CLAHE filter applied to each image.
  n5_threads: defaults to 0, meaning as many as CPU cores.
  block_size: defaults to 128x128x128 px. A list of 3 integer numbers, the dimensions of each individual block.
  """

    dims = Intervals.dimensionsAsLongArray(interval)
    voldims = [dims[0], dims[1], len(filepaths)]
    cell_dimensions = [dims[0], dims[1], 1]

    def asNormalizedUnsignedByteArrayImg(interval, invert, blockRadius, n_bins,
                                         slope, matrices, index, imp):
        sp = imp.getProcessor()  # ShortProcessor
        # Crop to interval if needed
        x = interval.min(0)
        y = interval.min(1)
        width = interval.max(0) - interval.min(0) + 1
        height = interval.max(1) - interval.min(1) + 1
        if 0 != x or 0 != y or sp.getWidth() != width or sp.getHeight(
        ) != height:
            sp.setRoi(x, y, width, height)
            sp = sp.crop()

        if invert:
            sp.invert()

        CLAHE.run(
            ImagePlus("", sp), blockRadius, n_bins, slope, None
        )  # far less memory requirements than NormalizeLocalContrast, and faster.
        minimum, maximum = autoAdjust(sp)

        # Transform and convert image to 8-bit, mapping to display range
        img = ArrayImgs.unsignedShorts(
            sp.getPixels(), [sp.getWidth(), sp.getHeight()])
        sp = None
        imp = None
        # Must use linear interpolation for subpixel precision
        affine = AffineTransform2D()
        affine.set(matrices[index])
        imgI = Views.interpolate(Views.extendZero(img),
                                 NLinearInterpolatorFactory())
        imgA = RealViews.transform(imgI, affine)
        imgT = Views.zeroMin(Views.interval(imgA, img))
        # Convert to 8-bit
        imgMinMax = convert2(imgT,
                             RealUnsignedByteConverter(minimum, maximum),
                             UnsignedByteType,
                             randomAccessible=False)  # use IterableInterval
        aimg = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img))
        # ImgUtil copies multi-threaded, which is not appropriate here as there are many other images being copied too
        #ImgUtil.copy(ImgView.wrap(imgMinMax, aimg.factory()), aimg)

        # Single-threaded copy
        copier = createBiConsumerTypeSet(UnsignedByteType)
        LoopBuilder.setImages(imgMinMax, aimg).forEachPixel(copier)

        img = imgI = imgA = imgMinMax = imgT = None
        return aimg

    blockRadius, n_bins, slope = CLAHE_params

    # A CacheLoader that interprets the list of filepaths as a 3D volume: a stack of 2D slices
    loader = SectionCellLoader(
        filepaths,
        asArrayImg=partial(asNormalizedUnsignedByteArrayImg, interval, invert,
                           blockRadius, n_bins, slope, matrices),
        loadFn=loadFn)

    # How to preload block_size[2] files at a time? Or at least as many as numCPUs()?
    # One possibility is to query the SoftRefLoaderCache.map for its entries, using a ScheduledExecutorService,
    # and preload sections ahead for the whole blockSize[2] dimension.

    cachedCellImg = lazyCachedCellImg(loader, voldims, cell_dimensions,
                                      UnsignedByteType, BYTE)

    exe_preloader = newFixedThreadPool(n_threads=min(
        block_size[2], n5_threads if n5_threads > 0 else numCPUs()),
                                       name="preloader")

    def preload(cachedCellImg, loader, block_size, filepaths, exe):
        """
    Find which is the last cell index in the cache, identify to which block
    (given the blockSize[2] AKA Z dimension) that index belongs to,
    and concurrently load all cells (sections) that the Z dimension of the blockSize will need.
    If they are already loaded, these operations are insignificant.
    """
        try:
            # The SoftRefLoaderCache.map is a ConcurrentHashMap with Long keys, aka numbers
            cache = cachedCellImg.getCache()
            f1 = cache.getClass().getDeclaredField(
                "cache")  # LoaderCacheAsCacheAdapter.cache
            f1.setAccessible(True)
            softCache = f1.get(cache)
            cache = None
            f2 = softCache.getClass().getDeclaredField(
                "map")  # SoftRefLoaderCache.map
            f2.setAccessible(True)
            keys = sorted(f2.get(softCache).keySet())
            if 0 == len(keys):
                return
            first = max(0, keys[-1] - (keys[-1] % block_size[2]))
            last = min(len(filepaths), first + block_size[2]) - 1
            keys = None
            syncPrintQ("### Preloading %i-%i ###" % (first, last))
            futures = []
            for index in xrange(first, last + 1):
                futures.append(
                    exe.submit(TimeItTask(softCache.get, index, loader)))
            softCache = None
            # Wait for all
            loaded_any = False
            count = 0
            while len(futures) > 0:
                r, t = futures.pop(0).get()  # waits for the image to load
                if t > 1000:  # in milliseconds. Less than this is for sure a cache hit, more a cache miss and reload
                    loaded_any = True
                r = None
                # t in milliseconds
                syncPrintQ("preloaded index %i in %f ms" % (first + count, t))
                count += 1
            if not loaded_any:
                syncPrintQ("Completed preloading %i-%i" %
                           (first, first + block_size[2] - 1))
        except:
            syncPrintQ(sys.exc_info())

    preloader = Executors.newSingleThreadScheduledExecutor()
    preloader.scheduleWithFixedDelay(
        RunTask(preload, cachedCellImg, loader, block_size, filepaths,
                exe_preloader), 10, 60, TimeUnit.SECONDS)

    try:
        syncPrint("N5 directory: " + exportDir + "\nN5 dataset name: " + name +
                  "\nN5 blockSize: " + str(block_size))
        writeN5(cachedCellImg,
                exportDir,
                name,
                block_size,
                gzip_compression_level=gzip_compression,
                n_threads=n5_threads)
    finally:
        preloader.shutdown()
        exe_preloader.shutdown()
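# Hedged usage sketch (assumption): export registered serial sections as an 8-bit N5 volume;
# filepaths, matrices (e.g. from loadMatrices), img_dimensions and exportDir are placeholders,
# the dataset name is illustrative, and loadImp (defined above) serves as the loadFn that
# opens each section as an ImagePlus.
interval = FinalInterval([0, 0], [img_dimensions[0] - 1, img_dimensions[1] - 1])
export8bitN5(filepaths, loadImp, img_dimensions, matrices, "sections-8bit",
             exportDir, interval, gzip_compression=6, block_size=[128, 128, 128])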
Example #35
def registerDeconvolvedTimePoints(targetDir,
                                  params,
                                  modelclass,
                                  exe=None,
                                  verbose=True,
                                  subrange=None):
    """ Can only be run after running deconvolveTimePoints, because it
      expects deconvolved images to exist under <targetDir>/deconvolved/,
      with a name pattern like: TM\d+_CM0\d-CM0\d-deconvolved.zip
      
      Tests whether the files exist first; if not, execution stops.

      Will write the features, pointmatches and registration affine matrices
      into a csv folder under targetDir.

      If a CSV file with the affine transform matrices exists, it will read them out
      and provide the 4D img right away.
      Else, it will check which files are missing their features and pointmatches as CSV files,
      create them, and ultimately create the CSV file with the affine transform matrices,
      and then provide the 4D img.

      targetDir: the directory containing the deconvolved images.
      params: for feature extraction and registration.
      modelclass: the model to use, e.g. Translation3D, AffineTransform3D.
      exe: the ExecutorService to use (optional).
      subrange: the range of time point indices to process, as enumerated
                by the folder name, i.e. the number captured by /TM(\d+)/
      
      Returns an imglib2 4D img with the registered deconvolved 3D stacks."""

    deconvolvedDir = os.path.join(targetDir, "deconvolved")

    # A folder for features, pointmatches and matrices in CSV format
    csv_dir = os.path.join(deconvolvedDir, "csvs")
    if not os.path.exists(csv_dir):
        os.mkdir(csv_dir)

    # A datastructure to represent the timepoints, each with two filenames
    timepoint_views = defaultdict(defaultdict)
    pattern = re.compile("^TM(\d+)_(CM0\d-CM0\d)-deconvolved.zip$")
    for filename in sorted(os.listdir(deconvolvedDir)):
        m = re.match(pattern, filename)
        if m:
            stime, view = m.groups()
            timepoint_views[int(stime)][view] = filename

    # Filter by specified subrange, if any
    if subrange:
        subrange = set(subrange)
        for time in timepoint_views.keys(
        ):  # a list copy of the keys, so timepoints can be modified
            if time not in subrange:
                del timepoint_views[time]

    # Register only the view CM00-CM01, given that CM02-CM03 has the same transform
    matrices_name = "matrices-%s" % modelclass.getSimpleName()
    matrices = None
    if os.path.exists(os.path.join(csv_dir, matrices_name + ".csv")):
        matrices = loadMatrices(matrices_name, csv_dir)
        if len(matrices) != len(timepoint_views):
            syncPrint(
                "Ignoring existing matrices CSV file: length (%i) doesn't match with expected number of timepoints (%i)"
                % (len(matrices), len(timepoint_views)))
            matrices = None
    if not matrices:
        original_exe = exe
        if not exe:
            exe = newFixedThreadPool()
        try:
            # Deconvolved images are isotropic
            def getCalibration(img_filepath):
                return [1, 1, 1]

            timepoints = []  # sorted
            filepaths = []  # sorted
            for timepoint, views in sorted(timepoint_views.iteritems(),
                                           key=itemgetter(0)):
                timepoints.append(timepoint)
                filepaths.append(
                    os.path.join(deconvolvedDir, views["CM00-CM01"]))
            #
            #matrices_fwd = computeForwardTransforms(filepaths, ImageJLoader(), getCalibration,
            #                                        csv_dir, exe, modelclass, params, exe_shutdown=False)
            #matrices = [affine.getRowPackedCopy() for affine in asBackwardConcatTransforms(matrices_fwd)]
            matrices = computeOptimizedTransforms(filepaths,
                                                  ImageJLoader(),
                                                  getCalibration,
                                                  csv_dir,
                                                  exe,
                                                  modelclass,
                                                  params,
                                                  verbose=verbose)
            saveMatrices(matrices_name, matrices, csv_dir)
        finally:
            if not original_exe:
                exe.shutdownNow()  # Was created new

    # Convert matrices into twice as many affine transforms
    affines = []
    for matrix in matrices:
        aff = AffineTransform3D()
        aff.set(*matrix)
        affines.append(aff)
        affines.append(aff)  # twice: also for the CM02-CM03

    # Show the registered deconvolved series as a 4D volume.
    filepaths = []
    for timepoint in sorted(timepoint_views.iterkeys()):
        views = timepoint_views.get(timepoint)
        for view_name in sorted(views.keys()):  # ["CM00-CM01", "CM02-CM03"]
            filepaths.append(os.path.join(deconvolvedDir, views[view_name]))

    img = Load.lazyStack(
        filepaths,
        TransformedLoader(ImageJLoader(),
                          dict(izip(filepaths, affines)),
                          asImg=True))
    return img
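# Hedged usage sketch (assumption): register the deconvolved time series under targetDir with a
# translation-only model and show it; targetDir and params are placeholders, with params holding
# the feature-extraction and registration keys listed for computeOptimizedTransforms above.
img4D = registerDeconvolvedTimePoints(targetDir, params, TranslationModel3D)
IL.wrap(img4D, "registered deconvolved timepoints").show()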