Example #1
File: ui.py Project: acardona/scripts
 def translate(self, dx, dy, dz):
     # Z within bounds
     self.indexZ += dz
     self.indexZ = min(self.img3D.dimension(2) - 1, max(0, self.indexZ))
     # X, Y can be beyond bounds
     self.interval2D = FinalInterval(
         [self.interval2D.min(0) + dx,
          self.interval2D.min(1) + dy],
         [self.interval2D.max(0) + dx,
          self.interval2D.max(1) + dy])
     self.updatePixels()
     return self.interval2D.min(0), self.interval2D.min(1), self.indexZ
def show(title, nuclei, radius, bounds, scale=1.0):
    points = [RealPoint.wrap([c * scale for c in coords]) for coords in nuclei]
    interval = FinalInterval([int(b * scale) for b in bounds[0]],
                             [int(b * scale) for b in bounds[1]])
    img = virtualPointsRAI(points, radius * scale, interval)
    imp = showStack(img, title=title)
    return imp, img, points
def getFFTFromView(v, extension, extSize, paddedDimensions, fftSize):
    FFTMethods.dimensionsRealToComplexFast(extSize, paddedDimensions, fftSize)
    fft = ArrayImgFactory(ComplexFloatType()).create(fftSize,
                                                     ComplexFloatType())
    FFT.realToComplex(
        Views.interval(
            PhaseCorrelation2Util.extendImageByFactor(v, extension),
            FFTMethods.paddingIntervalCentered(
                v, FinalInterval(paddedDimensions))), fft, exe)
    return fft
Example #4
 def translate(self, dx, dy):
     a = zeros(2, 'l')
     self.interval.min(a)
     width = self.cell_dimensions[0]
     height = self.cell_dimensions[1]
     x0 = max(0, min(a[0] + dx, self.img_dimensions[0] - width))
     y0 = max(0, min(a[1] + dy, self.img_dimensions[1] - height))
     self.interval = FinalInterval([x0, y0],
                                   [x0 + width - 1, y0 + height - 1])
     syncPrintQ(str(Intervals.dimensionsAsLongArray(self.interval)))
     self.cache.clear()
 def __init__(self, img4d, kdtrees):
     # The last coordinate, Z (number of slices), is the number of slices per timepoint 3D volume
     # times the number of timepoints, times the number of channels: two.
     super(VirtualStack,
           self).__init__(img4d.dimension(0), img4d.dimension(1),
                          img4d.dimension(2) * img4d.dimension(3) * 2)
     self.img4d = img4d
     self.dimensions = array([img4d.dimension(0), img4d.dimension(1)], 'l')
     self.kdtrees = kdtrees
     self.dimensions3d = FinalInterval(
         [img4d.dimension(0),
          img4d.dimension(1),
          img4d.dimension(2)])
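As a worked instance of the slice-count arithmetic in the comment above: a hypothetical img4d with 3 slices per 3D volume and 5 timepoints yields a virtual stack of 3 * 5 * 2 = 30 planes, the 2 being the channels built by this class.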
Example #6
 def keyPressed(self, ke):
   keyCode = ke.getKeyCode()
   if ke.isControlDown() and keyCode in Navigator.moves:
     d, sign = Navigator.moves[keyCode]
     inc = 200 if ke.isShiftDown() else 20
     mins[d] += sign * inc
     maxs[d] += sign * inc
     # Replace source with shifted cropped volume
     fsource.set(stack, Views.zeroMin(Views.interval(imgE, FinalInterval(mins, maxs))))
     imp.updateVirtualSlice()
     return
   # Else, pass the event onto other listeners
   for kl in kls:
     kl.keyPressed(ke)
Example #7
File: ui.py Project: acardona/scripts
class ViewFloatProcessor(FloatProcessor):
    """
  A 2D FloatProcessor whose float[] pixel array is populated from the pixels within
  an interval on a source 3D RandomAccessibleInterval at a specified indexZ (the section index).
  The interval and indexZ are editable via the translate method.
  """
    def __init__(self, img3D, interval2D, indexZ):
        self.img3D = img3D
        self.interval2D = interval2D
        self.indexZ = indexZ
        super(FloatProcessor, self).__init__(interval2D.dimension(0),
                                             interval2D.dimension(1))
        self.updatePixels()

    def translate(self, dx, dy, dz):
        # Z within bounds
        self.indexZ += dz
        self.indexZ = min(self.img3D.dimension(2) - 1, max(0, self.indexZ))
        # X, Y can be beyond bounds
        self.interval2D = FinalInterval(
            [self.interval2D.min(0) + dx,
             self.interval2D.min(1) + dy],
            [self.interval2D.max(0) + dx,
             self.interval2D.max(1) + dy])
        self.updatePixels()
        return self.interval2D.min(0), self.interval2D.min(1), self.indexZ

    def updatePixels(self):
        # Copy interval into pixels
        view = Views.interval(
            Views.extendZero(Views.hyperSlice(self.img3D, 2, self.indexZ)),
            self.interval2D)
        aimg = ArrayImgs.floats(
            self.getPixels(),
            [self.interval2D.dimension(0),
             self.interval2D.dimension(1)])
        ImgUtil.copy(view, aimg)
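A minimal usage sketch for the class above (hypothetical: img3D stands for any existing 3D RandomAccessibleInterval; the 100x100 window and the starting section are arbitrary):

from ij import ImagePlus
from net.imglib2 import FinalInterval

# Show a 100x100 window into img3D, starting at section 0
fp = ViewFloatProcessor(img3D, FinalInterval([0, 0], [99, 99]), 0)
imp = ImagePlus("window", fp)
imp.show()
fp.translate(10, 0, 1)  # pan 10 pixels in X and advance one section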
def populateInstances(instances, synth_imgs, class_index, mins, maxs):
    # Populate the training data: create the filter bank for each feature image
    # by reading values from the interval defined by mins and maxs
    target = ArrayImgs.floats([width, height])
    interval = FinalInterval(mins, maxs)
    n_samples = Intervals.numElements(interval)
    for img in synth_imgs:
        vectors = [zeros(len(attributes), 'd') for _ in xrange(n_samples)]
        for k, op in enumerate(filterBank(img, sumType=DoubleType())):
            imgOp = compute(op).into(target)
            for i, v in enumerate(Views.interval(imgOp, interval)):
                vectors[i][k] = v.getRealDouble()
        for vector in vectors:
            vector[-1] = class_index
            instances.add(DenseInstance(1.0, vector))
def twoStep(index=0):
    # The current way:
    img = klb.readFull(filepaths[index])  # klb_loader.get(filepaths[index])
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    imgT = RealViews.transform(imgI, cmIsotropicTransforms[index])
    imgB = Views.zeroMin(Views.interval(imgT, roi[0],
                                        roi[1]))  # bounded: crop with ROI
    imgBA = ArrayImgs.unsignedShorts(Intervals.dimensionsAsLongArray(imgB))
    ImgUtil.copy(ImgView.wrap(imgB, imgBA.factory()), imgBA)
    imgP = prepareImgForDeconvolution(
        imgBA,
        affine3D(fineTransformsPostROICrop[index]).inverse(),
        FinalInterval([0, 0, 0], [imgB.dimension(d) - 1 for d in xrange(3)]))
    # Copy transformed view into ArrayImg for best performance in deconvolution
    imgA = ArrayImgs.floats(Intervals.dimensionsAsLongArray(imgP))
    ImgUtil.copy(ImgView.wrap(imgP, imgA.factory()), imgA)
    IL.wrap(imgA, "two step").show()
def oneStep(index=0):
    # Combining transforms into one, via a translation to account for the ROI crop
    img = klb.readFull(filepaths[index])  # klb_loader.get(filepaths[index])
    t1 = cmIsotropicTransforms[index]
    t2 = affine3D(
        [1, 0, 0, -roi[0][0], 0, 1, 0, -roi[0][1], 0, 0, 1, -roi[0][2]])
    t3 = affine3D(fineTransformsPostROICrop[index]).inverse()
    aff = AffineTransform3D()
    aff.set(t1)
    aff.preConcatenate(t2)
    aff.preConcatenate(t3)
    # Final interval is now rooted at 0,0,0 given that the transform includes the translation
    imgP = prepareImgForDeconvolution(
        img, aff,
        FinalInterval([0, 0, 0],
                      [maxC - minC for minC, maxC in izip(roi[0], roi[1])]))
    # Copy transformed view into ArrayImg for best performance in deconvolution
    imgA = ArrayImgs.floats(Intervals.dimensionsAsLongArray(imgP))
    ImgUtil.copy(ImgView.wrap(imgP, imgA.factory()), imgA)
    IL.wrap(imgA, "one step index %i" % index).show()
Example #11
def deconvolveTimePoints(srcDir,
                         targetDir,
                         kernel_filepath,
                         calibration,
                         cameraTransformations,
                         fineTransformsPostROICrop,
                         params,
                         roi,
                         subrange=None,
                         camera_groups=((0, 1), (2, 3)),
                         fine_fwd=False,
                         n_threads=0):  # 0 means all
    """
     Main program entry point.
     For each time point folder TM\d+, find the KLB files of the 4 cameras,
     then register them all to camera CM01, and deconvolve CM01+CM02, and CM02+CM03,
     and store these two images in corresponding TM\d+ folders under targetDir.

     Assumes that each camera view has the same dimensions in each time point folder.
     A camera view may have dimensions different from those of the other cameras.

     Can be run as many times as necessary. Intermediate computations are saved
     as csv files (features, pointmatches and transformation matrices), and 
     the deconvolved images as well, into folder targetDir/deconvolved/ with
     a name pattern like TM\d+_CM0\d_CM0\d-deconvolved.zip
     
     srcDir: file path to a directory with TM\d+ subdirectories, one per time point.
     targetDir: file path to a directory for storing deconvolved images
                and CSV files with features, point matches and transformation matrices.
     kernel_filepath: file path to the 3D image of the point spread function (PSF),
                      which can be computed from fluorescent beads with the BigStitcher functions
                      and which must have odd dimensions.
     calibration: the [x, y, z] calibration array (pixel dimensions).
     cameraTransformations: a function that returns a map of camera index vs the 12-value 3D affine matrix describing
                            the transform to register the camera view onto the camera at index 0.
     fineTransformsPostROICrop: a list of the transform matrices to be applied after both the coarse transform and the ROI crop.
     params: a dictionary with all the necessary parameters for feature extraction, registration and deconvolution.
     roi: the min and max coordinates for cropping the coarsely registered volumes prior to registration and deconvolution.
     subrange: defaults to None. Can be a list specifying the indices of time points to deconvolve.
     camera_groups: the camera views to fuse and deconvolve together. Defaults to two: ((0, 1), (2, 3))
     fine_fwd: whether the fineTransformsPostROICrop were computed all-to-all, which optimizes the pose and produces direct transforms,
               or, when False, the fineTransformsPostROICrop were computed from 0 to 1, 0 to 2, and 0 to 3, so they are inverted.
     n_threads: number of threads to use. Zero (default) means as many as possible.
  """
    kernel = readFloats(kernel_filepath, [19, 19, 25], header=434)
    klb_loader = KLBLoader()

    def getCalibration(img_filename):
        return calibration

    # Regular expression pattern describing KLB files to include
    pattern = re.compile(r"^SPM00_TM\d+_CM(\d+)_CHN0[01]\.klb$")

    # Find all time point folders with pattern TM\d{6} (a TM followed by 6 digits)
    def iterTMs():
        """ Return a generator over dicts of 4 KLB file paths for each time point. """
        for dirname in sorted(os.listdir(srcDir)):
            if not dirname.startswith("TM00"):
                continue
            filepaths = {}
            tm_dir = os.path.join(srcDir, dirname)
            for filename in sorted(os.listdir(tm_dir)):
                r = re.match(pattern, filename)
                if r:
                    camera_index = int(r.groups()[0])
                    filepaths[camera_index] = os.path.join(tm_dir, filename)
            yield filepaths

    if subrange:
        indices = set(subrange)
        TMs = [tm for i, tm in enumerate(iterTMs()) if i in indices]
    else:
        TMs = list(iterTMs())

    # Validate folders
    for filepaths in TMs:
        if 4 != len(filepaths):
            print "A timepoint folder has problems: found %i KLB files in it instead of 4:" % len(filepaths)
            pprint(filepaths)
            print "Address the issues and rerun."
            return

    print "Will process these timepoints:",
    for i, TM in enumerate(TMs):
        print i
        pprint(TM)

    # All OK, submit all timepoint folders for registration and deconvolution

    # dimensions: all images from each camera have the same dimensions
    dimensions = [
        Intervals.dimensionsAsLongArray(klb_loader.get(filepath))
        for index, filepath in sorted(TMs[0].items(), key=itemgetter(0))
    ]

    cmTransforms = cameraTransformations(dimensions[0], dimensions[1],
                                         dimensions[2], dimensions[3],
                                         calibration)

    # Transforms apply to all time points equally
    #   If fine_fwd, the fine transform was forward.
    #   Otherwise, it was from CM00 to e.g. CM01, so backwards for CM01, needing an inversion.
    transforms = mergeTransforms(
        calibration, [cmTransforms[i] for i in sorted(cmTransforms.keys())],
        roi,
        fineTransformsPostROICrop,
        invert2=not fine_fwd)

    # Create target folder for storing deconvolved images
    if not os.path.exists(os.path.join(targetDir, "deconvolved")):
        os.mkdir(os.path.join(targetDir, "deconvolved"))

    # Transform kernel to each view
    matrices = fineTransformsPostROICrop

    # For the PSF kernel, transforms without the scaling up to isotropy
    # No need to account for the translation: the transformPSFKernelToView keeps the center point centered.
    PSF_kernels = [
        transformPSFKernelToView(kernel, affine3D(cmTransforms[i]))
        for i in xrange(4)
    ]
    PSF_kernels = [
        transformPSFKernelToView(k,
                                 affine3D(matrix).inverse())
        for k, matrix in izip(PSF_kernels, matrices)
    ]
    # TODO: if kernels are not ArrayImg, they should be made so.
    print "PSF_kernel[0]:", PSF_kernels[0], type(PSF_kernels[0])

    # DEBUG: write the kernels
    for index in [0, 1, 2, 3]:
        writeZip(PSF_kernels[index],
                 "/tmp/kernel" + str(index) + ".zip",
                 title="kernel" + str(index)).flush()

    # A converter from FloatType to UnsignedShortType
    output_converter = createConverter(FloatType, UnsignedShortType)

    target_interval = FinalInterval(
        [0, 0, 0], [maxC - minC for minC, maxC in izip(roi[0], roi[1])])

    exe = newFixedThreadPool(n_threads=n_threads)
    try:
        # Submit for registration + deconvolution
        # The registration uses 2 parallel threads, and deconvolution all possible available threads.
        # Cannot invoke more than one time point at a time because the deconvolution requires a lot of memory.
        for i, filepaths in enumerate(TMs):
            if Thread.currentThread().isInterrupted(): break
            syncPrint("Deconvolving time point %i with files:\n  %s" %
                      (i, "\n  ".join(sorted(filepaths.itervalues()))))
            deconvolveTimePoint(filepaths,
                                targetDir,
                                klb_loader,
                                transforms,
                                target_interval,
                                params,
                                PSF_kernels,
                                exe,
                                output_converter,
                                camera_groups=camera_groups)
    finally:
        exe.shutdown()  # Not accepting any more tasks, but letting currently executing tasks complete.
        # Wait until the last task (writing the last file) completes execution.
        exe.awaitTermination(5, TimeUnit.MINUTES)
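A sketch of how this entry point might be invoked; every path and parameter value below is a placeholder, not a value from the source:

# Hypothetical invocation (placeholder paths, calibration and subrange):
deconvolveTimePoints("/data/SPM00/",                # srcDir with TM\d+ subfolders
                     "/data/deconvolved-target/",   # targetDir
                     "/data/PSF-kernel.klb",        # kernel_filepath
                     [0.4, 0.4, 2.0],               # placeholder [x, y, z] calibration
                     cameraTransformations,         # as described in the docstring
                     fineTransformsPostROICrop,
                     params, roi,
                     subrange=range(0, 10),         # first ten time points only
                     n_threads=0)                   # all available threads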
Example #12
def multiviewDeconvolution(images, blockSizes, PSF_kernels, n_iterations, lambda_val=0.0006, weights=None,
                           filterBlocksForContent=False, PSF_type=PSFTYPE.INDEPENDENT, exe=None, printFn=syncPrint):
  """
  Apply Bayesian-based multi-view deconvolution to the list of images,
  returning the deconvolved image. Uses Stephan Preibisch's library,
  currently available with the BigStitcher Fiji update site.

  images: a list of images, registered and all with the same dimensions.
  blockSizes: how to chop up the volume of each image for parallel processing.
             When None, a single block with the image dimensions is used,
             plus half of the transformed kernel dimensions for that view.
  PSF_kernels: the images containing the point spread function for each input image. Requirement: the dimensions must be an odd number.
  n_iterations: the number of iterations for the deconvolution. A number between 10 and 50 is desirable. The more iterations, the higher the computational cost.
  lambda_val: default is 0.0006 as recommended by Preibisch.
  weights: a list of FloatType images with the weight for every pixel. If None, then all pixels get a value of 1.
  filterBlocksForContent: whether to check before processing a block if the block has any data in it. Default is False.
  PSF_type: defaults to PSFTYPE.INDEPENDENT.
  exe: a thread pool for concurrent execution. If None, a new one is created, using as many threads as CPUs are available.
  printFn: the function to use for printing error messages. Defaults to syncPrint (thread-safe access to the built-in `print` function).

  Returns an imglib2 ArrayImg, or None if something went wrong.
  """

  mvd_exe = exe
  if not exe:
    mvd_exe = newFixedThreadPool() # as many threads as CPUs

  try:
    mvd_weights = weights
    if not weights:
      mvd_weights = repeat(Views.interval(ConstantRandomAccessible(FloatType(1), images[0].numDimensions()), FinalInterval(images[0])))

    for i, PSF_kernel in enumerate(PSF_kernels):
      for d in xrange(PSF_kernel.numDimensions()):
        if 0 == PSF_kernel.dimension(d) % 2:
          printFn("for image at index %i, PSF kernel dimension %i is not odd." % (i, d))
          return None

    if not blockSizes:
      # One block per image: whole image dimensions plus twice the largest PSF kernel dimension
      kernel_max = int(max(PSF_kernel.dimension(d)
                           for PSF_kernel in PSF_kernels
                           for d in xrange(PSF_kernel.numDimensions())) * 2)
      syncPrint("kernel max dimension * 2: %i" % kernel_max)
      blockSizes = []
      for image in images:
        blockSizes.append([image.dimension(d) + kernel_max
                           for d in xrange(image.numDimensions())])
        syncPrint("blockSize:" + str(blockSizes[-1]))

    cptf = createFactory(mvd_exe, lambda_val, blockSizes[0]) # TODO which blockSize to give here?
    filterBlocksForContent = False # Run once with True, none were removed
    dviews = [DeconView(mvd_exe, img, weight, PSF_kernel, PSF_type, blockSize, 1, filterBlocksForContent)
              for img, blockSize, weight, PSF_kernel in izip(images, blockSizes, mvd_weights, PSF_kernels)]
    decon = MultiViewDeconvolutionSeq(DeconViews(dviews, mvd_exe), n_iterations, PsiInitBlurredFusedFactory(), cptf, ArrayImgFactory(FloatType()))
    if not decon.initWasSuccessful():
      printFn("Something went wrong initializing MultiViewDeconvolution")
      return None
    else:
      decon.runIterations()
      return decon.getPSI()
  finally:
    # Only shut down the thread pool if it was created here
    if not exe:
      mvd_exe.shutdownNow()
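A minimal usage sketch for the function above (assuming images and PSF_kernels are registered, same-dimension imglib2 images, and that IL is ImageJFunctions as imported in other examples here):

# Hypothetical call: 30 iterations, default lambda_val, auto-sized blocks
deconvolved = multiviewDeconvolution(images, None, PSF_kernels, 30)
if deconvolved is not None:
  IL.wrap(deconvolved, "deconvolved").show()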
        True).cellDimensions(cell_dimensions))

# View the image as an ImageJ ImagePlus with an underlying VirtualStack
IL.wrap(cachedCellImg, "sections").show()

# Now show a UI that enables moving a window around a data set
from net.imglib2.view import Views
from net.imglib2 import FinalInterval
from jarray import array
from java.awt.event import KeyAdapter, KeyEvent
from net.imglib2.img.display.imagej import ImageJVirtualStack

mins = array([1307, 448, 0], 'l')
maxs = array([1307 + 976 - 1, 448 + 732 - 1, len(filepaths) - 1], 'l')
imgE = Views.extendZero(cachedCellImg)
crop = Views.interval(imgE, FinalInterval(mins, maxs))
imp = IL.wrap(crop, "sections crop")  # ImagePlus
imp.show()

# Once shown, a reference to the ij.gui.ImageWindow exists
win = imp.getWindow()

# Remove and store key listeners from the ImageCanvas
kls = win.getCanvas().getKeyListeners()
for kl in kls:
    win.getCanvas().removeKeyListener(kl)

stack = imp.getStack()  # a net.imglib2.img.display.imagej.ImageJVirtualStackUnsignedByte
fsource = ImageJVirtualStack.getDeclaredField("source")
fsource.setAccessible(True)
Example #14
# Transform the kernel for each view
kernels = [kernel,
           transformPSFKernelToView(kernel, affine3D(matrices["imgB0-imgB1"])),
           transformPSFKernelToView(kernel, affine3D(matrices["imgB0-imgB2"])),
           transformPSFKernelToView(kernel, affine3D(matrices["imgB0-imgB3"]))]

def deconvolve(images, kernels, name, n_iterations):
  # Bayesian-based multi-view deconvolution
  exe = newFixedThreadPool(Runtime.getRuntime().availableProcessors() - 2)
  try:
    mylambda = 0.0006
    blockSize = Intervals.dimensionsAsIntArray(images[0]) # [128, 128, 128]
    cptf = ComputeBlockSeqThreadCPUFactory(exe, mylambda, blockSize, ArrayImgFactory(FloatType()))
    psiInitFactory = PsiInitBlurredFusedFactory() # PsiInitAvgPreciseFactory() fails with type mismatch: UnsignedByteType (?) vs FloatType
    weight = Views.interval(ConstantRandomAccessible(FloatType(1), images[0].numDimensions()), FinalInterval(images[0]))
    filterBlocksForContent = False # Run once with True, none were removed
    decon_views = DeconViews([DeconView(exe, img, weight, kernel, PSFTYPE.INDEPENDENT, blockSize, 1, filterBlocksForContent)
                              for img in images],
                             exe)
    #n_iterations = 10
    decon = MultiViewDeconvolutionSeq(decon_views, n_iterations, psiInitFactory, cptf, ArrayImgFactory(FloatType()))
    if not decon.initWasSuccessful():
      print "Something went wrong initializing MultiViewDeconvolution"
    else:
      decon.runIterations()
      img = decon.getPSI()
      imp = IL.wrap(img, name + "_deconvolved_" + str(n_iterations) + "_iterations")
      imp.show()
      FileSaver(imp).saveAsZip("/home/albert/lab/Raghav-IsoView-PSF/" + imp.getTitle() + ".zip")
  finally:
    exe.shutdown()  # assumed cleanup; the original snippet is cut off at this finally block
Example #15
from net.imagej.ops.convert import ConvertPixCopy
from net.imglib2.meta import ImgPlus
from net.imglib2.img.display.imagej import ImageJFunctions
from ij.plugin.filter import BackgroundSubtracter
from fiji.plugin.trackmate.detection import DetectionUtils
from net.imagej.ops.threshold import Otsu

inputDirectory = '/home/bnorthan/Brian2014/Projects/RogueImageJPlugins/SpotDetection/Images/'
inputName = 'B013-D0-L-UV_cropped2.tif'

dataset = data.open(inputDirectory + inputName)
display.createDisplay(dataset.getName(), dataset)

dimensions2D = array([dataset.dimension(0), dataset.dimension(1)], 'l')
cropIntervalRed = FinalInterval(
    array([0, 0, 0], 'l'),
    array([dataset.dimension(0) - 1,
           dataset.dimension(1) - 1, 0], 'l'))
cropIntervalGreen = FinalInterval(
    array([0, 0, 1], 'l'),
    array([dataset.dimension(0) - 1,
           dataset.dimension(1) - 1, 1], 'l'))

red = ops.crop(cropIntervalRed, None, dataset.getImgPlus())
green = ops.crop(cropIntervalGreen, None, dataset.getImgPlus())

display.createDisplay("red", data.create(red))
display.createDisplay("green", data.create(green))

red32 = ImgPlus(ops.create(dimensions2D, FloatType()))
ops.convert(red32, red, ConvertPixCopy())
Example #16
from ij import IJ
from net.imglib2.img.display.imagej import ImageJFunctions as IJF
from net.imglib2.view import Views

from jarray import zeros

from net.imglib2 import FinalInterval
imp = IJ.getImage()
image = IJF.wrap(imp)

min = zeros(image.numDimensions(), 'l')
max = zeros(image.numDimensions(), 'l')
min[image.numDimensions() - 1] = 0
max[image.numDimensions() - 1] = image.dimension(image.numDimensions() - 1) - 1
min[image.numDimensions() - 2] = 0
max[image.numDimensions() - 2] = image.dimension(image.numDimensions() - 2) - 1
for d in range(0, image.numDimensions() - 2):

    min[d] = -10
    max[d] = image.dimension(d) + 10

interval = FinalInterval(min, max)
print(interval)
infinite = Views.extendZero(image)
IJF.show(Views.interval(infinite, interval))
                name,
                block_size,
                gzip_compression_level=gzip_compression,
                n_threads=n5_threads)
    finally:
        preloader.shutdown()


# Show only a cropped middle area
x0 = 3 * dimensions[0] / 8
y0 = 3 * dimensions[1] / 8
x1 = x0 + 2 * dimensions[0] / 8 - 1
y1 = y0 + 2 * dimensions[1] / 8 - 1
print "Crop to: x=%i y=%i width=%i height=%i" % (x0, y0, x1 - x0 + 1,
                                                 y1 - y0 + 1)
viewAligned(filepaths, csvDir, params, paramsTileConfiguration, dimensions,
            FinalInterval([x0, y0], [x1, y1]))

# Write the whole volume in N5 format
name = srcDir.split('/')[-2]
exportDir = "/groups/cardona/cardonalab/FIBSEM_L1116_exports/n5/"
# Export ROI:
# x=864 y=264 width=15312 h=17424
interval = FinalInterval([864, 264], [864 + 15312 - 1, 264 + 17424 - 1])

# Don't use compression: less than 5% gain, at considerable processing cost.
# Expects matrices.csv file to exist already
#export8bitN5(filepaths, dimensions, loadMatrices("matrices", csvDir),
#             name, exportDir, interval, gzip_compression=0, block_size=[256, 256, 64], # ~4 MB per block
#             copy_threads=1, n5_threads=0)
def poreDetectionUV(inputImp, inputDataset, inputRoi, ops, data, display, detectionParameters):
	
	title =  inputImp.getTitle()
	title=title.replace('UV', 'SD')
	
	print title
	
	#trueColorImp= WindowManager.getImage(title)
	#print type( trueColorImp)
	
	# calculate area of roi
	stats=inputImp.getStatistics()
	inputRoiArea=stats.area
	
	print inputRoi
	
	# get the bounding box of the active roi
	inputRec = inputRoi.getBounds()
	x1=long(inputRec.getX())
	y1=long(inputRec.getY())
	x2=x1+long(inputRec.getWidth())-1
	y2=y1+long(inputRec.getHeight())-1

	print x1
	print y1
	print x2
	print y2
	
	# crop the roi
	interval=FinalInterval( array([x1, y1, 0], 'l'), array([x2, y2, 2], 'l') )
	cropped=ops.crop(interval, None, inputDataset.getImgPlus() ) 
	
	datacropped=data.create(cropped)
	display.createDisplay("cropped", datacropped)
	croppedPlus=IJ.getImage()
	
	duplicator=Duplicator()
	substackMaker=SubstackMaker()
	
	# duplicate the roi
	duplicate=duplicator.run(croppedPlus)
	#duplicate.show()
	
	# convert duplicate of roi to HSB and get brightness
	IJ.run(duplicate, "HSB Stack", "")
	brightnessPlus=substackMaker.makeSubstack(duplicate, "3-3")
	brightness=ImgPlus(ImageJFunctions.wrapByte(brightnessPlus))
	brightnessPlus.setTitle("Brightness")
	#brightnessPlus.show()
	
	# make another duplicate, split channels and get red
	duplicate=duplicator.run(croppedPlus)
	channels=ChannelSplitter().split(duplicate)
	redPlus=channels[0]
	red=ImgPlus(ImageJFunctions.wrapByte(redPlus))
	redPlus.show()
	
	# convert to lab
	IJ.run(croppedPlus, "Color Transformer", "colour=Lab")
	IJ.selectWindow('Lab')
	labPlus=IJ.getImage()
	
	# get the A channel
	APlus=substackMaker.makeSubstack(labPlus, "2-2")
	APlus.setTitle('A')
	APlus.show()
	APlus.getProcessor().resetMinAndMax()
	APlus.updateAndDraw()
	AThresholded=threshold(APlus, -10, 50)
	
	# get the B channel
	BPlus=substackMaker.makeSubstack(labPlus, "3-3")
	BPlus.setTitle('B')
	BPlus.show()
	BPlus.getProcessor().resetMinAndMax()
	BPlus.updateAndDraw()
	BThresholded=threshold(BPlus, -10, 50)
	
	# AND the Athreshold and Bthreshold to get a map of the red pixels
	ic = ImageCalculator()
	redMask = ic.run("AND create", AThresholded, BThresholded)
	IJ.run(redMask, "Divide...", "value=255")
	#redMask.show()
	
	labPlus.close()
	
	# threshold the spots from the red channel
	thresholdedred=SpotDetectionGray(red, data, display, ops, False)
	display.createDisplay("thresholdedred", data.create(thresholdedred))
	impthresholdedred = ImageJFunctions.wrap(thresholdedred, "wrapped")
	
	# threshold the spots from the brightness channel
	thresholded=SpotDetectionGray(brightness, data, display, ops, False)
	display.createDisplay("thresholded", data.create(thresholded))
	impthresholded=ImageJFunctions.wrap(thresholded, "wrapped")
	
	# OR the thresholding results from the red and brightness channels
	impthresholded = ic.run("OR create", impthresholded, impthresholdedred)
	
	# convert to mask
	Prefs.blackBackground = True
	IJ.run(impthresholded, "Convert to Mask", "")
	
	# clear the region outside the roi
	clone=inputRoi.clone()
	clone.setLocation(0,0)
	Utility.clearOutsideRoi(impthresholded, clone)
	
	# create a hidden roi manager
	roim = RoiManager(True)
	
	# count the particles
	countParticles(impthresholded, roim, detectionParameters.minSize, detectionParameters.maxSize, detectionParameters.minCircularity, detectionParameters.maxCircularity)
	
	# define a function to determine the percentage of pixels that are foreground in a binary image
	# inputs:
	#    imp: binary image, 0=background, 1=foreground
	#    roi: an roi
	def isRed(imp, roi):
		stats = imp.getStatistics()
	
		if (stats.mean>detectionParameters.redPercentage): return True
		else: return False
	
	def notRed(imp, roi):
		stats = imp.getStatistics()
	
		if (stats.mean>detectionParameters.redPercentage): return False
		else: return True

	allList=[]

	for roi in roim.getRoisAsArray():
		allList.append(roi.clone())
	
	# count particles that are red
	redList=CountParticles.filterParticlesWithFunction(redMask, allList, isRed)
	# count particles that are not red
	blueList=CountParticles.filterParticlesWithFunction(redMask, allList, notRed)

	print "Total particles: "+str(len(allList))
	print "Filtered particles: "+str(len(redList))

	# for each roi add the offset such that the roi is positioned in the correct location for the 
	# original image
	[roi.setLocation(roi.getXBase()+x1, roi.getYBase()+y1) for roi in allList]
	
	# create an overlay and add the rois
	overlay1=Overlay()
		
	inputRoi.setStrokeColor(Color.green)
	overlay1.add(inputRoi)
	[CountParticles.addParticleToOverlay(roi, overlay1, Color.red) for roi in redList]
	[CountParticles.addParticleToOverlay(roi, overlay1, Color.cyan) for roi in blueList]
	
	def drawAllRoisOnImage(imp, mainRoi, redList, blueList):
		imp.getProcessor().setColor(Color.green)
		IJ.run(imp, "Line Width...", "line=3")
		imp.getProcessor().draw(inputRoi)
		imp.updateAndDraw()
		IJ.run(imp, "Line Width...", "line=1")
		[CountParticles.drawParticleOnImage(imp, roi, Color.magenta) for roi in redList]
		[CountParticles.drawParticleOnImage(imp, roi, Color.green) for roi in blueList]
		imp.updateAndDraw()
	
	drawAllRoisOnImage(inputImp, inputRoi, redList, blueList)
	#drawAllRoisOnImage(trueColorImp, inputRoi, redList, blueList)
	
	# draw overlay
	#inputImp.setOverlay(overlay1)
	#inputImp.updateAndDraw()
	
	statsdict=CountParticles.calculateParticleStats(APlus, BPlus, redMask, roim.getRoisAsArray())
	
	print inputRoiArea

	areas=statsdict['Areas']
	poreArea=0
	for area in areas:
		poreArea=poreArea+area

	ATotal=0
	ALevels=statsdict['ALevel']
	for A in ALevels:
		ATotal=ATotal+A

	AAverage=ATotal/len(ALevels)

	BTotal=0
	BLevels=statsdict['BLevel']
	for B in BLevels:
		BTotal=BTotal+B

	BAverage=BTotal/len(BLevels)

	redTotal=0
	redPercentages=statsdict['redPercentage']
	for red in redPercentages:
		redTotal=redTotal+red

	redAverage=redTotal/len(redPercentages)
	pixwidth=inputImp.getCalibration().pixelWidth

	inputRoiArea=inputRoiArea/(pixwidth*pixwidth)
	
	print str(len(allList))+" "+str(len(redList))+" "+str(len(blueList))+" "+str(poreArea/inputRoiArea)+" "+str(redAverage)
                vectors[i][k] = v.getRealDouble()
        for vector in vectors:
            vector[-1] = class_index
            instances.add(DenseInstance(1.0, vector))


# pick pixels on the black line for class 0 (membrane), 4x4
populateInstances(training_data, synth_imgs_membrane, 0, [14, 14], [17, 17])
# pick pixels in the very center for class 1 (mitochondrial boundary), 2x2
populateInstances(training_data, synth_imgs_mit_boundary, 1, [15, 15],
                  [16, 16])

# Populate the training data for class "other" from two images
# entirely filled with background or foreground plus noise
target = ArrayImgs.floats([width, height])
interval = FinalInterval([14, 14], [17, 17])
n_samples = Intervals.numElements(interval)
for ci, v in enumerate([fillValue, backgroundValue]):
    for _ in xrange(training_data.size() /
                    4):  # the other 2/4 are the membrane and mit boundary
        other = syntheticEM([], width, height, 0, v, noise=True)
        vectors = [zeros(len(attributes), 'd') for _ in xrange(n_samples)]
        for k, op in enumerate(filterBank(IL.wrap(other),
                                          sumType=DoubleType())):
            imgOp = compute(op).into(target)
            for i, v in enumerate(Views.interval(imgOp, interval)):
                vectors[i][k] = v.getRealDouble()
        for vector in vectors:
            vector[-1] = ci + 2  # class index
            training_data.add(DenseInstance(1.0, vector))
Example #20
import os, csv
from itertools import islice, imap, izip
from lib.synthetic import virtualPointsRAI  # helper from the same scripts repository, as in the readN5 example below
from net.imglib2 import FinalInterval, RealPoint
from net.imglib2.view import Views
from net.imglib2.type.numeric.integer import UnsignedByteType
from net.imglib2.img.display.imagej import ImageJVirtualStackUnsignedByte
from ij import ImagePlus, CompositeImage
from java.util import TreeMap


baseDir = "/home/albert/shares/cardonalab/Albert/2017-05-10_1018/"
# A file whose header has the points as "x::y::z"
csvFilename = "deconvolved/CM00-CM01_deltaFoF.csv"

somaDiameter = 8 # pixels
radius = somaDiameter / 2.0
interval = FinalInterval([406, 465, 325])
minimum = -1.0 # actual min: -5.0
maximum = 2.0 # actual max: 9.4
span = maximum - minimum
range_max = 255 # should be 255

def to8bitRange(values):
  # Ensure the value is inside [minimum, maximum] range, then rezero by subtracting minimum, and divide by span (maximum - minimum)
  return [UnsignedByteType(int(((min(max(val, minimum), maximum) - minimum) / span) * range_max)) for val in values]
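A worked instance of the mapping above, with minimum=-1.0, maximum=2.0 and span=3.0 as defined earlier: a value of 0.5 is clamped to 0.5, rezeroed to 1.5, normalized to 0.5, and scaled to int(0.5 * 255) = 127.

# For example (with the constants defined above):
# to8bitRange([0.5])  -> [UnsignedByteType(127)]
# to8bitRange([-9.0]) -> clamped to minimum -> [UnsignedByteType(0)]
# to8bitRange([9.0])  -> clamped to maximum -> [UnsignedByteType(255)]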

def withVirtualStack(time_window=None, subsample=None):
  with open(os.path.join(baseDir, csvFilename), 'r') as csvfile:
    reader = csv.reader(csvfile, delimiter=',', quotechar='"')
    header = reader.next()
    peaks = [RealPoint.wrap(imap(float, peak.split('::'))) for peak in islice(header, 1, None)]
    frames = [virtualPointsRAI(peaks, radius, interval, inside=to8bitRange(map(float, islice(row, 1, None)))) for row in reader]
Example #21
          (x0, y0, x1 - x0 + 1, y1 - y0 + 1))

# Adjust image loader as needed:
if filepaths[0].endswith(".dat"):
    syncPrint("Using io.readFIBSEMdat to read image files.")
    loadFn = lambda filepath: readFIBSEMdat(
        filepath, channel_index=0, asImagePlus=True)[0]
    setupImageLoader(loader=loadFn)
else:
    loadFn = IJ.openImage
    syncPrint("Using IJ.openImage to read image files.")

# Triggers the whole alignment and ends by showing a virtual stack of the aligned sections.
# Crash-tolerant: can be restarted anytime and will resume from where it left off.
viewAligned(filepaths, csvDir, params, paramsSIFT, paramsTileConfiguration,
            dimensions, FinalInterval([x0, y0], [x1, y1]))

# When the alignment is good enough, then export as N5 by swapping "False" for "True" below:

if False:
    # Write the whole volume in N5 format
    name = srcDir.split('/')[-2]
    exportDir = os.path.join(tgtDir, "n5")
    # Export ROI:
    # x=864 y=264 width=15312 h=17424
    interval = FinalInterval([0, 0], [dimensions[0] - 1, dimensions[1] - 1])

    export8bitN5(
        filepaths,
        loadFn,
        dimensions,
Example #22
from net.imglib2.converter import Converters, ColorChannelOrder
from net.imglib2.view import Views
from net.imglib2.img.display.imagej import ImageJFunctions as IL
from net.imglib2.realtransform import RealViews, AffineTransform2D
from net.imglib2.img.array import ArrayImgs
from net.imglib2.util import Intervals, ImgUtil
from net.imglib2.interpolation.randomaccess import NLinearInterpolatorFactory

img1 = Views.dropSingletonDimensions(IL.wrap(imp))
transform = AffineTransform2D()
transform.set(scale, 0, 0, 0, scale, 0)

# Origins and dimensions (hence, interval) of the target image
interval2 = FinalInterval([
    int(img1.dimension(0) * scale),
    int(img1.dimension(1) * scale),
    img1.dimension(2)
])
# Interval of a single stack slice of the target image
sliceInterval = FinalInterval([interval2.dimension(0), interval2.dimension(1)])

slices2 = []
for index in xrange(img1.dimension(2)):
    # One single 2D RGB slice
    imgSlice1 = Views.hyperSlice(img1, 2, index)
    # Views of the 3 color channels, as extended and interpolatable
    channels = [
        Views.interpolate(
            Views.extendZero(Converters.argbChannel(imgSlice1, i)),
            NLinearInterpolatorFactory()) for i in [1, 2, 3]
    ]
Example #23
fov = img3D # whole


# UNCOMMENT to open the whole stack
#imp = showStack(fov, title=name)
#imp.show()


# Directly open section 8031 and only for an ROI over a bit of the brain
x = 11227
y = 7897
width = 3240
height = 3320
"""
fov = Views.interval(img3D, FinalInterval([x, y, 0], # min coords
                                          [x + width - 1, # max coords
                                           y + height - 1,
                                           img3D.dimension(2) - 1]))
imp = wrap(fov, title="Small ROI in the Groucho brain", n_channels=1)
imp.setPosition(8031) # in the middle of a 64-section block
imp.show()
"""

# Better: navigatable with arrows in X,Y and < > in Z
impN = navigate2DROI(img3D,
                     FinalInterval([x, y], # min coords
                                   [x + width - 1, # max coords
                                    y + height - 1]),
                     indexZ=9718) # section shown

    iterableROI = Regions.iterable(boundedDiscreteROI)
    return Regions.sample(iterableROI, image)


from ij import IJ
from loci.plugins import BF
from net.imglib2 import FinalInterval

w = image.dimension(0)
h = image.dimension(1)
ch = image.dimension(2)
d = image.dimension(3)

# CROP EACH CHANNEL
HRP, CH2 = [
    ops.run("crop", image, FinalInterval.createMinSize(0, 0, i, 0, w, h, 1, d),
            True) for i in [HRP_channel, CH2_channel]
]

### BACKGROUND SUBTRACTION

# Threshold HRP
HRP_m = ops.threshold().otsu(HRP)
HRP_m_f = ops.convert().float32(HRP_m)


# Subtract background
def subBkg(im):
    im_m_f = ops.convert().float32(ops.threshold().otsu(im))
    out = ops.create().img(im_m_f)
    im_m_f = ops.threshold().otsu(
Example #25
File: pyramid.py Project: mwinding/scripts
def pyramid(img,
            top_level,
            min_width=32,
            ViewOutOfBounds=Views.extendBorder,
            interpolation_factory=NLinearInterpolatorFactory()):
  """
  Create an image pyramid as interpolated scaled views of the provided img.
  """
  imgR = Views.interpolate(ViewOutOfBounds(img), interpolation_factory)

  # Create levels of a pyramid as interpolated views
  width = img.dimension(0)
  pyramid = [img]
  scale = 1.0
  level_index = 1
  while width > min_width and level_index <= top_level:
    scale /= 2.0
    width /= 2
    s = [scale for d in xrange(img.numDimensions())]
    scaled = Views.interval(RealViews.transform(imgR, Scale(s)),
                            FinalInterval([int(img.dimension(d) * scale)
                                           for d in xrange(img.numDimensions())]))
    pyramid.append(scaled)
    level_index += 1 # for next iteration
  
  return pyramid
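A minimal usage sketch (assuming img is an imglib2 RandomAccessibleInterval, with Views, RealViews, Scale and NLinearInterpolatorFactory imported as the function requires):

# Hypothetical usage: build up to 4 scaled levels and print their dimensions
levels = pyramid(img, 4)
for i, level in enumerate(levels):
  print i, [level.dimension(d) for d in xrange(level.numDimensions())]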


# TODO pyramidGauss a la Saalfeld
    print "Dimensions are correct."
else:
    print "Mismatching dimensions!"
imp2.setDimensions(nChannels, nSlices, nFrames)
com2 = CompositeImage(imp2, CompositeImage.COMPOSITE)
com2.show()

# Visualization 3: two-channels with the BigDataViewer

from bdv.util import BdvFunctions, Bdv
from net.imglib2.view import Views
from net.imglib2 import FinalInterval

# Open a new BigDataViewer window with the 4D image data
bdv = BdvFunctions.show(vol4d, "vol4d")


# Create a bounded 3D volume view from a KDTree
def as3DVolume(kdtree, dimensions3d):
    sd = SpheresData(kdtree, radius, inside, outside)
    vol3d = Views.interval(Views.raster(sd), dimensions3d)
    return vol3d


# Define a 4D volume as a sequence of generative Spheres 3D volumes
dims3d = FinalInterval(map(vol4d.dimension, xrange(3)))
spheres4d = Views.stack(
    [as3DVolume(kdtrees[ti], dims3d) for ti in sorted(kdtrees.iterkeys())])

BdvFunctions.show(spheres4d, "spheres4d", Bdv.options().addTo(bdv))
Example #27
inputDataset = Utility.getDatasetByName(data, inputImp.getTitle())

truecolor1 = Duplicator().run(inputImp)
truecolor1.show()

# get the roi that will be processed
inputRoi = inputImp.getRoi().clone()

inputRec = inputRoi.getBounds()
x1 = long(inputRec.getX())
y1 = long(inputRec.getY())
x2 = x1 + long(inputRec.getWidth()) - 1
y2 = y1 + long(inputRec.getHeight()) - 1

# crop the roi
interval = FinalInterval(array([x1, y1, 0], 'l'), array([x2, y2, 2], 'l'))
cropped = ops.crop(interval, None, inputDataset.getImgPlus())

dataset = data.create(cropped)
display.createDisplay("cropped", dataset)

dimensions2D = array([dataset.dimension(0), dataset.dimension(1)], 'l')
cropIntervalBlue = FinalInterval(
    array([0, 0, 2], 'l'),
    array([dataset.dimension(0) - 1,
           dataset.dimension(1) - 1, 2], 'l'))
blue = ops.crop(cropIntervalBlue, None, dataset.getImgPlus())

display.createDisplay("blue", data.create(blue))

#blue32=ImgPlus( ops.create( dimensions2D, FloatType()) )
Example #28
import sys
sys.path.append("/home/albert/lab/scripts/python/imagej/IsoView-GCaMP/")
from lib.io import readN5
from lib.dogpeaks import createDoG
from lib.synthetic import virtualPointsRAI
from lib.ui import showStack
from net.imglib2 import RealPoint, FinalInterval


points = [RealPoint.wrap([255, 255, 255]),
          RealPoint.wrap([255, 255, 0]),
          RealPoint.wrap([128, 384, 128])]

rai = virtualPointsRAI(points, 70, FinalInterval([512, 512, 512]))
imp = showStack(rai, title="test virtualPointsRAI")
#  c.next().set(random() * 65535)
ImgMath.compute(ImgMath.number(17)).into(imgF)
ImgMath.compute(ImgMath.img(imgF)).into(imgU)
aff = AffineTransform3D()
"""
aff.set(1, 0, 0, 0,
        0, 1, 0, 0,
        0, 0, 1, 0)
"""
aff.set(*[
    0.9999949529841275, -0.0031770224721305684, 2.3118912942710207e-05,
    -1.6032353998500826, 0.003177032139125933, 0.999994860398559,
    -0.00043086338151948394, -0.4401520585103873, -2.1749931475206362e-05,
    0.0004309346564745992, 0.9999999069111268, 6.543187040788581
])
interval = FinalInterval([0, 0, 0], [d - 1 for d in dimensions])


def test(img):
    imgT = TransformView.transformView(img, aff, interval,
                                       MultiViewDeconvolution.minValueImg,
                                       MultiViewDeconvolution.outsideValueImg,
                                       1)  # 1: linear interpolation
    imgA = ArrayImgs.floats(dimensions)
    ImgUtil.copy(ImgView.wrap(imgT, imgA.factory()), imgA)


print "Start test:"
timeit(3, test, imgU)
timeit(3, test, imgF)
Example #30
def poreDetectionUV(inputImp, inputDataset, inputRoi, ops, data, display,
                    detectionParameters):

    # set calibration
    detectionParameters.setCalibration(inputImp)

    # calculate area of roi
    stats = inputImp.getStatistics()
    inputRoiArea = stats.area

    # get the bounding box of the active roi
    inputRec = inputRoi.getBounds()
    x1 = long(inputRec.getX())
    y1 = long(inputRec.getY())
    x2 = x1 + long(inputRec.getWidth()) - 1
    y2 = y1 + long(inputRec.getHeight()) - 1

    # crop the roi
    interval = FinalInterval(array([x1, y1, 0], 'l'), array([x2, y2, 2], 'l'))
    #cropped=ops.image().crop(interval, None, inputDataset.getImgPlus() )
    cropped = ops.image().crop(inputDataset.getImgPlus(), interval)

    datacropped = data.create(cropped)
    display.createDisplay("cropped", datacropped)
    croppedPlus = IJ.getImage()

    # instantiate the duplicator and the substackmaker classes
    duplicator = Duplicator()
    substackMaker = SubstackMaker()

    # duplicate the roi
    duplicate = duplicator.run(croppedPlus)

    # convert duplicate of roi to HSB and get brightness
    IJ.run(duplicate, "HSB Stack", "")
    brightnessPlus = substackMaker.makeSubstack(duplicate, "3-3")
    brightness = ImgPlus(ImageJFunctions.wrapByte(brightnessPlus))
    brightnessPlus.setTitle("Brightness")
    #brightnessPlus.show()

    # make another duplicate, split channels and get red
    duplicate = duplicator.run(croppedPlus)
    channels = ChannelSplitter().split(duplicate)
    redPlus = channels[0]
    red = ImgPlus(ImageJFunctions.wrapByte(redPlus))

    # convert to lab
    IJ.run(croppedPlus, "Color Transformer", "colour=Lab")
    IJ.selectWindow('Lab')
    labPlus = IJ.getImage()

    croppedPlus.changes = False
    croppedPlus.close()

    # get the A channel
    APlus = substackMaker.makeSubstack(labPlus, "2-2")
    APlus.setTitle('A')
    #APlus.show()
    APlus.getProcessor().resetMinAndMax()
    #APlus.updateAndDraw()
    AThresholded = threshold(APlus, -10, 50)

    # get the B channel
    BPlus = substackMaker.makeSubstack(labPlus, "3-3")
    BPlus.setTitle('B')
    #BPlus.show()
    BPlus.getProcessor().resetMinAndMax()
    #BPlus.updateAndDraw()
    BThresholded = threshold(BPlus, -10, 50)

    # AND the Athreshold and Bthreshold to get a map of the red pixels
    ic = ImageCalculator()
    redMask = ic.run("AND create", AThresholded, BThresholded)
    IJ.run(redMask, "Divide...", "value=255")

    labPlus.close()

    fast = True

    # threshold the spots from the red channel
    if (fast == False):
        thresholdedred = SpotDetectionGray(red, data, display, ops, "triangle")
        impthresholdedred = ImageJFunctions.wrap(thresholdedred, "wrapped")
    else:
        impthresholdedred = SpotDetection2(redPlus)

    # threshold the spots from the brightness channel
    if (fast == False):
        thresholded = SpotDetectionGray(brightness, data, display, ops,
                                        "triangle")
        impthresholded = ImageJFunctions.wrap(thresholded, "wrapped")
    else:
        impthresholded = SpotDetection2(brightnessPlus)

    # or the thresholding results from red and brightness channel
    impthresholded = ic.run("OR create", impthresholded, impthresholdedred)

    roim = RoiManager(True)

    # convert to mask
    Prefs.blackBackground = True
    IJ.run(impthresholded, "Convert to Mask", "")

    def isRed(imp, roi):
        stats = imp.getStatistics()

        if (stats.mean > detectionParameters.porphyrinRedPercentage):
            return True
        else:
            return False

    def notRed(imp, roi):
        stats = imp.getStatistics()

        if (stats.mean > detectionParameters.porphyrinRedPercentage):
            return False
        else:
            return True

    roiClone = inputRoi.clone()
    roiClone.setLocation(0, 0)
    Utility.clearOutsideRoi(impthresholded, roiClone)

    impthresholded.show()

    countParticles(impthresholded, roim, detectionParameters.porphyrinMinSize, detectionParameters.porphyrinMaxSize, \
     detectionParameters.porphyrinMinCircularity, detectionParameters.porphyrinMaxCircularity)

    uvPoreList = []
    for roi in roim.getRoisAsArray():
        uvPoreList.append(roi.clone())

    #allList=uvPoreList+closedPoresList+openPoresList

    # count particles that are porphyrins (red)
    porphyrinList = CountParticles.filterParticlesWithFunction(
        redMask, uvPoreList, isRed)
    # count particles that are visible on uv but not porphyrins
    sebumList = CountParticles.filterParticlesWithFunction(
        redMask, uvPoreList, notRed)

    # for each roi add the offset such that the roi is positioned in the correct location for the
    # original image
    [
        roi.setLocation(roi.getXBase() + x1,
                        roi.getYBase() + y1) for roi in uvPoreList
    ]

    # draw the ROIs on to the image
    inputImp.getProcessor().setColor(Color.green)
    IJ.run(inputImp, "Line Width...", "line=3")
    inputImp.getProcessor().draw(inputRoi)
    IJ.run(inputImp, "Line Width...", "line=1")
    [
        CountParticles.drawParticleOnImage(inputImp, roi, Color.magenta)
        for roi in porphyrinList
    ]
    [
        CountParticles.drawParticleOnImage(inputImp, roi, Color.green)
        for roi in sebumList
    ]
    inputImp.updateAndDraw()

    # calculate stats for the UV visible particles
    detectionParameters.setCalibration(APlus)
    statsDictUV = CountParticles.calculateParticleStatsUV(
        APlus, BPlus, redMask, roim.getRoisAsArray())

    totalUVPoreArea = 0
    for area in statsDictUV['Areas']:
        totalUVPoreArea = totalUVPoreArea + area
    averageUVPoreArea = totalUVPoreArea / len(statsDictUV['Areas'])

    poreDiameter = 0
    for diameter in statsDictUV['Diameters']:
        poreDiameter = poreDiameter + diameter
    poreDiameter = poreDiameter / len(statsDictUV['Diameters'])

    redTotal = 0

    for red in statsDictUV['redPercentage']:
        redTotal = redTotal + red
    redAverage = redTotal / len(statsDictUV['redPercentage'])

    statslist = [len(porphyrinList), 100 * redAverage]
    statsheader = [Messages.Porphyrins, Messages.PercentageRedPixels]

    print("Roi Area: " + str(inputRoiArea))
    print("Total Pore Area: " + str(totalUVPoreArea))
    print("Average Pore Area: " + str(averageUVPoreArea))
    print str(len(uvPoreList)) + " " + str(len(porphyrinList)) + " " + str(
        len(sebumList)) + " " + str(
            100 * totalUVPoreArea / inputRoiArea) + " " + str(100 * redAverage)
    print "cp min circularity" + str(
        detectionParameters.closedPoresMinCircularity) + ":" + str(
            detectionParameters.closedPoresMinSize)

    # close the thresholded image
    impthresholded.changes = False
    impthresholded.close()

    return uvPoreList, statslist, statsheader