def getShiftFromFFTs(fft1, fft2, v1, v2, minOverlap, nHighestPeaks):
    pcm = PhaseCorrelation2.calculatePCMInPlace(fft1, fft2,
                                                ArrayImgFactory(FloatType()),
                                                FloatType(), exe)
    peak = PhaseCorrelation2.getShift(pcm, v1, v2, nHighestPeaks, minOverlap,
                                      True, True, exe)
    spshift = peak.getSubpixelShift()
    if spshift is not None:
        return spshift.getFloatPosition(0), spshift.getFloatPosition(1)
    else:
        IJ.log('There is a peak.getSubpixelShift issue. sFOV ' + str(sFOV) +
               ' s ' + str(s))
        return None
Code example #2
    def CreatePhantom(self):
        if (self.numChannels > 1):
            image = self.ops.run("create", [
                self.objectSizeX, self.objectSizeY, self.objectSizeZ,
                self.numChannels
            ], FloatType())
        else:
            image = self.ops.run(
                "create",
                [self.objectSizeX, self.objectSizeY, self.objectSizeZ],
                FloatType())

        ax = [Axes.X, Axes.Y, Axes.Z, Axes.CHANNEL]
        self.phantom = ImgPlus(image, "phantom", ax)
def getShiftFromViews(v1, v2):
    # Thread pool
    exe = Executors.newFixedThreadPool(
        Runtime.getRuntime().availableProcessors())
    try:
        # PCM: phase correlation matrix
        pcm = PhaseCorrelation2.calculatePCM(
            v1, v2, ArrayImgFactory(FloatType()), FloatType(),
            ArrayImgFactory(ComplexFloatType()), ComplexFloatType(), exe)
        # Minimum image overlap to consider, in pixels
        minOverlap = v1.dimension(0) / 10
        # Returns an instance of PhaseCorrelationPeak2
        peak = PhaseCorrelation2.getShift(pcm, v1, v2, nHighestPeaks,
                                          minOverlap, True, True, exe)
    except Exception, e:
        print e
Code example #4
def MakeMultiChannelPhantom(ops, size):

    if len(size) > 3:
        numChannels = size[3]
    else:
        numChannels = 1

    image = ops.run("create", size, FloatType())
    ax = [Axes.X, Axes.Y, Axes.Z, Axes.CHANNEL]
    imgPlus = ImgPlus(image, "phantom", ax)

    location = zeros(3, 'i')
    location[0] = 40
    location[1] = size[1] / 2
    location[2] = size[2] / 2

    #ops.run("addsphere",  image, location, radius, 1.0)
    #ops.run("addassymetricspherel",  image, location, 1.0, radius1, radius2)

    shapes = Add3DShapes(ops, size)

    def AddShapes(hyperSlice):
        #shapes.addRandomPointsInROI(hyperSlice, 100.0, 20)
        shapes.addCenterSphere(hyperSlice, 5.0, 20)

    if (numChannels > 1):
        for d in range(0, numChannels):
            hyperSlice = Views.hyperSlice(image, 3, d)
            AddShapes(hyperSlice)
            location[0] += 10
    else:
        AddShapes(image)

    return imgPlus
Code example #5
def combine(op, title, *ops):
  edges_img = img.factory().imgFactory(FloatType()).create(img)
  compute(op(*ops)).into(edges_img)
  imp = IL.wrap(edges_img, title)
  imp.getProcessor().resetMinAndMax()
  imp.show()
  return imp
def createType(bytesPerPixel):
    if bytesPerPixel == 1:
        return UnsignedByteType()
    if bytesPerPixel == 2:
        return UnsignedShortType()
    if bytesPerPixel == 4:
        return FloatType()
    if bytesPerPixel == 8:
        return UnsignedLongType()
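# Usage sketch (assumption, not part of the original snippet): pick the ImgLib2 type
# that matches a pixel depth in bytes and create an image of that type with it.
from net.imglib2.img.array import ArrayImgFactory

t = createType(2)  # UnsignedShortType for 16-bit data
img16 = ArrayImgFactory(t).create([256, 256])  # a 256x256 image of that type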
Code example #7
def SpotDetectionGray(gray, data, display, ops, invert):

    # get the dimensions
    dimensions2D = array([gray.dimension(0), gray.dimension(1)], 'l')
    factory = gray.getImg().factory()

    # wrap as ImagePlus
    imp = ImageJFunctions.wrap(gray, "wrapped")

    # create and call background subtractor
    bgs = BackgroundSubtracter()
    bgs.rollingBallBackground(imp.getProcessor(), 50.0, False, False, True,
                              True, True)

    # wrap the result of background subtraction as Img and display it
    iplus = ImagePlus("bgs", imp.getProcessor())

    #	if (invert==True):
    #		iplus.getProcessor().invert()

    imgBgs = ImageJFunctions.wrapByte(iplus)
    display.createDisplay("back_sub", data.create(ImgPlus(imgBgs)))

    # convert the background subtracted image to 32 bit
    temp = ops.run("createimg", factory, FloatType(), dimensions2D)
    imgBgs32 = ImgPlus(temp)
    ops.convert(imgBgs32, imgBgs, ConvertPixCopy())
    #display.createDisplay("back_sub 32", data.create(ImgPlus(imgBgs32)))

    # create the Laplacian of Gaussian filter
    kernel = DetectionUtils.createLoGKernel(3.0, 2, array([1.0, 1.0], 'd'))

    # apply the log filter and display the result
    log = ImgPlus(ops.run("createimg", factory, FloatType(), dimensions2D))
    ops.convolve(log, imgBgs32, kernel)
    #display.createDisplay("log", data.create(ImgPlus(log)))

    # apply the threshold operation
    #thresholded=ops.run("threshold", ops.create( dimensions2D, BitType()), log, Triangle())
    thresholded = ops.run("triangle", log)

    return ImgPlus(thresholded)
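# Usage sketch (assumption, not part of the original script): `gray` would be one
# channel of the input ImgPlus, and data, display and ops the usual SciJava script
# parameters (# @DatasetService data, # @DisplayService display, # @OpService ops).
spots = SpotDetectionGray(gray, data, display, ops, False)
display.createDisplay("spots", data.create(spots))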
Code example #8
def SpotDetectionLog(img, data, ops, thresholdmethod, dimensions2D, factory):

	# create the Laplacian of Gaussian filter
	kernel = DetectionUtils.createLoGKernel( 3.0, 2, array([1.0, 1.0], 'd' ) )

	# apply the log filter and display the result
	log=ImgPlus( ops.run("createimg", factory, FloatType(), dimensions2D) )
	ops.convolve(log, img, kernel)
	
	# apply the threshold operation
	thresholded = ops.run(thresholdmethod, log)
	
Code example #9
class FnCursor(Point, Cursor, RandomAccess):
    def __init__(self, n, wavelength, offset):
        super(Point, self).__init__(n)
        self.offset = offset  # in radians
        self.wavelength = wavelength
        self.t = FloatType()

    def copyRandomAccess(self):
        return FnCursor(self.numDimensions(), self.wavelength, self.offset)

    def copyCursor(self):
        return self.copyRandomAccess()

    def get(self):
        x = self.getLongPosition(0)
        y = self.getLongPosition(1)
        val = sin(x * 2 * pi / self.wavelength + self.offset)\
            + sin(y * 2 * pi / self.wavelength + self.offset)
        self.t.set(val)
        if 0 == x % 64 and 0 == y % 64:
            System.out.println("x, y: " + str(x) + ", " + str(y) + " :: " +
                               str(val))
        return self.t
Code example #10
# spot detection routine for gray scale data.  Detects light objects
def SpotDetectionGray(gray, data, display, ops, thresholdmethod, showSteps=False):
	
	# get the dimensions
	dimensions2D=array( [gray.dimension(0), gray.dimension(1)], 'l')
	factory=gray.getImg().factory()

	# `invert` and `imp` belong to the ImagePlus-based variant above and are not defined here
	#if (invert==True):
	#	imp.getProcessor().invert()
	
	imgBgs = gray  # ImageJFunctions.wrapByte(imp)

	if (showSteps): display.createDisplay("back_sub", data.create(ImgPlus(imgBgs))) 

	# convert the background subtracted image to 32 bit
	temp=ops.run( "createimg", factory, FloatType(), dimensions2D )
	imgBgs32=ImgPlus( temp )
	ops.convert(imgBgs32, imgBgs, ConvertPixCopy() )
	#display.createDisplay("back_sub 32", data.create(ImgPlus(imgBgs32))) 
Code example #11
File: segmentation_em.py  Project: mwinding/scripts
def filterBankRotations(img,
                        angles=xrange(0, 46, 9), # sequence, in degrees
                        filterBankFn=filterBank, # function that takes an img as sole positional argument
                        outputType=FloatType()):
  """ img: a RandomAccessibleInterval.
      filterBankFn: the function from which to obtain a sequence of ImgMath ops.
      angles: a sequence of angles in degrees.
      outputType: for materializing rotated operations and rotating them back.

      For every angle, will prepare a rotated view of the image,
      then create a list of ops on the basis of that rotated view,
      then materialize each op into an image so that an unrotated view
      can be returned back.

      returns a list of unrotated views, each containing the values of applying
      each op to the rotated view. 
  """
  ops_rotations = []
  
  for angle in angles:
    imgRot = img if 0 == angle else rotatedView(img, angle)
    ops = filterBankFn(imgRot)

    # Materialize these two combination ops and rotate them back (rather, a rotated view)
    interval = Intervals.translate(img, [(imgRot.dimension(d) - img.dimension(d)) / 2
                                         for d in xrange(img.numDimensions())])
    for op in ops:
      imgOpRot = compute(op).intoArrayImg(outputType)
      if 0 == angle:
        ops_rotations.append(imgOpRot)
        continue
      # Rotate them back and crop view
      imgOpUnrot = rotatedView(imgOpRot, -angle, enlarge=False)
      imgOp = Views.zeroMin(Views.interval(imgOpUnrot, interval))
      #if angle == 0 or angle == 45:
      #  IL.wrap(imgOpRot, "imgOpRot angle=%i" % angle).show()
      #  IL.wrap(imgOpUnrot, "imgOpUnrot angle=%i" % angle).show()
      #  IL.wrap(imgOp, "imgOp angle=%i" % angle).show()
      ops_rotations.append(imgOp)
  
  return ops_rotations
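# Usage sketch (assumption): `img` is a 2D RandomAccessibleInterval such as a wrapped
# EM section, and filterBank and IL are defined in this same script, as the defaults
# and the commented-out debug lines above suggest.
responses = filterBankRotations(img, angles=[0, 15, 30, 45], filterBankFn=filterBank)
print len(responses), "unrotated response images"
IL.wrap(responses[0], "first response, angle 0").show()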
Code example #12
File: test_imgmath_view.py  Project: mwinding/scripts
        c.fwd()
        c.localize(pos)
        print "Cursor:", pos, "::", c.get()

    # Test RandomAccess
    ra = iraf.randomAccess()
    c = iraf.cursor()
    while c.hasNext():
        c.fwd()
        ra.setPosition(c)
        c.localize(pos)
        print "RandomAccess:", pos, "::", ra.get()

    # Test source img: should be untouched
    c = img.cursor()
    while c.hasNext():
        print "source:", c.next()

    # Test interval view: the middle 2x2 square
    v = Views.interval(iraf, [1, 1], [2, 2])
    IL.wrap(v, "+2 view").show()


# An array from 0 to 15
a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
pixels = array(a, 'b')
img = ArrayImgs.unsignedBytes(pixels, [4, 4])

test(add(img, 2).view())
test(add(img, 2).view(FloatType()))
Code example #13
	# MIP Red
	red_mip = getMIP(red)
	if debugging:
		ui.show('RED_MIP',red_mip)

	# Calculate T = Mean+Std*.25
	# Set threshold (T,65k)
	# Convert to Mask
	Rmean = ops.stats().mean(red_mip).getRealDouble()
	Rstd = ops.stats().stdDev(red_mip).getRealDouble()
	T = Rmean*.25
	# D for particlemask
	D = Rmean + Rstd*0.25
	log('Calculated threshold for the red is {}'.format(T))
	red_mask = ops.threshold().apply(ops.convert().float32(red_mip),FloatType(T))
	
	particlemask = ops.threshold().apply(ops.convert().float32(red_mip),FloatType(D))

	# "Opening" with "Area"=5 px^2
	# "Dilation" with Neighb=1, Count=1
	red_mask = ops.morphology().open(red_mask,[HyperSphereShape(2)])
	red_mask = ops.morphology().dilate(red_mask,HyperSphereShape(1))
	
	particlemask = ops.morphology().open(particlemask,[HyperSphereShape(2)])
	particlemask = ops.morphology().dilate(particlemask,HyperSphereShape(1))

	# Perform Top Hat with area=maxPSize
	r = math.ceil(math.sqrt(maxPSize/3.141593))
	green_tophat = ops.morphology().topHat(green_mip,[HyperSphereShape(long(r))])
	if debugging:
Code example #14
    def __init__(self, n, wavelength, offset):
        super(Point, self).__init__(n)
        self.offset = offset  # in radians
        self.wavelength = wavelength
        self.t = FloatType()
Code example #15
from net.imglib2.type.numeric.real import FloatType

ft = FloatType(10)
ft.pow(2)

print ft

from net.imglib2.algorithm.math.ImgMath import compute, add, power
from net.imglib2.img.array import ArrayImgs
from net.imglib2.img.display.imagej import ImageJFunctions as IL

img = ArrayImgs.floats([10, 10, 10])

compute(add(img, 5)).into(img)  # in place

compute(power(img, 2)).into(img)  # in place

print 25 * 10 * 10 * 10 == sum(t.get() for t in img.cursor())

IL.wrap(img, "5 squared").show()
Code example #16
        self.offset = offset  # in radians
        self.wavelength = wavelength

    def getValue(self, index):
        x = index % self.width
        y = index / self.width
        val = sin(x * 2 * pi / self.wavelength + self.offset)\
            + sin(y * 2 * pi / self.wavelength + self.offset)
        return min(int(val * 65535 / 2), 65535)

    def putValue(self, index, value):
        pass  # ignore


sinaccess = SinDataAccess(dimensions[0], -pi / 2, dimensions[0] / 4)
t = FloatType()
img = ArrayImg(sinaccess, dimensions, t.getEntitiesPerPixel())
img.setLinkedType(t.getNativeTypeFactory().createLinkedType(img))

#IL.show(img)

# 5. Functional image but without faking an ArrayImg
from net.imglib2.img import AbstractImg
from net.imglib2 import Cursor, RandomAccess, Point
from net.imglib2.type.numeric.real import FloatType
from math import sin, pi
from java.lang import System


class FnCursor(Point, Cursor, RandomAccess):
    def __init__(self, n, wavelength, offset):
Code example #17
int_converter = Util.genericIntegerTypeConverter()
# Create the integral image of an 8-bit input, stored as 64-bit
alg = IntegralImg(img, UnsignedLongType(), int_converter)
alg.process()
integralImg = alg.getResult()

# Read out blocks of radius 5 (i.e. 10x10 for a 2d image)
# in a way that is entirely n-dimensional (applies to 1d, 2d, 3d, 4d ...)
radius = 5
nd = img.numDimensions()
op = div(block(Views.extendBorder(integralImg), [radius] * nd),
         pow(radius * 2, nd))
blurred = img.factory().create(img)  # an 8-bit image
# Compute in floats, store result into longs
# using a default generic RealType converter via t.setReal(t.getRealDouble())
compute(op).into(blurred, None, FloatType(), None)

# Show the blurred image with the same LUT as the original
imp2 = IL.wrap(blurred, "integral image radius 5 blur")
imp2.getProcessor().setLut(imp.getProcessor().getLut())
imp2.show()

# Compare with Gaussian blur
from ij import ImagePlus
from ij.plugin.filter import GaussianBlur
from ij.gui import Line, ProfilePlot

# Gaussian of the original image
imp_gauss = ImagePlus(imp.getTitle() + " Gauss",
                      imp.getProcessor().duplicate())
GaussianBlur().blurGaussian(imp_gauss.getProcessor(), radius)
Code example #18
# @OpService ops
# @Dataset data

from net.imglib2.type.numeric.real import FloatType

print ops.stats().sum(FloatType(), data)
Code example #19
#create the phantom
phantom = spheres.CreatePhantom()
# create the phantom
#phantom=MakeMultiChannelPhantom(ops, size)
#MakeMultiChannelPhantom([256, 256, 128, 5])
display.createDisplay("phantom",  data.create(phantom));

# create the psf
psf = ops.run("psf", psfsize[0], psfsize[2], [100, 100, 300], 400, 1.4, 1.51, 1.51, 10)
display.createDisplay("psf",  data.create(psf));

#extended = ops.run("extend", None, phantom, extensionxy, extensionz, BoundaryType.ZERO, FFTTarget.MINES_SPEED)
extended = ops.run("extend", None, phantom, spheres.objectSizeX, spheres.objectSizeY, spheres.objectSizeZ, ExtensionType.DIMENSION, BoundaryType.ZERO, FFTTarget.MINES_SPEED)
display.createDisplay("extended",  data.create(extended));

background=FloatType()
background.setReal(0.0001)
ops.run("add", extended, background)

extendedPsf = ops.run("extend", None, psf, spheres.objectSizeX, spheres.objectSizeY, spheres.objectSizeZ, ExtensionType.DIMENSION, BoundaryType.ZERO, FFTTarget.MINES_SPEED)
#extendedPsf = ops.run("extend", None, psf, extensionxy, 0, BoundaryType.ZERO, FFTTarget.MINES_SPEED)
display.createDisplay("extended psf",  data.create(extendedPsf));

# convolve
convolved=ops.run("frequencyfilter", extended, extendedPsf, ConvolutionRaiRai())
display.createDisplay("convolved",  data.create(convolved))

extendedDimensions=[]
for i in range(0,3):
	extendedDimensions.append(extended.dimension(i))
Code example #20
directory="/home/bnorthan/Brian2014/Projects/deconware2/deconware-scripts/images/"

helaCellsName="helacellsredcropped.tif"
barsName="Bars.tif"

helaCells=data.open(directory+helaCellsName);
bars=data.open(directory+barsName);

display.createDisplay("HelaCells", helaCells);

gaussianKernel =ops.gaussKernel( 2, 5.0);
gaussianKernel3D =ops.gaussKernel(array([4.0, 4.0, 2.0], 'd'));
#logKernel=ops.logKernel(2, 3.0, None, FloatType(),PlanarImgFactory()) 
logKernel=ops.logKernel(2, 3.0) 

display.createDisplay("gausskernel", ImgPlus(gaussianKernel));

dimensions2D=array([helaCells.dimension(0), helaCells.dimension(1)], 'l');
dimensions3D=array([bars.dimension(0), bars.dimension(1), bars.dimension(2)], 'l');

gaussianFiltered=ops.createImg( dimensions2D)
logFiltered=ops.createImg(FloatType(), PlanarImgFactory(), dimensions2D)
barsGaussianFiltered=ops.createImg(FloatType(), PlanarImgFactory(), dimensions3D)

ops.convolve(gaussianFiltered, helaCells, gaussianKernel)
ops.convolve(logFiltered, helaCells, logKernel)
ops.convolve(barsGaussianFiltered, bars, gaussianKernel3D);

display.createDisplay("gaussian", ImgPlus(gaussianFiltered));
display.createDisplay("log", ImgPlus(logFiltered));
display.createDisplay("bars convolved", ImgPlus(barsGaussianFiltered));
Code example #21
# Load an RGB or ARGB image
#imp = IJ.openImage("https://imagej.nih.gov/ij/images/leaf.jpg")
imp = IJ.getImage()

# Access its pixel data from an ImgLib2 data structure:
# a RandomAccessibleInterval<ARGBType>
img = IL.wrapRGBA(imp)

# Read out single channels
red = Converters.argbChannel(img, 1)
green = Converters.argbChannel(img, 2)
blue = Converters.argbChannel(img, 3)

# Create an empty image of type FloatType (floating-point values)
# Here, the img is used to read out the interval: the dimensions for the new image
brightness = ArrayImgFactory(FloatType()).create(img)

# Compute the brightness: for every pixel, take the maximum intensity across the
# three channels, then normalize it to [0, 1] by dividing by 255
compute(div(maximum([red, green, blue]), 255.0)).into(brightness)

# Show the brightness image
impB = IL.wrap(brightness, imp.getTitle() + " brightness")
impB.show()

# Compute now the image color saturation
saturation = ArrayImgFactory(FloatType()).create(img)
compute(
    let(
        "red",
        red,  # store as directly readable variables (no dictionary lookups)
Code example #22
# Scale the X,Y axis down to isotropy with the Z axis
cal = imp.getCalibration()
scale2D = cal.pixelWidth / cal.pixelDepth
iso = Compute.inFloats(Scale2D(Red(ImgLib.wrap(imp)), scale2D))
#ImgLib.wrap(iso).show()

# Find peaks by difference of Gaussian
sigma = (cell_diameter / cal.pixelWidth) * scale2D
peaks = DoGPeaks(iso, sigma, sigma * 0.5, minPeak, 1)
print "Found", len(peaks), "peaks"

# Copy ImgLib1 iso image into ImgLib2 copy image
copy = ArrayImgFactory().create(
    [iso.getDimension(0),
     iso.getDimension(1),
     iso.getDimension(2)], FloatType())
c1 = iso.createCursor()
c2 = copy.cursor()
while c1.hasNext():
    c1.fwd()
    c2.fwd()
    c2.get().set(c1.getType().getRealFloat())

#ImageJFunctions.show(copy)

# Measure mean intensity at every peak
sphereFactory = HyperSphereNeighborhood.factory()
radius = 2
intensities1 = []
copy2 = Views.extendValue(copy, FloatType(0))
ra = copy2.randomAccess()
Code example #23
  aff.set(*matrix)
  return aff

# Transform the kernel for each view
kernels = [kernel,
           transformPSFKernelToView(kernel, affine3D(matrices["imgB0-imgB1"])),
           transformPSFKernelToView(kernel, affine3D(matrices["imgB0-imgB2"])),
           transformPSFKernelToView(kernel, affine3D(matrices["imgB0-imgB3"]))]

def deconvolve(images, kernels, name, n_iterations):
  # Bayesian-based multi-view deconvolution
  exe = newFixedThreadPool(Runtime.getRuntime().availableProcessors() -2)
  try:
    mylambda = 0.0006
    blockSize = Intervals.dimensionsAsIntArray(images[0]) # [128, 128, 128]
    cptf = ComputeBlockSeqThreadCPUFactory(exe, mylambda, blockSize, ArrayImgFactory(FloatType()))
    psiInitFactory = PsiInitBlurredFusedFactory() # PsiInitAvgPreciseFactory() fails with type mismatch: UnsignedByteType (?) vs FloatType
    weight = Views.interval(ConstantRandomAccessible(FloatType(1), images[0].numDimensions()), FinalInterval(images[0]))
    filterBlocksForContent = False # Run once with True, none were removed
    decon_views = DeconViews([DeconView(exe, img, weight, kernel, PSFTYPE.INDEPENDENT, blockSize, 1, filterBlocksForContent)
                              for img in images],
                             exe)
    #n_iterations = 10
    decon = MultiViewDeconvolutionSeq(decon_views, n_iterations, psiInitFactory, cptf, ArrayImgFactory(FloatType()))
    if not decon.initWasSuccessful():
      print "Something went wrong initializing MultiViewDeconvolution"
    else:
      decon.runIterations()
      img = decon.getPSI()
      imp = IL.wrap(img, name + "_deconvolved_" + str(n_iterations) + "_iterations")
      imp.show()
Code example #24
# create and call background subtractor
bgs = BackgroundSubtracter()
bgs.rollingBallBackground(imp.getProcessor(), 50.0, False, False, True, True,
                          True)

# wrap the result of background subtraction as Img
iplus = ImagePlus("bgs", imp.getProcessor())
imgBgs = ImageJFunctions.wrapShort(iplus)

###############################################################
# Step 2:  Laplacian of Gaussian Filtering
###############################################################

# convert to 32 bit
imgBgs32 = ops.run("createimg", imgBgs, FloatType())
ops.convert(imgBgs32, imgBgs, ConvertPixCopy())

# create the Laplacian of Gaussian filter
kernel = DetectionUtils.createLoGKernel(3.0, 2, array([1.0, 1.0], 'd'))

# create the output Img for convolution
log = ImgPlus(ops.run("createimg", inputData.getImgPlus(), FloatType()))

# apply the log filter
ops.convolve(log, imgBgs32, kernel)

###############################################################
# Step 3:  Threshold
###############################################################
Code example #25
# @DatasetService data
# @DisplayService display
# @IOService io
# @OpService ops
# @net.imagej.Dataset image
# @net.imagej.Dataset psf

from net.imglib2.meta import ImgPlus
from net.imglib2.type.numeric.real import FloatType

from jarray import array

sumImg=ops.sum(FloatType(), image.getImgPlus())
sumPsf=ops.sum(FloatType(), psf.getImgPlus())

convolved=ops.convolve(image.getImgPlus(), psf.getImgPlus());
display.createDisplay("convolved", ImgPlus(convolved));

size=array([0, 0, 0], 'l')
convolved2=ops.convolve(None, image, psf, size)
display.createDisplay("convolved2", ImgPlus(convolved2));

sumConvolved=ops.sum(FloatType(), convolved)
sumConvolved2=ops.sum(FloatType(), convolved2)

print sumImg.getRealDouble()
print sumPsf.getRealDouble()
print sumImg.getRealDouble()*sumPsf.getRealDouble()
print sumConvolved.getRealDouble()
print sumConvolved2.getRealDouble()
Code example #26
                           psfX / 2 + psfXSize / 2 - 1,
                           psfY / 2 + psfYSize / 2 - 1, psfZ - 1))

psf_ = ops.convert().float32(psf_)

maxPSF = ops.stats().max(psf_).getRealFloat()
psfBackground = psfBackgroundPercent * maxPSF

# subtract background from psf
for t in psf_:
    val = t.getRealFloat() - psfBackground
    if val < 0:
        val = 0
    t.setReal(val)

# normalize psf
sumpsf = ops.stats().sum(psf_)
sumpsf = FloatType(sumpsf.getRealFloat())

print sumpsf

psf_ = ops.math().divide(psf_, sumpsf)

# convert image to 32 bit
img_ = ops.convert().float32(data.getImgPlus())

# now deconvolve
deconvolved_ = ops.deconvolve().richardsonLucy(img_, psf_, None, None, None,
                                               None, None, 30, nonCirculant,
                                               acceleration)
Code example #27
def multiviewDeconvolution(images, blockSizes, PSF_kernels, n_iterations, lambda_val=0.0006, weights=None,
                           filterBlocksForContent=False, PSF_type=PSFTYPE.INDEPENDENT, exe=None, printFn=syncPrint):
  """
  Apply Bayesian-based multi-view deconvolution to the list of images,
  returning the deconvolved image. Uses Stephan Preibisch's library,
  currently available with the BigStitcher Fiji update site.

  images: a list of images, registered and all with the same dimensions.
  blockSizes: how to chop up the volume of each image for parallel processing.
             When None, a single block with the image dimensions is used,
             plus half of the transformed kernel dimensions for that view.
  PSF_kernels: the images containing the point spread function for each input image. Requirement: the dimensions must be an odd number.
  n_iterations: the number of iterations for the deconvolution. A number between 10 and 50 is desirable. The more iterations, the higher the computational cost.
  lambda_val: default is 0.0006 as recommended by Preibisch.
  weights: a list of FloatType images with the weight for every pixel. If None, then all pixels get a value of 1.
  filterBlocksForContent: whether to check before processing a block if the block has any data in it. Default is False.
  PSF_type: defaults to PSFTYPE.INDEPENDENT.
  exe: a thread pool for concurrent execution. If None, a new one is created, using as many threads as CPUs are available.
  printFn: the function to use for printing error messages. Defaults to syncPrint (thread-safe access to the built-in `print` function).

  Returns an imglib2 ArrayImg, or None if something went wrong.
  """

  mvd_exe = exe
  if not exe:
    mvd_exe = newFixedThreadPool() # as many threads as CPUs

  try:
    mvd_weights = weights
    if not weights:
      mvd_weights = repeat(Views.interval(ConstantRandomAccessible(FloatType(1), images[0].numDimensions()), FinalInterval(images[0])))

    for i, PSF_kernel in enumerate(PSF_kernels):
      for d in xrange(PSF_kernel.numDimensions()):
        if 0 == PSF_kernel.dimension(d) % 2:
          printFn("for image at index %i, PSF kernel dimension %i is not odd." % (i, d))
          return None

    if not blockSizes:
      # Whole image dimensions + half of the transformed PSF kernel dimensions
      kernel_max = int(max(PSF_kernel.dimension(d)
                           for d in xrange(PSF_kernel.numDimensions())
                           for PSF_kernel in PSF_kernels) * 2)
      syncPrint("kernel max dimension *2: %i" % kernel_max)
      blockSizes = []
      for image in images:
        blockSizes.append([image.dimension(d) + kernel_max
                           for d in xrange(image.numDimensions())])
        syncPrint("blockSize:" + str(blockSizes[-1]))

    cptf = createFactory(mvd_exe, lambda_val, blockSizes[0]) # TODO which blockSize to give here?
    filterBlocksForContent = False # Run once with True, none were removed
    dviews = [DeconView(mvd_exe, img, weight, PSF_kernel, PSF_type, blockSize, 1, filterBlocksForContent)
              for img, blockSize, weight, PSF_kernel in izip(images, blockSizes, mvd_weights, PSF_kernels)]
    decon = MultiViewDeconvolutionSeq(DeconViews(dviews, mvd_exe), n_iterations, PsiInitBlurredFusedFactory(), cptf, ArrayImgFactory(FloatType()))
    if not decon.initWasSuccessful():
      printFn("Something went wrong initializing MultiViewDeconvolution")
      return None
    else:
      decon.runIterations()
      return decon.getPSI()
  finally:
    # Only shut down the thread pool if it was created here
    if not exe:
      mvd_exe.shutdownNow()
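# Usage sketch (assumption): `images` is a list of registered views and `kernels` their
# odd-dimensioned PSFs, prepared elsewhere in the script; IL stands for
# net.imglib2.img.display.imagej.ImageJFunctions.
deconvolved = multiviewDeconvolution(images, None, kernels, 30)
if deconvolved is not None:
  IL.wrap(deconvolved, "deconvolved, 30 iterations").show()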
Code example #28
r1 = Rectangle(1708, 680, 1792, 1760)
r2 = Rectangle(520, 248, 1660, 1652)
cut1 = Views.zeroMin(
    Views.interval(red, [r1.x, r1.y],
                   [r1.x + r1.width - 1, r1.y + r1.height - 1]))
cut2 = Views.zeroMin(
    Views.interval(red, [r2.x, r2.y],
                   [r2.x + r2.width - 1, r2.y + r2.height - 1]))

# Thread pool
exe = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors())

try:
    # PCM: phase correlation matrix
    pcm = PhaseCorrelation2.calculatePCM(cut1, cut2,
                                         ArrayImgFactory(FloatType()),
                                         FloatType(),
                                         ArrayImgFactory(ComplexFloatType()),
                                         ComplexFloatType(), exe)

    # Number of phase correlation peaks to check with cross-correlation
    nHighestPeaks = 10

    # Minimum image overlap to consider, in pixels
    minOverlap = cut1.dimension(0) / 10

    # Returns an instance of PhaseCorrelationPeak2
    peak = PhaseCorrelation2.getShift(pcm, cut1, cut2, nHighestPeaks,
                                      minOverlap, True, True, exe)

    print "Translation:", peak.getSubpixelShift()
Code example #29
print corners3VC
print corners3VR
block3VL = block(imgE, corners3VL)
block3VC = block(imgE, corners3VC)
block3VR = block(imgE, corners3VR)
op5 = sub(block3VC, block3VL, block3VR)  # center minus sides
op6 = sub(add(block3VL, block3VR), block3VC)  # sides minus center

# Two bright-black-bright horizontal features 4x8 / 4x8 / 4x8
corners3HT = [[x, y - 2] for x, y in cornersHT]
corners3HC = [[x, y + 2] for x, y in cornersHT]
corners3HB = [[x, y + 2] for x, y in cornersHB]
print corners3HT
print corners3HC
print corners3HB
block3HT = block(imgE, corners3HT)
block3HC = block(imgE, corners3HC)
block3HB = block(imgE, corners3HB)
op7 = sub(block3HC, block3HT, block3HB)  # center minus top and bottom
op8 = sub(add(block3HT, block3HB), block3HC)  # top and bottom minus center

for name, op in ((name, eval(name)) for name in vars()
                 if re.match(r"^op\d+$", name)):
    # For development:
    if WindowManager.getImage(name):
        continue  # don't open
    #
    opimp = IL.wrap(op.view(FloatType()), name)
    opimp.getProcessor().resetMinAndMax()
    opimp.show()
Code example #30
File: test_lib_asm.py  Project: mwinding/scripts
    def convert(self, sampler):
        return FloatType(UnsignedByteToFloatAccess(sampler))
Code example #31
  def createFactoryFn(exe, lambda_val, blockSize):
    if use_cuda and cuda:
      return ComputeBlockSeqThreadCUDAFactory(exe, MultiViewDeconvolution.minValue, lambda_val, blockSize, cuda, HashMap(idToCudaDevice))
    else:
      return ComputeBlockSeqThreadCPUFactory(exe, MultiViewDeconvolution.minValue, lambda_val, blockSize, ArrayImgFactory(FloatType()))