Example 1
def run(self):
    for filename in self.filenames:
        try:
            img = self.klb.readFull(os.path.join(srcDir, filename))
            computeInto(maximum(self.aimg, img), self.aimg)
        except:
            syncPrint("Skipping failed image: %s\n%s" %
                      (filename, sys.exc_info()))
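Example 1's run() keeps a pixel-wise running maximum in self.aimg. The same ImgMath pattern in isolation, as a minimal sketch with illustrative names (a, b), assuming the computeInto/maximum imports this snippet already relies on:

from net.imglib2.img.array import ArrayImgs

a = ArrayImgs.floats([512, 512])
b = ArrayImgs.floats([512, 512])
# Write max(a, b) for every pixel back into the accumulator a,
# just as computeInto(maximum(self.aimg, img), self.aimg) does above
computeInto(maximum(a, b), a)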
Example 2
def projectLastDimension(img, showEarly=False):
    """
  Project the last dimension, e.g. a 4D image becomes a 3D image,
  using the provided reducing function (e.g. min, max, sum).
  """
    last_dimension = img.numDimensions() - 1
    # The collapsed image
    imgC = ArrayImgs.unsignedShorts(
        [img.dimension(d) for d in xrange(last_dimension)])

    if showEarly:
        showStack(
            imgC,
            title="projected")  # show it early, will be updated progressively

    if img.dimension(last_dimension) > 10:
        # one by one
        print "One by one"
        for i in xrange(img.dimension(last_dimension)):
            print i
            compute(maximum(imgC, Views.hyperSlice(img, last_dimension,
                                                   i))).into(imgC)
    else:
        # Each sample of imgV is a virtual vector over all time frames at that 3D coordinate:
        imgV = Views.collapseReal(img)
        # Reduce each vector to a single scalar, using a Converter
        # The Converter class
        reduce_max = makeCompositeToRealConverter(
            reducer_class=Math,
            reducer_method="max",
            reducer_method_signature="(DD)D")
        img3DC = convert(imgV, reduce_max.newInstance(),
                         img.randomAccess().get().getClass())
        ImgUtil.copy(ImgView.wrap(img3DC, img.factory()), imgC)

    return imgC
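A hypothetical call of projectLastDimension, assuming img4d is a 4D (x, y, z, t) UnsignedShortType image and showStack is the same display helper the function itself uses:

img3d = projectLastDimension(img4d)
showStack(img3d, title="max projection over the last dimension")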
# Access its pixel data from an ImgLib2 data structure:
# a RandomAccessibleInterval<ARGBType>
img = IL.wrapRGBA(imp)

# Read out single channels
red = Converters.argbChannel(img, 1)
green = Converters.argbChannel(img, 2)
blue = Converters.argbChannel(img, 3)

# Create an empty image of type FloatType (floating-point values)
# Here, img is used only as an interval: it provides the dimensions for the new image
brightness = ArrayImgFactory(FloatType()).create(img)

# Compute the brightness: pick the maximum intensity among the three channels
# for every pixel and normalize it to [0, 1] by dividing by 255
compute(div(maximum([red, green, blue]), 255.0)).into(brightness)

# Show the brightness image
impB = IL.wrap(brightness, imp.getTitle() + " brightness")
impB.show()

# Compute now the image color saturation
saturation = ArrayImgFactory(FloatType()).create(img)
compute(
    let(
        "red",
        red,  # store as directly readable variables (no dictionary lookups)
        "green",
        green,  # so that only 3 cursors are needed instead of 6
        "blue",
        blue,
        "max", maximum("red", "green", "blue"),
        "min", minimum("red", "green", "blue"),
        IF(EQ(0, "max"),
           THEN(0),
           ELSE(div(sub("max", "min"), "max"))))).into(saturation)

impC = IL.wrap(saturation, imp.getTitle() + " saturation")
impC.show()
Example 4
def op(channel):
    return minimum(
        255, maximum(0, mul(exp(mul(gamma, log(div(channel, 255)))), 255)))
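A hypothetical way to apply this gamma op to every channel of an ARGB image, reusing the Converters.argbChannel / Views.stack / Converters.mergeARGB pattern from the RGB projection example below; gamma and the ARGBType img are assumed to be defined:

gamma = 0.5  # illustrative value
channels = Views.stack([op(Converters.argbChannel(img, c)).view()
                        for c in [1, 2, 3]])  # 1: red, 2: green, 3: blue
corrected = Converters.mergeARGB(channels, ColorChannelOrder.RGB)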
Example 5
    chunk_size = len(filtered) / n_threads
    aimgs = []
    threads = []
    first = klb.readFull(os.path.join(srcDir, filtered[0]))
    dimensions = Intervals.dimensionsAsLongArray(first)

    for i in xrange(n_threads):
        m = Max(dimensions, filtered[i * chunk_size:(i + 1) * chunk_size])
        m.start()
        threads.append(m)

    # Await completion of all
    for m in threads:
        m.join()

    # Merge all results into a single maximum projection
    max_projection = computeInto(maximum([m.aimg for m in threads]),
                                 ArrayImgs.floats(dimensions))

    max3D = writeZip(max_projection,
                     max_projection_path,
                     title="max projection")
    max3D.show()

# Step 3: detect the nuclei and write their coordinates to a CSV file
calibration = [0.40625, 0.40625, 2.5]

somaDiameters = [4.0, 4.9, 5.7, 6.5]  # 10, 12, 14, 16 px

peak_map = {}

for somaDiameter in somaDiameters:
Example 6
def mergeMax(img1, img2, imgT):
    return compute(maximum(img1, img2)).into(imgT)
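An illustrative call of mergeMax, assuming imgA and imgB are equally sized UnsignedShortType images and that ArrayImgs and Intervals are imported as in the other examples:

target = ArrayImgs.unsignedShorts(Intervals.dimensionsAsLongArray(imgA))
merged = mergeMax(imgA, imgB, target)  # into(imgT) returns imgT, so merged is target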
Example 7
from net.imglib2.algorithm.math.ImgMath import compute, maximum
from net.imglib2.converter import Converters, ColorChannelOrder
from net.imglib2.img.display.imagej import ImageJFunctions as IL
from net.imglib2.view import Views
from ij import IJ, ImagePlus

# Fetch an RGB image stack (or any RGB image with more than 1 dimension)
imp_rgb = IJ.getImage()  # or e.g. IJ.openImage("http://imagej.nih.gov/ij/images/flybrain.zip")

img = IL.wrap(imp_rgb)  # an ARGBType Img
red = Converters.argbChannel(img, 1)  # a view of the ARGB red channel

# Project the last dimension using the max function
last_d = red.numDimensions() - 1
op = maximum(
    [Views.hyperSlice(red, last_d, i) for i in xrange(red.dimension(last_d))])
img_max_red = compute(op).intoArrayImg()

IL.wrap(img_max_red, "max projection of the red channel").show()

# Now project all 3 color channels and compose an RGB image
last_dim_index = img.numDimensions() - 1
channel_stacks = [[
    Views.hyperSlice(Converters.argbChannel(img, channel_index),
                     last_dim_index, slice_index)
    for slice_index in xrange(img.dimension(last_dim_index))
] for channel_index in [1, 2, 3]]  # 1: red, 2: green, 3: blue

channels = Views.stack([maximum(cs).view() for cs in channel_stacks])
max_rgb = Converters.mergeARGB(channels, ColorChannelOrder.RGB)
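A hypothetical final step for this example: wrap the composed ARGB view and display it.

IL.wrap(max_rgb, "max RGB projection").show()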
Example 8
def maxProjectLastDimension(img, strategy="1by1", chunk_size=0):
  last_dimension = img.numDimensions() -1

  if "1by1" == strategy:
    exe = newFixedThreadPool()
    try:
      n_threads = exe.getCorePoolSize()
      imgTs = [ArrayImgs.unsignedShorts(list(Intervals.dimensionsAsLongArray(img))[:-1]) for i in xrange(n_threads)]
      
      def mergeMax(img1, img2, imgT):
        return compute(maximum(img1, img2)).into(imgT)

      def hyperSlice(index):
        return Views.hyperSlice(img, last_dimension, index)

      # The first n_threads mergeMax:
      futures = [exe.submit(Task(mergeMax, hyperSlice(i*2), hyperSlice(i*2 +1), imgTs[i]))
                 for i in xrange(n_threads)]
      # As soon as one finishes, merge it with the next available hyperSlice
      next = n_threads
      while len(futures) > 0: # i.e. not empty
        imgT = futures.pop(0).get()
        if next < img.dimension(last_dimension):
          futures.append(exe.submit(Task(mergeMax, imgT, hyperSlice(next), imgT)))
          next += 1
        else:
          # Run out of hyperSlices to merge
          if 0 == len(futures):
            return imgT # done
          # Merge imgT to each other until none remain
          futures.append(exe.submit(Task(mergeMax, imgT, futures.pop(0).get(), imgT)))
    finally:
      exe.shutdownNow()
  else:
    # By chunks
    imglibtype =  img.randomAccess().get().getClass()
    # The Converter class
    reduce_max = makeCompositeToRealConverter(reducer_class=Math,
                                              reducer_method="max",
                                              reducer_method_signature="(DD)D")
    if chunk_size > 0:
      # map reduce approach
      exe = newFixedThreadPool()
      try:
        def projectMax(img, minC, maxC, reduce_max):
          imgA = ArrayImgs.unsignedShorts([maxC[d] - minC[d] + 1 for d in xrange(len(minC) - 1)])
          ImgUtil.copy(ImgView.wrap(convert(Views.collapseReal(Views.interval(img, minC, maxC)), reduce_max.newInstance(), imglibtype), img.factory()), imgA)
          return imgA
        
        # The min and max coordinates of all dimensions except the last one
        minCS = [0 for d in xrange(last_dimension)]
        maxCS = [img.dimension(d) -1 for d in xrange(last_dimension)]

        # Process every chunk in parallel
        futures = [exe.submit(Task(projectMax, img, minCS + [offset], maxCS + [min(offset + chunk_size, img.dimension(last_dimension)) - 1], reduce_max))
                   for offset in xrange(0, img.dimension(last_dimension), chunk_size)]
        
        # Merge all chunk projections, pairwise, into the first one
        return reduce(lambda imgA, f: compute(maximum(imgA, f.get())).into(imgA),
                      futures[1:], futures[0].get())
      finally:
        exe.shutdownNow()
    else:
      # One chunk: all at once
      # Each sample of img3DV is a virtual vector over all time frames at that 3D coordinate
      # Reduce each vector to a single scalar, using a Converter
      img3DC = convert(Views.collapseReal(img), reduce_max.newInstance(), imglibtype)
      imgA = ArrayImgs.unsignedShorts([img.dimension(d) for d in xrange(last_dimension)])
      ImgUtil.copy(ImgView.wrap(img3DC, img.factory()), imgA)
      return imgA
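A hypothetical usage of maxProjectLastDimension, assuming img4d is a 4D UnsignedShortType image; any strategy other than "1by1" selects the converter-based branch:

projected = maxProjectLastDimension(img4d, strategy="1by1")
projected_chunked = maxProjectLastDimension(img4d, strategy="chunked", chunk_size=16)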
def filterBank(img,
               sumType=UnsignedLongType(),
               converter=Util.genericRealTypeConverter()):
    """ Haar-like features from Viola and Jones
      tuned to identify neuron membranes in electron microscopy. """
    # Create the integral image, stored as 64-bit
    alg = IntegralImg(img, sumType, converter)
    alg.process()
    integralImg = alg.getResult()
    imgE = Views.extendBorder(integralImg)

    # corners of a 4x8 or 8x4 rectangular block where 0,0 is the top left
    bs = 4  # short side
    bl = 8  # long side
    cornersV = [
        [0, 0],
        [bs - 1, 0],  # Vertical
        [0, bl - 1],
        [bs - 1, bl - 1]
    ]
    cornersH = [
        [0, 0],
        [bl - 1, 0],  # Horizontal
        [0, bs - 1],
        [bl - 1, bs - 1]
    ]

    # Two adjacent vertical rectangles 4x8 - 4x8 centered on the pixel
    blockVL = block(imgE, shift(cornersV, -bs, -bl / 2))
    blockVR = block(imgE, shift(cornersV, 0, -bl / 2))
    op1 = sub(blockVL, blockVR)
    op2 = sub(blockVR, blockVL)

    # Two adjacent horizontal rectangles 8x4 - 8x4 centered on the pixel
    blockHT = block(imgE, shift(cornersH, -bs, -bl / 2))
    blockHB = block(imgE, shift(cornersH, -bs, 0))
    op3 = sub(blockHT, blockHB)
    op4 = sub(blockHB, blockHT)

    # Two bright-black-bright vertical features 4x8 - 4x8 - 4x8
    block3VL = block(imgE, shift(cornersV, -bs - bs / 2, -bl / 2))
    block3VC = block(imgE, shift(cornersV, -bs / 2, -bl / 2))
    block3VR = block(imgE, shift(cornersV, bs / 2, -bl / 2))
    op5 = sub(block3VC, block3VL, block3VR)  # center minus sides
    op6 = sub(add(block3VL, block3VR), block3VC)  # sides minus center

    # Two bright-black-bright horizontal features 8x4 / 8x4 / 8x4
    block3HT = block(imgE, shift(cornersH, -bl / 2, -bs - bs / 2))
    block3HC = block(imgE, shift(cornersH, -bl / 2, -bs / 2))
    block3HB = block(imgE, shift(cornersH, -bl / 2, bs / 2))
    op7 = sub(block3HC, block3HT, block3HB)  # center minus top and bottom
    op8 = sub(add(block3HT, block3HB), block3HC)  # top and bottom minus center

    # Combination of vertical and horizontal edge detection
    op9 = maximum(op1, op3)
    op10 = maximum(op6, op8)

    # corners of a square block where 0,0 is at the top left
    cornersS = [[0, 0], [bs, 0], [0, bs], [bs, bs]]

    # 2x2 squares for oblique edge detection
    blockSTL = block(imgE, shift(cornersS, -bs, -bs))  # top left
    blockSTR = block(imgE, shift(cornersS, 0, -bs))  # top right
    blockSBL = block(imgE, shift(cornersS, -bs, 0))  # bottom left
    blockSBR = block(imgE, cornersS)  # bottom right
    op11 = sub(add(blockSTL, blockSBR), blockSTR, blockSBL)
    op12 = sub(add(blockSTR, blockSBL), blockSTL, blockSBR)

    # Combination of vertical, horizontal and oblique edge detection
    op13 = maximum(op1, op3, op6, op8, op11, op12)

    # Edge detectors: sum of 3 adjacent pixels (not dividing by the other 6
    # to avoid penalizing Y membrane configurations)
    op14 = maximum(add(offset(op13, [-1, -1]), op13, offset(op13, [1, 1])),
                   add(offset(op13, [0, -1]), op13, offset(op13, [0, 1])),
                   add(offset(op13, [1, -1]), op13, offset(op13, [-1, 1])),
                   add(offset(op13, [-1, 0]), op13, offset(op13, [1, 0])))

    # Return a list of all ops
    #return [ob for name, ob in vars().iteritems() if re.match(r"^op\d+$", name)]
    # Ordered
    return [
        op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13,
        op14
    ]
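A minimal usage sketch for filterBank, assuming img is a 2D grayscale image and that compute, ArrayImgs, Intervals, Views and IL are imported as in the earlier examples: materialize every op into its own float image and browse them as a stack.

ops = filterBank(img)
dims = Intervals.dimensionsAsLongArray(img)
feature_stack = Views.stack([compute(op).into(ArrayImgs.floats(dims))
                             for op in ops])
IL.wrap(feature_stack, "Haar-like features").show()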
Example 11
def test(red, green, blue, easy=True):
    saturation = let(
        "red", red, "green", green, "blue", blue, "max",
        maximum("red", "green", "blue"), "min",
        minimum("red", "green", "blue"),
        IF(EQ(0, "max"), THEN(0), ELSE(div(sub("max", "min"), "max"))))

    brightness = div(maximum(red, green, blue), 255.0)

    hue = IF(
        EQ(0, saturation), THEN(0),
        ELSE(
            let(
                "red", red, "green", green, "blue", blue, "max",
                maximum("red", "green", "blue"), "min",
                minimum("red", "green", "blue"), "range", sub("max", "min"),
                "redc", div(sub("max", "red"), "range"), "greenc",
                div(sub("max", "green"), "range"), "bluec",
                div(sub("max", "blue"), "range"), "hue",
                div(
                    IF(
                        EQ("red", "max"), THEN(sub("bluec", "greenc")),
                        ELSE(
                            IF(EQ("green", "max"),
                               THEN(sub(add(2, "redc"), "bluec")),
                               ELSE(sub(add(4, "greenc"), "redc"))))), 6),
                IF(LT("hue", 0), THEN(add("hue", 1)), ELSE("hue")))))

    #print hierarchy(hue)

    #print "hue view:", hue.view( FloatType() ).iterationOrder()

    if easy:
        # About 26 ms
        """
    hsb = Views.stack( hue.view( FloatType() ),
                       saturation.view( FloatType() ),
                       brightness.view( FloatType() ) )
    """

        # About 13 ms: half! Still much worse than plain ImageJ,
        # but the source images are iterated 4 times, rather than just once,
        # and the saturation is computed twice,
        # and the min, max is computed 3 and 4 times, respectively.
        hsb = Views.stack(hue.viewDouble(FloatType()),
                          saturation.viewDouble(FloatType()),
                          brightness.viewDouble(FloatType()))
        """
    # Even worse: ~37 ms
    width, height = rgb.dimension(0), rgb.dimension(1)
    h = compute(hue).into(ArrayImgs.floats([width, height]))
    s = compute(saturation).into(ArrayImgs.floats([width, height]))
    b = compute(brightness).into(ArrayImgs.floats([width, height]))
    hsb = Views.stack( h, s, b )
    """

        imp = IL.wrap(hsb, "HSB view")
    else:
        # Tested it: takes more time (~40 ms vs 26 ms above)
        width, height = rgb.dimension(0), rgb.dimension(1)
        hb = zeros(width * height, 'f')
        sb = zeros(width * height, 'f')
        bb = zeros(width * height, 'f')
        h = ArrayImgs.floats(hb, [width, height])
        s = ArrayImgs.floats(sb, [width, height])
        b = ArrayImgs.floats(bb, [width, height])
        #print "ArrayImg:", b.iterationOrder()
        ImgUtil.copy(ImgView.wrap(hue.view(FloatType()), None), h)
        ImgUtil.copy(ImgView.wrap(saturation.view(FloatType()), None), s)
        ImgUtil.copy(ImgView.wrap(brightness.view(FloatType()), None), b)
        stack = ImageStack(width, height)
        stack.addSlice(FloatProcessor(width, height, hb, None))
        stack.addSlice(FloatProcessor(width, height, sb, None))
        stack.addSlice(FloatProcessor(width, height, bb, None))
        imp = ImagePlus("hsb", stack)
    return imp
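A hypothetical call of test, assuming rgb is the same ARGBType Img whose dimensions the function reads in its non-easy branch:

imp_hsb = test(Converters.argbChannel(rgb, 1),
               Converters.argbChannel(rgb, 2),
               Converters.argbChannel(rgb, 3))
imp_hsb.show()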
red = Converters.argbChannel(img, 1)
green = Converters.argbChannel(img, 2)
blue = Converters.argbChannel(img, 3)

# Create an empty image of type FloatType (floating-point values)
# Here, img is used only as an interval: it provides the dimensions for the new image
brightness = ArrayImgFactory(FloatType()).create(img)

# Compute the brightness: pick the maximum intensity among the three channels
# for every pixel and normalize it to [0, 1] by dividing by 255
compute(div(maximum([red, green, blue]), 255.0)).into(brightness)

# Show the brightness image
impB = IL.wrap(brightness, imp.getTitle() + " brightness")
impB.show()

# Compute now the image color saturation
saturation = ArrayImgFactory(FloatType()).create(img)
compute(
    let(
        "red", red, "green", green, "blue", blue, "max",
        maximum([var("red"), var("green"),
                 var("blue")]), "min",
        minimum([var("red"), var("green"),
                 var("blue")]),
        IF(EQ(0, var("max")), THEN(0),
           ELSE(div(sub(var("max"), var("min")),
                    var("max")))))).into(saturation)

impC = IL.wrap(saturation, imp.getTitle() + " saturation")
impC.show()