Example #1
def compute_and_update_frame_translations_dt(imp, dt, options, shifts = None):
  """ imp contains a hyper virtual stack, and we want to compute
  the X,Y,Z translation between every t and t+dt time points in it
  using the given preferred channel. 
  if shifts were already determined at other (lower) dt 
  they will be used and updated.
  """
  nt = imp.getNFrames()
  # get roi (could be None)
  roi = imp.getRoi()
  #if roi:
  #  print "ROI is at", roi.getBounds()   
  # init shifts
  if shifts == None:
    shifts = []
    for t in range(nt):
      shifts.append(Point3f(0,0,0))
  # compute shifts
  IJ.showProgress(0)
  max_shifts = options['max_shifts']
  for t in range(dt, nt+dt, dt):
    if t > nt-1: # together with the range extending to nt+dt above, this ensures the last time points are not missed
      t = nt-1 # nt-1 is the last shift (0-based)
    IJ.log("      between frames "+str(t-dt+1)+" and "+str(t+1))      
    # get (cropped and processed) image at t-dt
    roi1 = shift_roi(imp, roi, shifts[t-dt])
    imp1 = extract_frame_process_roi(imp, t+1-dt, roi1, options)
    # get (cropped and processed) image at t
    roi2 = shift_roi(imp, roi, shifts[t])
    imp2 = extract_frame_process_roi(imp, t+1, roi2, options)
    #if roi:
    #  print "ROI at frame",t-dt+1,"is",roi1.getBounds()   
    #  print "ROI at frame",t+1,"is",roi2.getBounds()   
    # compute shift
    local_new_shift = compute_shift(imp2, imp1)
    limit_shifts_to_maximal_shifts(local_new_shift, max_shifts)

    if roi: # total shift is shift of rois plus measured drift
      #print "correcting measured drift of",local_new_shift,"for roi shift:",shift_between_rois(roi2, roi1)
      local_new_shift = add_Point3f(local_new_shift, shift_between_rois(roi2, roi1))
    # determine the shift that we knew already
    local_shift = subtract_Point3f(shifts[t],shifts[t-dt])
    # compute difference between new and old measurement (which come from different dt)   
    add_shift = subtract_Point3f(local_new_shift,local_shift)
    #print "++ old shift between %s and %s: dx=%s, dy=%s, dz=%s" % (int(t-dt+1),int(t+1),local_shift.x,local_shift.y,local_shift.z)
    #print "++ add shift between %s and %s: dx=%s, dy=%s, dz=%s" % (int(t-dt+1),int(t+1),add_shift.x,add_shift.y,add_shift.z)
    # update shifts from t-dt to the end (assuming that the measured local shift will persist till the end)
    for i,tt in enumerate(range(t-dt,nt)):
      # for i>dt the expression below is basically a linear drift prediction for the frames at tt>t
      # this is only important for predicting the best shift of the ROI
      # the drifts for i>dt will be corrected by the next measurements
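      # worked example (illustrative numbers): with dt=3 and a newly measured add_shift.x of 3 px,
      # frames t-3..t gain +0, +1, +2, +3 px, and every later frame continues the same
      # 1 px/frame trend until a later measurement corrects it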
      shifts[tt].x += 1.0*i/dt * add_shift.x
      shifts[tt].y += 1.0*i/dt * add_shift.y
      shifts[tt].z += 1.0*i/dt * add_shift.z
      #print "updated shift till frame",tt+1,"is",shifts[tt].x,shifts[tt].y,shifts[tt].z
    IJ.showProgress(1.0*t/(nt+1))
  
  IJ.showProgress(1)
  return shifts
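
A minimal usage sketch (not from the source): the shifts can be refined by calling the function repeatedly with increasing dt, since shifts found at a lower dt are reused and updated. The names 'imp' and 'options', the Point3f bound for 'max_shifts', and the dt sequence are assumptions.

imp = IJ.getImage()
# 'max_shifts' is the only key used above; its structure depends on limit_shifts_to_maximal_shifts (not shown),
# and extract_frame_process_roi very likely expects further entries in 'options'
options = {'max_shifts': Point3f(20, 20, 5)}
shifts = None
for dt in [1, 2, 4]:  # coarse-to-fine: shifts found at lower dt are reused and updated
  shifts = compute_and_update_frame_translations_dt(imp, dt, options, shifts)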
Example #2
def shift_between_rois(roi2, roi1):
  """ computes the relative xy shift between two rois 
  """ 
  dr = Point3f(0,0,0)
  dr.x = roi2.getBounds().x - roi1.getBounds().x
  dr.y = roi2.getBounds().y - roi1.getBounds().y
  dr.z = 0
  return dr
def center_of_mass_imglib2(imp, bg_level):
  img = ImagePlusImgs.from(imp)
  #img = ops.math().subtract(img, 20)
  start_time = time.time()
  IJ.run(imp, "Subtract...", "value="+str(bg_level)+" stack");
  print("        subtract in [s]: "+str(round(time.time()-start_time,3)))    
  start_time = time.time()
  com = ops.geom().centerOfGravity(img)
  print("        center of mass in [s]: "+str(round(time.time()-start_time,3)))    
  if com.numDimensions()==3: # 3D data
    p3 = [com.getFloatPosition(0),com.getFloatPosition(1),com.getFloatPosition(2)]
  elif com.numDimensions()==2: # 2D data: put zero shift in z
    p3 = [com.getFloatPosition(0),com.getFloatPosition(1),0]
  return Point3f(p3)
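
A hedged sketch (not in the source) of the compute_shift_using_center_of_mass helper that the last example calls: the drift is taken as the difference between the two centers of mass; the sign convention is an assumption.

def compute_shift_using_center_of_mass(imp1, imp2, bg_level):
  # centers of mass of the two background-subtracted frames, using the helper above
  com1 = center_of_mass_imglib2(imp1, bg_level)
  com2 = center_of_mass_imglib2(imp2, bg_level)
  # drift of imp2 relative to imp1 (sign convention assumed); subtract_Point3f is defined below
  return subtract_Point3f(com2, com1)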
Example #4
def subtract_Point3f(p1, p2):
  p3 = Point3f(0,0,0)
  p3.x = p1.x - p2.x
  p3.y = p1.y - p2.y
  p3.z = p1.z - p2.z
  return p3
Example #5
def add_Point3f(p1, p2):
  p3 = Point3f(0,0,0)
  p3.x = p1.x + p2.x
  p3.y = p1.y + p2.y
  p3.z = p1.z + p2.z
  return p3
Example #6
def run(imp, preprocessor_path, postprocessor_path, threshold_method,
        user_comment):

    output_parameters = {
        "image title": "",
        "preprocessor path": float,
        "post processor path": float,
        "thresholding op": float,
        "use ridge detection": bool,
        "high contrast": int,
        "low contrast": int,
        "line width": int,
        "minimum line length": int,
        "mitochondrial footprint": float,
        "branch length mean": float,
        "branch length median": float,
        "branch length stdevp": float,
        "summed branch lengths mean": float,
        "summed branch lengths median": float,
        "summed branch lengths stdevp": float,
        "network branches mean": float,
        "network branches median": float,
        "network branches stdevp": float
    }
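    # (the values above are type objects used as placeholders; every entry is
    #  overwritten with a real value before the results table is written)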

    output_order = [
        "image title", "preprocessor path", "post processor path",
        "thresholding op", "use ridge detection", "high contrast",
        "low contrast", "line width", "minimum line length",
        "mitochondrial footprint", "branch length mean",
        "branch length median", "branch length stdevp",
        "summed branch lengths mean", "summed branch lengths median",
        "summed branch lengths stdevp", "network branches mean",
        "network branches median", "network branches stdevp"
    ]

    # Perform any preprocessing steps...
    status.showStatus("Preprocessing image...")
    if preprocessor_path != None:
        if preprocessor_path.exists():
            preprocessor_thread = scripts.run(preprocessor_path, True)
            preprocessor_thread.get()
            imp = WindowManager.getCurrentImage()
    else:
        pass

    # Store all of the analysis parameters in the table
    if preprocessor_path == None:
        preprocessor_str = ""
    else:
        preprocessor_str = preprocessor_path.getCanonicalPath()
    if postprocessor_path == None:
        postprocessor_str = ""
    else:
        postprocessor_str = postprocessor_path.getCanonicalPath()

    output_parameters["preprocessor path"] = preprocessor_str
    output_parameters["post processor path"] = postprocessor_str
    output_parameters["thresholding op"] = threshold_method
    output_parameters["use ridge detection"] = str(use_ridge_detection)
    output_parameters["high contrast"] = rd_max
    output_parameters["low contrast"] = rd_min
    output_parameters["line width"] = rd_width
    output_parameters["minimum line length"] = rd_length

    # Create an ImgPlus copy of the ImagePlus for thresholding with ops...
    status.showStatus("Determining threshold level...")
    imp_title = imp.getTitle()
    slices = imp.getNSlices()
    frames = imp.getNFrames()
    output_parameters["image title"] = imp_title
    imp_calibration = imp.getCalibration()
    imp_channel = Duplicator().run(imp, imp.getChannel(), imp.getChannel(), 1,
                                   slices, 1, frames)
    img = ImageJFunctions.wrap(imp_channel)

    # Determine the threshold value if not manual...
    binary_img = ops.run("threshold.%s" % threshold_method, img)
    binary = ImageJFunctions.wrap(binary_img, 'binary')
    binary.setCalibration(imp_calibration)
    binary.setDimensions(1, slices, 1)

    # Get the total_area
    if binary.getNSlices() == 1:
        area = binary.getStatistics(Measurements.AREA).area
        area_fraction = binary.getStatistics(
            Measurements.AREA_FRACTION).areaFraction
        output_parameters[
            "mitochondrial footprint"] = area * area_fraction / 100.0
    else:
        mito_footprint = 0.0
        for slice in range(1, binary.getNSlices() + 1):  # ImageJ slice indices are 1-based
            binary.setSliceWithoutUpdate(slice)
            area = binary.getStatistics(Measurements.AREA).area
            area_fraction = binary.getStatistics(
                Measurements.AREA_FRACTION).areaFraction
            mito_footprint += area * area_fraction / 100.0
        output_parameters[
            "mitochondrial footprint"] = mito_footprint * imp_calibration.pixelDepth

    # Generate skeleton from masked binary ...
    # Generate ridges first if using Ridge Detection
    if use_ridge_detection and (imp.getNSlices() == 1):
        skeleton = ridge_detect(imp, rd_max, rd_min, rd_width, rd_length)
    else:
        skeleton = Duplicator().run(binary)
        IJ.run(skeleton, "Skeletonize (2D/3D)", "")

    # Analyze the skeleton...
    status.showStatus("Setting up skeleton analysis...")
    skel = AnalyzeSkeleton_()
    skel.setup("", skeleton)
    status.showStatus("Analyzing skeleton...")
    skel_result = skel.run()

    status.showStatus("Computing graph based parameters...")
    branch_lengths = []
    summed_lengths = []
    graphs = skel_result.getGraph()

    for graph in graphs:
        summed_length = 0.0
        edges = graph.getEdges()
        for edge in edges:
            length = edge.getLength()
            branch_lengths.append(length)
            summed_length += length
        summed_lengths.append(summed_length)

    output_parameters["branch length mean"] = eztables.statistical.average(
        branch_lengths)
    output_parameters["branch length median"] = eztables.statistical.median(
        branch_lengths)
    output_parameters["branch length stdevp"] = eztables.statistical.stdevp(
        branch_lengths)

    output_parameters[
        "summed branch lengths mean"] = eztables.statistical.average(
            summed_lengths)
    output_parameters[
        "summed branch lengths median"] = eztables.statistical.median(
            summed_lengths)
    output_parameters[
        "summed branch lengths stdevp"] = eztables.statistical.stdevp(
            summed_lengths)

    branches = list(skel_result.getBranches())
    output_parameters["network branches mean"] = eztables.statistical.average(
        branches)
    output_parameters["network branches median"] = eztables.statistical.median(
        branches)
    output_parameters["network branches stdevp"] = eztables.statistical.stdevp(
        branches)

    # Create/append results to a ResultsTable...
    status.showStatus("Display results...")
    if "Mito Morphology" in list(WindowManager.getNonImageTitles()):
        rt = WindowManager.getWindow(
            "Mito Morphology").getTextPanel().getOrCreateResultsTable()
    else:
        rt = ResultsTable()

    rt.incrementCounter()
    for key in output_order:
        rt.addValue(key, str(output_parameters[key]))

    # Add user comments intelligently
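    # e.g. user_comment = "genotype=WT,treatment=control" yields one extra column
    # per key=value pair; any other text goes into a single "Comment" column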
    if user_comment != None and user_comment != "":
        if "=" in user_comment:
            comments = user_comment.split(",")
            for comment in comments:
                rt.addValue(comment.split("=")[0], comment.split("=")[1])
        else:
            rt.addValue("Comment", user_comment)

    rt.show("Mito Morphology")

    # Create overlays on the original ImagePlus and display them if 2D...
    if imp.getNSlices() == 1:
        status.showStatus("Generate overlays...")
        IJ.run(skeleton, "Green", "")
        IJ.run(binary, "Magenta", "")

        skeleton_ROI = ImageRoi(0, 0, skeleton.getProcessor())
        skeleton_ROI.setZeroTransparent(True)
        skeleton_ROI.setOpacity(1.0)
        binary_ROI = ImageRoi(0, 0, binary.getProcessor())
        binary_ROI.setZeroTransparent(True)
        binary_ROI.setOpacity(0.25)

        overlay = Overlay()
        overlay.add(binary_ROI)
        overlay.add(skeleton_ROI)

        imp.setOverlay(overlay)
        imp.updateAndDraw()

    # Generate a 3D model if a stack
    if imp.getNSlices() > 1:

        univ = Image3DUniverse()
        univ.show()

        pixelWidth = imp_calibration.pixelWidth
        pixelHeight = imp_calibration.pixelHeight
        pixelDepth = imp_calibration.pixelDepth

        # Add end points in yellow
        end_points = skel_result.getListOfEndPoints()
        end_point_list = []
        for p in end_points:
            end_point_list.append(
                Point3f(p.x * pixelWidth, p.y * pixelHeight, p.z * pixelDepth))
        univ.addIcospheres(end_point_list, Color3f(255.0, 255.0, 0.0), 2,
                           1 * pixelDepth, "endpoints")

        # Add junctions in magenta
        junctions = skel_result.getListOfJunctionVoxels()
        junction_list = []
        for p in junctions:
            junction_list.append(
                Point3f(p.x * pixelWidth, p.y * pixelHeight, p.z * pixelDepth))
        univ.addIcospheres(junction_list, Color3f(255.0, 0.0, 255.0), 2,
                           1 * pixelDepth, "junctions")

        # Add the lines in green
        graphs = skel_result.getGraph()
        for graph_index, graph in enumerate(graphs):
            edges = graph.getEdges()
            for edge_index, edge in enumerate(edges):
                branch_points = []
                for p in edge.getSlabs():
                    branch_points.append(
                        Point3f(p.x * pixelWidth, p.y * pixelHeight,
                                p.z * pixelDepth))
                univ.addLineMesh(branch_points, Color3f(0.0, 255.0, 0.0),
                                 "branch-%s-%s" % (graph_index, edge_index), True)

        # Add the surface
        univ.addMesh(binary)
        univ.getContent("binary").setTransparency(0.5)

    # Perform any postprocessing steps...
    status.showStatus("Running postprocessing...")
    if postprocessor_path != None:
        if postprocessor_path.exists():
            postprocessor_thread = scripts.run(postprocessor_path, True)
            postprocessor_thread.get()

    else:
        pass

    status.showStatus("Done analysis!")
Example #7
# Note that a java-based implementation would be significantly faster.

from net.imglib2.img.array import ArrayImgs
from org.scijava.vecmath import Point3f
from net.imglib2.img.display.imagej import ImageJFunctions as IL
from net.imglib2.view import Views
from net.imglib2.type.numeric.integer import UnsignedByteType
from jarray import zeros
from net.imglib2.algorithm.morphology.distance import DistanceTransform


# First 3D mask: a sphere
img1 = ArrayImgs.unsignedBytes([100, 100, 100])
p = zeros(3, 'l')
cursor = img1.cursor()
middle = Point3f(49.5,49.5, 49.5)
distance_sq = float(30 * 30)

while cursor.hasNext():
  cursor.fwd()
  cursor.localize(p)
  if middle.distanceSquared(Point3f(p[0], p[1], p[2])) < distance_sq:
    cursor.get().setOne()
  else:
    cursor.get().setZero()

imp1 = IL.wrap(img1, "sphere")
imp1.setDisplayRange(0, 1)
imp1.show()

Example #8
from net.imglib2 import KDTree, RealPoint, Cursor, RandomAccessible
from itertools import imap
from functools import partial
import operator
from net.imglib2.type.numeric.integer import UnsignedByteType
from net.imglib2.neighborsearch import NearestNeighborSearchOnKDTree
from net.imglib2.util import Intervals
from fiji.scripting import Weaver
from java.util import ArrayList
from net.imglib2.img import Img
# imports reused from the previous example (needed by the code below)
from net.imglib2.img.array import ArrayImgs
from org.scijava.vecmath import Point3f
from jarray import zeros

# First 3D mask: a sphere
img1 = ArrayImgs.unsignedBytes([100, 100, 100])
p = zeros(3, 'l')
cursor = img1.cursor()
middle = Point3f(49.5, 49.5, 49.5)
distance_sq = float(30 * 30)

w = Weaver.method(
    """
public void fillSphere(final Cursor cursor, final Point3f middle, final float distance_sq) {
  final long[] p = new long[cursor.numDimensions()];
  while (cursor.hasNext()) {
    cursor.fwd();
    cursor.localize(p);
    final UnsignedByteType t = (UnsignedByteType) cursor.get();
    if (middle.distanceSquared(new Point3f( (float)p[0], (float)p[1], (float)p[2] ) ) < distance_sq) {
      t.setOne();
    } else {
      t.setZero();
    }
  }
}
""", [Cursor, Point3f, UnsignedByteType])  # closing of the code string, the class list and the call below reconstruct the truncated snippet (an assumption)

# invoke the woven (java-compiled) method to fill the sphere mask
w.fillSphere(cursor, middle, distance_sq)
Example #9
print "width=%f, depth=%f" % (width, depth)
# ImgLib.wrap(iso).show()

# Find Peaks by difference of Gaussian
# Perform a difference of Gaussian on the given Image, and this class itself becomes the List of found peaks, each as a float[] array that specifies its position
# https://javadoc.scijava.org/Fiji/script/imglib/analysis/DoGPeaks.html
# https://javadoc.scijava.org/Fiji/mpicbg/imglib/algorithm/scalespace/DifferenceOfGaussian.html
# Extracts local minima and maxima of a certain size: it computes the difference of
# Gaussian of an Image and detects all local minima and maxima in a 3x3x3...x3
# environment, returned as an ArrayList of DifferenceOfGaussianPeaks. The two sigmas
# define the scale on which extrema are identified and correlate with the size of the
# object; extrema of other sizes are also found, but those of the targeted size have
# the higher absolute values. The values of the difference-of-Gaussian image also
# depend on the distance between the two sigmas (a normalization, if necessary, can
# be found in the ScaleSpace class). Technical detail: findPeaks(Image img) can be
# called on any image if the image from which the extrema should be computed already exists.
sigma = (cell_diameter / cal.pixelWidth) * scale2D
peaks = DoGPeaks(iso, sigma, sigma * 0.5, minPeak, 1)
print "Found", len(peaks), "peaks"
# print peaks

# Convert the peaks into points in calibrated image space
ps = []
for peak in peaks:
    p = Point3f(peak)  # extract the peak's (x, y, z) coordinates
    p.scale(width * 1 / scale2D)
    ps.append(p)

print ps

# Show the peaks as spheres in 3D, along with orthoslices:
# Image3DUniverse(int width, int height): Constructs a new universe with the specified width and height.
univ = Image3DUniverse(512, 512)
# addIcospheres(List<Point3f> points, Color3f color, int subdivisions, float radius, String name)
univ.addIcospheres(ps, Color3f(1, 0, 0), 2, cell_diameter / 2,
                   "Cells").setLocked(True)
univ.addOrthoslice(imp).setLocked(True)
univ.show()
def compute_and_update_frame_translations_dt(imp, channel, method, bg_level, dt, process, roiz, shifts = None):
  """ imp contains a hyper virtual stack, and we want to compute
  the X,Y,Z translation between every t and t+dt time points in it
  using the given preferred channel. 
  if shifts were already determined at other (lower) dt 
  they will be used and updated.
  """
  nt = imp.getNFrames()
  # get roi (could be None)
  roi = imp.getRoi()
  # init shifts
  if shifts == None:
    shifts = []
    for t in range(nt):
      shifts.append(Point3f(0,0,0))
  # compute shifts
  IJ.showProgress(0)
  for t in range(dt, nt+dt, dt):
    if t > nt-1: # together with the range extending to nt+dt above, this ensures the last time points are not missed
      t = nt-1 # nt-1 is the last shift (0-based)
    IJ.log("      between frames "+str(t-dt+1)+" and "+str(t+1))      
    # get (cropped and processed) image at t-dt
    start_time = time.time()   
    roi1, roiz1 = shift_roi(imp, roi, roiz, shifts[t-dt])
    imp1 = extract_frame_process_roi(imp, t+1-dt, channel, process, roi1, roiz1)
    # get (cropped and processed) image at t
    roi2, roiz2 = shift_roi(imp, roi, roiz, shifts[t])
    imp2 = extract_frame_process_roi(imp, t+1, channel, process, roi2, roiz2)
    print("    prepared images in [s]: "+str(round(time.time()-start_time,3)))
    if roi:
      print "    ROI at frame",t-dt+1,"is", roi1.getBounds().x, roi1.getBounds().y, roiz1.z
      print "    ROI at frame",t+1,"is", roi2.getBounds().x, roi2.getBounds().y, roiz2.z   
    # compute shift
    start_time = time.time()  
    if (method == 1):   
      local_new_shift = compute_shift_using_phasecorrelation(imp1, imp2, bg_level)
    elif (method == 2):
      local_new_shift = compute_shift_using_center_of_mass(imp1, imp2, bg_level)
    print("    computed shift in [s]: "+str(round(time.time()-start_time,3)))
    if roi: # total shift is shift of rois plus measured drift
      print "    measured additional shift of",local_new_shift,"on top of roi shift:",shift_between_rois(roi2, roiz2, roi1, roiz1)
      local_new_shift = add_Point3f(local_new_shift, shift_between_rois(roi2, roiz2, roi1, roiz1))
    print("    total local shift: "+str(round(local_new_shift.x,3))+" "+str(round(local_new_shift.y,3))+" "+str(round(local_new_shift.z,3)))
    # determine the shift that we knew already
    local_shift = subtract_Point3f(shifts[t],shifts[t-dt])
    # compute difference between new and old measurement (which come from different dt)   
    add_shift = subtract_Point3f(local_new_shift,local_shift)
    #print "++ old shift between %s and %s: dx=%s, dy=%s, dz=%s" % (int(t-dt+1),int(t+1),local_shift.x,local_shift.y,local_shift.z)
    #print "++ add shift between %s and %s: dx=%s, dy=%s, dz=%s" % (int(t-dt+1),int(t+1),add_shift.x,add_shift.y,add_shift.z)
    # update shifts from t-dt to the end (assuming that the measured local shift will persist till the end)
    for i,tt in enumerate(range(t-dt,nt)):
      # for i>dt the expression below is basically a linear drift prediction for the frames at tt>t
      # this is only important for predicting the best shift of the ROI
      # the drifts for i>dt will be corrected by the next measurements
      shifts[tt].x += 1.0*i/dt * add_shift.x
      shifts[tt].y += 1.0*i/dt * add_shift.y
      shifts[tt].z += 1.0*i/dt * add_shift.z
      # print "updated shift till frame",tt+1,"is",shifts[tt].x,shifts[tt].y,shifts[tt].z
    IJ.showProgress(1.0*t/(nt+1))
  
  IJ.showProgress(1)
  return shifts