def cell_processor(imp_particles, imp_measure, folder, impname, channel_name):

    # Get ROI manager instance, save to zip
    rm = RoiManager.getInstance()
    if not rm:
        rm = RoiManager()
    rm.reset()

    imp_particles.show()

    # Wait for user to add ROIs to manager
    Prefs.multiPointMode = True
    pause = WaitForUserDialog(
        "Select ROIs of interest and add to manager \n \nPress OK to continue")
    pause.show()

    # get the ROI manager and save
    rm = RoiManager.getInstance()
    rm.runCommand("save selected",
                  os.path.join(folder, impname + "_cells_ROIs.zip"))

    # select image to measure and show
    imp_measure.show()
    pixel_collector(rm, imp_measure, channel_name, impname, folder)

    return rm
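A minimal usage sketch, assuming the imports used above (os, RoiManager, WaitForUserDialog, Prefs) plus IJ, and that the pixel_collector helper is defined elsewhere in the same script; the file paths and channel name below are placeholders:

from ij import IJ
import os

# placeholder paths: one channel used for segmentation, one channel to measure
imp_particles = IJ.openImage("/path/to/segmentation_channel.tif")
imp_measure = IJ.openImage("/path/to/measurement_channel.tif")
out_folder = "/path/to/output"

impname = os.path.splitext(imp_particles.getTitle())[0]
rm = cell_processor(imp_particles, imp_measure, out_folder, impname, "GFP")
print rm.getCount(), "cell ROIs saved"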
Example #2
def assisted_SNTtrace(context, imp):
	'''
	Calls SNT in a given img for user assisted tracing.
	Uses WaitForUserDialog to confirm the end of tracing operations.
	Returns list of SNT Paths.
	'''
	snt = prepare_SNT(context, imp)
	pafm = snt.getPathAndFillManager()
	dialog = WaitForUserDialog("Go to next step...", "Press ok when tracing is done.")
	dialog.show()
	if dialog.escPressed():
		raise RuntimeError("Canceled by the user.")

	SNTpaths = pafm.getPaths()

	return SNTpaths
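A hedged usage sketch, assuming prepare_SNT is defined elsewhere and that context is a SciJava Context already available in scope (e.g. from a script parameter):

from ij import IJ

imp = IJ.getImage()
paths = assisted_SNTtrace(context, imp)  # context: SciJava Context (assumed to exist in scope)
print "Traced", len(paths), "paths"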
Example #3
def auto_threshold(imp, ch_name, is_auto_thresh, method="IsoData"):
    if is_auto_thresh:
        IJ.setAutoThreshold(imp, "{} dark".format(method))
        thres_min = imp.getProcessor().getMinThreshold()
    else:
        imp.show()
        IJ.run("Threshold...")
        # wait for the user to set the threshold manually
        wu = WaitForUserDialog("Set manual threshold for {}".format(ch_name), "Use slider to adjust threshold and press OK")
        wu.show()
        
        thres_min = imp.getProcessor().getMinThreshold()
        thres_max = imp.getProcessor().getMaxThreshold()

        IJ.log(" -- {}: Min threshold {}".format(ch_name, thres_min))

        IJ.setThreshold(imp, thres_min, thres_max)
        imp.hide()
        WindowManager.getWindow("Threshold").close()

    return thres_min
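A short usage sketch, assuming an image is already open; the channel label is a placeholder:

from ij import IJ

imp = IJ.getImage()
# automatic branch, using Otsu instead of the default IsoData
thres_min = auto_threshold(imp, "DAPI", True, method="Otsu")
IJ.log("Lower threshold for DAPI: {}".format(thres_min))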
def filter_original():
	# Work on the currently active image; referenced again below when closing it
	image = IJ.getImage()
	# Filter out noise, keep edges
	IJ.run("Median...", "radius=4")
	# Enhance contrast
	IJ.run("Enhance Local Contrast (CLAHE)", "blocksize=19 histogram=256 maximum=3 mask=*None*");
	# Find edges
	IJ.run("Find Edges");
	# Threshold...conservatively!
	IJ.run('Threshold...', 'Default Dark')
	dial = WaitForUserDialog('Please threshold conservatively.')
	dial.show()
	
	# Now run the hough circles
	IJ.run("Hough Circles", "minimum=8 maximum=9 increment=1 number=0 threshold=0")
	image.changes = False
	image.close()
	
	# Select the hough space image
	dial = WaitForUserDialog('Select the hough space image.')
	dial.show()
	
	# Filter out all but the small points
	#IJ.run("Bandpass Filter...", "filter_large=7 filter_small=4 suppress=None tolerance=5 autoscale saturate")
	
	# Threshold the centers of the points
	IJ.run('Threshold...', 'Default Dark')
	dial = WaitForUserDialog('Please threshold circle centers. Should be near max.')
	dial.show()
	
	# Select all the particles, deal with ROI's
	
	IJ.run("Analyze Particles...", "size=0-Infinity include add");
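A hedged follow-up sketch, assuming the Analyze Particles call above added the detected circle centres to the ROI manager:

from ij.plugin.frame import RoiManager

rm = RoiManager.getInstance()
if rm is not None:
	print rm.getCount(), "candidate circle centres in the ROI manager"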
Example #5
    def process(self,imp):
        # extract nucleus channel, convert to 8-bit and bin 4x in x and y
        imp.setC(self.nucleusChannel)
        ip = imp.getChannelProcessor().duplicate()
        ip = ip.convertToByteProcessor()
        ip = ip.bin(4)
        nucleus = ImagePlus("nucleus_channel", ip)

        # threshold image and separate clumped nuclei
        IJ.run(nucleus, "Auto Threshold", "method=Otsu white setthreshold show");
        IJ.run(nucleus, "Make Binary", "thresholded remaining black");
        IJ.run(nucleus, "Watershed", "");

        directory = imp.getTitle()
        directory = directory.replace(" ", "_")\
            .replace(",", "_")\
            .replace("#", "_series")\
            .replace("...", "")\
            .replace(".","_")
        directory = os.path.join(self.exportDir, directory)
        sliceDirectory = os.path.join(directory, "slices")
        print directory
        print sliceDirectory
        if not os.path.exists(sliceDirectory):
            os.makedirs(sliceDirectory)

        # Create a table to store the results
        table = ResultsTable()

        # Create a hidden ROI manager, to store a ROI for each blob or cell
        #roim = RoiManager(True)

        # remove small particles and border particles
        pa = ParticleAnalyzer(\
            ParticleAnalyzer.ADD_TO_MANAGER | ParticleAnalyzer.EXCLUDE_EDGE_PARTICLES,\
            Measurements.CENTER_OF_MASS,\
            table,\
            self.minArea, self.maxArea,\
            0.0,1.0)

        if pa.analyze(nucleus):
            print "All ok, number of particles: ", table.size()
        else:
            print "There was a problem in analyzing", imp, nucleus
        table.save(os.path.join(directory, "rt.csv"))

        # read the center of mass coordinates
        cmx = table.getColumn(0)
        cmy = table.getColumn(1)

        if self.debug:
            imp.show()

        i=0
        for i in range(0, min(self.nCells,table.size())):
            # ROI around the cell
            cmx = table.getValue("XM",i)
            cmy = table.getValue("YM",i)
            x = 4 * cmx - (self.boxSize - 1) / 2
            y = 4 * cmy - (self.boxSize - 1) / 2
            if (x < self.edge or y < self.edge or x > imp.getWidth() - self.edge or y > imp.getHeight() - self.edge):
                continue
            roi = Roi(x,y,self.boxSize,self.boxSize)
            imp.setRoi(roi, False)

            cellStack = ImageStack(self.boxSize, self.boxSize)

            for z in range(1, imp.getNSlices() + 1):
                imp.setSlice(z)
                for c in range(1, imp.getNChannels() + 1):
                    imp.setC(c)
                    # copy ROI to stack
                    imp.copy()
                    impSlice = imp.getClipboard()
                    cellStack.addSlice(impSlice.getProcessor())
                    if self.slices:
                        sliceTitle = "cell_%s_z%s_c%s" % (str(i).zfill(4), str(z).zfill(3), str(c))
                        print sliceTitle
                        IJ.saveAsTiff(impSlice, os.path.join(sliceDirectory, sliceTitle))
                    impSlice.close()

            title = "cell_" + str(i).zfill(4)
            cell = ImagePlus(title, cellStack)

            # save ROI image
            IJ.saveAsTiff(cell, os.path.join(directory, title))
            cell.close()

            if self.debug:
                imp.updateAndDraw()
                wait = WaitForUserDialog("particle done")
                wait.show()
def main():
    # define here which membrane indices will be used in the analysis, with last index the "control" index
    membrane_indices = [-1, 0, 1, 3]

    # for now, work with frontmost open image...
    imp = IJ.getImage()
    im_title = imp.getTitle()
    settings = MembraneEvolutionAnalysisSettings(
        membrane_indices=membrane_indices)
    settings.loadPersistedSettings()

    timestamp = datetime.strftime(datetime.now(), '%Y-%m-%d %H-%M-%S')
    DirectoryChooser.setDefaultDirectory(settings.output_path)
    dc = DirectoryChooser('Select the root folder for saving output')
    output_root = dc.getDirectory()
    if output_root is None:
        raise IOError('no output path chosen')
    settings.output_path = output_root

    # get calibration
    cal = imp.getCalibration()
    if cal.getTimeUnit() == "sec":
        cal.setTimeUnit('s')

    # pop up a dialog prompting for selection of zero time point, frame interval, and time step for analysis
    time_steps_not_ok = True
    while time_steps_not_ok:
        dialog = NonBlockingGenericDialog("Determine time parameters...")
        dialog.addNumericField("0 timepoint frame (1-index): ",
                               settings.zero_timepoint_frame, 0)
        dialog.addNumericField("Acquisition time step (s): ",
                               cal.frameInterval,
                               2)  # assume stored in seconds
        dialog.addNumericField(
            "Time step for analysis (s): ",
            cal.frameInterval * settings.analysis_frame_step, 2)
        dialog.showDialog()

        if dialog.wasCanceled():
            return

        zero_f = dialog.getNextNumber()
        acq_t_step = dialog.getNextNumber()
        analysis_t_step = dialog.getNextNumber()
        if acq_t_step != 0 and analysis_t_step != 0:
            analysis_frame_step = analysis_t_step / acq_t_step

            if round(analysis_frame_step) == analysis_frame_step:
                time_steps_not_ok = False
                settings.zero_timepoint_frame = zero_f
                settings.analysis_frame_step = analysis_frame_step
        if time_steps_not_ok:
            warning_dlg = GenericDialog("Error!")
            warning_dlg.addMessage(
                "Analysis time step must be an integer multiple of acquisition time steps, and neither should be zero!!"
            )
            warning_dlg.setOKLabel("Try again...")
            warning_dlg.showDialog()

            if warning_dlg.wasCanceled():
                return

    start_frame = int(((zero_f - 1) % analysis_frame_step) + 1)
    end_frame = int(imp.getNFrames() -
                    (imp.getNFrames() - zero_f) % analysis_frame_step)
    frames = [
        f + 1
        for f in range(start_frame - 1, end_frame, int(analysis_frame_step))
    ]
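    # Worked example: with zero_f = 5, analysis_frame_step = 2 and a 20-frame stack,
    # start_frame = ((5 - 1) % 2) + 1 = 1 and end_frame = 20 - (20 - 5) % 2 = 19,
    # so frames = [1, 3, 5, ..., 19] and the zero timepoint (frame 5) is retained.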
    print("frames = " + str(frames))
    imp.killRoi()
    analysis_imp = SubstackMaker().makeSubstack(
        imp,
        str(start_frame) + "-" + str(end_frame) + "-" +
        str(int(analysis_frame_step)))
    imp.changes = False
    imp.close()
    analysis_imp.show()
    drawn_membranes = [
        TimepointsMembranes(input_image_title=im_title,
                            time_point_s=(t - 1) * acq_t_step) for t in frames
    ]
    membranes_listener = UpdateRoiImageListener(drawn_membranes)
    analysis_imp.addImageListener(membranes_listener)

    # now attach roi listener to store all 0th membranes after showing a waitforuserdialog to prompt continuation
    IJ.setTool("freeline")
    for membrane_idx in membrane_indices:
        #		if membrane_idx>50:
        #			IJ.setTool("line");
        analysis_imp.killRoi()
        membranes_listener.resetLastFrame()
        membranes_listener.setCurrentMembraneIndex(membrane_idx)
        analysis_imp.setZ(1)
        continue_dlg = WaitForUserDialog(
            "Continue?", "Click OK once all the " + str(membrane_idx) +
            "-index membranes have been drawn")
        continue_dlg.show()
        membranes_listener.imageUpdated(analysis_imp)
        drawn_membranes = membranes_listener.getDrawnMembraneTimepointsList()
        json_path = os.path.join(output_root,
                                 "Membranes " + timestamp + ".json")
        f = open(json_path, 'w+')
        try:
            json.dump(drawn_membranes, f, default=encode_membrane)
        finally:
            f.close()
        # save csv containing membrane measurements for current membrane index
        csv_path = os.path.join(
            output_root, ("Membrane measurements " + timestamp + ".csv"))
        if membrane_idx == membrane_indices[0]:
            try:
                f = open(csv_path, 'wb')
                writer = csv.writer(f)
                writer.writerow([
                    "Membrane index", ("Time point, " + cal.getTimeUnit()),
                    ("Membrane length, " + cal.getUnit()),
                    ("Euclidean length, " + cal.getUnit()),
                    "Membrane sinuoisty"
                ])
            finally:
                f.close()
        try:
            f = open(csv_path, 'ab')
            writer = csv.writer(f)
            for mems in drawn_membranes:
                mem = mems.getMembrane(membrane_idx)
                if mem is not None:
                    writer.writerow([
                        membrane_idx, mems.time_point_s,
                        mem.getPathLength() * cal.pixelWidth,
                        mem.getEuclidean() * cal.pixelWidth,
                        mem.getSinuosity()
                    ])
        finally:
            f.close()

    settings.persistSettings()
    settings.save_settings()
    print("Finished getting all membranes with indices " +
          str(membrane_indices))
    analysis_imp.close()
Example #7
def runTrackMate(imp):
    import fiji.plugin.trackmate.Settings as Settings
    import fiji.plugin.trackmate.Model as Model
    import fiji.plugin.trackmate.SelectionModel as SelectionModel
    import fiji.plugin.trackmate.TrackMate as TrackMate
    import fiji.plugin.trackmate.Logger as Logger
    import fiji.plugin.trackmate.detection.DetectorKeys as DetectorKeys
    import fiji.plugin.trackmate.detection.DogDetectorFactory as DogDetectorFactory
    import fiji.plugin.trackmate.tracking.sparselap.SparseLAPTrackerFactory as SparseLAPTrackerFactory
    import fiji.plugin.trackmate.tracking.LAPUtils as LAPUtils
    import fiji.plugin.trackmate.visualization.hyperstack.HyperStackDisplayer as HyperStackDisplayer
    import fiji.plugin.trackmate.features.FeatureFilter as FeatureFilter
    import fiji.plugin.trackmate.features.FeatureAnalyzer as FeatureAnalyzer
    import fiji.plugin.trackmate.features.spot.SpotContrastAndSNRAnalyzerFactory as SpotContrastAndSNRAnalyzerFactory
    import fiji.plugin.trackmate.action.ExportStatsToIJAction as ExportStatsToIJAction
    import fiji.plugin.trackmate.io.TmXmlReader as TmXmlReader
    import fiji.plugin.trackmate.action.ExportTracksToXML as ExportTracksToXML
    import fiji.plugin.trackmate.io.TmXmlWriter as TmXmlWriter
    import fiji.plugin.trackmate.features.ModelFeatureUpdater as ModelFeatureUpdater
    import fiji.plugin.trackmate.features.SpotFeatureCalculator as SpotFeatureCalculator
    import fiji.plugin.trackmate.features.spot.SpotContrastAndSNRAnalyzer as SpotContrastAndSNRAnalyzer
    import fiji.plugin.trackmate.features.spot.SpotIntensityAnalyzerFactory as SpotIntensityAnalyzerFactory
    import fiji.plugin.trackmate.features.track.TrackSpeedStatisticsAnalyzer as TrackSpeedStatisticsAnalyzer
    import fiji.plugin.trackmate.util.TMUtils as TMUtils
    import fiji.plugin.trackmate.visualization.trackscheme.TrackScheme as TrackScheme
    import fiji.plugin.trackmate.visualization.PerTrackFeatureColorGenerator as PerTrackFeatureColorGenerator

    #-------------------------
    # Instantiate model object
    #-------------------------

    nFrames = imp.getNFrames()
    model = Model()

    # Set logger
    #model.setLogger(Logger.IJ_LOGGER)

    #------------------------
    # Prepare settings object
    #------------------------

    settings = Settings()
    settings.setFrom(imp)

    # Configure detector
    settings.detectorFactory = DogDetectorFactory()
    settings.detectorSettings = {
        DetectorKeys.KEY_DO_SUBPIXEL_LOCALIZATION: True,
        DetectorKeys.KEY_RADIUS: 12.30,
        DetectorKeys.KEY_TARGET_CHANNEL: 1,
        DetectorKeys.KEY_THRESHOLD: 100.,
        DetectorKeys.KEY_DO_MEDIAN_FILTERING: False,
    }

    # Configure tracker
    settings.trackerFactory = SparseLAPTrackerFactory()
    settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap()
    settings.trackerSettings['LINKING_MAX_DISTANCE'] = 10.0
    settings.trackerSettings['GAP_CLOSING_MAX_DISTANCE'] = 10.0
    settings.trackerSettings['MAX_FRAME_GAP'] = 3

    # Add the analyzers for some spot features.
    # You need to configure TrackMate with analyzers that will generate
    # the data you need.
    # Here we just add two analyzers for spot, one that computes generic
    # pixel intensity statistics (mean, max, etc...) and one that computes
    # an estimate of each spot's SNR.
    # The trick here is that the second one requires the first one to be in
    # place. Be aware of this kind of gotchas, and read the docs.
    settings.addSpotAnalyzerFactory(SpotIntensityAnalyzerFactory())
    settings.addSpotAnalyzerFactory(SpotContrastAndSNRAnalyzerFactory())

    # Add an analyzer for some track features, such as the track mean speed.
    settings.addTrackAnalyzer(TrackSpeedStatisticsAnalyzer())

    settings.initialSpotFilterValue = 1

    print(str(settings))

    #----------------------
    # Instantiate trackmate
    #----------------------

    trackmate = TrackMate(model, settings)

    #------------
    # Execute all
    #------------

    ok = trackmate.checkInput()
    if not ok:
        sys.exit(str(trackmate.getErrorMessage()))

    ok = trackmate.process()
    if not ok:
        sys.exit(str(trackmate.getErrorMessage()))

    #----------------
    # Display results
    #----------------

    selectionModel = SelectionModel(model)
    displayer = HyperStackDisplayer(model, selectionModel, imp)
    displayer.render()
    displayer.refresh()

    #---------------------
    # Select correct spots
    #---------------------

    # Prepare display.
    sm = SelectionModel(model)
    color = PerTrackFeatureColorGenerator(model, 'TRACK_INDEX')

    # launch TrackScheme to select spots and tracks
    trackscheme = TrackScheme(model, sm)
    trackscheme.setDisplaySettings('TrackColoring', color)
    trackscheme.render()

    # Update image with TrackScheme commands
    view = HyperStackDisplayer(model, sm, imp)
    view.setDisplaySettings('TrackColoring', color)
    view.render()

    # Wait for the user to select correct spots and tracks before collecting data
    dialog = WaitForUserDialog(
        "Spots",
        "Delete incorrect spots and edit tracks if necessary. (Press ESC to cancel analysis)"
    )
    dialog.show()
    if dialog.escPressed():
        IJ.run("Remove Overlay", "")
        imp.close()
        return ([], nFrames)

    # The feature model, that stores edge and track features.
    #model.getLogger().log('Found ' + str(model.getTrackModel().nTracks(True)) + ' tracks.')
    fm = model.getFeatureModel()
    crds_perSpot = []
    for id in model.getTrackModel().trackIDs(True):

        # Fetch the track feature from the feature model.(remove """ to enable)
        """v = fm.getTrackFeature(id, 'TRACK_MEAN_SPEED')
	    model.getLogger().log('')
	    model.getLogger().log('Track ' + str(id) + ': mean velocity = ' + str(v) + ' ' + model.getSpaceUnits() + '/' + model.getTimeUnits())"""
        trackID = str(id)
        track = model.getTrackModel().trackSpots(id)

        spot_track = {}
        for spot in track:
            sid = spot.ID()
            # Fetch spot features directly from spot.
            x = spot.getFeature('POSITION_X')
            y = spot.getFeature('POSITION_Y')
            t = spot.getFeature('FRAME')
            q = spot.getFeature('QUALITY')
            snr = spot.getFeature('SNR')
            mean = spot.getFeature('MEAN_INTENSITY')
            #model.getLogger().log('\tspot ID = ' + str(sid) + ', x='+str(x)+', y='+str(y)+', t='+str(t)+', q='+str(q) + ', snr='+str(snr) + ', mean = ' + str(mean))
            spot_track[t] = (x, y)
        crds_perSpot.append(spot_track)
        #print ("Spot", crds_perSpot.index(spot_track),"has the following coordinates:", crds_perSpot[crds_perSpot.index(spot_track)])
    return (crds_perSpot, nFrames)
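A short usage sketch, assuming a time-lapse image is open and that IJ and WaitForUserDialog are imported as in the surrounding script:

from ij import IJ

imp = IJ.getImage()
coords_per_spot, n_frames = runTrackMate(imp)
print "Collected", len(coords_per_spot), "spot tracks over", n_frames, "frames"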
Example #8
    img_width = imp.getWidth()
    img_height = imp.getHeight()

    bound_width = d['bounds']['maxX'] - d['bounds']['minX']
    bound_height = d['bounds']['maxY'] - d['bounds']['minY']

    scale = img_width * 1.0 / bound_width
    print 'scale', scale
    xvals = [(x - d['bounds']['minX']) * scale for x, y in coords[:-1]]
    yvals = [(y - d['bounds']['minY']) * scale for x, y in coords[:-1]]
    print xvals, yvals
    fpoly = PolygonRoi(xvals, yvals, Roi.POLYGON)
    imp.setRoi(fpoly)

    dlg = WaitForUserDialog('testing')
    dlg.show()
    roi = imp.getRoi()
    fpoly = roi.getFloatPolygon()
    coords = [[]]
    for x, y in zip(fpoly.xpoints, fpoly.ypoints):
        x = (x / scale) + d['bounds']['minX']
        y = (y / scale) + d['bounds']['minY']
        coords[0].append([x, y])
    for x, y in zip(fpoly.xpoints[:1], fpoly.ypoints[:1]):
        x = (x / scale) + d['bounds']['minX']
        y = (y / scale) + d['bounds']['minY']
        coords[0].append([x, y])

    d['roi']['coordinates'] = coords
    imp.close()
    fp = open(f, 'w')
Example #9
def wait(message=None):
    if message is None:
        message = 'press ok to continue'
    wfug = WaitForUserDialog('Waiting for user', message)
    wfug.show()
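For example:

wait()                                  # default "press ok to continue" prompt
wait("Adjust the ROI, then press OK")   # custom message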
imp = IJ.getImage()
directory = IJ.getDirectory("image")
f = str(imp.getTitle())
date, rows, columns, time_tif = f.split()  # deconstruct filename
time = time_tif.replace('tif','jpg')  # need to remove .tif from time
row_IDs = list(6*rows[0] + 6*rows[1])
column_IDs = [str(i) for i in 2*(range(int(columns[0]), int(columns[0]) + 6))]
zipped = zip(row_IDs, column_IDs)
sample_names = ["".join(values) for values in zipped]


IJ.setMinAndMax(1900, 11717) # adjust brightness and contrast
IJ.run("Apply LUT")
IJ.makeRectangle(200, 50, 730, 950) # initialize selection
dial = WaitForUserDialog("Adjust selection area")
dial.show() # ask user to place selection appropriately
IJ.run("Crop")
adjDir = os.path.join(directory, "Adjusted")
if not os.path.exists(adjDir):
	os.mkdir(adjDir)
adjImage = os.path.join(adjDir, "Adj " + f)
IJ.saveAs("Jpeg", adjImage)

## make ROI list
w = 130
h = 480
x_vals = 2*[10, 128, 246, 360, 478, 596]
y1 = 0
y2 = 470
y_vals = [y1, y1, y1, y1, y1, y1, y2, y2, y2, y2, y2, y2]
ROIs = zip(x_vals, y_vals)
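A hedged continuation sketch, assuming each (x, y) offset in ROIs is paired with the corresponding sample name and measured using the w x h box defined above:

for name, (x, y) in zip(sample_names, ROIs):
	IJ.makeRectangle(x, y, w, h)
	IJ.run("Measure")
	IJ.log("Measured lane " + name)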
from ij import IJ
from ij.io import SaveDialog
from ij.gui import WaitForUserDialog

theImage = IJ.getImage()
ip = theImage.getProcessor()

sd = SaveDialog("Select file to save results","injury",".csv")
saveFileName = sd.getFileName()
saveFileDir = sd.getDirectory()
if (saveFileName is not None):
	saveFilePath = saveFileDir + saveFileName
	savefilehandler = open(saveFilePath,"w")
	
	waitDialog = WaitForUserDialog("Use freeform tool to outline the piece of tissue")
	waitDialog.show()
	
	roi = theImage.getRoi()
	if (roi is not None):
		print type(roi)
		thePolygon = roi.getPolygon()
		boundRect = thePolygon.getBounds()
	
		for i in range(boundRect.x,boundRect.x+boundRect.width):
			pos_pixels = 0
			tot_pixels = 0
			IJ.showProgress(i-boundRect.x,boundRect.width)
			for j in range(boundRect.y,boundRect.y+boundRect.height):
				if thePolygon.contains(i,j):
					value = ip.getPixelValue(i,j)
					tot_pixels = tot_pixels + 1
Example #12
def run():
    ### Default arguments
    two_sarcomere_size = 25  # determined by filter used.
    rotation_angle = 0.0

    ### Get the image we'd like to work with.
    # Don't need to ask where to save the intermediate image since it'll just be saved in the matchedmyo folder anyway
    #   may change this though. In case they want to keep that image around.
    this_img = WindowManager.getCurrentImage()
    if this_img is None:
        ud = WaitForUserDialog(
            "Please select the image you would like to analyze.")
        ud.show()
        this_img = WindowManager.getCurrentImage()
    img_name = this_img.getTitle()

    matchedmyo_path = "/home/AD/dfco222/scratchMarx/matchedmyo/"  # this would be grabbed from the prompt
    gd = GenericDialog("Preprocessing Options")
    gd.addCheckbox("Automatic Resizing/Rotation", True)
    gd.addCheckbox("CLAHE", True)
    gd.addCheckbox("Normalization to Transverse Tubules", True)
    gd.showDialog()
    if gd.wasCanceled():
        return
    auto_resize_rotate = gd.getNextBoolean()
    clahe = gd.getNextBoolean()
    normalize = gd.getNextBoolean()

    if auto_resize_rotate:
        # Clear selection so it takes FFT of full image

        rotation_angle = auto_resize_angle_measure(this_img,
                                                   two_sarcomere_size)

    if clahe:
        clahe_args = "blocksize={} histogram=256 maximum=3 mask=*None* fast_(less_accurate)".format(
            two_sarcomere_size)
        IJ.run(this_img, "Enhance Local Contrast (CLAHE)", clahe_args)

    if normalize:
        # Ask the user to select a subsection of the image that looks healthy-ish.
        ud = WaitForUserDialog(
            "Please select a subsection exhibiting a measure of healthy TT structure using the Rectangle tool.\n"
            + " Only a single cell or several are needed.\n\n" +
            " Press 'OK' when finished.")
        ud.show()
        IJ.setTool("rectangle")

        # Duplicate the selected subsection.
        selection = this_img.crop()
        IJ.run(selection, "Duplicate...", "title=subsection.tif")

        # Grab the subsection image and rotate it.
        selection = WindowManager.getImage("subsection.tif")
        IJ.run(
            selection, "Rotate...",
            "angle={} grid=1 interpolation=Bicubic enlarge".format(
                rotation_angle))

        # Ask the user to select a bounding box that contains only tubules
        # NOTE: Need to get rid of initial selection since it's the entire image and it's annoying to click out of
        IJ.setTool("rectangle")
        IJ.run(selection, "Select None", "")
        ud = WaitForUserDialog(
            "Select a subsection of the image that contains only tubules and no membrane."
        )
        ud.show()

        # Grab the subsection ImagePlus
        selection = WindowManager.getCurrentImage()
        this_window = WindowManager.getActiveWindow()
        selection_small = selection.crop()
        IJ.run(selection, "Close", "")

        # NOTE: May not actually display this depending on how the macros work
        IJ.run(selection_small, "Duplicate...", "title=subsection_small.tif")

        # Smooth the selection using the single TT filter.
        # NOTE: It won't read in so we're just going to hard code it in since it's simple
        tt_filt_row = "0 0 0 1 1 1 1 1 1 0 0 0 0\n"
        tt_filt = ""
        for i in range(21):
            tt_filt += tt_filt_row
        IJ.run("Convolve...", "text1=[" + tt_filt + "] normalize")

        # Segment out the TTs from the 'gaps' using Gaussian Adaptive Thresholding.
        selection_small = WindowManager.getImage("subsection_small.tif")
        IJ.run(selection_small, "Duplicate...", "title=thresholded.tif")
        threshed = WindowManager.getImage("thresholded.tif")
        IJ.run(threshed, "Auto Local Threshold",
               "method=Bernsen radius=7 parameter_1=1 parameter_2=0 white")

        # Select the TTs from the thresholded image.
        IJ.run(threshed, "Create Selection", "")
        tt_selection = WindowManager.getImage("subsection_small.tif")
        IJ.selectWindow("thresholded.tif")
        IJ.selectWindow("subsection_small.tif")
        IJ.run(tt_selection, "Restore Selection", "")

        # Get TT intensity statistics.
        stat_options = IS.MEAN | IS.MIN_MAX | IS.STD_DEV
        stats = IS.getStatistics(tt_selection.getProcessor(), stat_options,
                                 selection_small.getCalibration())
        # Calculate pixel ceiling intensity value based on heuristic.
        # TODO: Add a catch for data type overflow.
        pixel_ceiling = stats.mean + 3 * stats.stdDev
        print "px ceil:", pixel_ceiling

        # Invert selection to get inter-sarcomeric gap intensity statistics.
        IJ.run(tt_selection, "Make Inverse", "")
        stat_options = IS.MEAN | IS.MIN_MAX | IS.STD_DEV
        stats = IS.getStatistics(tt_selection.getProcessor(), stat_options,
                                 selection_small.getCalibration())
        # Calculate pixel floor intensity value based on heuristic.
        pixel_floor = stats.mean - stats.stdDev
        # TODO: Add a catch for data type underflow.
        print "px floor:", pixel_floor

        # Threshold original image based on these values.
        IJ.selectWindow(this_img.getTitle())
        IJ.run(this_img, "Select All", "")
        IJ.setMinAndMax(pixel_floor, pixel_ceiling)
        IJ.run(this_img, "Apply LUT", "")

    ## Ask if it is acceptable.
    gd = GenericDialog("Acceptable?")
    gd.addMessage(
        "If the preprocessed image is acceptable for analysis, hit 'OK' to being analysis.\n"
        + " If the image is unacceptable or an error occurred, hit 'Cancel'")
    gd.showDialog()
    if gd.wasCanceled():
        return

    ## Save the preprocessed image.
    imp = IJ.getImage()
    fs = FileSaver(imp)
    img_save_dir = matchedmyo_path + "myoimages/"  # actually get from user at some point
    img_file_path = img_save_dir + img_name[:-4] + "_preprocessed.tif"
    if os.path.exists(img_save_dir) and os.path.isdir(img_save_dir):
        print "Saving image as:", img_file_path
        if os.path.exists(img_file_path):
            # use dialog box to ask if they want to overwrite
            gd = GenericDialog("Overwrite?")
            gd.addMessage(
                "A file exists with the specified path, \"{}\". Would you like to overwrite it?"
                .format(img_file_path))
            gd.enableYesNoCancel()
            gd.showDialog()
            if not gd.wasOKed():
                # user chose "No" or cancelled the overwrite prompt
                return
        # save the image (overwriting only if the user confirmed above)
        if fs.saveAsTiff(img_file_path):
            print "Preprocessed image saved successfully at:", '"' + img_file_path + '"'
    else:
        print "Folder does not exist or is not a folder!"

    ### Create the YAML file containing the parameters for classification
    ## Ask user for YAML input
    gd = GenericDialog("YAML Input")
    gd.addStringField("imageName", img_file_path, 50)
    #gd.addStringField("maskName", "None")
    gd.addStringField("outputParams_fileRoot", img_file_path[:-4], 50)
    gd.addStringField("outputParams_fileType", "tif")
    gd.addNumericField("outputParams_dpi", 300, 0)
    gd.addCheckbox("outputParams_saveHitsArray", False)
    gd.addStringField("outputParams_csvFile", matchedmyo_path + "results/")
    gd.addCheckbox("TT Filtering", True)
    gd.addToSameRow()
    gd.addCheckbox("LT Filtering", True)
    gd.addToSameRow()
    gd.addCheckbox("TA Filtering", True)
    gd.addNumericField("scopeResolutions_x", 5.0, 3)
    gd.addToSameRow()
    gd.addNumericField("scopeResolutions_y", 5.0, 3)
    gd.addMessage("Enter in filter rotation angles separated by commas.")
    gd.addStringField("", "-25, -20, -15, -10, -5, 0, 5, 10, 15, 20, 25", 50)
    gd.addCheckbox("returnAngles", False)
    gd.addCheckbox("returnPastedFilter", True)
    gd.showDialog()
    if gd.wasCanceled():
        return

    strings = [st.text for st in gd.getStringFields()]
    #if strings[1] == "None" or "":
    #	strings[1] = None
    nums = [float(num.text) for num in gd.getNumericFields()]
    nums[0] = int(nums[0])  # Have to make sure the dpi variable is an integer
    checks = [str(bool(boo.state)) for boo in gd.getCheckboxes()]
    iter_argument = ','.join(
        [str(float(it) - rotation_angle) for it in strings[4].split(',')])
    string_block = """imageName: {0[0]}
outputParams:
  fileRoot: {0[1]}
  fileType: {0[2]}
  dpi: {1[0]}
  saveHitsArray: {2[0]}
  csvFile: {0[3]}
preprocess: False
filterTypes:
  TT: {2[1]}
  LT: {2[2]}
  TA: {2[3]}
scopeResolutions:
  x: {1[1]}
  y: {1[2]}
iters: [{3}]
returnAngles: {2[4]}
returnPastedFilter: {2[5]}""".format(strings, nums, checks, iter_argument)
    im_title = this_img.getTitle()
    with cd(matchedmyo_path):
        yaml_file_path = "./YAML_files/" + im_title[:-4] + ".yml"
        with open(yaml_file_path, "w") as ym:
            ym.write(string_block)
        print "Wrote YAML file to:", matchedmyo_path + yaml_file_path[2:]

    ### Run the matchedmyo code on the preprocessed image
    with cd(matchedmyo_path):
        #os.chdir(matchedmyo_path)
        #subprocess.call(["python3", matchedmyo_path+"matchedmyo.py", "fullValidation"])
        subprocess.call(
            ["python3", "matchedmyo.py", "run", "--yamlFile", yaml_file_path])
Example #13
    raise Exception("Couldn't find the EmissionWavelength info in the metadata")

# Make the maximum intensity projection
# and show it for user input.
project = ZProjector()
project.setMethod(ZProjector.MAX_METHOD)
project.setImage(imp)
project.doProjection()
projection = project.getProjection()
projection.show()

# Wait for the user to draw a line
# and check if line ROI.
IJ.setTool("line");
myWait = WaitForUserDialog ("waitForUser", "Make a line with the region of interest and press OK")
myWait.show()

roi = projection.getRoi()

# Check if roi is a line
# otherwise exit
if not (roi.isLine()):
    raise Exception("Line selection required")

# Loop through existing files to avoid
# overwriting. Will increment the number
# at the end. Saves the MIP with input.
fileName      = shortTitle + "ROI" + str(num);
overlayOutput = os.path.join(output,"MAX_" + fileName)

while(os.path.isfile(overlayOutput + ".png")):
gd.addNumericField("Slice start:",1,0)
gd.addNumericField("Slice end:",theImage.getNSlices(),0)
gd.showDialog()

if (gd.wasOKed()):
	startSlice = gd.getNextNumber()
	endSlice = gd.getNextNumber()
	width = theImage.getWidth()
	height = theImage.getHeight()
	newStack = ImageStack(width,height)

	t_line = 240
	for i in range(startSlice,endSlice+1):
		theImage.setSlice(i)
		waiter = WaitForUserDialog("Set ROI","Set ROI for thresholding region")
		waiter.show()

		roi = theImage.getRoi()
		newip = theImage.getProcessor().duplicate()
		newip.setColor(0)
		newip.fillOutside(roi)
		newip.snapshot()
		newip.setThreshold(0,t_line,ImageProcessor.BLACK_AND_WHITE_LUT)
		newImage = ImagePlus("toggler",newip)
		newImage.show()

		doSameSlice = True
		while (doSameSlice):
			accept_waiter = NonBlockingGenericDialog("Thresholding...")
			accept_waiter.addNumericField("Threshold:",t_line,0)
			accept_waiter.setCancelLabel("Apply new threshold")
Example #15
	return RGBStackMerge.mergeChannels(chimps, False)

manders = MandersColocalization()
results = ResultsTable()
for imageFile in os.listdir(inputDir):
	print "Opening " + imageFile
	try:
		images = BF.openImagePlus(inputDir + imageFile)
		image = images[0]
	except UnknownFormatException:
		continue
	preview = getPreview(image)
	preview.show()
	rm = RoiManager()
	dialog = WaitForUserDialog("Action required", "Please select regions of interest in this image. Click OK when done.")
	dialog.show()
	rm.close()
	splitter = ChannelSplitter()
	imp1 = ImagePlus("CH1", splitter.getChannel(image, imageA))
	imp2 = ImagePlus("CH2", splitter.getChannel(image, imageB))
	title = image.getTitle()
	title = title[:title.rfind('.')]
	image.close()
	preview.close()
	ch1 = ImagePlusAdapter.wrap(imp1)
	ch2 = ImagePlusAdapter.wrap(imp2)

	for roi in rm.getRoisAsArray():
		container = createContainer(roi, ch1, ch2)
		img1 = container.getSourceImage1()
		img2 = container.getSourceImage2()
			IJ.showStatus("Need to pick an ROI first before running this option")
			self.workRoiCheckbox.setState(False)

## Main body of script starts here
od = OpenDialog("Select parent LSM file...")
parentLSMFilePath = od.getPath()
if parentLSMFilePath is not None:
	tileConfigFilePath = parentLSMFilePath + "_tiles/resized/TileConfiguration.registered.txt"
	scale_info = estimate_scale_multiplier(parentLSMFilePath+"_tiles/tile_1.ome.tif",parentLSMFilePath+"_tiles/resized/tile_1.tif")
	print scale_info
	coords = read_tileconfig_file(tileConfigFilePath)
	full_keys = ["tile_"+str(i) for i in range(1,len(coords.keys())+1)]
	coords_vals = normalize_coords_in_list(get_list_from_dict(full_keys,coords))
	im = IJ.getImage()
	wfud = WaitForUserDialog("Select a freehand ROI, then click here")
	wfud.show()
	roi = im.getRoi()
	if (roi is not None):
		float_poly = roi.getFloatPolygon()
		containing_tiles_superlist = []
		for i in range(float_poly.npoints):
			containing_tiles_superlist.append(find_containing_tiles([float_poly.xpoints[i],float_poly.ypoints[i],0],coords_vals,scale_info[1],scale_info[2]))
		upscaled_coords = upscale_coords(coords,scale_info[0])
		containing_tiles_dict = rearrange_tile_list(containing_tiles_superlist)
		copy_fullsize_tiles(["tile_"+key for key in containing_tiles_dict.keys()],parentLSMFilePath+"_tiles/",parentLSMFilePath+"_tiles/subtiles/",".ome.tif")
		subupcoords_vals = normalize_coords_in_list(get_list_from_dict(["tile_"+key for key in containing_tiles_dict.keys()],upscaled_coords))
		hadStitched = False
		if len(containing_tiles_dict.keys())>1:
			for i in containing_tiles_dict.keys():
				IJ.log("Opened tile " + str(i))
			write_tileconfig_file(parentLSMFilePath+"_tiles/subtiles/TileConfiguration.subtiles.txt",subupcoords_vals,".ome.tif")
from ij import IJ
from ij.gui import WaitForUserDialog

# Draw a line
dial = WaitForUserDialog('Draw a line that will be rotated to horizontal.')
dial.show()

cur_image = IJ.getImage()

cur_roi = cur_image.getRoi()
cur_angle =  cur_roi.getAngle()
print cur_angle

# Angles must be between 90 and -90 degrees...currently are not but it's fine as my
# system has inversion symmetry along the y-axis

IJ.run(cur_image, "Rotate... ", "angle=" +str(cur_angle) + " grid=0 interpolation=Bicubic stack")