Example 1
import glob
import os

# Module paths below assume storm_analysis's usual package layout.
import storm_analysis.sa_library.batch_run as batchRun
import storm_analysis.sa_library.datareader as datareader


def batchAnalysis(analysis_exe,
                  input_directory,
                  output_directory,
                  multi_xml,
                  max_processes=2):
    """
    Run analysis_exe on every .dax movie in input_directory that is
    longer than minimum_length frames, writing the results to
    output_directory. The jobs are executed in parallel via batchRun.
    """
    minimum_length = 100

    # FIXME: Should also handle .tif movies?
    dax_files = glob.glob(input_directory + "*.dax")

    # Figure out which movies to analyze.
    cmd_lines = []
    for movie_file in dax_files:

        movie_obj = datareader.inferReader(movie_file)
        if (movie_obj.filmSize()[2] > minimum_length):

            print("Analyzing:", movie_file)
            basename = os.path.basename(movie_file)
            mlistname = output_directory + "/" + basename[:-4] + "_mlist.bin"
            cmd_lines.append([
                'python', analysis_exe, "--movie", movie_file, "--bin",
                mlistname, "--xml", multi_xml
            ])
    batchRun.batchRun(cmd_lines, max_processes=max_processes)
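All of these examples hand their assembled command lines to batchRun.batchRun(), which executes them as parallel subprocesses. storm_analysis ships its own implementation; the function below is only a minimal sketch of the idea, assuming each entry of cmd_lines is a complete argument list for subprocess.

import subprocess
import time


def batchRun(cmd_lines, max_processes=2):
    """
    Minimal sketch of a parallel command runner, not the storm_analysis version.
    """
    pending = list(cmd_lines)
    running = []
    while pending or running:
        # Launch new subprocesses while under the process limit.
        while pending and (len(running) < max_processes):
            running.append(subprocess.Popen(pending.pop(0)))
        # Drop processes that have finished.
        running = [p for p in running if p.poll() is None]
        time.sleep(0.1)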
Example 2
import glob
import os

# Module path below assumes storm_analysis's usual package layout.
import storm_analysis.sa_library.batch_run as batchRun


def batchAnalysis(input_directory, eps=40, mc=10, min_size=50):
    """
    Batch DBSCAN clustering.
    """
    # We use the executable and batchRun as this is an easy
    # way to run all the clustering in parallel.
    src_dir = os.path.dirname(__file__)
    if src_dir != "":
        src_dir += "/"

    clusters_exe = src_dir + "dbscan_analysis.py"

    # Find the appropriate localization files (HDF5 format).
    bin_files = glob.glob(input_directory + "*.hdf5")

    # Generate the command lines.
    cmd_lines = []
    for filename in bin_files:

        # Skip output files from previous clustering runs.
        if ("clusters" in filename) or ("srt" in filename):
            continue

        print("Found:", filename)

        cmd_lines.append(['python', clusters_exe,
                          "--bin", filename,
                          "--eps", str(eps),
                          "--mc", str(mc),
                          "--min_size", str(min_size)])

    batchRun.batchRun(cmd_lines)
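A minimal invocation of the clustering batch above might look like the following; the directory path is a hypothetical placeholder.

# Cluster every localization file in ./analysis/ (hypothetical path).
batchAnalysis("./analysis/", eps=40, mc=10, min_size=50)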
Example 3
import glob
import os

# Module paths below assume storm_analysis's usual package layout.
import storm_analysis.sa_library.batch_run as batchRun
import storm_analysis.sa_library.datareader as datareader


def batchAnalysis(analysis_exe, input_directory, output_directory, multi_xml, max_processes=2):
    """
    Run analysis_exe on every .dax movie in input_directory that is
    longer than minimum_length frames, writing the results to
    output_directory. The jobs are executed in parallel via batchRun.
    """
    minimum_length = 100

    # FIXME: Should also handle .tif movies?
    dax_files = glob.glob(input_directory + "*.dax")

    # Figure out which movies to analyze.
    cmd_lines = []
    for movie_file in dax_files:

        movie_obj = datareader.inferReader(movie_file)
        if (movie_obj.filmSize()[2] > minimum_length):

            print("Analyzing:", movie_file)
            basename = os.path.basename(movie_file)
            mlistname = output_directory + "/" + basename[:-4] + ".hdf5"
            cmd_lines.append(['python', analysis_exe,
                              "--movie", movie_file,
                              "--bin", mlistname,
                              "--xml", multi_xml])
    batchRun.batchRun(cmd_lines, max_processes=max_processes)
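This variant differs from Example 1 only in that it writes .hdf5 localization files rather than _mlist.bin files. A hypothetical call, with placeholder paths for the fitting script, the movie and output directories, and the parameter XML:

# All paths here are illustrative placeholders.
batchAnalysis("mufit_analysis.py", "./movies/", "./results/",
              "analysis_params.xml", max_processes=4)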
Example 4
import glob
import os

# Module path below assumes storm_analysis's usual package layout.
import storm_analysis.sa_library.batch_run as batchRun

# These variables are assumed to be defined earlier in the script; the
# values shown here are illustrative placeholders only.
channels = ["750storm", "647storm", "561storm", "488storm"]
sigma_channels = [1.0, 1.0, 1.0, 1.0]
input_directory = "./bin/"
output_directory = "./images/"
storm_image_scale = 4
max_processes = 2

# Path to the utility that renders localization HDF5 files as images.
hdf5_to_image = "C:/Program Files/Python36/Lib/site-packages/storm_analysis/sa_utilities/hdf5_to_image.py"

for channel in channels:
    base = str(channel)

    # Create a list of all .hdf5 files for this channel in the BIN directory.
    h5_files = glob.glob(input_directory + base + "*.hdf5")

    # Pick the rendering sigma for this channel.
    if base == "750storm":
        sigma = sigma_channels[0]
    elif base == "647storm":
        sigma = sigma_channels[1]
    elif base == "561storm":
        sigma = sigma_channels[2]
    elif base == "488storm":
        sigma = sigma_channels[3]
    else:
        continue  # Unknown channel, no sigma defined.

    # Build the command lines, then run them in parallel.
    cmd_lines = []
    for h5_file in h5_files:
        print("Found:", h5_file)
        h5_filename = os.path.basename(h5_file)
        print("Analyzing:", h5_filename, "with sigma", str(sigma))
        tiff_out_name = output_directory + h5_filename[:-5] + ".tiff"
        cmd_lines.append(['python', hdf5_to_image,
                          "--image", tiff_out_name,
                          "--bin", h5_file,
                          "--scale", str(storm_image_scale),
                          "--sigma", str(sigma)])

    batchRun.batchRun(cmd_lines, max_processes=max_processes)
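The per-channel if/elif chain above can also be written as a dictionary lookup, which keeps the channel-to-sigma mapping in one place. A sketch, reusing the channels and sigma_channels placeholders from above:

# Map channel names to rendering sigmas (same placeholder values as above).
channel_sigmas = dict(zip(channels, sigma_channels))

for channel in channels:
    base = str(channel)
    sigma = channel_sigmas.get(base)
    if sigma is None:
        continue  # Unknown channel, no sigma defined.
    # ... build and run cmd_lines exactly as above ...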