Example 1
def mergeHeights(height_filename, channel_filenames):
    """
    channel_filenames is a list in order from shortest wavelength
    to the longest wavelength.
    """
    #
    # FIXME: We are just loading all the data at once, which could
    #        be problematic for really large files.
    #
    height_data = None
    for i, elt in enumerate(channel_filenames):
        i3_data = readinsight3.loadI3File(elt)

        if height_data is None:
            height_data = numpy.zeros((i3_data.size, len(channel_filenames)),
                                      dtype=numpy.float32)

        height_data[:, i] = i3_data['h']

    # User feedback.
    if (height_data.shape[0] > 5):
        for i in range(5):
            print(height_data[i, :])

    # Save the heights.
    numpy.save(height_filename, height_data)
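# Usage sketch, not from the original code; the file names are hypothetical.
# Merges the fitted heights of three per-channel molecule lists into "heights.npy".
mergeHeights("heights.npy",
             ["ch0_mlist.bin", "ch1_mlist.bin", "ch2_mlist.bin"])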
Example 2
    def __init__(self, parameters = None, **kwds):
        super(DataWriterI3, self).__init__(**kwds)

        raise Exception("Using the Insight3 format for analysis is deprecated!")
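        # NOTE: the unconditional raise above makes everything below unreachable;
        #       it is kept for reference only.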

        self.pixel_size = parameters.getAttr("pixel_size")
                
        #
        # If the i3 file already exists, read it in, write it
        # out to prepare for starting the analysis from the
        # end of what currently exists.
        #
        # FIXME: If the existing file is really large there
        #        could be problems here as we're going to load
        #        the whole thing into memory.
        #
        if(os.path.exists(self.filename)):
            print("Found", self.filename)
            i3data_in = readinsight3.loadI3File(self.filename)
            if (i3data_in is None) or (i3data_in.size == 0):
                self.start_frame = 0
            else:
                self.start_frame = int(numpy.max(i3data_in['fr']))

            print(" Starting analysis at frame:", self.start_frame)
            self.i3data = writeinsight3.I3Writer(self.filename)
            if (self.start_frame > 0):
                self.i3data.addMolecules(i3data_in)
                self.total_peaks = i3data_in['x'].size
        else:
            self.start_frame = 0
            self.i3data = writeinsight3.I3Writer(self.filename)
Example 3
def test_hdf5_to_bin_2():
    """
    Test tracks conversion.
    """
    peaks = {"x" : numpy.zeros(10),
             "y" : numpy.ones(10)}

    h5_name = storm_analysis.getPathOutputTest("test_sa_hdf5.hdf5")
    storm_analysis.removeFile(h5_name)

    # Write data.
    with saH5Py.SAH5Py(h5_name, is_existing = False) as h5:
        h5.addMetadata("<settings/>")
        h5.setMovieInformation(256, 256, 10, "XYZZY")
        h5.setPixelSize(100.0)
        h5.addTracks(peaks)

    # Convert.
    i3_name = storm_analysis.getPathOutputTest("test_mlist.bin")
    storm_analysis.removeFile(i3_name)
    hdf5ToBin.hdf5ToBin(h5_name, i3_name)

    # Load Insight3 file and check values.
    i3_data = readinsight3.loadI3File(i3_name, verbose = False)

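    # hdf5ToBin presumably shifts x/y by +1.0 pixel to match Insight3's
    # 1-based coordinate convention, hence the offset in the asserts below.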
    assert(numpy.allclose(peaks["x"], i3_data['x'] - 1.0))
    assert(numpy.allclose(peaks["y"], i3_data['y'] - 1.0))    
    assert(numpy.allclose(i3_data['fr'], numpy.ones(10)))
Example 5
def pixelBias(filename, n_bins = 1000, normalized = True, i3_field = "x"):
    i3_data = readinsight3.loadI3File(filename)
    
    xv = numpy.fmod(i3_data[i3_field], 1.0)

    [xp_hist, x_bins] = numpy.histogram(xv, bins = n_bins, range = (0.0, 1.0), density = normalized)
    x_centers = 0.5 * (x_bins[1:] + x_bins[:-1])

    return [x_centers, xp_hist]
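# Usage sketch, not from the original code; the file name is hypothetical.
# Plots the sub-pixel bias histogram of the fitted x coordinates.
import matplotlib.pyplot as plt

[x_centers, xp_hist] = pixelBias("movie_mlist.bin")
plt.plot(x_centers, xp_hist)
plt.xlabel("x modulo 1.0 (pixels)")
plt.ylabel("normalized density")
plt.show()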
Example 6
    def simulate(self, dax_file, bin_file, n_frames):

        #
        # Initialization.
        #
        dax_data = daxwriter.DaxWriter(dax_file, self.x_size, self.y_size)
        i3_data_in = readinsight3.loadI3File(bin_file)

        out_fname_base = dax_file[:-4]
        i3_data_out = writeinsight3.I3Writer(out_fname_base + "_olist.bin")
        sim_settings = open(out_fname_base + "_sim_params.txt", "w")

        #
        # Create the user-specified class instances that will do
        # most of the actual work of the simulation.
        #
        bg = self.bg_factory(sim_settings, self.x_size, self.y_size, i3_data_in)
        cam = self.cam_factory(sim_settings, self.x_size, self.y_size, i3_data_in)
        pp = self.pphys_factory(sim_settings, self.x_size, self.y_size, i3_data_in)
        psf = self.psf_factory(sim_settings, self.x_size, self.y_size, i3_data_in)

        sim_settings.write(json.dumps({"simulation" : {"bin_file" : bin_file,
                                                       "x_size" : str(self.x_size),
                                                       "y_size" : str(self.y_size)}}) + "\n")

        #
        # Generate the simulated movie.
        #
        for i in range(n_frames):

            # Generate the new image.
            image = numpy.zeros((self.x_size, self.y_size))
            cur_i3 = pp.getEmitters(i)

            print("Frame", i, cur_i3['x'].size, "emitters")

            # Background
            image += bg.getBackground(i)
            cur_i3 = bg.getEmitterBackground(cur_i3)

            # Foreground
            image += psf.getPSFs(cur_i3)

            # Camera
            image = cam.readImage(image)

            # Save the image.
            dax_data.addFrame(image)

            # Save the molecule locations.
            cur_i3['fr'] = i + 1
            i3_data_out.addMolecules(cur_i3)

        dax_data.close()
        i3_data_out.close()
        sim_settings.close()
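    # Usage sketch, not from the original code. The factory classes and the
    # constructor keyword names below are inferred from the attributes used
    # above (bg_factory, cam_factory, pphys_factory, psf_factory) and are
    # illustrative only:
    #
    #   sim = Simulate(bg_factory = lambda s, x, y, i3 : background.UniformBackground(s, x, y, i3),
    #                  cam_factory = lambda s, x, y, i3 : camera.Ideal(s, x, y, i3, 100.0),
    #                  pphys_factory = lambda s, x, y, i3 : photophysics.AlwaysOn(s, x, y, i3),
    #                  psf_factory = lambda s, x, y, i3 : psf.GaussianPSF(s, x, y, i3, 160.0),
    #                  x_size = 256, y_size = 256)
    #   sim.simulate("test.dax", "emitters.bin", 100)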
Example 7
def test_good_i3():
    mlist_name = storm_analysis.getPathOutputTest("test_i3_io_mlist.bin")

    # Create data.
    locs = i3dtype.createDefaultI3Data(100)

    # Save the data.
    with writeinsight3.I3Writer(mlist_name) as i3w:
        i3w.addMolecules(locs)

    # Read the data.
    locs = readinsight3.loadI3File(mlist_name)
    assert(locs.shape[0] == 100)
Example 8
def test_write_read_1():
    """
    Test writing and reading.
    """
    bin_name = storm_analysis.getPathOutputTest("test_insight3io.bin")

    i3_locs = i3dtype.createDefaultI3Data(10)
    i3dtype.posSet(i3_locs, 'x', 10.0)

    with writeinsight3.I3Writer(bin_name) as i3:
        i3.addMolecules(i3_locs)

    i3_in = readinsight3.loadI3File(bin_name, verbose = False)
    assert(numpy.allclose(i3_locs['x'], i3_in['x']))
Example 9
def test_bad_i3():
    mlist_name = storm_analysis.getPathOutputTest("test_i3_io_mlist.bin")

    # Create data.
    locs = i3dtype.createDefaultI3Data(100)

    # Save the data.
    i3w = writeinsight3.I3Writer(mlist_name)
    i3w.addMolecules(locs)
    i3w.fp.close()
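    # Closing the raw file pointer directly (rather than calling i3w.close())
    # means the trailing molecule count / status record is never written, so
    # the reader should reject the file.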

    # Read the data.
    locs = readinsight3.loadI3File(mlist_name)
    assert(locs is None)
Example 10
def splitPeaks(mlist_filename, params_filename):

    parameters = params.ParametersMultiplane().initFromFile(params_filename)

    # Load the plane to plane mapping data.
    mappings = {}
    if parameters.hasAttr("mapping"):
        if os.path.exists(parameters.getAttr("mapping")):
            with open(parameters.getAttr("mapping"), 'rb') as fp:
                mappings = pickle.load(fp)

        else:
            print("No mapping file parameter, nothing to do")

    # Load frame offset information.
    frame_offsets = list(
        map(parameters.getAttr, mpUtilC.getOffsetAttrs(parameters)))
    print(frame_offsets)

    # Load the molecule list.
    ch0_data = readinsight3.loadI3File(mlist_filename)

    # Map to other channels.
    basename = mlist_filename[:-4]
    channel = 1
    m_key = "0_1_"
    while (m_key + "x") in mappings:
        chn_data = ch0_data.copy()

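        # Each mapping is a first-order affine transform,
        # e.g. x' = tx[0] + tx[1]*x + tx[2]*y.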
        # Map x.
        tx = mappings[m_key + "x"]
        chn_data['x'] = tx[0] + ch0_data['x'] * tx[1] + ch0_data['y'] * tx[2]

        # Map y.
        ty = mappings[m_key + "y"]
        chn_data['y'] = ty[0] + ch0_data['x'] * ty[1] + ch0_data['y'] * ty[2]

        # Map frame.
        chn_data[
            'fr'] = ch0_data['fr'] - frame_offsets[0] + frame_offsets[channel]

        with writeinsight3.I3Writer(basename + "_ch" + str(channel) +
                                    ".bin") as i3w:
            i3w.addMolecules(chn_data)

        channel += 1
        m_key = "0_" + str(channel) + "_"
Example 11
def makeTreeAndQuadsFromI3File(i3_filename,
                               min_size=None,
                               max_size=None,
                               max_neighbors=10):
    """
    Make a KD tree and a list of quads from an Insight3 file.

    Note: This file should probably only have localizations for a single frame.
    """
    i3_data = readinsight3.loadI3File(i3_filename)

    # Warning if there is more than 1 frame in the data.
    if (len(numpy.unique(i3_data['fr'])) > 1):
        print("Warning: Localizations in multiple frames detected!")

    return makeTreeAndQuads(i3_data['xc'],
                            i3_data['yc'],
                            min_size=min_size,
                            max_size=max_size,
                            max_neighbors=max_neighbors)
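# Usage sketch, not from the original code; the file name and sizes are
# hypothetical, and this assumes makeTreeAndQuads() returns the KD tree
# and the quad list as a pair.
kd_tree, quads = makeTreeAndQuadsFromI3File("ch0_beads.bin",
                                            min_size = 2.0,
                                            max_size = 10.0)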
Example 12
def _test_l1h():
    
    # Test L1H.
    movie_name = storm_analysis.getData("test/data/test_l1h.dax")
    settings = storm_analysis.getData("test/data/test_l1h.xml")
    hres = storm_analysis.getPathOutputTest("test_l1h_list.hres")
    mlist = storm_analysis.getPathOutputTest("test_l1h_list.bin")

    storm_analysis.removeFile(hres)
    storm_analysis.removeFile(mlist)

    from storm_analysis.L1H.cs_analysis import analyze
    analyze(movie_name, settings, hres, mlist)

    # Verify number of localizations found.
    #
    # FIXME: Change L1H to use the HDF5 format.
    #
    num_locs = readinsight3.loadI3File(mlist)["x"].size
    if not veri.verifyIsCloseEnough(num_locs, 1986):        
        raise Exception("L1H did not find the expected number of localizations.")
Example 13
def test_good_i3_metadata():
    mlist_name = storm_analysis.getPathOutputTest("test_i3_io_mlist.bin")

    # Create data.
    locs = i3dtype.createDefaultI3Data(100)

    # Save data and metadata.
    i3w = writeinsight3.I3Writer(mlist_name)
    i3w.addMolecules(locs)
    etree = ElementTree.Element("xml")
    test = ElementTree.SubElement(etree, "test")
    test.text = "test"
    i3w.closeWithMetadata(ElementTree.tostring(etree, 'ISO-8859-1'))

    # Read the data.
    locs = readinsight3.loadI3File(mlist_name)
    assert(locs.shape[0] == 100)

    # Read the metadata.
    metadata = readinsight3.loadI3Metadata(mlist_name)
    assert(metadata.find("test").text == "test")
Example 15
def measurePSF(movie_name,
               zfile_name,
               movie_mlist,
               psf_name,
               want2d=False,
               aoi_size=12,
               z_range=750.0,
               z_step=50.0):
    """
    The actual z range is 2x z_range (i.e. from -z_range to z_range).
    """

    # Load dax file, z offset file and molecule list file.
    dax_data = datareader.inferReader(movie_name)
    z_offsets = None
    if os.path.exists(zfile_name):
        try:
            z_offsets = numpy.loadtxt(zfile_name, ndmin=2)[:, 1]
        except IndexError:
            z_offsets = None
            print("z offsets were not loaded.")
    i3_data = readinsight3.loadI3File(movie_mlist)

    if want2d:
        print("Measuring 2D PSF")
    else:
        print("Measuring 3D PSF")

    #
    # Go through the frames identifying good peaks and adding them
    # to the average psf. For 3D molecule z positions are rounded to
    # the nearest 50nm.
    #
    z_mid = int(z_range / z_step)
    max_z = 2 * z_mid + 1
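    # e.g. with the defaults z_range = 750.0 and z_step = 50.0 this gives
    # z_mid = 15 and a PSF stack with max_z = 31 z slices.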

    average_psf = numpy.zeros((max_z, 4 * aoi_size, 4 * aoi_size))
    peaks_used = 0
    totals = numpy.zeros(max_z)
    [dax_x, dax_y, dax_l] = dax_data.filmSize()
    for curf in range(dax_l):

        # Select localizations in current frame & not near the edges.
        mask = ((i3_data['fr'] == curf + 1) &
                (i3_data['x'] > aoi_size) & (i3_data['x'] < (dax_x - aoi_size - 1)) &
                (i3_data['y'] > aoi_size) & (i3_data['y'] < (dax_y - aoi_size - 1)))
        xr = i3_data['x'][mask]
        yr = i3_data['y'][mask]

        # Use the z offset file if it was specified, otherwise use localization z positions.
        if z_offsets is None:
            if (curf == 0):
                print("Using fit z locations.")
            zr = i3_data['z'][mask]
        else:
            if (curf == 0):
                print("Using z offset file.")
            zr = numpy.ones(xr.size) * z_offsets[curf]

        ht = i3_data['h'][mask]

        # Remove localizations that are too close to each other.
        in_peaks = numpy.zeros((xr.size, util_c.getNPeakPar()))
        in_peaks[:, util_c.getXCenterIndex()] = xr
        in_peaks[:, util_c.getYCenterIndex()] = yr
        in_peaks[:, util_c.getZCenterIndex()] = zr
        in_peaks[:, util_c.getHeightIndex()] = ht

        out_peaks = util_c.removeNeighbors(in_peaks, 2 * aoi_size)
        #out_peaks = util_c.removeNeighbors(in_peaks, aoi_size)

        print(curf, "peaks in", in_peaks.shape[0], ", peaks out",
              out_peaks.shape[0])

        # Use remaining localizations to calculate spline.
        image = dax_data.loadAFrame(curf).astype(numpy.float64)

        xr = out_peaks[:, util_c.getXCenterIndex()]
        yr = out_peaks[:, util_c.getYCenterIndex()]
        zr = out_peaks[:, util_c.getZCenterIndex()]
        ht = out_peaks[:, util_c.getHeightIndex()]

        for i in range(xr.size):
            xf = xr[i]
            yf = yr[i]
            zf = zr[i]
            xi = int(xf)
            yi = int(yf)
            if want2d:
                zi = 0
            else:
                zi = int(round(zf / z_step) + z_mid)

            # check the z is in range
            if (zi > -1) and (zi < max_z):

                # get localization image
                mat = image[xi - aoi_size:xi + aoi_size,
                            yi - aoi_size:yi + aoi_size]

                # zoom in by 2x
                psf = scipy.ndimage.interpolation.zoom(mat, 2.0)

                # re-center image
                psf = scipy.ndimage.interpolation.shift(
                    psf, (-2.0 * (xf - xi), -2.0 * (yf - yi)), mode='nearest')

                # add to average psf accumulator
                average_psf[zi, :, :] += psf
                totals[zi] += 1

    # Force PSF to be zero (on average) at the boundaries.
    for i in range(max_z):
        edge = numpy.concatenate((average_psf[i, 0, :], average_psf[i, -1, :],
                                  average_psf[i, :, 0], average_psf[i, :, -1]))
        average_psf[i, :, :] -= numpy.mean(edge)

    # Normalize the PSF.
    if want2d:
        max_z = 1

    for i in range(max_z):
        print(i, totals[i])
        if (totals[i] > 0.0):
            average_psf[i, :, :] = average_psf[i, :, :] / numpy.sum(
                numpy.abs(average_psf[i, :, :]))

    average_psf = average_psf / numpy.max(average_psf)

    # Save PSF (in image form).
    if True:
        import storm_analysis.sa_library.daxwriter as daxwriter
        dxw = daxwriter.DaxWriter(
            os.path.join(os.path.dirname(psf_name), "psf.dax"),
            average_psf.shape[1], average_psf.shape[2])
        for i in range(max_z):
            dxw.addFrame(1000.0 * average_psf[i, :, :] + 100)
        dxw.close()

    # Save PSF.
    if want2d:
        psf_dict = {"psf": average_psf[0, :, :], "type": "2D"}

    else:
        cur_z = -z_range
        z_vals = []
        for i in range(max_z):
            z_vals.append(cur_z)
            cur_z += z_step

        psf_dict = {
            "psf": average_psf,
            "pixel_size": 0.080,  # 1/2 the camera pixel size in nm.
            "type": "3D",
            "zmin": -z_range,
            "zmax": z_range,
            "zvals": z_vals
        }

    with open(psf_name, 'wb') as fp:
        pickle.dump(psf_dict, fp)
Example 16
def psfZStack(movie_name,
              i3_filename,
              zstack_name,
              scmos_cal=None,
              aoi_size=8,
              driftx=0.0,
              drifty=0.0):
    """
    driftx and drifty are in units of pixels per frame, i.e. (bead x position
    in the last frame - bead x position in the first frame) / n_frames.
    """

    # Load movie.
    movie_data = datareader.inferReader(movie_name)
    [movie_x, movie_y, movie_len] = movie_data.filmSize()

    # Load localizations.
    i3_data = readinsight3.loadI3File(i3_filename)
    x = i3_data["x"]
    y = i3_data["y"]

    # Load sCMOS calibration data.
    gain = numpy.ones((movie_y, movie_x))
    offset = numpy.zeros((movie_y, movie_x))
    if scmos_cal is not None:
        [offset, variance, gain] = numpy.load(scmos_cal)
        gain = 1.0 / gain

    z_stack = numpy.zeros((4 * aoi_size, 4 * aoi_size, movie_len))

    for i in range(movie_len):
        if ((i % 50) == 0):
            print("Processing frame", i)

        #
        # Subtract pixel offset and convert to units of photo-electrons.
        #
        frame = (movie_data.loadAFrame(i) - offset) * gain

        #
        # Subtract estimated background. This assumes that the image is
        # mostly background and that the background is uniform.
        #
        frame = frame - numpy.median(frame)

        for j in range(x.size):
            xf = x[j] + driftx * float(i)
            yf = y[j] + drifty * float(i)
            xi = int(xf)
            yi = int(yf)

            im_slice = frame[xi - aoi_size:xi + aoi_size,
                             yi - aoi_size:yi + aoi_size]

            im_slice_up = scipy.ndimage.interpolation.zoom(im_slice, 2.0)
            im_slice_up = scipy.ndimage.interpolation.shift(
                im_slice_up, (-2.0 * (xf - xi), -2.0 * (yf - yi)),
                mode='nearest')

            z_stack[:, :, i] += im_slice_up

    # Normalize by the number of localizations.
    z_stack = z_stack / float(x.size)

    print("max intensity", numpy.amax(z_stack))

    # Save z_stack.
    numpy.save(zstack_name + ".npy", z_stack)

    # Save (normalized) z_stack as tif for inspection purposes.
    z_stack = z_stack / numpy.amax(z_stack)
    z_stack = z_stack.astype(numpy.float32)
    with tifffile.TiffWriter(zstack_name + ".tif") as tf:
        for i in range(movie_len):
            tf.save(z_stack[:, :, i])
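# Usage sketch, not from the original code; the file names are hypothetical.
# Averages a 2x up-sampled AOI around each bead over every frame of a z scan.
psfZStack("beads_zscan.dax", "beads_mlist.bin", "beads_zstack",
          scmos_cal = None, aoi_size = 8)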
Example 18
def peakFinding(find_peaks, movie_file, mlist_file, parameters):

    # open files for input & output
    movie_data = datareader.inferReader(movie_file)
    [movie_x,movie_y,movie_l] = movie_data.filmSize()

    # if the i3 file already exists, read it in,
    # write it out & start the analysis from the
    # end.
    total_peaks = 0
    if(os.path.exists(mlist_file)):
        print("Found", mlist_file)
        i3data_in = readinsight3.loadI3File(mlist_file)
        try:
            curf = int(numpy.max(i3data_in['fr']))
        except ValueError:
            curf = 0
        print(" Starting analysis at frame:", curf)
        i3data = writeinsight3.I3Writer(mlist_file)
        if (curf > 0):
            i3data.addMolecules(i3data_in)
            total_peaks = i3data_in['x'].size
    else:
        curf = 0
        i3data = writeinsight3.I3Writer(mlist_file)

    # process parameters
    if parameters.hasAttr("start_frame"):
        if (parameters.getAttr("start_frame")>=curf) and (parameters.getAttr("start_frame")<movie_l):
            curf = parameters.getAttr("start_frame")

    if parameters.hasAttr("max_frame"):
        if (parameters.getAttr("max_frame")>0) and (parameters.getAttr("max_frame")<movie_l):
            movie_l = parameters.getAttr("max_frame")

    static_bg_estimator = None
    if (parameters.getAttr("static_background_estimate", 0) > 0):
        print("Using static background estimator.")
        static_bg_estimator = static_background.StaticBGEstimator(movie_data,
                                                                  start_frame = curf,
                                                                  sample_size = parameters.getAttr("static_background_estimate"))

    # analyze the movie
    # catch keyboard interrupts & "gracefully" exit.
    try:
        while(curf<movie_l):
            #for j in range(l):

            # Set up the analysis.
            image = movie_data.loadAFrame(curf) - parameters.getAttr("baseline")
            mask = (image < 1.0)
            if (numpy.sum(mask) > 0):
                print(" Removing negative values in frame", curf)
                image[mask] = 1.0

            # Find and fit the peaks.
            if static_bg_estimator is not None:
                bg_estimate = static_bg_estimator.estimateBG(curf) - parameters.getAttr("baseline")
                [peaks, residual] = find_peaks.analyzeImage(image,
                                                            bg_estimate = bg_estimate)
            else:
                [peaks, residual] = find_peaks.analyzeImage(image)

            # Save the peaks.
            if (type(peaks) == type(numpy.array([]))):
                # remove unconverged peaks
                peaks = find_peaks.getConvergedPeaks(peaks)

                # save results
                if(parameters.getAttr("orientation", "normal") == "inverted"):
                    i3data.addMultiFitMolecules(peaks, movie_x, movie_y, curf+1, parameters.getAttr("pixel_size"), inverted = True)
                else:
                    i3data.addMultiFitMolecules(peaks, movie_x, movie_y, curf+1, parameters.getAttr("pixel_size"), inverted = False)

                total_peaks += peaks.shape[0]
                print("Frame:", curf, peaks.shape[0], total_peaks)
            else:
                print("Frame:", curf, 0, total_peaks)
            curf += 1

        print("")
        i3data.close()
        find_peaks.cleanUp()
        return 0

    except KeyboardInterrupt:
        print("Analysis stopped.")
        i3data.close()
        find_peaks.cleanUp()
        return 1
Example 19
import sys

import numpy

import storm_analysis.sa_library.datareader as datareader
import storm_analysis.sa_library.readinsight3 as readinsight3

if (len(sys.argv)!=4):
    print("usage: homotopy_psf <dax_file, input> <bin_file, input> <npy_file, output>")
    exit()

# Minimum number of peaks to calculate the PSF from.
min_peaks = 300

# Half width of the aoi size in pixels.
aoi_size = 8

# Load dax file and corresponding molecule list file.
dax_data = datareader.inferReader(sys.argv[1])
i3_data = readinsight3.loadI3File(sys.argv[2])

# Go through the frames identifying good peaks and adding them
# to the average psf
average_psf = numpy.zeros((4*aoi_size,4*aoi_size))
curf = 1
peaks_used = 0
total = 0.0
[dax_x, dax_y, dax_l] = dax_data.filmSize()
while (curf < dax_l) and (peaks_used < min_peaks):

    # Select localizations in current frame & not near the edges.
    mask = ((i3_data['fr'] == curf) &
            (i3_data['x'] > aoi_size) & (i3_data['x'] < (dax_y - aoi_size - 1)) &
            (i3_data['y'] > aoi_size) & (i3_data['y'] < (dax_x - aoi_size - 1)))
    xr = i3_data['x'][mask]
    yr = i3_data['y'][mask]
    ht = i3_data['h'][mask]
Example 20
import argparse

import numpy

import storm_analysis.sa_library.i3dtype as i3dtype
import storm_analysis.sa_library.readinsight3 as readinsight3

parser = argparse.ArgumentParser(description = "Check emitter photophysics.")

parser.add_argument('--bin', dest='i3bin', type=str, required=True,
                    help = "The name of Insight3 format file with the emitter positions.")
parser.add_argument('--maxi', dest='maxi', type=int, required=False,
                    help = "The maximum number of emitters to check.")

args = parser.parse_args()

# Initialization.
i3_data = readinsight3.loadI3File(args.i3bin)
if args.maxi is None:
    args.maxi = numpy.max(i3_data['i']) + 1

if (args.maxi > (numpy.max(i3_data['i']) + 1)):
    args.maxi = numpy.max(i3_data['i']) + 1

max_frame = numpy.max(i3_data['fr'])

on_times = []
off_times = []
for i in range(int(args.maxi)):

    if ((i % 100) == 0):
        print("Analyzing emitter", i)
Example 21
import pickle

import storm_analysis.sa_library.i3dtype as i3dtype
import storm_analysis.sa_library.readinsight3 as readinsight3
import storm_analysis.sa_library.writeinsight3 as writeinsight3

import storm_analysis.simulator.drift as drift
import storm_analysis.simulator.photophysics as photophysics
import storm_analysis.simulator.psf as psf
import storm_analysis.simulator.simulate as simulate

frames = 100
x_size = 300
y_size = 310
z_planes = [0.0]
#z_planes = [-250.0, 250.0]
#z_planes = [-750.0, -250.0, 250.0, 750.0]
z_value = -500.0

# Load emitter locations.
i3_locs = readinsight3.loadI3File("emitters.bin")

if False:
    i3_locs = i3_locs[0]

# Make a bin file with emitter locations for each frame.
with writeinsight3.I3Writer("test_olist.bin") as i3w:
    for i in range(frames):
        i3_temp = i3_locs.copy()
        i3dtype.setI3Field(i3_temp, "fr", i + 1)
        i3dtype.posSet(i3_temp, "z", z_value)
        i3w.addMolecules(i3_temp)

# Load channel to channel mapping file.
with open("map.map", 'rb') as fp:
    mappings = pickle.load(fp)
Example 23
    parser.add_argument('--mm_map',
                        dest='mm_map',
                        type=str,
                        required=True,
                        help="The name of the micrometry map file.")
    parser.add_argument(
        '--results',
        dest='results',
        type=str,
        required=True,
        help="The name of the file to save the updated mapping in.")

    args = parser.parse_args()

    # Load locs1.
    i3_data1 = readinsight3.loadI3File(args.locs1)
    kd1 = scipy.spatial.KDTree(
        numpy.stack((i3_data1['xc'], i3_data1['yc']), axis=-1))

    # Load locs2.
    i3_data2 = readinsight3.loadI3File(args.locs2)
    kd2 = scipy.spatial.KDTree(
        numpy.stack((i3_data2['xc'], i3_data2['yc']), axis=-1))

    # Load 'first guess' transform.
    with open(args.mm_map, 'rb') as fp:
        mp_transform = pickle.load(fp)

    # Refine.
    [tr_1_to_0, tr_0_to_1
     ] = refineTransform(kd1, kd2,
Example 24
def peakFinding(find_peaks, movie_file, mlist_file, parameters):
    """
    Does the peak finding.
    """

    # open files for input & output
    movie_data = datareader.inferReader(movie_file)
    [movie_x, movie_y, movie_l] = movie_data.filmSize()

    # if the i3 file already exists, read it in,
    # write it out & start the analysis from the
    # end.
    total_peaks = 0
    if (os.path.exists(mlist_file)):
        print("Found", mlist_file)
        i3data_in = readinsight3.loadI3File(mlist_file)
        try:
            curf = int(numpy.max(i3data_in['fr']))
        except ValueError:
            curf = 0
        print(" Starting analysis at frame:", curf)
        i3data = writeinsight3.I3Writer(mlist_file)
        if (curf > 0):
            i3data.addMolecules(i3data_in)
            total_peaks = i3data_in['x'].size
    else:
        curf = 0
        i3data = writeinsight3.I3Writer(mlist_file)

    # process parameters
    if parameters.hasAttr("start_frame"):
        if (parameters.getAttr("start_frame") >=
                curf) and (parameters.getAttr("start_frame") < movie_l):
            curf = parameters.getAttr("start_frame")

    max_frame = movie_l
    if parameters.hasAttr("max_frame"):
        if (parameters.getAttr("max_frame") >
                0) and (parameters.getAttr("max_frame") < movie_l):
            max_frame = parameters.getAttr("max_frame")

    static_bg_estimator = None
    if (parameters.getAttr("static_background_estimate", 0) > 0):
        print("Using static background estimator.")
        static_bg_estimator = static_background.StaticBGEstimator(
            movie_data,
            start_frame=curf,
            sample_size=parameters.getAttr("static_background_estimate"))

    # analyze the movie
    # catch keyboard interrupts & "gracefully" exit.
    try:
        while (curf < max_frame):
            #for j in range(l):

            # Set up the analysis.
            image = movie_data.loadAFrame(curf) - parameters.getAttr(
                "baseline")
            mask = (image < 1.0)
            if (numpy.sum(mask) > 0):
                print(" Removing negative values in frame", curf)
                image[mask] = 1.0

            # Find and fit the peaks.
            if static_bg_estimator is not None:
                bg_estimate = static_bg_estimator.estimateBG(
                    curf) - parameters.getAttr("baseline")
                [peaks,
                 residual] = find_peaks.analyzeImage(image,
                                                     bg_estimate=bg_estimate)
            else:
                [peaks, residual] = find_peaks.analyzeImage(image)

            # Save the peaks.
            if (type(peaks) == type(numpy.array([]))):
                # remove unconverged peaks
                peaks = find_peaks.getConvergedPeaks(peaks)

                # save results
                if (parameters.getAttr("orientation", "normal") == "inverted"):
                    i3data.addMultiFitMolecules(
                        peaks,
                        movie_x,
                        movie_y,
                        curf + 1,
                        parameters.getAttr("pixel_size"),
                        inverted=True)
                else:
                    i3data.addMultiFitMolecules(
                        peaks,
                        movie_x,
                        movie_y,
                        curf + 1,
                        parameters.getAttr("pixel_size"),
                        inverted=False)

                total_peaks += peaks.shape[0]
                print("Frame:", curf, peaks.shape[0], total_peaks)
            else:
                print("Frame:", curf, 0, total_peaks)
            curf += 1

        print("")
        if parameters.getAttr("append_metadata", 0):

            etree = ElementTree.Element("xml")

            # Add analysis parameters.
            etree.append(parameters.toXMLElementTree())

            # Add movie properties.
            movie_props = ElementTree.SubElement(etree, "movie")
            field = ElementTree.SubElement(movie_props, "hash_value")
            field.text = movie_data.hashID()
            for elt in [["movie_x", movie_x], ["movie_y", movie_y],
                        ["movie_l", movie_l]]:
                field = ElementTree.SubElement(movie_props, elt[0])
                field.text = str(elt[1])

            i3data.closeWithMetadata(ElementTree.tostring(etree, 'ISO-8859-1'))
        else:
            i3data.close()
        find_peaks.cleanUp()
        return 0

    except KeyboardInterrupt:
        print("Analysis stopped.")
        i3data.close()
        find_peaks.cleanUp()
        return 1
Example 25
    def simulate(self, dax_file, bin_file, n_frames):

        #
        # Initialization.
        #
        dax_data = daxwriter.DaxWriter(dax_file, self.x_size, self.y_size)
        i3_data_in = readinsight3.loadI3File(bin_file)

        out_fname_base = dax_file[:-4]
        i3_data_out = writeinsight3.I3Writer(out_fname_base + "_olist.bin")
        sim_settings = open(out_fname_base + "_sim_params.txt", "w")

        #
        # Create the user-specified class instances that will do
        # most of the actual work of the simulation.
        #
        bg = self.bg_factory(sim_settings, self.x_size, self.y_size,
                             i3_data_in)
        cam = self.cam_factory(sim_settings, self.x_size, self.y_size,
                               i3_data_in)
        drift = None
        if self.drift_factory is not None:
            drift = self.drift_factory(sim_settings, self.x_size, self.y_size,
                                       i3_data_in)
        pp = self.pphys_factory(sim_settings, self.x_size, self.y_size,
                                i3_data_in)
        psf = self.psf_factory(sim_settings, self.x_size, self.y_size,
                               i3_data_in)

        sim_settings.write(
            json.dumps({
                "simulation": {
                    "bin_file": bin_file,
                    "x_size": str(self.x_size),
                    "y_size": str(self.y_size)
                }
            }) + "\n")

        #
        # Generate the simulated movie.
        #
        for i in range(n_frames):

            # Generate the new image.
            image = numpy.zeros((self.x_size, self.y_size))

            # Get the emitters that are on in the current frame.
            cur_i3 = pp.getEmitters(i).copy()

            print("Frame", i, cur_i3['x'].size, "emitters")

            # Add background to image.
            image += bg.getBackground(i)

            # Set 'bg' parameter of the emitters.
            cur_i3 = bg.getEmitterBackground(cur_i3)

            # Apply drift to the localizations.
            if drift is not None:
                drift.drift(i, cur_i3)

            # Foreground
            image += psf.getPSFs(cur_i3)

            # Camera
            image = cam.readImage(image)

            # Save the image.
            dax_data.addFrame(image)

            # Save the molecule locations.
            cur_i3['fr'] = i + 1
            i3_data_out.addMolecules(cur_i3)

        dax_data.close()
        i3_data_out.close()
        sim_settings.close()
Example 26
        dest='channel',
        type=int,
        required=False,
        default=0,
        help="Channel that the localizations come from. Default is channel 0.")

    args = parser.parse_args()

    basename = os.path.splitext(args.i3bin)[0]

    # Load mappings.
    with open(args.mapping, 'rb') as fp:
        mappings = pickle.load(fp)

    # Load x,y locations.
    i3_in = readinsight3.loadI3File(args.i3bin)

    xi = i3_in["x"]
    yi = i3_in["y"]

    # Map to back channel 0 if necessary.
    if (args.channel != 0):
        xt_name = str(args.channel) + "_0_x"
        yt_name = str(args.channel) + "_0_y"
        [xi, yi] = mapXYLocations(xi, yi, mappings[xt_name], mappings[yt_name])

    for i in range(8):
        if (i == 0):
            i3dtype.posSet(i3_in, "x", xi)
            i3dtype.posSet(i3_in, "y", yi)
        else:
Example 27
def homotopyPSF(dax_file, bin_file, psf_file):
    
    # Minimum number of peaks to calculate the PSF from.
    min_peaks = 300

    # Half width of the aoi size in pixels.
    aoi_size = 8

    # Load dax file and corresponding molecule list file.
    dax_data = datareader.inferReader(dax_file)
    i3_data = readinsight3.loadI3File(bin_file)

    # Go through the frames identifying good peaks and adding them
    # to the average psf
    average_psf = numpy.zeros((4*aoi_size,4*aoi_size))
    curf = 1
    peaks_used = 0
    total = 0.0
    [dax_x, dax_y, dax_l] = dax_data.filmSize()
    while (curf < dax_l) and (peaks_used < min_peaks):

        # Select localizations in current frame & not near the edges.
        mask = ((i3_data['fr'] == curf) &
                (i3_data['x'] > aoi_size) & (i3_data['x'] < (dax_y - aoi_size - 1)) &
                (i3_data['y'] > aoi_size) & (i3_data['y'] < (dax_x - aoi_size - 1)))
        xr = i3_data['x'][mask]
        yr = i3_data['y'][mask]
        ht = i3_data['h'][mask]

        # Remove localizations that are too close to each other.
        mask = iaUtilsC.removeNeighborsMask(xr, yr, aoi_size)
        print(curf, "peaks in", xr.size, ", peaks out", numpy.count_nonzero(mask))
        
        xr = xr[mask]
        yr = yr[mask]
        ht = ht[mask]
        
        # Use remaining localizations to calculate spline.
        image = dax_data.loadAFrame(curf-1).astype(numpy.float64)

        for i in range(xr.size):
            xf = xr[i]
            yf = yr[i]
            xi = int(xf)
            yi = int(yf)

            # get localization image
            mat = image[xi-aoi_size:xi+aoi_size,
                        yi-aoi_size:yi+aoi_size]

            # re-center image
            psf = scipy.ndimage.interpolation.shift(mat,(-(xf-xi),-(yf-yi)),mode='nearest')

            # zoom in by 2x
            psf = scipy.ndimage.interpolation.zoom(psf,2.0)

            # add to average psf accumulator
            average_psf += psf
            total += ht[i]

            peaks_used += 1
        
        curf += 1

    average_psf = average_psf/total

    average_psf = numpy.transpose(average_psf)

    # force psf to be zero (on average) at the boundaries.
    if True:
        edge = numpy.concatenate((average_psf[0,:],
                                  average_psf[-1,:],
                                  average_psf[:,0],
                                  average_psf[:,-1]))
        average_psf -= numpy.mean(edge)

    # save PSF (in numpy form).
    numpy.save(psf_file, average_psf)
    
    # save PSF (in image form).
    #
    # FIXME: This may be useful but it is annoying for automated testing as this file
    #        is created in which ever directory the tests are run in.
    #
    if True:
        import tifffile
        tifffile.imsave("l1h_psf.tif", average_psf.astype(numpy.float32))
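# Usage sketch, not from the original code; the file names are hypothetical.
homotopyPSF("movie.dax", "movie_mlist.bin", "movie_psf.npy")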
Example 29
def mergeAnalysis(dir_name, bin_base_name, extensions=[".bin"]):

    # Create Insight3 file writers.
    i3_out = []
    for ext in extensions:
        i3_out.append(writeinsight3.I3Writer(bin_base_name + ext))

    # Find all the job*.xml files.
    job_xml_files = glob.glob(dir_name + "job*.xml")

    # Sort job files.
    job_xml_files = sorted(
        job_xml_files,
        key=lambda x: int(
            os.path.splitext(os.path.basename(x))[0].split("_")[1]))

    # Check for corresponding mlist.bin files.
    metadata = None
    last_frame = 0
    for i in range(len(job_xml_files)):

        job_complete = True
        for j, ext in enumerate(extensions):
            mlist_name = dir_name + "p_" + str(i + 1) + "_mlist" + ext

            if os.path.exists(mlist_name) and readinsight3.checkStatus(
                    mlist_name):

                # Load metadata from the first file.
                if (i == 0) and (j == 0):
                    metadata = readinsight3.loadI3Metadata(mlist_name)

                # Read localizations.
                i3_data = readinsight3.loadI3File(mlist_name, verbose=False)

                # Check for empty file.
                if (i3_data.size == 0):
                    print("No localizations found in", mlist_name)

                else:
                    # Print frame range covered.
                    if (j == 0):
                        last_frame = i3_data["fr"][-1]
                        print(i3_data["fr"][0], last_frame, mlist_name)

                    # Add localizations to the output file.
                    i3_out[j].addMolecules(i3_data)

            else:
                job_complete = False
                break

        if not job_complete:
            print("Merge failed because", job_xml_files[i], "is incomplete.")
            for j, ext in enumerate(extensions):
                i3_out[j].close()
                os.remove(bin_base_name + ext)
            assert (False)

    if metadata is None:
        print("No metadata found.")
        for i3w in i3_out:
            i3w.close()
    else:

        # Fix movie length node based on the last frame of the last molecule.
        metadata.find("movie").find("movie_l").text = str(last_frame)

        # Also need to fix analysis end points. We are assuming that the
        # entire movie was analyzed.
        metadata.find("settings").find("start_frame").text = "-1"
        metadata.find("settings").find("max_frame").text = "-1"

        for i3w in i3_out:
            i3w.closeWithMetadata(ElementTree.tostring(metadata, 'ISO-8859-1'))
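# Usage sketch, not from the original code; the paths are hypothetical. Note
# that dir_name needs a trailing slash since it is concatenated directly.
mergeAnalysis("./analysis/", "merged", extensions = [".bin"])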
Example 30
def verifyNumberLocalizations(bin_fname):
    """
    Return the number of localizations in a I3 file.
    """
    return readinsight3.loadI3File(bin_fname, verbose = False).size
Example 31
def peakFinding(find_peaks, movie_file, mlist_file, parameters):

    # open files for input & output
    movie_data = datareader.inferReader(movie_file)
    [movie_x, movie_y, movie_l] = movie_data.filmSize()

    # if the i3 file already exists, read it in,
    # write it out & start the analysis from the
    # end.
    total_peaks = 0
    if (os.path.exists(mlist_file)):
        print("Found", mlist_file)
        i3data_in = readinsight3.loadI3File(mlist_file)
        try:
            curf = int(numpy.max(i3data_in['fr']))
        except ValueError:
            curf = 0
        print(" Starting analysis at frame:", curf)
        i3data = writeinsight3.I3Writer(mlist_file)
        if (curf > 0):
            i3data.addMolecules(i3data_in)
            total_peaks = i3data_in['x'].size
    else:
        curf = 0
        i3data = writeinsight3.I3Writer(mlist_file)

    # process parameters
    if hasattr(parameters, "start_frame"):
        if (parameters.start_frame >= curf) and (parameters.start_frame <
                                                 movie_l):
            curf = parameters.start_frame

    if hasattr(parameters, "max_frame"):
        if (parameters.max_frame > 0) and (parameters.max_frame < movie_l):
            movie_l = parameters.max_frame

    static_bg_estimator = None
    if hasattr(parameters, "static_background_estimate"):
        if (parameters.static_background_estimate > 0):
            print("Using static background estimator.")
            static_bg_estimator = static_background.StaticBGEstimator(
                movie_data,
                start_frame=curf,
                sample_size=parameters.static_background_estimate)

    # analyze the movie
    # catch keyboard interrupts & "gracefully" exit.
    try:
        while (curf < movie_l):
            #for j in range(l):

            # Set up the analysis.
            image = movie_data.loadAFrame(curf) - parameters.baseline
            mask = (image < 1.0)
            if (numpy.sum(mask) > 0):
                print(" Removing negative values in frame", curf)
                image[mask] = 1.0

            # Find and fit the peaks.
            if static_bg_estimator is not None:
                bg_estimate = static_bg_estimator.estimateBG(
                    curf) - parameters.baseline
                [peaks,
                 residual] = find_peaks.analyzeImage(image,
                                                     bg_estimate=bg_estimate)
            else:
                [peaks, residual] = find_peaks.analyzeImage(image)

            # Save the peaks.
            if (type(peaks) == type(numpy.array([]))):
                # remove unconverged peaks
                peaks = find_peaks.getConvergedPeaks(peaks)

                # save results
                if (parameters.orientation == "inverted"):
                    i3data.addMultiFitMolecules(peaks,
                                                movie_x,
                                                movie_y,
                                                curf + 1,
                                                parameters.pixel_size,
                                                inverted=True)
                else:
                    i3data.addMultiFitMolecules(peaks,
                                                movie_x,
                                                movie_y,
                                                curf + 1,
                                                parameters.pixel_size,
                                                inverted=False)

                total_peaks += peaks.shape[0]
                print("Frame:", curf, peaks.shape[0], total_peaks)
            else:
                print("Frame:", curf, 0, total_peaks)
            curf += 1

        print("")
        i3data.close()
        find_peaks.cleanUp()
        return 0

    except KeyboardInterrupt:
        print("Analysis stopped.")
        i3data.close()
        find_peaks.cleanUp()
        return 1
Example 32
    def __init__(self, bin_name=None, **kwds):
        super().__init__(**kwds)

        self.i3_data = readinsight3.loadI3File(bin_name)
        self.nm_per_pixel = self.parameters.getAttr("pixel_size")