Example #1
    def __init__(self, parameters = None, **kwds):
        super(DataWriterI3, self).__init__(**kwds)

        raise Exception("Using the Insight3 format for analysis is deprecated!")

        self.pixel_size = parameters.getAttr("pixel_size")
                
        #
        # If the i3 file already exists, read it in, write it
        # out to prepare for starting the analysis from the
        # end of what currently exists.
        #
        # FIXME: If the existing file is really large there
        #        could be problems here as we're going to load
        #        the whole thing into memory.
        #
        if(os.path.exists(self.filename)):
            print("Found", self.filename)
            i3data_in = readinsight3.loadI3File(self.filename)
            if (i3data_in is None) or (i3data_in.size == 0):
                self.start_frame = 0
            else:
                self.start_frame = int(numpy.max(i3data_in['fr']))

            print(" Starting analysis at frame:", self.start_frame)
            self.i3data = writeinsight3.I3Writer(self.filename)
            if (self.start_frame > 0):
                self.i3data.addMolecules(i3data_in)
                self.total_peaks = i3data_in['x'].size
        else:
            self.start_frame = 0
            self.i3data = writeinsight3.I3Writer(self.filename)
Example #2
def findClusters(mlist_name, clist_name, eps, mc, ignore_z = True, ignore_category = True):
    
    # Load the data.
    pix_to_nm = 160.0

    i3_data_in = readinsight3.loadI3GoodOnly(mlist_name)

    c = i3_data_in['c']
    x = i3_data_in['xc']*pix_to_nm
    y = i3_data_in['yc']*pix_to_nm

    if ignore_z:
        print("Warning! Clustering without using localization z value!")
        z = numpy.zeros(x.size)
    else:
        z = i3_data_in['zc']

    # Perform analysis without regard to category.
    if ignore_category:
        print("Warning! Clustering without regard to category!")
        c = numpy.zeros(c.size)

    # Cluster the data.
    labels = dbscanC.dbscan(x, y, z, c, eps, mc, z_factor=1.0)

    # Save the data.    
    i3_data_out = writeinsight3.I3Writer(clist_name)
    i3dtype.setI3Field(i3_data_in, 'lk', labels)
    i3_data_out.addMolecules(i3_data_in)
    i3_data_out.close()
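A minimal usage sketch; the file names and parameter values here are hypothetical. Note that eps is in nanometers (the coordinates are scaled by pix_to_nm) and mc is the DBSCAN minimum cluster size:

# Cluster with an 80 nm neighborhood radius and at least 5 localizations
# per cluster, assuming findClusters() above is in scope.
findClusters("cells_mlist.bin", "cells_clusters_list.bin", eps = 80.0, mc = 5)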
Example #3
def copyTracking(ref_track_filename, input_filename, output_filename):

    i3_ref = readinsight3.I3Reader(ref_track_filename)
    i3_in = readinsight3.I3Reader(input_filename)

    # Verify that these two localization files are actually pairs.
    assert (i3_ref.molecules == i3_in.molecules), "Input files do not match."

#    # Verify that the output file doesn't already exist.
#    assert not os.path.exists(output_filename), "Output file already exists."
    
    i3_out = writeinsight3.I3Writer(output_filename)

    i3_ref_data = i3_ref.nextBlock()
    while (i3_ref_data is not False):        
        i3_in_data = i3_in.nextBlock()

        # Copy tracking information from the reference data file.
        for field in ["tl", "lk", "fi"]:
            i3_in_data[field] = i3_ref_data[field]

        # Save merge of reference (channel 0) and current channel.
        i3_out.addMolecules(i3_in_data)
        
        # Load the next block of reference data.
        i3_ref_data = i3_ref.nextBlock()

    # At least for now we are not bothering to copy the meta-data.
    i3_out.close()
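A usage sketch, assuming the reference file has already been tracked and both files come from the same analysis so their molecule counts match (file names are hypothetical):

# Copy the track length, link and track id fields from the tracked
# channel 0 file onto the untracked channel 1 localizations.
copyTracking("ch0_tracked.bin", "ch1_mlist.bin", "ch1_tracked.bin")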
Example #4
    def __init__(self, **kwds):
        super(MPDataWriter, self).__init__(**kwds)
        parameters = kwds["parameters"]

        self.offsets = []

        # Figure out how many planes there are.
        self.n_planes = len(mpUtilC.getExtAttrs(parameters))

        # Save frame offsets for each plane.
        for offset in mpUtilC.getOffsetAttrs(parameters):
            self.offsets.append(parameters.getAttr(offset))

        # Adjust starting frame based on channel 0 offset.
        if (self.start_frame > 0) and (self.offsets[0] != 0):
            self.start_frame += self.offsets[0]
            print("Adjusted start frame to", self.start_frame,
                  "based on channel 0 offset.")

        # Create writers for the other planes.
        #
        # FIXME: This won't work for existing I3 files.
        #
        assert (self.start_frame == 0)
        self.i3_writers = [self.i3data]
        for i in range(1, self.n_planes):
            fname = self.filename[:-4] + "_ch" + str(i) + ".bin"
            self.i3_writers.append(writeinsight3.I3Writer(fname))
Example #5
def test_good_i3():
    mlist_name = storm_analysis.getPathOutputTest("test_i3_io_mlist.bin")

    # Create data.
    locs = i3dtype.createDefaultI3Data(100)

    # Save the data.
    with writeinsight3.I3Writer(mlist_name) as i3w:
        i3w.addMolecules(locs)

    # Read the data.
    locs = readinsight3.loadI3File(mlist_name)
    assert(locs.shape[0] == 100)
Example #6
def test_write_read_1():
    """
    Test writing and reading.
    """
    bin_name = storm_analysis.getPathOutputTest("test_insight3io.bin")

    i3_locs = i3dtype.createDefaultI3Data(10)
    i3dtype.posSet(i3_locs, 'x', 10.0)

    with writeinsight3.I3Writer(bin_name) as i3:
        i3.addMolecules(i3_locs)

    i3_in = readinsight3.loadI3File(bin_name, verbose = False)
    assert(numpy.allclose(i3_locs['x'], i3_in['x']))
Example #7
def test_bad_i3():
    mlist_name = storm_analysis.getPathOutputTest("test_i3_io_mlist.bin")

    # Create data.
    locs = i3dtype.createDefaultI3Data(100)

    # Save the data, closing the underlying file pointer directly. This
    # bypasses I3Writer.close(), so the header is presumably never
    # finalized and the file should read back as bad.
    i3w = writeinsight3.I3Writer(mlist_name)
    i3w.addMolecules(locs)
    i3w.fp.close()

    # Read the data.
    locs = readinsight3.loadI3File(mlist_name)
    assert(locs is None)
Example #8
def splitPeaks(mlist_filename, params_filename):

    parameters = params.ParametersMultiplane().initFromFile(params_filename)

    # Load the plane to plane mapping data.
    mappings = {}
    if parameters.hasAttr("mapping"):
        if os.path.exists(parameters.getAttr("mapping")):
            with open(parameters.getAttr("mapping"), 'rb') as fp:
                mappings = pickle.load(fp)

        else:
            print("No mapping file parameter, nothing to do")

    # Load frame offset information.
    frame_offsets = list(
        map(parameters.getAttr, mpUtilC.getOffsetAttrs(parameters)))
    print(frame_offsets)

    # Load the molecule list.
    ch0_data = readinsight3.loadI3File(mlist_filename)

    # Map to other channels.
    basename = mlist_filename[:-4]
    channel = 1
    m_key = "0_1_"
    while (m_key + "x") in mappings:
        chn_data = ch0_data.copy()

        # Map x.
        tx = mappings[m_key + "x"]
        chn_data['x'] = tx[0] + ch0_data['x'] * tx[1] + ch0_data['y'] * tx[2]

        # Map y.
        ty = mappings[m_key + "y"]
        chn_data['y'] = ty[0] + ch0_data['x'] * ty[1] + ch0_data['y'] * ty[2]

        # Map frame.
        chn_data[
            'fr'] = ch0_data['fr'] - frame_offsets[0] + frame_offsets[channel]

        with writeinsight3.I3Writer(basename + "_ch" + str(channel) +
                                    ".bin") as i3w:
            i3w.addMolecules(chn_data)

        channel += 1
        m_key = "0_" + str(channel) + "_"
Example #9
def test_write_read_3():
    """
    Test I3Reader on an empty file.
    """
    bin_name = storm_analysis.getPathOutputTest("test_insight3io.bin")

    i3_locs = i3dtype.createDefaultI3Data(10)
    i3dtype.posSet(i3_locs, 'x', 10.0)

    with writeinsight3.I3Writer(bin_name) as i3:
        pass

    i3_reader = readinsight3.I3Reader(bin_name)

    # Read localizations.
    i3_in = i3_reader.nextBlock()
    assert(i3_in is False)
Example #10
def reduceMList(i3_name_in,
                i3_name_out,
                xstart=None,
                xstop=None,
                ystart=None,
                ystop=None,
                min_frame=None,
                max_frame=None):

    meta_data = readinsight3.loadI3Metadata(i3_name_in)

    i3_in = readinsight3.I3Reader(i3_name_in)
    i3_out = writeinsight3.I3Writer(i3_name_out)

    i3_data = i3_in.nextBlock()
    while (i3_data is not False):
        sys.stdout.write(".")
        sys.stdout.flush()

        # Create mask.
        mask = numpy.full(i3_data.size, True, dtype=bool)
        if xstart is not None:
            mask = mask & (i3_data['xc'] > xstart)
        if xstop is not None:
            mask = mask & (i3_data['xc'] < xstop)
        if ystart is not None:
            mask = mask & (i3_data['yc'] > ystart)
        if ystop is not None:
            mask = mask & (i3_data['yc'] < ystop)
        if min_frame is not None:
            mask = mask & (i3_data['fr'] > min_frame)
        if max_frame is not None:
            mask = mask & (i3_data['fr'] < max_frame)

        i3_data = i3dtype.maskData(i3_data, mask)
        if (i3_data.size > 0):
            i3_out.addMolecules(i3_data)

        i3_data = i3_in.nextBlock()

    print()

    if meta_data is not None:
        i3_out.closeWithMetadata(ElementTree.tostring(meta_data, 'ISO-8859-1'))
    else:
        i3_out.close()
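A usage sketch (file names and bounds are hypothetical). Note that all of the comparisons are strict, so the spatial and frame limits are exclusive:

# Keep only localizations inside a 100 x 100 pixel ROI from frames 1-999.
reduceMList("full_mlist.bin", "roi_mlist.bin",
            xstart = 100.0, xstop = 200.0,
            ystart = 100.0, ystop = 200.0,
            min_frame = 0, max_frame = 1000)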
Example #11
def test_good_i3_metadata():
    mlist_name = storm_analysis.getPathOutputTest("test_i3_io_mlist.bin")

    # Create data.
    locs = i3dtype.createDefaultI3Data(100)

    # Save data and metadata.
    i3w = writeinsight3.I3Writer(mlist_name)
    i3w.addMolecules(locs)
    etree = ElementTree.Element("xml")
    test = ElementTree.SubElement(etree, "test")
    test.text = "test"
    i3w.closeWithMetadata(ElementTree.tostring(etree, 'ISO-8859-1'))

    # Read the data.
    locs = readinsight3.loadI3File(mlist_name)
    assert(locs.shape[0] == 100)

    # Read the metadata.
    metadata = readinsight3.loadI3Metadata(mlist_name)
    assert(metadata.find("test").text == "test")
Example #12
def hdf5ToBin(hdf5_name, bin_name):
    with saH5Py.SAH5Py(hdf5_name) as h5:
        nm_per_pixel = h5.getPixelSize()
        [movie_x, movie_y, movie_l, hash_value] = h5.getMovieInformation()

        # Create Insight3 file for writing.
        i3 = i3w.I3Writer(bin_name)

        # Convert tracks.
        if h5.hasTracks():
            print("Converting tracks.")
            for tracks in h5.tracksIterator():
                i3.addMultiFitMolecules(tracks, 1, nm_per_pixel)

        # Convert localizations.
        else:
            print("Converting localizations.")
            for fnum, locs in h5.localizationsIterator(drift_corrected=False):
                i3.addMultiFitMolecules(locs, fnum + 1, nm_per_pixel)

        # Add metadata.
        etree = ElementTree.Element("xml")

        # Analysis parameters.
        h5_metadata = h5.getMetadata()
        etree.append(ElementTree.fromstring(h5_metadata))

        # Movie properties.
        movie_props = ElementTree.SubElement(etree, "movie")
        field = ElementTree.SubElement(movie_props, "hash_value")
        field.text = hash_value
        for elt in [["movie_x", movie_x], ["movie_y", movie_y],
                    ["movie_l", movie_l]]:
            field = ElementTree.SubElement(movie_props, elt[0])
            field.text = str(elt[1])

        metadata = ElementTree.tostring(etree, 'ISO-8859-1')

        # Close i3 file with metadata.
        i3.closeWithMetadata(metadata)
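A usage sketch (file names are hypothetical). Tracks take precedence, so raw localizations are only converted when the HDF5 file has no tracks:

hdf5ToBin("analysis.hdf5", "analysis_mlist.bin")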
Example #13
def clusterSize(clist_name, clist_size_name, remove_cat0=False):

    # Load the data.
    i3_data_in = readinsight3.loadI3GoodOnly(clist_name)

    # Remove category zero localizations.
    if remove_cat0:
        print("warning, removing category zero localizations!")
        i3_data = i3dtype.maskData(i3_data_in, (i3_data_in['c'] != 0))
    else:
        i3_data = i3_data_in

    # Record cluster localization numbers in the fit area field.
    i3_data['a'] = dbscanC.localizationClusterSize(i3_data['lk']) + 1

    # Copy cluster id into the frame field.
    i3_data['fr'] = i3_data['lk']

    # Save the data.
    i3_data_out = writeinsight3.I3Writer(clist_size_name)
    i3_data_out.addMolecules(i3_data)
    i3_data_out.close()
Example #14
def test_write_read_2():
    """
    Test I3Reader.
    """
    bin_name = storm_analysis.getPathOutputTest("test_insight3io.bin")

    i3_locs = i3dtype.createDefaultI3Data(10)
    i3dtype.posSet(i3_locs, 'x', 10.0)

    with writeinsight3.I3Writer(bin_name) as i3:
        i3.addMolecules(i3_locs)

    i3_reader = readinsight3.I3Reader(bin_name)

    # Read localizations.
    i3_in = i3_reader.nextBlock()
    assert(i3_in is not False)
    assert(numpy.allclose(i3_locs['x'], i3_in['x']))

    # This should return False because there are no more localizations.
    i3_in = i3_reader.nextBlock()
    assert(i3_in is False)
Example #15
def test_sa_h5py_5():
    """
    Test querying if the HDF5 file is a storm-analysis file.
    """
    filename = "test_sa_hdf5.hdf5"
    h5_name = storm_analysis.getPathOutputTest(filename)
    storm_analysis.removeFile(h5_name)

    # Open empty file.
    with saH5Py.SAH5Py(h5_name, is_existing=False) as h5:
        pass
    assert (saH5Py.isSAHDF5(h5_name))

    # Create generic HDF5 file.
    f = h5py.File(h5_name, "w")
    f.close()
    assert (not saH5Py.isSAHDF5(h5_name))

    # Create Insight3 file.
    with i3w.I3Writer(h5_name) as i3:
        pass
    assert not (saH5Py.isSAHDF5(h5_name))
Example #16
def MergeBin(bin_files, results_file):
    assert not os.path.exists(results_file)

    # Load meta data.
    metadata = readinsight3.loadI3Metadata(bin_files[0])

    # Create I3 writer.
    i3w = writeinsight3.I3Writer(results_file)

    # Sequentially read input files and copy into output file.
    for b_file in bin_files:
        print("Merging", b_file)
        i3_reader = readinsight3.I3Reader(b_file)
        i3_data = i3_reader.nextBlock()
        while i3_data is not False:
            print("  working..")
            i3w.addMolecules(i3_data)
            i3_data = i3_reader.nextBlock()

    # Close i3 output file.
    if metadata is None:
        i3w.close()
    else:
        i3w.closeWithMetadata(ElementTree.tostring(metadata, 'ISO-8859-1'))
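A usage sketch (file names are hypothetical); the metadata, if any, is taken from the first file in the list:

MergeBin(["part_1_mlist.bin", "part_2_mlist.bin", "part_3_mlist.bin"],
         "merged_mlist.bin")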
Example #17
import numpy

import storm_analysis.sa_library.i3dtype as i3dtype
import storm_analysis.sa_library.writeinsight3 as writeinsight3

background = 5
clusters = 100
length = 20000
p_on = 0.1

numpy.random.seed(0)

cx = numpy.random.uniform(low = 20.0, high = 236.0, size = clusters)
cy = numpy.random.uniform(low = 20.0, high = 236.0, size = clusters)
cz = numpy.random.uniform(low = -300.0, high = 300.0, size = clusters)

with writeinsight3.I3Writer("../data/test_clustering_list.bin") as i3_fp:
    for i in range(length):
        
        if((i % 500) == 0):
            print("Creating frame", i)

        on = (numpy.random.uniform(size = clusters) < p_on)
        number_on = numpy.count_nonzero(on)

        x = cx + numpy.random.normal(scale = 0.5, size = clusters)
        y = cy + numpy.random.normal(scale = 0.5, size = clusters)
        z = cz + numpy.random.normal(scale = 50.0, size = clusters)

        # Add background
        x = numpy.concatenate((x[on], numpy.random.uniform(high = 256.0, size = background)))
        y = numpy.concatenate((y[on], numpy.random.uniform(high = 256.0, size = background)))
Example #18
    # Save results.
    fx = fdecon.getXVector()
    print(numpy.min(fx), numpy.max(fx))
    decon_data = daxwriter.DaxWriter(sys.argv[3], fx.shape[0], fx.shape[1])
    for i in range(fx.shape[2]):
        decon_data.addFrame(fx[:, :, i])
    decon_data.close()

    # Find peaks in the decon data.
    peaks = fdecon.getPeaks(parameters.getAttr("threshold"), 5)

    zci = utilC.getZCenterIndex()
    z_min, z_max = fdecon.getZRange()
    peaks[:, zci] = 1.0e-3 * ((z_max - z_min) * peaks[:, zci] + z_min)

    i3_writer = writeinsight3.I3Writer(sys.argv[3][:-4] + "_flist.bin")
    i3_writer.addMultiFitMolecules(peaks, x_size, y_size, 1,
                                   parameters.getAttr("pixel_size"))
    i3_writer.close()

Example #19
x_size = 300
y_size = 310
z_planes = [0.0]
#z_planes = [-250.0, 250.0]
#z_planes = [-750.0, -250.0, 250.0, 750.0]
z_value = -500.0

# Load emitter locations.
i3_locs = readinsight3.loadI3File("emitters.bin")

if False:
    i3_locs = i3_locs[0]

# Make a bin file with emitter locations for each frame.
with writeinsight3.I3Writer("test_olist.bin") as i3w:
    for i in range(frames):
        i3_temp = i3_locs.copy()
        i3dtype.setI3Field(i3_temp, "fr", i + 1)
        i3dtype.posSet(i3_temp, "z", z_value)
        i3w.addMolecules(i3_temp)

# Load channel to channel mapping file.
with open("map.map", 'rb') as fp:
    mappings = pickle.load(fp)

# Create bin files for each plane.
for i, z_plane in enumerate(z_planes):
    cx = mappings["0_" + str(i) + "_x"]
    cy = mappings["0_" + str(i) + "_y"]
    i3_temp = i3_locs.copy()
Example #20
#
# Create a test .bin file for evaluating RCC correction.
#
# Hazen 10/14
#

import numpy
import sys

import storm_analysis.sa_library.writeinsight3 as writeinsight3

if (len(sys.argv) != 2):
    print("usage: <out.bin>")
    exit()

i3_out = writeinsight3.I3Writer(sys.argv[1])
n_locs = 100


def addMol(x, y, frame):
    x += numpy.random.normal(scale=0.05, size=n_locs)
    y += numpy.random.normal(scale=0.05, size=n_locs)
    i3_out.addMoleculesWithXYFrame(x, y, frame)


x1 = 255.0 * numpy.random.uniform(size=n_locs)
y1 = 255.0 * numpy.random.uniform(size=n_locs)

x2 = 255.0 * numpy.random.uniform(size=n_locs)
y2 = 255.0 * numpy.random.uniform(size=n_locs)
Example #21
def voronoi(mlist_name, clist_name, density_factor, min_size, verbose=True):

    i3_data_in = readinsight3.loadI3GoodOnly(mlist_name)
    n_locs = i3_data_in['xc'].size
    points = numpy.column_stack((i3_data_in['xc'], i3_data_in['yc']))

    print("Creating Voronoi object.")
    vor = Voronoi(points)

    print("Calculating 2D region sizes.")
    for i, region_index in enumerate(vor.point_region):
        if ((i % 10000) == 0):
            print("Processing point", i)

        vertices = []
        for vertex in vor.regions[region_index]:

            # I think these are edge regions?
            if (vertex == -1):
                vertices = []
                break

            vertices.append(vor.vertices[vertex])

        if (len(vertices) > 0):
            area = Polygon(vertices).area
            i3_data_in['a'][i] = 1.0 / area

    # Use a median density based threshold.
    ave_density = numpy.median(i3_data_in['a'])
    if verbose:
        print("Min density", numpy.min(i3_data_in['a']))
        print("Max density", numpy.max(i3_data_in['a']))
        print("Median density", ave_density)

    # Record the neighbors of each point.
    max_neighbors = 40
    neighbors = numpy.zeros((n_locs, max_neighbors), dtype=numpy.int32) - 1
    neighbors_counts = numpy.zeros((n_locs), dtype=numpy.int32)

    print("Calculating neighbors")
    for ridge_p in vor.ridge_points:

        p1 = ridge_p[0]
        p2 = ridge_p[1]

        # Add p2 to the list for p1
        neighbors[p1, neighbors_counts[p1]] = p2
        neighbors_counts[p1] += 1

        # Add p1 to the list for p2
        neighbors[p2, neighbors_counts[p2]] = p1
        neighbors_counts[p2] += 1

    if False:
        n1 = neighbors[0, :]
        print(n1)
        print(neighbors[n1[0], :])

    # Mark connected points that meet the minimum density criteria.
    print("Marking connected regions")
    i3_data_in['lk'] = -1
    min_density = density_factor * ave_density
    visited = numpy.zeros((n_locs), dtype=numpy.int32)

    def neighborsList(index):
        nlist = []
        for i in range(neighbors_counts[index]):
            loc_index = neighbors[index, i]
            if (visited[loc_index] == 0):
                nlist.append(neighbors[index, i])
                visited[loc_index] = 1
        return nlist

    cluster_id = 2
    for i in range(n_locs):
        if (visited[i] == 0):
            if (i3_data_in['a'][i] > min_density):
                cluster_elt = [i]
                c_size = 1
                visited[i] = 1
                to_check = neighborsList(i)
                while (len(to_check) > 0):

                    # Remove last localization from the list.
                    loc_index = to_check[-1]
                    to_check = to_check[:-1]

                    # If the localization has sufficient density add to cluster and check neighbors.
                    if (i3_data_in['a'][loc_index] > min_density):
                        to_check += neighborsList(loc_index)
                        cluster_elt.append(loc_index)
                        c_size += 1

                    # Mark as visited.
                    visited[loc_index] = 1

                # Mark the cluster if there are enough localizations in the cluster.
                if (c_size > min_size):
                    print("cluster", cluster_id, "size", c_size)
                    for elt in cluster_elt:
                        i3_data_in['lk'][elt] = cluster_id
                cluster_id += 1
            visited[i] = 1

    print(cluster_id, "clusters")

    # Save the data.
    print("Saving results")
    i3_data_out = writeinsight3.I3Writer(clist_name)
    i3_data_out.addMolecules(i3_data_in)
    i3_data_out.close()
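A usage sketch (file names and thresholds are hypothetical); density_factor is relative to the median density, so 2.0 means a point must have at least twice the median density to seed or join a cluster:

voronoi("cells_mlist.bin", "cells_srt_clusters_list.bin",
        density_factor = 2.0, min_size = 30)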
Example #22
def analyze(movie_name, settings_name, hres_name, bin_name):

    movie_data = datareader.inferReader(movie_name)

    #
    # FIXME:
    #
    # This should also start at the same frame as hres in the event of a restart.
    #
    i3_file = writeinsight3.I3Writer(bin_name)

    params = parameters.ParametersL1H().initFromFile(settings_name)

    #
    # Load the a matrix and setup the homotopy image analysis class.
    #
    a_mat_file = params.getAttr("a_matrix")

    print("Using A matrix file:", a_mat_file)
    a_mat = setup_A_matrix.loadAMatrix(a_mat_file)

    image = movie_data.loadAFrame(0)
    htia = homotopy_imagea_c.HomotopyIA(a_mat, params.getAttr("epsilon"),
                                        image.shape)

    #
    # This opens the file. If it already exists, then it sets the file pointer
    # to the end of the file & returns the number of the last frame analyzed.
    #
    curf = htia.openHRDataFile(hres_name)

    #
    # Figure out which frame to start & stop at.
    #
    [dax_x, dax_y, dax_l] = movie_data.filmSize()

    if params.hasAttr("start_frame"):
        if (params.getAttr("start_frame") >=
                curf) and (params.getAttr("start_frame") < dax_l):
            curf = params.getAttr("start_frame")

    if params.hasAttr("max_frame"):
        if (params.getAttr("max_frame") > 0) and (params.getAttr("max_frame") <
                                                  dax_l):
            dax_l = params.getAttr("max_frame")

    print("Starting analysis at frame", curf)

    #
    # Analyze the dax data.
    #
    total_peaks = 0
    try:
        while (curf < dax_l):

            # Load image, subtract baseline & remove negative values.
            image = movie_data.loadAFrame(curf).astype(numpy.float64)
            image -= params.getAttr("baseline")
            mask = (image < 0)
            image[mask] = 0

            # Analyze image.
            hres_image = htia.analyzeImage(image)
            peaks = htia.saveHRFrame(hres_image, curf + 1)
            [cs_x, cs_y, cs_a, cs_i] = htia.getPeaks(hres_image)
            i3_file.addMoleculesWithXYAItersFrame(cs_x, cs_y, cs_a, cs_i,
                                                  curf + 1)

            peaks = cs_x.size
            total_peaks += peaks
            print("Frame:", curf, peaks, total_peaks)

            curf += 1

    except KeyboardInterrupt:
        print("Analysis stopped.")

    # cleanup
    htia.closeHRDataFile()
    i3_file.close()
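A usage sketch with hypothetical file names; per the comments above, the hres file is created by htia.openHRDataFile() if it does not already exist, which is what allows a restart:

# Run L1H compressed-sensing analysis; all four paths are hypothetical.
analyze("movie.dax", "l1h_settings.xml", "movie.hres", "movie_alist.bin")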
Example #23
if False:
    z = i3_data_in['zc']
else:
    print "Warning! Clustering without using localization z value!"
    z = numpy.zeros(x.size)

# Perform analysis without regard to category.
if True:
    print "Warning! Clustering without regard to category!"
    c = numpy.zeros(c.size)

# Cluster the data.
if (len(sys.argv) == 4):
    print "Using eps =", sys.argv[2], "mc =", sys.argv[3]
    labels = dbscanC.dbscan(x,
                            y,
                            z,
                            c,
                            float(sys.argv[2]),
                            int(sys.argv[3]),
                            z_factor=1.0)
else:
    print "Using eps = 80, mc = 5"
    labels = dbscanC.dbscan(x, y, z, c, 80.0, 5, z_factor=1.0)

# Save the data.
i3_data_out = writeinsight3.I3Writer(sys.argv[1][:-8] + "clusters_list.bin")
i3dtype.setI3Field(i3_data_in, 'lk', labels)
i3_data_out.addMolecules(i3_data_in)
i3_data_out.close()
Example #24
def psfLocalizations(i3_filename, mapping_filename, frame = 1, aoi_size = 8, movie_filename = None):

    # Load localizations.
    i3_reader = readinsight3.I3Reader(i3_filename)

    # Load mapping.
    mappings = {}
    if os.path.exists(mapping_filename):
        with open(mapping_filename, 'rb') as fp:
            mappings = pickle.load(fp)
    else:
        print("Mapping file not found, single channel data?")

    # Try and determine movie frame size.
    i3_metadata = readinsight3.loadI3Metadata(i3_filename)
    if i3_metadata is None:
        if movie_filename is None:
            raise Exception("I3 metadata not found and movie filename is not specified.")
        else:
            movie_fp = datareader.inferReader(movie_filename)
            [movie_y, movie_x] = movie_fp.filmSize()[:2]
    else:
        movie_data = i3_metadata.find("movie")

        # FIXME: These may be transposed?
        movie_x = int(movie_data.find("movie_x").text)
        movie_y = int(movie_data.find("movie_y").text)
    
    # Load localizations in the requested frame.
    locs = i3_reader.getMoleculesInFrame(frame)
    print("Loaded", locs.size, "localizations.")

    # Remove localizations that are too close to each other.
    in_locs = numpy.zeros((locs["x"].size, util_c.getNPeakPar()))
    in_locs[:,util_c.getXCenterIndex()] = locs["x"]
    in_locs[:,util_c.getYCenterIndex()] = locs["y"]

    out_locs = util_c.removeNeighbors(in_locs, 2 * aoi_size)

    xf = out_locs[:,util_c.getXCenterIndex()]
    yf = out_locs[:,util_c.getYCenterIndex()]

    #
    # Remove localizations that are too close to the edge or
    # outside of the image in any of the channels.
    #
    is_good = numpy.ones(xf.size, dtype = bool)
    for i in range(xf.size):

        # Check in Channel 0.
        if (xf[i] < aoi_size) or (xf[i] + aoi_size >= movie_x):
            is_good[i] = False
            continue
        
        if (yf[i] < aoi_size) or (yf[i] + aoi_size >= movie_y):
            is_good[i] = False
            continue

        # Check other channels.
        for key in mappings:
            if not is_good[i]:
                break
            
            coeffs = mappings[key]
            [ch1, ch2, axis] = key.split("_")
            if (ch1 == "0"):

                if (axis == "x"):
                    xm = coeffs[0] + coeffs[1]*xf[i] + coeffs[2]*yf[i]
                    if (xm < aoi_size) or (xm + aoi_size >= movie_x):
                        is_good[i] = False
                        break

                elif (axis == "y"):
                    ym = coeffs[0] + coeffs[1]*xf[i] + coeffs[2]*yf[i]
                    if (ym < aoi_size) or (ym + aoi_size >= movie_y):
                        is_good[i] = False
                        break

    #
    # Save localizations for each channel.
    #
    gx = xf[is_good]
    gy = yf[is_good]

    basename = os.path.splitext(i3_filename)[0]
    with writeinsight3.I3Writer(basename + "_c1_psf.bin") as w3:
        w3.addMoleculesWithXY(gx, gy)
    
    index = 1
    while ("0_" + str(index) + "_x" in mappings):
        cx = mappings["0_" + str(index) + "_x"]
        cy = mappings["0_" + str(index) + "_y"]
        #cx = mappings[str(index) + "_0" + "_x"]
        #cy = mappings[str(index) + "_0" + "_y"]
        xm = cx[0] + cx[1] * gx + cx[2] * gy
        ym = cy[0] + cy[1] * gx + cy[2] * gy

        with writeinsight3.I3Writer(basename + "_c" + str(index+1) + "_psf.bin") as w3:
            w3.addMoleculesWithXY(xm, ym)

        index += 1

    #
    # Print localizations that were kept.
    #
    print(gx.size, "localizations were kept:")
    for i in range(gx.size):
        print("ch0: {0:.2f} {1:.2f}".format(gx[i], gy[i]))
        index = 1
        while ("0_" + str(index) + "_x" in mappings):
            cx = mappings["0_" + str(index) + "_x"]
            cy = mappings["0_" + str(index) + "_y"]
            xm = cx[0] + cx[1] * gx[i] + cx[2] * gy[i]
            ym = cy[0] + cy[1] * gx[i] + cy[2] * gy[i]
            print("ch" + str(index) + ": {0:.2f} {1:.2f}".format(xm, ym))
            index += 1
        print("")
    print("")
Example #25
def alignAndMerge(file1, file2, results_file, scale = 2, dx = 0, dy = 0, z_min = -500.0, z_max = 500.0):
    assert not os.path.exists(results_file)

    z_bins = int((z_max - z_min)/50)

    # Load meta data.
    metadata1 = readinsight3.loadI3Metadata(file1)
    metadata2 = readinsight3.loadI3Metadata(file2)

    # If metadata is available, update the film length
    # field to that of whichever data set is longer.
    #
    # Note that the merged file will still be messy in that the
    # frame numbers for the second movie are not changed, so they
    # will likely overlap with those of the first movie and break
    # the assumption that frame number always increases as you
    # go through the file.
    #
    if (metadata1 is not None) and (metadata2 is not None):
        f1_length = int(metadata1.find("movie").find("movie_l").text)
        f2_length = int(metadata2.find("movie").find("movie_l").text)
        if (f2_length > f1_length):
            metadata1.find("movie").find("movie_l").text = str(f2_length)
    
    i3_data1 = i3togrid.I3GData(file1, scale = scale)
    i3_data2 = i3togrid.I3GData(file2, scale = scale)

    # Determine x,y offsets.
    xy_data1 = i3_data1.i3To2DGridAllChannelsMerged()
    xy_data2 = i3_data2.i3To2DGridAllChannelsMerged()
    
    [corr, offx, offy, xy_success] = imagecorrelation.xyOffset(xy_data1,
                                                               xy_data2,
                                                               scale,
                                                               center = [dx * scale,
                                                                         dy * scale])

    assert(xy_success)

    # Update x,y positions in file2.
    offx = offx/float(scale)
    offy = offy/float(scale)
    print("x,y offsets", offx, offy)

    i3_data2.i3data['xc'] += offx
    i3_data2.i3data['yc'] += offy

    # Determine z offsets.
    xyz_data1 = i3_data1.i3To3DGridAllChannelsMerged(z_bins,
                                                     zmin = z_min,
                                                     zmax = z_max)
    xyz_data2 = i3_data2.i3To3DGridAllChannelsMerged(z_bins,
                                                     zmin = z_min,
                                                     zmax = z_max)

    [corr, fit, dz, z_success] = imagecorrelation.zOffset(xyz_data1, xyz_data2)
    assert(z_success)

    dz = dz * (z_max - z_min)/float(z_bins)
    print("z offset", dz)

    # Update z positions in file2.
    i3_data2.i3data['zc'] -= dz

    i3w = writeinsight3.I3Writer(results_file)
    i3w.addMolecules(i3_data1.getData())
    i3w.addMolecules(i3_data2.getData())
    if metadata1 is None:
        i3w.close()
    else:
        i3w.closeWithMetadata(ElementTree.tostring(metadata1, 'ISO-8859-1'))
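A usage sketch (file names are hypothetical); dx and dy seed the x,y cross-correlation search when the offset between the two data sets is already roughly known:

alignAndMerge("section_1_mlist.bin", "section_2_mlist.bin",
              "merged_mlist.bin", dx = 0, dy = 0)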
Example #26
def mergeAnalysis(dir_name, bin_base_name, extensions=[".bin"]):

    # Create Insight3 file writers.
    i3_out = []
    for ext in extensions:
        i3_out.append(writeinsight3.I3Writer(bin_base_name + ext))

    # Find all the job*.xml files.
    job_xml_files = glob.glob(dir_name + "job*.xml")

    # Sort job files.
    job_xml_files = sorted(
        job_xml_files,
        key=lambda x: int(
            os.path.splitext(os.path.basename(x))[0].split("_")[1]))

    # Check for corresponding mlist.bin files.
    metadata = None
    last_frame = 0
    for i in range(len(job_xml_files)):

        job_complete = True
        for j, ext in enumerate(extensions):
            mlist_name = dir_name + "p_" + str(i + 1) + "_mlist" + ext

            if os.path.exists(mlist_name) and readinsight3.checkStatus(
                    mlist_name):

                # Load metadata from the first file.
                if (i == 0) and (j == 0):
                    metadata = readinsight3.loadI3Metadata(mlist_name)

                # Read localizations.
                i3_data = readinsight3.loadI3File(mlist_name, verbose=False)

                # Check for empty file.
                if (i3_data.size == 0):
                    print("No localizations found in", mlist_name)

                else:
                    # Print frame range covered.
                    if (j == 0):
                        last_frame = i3_data["fr"][-1]
                        print(i3_data["fr"][0], last_frame, mlist_name)

                    # Add localizations to the output file.
                    i3_out[j].addMolecules(i3_data)

            else:
                job_complete = False
                break

        if not job_complete:
            print("Merge failed because", job_xml_files[i], "is incomplete.")
            for j, ext in enumerate(extensions):
                i3_out[j].close()
                os.remove(bin_base_name + ext)
            assert (False)

    if metadata is None:
        print("No metadata found.")
        for i3w in i3_out:
            i3w.close()
    else:

        # Fix movie length node based on the last frame of the last molecule.
        metadata.find("movie").find("movie_l").text = str(last_frame)

        # Also need to fix analysis end points. We are assuming that the
        # entire movie was analyzed.
        metadata.find("settings").find("start_frame").text = "-1"
        metadata.find("settings").find("max_frame").text = "-1"

        for i3w in i3_out:
            i3w.closeWithMetadata(ElementTree.tostring(metadata, 'ISO-8859-1'))
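A usage sketch (paths are hypothetical); dir_name needs a trailing slash because the job and molecule list file names are built by simple string concatenation:

mergeAnalysis("analysis_dir/", "merged_mlist", extensions = [".bin"])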
Example #27
def peakFinding(find_peaks, movie_file, mlist_file, parameters):

    # open files for input & output
    movie_data = datareader.inferReader(movie_file)
    [movie_x, movie_y, movie_l] = movie_data.filmSize()

    # if the i3 file already exists, read it in,
    # write it out & start the analysis from the
    # end.
    total_peaks = 0
    if (os.path.exists(mlist_file)):
        print("Found", mlist_file)
        i3data_in = readinsight3.loadI3File(mlist_file)
        try:
            curf = int(numpy.max(i3data_in['fr']))
        except ValueError:
            curf = 0
        print(" Starting analysis at frame:", curf)
        i3data = writeinsight3.I3Writer(mlist_file)
        if (curf > 0):
            i3data.addMolecules(i3data_in)
            total_peaks = i3data_in['x'].size
    else:
        curf = 0
        i3data = writeinsight3.I3Writer(mlist_file)

    # process parameters
    if hasattr(parameters, "start_frame"):
        if (parameters.start_frame >= curf) and (parameters.start_frame <
                                                 movie_l):
            curf = parameters.start_frame

    if hasattr(parameters, "max_frame"):
        if (parameters.max_frame > 0) and (parameters.max_frame < movie_l):
            movie_l = parameters.max_frame

    static_bg_estimator = None
    if hasattr(parameters, "static_background_estimate"):
        if (parameters.static_background_estimate > 0):
            print("Using static background estimator.")
            static_bg_estimator = static_background.StaticBGEstimator(
                movie_data,
                start_frame=curf,
                sample_size=parameters.static_background_estimate)

    # analyze the movie
    # catch keyboard interrupts & "gracefully" exit.
    try:
        while (curf < movie_l):
            #for j in range(l):

            # Set up the analysis.
            image = movie_data.loadAFrame(curf) - parameters.baseline
            mask = (image < 1.0)
            if (numpy.sum(mask) > 0):
                print(" Removing negative values in frame", curf)
                image[mask] = 1.0

            # Find and fit the peaks.
            if static_bg_estimator is not None:
                bg_estimate = static_bg_estimator.estimateBG(
                    curf) - parameters.baseline
                [peaks,
                 residual] = find_peaks.analyzeImage(image,
                                                     bg_estimate=bg_estimate)
            else:
                [peaks, residual] = find_peaks.analyzeImage(image)

            # Save the peaks.
            if isinstance(peaks, numpy.ndarray):
                # remove unconverged peaks
                peaks = find_peaks.getConvergedPeaks(peaks)

                # save results
                if (parameters.orientation == "inverted"):
                    i3data.addMultiFitMolecules(peaks,
                                                movie_x,
                                                movie_y,
                                                curf + 1,
                                                parameters.pixel_size,
                                                inverted=True)
                else:
                    i3data.addMultiFitMolecules(peaks,
                                                movie_x,
                                                movie_y,
                                                curf + 1,
                                                parameters.pixel_size,
                                                inverted=False)

                total_peaks += peaks.shape[0]
                print("Frame:", curf, peaks.shape[0], total_peaks)
            else:
                print("Frame:", curf, 0, total_peaks)
            curf += 1

        print("")
        i3data.close()
        find_peaks.cleanUp()
        return 0

    except KeyboardInterrupt:
        print("Analysis stopped.")
        i3data.close()
        find_peaks.cleanUp()
        return 1
Example #28
i3dtype.posSet(m_data, "x", numpy.random.uniform(high=im_size,
                                                 size=total_match))
i3dtype.posSet(m_data, "y", numpy.random.uniform(high=im_size,
                                                 size=total_match))

# Create noise 1.
total_noise = args.total - total_match
n1_data = i3dtype.createDefaultI3Data(total_noise)

i3dtype.posSet(n1_data, "x",
               numpy.random.uniform(high=im_size, size=total_noise))
i3dtype.posSet(n1_data, "y",
               numpy.random.uniform(high=im_size, size=total_noise))

# Create noise 2.
n2_data = i3dtype.createDefaultI3Data(total_noise)

i3dtype.posSet(n2_data, "x",
               numpy.random.uniform(high=im_size, size=total_noise))
i3dtype.posSet(n2_data, "y",
               numpy.random.uniform(high=im_size, size=total_noise))

# Save data sets.
with writeinsight3.I3Writer("locs1.bin") as i3w:
    i3w.addMolecules(m_data)
    i3w.addMolecules(n1_data)

with writeinsight3.I3Writer("locs2.bin") as i3w:
    i3w.addMolecules(m_data)
    i3w.addMolecules(n2_data)
Example #29
# Hazen 11/11
#

import sys

import storm_analysis.dbscan.dbscan_c as dbscanC
import storm_analysis.sa_library.i3dtype as i3dtype
import storm_analysis.sa_library.readinsight3 as readinsight3
import storm_analysis.sa_library.writeinsight3 as writeinsight3

# Load the data.
i3_data_in = readinsight3.loadI3GoodOnly(sys.argv[1])

# Remove category zero localizations.
if False:
    print "warning, removing category zero localizations!"
    i3_data = i3dtype.maskData(i3_data_in, (i3_data_in['c'] != 0))
else:
    i3_data = i3_data_in

# Record cluster localization numbers in the fit area field.
i3_data['a'] = dbscanC.localizationClusterSize(i3_data['lk']) + 1

# Copy cluster id into the frame field.
i3_data['fr'] = i3_data['lk']

# Save the data.
i3_data_out = writeinsight3.I3Writer(sys.argv[2])
i3_data_out.addMolecules(i3_data)
i3_data_out.close()
Example #30
def KMeansClassifier(codebook, input_basename, output_name, extensions = [".bin", "_ch1.bin", "_ch2.bin", "_ch3.bin"], max_distance = 80):
    """
    Note: 
      1. The default is that there are 4 color channels / cameras.
      2. The maximum distance is in percent, so '80' means that the 20%
         of the localizations that most distant from a cluster center
         will put in category 9.
    """
    n_channels = codebook.shape[1]
    assert (n_channels == len(extensions)), "Codebook size does not match data."

    # Create a reader for each file.
    i3_readers = []
    for ext in extensions:
        i3_name = input_basename + ext
        print(i3_name)
        i3_readers.append(readinsight3.I3Reader(i3_name))

    # Create writer for the results.
    i3_out = writeinsight3.I3Writer(output_name)

    # Read first block of the first channel data.
    i3_data = [i3_readers[0].nextBlock()]
    while (i3_data[0] is not False):
        print("working..")

        # Read the data from the other channels.
        for i in range(1,len(i3_readers)):
            i3_data.append(i3_readers[i].nextBlock())

        # Load height data for each channel.
        features = numpy.zeros((i3_data[0].size, n_channels))
        for i in range(len(i3_readers)):
            features[:,i] = i3_data[i]['h']

        # Normalize by total height.
        total = numpy.sum(features, axis = 1)
        for i in range(features.shape[0]):
            features[i,:] = features[i,:]/total[i]
    
        # Whiten the features as recommended by Scipy.
        features = scipy.cluster.vq.whiten(features)

        # Classify using codebook.
        [category, distance] = scipy.cluster.vq.vq(features, codebook)
        dist_max = numpy.percentile(distance, max_distance)

        # Put top XX% in distance in category 9 (the discard category).
        mask = (distance > dist_max)
        category[mask] = 9
            
        #
        # Store category and distance in the 'c' and 'i' field respectively.
        #
        i3_data[0]['c'] = category
        i3_data[0]['i'] = distance

        i3_out.addMolecules(i3_data[0])

        # Load the next block of data.
        i3_data = [i3_readers[0].nextBlock()]

    # Close output file
    i3_out.close()
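A sketch of building a codebook for the classifier with scipy's k-means, then classifying. The training data and file names here are hypothetical; in practice the features would be whitened height fractions computed from measured localizations, exactly as in the classifier loop above:

import numpy
import scipy.cluster.vq

# Hypothetical training data: per-localization height fractions in 4 channels.
features = numpy.random.uniform(size = (1000, 4))
features = features / numpy.sum(features, axis = 1, keepdims = True)
features = scipy.cluster.vq.whiten(features)

# 4 cluster centers, one per dye species, matching the 4 default extensions.
codebook, distortion = scipy.cluster.vq.kmeans(features, 4)

KMeansClassifier(codebook, "merged", "classified_list.bin")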