Code Example #1
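The examples below are drawn from the storm-analysis package and are shown without their module imports. A minimal preamble that should make them runnable, assuming the standard storm-analysis layout (exact paths may differ between versions):

import glob
import math
import os

import numpy

# Assumed storm-analysis module paths.
import storm_analysis.sa_library.ia_utilities_c as iaUtilsC
import storm_analysis.sa_library.sa_h5py as saH5Py

# collateDAO() below also uses the aliases rfrac and ffe for modules that
# provide recallFraction()/noiseFraction() and findingFittingError();
# versions of those functions appear as examples on this page.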
def noiseFraction(truth_h5, measured_h5, tolerance):
    """
    Return the fraction of measured localizations that are greater than
    tolerance pixels from the nearest truth localization.

    Note: Returns [0, number of truth localizations] if there are no
          measured localizations.

    truth_h5 - A saH5Py.SAH5Py object with the ground truth localizations.
    measured_h5 - A saH5Py.SAH5Py object with the found localizations.
    tolerance - The search radius in pixels.
    """
    if (measured_h5.getNLocalizations() == 0):
        return [0, truth_h5.getNLocalizations()]

    noise_locs = 0
    total_locs = 0
    for i in range(truth_h5.getMovieLength()):
        t_locs = truth_h5.getLocalizationsInFrame(i)
        m_locs = measured_h5.getLocalizationsInFrame(i)

        if bool(t_locs) and bool(m_locs):
            dist = iaUtilsC.peakToPeakDistAndIndex(m_locs['x'],
                                                   m_locs['y'],
                                                   t_locs['x'],
                                                   t_locs['y'],
                                                   max_distance=tolerance)[0]

            noise_locs += numpy.count_nonzero((dist < 0.0))
            total_locs += dist.size
        elif bool(m_locs):
            noise_locs += m_locs['x'].size
            total_locs += m_locs['x'].size

    return [noise_locs, total_locs]
Code Example #2
def xyDrift(locs_filename):
    """
    Estimate the average XY drift per frame, in pixels, by matching bead
    localizations in the first frame of a movie to those in the last frame.

    locs_filename - An HDF5 localizations file.
    """
    # Load localizations.
    #
    with saH5Py.SAH5Py(locs_filename) as h5:
        n_frames = h5.getMovieLength()
        f0_locs = h5.getLocalizationsInFrame(0, fields = ["x", "y"])
        fn_locs = h5.getLocalizationsInFrame(n_frames - 1, fields = ["x", "y"])

    assert (f0_locs["x"].size > 0), "No localizations in the first frame."
    assert (fn_locs["y"].size > 0), "No localizations in the last frame."

    #
    # Identify matching beads in the first and last frame and
    # compute the displacement.
    #
    p_index = iaUtilsC.peakToPeakDistAndIndex(f0_locs['x'], f0_locs['y'],
                                              fn_locs['x'], fn_locs['y'])[1]

    all_dx = []
    all_dy = []
    for i in range(f0_locs['x'].size):
        dx = fn_locs['x'][p_index[i]] - f0_locs['x'][i]
        dy = fn_locs['y'][p_index[i]] - f0_locs['y'][i]
        # Only keep pairs that are within 1 pixel of each other; anything
        # farther apart is assumed to be a mismatch.
        if ((dx*dx + dy*dy) < 1.0):
            all_dx.append(dx)
            all_dy.append(dy)

    #
    # Return average per frame.
    #
    return [numpy.mean(numpy.array(all_dx))/float(n_frames),
            numpy.mean(numpy.array(all_dy))/float(n_frames)]
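A hypothetical call; "beads.hdf5" is a placeholder for a localizations file from a bead movie, and the result is the average drift per frame in pixels:

# Sketch only; the file name is a placeholder.
[dx_pf, dy_pf] = xyDrift("beads.hdf5")
print("Drift: {0:.5f}, {1:.5f} pixels/frame".format(dx_pf, dy_pf))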
Code Example #3
def recallFraction(truth_h5, measured_h5, tolerance):
    """
    Return the fraction of truth localizations that have a
    measured localization within tolerance pixels (in the XY plane).

    truth_h5 - A saH5Py.SAH5Py object with the ground truth localizations.
    measured_h5 - A saH5Py.SAH5Py object with the found localizations.
    tolerance - The search radius in pixels.
    """
    if (measured_h5.getNLocalizations() == 0):
        return [0, truth_h5.getNLocalizations()]

    recalled_locs = 0
    total_locs = 0
    for i in range(truth_h5.getMovieLength()):
        t_locs = truth_h5.getLocalizationsInFrame(i)
        m_locs = measured_h5.getLocalizationsInFrame(i)

        if bool(t_locs) and bool(m_locs):
            dist = iaUtilsC.peakToPeakDistAndIndex(t_locs['x'],
                                                   t_locs['y'],
                                                   m_locs['x'],
                                                   m_locs['y'],
                                                   max_distance=tolerance)[0]

            recalled_locs += numpy.count_nonzero((dist >= 0.0))
            total_locs += dist.size
        elif bool(t_locs):
            total_locs += t_locs['x'].size

    return [recalled_locs, total_locs]
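noiseFraction() and recallFraction() are mirror images: peakToPeakDistAndIndex() returns a negative distance when no partner lies within max_distance, so dist < 0.0 counts unmatched (noise) localizations and dist >= 0.0 counts matched (recalled) ones. A usage sketch, with placeholder file names and a 0.2 pixel tolerance:

# Sketch only; file names and tolerance are placeholders.
with saH5Py.SAH5Py("test_ref.hdf5") as truth_h5:
    with saH5Py.SAH5Py("test.hdf5") as measured_h5:
        [noise, noise_total] = noiseFraction(truth_h5, measured_h5, 0.2)
        [recall, recall_total] = recallFraction(truth_h5, measured_h5, 0.2)
        print("Noise fraction  : {0:.3f}".format(noise/noise_total))
        print("Recall fraction : {0:.3f}".format(recall/recall_total))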
Code Example #4
File: collate.py Project: yanyuxiong/storm-analysis
def collateRQE(dirs, settings):
    """
    Results collation for RQE correction.
    """
    for a_dir in dirs:
        print("Processing", a_dir)

        # Load the ground truth grid localizations (the same file is used
        # for every test directory).
        t_locs = saH5Py.loadLocalizations("grid_list.hdf5", fields=["x", "y"])

        t_locs_found = numpy.zeros_like(t_locs["x"])

        n_frames = 0
        with saH5Py.SAH5Py(os.path.join(a_dir, "test.hdf5")) as h5:
            for i in range(h5.getMovieLength()):
                n_frames += 1

                m_locs = h5.getLocalizationsInFrame(i, fields=["x", "y"])
                dist = iaUtilsC.peakToPeakDistAndIndex(t_locs['x'],
                                                       t_locs['y'],
                                                       m_locs['x'],
                                                       m_locs['y'],
                                                       max_distance=3)[0]

                # A negative distance means no measured localization was
                # within max_distance, so dist[j] > -0.1 tests "found".
                for j in range(dist.size):
                    if (dist[j] > -0.1):
                        t_locs_found[j] += 1

        # Check results against the binomial distribution: if each grid point
        # is found independently with probability p in each of n_frames frames,
        # the per-point counts should have variance n_frames * p * (1 - p).
        p = numpy.sum(t_locs_found) / (n_frames * t_locs["x"].size)
        print("  Mean P found     : {0:.3f}".format(p))
        print("  Expected variance: {0:.3f}".format(n_frames * p * (1 - p)))
        print("  Actual variance  : {0:.3f}".format(numpy.var(t_locs_found)))
        print()
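A hypothetical call; note that collateRQE() accepts a settings argument for symmetry with the other collation functions but does not use it:

# Sketch only; directory naming follows the test* convention used below.
collateRQE(sorted(glob.glob("test*")), None)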
Code Example #5
def findingFittingError(truth_h5, measured_h5, pixel_size = None, max_distance = None):
    """
    truth_h5 - A saH5Py.SAH5Py object with the ground truth localizations.
    measured_h5 - A saH5Py.SAH5Py object with the found localizations.
    pixel_size - The camera pixel size in nanometers. If not specified then the value
                 in the measured HDF5 file will be used.
    max_distance - If not none, found peaks that are greater than this distance from
                   a truth peak will be ignored. Units are nanometers.
    """
    if (measured_h5.getNLocalizations() == 0):
        return [None, None, None]

    if pixel_size is None:
        pixel_size = measured_h5.getPixelSize()

    # Note: pixel_size must be resolved before converting max_distance.
    md_in_pixels = None
    md_sqr = None
    if max_distance is not None:
        md_in_pixels = max_distance/pixel_size
        md_sqr = max_distance * max_distance
        
    all_dx = []
    all_dy = []
    all_dz = []
    for i in range(truth_h5.getMovieLength()):
        t_locs = truth_h5.getLocalizationsInFrame(i)
        m_locs = measured_h5.getLocalizationsInFrame(i)

        if not bool(t_locs) or not bool(m_locs):
            continue
        
        p_index = iaUtilsC.peakToPeakDistAndIndex(m_locs['x'], m_locs['y'],
                                                  t_locs['x'], t_locs['y'],
                                                  max_distance = md_in_pixels)[1]
        for j in range(m_locs['x'].size):
            if (p_index[j] < 0):
                continue
            dx = pixel_size * (m_locs['x'][j] - t_locs['x'][p_index[j]])
            dy = pixel_size * (m_locs['y'][j] - t_locs['y'][p_index[j]])

            if 'z' in m_locs:
                # z is stored in microns; convert to nanometers.
                dz = 1000.0 * (m_locs['z'][j] - t_locs['z'][p_index[j]])
            else:
                dz = 0.0
                
            if md_sqr is not None:
                if ((dx*dx + dy*dy + dz*dz) < md_sqr):
                    all_dx.append(dx)
                    all_dy.append(dy)
                    all_dz.append(dz)
            else:
                all_dx.append(dx)
                all_dy.append(dy)
                all_dz.append(dz)

    return [numpy.array(all_dx), numpy.array(all_dy), numpy.array(all_dz)]
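The returned arrays hold per-localization errors in nanometers, or None when there are no measured localizations; collateDAO() below reduces them to a standard deviation and an RMSE. A minimal sketch, with placeholder file names and an assumed 100 nm pixel:

# Sketch only; file names and pixel size are placeholders.
with saH5Py.SAH5Py("test_ref.hdf5") as truth_h5:
    with saH5Py.SAH5Py("test.hdf5") as measured_h5:
        [dx, dy, dz] = findingFittingError(truth_h5, measured_h5, pixel_size = 100.0)

if (dx is not None) and (dx.size > 0):
    print("X RMSE {0:.2f} nm".format(math.sqrt(numpy.mean(dx*dx))))
    print("Y RMSE {0:.2f} nm".format(math.sqrt(numpy.mean(dy*dy))))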
Code Example #6
File: collate.py Project: ZhuangLab/storm-analysis
def collate():

    dirs = sorted(glob.glob("test*"))

    if (len(dirs) == 0):
        print("No test directories found.")
        exit()

    # Load reference localizations.
    ref = saH5Py.loadLocalizations("sim_input_c1_grid_list.hdf5",
                                   fields=["color", "x", "y"])

    for a_dir in dirs:

        # Check color correspondence.
        #
        # Note: Currently only works for gridded localizations.
        #
        exp = saH5Py.loadTracks(a_dir + "/test.hdf5",
                                fields=["x", "y", "km_color"])

        # Identify corresponding peaks with 2 pixel maximum radius.
        p_index = iaUtilsC.peakToPeakDistAndIndex(exp["x"],
                                                  exp["y"],
                                                  ref["x"],
                                                  ref["y"],
                                                  max_distance=2.0)[1]

        # Create array for checking category correspondence.
        c_size = numpy.count_nonzero(p_index > -1)
        categories = numpy.zeros((c_size, 2), dtype=numpy.int32)
        i = 0
        for j in range(p_index.size):
            if (p_index[j] < 0):
                continue
            categories[i, 0] = int(ref["color"][p_index[j]])
            categories[i, 1] = int(exp["km_color"][j])
            i += 1

        # Measure the fraction of experimental localizations assigned correctly.
        # This is complicated by the k-means categories being arbitrary, so each
        # reference color is first mapped to the median experimental category
        # among its matched localizations.
        for i in range(8):
            ref_mask = (categories[:, 0] == i)
            if (numpy.count_nonzero(ref_mask) > 0):
                exp_cat = int(numpy.median(categories[ref_mask, 1]))
                exp_mask = (categories[:, 1] == exp_cat)

                total = numpy.count_nonzero(exp_mask)
                matched = numpy.count_nonzero(
                    numpy.logical_and(ref_mask, exp_mask))
                mismatched = total - matched
                print(
                    "Color {0:0d}, matching fraction is {1:.2f}, unmatched fraction is {2:.2f}, total {3:0d}"
                    .format(i, matched / total, mismatched / total, total))
Code Example #7
def collateDAO(dirs, settings, calc_width_error = True):
    """
    Results collations for 3D-DAOSTORM and sCMOS analysis.
    """
    all_dx = []
    all_dy = []
    all_wx = []
    all_wy = []
    noise = 0
    noise_total = 0
    recall = 0
    recall_total = 0
    total_locs = 0
    total_time = 0.0
    for a_dir in dirs:
        print("Processing", a_dir)

        # Load timing information.
        with open(a_dir + "/timing.txt") as fp:
            total_time += float(fp.readline())
            
        # Load localizations.
        truth_h5 = saH5Py.SAH5Py(a_dir + "/test_ref.hdf5")
        measured_h5 = saH5Py.SAH5Py(a_dir + "/test.hdf5")
        total_locs += measured_h5.getNLocalizations()
    
        # Calculate fractional recall.
        [partial, total] = rfrac.recallFraction(truth_h5, measured_h5, settings.tolerance)
        recall += partial
        recall_total += total

        # Calculate noise fraction.
        [partial, total] = rfrac.noiseFraction(truth_h5, measured_h5, settings.tolerance)
        noise += partial
        noise_total += total

        # Calculate error in fitting width.
        if calc_width_error:

            for i in range(truth_h5.getMovieLength()):    
                t_locs = truth_h5.getLocalizationsInFrame(i)
                m_locs = measured_h5.getLocalizationsInFrame(i)

                # Widths for truth localizations.
                t_wx = t_locs["xsigma"]
                t_wy = t_locs["ysigma"]
    
                # Widths for found localizations.
                if ("xsigma" in m_locs):
                    m_wx = m_locs["xsigma"]
                else:
                    m_wx = 1.5*numpy.ones(m_locs['x'].size)
                                      
                if ("ysigma" in m_locs):
                    m_wy = m_locs["ysigma"]
                else:
                    m_wy = m_wx.copy()

                p_index = iaUtilsC.peakToPeakDistAndIndex(m_locs['x'], m_locs['y'],
                                                          t_locs['x'], t_locs['y'],
                                                          max_distance = settings.tolerance)[1]

                p_size = numpy.count_nonzero(p_index > -1)
                d_wx = numpy.zeros(p_size)
                d_wy = numpy.zeros(p_size)
                k = 0
                for j in range(m_locs["x"].size):
                    if(p_index[j] < 0):
                        continue
            
                    d_wx[k] = m_wx[j] - t_wx[p_index[j]]
                    d_wy[k] = m_wy[j] - t_wy[p_index[j]]
                    k += 1

                all_wx.append(d_wx)
                all_wy.append(d_wy)

        # Calculate the fitting error in XY, ignoring found peaks that are
        # more than 2 pixels from a truth peak.
        max_distance = 2.0 * settings.pixel_size
        print("Using max_distance", max_distance, "nm for error calculations.")
        
        [dx, dy, dz] = ffe.findingFittingError(truth_h5,
                                               measured_h5,
                                               pixel_size = settings.pixel_size,
                                               max_distance = max_distance)
        if (dx is not None) and (dx.size != 0):
            all_dx.append([numpy.std(dx), math.sqrt(numpy.mean(dx*dx))])
            all_dy.append([numpy.std(dy), math.sqrt(numpy.mean(dy*dy))])
        else:
            all_dx.append([0,0])
            all_dy.append([0,0])

        truth_h5.close()
        measured_h5.close()


    print()
    print("Analysis Summary:")
    print("Processed {0:0d} localizations in {1:.2f} seconds, {2:.2f}/sec".format(total_locs, total_time, float(total_locs)/float(total_time)))
    print("Recall {0:.5f}".format(float(recall)/float(recall_total)))
    print("Noise {0:.5f}".format(float(noise)/float(noise_total)))
    print("XY Error Standard Deviation (nm):")
    for i, a_dir in enumerate(dirs):
        print(a_dir + "\t{0:.2f}\t{1:.2f}".format(all_dx[i][0], all_dy[i][0]))
    print("")
    print("XY RMSE (nm):")
    for i, a_dir in enumerate(dirs):
        print(a_dir + "\t{0:.2f}\t{1:.2f}".format(all_dx[i][1], all_dy[i][1]))
        
    if calc_width_error:
        print("")
        print("XY Width Error, Mean difference with truth, Standard deviation (pixels):")
        for i, a_dir in enumerate(dirs):
            print(a_dir + "\t{0:.3f}\t{1:.3f}\t{2:.3f}\t{3:.3f}".format(numpy.mean(all_wx[i]),
                                                                        numpy.std(all_wx[i]),
                                                                        numpy.mean(all_wy[i]),
                                                                        numpy.std(all_wy[i])))
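A hypothetical driver for collateDAO(); the settings object only needs the two attributes the function reads, and in the storm-analysis diagnostics these would come from a per-test settings module:

import types

# Hypothetical values; tolerance is in pixels, pixel_size in nanometers.
settings = types.SimpleNamespace(tolerance = 0.2, pixel_size = 100.0)
collateDAO(sorted(glob.glob("test*")), settings, calc_width_error = True)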