def test_merge_2():
    """
    Test file merging, skipping files with no tracks.
    """
    metadata = "<xml><field1><data1>data</data1></field></xml>"
    ref_tracks = {"x" : numpy.random.randint(0,10,10),
                  "y" : numpy.random.randint(0,10,10)}

    # Create HDF5 files to merge.
    h5_names = []
    for i in range(3):
        h5_name = storm_analysis.getPathOutputTest("test_merge_f" + str(i) + ".hdf5")
        h5_names.append(h5_name)

        with saH5Py.SAH5Py(h5_name, is_existing = False, overwrite = True) as h5:
            h5.addMetadata(metadata)
            h5.setMovieInformation(20,20,1,"")
            h5.setPixelSize(100.0)
            if i != 1:
                h5.addTracks(ref_tracks)

    # Merge.
    merge_name = storm_analysis.getPathOutputTest("test_merge.hdf5")
    storm_analysis.removeFile(merge_name)
    mergeHDF5.mergeHDF5(h5_names, merge_name)

    # Check merge.
    with saH5Py.SAH5Py(merge_name) as h5:
        assert(metadata == h5.getMetadata())
        for tracks in h5.tracksIterator():
            assert(numpy.allclose(ref_tracks["x"], tracks["x"]))
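
# Note: the excerpts in this listing assume the module-level context of the
# original test files, which is not reproduced here. A minimal sketch of that
# context follows; the values below (in particular eps and im_size) are
# illustrative assumptions, not taken from this listing. FakeReader, used by
# the tracking and fiducial tests, is a small movie-reader stub defined in
# the original test modules.

import math
import pickle

import numpy
import tifffile

import storm_analysis
import storm_analysis.sa_library.sa_h5py as saH5Py

# Assumed small offset added to the y coordinates in the noise/recall tests;
# it only needs to be much smaller than the 0.1 match radius.
eps = 1.0e-3

# Assumed sCMOS calibration frame shape used by configureTest().
im_size = (300, 300)
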
def test_noise_1():
    """
    Test noise fraction calculation.
    """
    px = numpy.arange(10)
    py = numpy.ones(10)

    # Write GT data.
    gt_name = storm_analysis.getPathOutputTest("trf_gt")
    with saH5Py.SAH5Py(gt_name, is_existing=False, overwrite=True) as h5:
        h5.setMovieInformation(1, 1, 1, "")
        h5.addLocalizations({"x": px, "y": py}, 0)

    for i in range(2, 10):

        # Write test data.
        meas_name = storm_analysis.getPathOutputTest("trf_meas")
        with saH5Py.SAH5Py(meas_name, is_existing=False, overwrite=True) as h5:
            h5.setMovieInformation(1, 1, 1, "")
            h5.addLocalizations({"x": px[:i], "y": py[:i] + eps}, 0)

        # Test noise calculation.
        with saH5Py.SAH5Reader(gt_name) as h5_gt:
            with saH5Py.SAH5Reader(meas_name) as h5_meas:
                [nl, tl] = rfrac.noiseFraction(h5_gt, h5_meas, 0.1)
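                # All i measured points lie within the 0.1 match radius of a
                # ground truth point, so the noise count nl is 0 of tl = i.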
                assert (nl == 0)
                assert (tl == i)
def test_voronoi_clustering():

    # Test voronoi
    alist_name = storm_analysis.getData("test/data/test_clustering_list.bin")
    output_dir = storm_analysis.getPathOutputTest("./")

    from storm_analysis.voronoi.voronoi_analysis import voronoiAnalysis
    voronoiAnalysis(alist_name, 0.1, output_dir)

    # Verify number of clusters found.
    stats_file = storm_analysis.getPathOutputTest(
        "test_clustering_srt_stats.txt")
    with open(stats_file) as fp:
        n_clusters = len(fp.readlines())
    if n_clusters != 100:
        raise Exception(
            "Voronoi did not identify the expected number of clusters.")

    # Make pictures.
    clist_name = storm_analysis.getPathOutputTest(
        "test_clustering_srt_size_list.bin")
    image_name = storm_analysis.getPathOutputTest("test_clustering_vr")

    from storm_analysis.dbscan.cluster_images import clusterImages
    clusterImages(clist_name, "Voronoi Clustering", 50, 20, image_name,
                  [256, 256])
def test_micrometry_2():
    """
    Test micrometry on random data.
    """

    locs1_name = storm_analysis.getPathOutputTest("locs1.hdf5")
    locs2_name = storm_analysis.getPathOutputTest("locs2.hdf5")

    # Create test data.
    im_size = 512
    n_points = 50

    numpy.random.seed(0)

    with saH5Py.SAH5Py(locs1_name, is_existing = False, overwrite = True) as h5:
        locs = {"x" : numpy.random.uniform(high = im_size, size = n_points),
                "y" : numpy.random.uniform(high = im_size, size = n_points)}
        h5.setMovieInformation(512, 512, 1, "")
        h5.addLocalizations(locs, 0)

    with saH5Py.SAH5Py(locs2_name, is_existing = False, overwrite = True) as h5:
        locs = {"x" : numpy.random.uniform(high = im_size, size = n_points),
                "y" : numpy.random.uniform(high = im_size, size = n_points)}
        h5.setMovieInformation(512, 512, 1, "")
        h5.addLocalizations(locs, 0)

    # Test
    mm = micrometry.Micrometry(locs1_name,
                               min_size = 5.0,
                               max_size = 100.0,
                               max_neighbors = 20)
    [best_ratio, best_transform] = mm.findTransform(locs2_name, 1.0e-2)

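    # The two files contain unrelated random localizations, so no strong
    # transform should be found and the match quality ratio stays low.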
    assert(best_ratio < 10.0)
def test_dbscan_clustering():

    # Test dbscan
    import shutil

    # Copy alist to the output directory so that the DBSCAN results end up in the right place.
    alist_data = storm_analysis.getData("test/data/test_clustering_list.bin")
    alist_output = storm_analysis.getPathOutputTest(
        "test_clustering_alist.bin")
    shutil.copyfile(alist_data, alist_output)

    from storm_analysis.dbscan.dbscan_analysis import dbscanAnalysis
    dbscanAnalysis(alist_output, 0)

    # Verify number of clusters found.
    stats_file = storm_analysis.getPathOutputTest(
        "test_clustering_aclusters_stats.txt")
    with open(stats_file) as fp:
        n_clusters = len(fp.readlines())
    if n_clusters != 99:
        raise Exception(
            "DBSCAN did not identify the expected number of clusters.")

    # Make pictures.
    clist_name = storm_analysis.getPathOutputTest(
        "test_clustering_aclusters_size_list.bin")
    image_name = storm_analysis.getPathOutputTest("test_clustering_db")

    from storm_analysis.dbscan.cluster_images import clusterImages
    clusterImages(clist_name, "DBSCAN Clustering", 50, 20, image_name,
                  [256, 256])
def configureTest():
    """
    These tests have a lot of setup. This function takes care of this.
    """
    mparams = params.ParametersMultiplane()

    # sCMOS calibration files.
    gain = numpy.ones(im_size)
    offset = numpy.zeros(im_size)
    variance = numpy.ones(im_size)
    rqe = numpy.ones(im_size)

    cal_file = storm_analysis.getPathOutputTest("c1_cal.npy")
    numpy.save(cal_file, [offset, variance, gain, rqe, 2])
    mparams.changeAttr("channel0_cal", cal_file)

    cal_file = storm_analysis.getPathOutputTest("c2_cal.npy")
    numpy.save(cal_file, [offset, variance, gain, rqe, 2])
    mparams.changeAttr("channel1_cal", cal_file)

    mparams.changeAttr("channel0_ext", "_c1.tif")
    mparams.changeAttr("channel1_ext", "_c2.tif")

    mparams.changeAttr("channel0_offset", 0)
    mparams.changeAttr("channel1_offset", 0)

    return mparams
def test_hdf5_to_bin_2():
    """
    Test tracks conversion.
    """
    peaks = {"x" : numpy.zeros(10),
             "y" : numpy.ones(10)}

    h5_name = storm_analysis.getPathOutputTest("test_sa_hdf5.hdf5")
    storm_analysis.removeFile(h5_name)

    # Write data.
    with saH5Py.SAH5Py(h5_name, is_existing = False) as h5:
        h5.addMetadata("<settings/>")
        h5.setMovieInformation(256, 256, 10, "XYZZY")
        h5.setPixelSize(100.0)
        h5.addTracks(peaks)

    # Convert.
    i3_name = storm_analysis.getPathOutputTest("test_mlist.bin")
    storm_analysis.removeFile(i3_name)
    hdf5ToBin.hdf5ToBin(h5_name, i3_name)

    # Load Insight3 file and check values.
    i3_data = readinsight3.loadI3File(i3_name, verbose = False)

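    # Insight3 coordinates appear to be 1-based while the HDF5 coordinates
    # are 0-based, hence the -1.0 offset below.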
    assert(numpy.allclose(peaks["x"], i3_data['x'] - 1.0))
    assert(numpy.allclose(peaks["y"], i3_data['y'] - 1.0))    
    assert(numpy.allclose(i3_data['fr'], numpy.ones(10)))
def test_recall_2():
    """
    Test recall fraction calculation.
    """
    px = numpy.arange(10)
    py = numpy.ones(10)

    # Write GT data.
    gt_name = storm_analysis.getPathOutputTest("trf_gt")
    with saH5Py.SAH5Py(gt_name, is_existing=False, overwrite=True) as h5:
        h5.setMovieInformation(1, 1, 1, "")
        h5.addLocalizations({"x": px, "y": py}, 0)

    for i in range(2, 10):

        # Write test data.
        meas_name = storm_analysis.getPathOutputTest("trf_meas")
        with saH5Py.SAH5Py(meas_name, is_existing=False, overwrite=True) as h5:
            h5.setMovieInformation(1, 1, 1, "")
            ty = numpy.copy(py) + eps
            ty[i:] += 0.2
            h5.addLocalizations({"x": px, "y": ty}, 0)

        # Test recall calculation.
        with saH5Py.SAH5Reader(gt_name) as h5_gt:
            with saH5Py.SAH5Reader(meas_name) as h5_meas:
                [rl, tl] = rfrac.recallFraction(h5_gt, h5_meas, 0.1)
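                # Only the first i measured points still lie within the 0.1
                # match radius of ground truth, so i of tl = 10 are recalled.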
                assert (rl == i)
                assert (tl == 10)
def _test_psf_to_spline_2D():

    psf = storm_analysis.getPathOutputTest("test_spliner_psf_2d.psf")
    spline = storm_analysis.getPathOutputTest("test_spliner_psf_2d.spline")
    storm_analysis.removeFile(spline)

    from storm_analysis.spliner.psf_to_spline import psfToSpline
    psfToSpline(psf, spline, 7)
def create2DSpline():
    movie = storm_analysis.getData("test/data/test.dax")
    mlist = storm_analysis.getData("test/data/test_ref.hdf5")
    psf = storm_analysis.getPathOutputTest("test_spliner_psf_2d.psf")
    spline = storm_analysis.getPathOutputTest("test_spliner_psf_2d.spline")

    storm_analysis.removeFile(psf)
    storm_analysis.removeFile(spline)

    measurePSF.measurePSF(movie, "", mlist, psf, want2d=True, aoi_size=5)
    psfToSpline.psfToSpline(psf, spline, 4)
def create3DSpline():

    movie = storm_analysis.getData("test/data/test_spliner.dax")
    mlist = storm_analysis.getData("test/data/test_spliner_ref.hdf5")
    psf = storm_analysis.getPathOutputTest("test_spliner_psf.psf")
    spline = storm_analysis.getPathOutputTest("test_spliner_psf.spline")

    storm_analysis.removeFile(psf)
    storm_analysis.removeFile(spline)
    
    measurePSF.measurePSF(movie, "", mlist, psf, aoi_size = 6)
    psfToSpline.psfToSpline(psf, spline, 5)
def test_l1h():
    
    # Test L1H.
    movie_name = storm_analysis.getData("test/data/test_l1h.dax")
    settings = storm_analysis.getData("test/data/test_l1h.xml")
    hres = storm_analysis.getPathOutputTest("test_l1h_list.hres")
    mlist = storm_analysis.getPathOutputTest("test_l1h_list.bin")

    storm_analysis.removeFile(hres)
    storm_analysis.removeFile(mlist)

    from storm_analysis.L1H.cs_analysis import analyze
    analyze(movie_name, settings, hres, mlist)
def test_align_merge_2():
    """
    Test aligning and merging two HDF5 files with offset.
    """
    n_locs = 500
    tracks = {"x" : numpy.random.normal(loc = 10.0, scale = 0.2, size = n_locs),
              "y" : numpy.random.normal(loc = 10.0, scale = 0.2, size = n_locs),
              "z" : numpy.random.normal(scale = 0.05, size = n_locs)}

    h5_in1 = storm_analysis.getPathOutputTest("test_align_merge_1.hdf5")
    h5_in2 = storm_analysis.getPathOutputTest("test_align_merge_2.hdf5")
    h5_alm = storm_analysis.getPathOutputTest("test_align_merge_3.hdf5")

    # Create input files.
    t_dx = 2.0
    t_dz = 0.3
    with saH5Py.SAH5Py(h5_in1, is_existing = False, overwrite = True) as h5:
        h5.addMetadata("<xml><field1><data1>1</data1></field></xml>")
        h5.setMovieInformation(20, 20, 2, "")
        h5.setPixelSize(100.0)
        h5.addTracks(tracks)

    with saH5Py.SAH5Py(h5_in2, is_existing = False, overwrite = True) as h5:
        h5.addMetadata("<xml><field1><data1>2</data1></field></xml>")
        h5.setMovieInformation(20, 20, 2, "")
        h5.setPixelSize(100.0)

        tracks["x"] += t_dx
        tracks["z"] += t_dz
        h5.addTracks(tracks)

    # Align and merge with offset.
    storm_analysis.removeFile(h5_alm)
    [dx, dy, dz] = alignAndMerge.alignAndMerge(h5_in1, h5_in2, h5_alm, dx = -t_dx)
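    # The known x shift is supplied, presumably as a starting offset, while
    # the z shift must be recovered by the alignment itself.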

    # Check that we got the right offsets.
    assert(numpy.allclose(numpy.array([dx, dy, dz]),
                          numpy.array([-t_dx, 0.0, -t_dz]),
                          atol = 0.001,
                          rtol = 0.1))


    # Check that the output file is correctly aligned.
    with saH5Py.SAH5Py(h5_alm) as h5:
        tracks = h5.getTracks(fields = ["x", "y", "z"])
        assert(numpy.allclose(numpy.array([numpy.std(tracks["x"]),
                                           numpy.std(tracks["y"]),
                                           numpy.std(tracks["z"])]),
                              numpy.array([0.2, 0.2, 0.05]),
                              atol = 0.001,
                              rtol = 0.1))
def test_voronoi_clustering():
    
    # Test voronoi
    alist_name = storm_analysis.getData("test/data/test_drift_alist.bin")
    output_dir = storm_analysis.getPathOutputTest("./")

    from storm_analysis.voronoi.voronoi_analysis import voronoiAnalysis
    voronoiAnalysis(alist_name, 1.25, output_dir)

    clist_name = storm_analysis.getPathOutputTest("test_drift_asrt_size_list.bin")
    image_name = storm_analysis.getPathOutputTest("test_drift_vr")

    from storm_analysis.dbscan.cluster_images import clusterImages
    clusterImages(clist_name, "Voronoi Clustering", 50, 20, image_name, [256, 256])
def test_tracker():

    # Test tracking.
    import shutil

    settings = storm_analysis.getData("test/data/test_drift.xml")
    alist_name = storm_analysis.getPathOutputTest("test_drift_alist.bin")

    # Copy mlist so that it is in the same directory as alist.
    mlist_data = storm_analysis.getData("test/data/test_drift_mlist.bin")
    mlist_output = storm_analysis.getPathOutputTest("test_drift_mlist.bin")
    shutil.copyfile(mlist_data, mlist_output)

    from storm_analysis.sa_utilities.track_average_correct import trackAverageCorrect
    trackAverageCorrect(mlist_output, alist_name, settings)
def test_tracker_6():
    """
    Test max_gap parameter.
    """
    peaks = {"x" : numpy.array([1.0, 2.0, 3.0]),
             "y" : numpy.array([1.0, 1.0, 1.0]),
             "sum" : numpy.array([4.0, 4.0, 4.0])}

    filename = "test_sa_hdf5.hdf5"
    h5_name = storm_analysis.getPathOutputTest(filename)
    storm_analysis.removeFile(h5_name)

    # Write data.
    with saH5Py.SAH5Py(h5_name, is_existing = False) as h5:
        h5.addLocalizations(peaks, 0)
        h5.addLocalizations(peaks, 2)
        h5.addMovieInformation(FakeReader(n_frames = 3))

    # Track.
    tracker.tracker(h5_name, radius = 0.1)

    # Tracking.
    with saH5Py.SAH5Py(h5_name) as h5:
        assert(h5.getNTracks() == 6)
        for t in h5.tracksIterator():
            assert(numpy.allclose(t["track_length"], numpy.ones(6)))

    # Redo the tracking allowing single frame gaps.
    tracker.tracker(h5_name, max_gap = 1, radius = 0.1)

    with saH5Py.SAH5Py(h5_name) as h5:
        assert(h5.getNTracks() == 3)
        for t in h5.tracksIterator():
            assert(numpy.allclose(t["track_length"], 2.0*numpy.ones(3)))
def test_pupilfn_2():
    """
    Test PF translation.
    """
    dx = 0.5
    dy = 0.25
    dz = 0.2
    geo = pupilMath.Geometry(20, 0.1, 0.6, 1.5, 1.4)
    pf = geo.createFromZernike(1.0, [[1.3, 2, 2]])

    pf_c = pfFnC.PupilFunction(geometry = geo)
    pf_c.setPF(pf)

    pf_c.translate(dx, dy, dz)
    psf_c = pupilMath.intensity(pf_c.getPSF())

    defocused = geo.changeFocus(pf, dz)
    translated = geo.translatePf(defocused, dx, dy)
    psf_py = pupilMath.intensity(pupilMath.toRealSpace(translated))

    if False:
        with tifffile.TiffWriter(storm_analysis.getPathOutputTest("test_pupilfn_2.tif")) as tf:
            tf.save(psf_c.astype(numpy.float32))
            tf.save(psf_py.astype(numpy.float32))

    assert numpy.allclose(psf_c, psf_py)
            
    pf_c.cleanup()
def test_pupilfn_4():
    """
    Test PF X derivative (Python library).
    """
    dx = 1.0e-6
    geo = pupilMath.Geometry(20, 0.1, 0.6, 1.5, 1.4)
    pf = geo.createFromZernike(1.0, [[1.3, 2, 2]])
    
    # Calculate derivative of magnitude as a function of x.
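    # Uses d|psi|^2/dx = 2*(Re(psi)*Re(dpsi/dx) + Im(psi)*Im(dpsi/dx)).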
    psf_py = pupilMath.toRealSpace(pf)
    psf_py_dx = pupilMath.toRealSpace(geo.dx(pf))
    mag_dx_calc = 2.0 * (numpy.real(psf_py)*numpy.real(psf_py_dx) + numpy.imag(psf_py)*numpy.imag(psf_py_dx))

    # Estimate derivative using (f(x+dx) - f(x))/dx
    mag = pupilMath.intensity(psf_py)
    translated = geo.translatePf(pf, dx, 0.0)
    mag_dx_est = (pupilMath.intensity(pupilMath.toRealSpace(translated)) - mag)/dx
        
    if False:
        with tifffile.TiffWriter(storm_analysis.getPathOutputTest("test_pupilfn_4.tif")) as tf:
            #tf.save(mag.astype(numpy.float32))
            tf.save(mag_dx_calc.astype(numpy.float32))
            tf.save(mag_dx_est.astype(numpy.float32))
            tf.save(numpy.abs(mag_dx_calc - mag_dx_est).astype(numpy.float32))

    assert numpy.allclose(mag_dx_calc, mag_dx_est, atol = 1.0e-6)
def test_load_mappings_1():
    map_test_file = storm_analysis.getPathOutputTest("map.map")

    max_ch = 4
    mappings = {}
    for i in range(1,max_ch):
        j = i
        mappings[str(i) + "_0_x"] = numpy.arange(j,j+2.5,1.0)
        j += 0.1
        mappings[str(i) + "_0_y"] = numpy.arange(j,j+2.5,1.0)
        j += 0.1
        mappings["0_" + str(i) + "_x"] = numpy.arange(j,j+2.5,1.0)
        j += 0.1
        mappings["0_" + str(i) + "_y"] = numpy.arange(j,j+2.5,1.0)

    max_ch -= 1

    with open(map_test_file, 'wb') as fp:
        pickle.dump(mappings, fp)

    mappings = {}
    [xt_0toN, yt_0toN, xt_Nto0, yt_Nto0] = mpUtil.loadMappings(map_test_file, 0)
    assert(xt_0toN[0,0] == 0.0)
    assert(yt_0toN[0,0] == 0.0)
    assert(xt_Nto0[0,0] == 0.0)
    assert(yt_Nto0[0,0] == 0.0)

    assert(abs(xt_0toN[max_ch,2]-5.2) < 1.0e-6)
    assert(abs(yt_0toN[max_ch,2]-5.3) < 1.0e-6)
    assert(abs(xt_Nto0[max_ch,2]-5.0) < 1.0e-6)
    assert(abs(yt_Nto0[max_ch,2]-5.1) < 1.0e-6)
def test_pupilfn_7():
    """
    Test that PF translation is correct (i.e. independent of size).
    """
    sizes = [10, 20, 40]
    dx = 1.0

    for size in sizes:
        geo = pupilMath.Geometry(size, 0.1, 0.6, 1.5, 1.4)
        pf = geo.createFromZernike(1.0, [[1.3, 2, 2]])

        pf_c = pfFnC.PupilFunction(geometry = geo)
        pf_c.setPF(pf)
        
        psf_untranslated = numpy.roll(pupilMath.intensity(pf_c.getPSF()), 1, axis = 0)
            
        pf_c.translate(dx, 0.0, 0.0)
        psf_translated = pupilMath.intensity(pf_c.getPSF())

        if False:
            with tifffile.TiffWriter(storm_analysis.getPathOutputTest("test_pupilfn_7.tif")) as tf:
                tf.save(psf_untranslated.astype(numpy.float32))
                tf.save(psf_translated.astype(numpy.float32))

        assert numpy.allclose(psf_untranslated, psf_translated)
            
        pf_c.cleanup()
def test_fiducials_1():
    """
    Basic fiducials test.
    """
    peaks = {
        "x": numpy.array([1.0, 2.0, 3.0]),
        "y": numpy.array([1.0, 1.0, 1.0])
    }

    filename = "test_fiducials.hdf5"
    h5_name = storm_analysis.getPathOutputTest(filename)
    storm_analysis.removeFile(h5_name)

    # Write data.
    with saH5Py.SAH5Py(h5_name, is_existing=False) as h5:
        for i in range(3):
            h5.addLocalizations(peaks, i)

        h5.addMovieInformation(FakeReader(n_frames=3))

    # Track fiducials.
    fiducials.trackFiducials(h5_name, radius=0.1)

    # Check.
    with saH5Py.SAH5Py(h5_name) as h5:
        for fnum, locs in h5.localizationsIterator(fields=["fiducial_id"]):
            assert (numpy.allclose(locs["fiducial_id"], numpy.arange(3)))
def test_rcc():

    mlist_name = storm_analysis.getData("test/data/test_drift_mlist.bin")
    drift_name = storm_analysis.getPathOutputTest("test_drift.txt")

    from storm_analysis.rcc.rcc_drift_correction import rccDriftCorrection
    rccDriftCorrection(mlist_name, drift_name, 2000, 1, True, False)
def test_io_4():
    """
    Test TIF movie IO (1 page, multiple frames per page).
    """
    movie_h = 50
    movie_w = 40
    movie_l = 10

    data = numpy.random.randint(0, 60000, (movie_l, movie_h, movie_w)).astype(
        numpy.uint16)

    movie_name = storm_analysis.getPathOutputTest("test_dataio.tif")

    # Write tif movie.
    with tifffile.TiffWriter(movie_name, imagej=True) as tf:
        tf.save(data, truncate=True)

    # Read & check.
    rd = datareader.inferReader(movie_name)
    [mw, mh, ml] = rd.filmSize()

    assert (mh == movie_h)
    assert (mw == movie_w)
    assert (ml == movie_l)
    for i in range(movie_l):
        assert (numpy.allclose(data[i, :, :], rd.loadAFrame(i)))
def test_io_3():
    """
    Test FITS movie IO.
    """
    movie_h = 50
    movie_w = 40
    movie_l = 10

    data = numpy.random.randint(0, 60000,
                                (movie_h, movie_w)).astype(numpy.uint16)

    movie_name = storm_analysis.getPathOutputTest("test_dataio.fits")

    # Write FITS movie.
    wr = datawriter.inferWriter(movie_name)
    for i in range(movie_l):
        wr.addFrame(data)
    wr.close()

    # Read & check.
    rd = datareader.inferReader(movie_name)
    [mw, mh, ml] = rd.filmSize()

    assert (mh == movie_h)
    assert (mw == movie_w)
    assert (ml == movie_l)
    assert (numpy.allclose(data, rd.loadAFrame(0)))
def test_psf_fft7():
    """
    Test against the Python version, translation.
    """
    dx = 0.5
    dy = 0.25
    dz = 0.2
    [pf_psf, geo, pf] = makePSFAndPF(-0.4, 0.4, 0.05)
    
    pfft_c = psfFFTC.PSFFFT(pf_psf)
    pfft_py = psfFFTPy.PSFFFT(pf_psf)

    pfft_c.translate(dx, dy, dz)
    pfft_py.translate(dx, dy, dz)

    psf_c = pfft_c.getPSF()
    psf_py = pfft_py.getPSF()

    if False:
        print(numpy.max(numpy.abs(psf_c - psf_py)))
        with tifffile.TiffWriter(storm_analysis.getPathOutputTest("test_psf_fft7.tif")) as tf:
            tf.save(psf_c.astype(numpy.float32))
            tf.save(psf_py.astype(numpy.float32))

    assert (numpy.max(numpy.abs(psf_c - psf_py))) < 1.0e-6

    pfft_c.cleanup()
def test_psf_fft2():
    """
    Test translated PSF calculation.
    """
    dx = 0.5
    dy = 0.25
    dz = 0.2
    [pf_psf, geo, pf] = makePSFAndPF(-0.4, 0.4, 0.05)
    
    pfft = psfFFTC.PSFFFT(pf_psf)
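    # The C library appears to take its z shift in units of the PSF z-step
    # (the PSF here spans 0.8 um in z) and its x/y arguments in the opposite
    # order, hence the swapped dy, dx and the rescaled dz below.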
    pfft.translate(dy, dx, dz*(pf_psf.shape[0] - 1)/0.8)
    psf_fft = pfft.getPSF()
    
    defocused = geo.changeFocus(pf, dz)
    translated = geo.translatePf(defocused, dx, dy)
    psf_pf = pupilMath.intensity(pupilMath.toRealSpace(translated))
    
    if False:
        print(numpy.max(numpy.abs(psf_fft - psf_pf)))
        with tifffile.TiffWriter(storm_analysis.getPathOutputTest("test_psf_fft2.tif")) as tf:
            tf.save(psf_fft.astype(numpy.float32))
            tf.save(psf_pf.astype(numpy.float32))

    assert (numpy.max(numpy.abs(psf_fft - psf_pf))) < 1.0e-10

    pfft.cleanup()
def test_cl_sa_h5py_5():
    """
    Test getting all of the localizations for clustering.
    """
    locs = {"category" : numpy.arange(4, dtype = numpy.int32),
            "x" : numpy.arange(4, dtype = numpy.float),
            "y" : numpy.arange(4, dtype = numpy.float)}

    filename = "test_clusters_sa_h5py.hdf5"
    h5_name = storm_analysis.getPathOutputTest(filename)
    storm_analysis.removeFile(h5_name)

    # Write localization data.
    with saH5Py.SAH5Py(h5_name, is_existing = False) as h5:
        h5.setMovieInformation(1,1,5,"")
        h5.setPixelSize(100.0)
        h5.addLocalizations(locs, 1)
        h5.addLocalizations(locs, 3)

    # Test getting all the localization data.
    with clSAH5Py.SAH5Clusters(h5_name) as cl_h5:
        [x, y, z, c, cl_dict] = cl_h5.getDataForClustering()
        assert(numpy.allclose(x, cl_dict['loc_id']))
        assert(numpy.allclose(y, cl_dict['loc_id']))
        assert(numpy.allclose(z, numpy.zeros(x.size)))
        assert(numpy.allclose(c, cl_dict['loc_id']))
        assert(numpy.allclose(cl_dict['frame'], numpy.array([1,1,1,1,3,3,3,3])))
def test_hdf5_to_image_4():
    """
    Test category (using tracks).
    """
    tracks = {
        "category": [1, 2],
        "x": numpy.array([10.0, 20.0]),
        "y": numpy.array([20.0, 10.0])
    }

    h5_name = storm_analysis.getPathOutputTest("test_hdf5_to_image.hdf5")
    storm_analysis.removeFile(h5_name)

    # Write data (as peaks).
    with saH5Py.SAH5Py(h5_name, is_existing=False, overwrite=True) as h5:
        h5.setMovieInformation(40, 30, 1, "")
        h5.addTracks(tracks)

    # Render image.
    image = hdf5ToImage.render2DImage(h5_name, category=1, scale=1)

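    # render2DImage indexes images as [y, x], and only the category 1 track
    # (x = 10, y = 20) should be rendered.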
    assert (image.shape[0] == 30)
    assert (image.shape[1] == 40)
    assert (image[10, 20] == 0)
    assert (image[20, 10] == 1)
def test_hdf5_to_image_5():
    """
    Test 3D rendering.
    """
    tracks = {
        "x": numpy.array([10.0, 10.0, 10.0]),
        "y": numpy.array([20.0, 20.0, 20.0]),
        "z": numpy.array([-0.2, 0.0, 0.2])
    }

    h5_name = storm_analysis.getPathOutputTest("test_hdf5_to_image.hdf5")
    storm_analysis.removeFile(h5_name)

    # Write data (as peaks).
    with saH5Py.SAH5Py(h5_name, is_existing=False, overwrite=True) as h5:
        h5.setMovieInformation(40, 30, 1, "")
        h5.addTracks(tracks)

    # Render image.
    images = hdf5ToImage.render3DImage(h5_name, [-0.3, -0.1, 0.1, 0.3],
                                       scale=1)

    assert (images[0].shape[0] == 30)
    assert (images[0].shape[1] == 40)
    assert (numpy.allclose(images[0], images[1]))
    assert (numpy.allclose(images[0], images[2]))
def test_otf_scaler_1():
    """
    Test that the C and the Python libraries agree on the calculation
    of an OTF scaled PSF.
    """
    otf_sigma = 1.8

    geo = pupilMath.Geometry(128, 0.1, 0.6, 1.5, 1.4)
    pf = geo.createFromZernike(1.0, [[1.3, 2, 2]])

    pf_c = pfFnC.PupilFunction(geometry=geo)
    pf_c.setPF(pf)

    otf_sc = otfSC.OTFScaler(size=geo.size)

    gsf = geo.gaussianScalingFactor(otf_sigma)
    psf_py = geo.pfToPSF(pf, [0.0], scaling_factor=gsf)

    psf_c = pf_c.getPSFIntensity()
    otf_sc.setScale(gsf)
    psf_c = otf_sc.scale(psf_c)

    if False:
        with tifffile.TiffWriter(
                storm_analysis.getPathOutputTest(
                    "test_otf_scaler_1.tif")) as tf:
            tf.save(psf_c.astype(numpy.float32))
            tf.save(psf_py.astype(numpy.float32))

    assert numpy.allclose(psf_c, psf_py)

    pf_c.cleanup()
    otf_sc.cleanup()
def test_cl_sa_h5py_4():
    """
    Test cluster info string round trip.
    """
    locs = {"x" : numpy.arange(10, dtype = numpy.float),
            "y" : numpy.arange(10, dtype = numpy.float)}

    filename = "test_clusters_sa_h5py.hdf5"
    h5_name = storm_analysis.getPathOutputTest(filename)
    storm_analysis.removeFile(h5_name)

    # Write localization data.
    with saH5Py.SAH5Py(h5_name, is_existing = False) as h5:
        h5.setMovieInformation(1,1,2,"")
        h5.addLocalizations(locs, 1)

    # Write clustering data for localizations.
    cluster_id = numpy.remainder(numpy.arange(10), 3)
    cluster_data = {"frame" : numpy.ones(10, dtype = numpy.int),
                    "loc_id" : numpy.arange(10)}

    info_string = "dbscan,eps,10.0,mc,5"
    with clSAH5Py.SAH5Clusters(h5_name) as cl_h5:
        cl_h5.addClusters(cluster_id, cluster_data)

        cl_h5.setClusteringInfo(info_string)
        assert (cl_h5.getClusteringInfo() == info_string)
def test_cl_sa_h5py_2():
    """
    Test basic cluster file mechanics (using tracks).
    """
    tracks = {"x" : numpy.arange(11, dtype = numpy.float),
              "y" : numpy.arange(11, dtype = numpy.float)}

    filename = "test_clusters_sa_h5py.hdf5"
    h5_name = storm_analysis.getPathOutputTest(filename)
    storm_analysis.removeFile(h5_name)

    # Write track data.
    with saH5Py.SAH5Py(h5_name, is_existing = False) as h5:
        h5.setMovieInformation(1,1,2,"")
        h5.addTracks(tracks)

    # Write clustering data for tracks.
    cluster_id = numpy.remainder(numpy.arange(11), 3)
    cluster_data = {"track_id" : numpy.zeros(11, dtype = numpy.int),
                    "loc_id" : numpy.arange(11)}

    cl_size = [0, 4, 4, 3]
    with clSAH5Py.SAH5Clusters(h5_name) as cl_h5:
        cl_h5.addClusters(cluster_id, cluster_data)

        assert(cl_h5.getNClusters() == (len(cl_size) - 1))
        for index, cluster in cl_h5.clustersIterator(skip_unclustered = False):
            for field in cluster:
                assert(cluster[field].size == cl_size[index])
def test_fiducials_3():
    """
    Basic fiducials test.
    """
    peaks = {
        "x": numpy.array([1.0, 2.0, 3.0]),
        "y": numpy.array([1.0, 1.0, 1.0])
    }

    filename = "test_fiducials.hdf5"
    h5_name = storm_analysis.getPathOutputTest(filename)
    storm_analysis.removeFile(h5_name)

    # Write data.
    with saH5Py.SAH5Py(h5_name, is_existing=False) as h5:
        for i in range(3):
            temp = {}
            for elt in peaks:
                temp[elt] = peaks[elt][i:]
            h5.addLocalizations(temp, i)

        h5.addMovieInformation(FakeReader(n_frames=4))

    # Track fiducials.
    fiducials.trackFiducials(h5_name, radius=0.1, reference_frame=2)

    # Check.
    with saH5Py.SAH5Py(h5_name) as h5:
        expected = numpy.array([-1, -1, 0])
        for fnum, locs in h5.localizationsIterator(fields=["fiducial_id"]):
            assert numpy.allclose(locs["fiducial_id"], expected[fnum:])
def test_cam_cal_1():

    size = (12, 10)
    cam_gain = 1.5 * numpy.ones(size)
    cam_offset = 1000.0 * numpy.ones(size)
    cam_var = 2.0 * numpy.ones(size)
    n_frames = 20000

    # Create calibration files.
    scmos_files = []
    for i, name in enumerate(
        ["dark.npy", "light1.npy", "light2.npy", "light3.npy", "light4.npy"]):
        f_name = storm_analysis.getPathOutputTest(name)
        scmos_files.append(f_name)

        mean = i * 500 * cam_gain
        var = mean * cam_gain + cam_var
        mean += cam_offset

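        # The calibration files appear to store per-pixel sums over frames:
        # N ~ n*mean and NN ~ n*(var + mean^2), using E[x^2] = var(x) + E[x]^2.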
        N = mean * n_frames
        NN = (var + mean * mean) * n_frames

        numpy.save(f_name, [numpy.array([n_frames]), N, NN])

    # Check.
    [cal_offset, cal_var,
     cal_gain] = camCal.cameraCalibration(scmos_files,
                                          show_fit_plots=False,
                                          show_mean_plots=False)

    assert (numpy.allclose(cal_offset, cam_offset))
    assert (numpy.allclose(cal_var, cam_var))
    assert (numpy.allclose(cal_gain, cam_gain))
def test_cl_sa_h5py_6():
    """
    Test getting all of the tracks for clustering.
    """
    tracks = {"category" : numpy.arange(4, dtype = numpy.int32),
              "x" : numpy.arange(4, dtype = numpy.float),
              "y" : numpy.arange(4, dtype = numpy.float),
              "z" : numpy.arange(4, dtype = numpy.float)}

    filename = "test_clusters_sa_h5py.hdf5"
    h5_name = storm_analysis.getPathOutputTest(filename)
    storm_analysis.removeFile(h5_name)

    # Write tracks data.
    with saH5Py.SAH5Py(h5_name, is_existing = False) as h5:
        h5.setMovieInformation(1,1,2,"")
        h5.setPixelSize(100.0)
        h5.addTracks(tracks)
        h5.addTracks(tracks)

    # Test getting all the tracking data.
    with clSAH5Py.SAH5Clusters(h5_name) as cl_h5:
        [x, y, z, c, cl_dict] = cl_h5.getDataForClustering()
        assert(numpy.allclose(x, cl_dict['loc_id']))
        assert(numpy.allclose(y, cl_dict['loc_id']))
        assert(numpy.allclose(z, cl_dict['loc_id']))
        assert(numpy.allclose(c, cl_dict['loc_id']))
        assert(numpy.allclose(cl_dict['track_id'], numpy.array([0,0,0,0,1,1,1,1])))
def test_pupilfn_3():
    """
    Test PF X derivative (C library).
    """
    dx = 1.0e-6
    geo = pupilMath.Geometry(20, 0.1, 0.6, 1.5, 1.4)
    pf = geo.createFromZernike(1.0, [[1.3, 2, 2]])

    pf_c = pfFnC.PupilFunction(geometry=geo)
    pf_c.setPF(pf)

    # Calculate derivative of magnitude as a function of x.
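    # Uses d|psi|^2/dx = 2*(Re(psi)*Re(dpsi/dx) + Im(psi)*Im(dpsi/dx)).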
    psf_c = pf_c.getPSF()
    psf_c_dx = pf_c.getPSFdx()
    mag_dx_calc = 2.0 * (numpy.real(psf_c) * numpy.real(psf_c_dx) +
                         numpy.imag(psf_c) * numpy.imag(psf_c_dx))

    # Estimate derivative using (f(x+dx) - f(x))/dx
    mag = pupilMath.intensity(psf_c)
    pf_c.translate(dx, 0.0, 0.0)
    mag_dx_est = (pupilMath.intensity(pf_c.getPSF()) - mag) / dx

    if False:
        with tifffile.TiffWriter(
                storm_analysis.getPathOutputTest("test_pupilfn_3.tif")) as tf:
            #tf.save(mag.astype(numpy.float32))
            tf.save(mag_dx_calc.astype(numpy.float32))
            tf.save(mag_dx_est.astype(numpy.float32))
            tf.save(numpy.abs(mag_dx_calc - mag_dx_est).astype(numpy.float32))

    assert numpy.allclose(mag_dx_calc, mag_dx_est, atol=1.0e-6)

    pf_c.cleanup()
def test_frc():
    mlist_name = storm_analysis.getData("test/data/test_drift_mlist.bin")
    results_name = storm_analysis.getPathOutputTest("test_drift_frc.txt")

    from storm_analysis.frc.frc_calc2d import frcCalc2d

    frcCalc2d(mlist_name, results_name, False)
def test_pupilfn_8():
    """
    Test that pupilfn.make_pupil_fn.makePupilFunction works as expected.
    """
    pf_size = 30
    zmn = [[1.3, 2, 2]]
    z_offset = -0.3
    
    # Create & save pupil function.
    pf_file = storm_analysis.getPathOutputTest("pf_test.pfn")
    makePupilFn.makePupilFunction(pf_file, pf_size, 0.1, zmn, z_offset = z_offset)

    # Load PF.
    with open(pf_file, "rb") as fp:
        pf_data = pickle.load(fp)
        test_pf = pf_data["pf"]

    # Create comparison PF.
    geo = pupilMath.GeometrySim(pf_size,
                                pf_data["pixel_size"],
                                pf_data["wavelength"],
                                pf_data["immersion_index"],
                                pf_data["numerical_aperture"])
    ref_pf = geo.createFromZernike(1.0, zmn)

    # Normalize reference to also have height 1.0 (at z = 0.0).
    psf = pupilMath.intensity(pupilMath.toRealSpace(ref_pf))
    ref_pf = ref_pf * 1.0/math.sqrt(numpy.max(psf))

    # Test that they are the same.
    for z in [-0.2, -0.1, 0.0, 0.1, 0.2]:
        test_psf = pupilMath.intensity(pupilMath.toRealSpace(geo.changeFocus(test_pf, z)))
        ref_psf = pupilMath.intensity(pupilMath.toRealSpace(geo.changeFocus(ref_pf, z - z_offset)))
        #print(numpy.max(numpy.abs(test_psf - ref_psf)))
        assert numpy.allclose(test_psf, ref_psf)
def test_fiducials_4():
    """
    Test no localizations in reference frame.
    """
    peaks = {
        "x": numpy.array([1.0, 2.0, 3.0]),
        "y": numpy.array([1.0, 1.0, 1.0])
    }

    filename = "test_fiducials.hdf5"
    h5_name = storm_analysis.getPathOutputTest(filename)
    storm_analysis.removeFile(h5_name)

    # Write data.
    with saH5Py.SAH5Py(h5_name, is_existing=False) as h5:
        for i in range(3):
            h5.addLocalizations(peaks, i)
        h5.addMovieInformation(FakeReader(n_frames=5))

    # Track fiducials.
    okay = False
    try:
        fiducials.trackFiducials(h5_name, radius=0.1, reference_frame=3)
    except fiducials.FiducialException:
        okay = True
    assert okay
def test_fiducials_7():
    """
    Iterator test.
    """
    peaks = {
        "x": numpy.array([1.0, 2.0, 3.0]),
        "y": numpy.array([1.0, 1.0, 1.0])
    }

    filename = "test_fiducials.hdf5"
    h5_name = storm_analysis.getPathOutputTest(filename)
    storm_analysis.removeFile(h5_name)

    # Write data.
    with saH5Py.SAH5Py(h5_name, is_existing=False) as h5:
        for i in range(3):
            temp = {}
            for elt in peaks:
                temp[elt] = peaks[elt][i:]
            h5.addLocalizations(temp, i)

        h5.addMovieInformation(FakeReader(n_frames=4))

    # Track fiducials.
    fiducials.trackFiducials(h5_name, radius=0.1, reference_frame=2)

    # Check.
    with fiducials.SAH5Fiducials(h5_name) as h5:
        for fdcl in h5.fiducialsIterator():
            assert (numpy.allclose(fdcl["frame"], numpy.arange(3)))
def test_tracker_5():
    """
    Test that each localization is assigned to the nearest track.
    """
    filename = "test_sa_hdf5.hdf5"
    h5_name = storm_analysis.getPathOutputTest(filename)
    storm_analysis.removeFile(h5_name)

    # Write data.
    with saH5Py.SAH5Py(h5_name, is_existing = False) as h5:
        peaks = {"x" : numpy.array([1.0, 2.0, 3.0]),
                 "y" : numpy.array([1.0, 1.0, 1.0]),
                 "sum" : numpy.array([4.0, 4.0, 4.0])}
        h5.addLocalizations(peaks, 0)
        
        peaks = {"x" : numpy.array([2.0]),
                 "y" : numpy.array([1.0]),
                 "sum" : numpy.array([4.0])}
        h5.addLocalizations(peaks, 1)

        h5.addMovieInformation(FakeReader(n_frames = 2))

    # Track.
    tracker.tracker(h5_name, radius = 1.1)

    # Tracking.
    with saH5Py.SAH5Py(h5_name) as h5:
        assert(h5.getNTracks() == 3)
        for t in h5.tracksIterator():
            assert(numpy.allclose(numpy.array([1,3,2]), t["x"]))
            assert(numpy.allclose(numpy.array([0,2,1]), t["track_id"]))
            assert(numpy.allclose(numpy.array([1,1,2]), t["track_length"]))
def test_fiducials_8():
    """
    Gap test.
    """
    peaks = {
        "x": numpy.array([1.0, 2.0, 3.0]),
        "y": numpy.array([1.0, 1.0, 1.0])
    }

    filename = "test_fiducials.hdf5"
    h5_name = storm_analysis.getPathOutputTest(filename)
    storm_analysis.removeFile(h5_name)

    # Write data.
    with saH5Py.SAH5Py(h5_name, is_existing=False) as h5:
        for i in [0, 1, 3]:
            h5.addLocalizations(peaks, i)

        h5.addMovieInformation(FakeReader(n_frames=4))

    # Track fiducials.
    fiducials.trackFiducials(h5_name, radius=0.1, max_gap=1)

    # Check.
    with fiducials.SAH5Fiducials(h5_name) as h5:
        expected = numpy.array([0, 1, 3])
        for fdcl in h5.fiducialsIterator():
            assert (numpy.allclose(fdcl["frame"], expected))
def test_tracker_8():
    """
    Test tracking over an empty frame.
    """
    peaks = {"x" : numpy.array([1.0, 2.0, 3.0]),
             "y" : numpy.array([1.0, 1.0, 1.0]),
             "sum" : numpy.array([4.0, 4.0, 4.0])}

    empty = {"x" : numpy.array([]),
             "y" : numpy.array([]),
             "sum" : numpy.array([])} 

    filename = "test_sa_hdf5.hdf5"
    h5_name = storm_analysis.getPathOutputTest(filename)
    storm_analysis.removeFile(h5_name)

    # Write data.
    with saH5Py.SAH5Py(h5_name, is_existing = False) as h5:
        h5.addLocalizations(peaks, 0)
        h5.addLocalizations(empty, 1)
        h5.addLocalizations(peaks, 2)
        h5.addMovieInformation(FakeReader(n_frames = 3))

    # Track.
    tracker.tracker(h5_name, descriptor = "111", radius = 0.1)

    # Tracking.
    with saH5Py.SAH5Py(h5_name) as h5:
        assert(h5.getNTracks() == 6)
        for t in h5.tracksIterator():
            assert(numpy.allclose(numpy.ones(6), t["track_length"]))
def test_fiducials_10():
    """
    Test fiducial averaging (preload_all = False).
    """
    peaks = {
        "x": numpy.array([1.0, 2.0, 3.0]),
        "y": numpy.array([1.0, 1.0, 1.0])
    }

    filename = "test_fiducials.hdf5"
    h5_name = storm_analysis.getPathOutputTest(filename)
    storm_analysis.removeFile(h5_name)

    # Write data.
    with saH5Py.SAH5Py(h5_name, is_existing=False) as h5:
        for i in range(3):
            h5.addLocalizations(peaks, i)

        h5.addMovieInformation(FakeReader(n_frames=3))

    # Track fiducials..
    fiducials.trackFiducials(h5_name, radius=0.1)

    # Check
    with fiducials.SAH5Fiducials(h5_name) as h5:
        [ave, n] = h5.averageFiducials(fields=["y"], preload_all=False)
        assert (numpy.allclose(ave["y"], numpy.ones(3)))
def test_drift_correction():

    # Calculate drift correction.
    param_name = storm_analysis.getData("test/data/test_drift.xml")
    parameters = params.ParametersAnalysis().initFromFile(param_name)

    mlist_name = storm_analysis.getData("test/data/test_drift_mlist.bin")
    drift_output = storm_analysis.getPathOutputTest("test_drift_drift.txt")

    xyzDriftCorrection.xyzDriftCorrection(mlist_name,
                                          drift_output,
                                          parameters.getAttr("frame_step"),
                                          parameters.getAttr("d_scale"),
                                          correct_z=True)

    # Verify results.
    diffs = veri.verifyDriftCorrection(
        storm_analysis.getData("test/data/test_drift.txt"), drift_output)

    if (diffs[0] > 0.1):
        raise Exception("Frame numbers do not match.")

    # These thresholds are somewhat arbitrary.
    if (diffs[1] > 0.1) or (diffs[2] > 0.1):
        raise Exception("XY drift correction error.")

    if (diffs[3] > 30.0):
        raise Exception("Z drift correction error.")
def test_pupilfn_6():
    """
    Test PF Z derivative (C library).
    """
    dz = 1.0e-6
    geo = pupilMath.Geometry(20, 0.1, 0.6, 1.5, 1.4)
    pf = geo.createFromZernike(1.0, [[1.3, 2, 2]])

    pf_c = pfFnC.PupilFunction(geometry=geo)
    pf_c.setPF(pf)

    # Calculate derivative of magnitude as a function of z.
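    # Uses d|psi|^2/dz = 2*(Re(psi)*Re(dpsi/dz) + Im(psi)*Im(dpsi/dz)).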
    psf_c = pf_c.getPSF()
    psf_c_dz = pf_c.getPSFdz()
    mag_dz_calc = 2.0 * (numpy.real(psf_c) * numpy.real(psf_c_dz) +
                         numpy.imag(psf_c) * numpy.imag(psf_c_dz))

    # Estimate derivative using (f(z+dz) - f(z))/dz
    mag = pupilMath.intensity(psf_c)
    pf_c.translate(0.0, 0.0, dz)
    mag_dz_est = (pupilMath.intensity(pf_c.getPSF()) - mag) / dz

    if False:
        with tifffile.TiffWriter(
                storm_analysis.getPathOutputTest("test_pupilfn_6.tif")) as tf:
            #tf.save(mag.astype(numpy.float32))
            tf.save(mag_dz_calc.astype(numpy.float32))
            tf.save(mag_dz_est.astype(numpy.float32))
            tf.save(numpy.abs(mag_dz_calc - mag_dz_est).astype(numpy.float32))

    assert (numpy.max(numpy.abs(mag_dz_calc - mag_dz_est))) < 1.0e-6

    pf_c.cleanup()