def test_drift_correction_6():
    """
    Test Z offset determination & correction.
    """
    n_locs = 500
    peaks = {
        "x": numpy.random.normal(loc=10.0, scale=0.2, size=n_locs),
        "y": numpy.random.normal(loc=10.0, scale=0.2, size=n_locs),
        "z": numpy.random.normal(scale=0.05, size=n_locs)
    }

    h5_name = storm_analysis.getPathOutputTest("test_dc_hdf5.hdf5")

    # Save peaks.
    t_dz = 0.3
    with saH5Py.SAH5Py(h5_name, is_existing=False, overwrite=True) as h5:
        h5.setMovieInformation(20, 20, 2, "")
        h5.addLocalizations(peaks, 0)
        peaks["z"] += t_dz
        h5.addLocalizations(peaks, 1)

    scale = 2
    z_min = -1.0
    z_max = 1.0
    z_bins = int((z_max - z_min) / 0.05)
    with driftUtils.SAH5DriftCorrection(filename=h5_name,
                                        scale=scale,
                                        z_bins=z_bins) as h5d:
        h5d.setFrameRange(0, 1)
        im1 = h5d.grid3D(z_min, z_max)
        h5d.setFrameRange(1, 2)
        im2 = h5d.grid3D(z_min, z_max)

        # Check that both images have the same number of localizations.
        assert (numpy.sum(im1) == numpy.sum(im2))

        # Measure offset.
        [corr, fit, dz, success] = imagecorrelation.zOffset(im1, im2)

        # Test that it succeeded.
        assert (success)

        # Check result.
        dz = dz * (z_max - z_min) / float(z_bins)
        assert (abs(dz - t_dz) / t_dz < 0.1)

        # Test that we are correcting in the right direction.
        h5d.setDriftCorrectionZ(-dz)
        im2 = h5d.grid3D(z_min, z_max, drift_corrected=True)
        [corr, fit, dz, success] = imagecorrelation.zOffset(im1, im2)
        dz = -dz * (z_max - z_min) / float(z_bins)
        assert (abs(dz) < 0.1)
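
# A small helper sketching the bin-to-micron conversion used in the test above
# (hypothetical name, not part of storm-analysis; dz_bins is the raw offset
# returned by imagecorrelation.zOffset()):
def zBinsToMicrons(dz_bins, z_min=-1.0, z_max=1.0, z_bins=40):
    # Each correlation bin spans (z_max - z_min) / z_bins in the units of z_min/z_max.
    return dz_bins * (z_max - z_min) / float(z_bins)
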
def rccDriftCorrection(mlist_name, drift_name, step, scale, correct_z = False, show_plot = False):

    i3_data = i3togrid.I3GDataLL(mlist_name, scale = scale)
    film_l = i3_data.getFilmLength() - 1
    max_err = 0.2

    # Default values for the z range, may need to be adjusted.
    z_min = -500.0
    z_max = 500.0

    # Sub-routines.
    def saveDriftData(fdx, fdy, fdz):
        driftutilities.saveDriftData(drift_name, fdx, fdy, fdz)

    def interpolateData(xvals, yvals):
        return driftutilities.interpolateData(xvals, yvals, film_l)

    # Don't analyze films that are too short.
    if (4 * step > film_l):
        saveDriftData(numpy.zeros(film_l),
                      numpy.zeros(film_l),
                      numpy.zeros(film_l))
        return

    print("Performing XY correction.")

    # Compute offsets between all pairs of sub images.
    endpost = film_l - step/2
    old_start1 = -1
    start1 = 0
    end1 = start1 + step
    start2 = start1
    end2 = start2 + step
    i = 0
    j = 0
    centers = [(end1 - start1)/2 + start1]
    pairs = []
    while (start1 < endpost):

        if (start2 > endpost):
            i += 1
            j = i
            start1 += step
            end1 = start1 + step
            start2 = start1
            end2 = start2 + step
            if (end1 > endpost):
                end1 = film_l
            if (end2 > endpost):
                end2 = film_l
            if (start1 < endpost):
                centers.append((end1 - start1)/2 + start1)

        if (start1 > endpost):
            continue

        if not (start1 == start2):
            if (old_start1 != start1):
                i3_data.loadDataInFrames(fmin = start1, fmax = end1-1)
                sub1 = i3_data.i3To2DGridAllChannelsMerged(uncorrected = True)
                old_start1 = start1

            i3_data.loadDataInFrames(fmin = start2, fmax = end2-1)
            sub2 = i3_data.i3To2DGridAllChannelsMerged(uncorrected = True)

            [corr, dx, dy, success] = imagecorrelation.xyOffset(sub1,
                                                                sub2,
                                                                scale)

            dx = dx/float(scale)
            dy = dy/float(scale)

            print("offset between frame ranges ", start1, "-" , end1 , " and ", start2, "-", end2)

            if success:
                print(" -> ", dx, dy, "good")
            else:
                print(" -> ", dx, dy, "bad")
            print("")

            pairs.append([i, j, dx, dy, success])

        j += 1
        start2 += step
        end2 = start2 + step
        if (end2 > endpost):
            end2 = film_l


    print("--")

    #
    # For testing it is faster to not have to re-run the
    # XY drift correction calculations.
    #
    #with open("test.dat", "w") as fp:
    #    pickle.dump([centers, pairs], fp)
    #
    #with open("test.dat") as fp:
    #    [centers, pairs] = pickle.load(fp)
    #

    # Prepare rij_x, rij_y, A matrix.
    rij_x = numpy.zeros(len(pairs), dtype = numpy.float32)
    rij_y = numpy.zeros(len(pairs), dtype = numpy.float32)
    A = numpy.zeros((len(pairs),len(centers)), dtype = numpy.float32)
    for i, pair in enumerate(pairs):
        rij_x[i] = pair[2]
        rij_y[i] = pair[3]
        A[i,pair[0]:pair[1]] = 1.0

    # Calculate drift (pass1). 
    # dx and dy contain the optimal offset between sub image i and sub image i+1 in x/y.
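    #
    # Each measured pair offset rij should equal the sum of the per-interval
    # drifts between its two sub-images, i.e. rij = A d, with row i of A having
    # ones in columns pair[0]..pair[1]-1. numpy.linalg.pinv(A) then gives the
    # least-squares solution for the per-interval drifts from the over-determined
    # set of pairwise measurements.
    #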
    pinv_A = numpy.linalg.pinv(A)
    dx = numpy.dot(pinv_A, rij_x)
    dy = numpy.dot(pinv_A, rij_y)

    # Calculate errors.
    err_x = numpy.dot(A, dx) - rij_x
    err_y = numpy.dot(A, dy) - rij_y

    err_d = numpy.sqrt(err_x * err_x + err_y * err_y)
    arg_sort_err = numpy.argsort(err_d)

    # Print errors before.
    if False:
        print("Before:")
        for i in range(err_d.size):
            print(i, rij_x[i], rij_y[i], A[i,:], err_d[i])
        print("")

    # Remove bad values.
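    # Working from the largest error downwards, a pairwise measurement is only
    # dropped if the row-deleted A keeps the expected rank (len(centers) - 1),
    # i.e. the remaining measurements still tie all of the sub-images together.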
    j = len(arg_sort_err) - 1

    while (j > 0) and (err_d[arg_sort_err[j]] > max_err):
        index = arg_sort_err[j]
        delA = numpy.delete(A, index, 0)
        if (numpy.linalg.matrix_rank(delA, tol = 1.0) == (len(centers)-1)):
            print(j, "removing", index, "with error", err_d[index])
            A = delA
            rij_x = numpy.delete(rij_x, index, 0)
            rij_y = numpy.delete(rij_y, index, 0)
            err_d = numpy.delete(err_d, index, 0)
            arg_sort_err[(arg_sort_err > index)] -= 1
        else:
            print("not removing", index, "with error", err_d[index])
        j -= 1

    # Print errors after.
    if False:
        print("")
        print("After:")
        for i in range(err_d.size):
            print(i, rij_x[i], rij_y[i], A[i,:], err_d[i])
        print("")

    # Calculate drift (pass2). 
    pinv_A = numpy.linalg.pinv(A)
    dx = numpy.dot(pinv_A, rij_x)
    dy = numpy.dot(pinv_A, rij_y)


    # Integrate to get final drift.
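    # driftx[i] is the cumulative sum of the per-interval offsets dx[0:i], i.e.
    # the drift of sub-image i relative to the first sub-image.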
    driftx = numpy.zeros((dx.size))
    drifty = numpy.zeros((dy.size))
    for i in range(dx.size):
        driftx[i] = numpy.sum(dx[0:i])
        drifty[i] = numpy.sum(dy[0:i])

    if True:
        for i in range(driftx.size):
            print(i, centers[i], driftx[i], drifty[i])

    # Create spline for interpolation.
    final_driftx = interpolateData(centers, driftx)
    final_drifty = interpolateData(centers, drifty)

    # Plot XY drift.
    if show_plot:
        import matplotlib
        import matplotlib.pyplot as pyplot

        x = numpy.arange(film_l)
        fig = pyplot.figure()
        ax = fig.add_subplot(111)
        ax.plot(x, final_driftx, color = 'blue')
        ax.plot(x, final_drifty, color = 'red')
        pyplot.show()

    # Z correction.
    if not correct_z:
        saveDriftData(final_driftx,
                      final_drifty,
                      numpy.zeros(film_l))
        return

    print("")
    print("Performing Z Correction.")

    start = 0
    z_bins = 20
    i3_data.loadDataInFrames(fmin = start, fmax = start+step)

    if correct_z:
        z_bins = 20
        xyzmaster = i3_data.i3To3DGridAllChannelsMerged(z_bins,
                                                        zmin = z_min,
                                                        zmax = z_max,
                                                        uncorrected = True)

    j = 0
    index = 0
    old_dz = 0.0
    driftz = numpy.zeros((dx.size))
    while(j < film_l):

        # Load correct frame range.
        if ((j + 2*step) > film_l):
            i3_data.loadDataInFrames(fmin = j)
            step_step = 2*step
        else:
            i3_data.loadDataInFrames(fmin = j, fmax = j + step)
            step_step = step

        # Apply XY drift correction.
        i3_data.applyXYDriftCorrection(driftx[index], drifty[index])

        # Z correlation
        dz = old_dz

        xyzcurr = i3_data.i3To3DGridAllChannelsMerged(z_bins,
                                                      zmin = z_min,
                                                      zmax = z_max,
                                                      uncorrected = True)

        [corr, fit, dz, z_success] = imagecorrelation.zOffset(xyzmaster, xyzcurr)

        # Update Values
        if z_success:
            old_dz = dz
        else:
            dz = old_dz
        dz = dz * (z_max - z_min)/float(z_bins)

        if z_success:
            i3_data.applyZDriftCorrection(-dz)
            xyzmaster += i3_data.i3To3DGridAllChannelsMerged(z_bins,
                                                             zmin = z_min,
                                                             zmax = z_max)

        driftz[index] = dz

        if z_success:
            print(index, dz, "good")
        else:
            print(index, dz, "bad")

        index += 1
        j += step_step

    final_driftz = interpolateData(centers, driftz)

    saveDriftData(final_driftx,
                  final_drifty,
                  final_driftz)

    # Plot X,Y, Z drift.
    if show_plot:
        import matplotlib
        import matplotlib.pyplot as pyplot

        pixel_size = 160.0 # pixel size in nm.
        x = numpy.arange(film_l)
        fig = pyplot.figure()
        ax = fig.add_subplot(111)
        ax.plot(x, pixel_size * final_driftx, color = 'red')
        ax.plot(x, pixel_size * final_drifty, color = 'green')
        ax.plot(x, final_driftz, color = 'blue')
        pyplot.show()
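
# Usage sketch for the Insight3-based rccDriftCorrection() above (hypothetical
# file names and step size; assumes numpy and the storm-analysis i3togrid,
# driftutilities and imagecorrelation modules are imported):
if __name__ == "__main__":
    rccDriftCorrection("movie_mlist.bin", "movie_drift.txt",
                       step = 500, scale = 2,
                       correct_z = True, show_plot = False)
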
def xyzDriftCorrection(mlist_filename,
                       drift_filename,
                       step,
                       scale,
                       correct_z=True):

    i3_data = i3togrid.I3GDataLL(mlist_filename, scale=scale)
    film_l = i3_data.getFilmLength() - 1

    # Sub-routines.
    def saveDriftData(fdx, fdy, fdz):
        driftutilities.saveDriftData(drift_filename, fdx, fdy, fdz)

    def interpolateData(xvals, yvals):
        return driftutilities.interpolateData(xvals, yvals, film_l)

    # Don't analyze films that are too short.
    if ((4 * step) >= film_l):
        saveDriftData(numpy.zeros(film_l), numpy.zeros(film_l),
                      numpy.zeros(film_l))
        return ()

    #
    # Drift correction (XY and Z are all done at the same time)
    #
    # Note that drift corrected localizations are added back into
    # the reference image in the hopes of improving the correction
    # for subsequent localizations.
    #

    # Figure out how to bin the movie.
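    # The movie is split into bins of 'step' frames; the last bin absorbs any
    # remainder so that no bin is shorter than 'step' frames.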
    frame = 0
    bin_edges = [0]
    while (frame < film_l):
        if ((frame + 2 * step) > film_l):
            frame = film_l
        else:
            frame += step
        bin_edges.append(frame)

    z_bins = 20
    xy_master = None
    xyz_master = None
    t = []
    x = []
    y = []
    z = []
    old_dx = 0.0
    old_dy = 0.0
    old_dz = 0.0
    for i in range(len(bin_edges) - 1):

        # Load correct frame range.
        i3_data.loadDataInFrames(fmin=bin_edges[i], fmax=bin_edges[i + 1] - 1)

        midp = (bin_edges[i + 1] + bin_edges[i]) / 2

        xy_curr = i3_data.i3To2DGridAllChannelsMerged(uncorrected=True)

        #
        # This is to handle analysis that did not start at frame 0
        # of the movie.
        #
        # FIXME: There could still be problems if the movie does not
        #        start on a multiple of the step size.
        #
        if xy_master is None:
            if (numpy.sum(xy_curr) > 0):
                xy_master = xy_curr
                if correct_z:
                    xyz_master = i3_data.i3To3DGridAllChannelsMerged(
                        z_bins, uncorrected=True)

            t.append(midp)
            x.append(0.0)
            y.append(0.0)
            z.append(0.0)
            print(bin_edges[i], bin_edges[i + 1], numpy.sum(xy_curr), 0.0, 0.0,
                  0.0)
            continue

        # Correlate to master image.
        [corr, dx, dy, xy_success] = imagecorrelation.xyOffset(
            xy_master,
            xy_curr,
            i3_data.getScale(),
            center=[x[i - 1] * scale, y[i - 1] * scale])

        # Update values
        if xy_success:
            old_dx = dx
            old_dy = dy
        else:
            dx = old_dx
            dy = old_dy

        dx = dx / float(scale)
        dy = dy / float(scale)

        t.append(midp)
        x.append(dx)
        y.append(dy)

        i3_data.applyXYDriftCorrection(dx, dy)
        if xy_success:
            # Add current to master
            xy_master += i3_data.i3To2DGridAllChannelsMerged()

        # Z correlation
        dz = old_dz
        if correct_z and xy_success:

            xyz_curr = i3_data.i3To3DGridAllChannelsMerged(z_bins,
                                                           uncorrected=True)

            # Do z correlation
            [corr, fit, dz,
             z_success] = imagecorrelation.zOffset(xyz_master, xyz_curr)

            # Update Values
            if z_success:
                old_dz = dz
            else:
                dz = old_dz

            dz = dz * 1000.0 / float(z_bins)

            if z_success:
                i3_data.applyZDriftCorrection(-dz)
                xyz_master += i3_data.i3To3DGridAllChannelsMerged(z_bins)

        z.append(dz)

        print(bin_edges[i], bin_edges[i + 1], numpy.sum(xy_curr), dx, dy, dz)

    i3_data.close()

    # Create numpy versions of the drift arrays.
    nt = numpy.array(t)
    final_driftx = interpolateData(nt, numpy.array(x))
    final_drifty = interpolateData(nt, numpy.array(y))
    final_driftz = interpolateData(nt, numpy.array(z))

    saveDriftData(final_driftx, final_drifty, final_driftz)
Example #4
def alignAndMerge(file1, file2, results_file, scale = 2, dx = 0, dy = 0, z_min = -500.0, z_max = 500.0):
    assert not os.path.exists(results_file)

    z_bins = int((z_max - z_min)/50)

    # Load meta data.
    metadata1 = readinsight3.loadI3Metadata(file1)
    metadata2 = readinsight3.loadI3Metadata(file2)

    # If metadata is available, update the film length
    # field to be whichever data set is longer.
    #
    # Note that the merged file will still be messy in that the
    # frame numbers for the second movie are not changed, so they
    # will likely overlap with those of the first movie and break
    # the assumption that frame number always increases as you
    # go through the file.
    #
    if (metadata1 is not None) and (metadata2 is not None):
        f1_length = int(metadata1.find("movie").find("movie_l").text)
        f2_length = int(metadata2.find("movie").find("movie_l").text)
        if (f2_length > f1_length):
            metadata1.find("movie").find("movie_l").text = str(f2_length)
    
    i3_data1 = i3togrid.I3GData(file1, scale = scale)
    i3_data2 = i3togrid.I3GData(file2, scale = scale)

    # Determine x,y offsets.
    xy_data1 = i3_data1.i3To2DGridAllChannelsMerged()
    xy_data2 = i3_data2.i3To2DGridAllChannelsMerged()
    
    [corr, offx, offy, xy_success] = imagecorrelation.xyOffset(xy_data1,
                                                               xy_data2,
                                                               scale,
                                                               center = [dx * scale,
                                                                         dy * scale])

    assert(xy_success)

    # Update x,y positions in file2.
    offx = offx/float(scale)
    offy = offy/float(scale)
    print("x,y offsets", offx, offy)

    i3_data2.i3data['xc'] += offx
    i3_data2.i3data['yc'] += offy

    # Determine z offsets.
    xyz_data1 = i3_data1.i3To3DGridAllChannelsMerged(z_bins,
                                                     zmin = z_min,
                                                     zmax = z_max)
    xyz_data2 = i3_data2.i3To3DGridAllChannelsMerged(z_bins,
                                                     zmin = z_min,
                                                     zmax = z_max)

    [corr, fit, dz, z_success] = imagecorrelation.zOffset(xyz_data1, xyz_data2)
    assert(z_success)

    dz = dz * (z_max - z_min)/float(z_bins)
    print("z offset", dz)

    # Update z positions in file2.
    i3_data2.i3data['zc'] -= dz

    i3w = writeinsight3.I3Writer(results_file)
    i3w.addMolecules(i3_data1.getData())
    i3w.addMolecules(i3_data2.getData())
    if metadata1 is None:
        i3w.close()
    else:
        i3w.closeWithMetadata(ElementTree.tostring(metadata1, 'ISO-8859-1'))
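
# Usage sketch for the Insight3-based alignAndMerge() above (hypothetical file
# names; results_file must not already exist, as asserted at the top):
if __name__ == "__main__":
    alignAndMerge("channel1_mlist.bin", "channel2_mlist.bin",
                  "merged_mlist.bin", scale = 2)
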
Example #6
def xyzDriftCorrection(mlist_filename, drift_filename, step, scale, correct_z = True):
    
    i3_data = i3togrid.I3GDataLL(mlist_filename, scale = scale)
    film_l = i3_data.getFilmLength()

    # Sub-routines.
    def saveDriftData(fdx, fdy, fdz):
        driftutilities.saveDriftData(drift_filename, fdx, fdy, fdz)

    def interpolateData(xvals, yvals):
        return driftutilities.interpolateData(xvals, yvals, film_l)

    # Don't analyze films that are too short.
    if ((4*step) >= film_l):
        saveDriftData(numpy.zeros(film_l),
                      numpy.zeros(film_l),
                      numpy.zeros(film_l))
        return()

    #
    # Drift correction (XY and Z are all done at the same time)
    #
    # Note that drift corrected localizations are added back into 
    # the reference image in the hopes of improving the correction
    # for subsequent localizations. 
    #

    start = 0
    i3_data.loadDataInFrames(fmin = start, fmax = start+step-1)
    xymaster = i3_data.i3To2DGridAllChannelsMerged(uncorrected = True)

    if correct_z:
        z_bins = 20
        xyzmaster = i3_data.i3To3DGridAllChannelsMerged(z_bins,
                                                        uncorrected = True)

    index = 1
    last = 0
    step_step = 0
    if(start>0):
        j = 0
    else:
        j = step
    t = [step/2]
    x = [0]
    y = [0]
    z = [0]
    old_dx = 0.0
    old_dy = 0.0
    old_dz = 0.0
    while(j < film_l):

        # Load correct frame range.
        last = j
        if ((j + 2*step) >= film_l):
            i3_data.loadDataInFrames(fmin = j)
            step_step = 2*step
        else:
            i3_data.loadDataInFrames(fmin = j, fmax = j + step - 1)
            step_step = step

        xycurr = i3_data.i3To2DGridAllChannelsMerged(uncorrected = True)

        # Correlate to master image.
        [corr, dx, dy, xy_success] = imagecorrelation.xyOffset(xymaster,
                                                               xycurr,
                                                               i3_data.getScale(),
                                                               center = [x[index-1] * scale,
                                                                         y[index-1] * scale])

        # Update values
        if xy_success:
            old_dx = dx
            old_dy = dy
        else:
            dx = old_dx
            dy = old_dy

        dx = dx/float(scale)
        dy = dy/float(scale)

        t.append(step/2 + index * step)
        x.append(dx)
        y.append(dy)

        i3_data.applyXYDriftCorrection(dx,dy)
        if xy_success:
            # Add current to master
            xymaster += i3_data.i3To2DGridAllChannelsMerged()

        # Z correlation
        dz = old_dz
        if correct_z and xy_success:

            xyzcurr = i3_data.i3To3DGridAllChannelsMerged(z_bins,
                                                          uncorrected = True)

            # Do z correlation
            [corr, fit, dz, z_success] = imagecorrelation.zOffset(xyzmaster, xyzcurr)

            # Update Values
            if z_success:
                old_dz = dz
            else:
                dz = old_dz
            
            dz = dz * 1000.0/float(z_bins)

            if z_success:
                i3_data.applyZDriftCorrection(-dz)
                xyzmaster += i3_data.i3To3DGridAllChannelsMerged(z_bins)
    
        z.append(dz)

        print(index, dx, dy, dz)

        index += 1
        j += step_step

    i3_data.close()

    # Create numpy versions of the drift arrays.
    nt = numpy.array(t)
    final_driftx = interpolateData(nt, numpy.array(x))
    final_drifty = interpolateData(nt, numpy.array(y))
    final_driftz = interpolateData(nt, numpy.array(z))

    saveDriftData(final_driftx,
                  final_drifty,
                  final_driftz)
def test_drift_correction_7():
    """
    Test XY and Z offset determination & correction as well as saving of the
    correct offsets in the HDF5 file.
    """
    n_locs = 500
    peaks = {
        "x": numpy.random.normal(loc=10.0, scale=0.2, size=n_locs),
        "y": numpy.random.normal(loc=10.0, scale=0.2, size=n_locs),
        "z": numpy.random.normal(scale=0.05, size=n_locs)
    }

    h5_name = storm_analysis.getPathOutputTest("test_dc_hdf5.hdf5")

    # Save peaks.
    t_dx = 2.0
    t_dz = 0.3
    with saH5Py.SAH5Py(h5_name, is_existing=False, overwrite=True) as h5:
        h5.setMovieInformation(20, 20, 2, "")
        h5.addLocalizations(peaks, 0)
        peaks["x"] += t_dx
        peaks["z"] += t_dz
        h5.addLocalizations(peaks, 1)

    scale = 2
    z_min = -1.0
    z_max = 1.0
    z_bins = int((z_max - z_min) / 0.05)
    with driftUtils.SAH5DriftCorrection(filename=h5_name,
                                        scale=scale,
                                        z_bins=z_bins) as h5d:
        h5d.setFrameRange(0, 1)
        im1_xy = h5d.grid2D()
        im1_xyz = h5d.grid3D(z_min, z_max)
        h5d.setFrameRange(1, 2)
        im2_xy = h5d.grid2D()
        im2_xyz = h5d.grid3D(z_min, z_max)

        # Check that both images have the same number of localizations.
        assert (numpy.sum(im1_xy) == numpy.sum(im2_xy))

        # Measure and correct XY offset.
        #

        # Measure offset.
        [corr, dx, dy,
         success] = imagecorrelation.xyOffset(im1_xy, im2_xy, scale)

        # Test that it succeeded.
        assert (success)

        # Test that we got the right answer.
        dx = dx / scale
        dy = dy / scale
        assert (numpy.allclose(numpy.array([dx, dy]),
                               numpy.array([-t_dx, 0.0]),
                               atol=1.0e-6))

        # Apply xy drift correction.
        h5d.setDriftCorrectionXY(dx, dy)

        # Verify that z measurement returns the wrong value if we don't
        # correct for XY.
        #

        # Measure z offset.
        [corr, fit, dz, success] = imagecorrelation.zOffset(im1_xyz, im2_xyz)
        dz = dz * (z_max - z_min) / float(z_bins)
        assert (dz < z_min)

        # Get 3D image with XY corrections.
        im2_xyz = h5d.grid3D(z_min, z_max, drift_corrected=True)

        # Verify correct z offset.
        [corr, fit, dz, success] = imagecorrelation.zOffset(im1_xyz, im2_xyz)
        dz = dz * (z_max - z_min) / float(z_bins)

        assert (abs(dz - t_dz) / t_dz < 0.1)

        # Save the drift data in the HDF5 file.
        h5d.saveDriftData([0.0, dx], [0.0, 0.0], [0.0, -dz])

    # Reload file and verify that the corrections are saved properly.
    with driftUtils.SAH5DriftCorrectionTest(filename=h5_name,
                                            scale=scale,
                                            z_bins=z_bins) as h5d:
        h5d.setFrameRange(0, 1)
        im1_xy = h5d.grid2D()
        im1_xyz = h5d.grid3D(z_min, z_max)
        h5d.setFrameRange(1, 2)
        im2_xy = h5d.grid2D()
        im2_xyz = h5d.grid3D(z_min, z_max)

        # Verify 0.0 xy offset.
        [corr, dx, dy,
         success] = imagecorrelation.xyOffset(im1_xy, im2_xy, scale)
        assert (success)
        dx = dx / scale
        dy = dy / scale
        assert (numpy.allclose(numpy.array([dx, dy]),
                               numpy.zeros(2),
                               atol=1.0e-6))

        # Verify 0.0 z offset.
        [corr, fit, dz, success] = imagecorrelation.zOffset(im1_xyz, im2_xyz)
        dz = dz * (z_max - z_min) / float(z_bins)
        assert (abs(dz) < 0.05)
Example #8
def rccDriftCorrection(mlist_name, drift_name, step, scale, correct_z = False, show_plot = False):

    i3_data = i3togrid.I3GDataLL(mlist_name, scale = scale)
    film_l = i3_data.getFilmLength()
    max_err = 0.2


    # Sub-routines.
    def saveDriftData(fdx, fdy, fdz):
        driftutilities.saveDriftData(drift_name, fdx, fdy, fdz)

    def interpolateData(xvals, yvals):
        return driftutilities.interpolateData(xvals, yvals, film_l)

    # Don't analyze films that are too short.
    if (4 * step > film_l):
        saveDriftData(numpy.zeros(film_l),
                      numpy.zeros(film_l),
                      numpy.zeros(film_l))
        return

    print("Performing XY correction.")

    # Compute offsets between all pairs of sub images.
    endpost = film_l - step/2
    old_start1 = -1
    start1 = 0
    end1 = start1 + step
    start2 = start1
    end2 = start2 + step
    i = 0
    j = 0
    centers = [(end1 - start1)/2 + start1]
    pairs = []
    while (start1 < endpost):

        if (start2 > endpost):
            i += 1
            j = i
            start1 += step
            end1 = start1 + step
            start2 = start1
            end2 = start2 + step
            if (end1 > endpost):
                end1 = film_l
            if (end2 > endpost):
                end2 = film_l
            if (start1 < endpost):
                centers.append((end1 - start1)/2 + start1)

        if (start1 > endpost):
            continue

        if not (start1 == start2):
            if (old_start1 != start1):
                i3_data.loadDataInFrames(fmin = start1, fmax = end1-1)
                sub1 = i3_data.i3To2DGridAllChannelsMerged(uncorrected = True)
                old_start1 = start1

            i3_data.loadDataInFrames(fmin = start2, fmax = end2-1)
            sub2 = i3_data.i3To2DGridAllChannelsMerged(uncorrected = True)

            [corr, dx, dy, success] = imagecorrelation.xyOffset(sub1,
                                                                sub2,
                                                                scale)

            dx = dx/float(scale)
            dy = dy/float(scale)

            print("offset between frame ranges ", start1, "-" , end1 , " and ", start2, "-", end2)

            if success:
                print(" -> ", dx, dy, "good")
            else:
                print(" -> ", dx, dy, "bad")
            print("")

            pairs.append([i, j, dx, dy, success])

        j += 1
        start2 += step
        end2 = start2 + step
        if (end2 > endpost):
            end2 = film_l


    print("--")

    #
    # For testing it is faster to not have to re-run the
    # XY drift correction calculations.
    #
    #with open("test.dat", "w") as fp:
    #    pickle.dump([centers, pairs], fp)
    #
    #with open("test.dat") as fp:
    #    [centers, pairs] = pickle.load(fp)
    #

    # Prepare rij_x, rij_y, A matrix.
    rij_x = numpy.zeros(len(pairs), dtype = numpy.float32)
    rij_y = numpy.zeros(len(pairs), dtype = numpy.float32)
    A = numpy.zeros((len(pairs),len(centers)), dtype = numpy.float32)
    for i, pair in enumerate(pairs):
        rij_x[i] = pair[2]
        rij_y[i] = pair[3]
        A[i,pair[0]:pair[1]] = 1.0

    # Calculate drift (pass1). 
    # dx and dy contain the optimal offset between sub image i and sub image i+1 in x/y.
    pinv_A = numpy.linalg.pinv(A)
    dx = numpy.dot(pinv_A, rij_x)
    dy = numpy.dot(pinv_A, rij_y)

    # Calculate errors.
    err_x = numpy.dot(A, dx) - rij_x
    err_y = numpy.dot(A, dy) - rij_y

    err_d = numpy.sqrt(err_x * err_x + err_y * err_y)
    arg_sort_err = numpy.argsort(err_d)

    # Print errors before.
    if False:
        print("Before:")
        for i in range(err_d.size):
            print(i, rij_x[i], rij_y[i], A[i,:], err_d[i])
        print("")

    # Remove bad values.
    j = len(arg_sort_err) - 1

    while (j > 0) and (err_d[arg_sort_err[j]] > max_err):
        index = arg_sort_err[j]
        delA = numpy.delete(A, index, 0)
        if (numpy.linalg.matrix_rank(delA, tol = 1.0) == (len(centers)-1)):
            print(j, "removing", index, "with error", err_d[index])
            A = delA
            rij_x = numpy.delete(rij_x, index, 0)
            rij_y = numpy.delete(rij_y, index, 0)
            err_d = numpy.delete(err_d, index, 0)
            arg_sort_err[(arg_sort_err > index)] -= 1
        else:
            print("not removing", index, "with error", err_d[index])
        j -= 1

    # Print errors after.
    if False:
        print("")
        print("After:")
        for i in range(err_d.size):
            print(i, rij_x[i], rij_y[i], A[i,:], err_d[i])
        print("")

    # Calculate drift (pass2). 
    pinv_A = numpy.linalg.pinv(A)
    dx = numpy.dot(pinv_A, rij_x)
    dy = numpy.dot(pinv_A, rij_y)


    # Integrate to get final drift.
    driftx = numpy.zeros((dx.size))
    drifty = numpy.zeros((dy.size))
    for i in range(dx.size):
        driftx[i] = numpy.sum(dx[0:i])
        drifty[i] = numpy.sum(dy[0:i])

    if True:
        for i in range(driftx.size):
            print(i, centers[i], driftx[i], drifty[i])

    # Create spline for interpolation.
    final_driftx = interpolateData(centers, driftx)
    final_drifty = interpolateData(centers, drifty)

    # Plot XY drift.
    if show_plot:
        import matplotlib
        import matplotlib.pyplot as pyplot

        x = numpy.arange(film_l)
        fig = pyplot.figure()
        ax = fig.add_subplot(111)
        ax.plot(x, final_driftx, color = 'blue')
        ax.plot(x, final_drifty, color = 'red')
        pyplot.show()

    # Z correction.
    if not correct_z:
        saveDriftData(final_driftx,
                      final_drifty,
                      numpy.zeros(film_l))
        return

    print("")
    print("Performing Z Correction.")

    start = 0
    z_bins = 20
    i3_data.loadDataInFrames(fmin = start, fmax = start+step)

    if correct_z:
        z_bins = 20
        xyzmaster = i3_data.i3To3DGridAllChannelsMerged(z_bins,
                                                        uncorrected = True)

    j = 0
    index = 0
    old_dz = 0.0
    driftz = numpy.zeros((dx.size))
    while(j < film_l):

        # Load correct frame range.
        if ((j + 2*step) >= film_l):
            i3_data.loadDataInFrames(fmin = j)
            step_step = 2*step
        else:
            i3_data.loadDataInFrames(fmin = j, fmax = j + step)
            step_step = step

        # Apply XY drift correction.
        i3_data.applyXYDriftCorrection(driftx[index], drifty[index])

        # Z correlation
        dz = old_dz

        xyzcurr = i3_data.i3To3DGridAllChannelsMerged(z_bins,
                                                      uncorrected = True)

        [corr, fit, dz, z_success] = imagecorrelation.zOffset(xyzmaster, xyzcurr)

        # Update Values
        if z_success:
            old_dz = dz
        else:
            dz = old_dz
        dz = dz * 1000.0/float(z_bins)

        if z_success:
            i3_data.applyZDriftCorrection(-dz)
            xyzmaster += i3_data.i3To3DGridAllChannelsMerged(z_bins)

        driftz[index] = dz

        if z_success:
            print(index, dz, "good")
        else:
            print(index, dz, "bad")

        index += 1
        j += step_step

    final_driftz = interpolateData(centers, driftz)

    saveDriftData(final_driftx,
                  final_drifty,
                  final_driftz)

    # Plot X,Y, Z drift.
    if show_plot:
        import matplotlib
        import matplotlib.pyplot as pyplot

        pixel_size = 160.0 # pixel size in nm.
        x = numpy.arange(film_l)
        fig = pyplot.figure()
        ax = fig.add_subplot(111)
        ax.plot(x, pixel_size * final_driftx, color = 'red')
        ax.plot(x, pixel_size * final_drifty, color = 'green')
        ax.plot(x, final_driftz, color = 'blue')
        pyplot.show()
Example #9
def alignAndMerge(file1,
                  file2,
                  results_file,
                  scale=2,
                  dx=0,
                  dy=0,
                  z_min=-0.5,
                  z_max=0.5):
    """
    Note: This only aligns and merges the tracks, not the localizations.
    """
    z_bins = int((z_max - z_min) / 0.05)

    with saH5Py.SAH5Py(results_file, is_existing=False) as h5_out:

        # Process first file, this has no offset.
        with saH5Py.SAH5Grid(filename=file1, scale=scale,
                             z_bins=z_bins) as h5g_in1:
            [mx, my] = h5g_in1.getMovieInformation()[:2]
            h5_out.setMovieInformation(mx, my, 0, "")
            h5_out.setPixelSize(h5g_in1.getPixelSize())
            h5_out.addMetadata(h5g_in1.getMetadata())

            for tracks in h5g_in1.tracksIterator():
                sys.stdout.write(".")
                sys.stdout.flush()
                h5_out.addTracks(tracks)

            sys.stdout.write("\n")

            im1_xy = h5g_in1.gridTracks2D()
            im1_xyz = h5g_in1.gridTracks3D(z_min, z_max)

        # Process second file.
        with saH5Py.SAH5Grid(filename=file2, scale=scale,
                             z_bins=z_bins) as h5g_in2:

            # Determine X/Y offset.
            im2_xy = h5g_in2.gridTracks2D()
            [corr, offx, offy, xy_success
             ] = imagecorrelation.xyOffset(im1_xy,
                                           im2_xy,
                                           scale,
                                           center=[dx * scale, dy * scale])

            if False:
                tifffile.imsave("im1_xy.tif", im1_xy)
                tifffile.imsave("im2_xy.tif", im2_xy)

            assert xy_success, "Could not align images in X/Y."
            offx = offx / float(scale)
            offy = offy / float(scale)

            # Determine Z offset.
            im2_xyz = h5g_in2.gridTracks3D(z_min, z_max, dx=offx, dy=offy)

            [corr, fit, offz,
             z_success] = imagecorrelation.zOffset(im1_xyz, im2_xyz)

            assert z_success, "Could not align images in Z."
            offz = -offz * (z_max - z_min) / float(z_bins)

            for tracks in h5g_in2.tracksIterator():
                sys.stdout.write(".")
                sys.stdout.flush()
                tracks["x"] += offx
                tracks["y"] += offy
                tracks["z"] += offz
                h5_out.addTracks(tracks)

            sys.stdout.write("\n")

    return [offx, offy, offz]
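
# Usage sketch for the HDF5 track-based alignAndMerge() above (hypothetical
# file names; the measured x/y/z offsets are returned):
if __name__ == "__main__":
    [off_x, off_y, off_z] = alignAndMerge("channel1.hdf5", "channel2.hdf5",
                                          "merged.hdf5", scale=2)
    print("offsets:", off_x, off_y, off_z)
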
def rccDriftCorrection(hdf5_filename, drift_filename, step, scale, z_min, z_max, correct_z, make_plots = True):
    """
    hdf5_filename - The localizations file for drift estimation.
    drift_filename - A text file to save the estimated drift in.
    step - Number of frames to group together to create a single image.
    scale - Image upsampling factor, 2.0 = 2x upsampling.
    z_min - Minimum localization z value in microns.
    z_max - Maximum localization z value in microns.
    correct_z - Estimate drift in z as well as in x/y.
    make_plots - If True, show plots of the estimated XY and Z drift.
    """

    z_bins = int((z_max - z_min)/0.05)
    h5_dc = driftUtils.SAH5DriftCorrection(filename = hdf5_filename,
                                           scale = scale,
                                           z_bins = z_bins)
    film_l = h5_dc.getMovieLength()
    
    max_err = 0.2

    # Sub-routines.
    def saveDriftData(fdx, fdy, fdz):
        driftUtils.saveDriftData(drift_filename, fdx, fdy, fdz)
        h5_dc.saveDriftData(fdx, fdy, fdz)

    def interpolateData(xvals, yvals):
        return driftUtils.interpolateData(xvals, yvals, film_l)

    # Don't analyze films that are empty.
    if (h5_dc.getNLocalizations() == 0):
        saveDriftData(numpy.zeros(film_l),
                      numpy.zeros(film_l),
                      numpy.zeros(film_l))
        return()
    
    # Don't analyze films that are too short.
    if (4 * step > film_l):
        saveDriftData(numpy.zeros(film_l),
                      numpy.zeros(film_l),
                      numpy.zeros(film_l))
        return

    print("Performing XY correction.")

    # Figure out how to bin the movie.
    frame = 0
    bin_edges = [0]
    while(frame < film_l):
        if ((frame + 2*step) > film_l):
            frame = film_l
        else:
            frame += step
        bin_edges.append(frame)

    # Estimate offsets between all pairs of sub images.        
    centers = []
    pairs = []
    for i in range(len(bin_edges)-1):
        centers.append((bin_edges[i+1] + bin_edges[i])/2)
        for j in range(i+1, len(bin_edges)-1):
            h5_dc.setFrameRange(bin_edges[i], bin_edges[i+1])
            sub1 = h5_dc.grid2D()

            h5_dc.setFrameRange(bin_edges[j], bin_edges[j+1])
            sub2 = h5_dc.grid2D()

            [corr, dx, dy, success] = imagecorrelation.xyOffset(sub1, sub2, scale)

            dx = dx/float(scale)
            dy = dy/float(scale)

            print("offset between frame ranges", bin_edges[i], "-", bin_edges[i+1],
                  "and", bin_edges[j], "-", bin_edges[j+1])

            if success:
                print(" -> {0:0.3f} {1:0.3f} good".format(dx, dy))
            else:
                print(" -> {0:0.3f} {1:0.3f} bad".format(dx, dy))
            print("")

            pairs.append([i, j, dx, dy, success])

    print("--")

    #
    # For testing it is faster to not have to re-run the
    # XY drift correction calculations.
    #
    #with open("test.dat", "w") as fp:
    #    pickle.dump([centers, pairs], fp)
    #
    #with open("test.dat") as fp:
    #    [centers, pairs] = pickle.load(fp)
    #

    # Prepare rij_x, rij_y, A matrix.
    rij_x = numpy.zeros(len(pairs), dtype = numpy.float32)
    rij_y = numpy.zeros(len(pairs), dtype = numpy.float32)
    A = numpy.zeros((len(pairs),len(centers)), dtype = numpy.float32)
    for i, pair in enumerate(pairs):
        rij_x[i] = pair[2]
        rij_y[i] = pair[3]
        A[i,pair[0]:pair[1]] = 1.0

    # Calculate drift (pass1). 
    # dx and dy contain the optimal offset between sub image i and sub image i+1 in x/y.
    pinv_A = numpy.linalg.pinv(A)
    dx = numpy.dot(pinv_A, rij_x)
    dy = numpy.dot(pinv_A, rij_y)

    # Calculate errors.
    err_x = numpy.dot(A, dx) - rij_x
    err_y = numpy.dot(A, dy) - rij_y

    err_d = numpy.sqrt(err_x * err_x + err_y * err_y)
    arg_sort_err = numpy.argsort(err_d)

    # Print errors before.
    if False:
        print("Before:")
        for i in range(err_d.size):
            print(i, rij_x[i], rij_y[i], A[i,:], err_d[i])
        print("")

    # Remove bad values.
    j = len(arg_sort_err) - 1

    while (j > 0) and (err_d[arg_sort_err[j]] > max_err):
        index = arg_sort_err[j]
        delA = numpy.delete(A, index, 0)
        if (numpy.linalg.matrix_rank(delA, tol = 1.0) == (len(centers)-1)):
            print(j, "removing", index, "with error", err_d[index])
            A = delA
            rij_x = numpy.delete(rij_x, index, 0)
            rij_y = numpy.delete(rij_y, index, 0)
            err_d = numpy.delete(err_d, index, 0)
            arg_sort_err[(arg_sort_err > index)] -= 1
        else:
            print("not removing", index, "with error", err_d[index])
        j -= 1

    # Print errors after.
    if False:
        print("")
        print("After:")
        for i in range(err_d.size):
            print(i, rij_x[i], rij_y[i], A[i,:], err_d[i])
        print("")

    # Calculate drift (pass2). 
    pinv_A = numpy.linalg.pinv(A)
    dx = numpy.dot(pinv_A, rij_x)
    dy = numpy.dot(pinv_A, rij_y)

    # Integrate to get final drift.
    driftx = numpy.zeros((dx.size))
    drifty = numpy.zeros((dy.size))
    for i in range(dx.size):
        driftx[i] = numpy.sum(dx[0:i])
        drifty[i] = numpy.sum(dy[0:i])

    # Print out XY results.
    for i in range(driftx.size):
        print("{0:0.1f} {1:0.3f} {2:0.3f}".format(centers[i], driftx[i], drifty[i]))

    # Create spline for interpolation.
    final_driftx = interpolateData(centers, driftx)
    final_drifty = interpolateData(centers, drifty)

    # Plot XY drift.
    if make_plots:
        import matplotlib
        import matplotlib.pyplot as pyplot

        x = numpy.arange(film_l)
        pyplot.plot(x, final_driftx, color = 'blue')
        pyplot.plot(x, final_drifty, color = 'red')
        pyplot.show()

    # Z correction.
    if not correct_z:
        saveDriftData(final_driftx,
                      final_drifty,
                      numpy.zeros(film_l))        
        h5_dc.close(verbose = False)
        return

    print("")
    print("Performing Z Correction.")

    # Initialized so the fall-back below still works if the first z correlation fails.
    old_dz = 0.0
    driftz = numpy.zeros((dx.size))
    xyz_master = None
    for i in range(len(bin_edges)-1):
        h5_dc.setFrameRange(bin_edges[i], bin_edges[i+1])
        h5_dc.setDriftCorrectionXY(driftx[i], drifty[i])
        h5_dc.setDriftCorrectionZ(0.0)
        
        if xyz_master is None:
            xyz_master = h5_dc.grid3D(z_min, z_max, drift_corrected = True)
            continue

        xyz_curr = h5_dc.grid3D(z_min, z_max, drift_corrected = True)
            
        # Do z correlation
        [corr, fit, dz, z_success] = imagecorrelation.zOffset(xyz_master, xyz_curr)

        # Update Values
        if z_success:
            old_dz = dz
        else:
            dz = old_dz
            
        dz = dz * (z_max - z_min)/float(z_bins)

        if z_success:
            h5_dc.setDriftCorrectionZ(-dz)
            xyz_master += h5_dc.grid3D(z_min, z_max, drift_corrected = True)

        print("{0:d} {1:d} {2:0.3f}".format(bin_edges[i], bin_edges[i+1], dz))
        driftz[i] = -dz

    final_driftz = interpolateData(centers, driftz)

    saveDriftData(final_driftx,
                  final_drifty,
                  final_driftz)

    h5_dc.close(verbose = False)

    # Plot X,Y,Z drift.
    if make_plots:
        import matplotlib
        import matplotlib.pyplot as pyplot

        pixel_size = 160.0 # pixel size in nm.
        x = numpy.arange(film_l)
        pyplot.plot(x, pixel_size * final_driftx, color = 'red')
        pyplot.plot(x, pixel_size * final_drifty, color = 'green')
        pyplot.plot(x, 1000.0*final_driftz, color = 'blue')
        pyplot.show()
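
# Usage sketch for the HDF5-based rccDriftCorrection() above (hypothetical file
# names and step size; z_min/z_max are in microns, as described in the docstring):
if __name__ == "__main__":
    rccDriftCorrection("movie.hdf5", "movie_drift.txt",
                       step = 500, scale = 2,
                       z_min = -0.5, z_max = 0.5,
                       correct_z = True, make_plots = False)
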
Example #11
def alignAndMerge(file1, file2, results_file, scale = 2, dx = 0, dy = 0, z_min = -0.5, z_max = 0.5):
    """
    Note: This only aligns and merges the tracks, not the localizations.
    """
    z_bins = int((z_max - z_min)/0.05)
    
    with saH5Py.SAH5Py(results_file, is_existing = False) as h5_out:

        # Process first file, this has no offset.
        with saH5Py.SAH5Grid(filename = file1, scale = scale, z_bins = z_bins) as h5g_in1:
            [mx, my] = h5g_in1.getMovieInformation()[:2]
            h5_out.setMovieInformation(mx, my, 0, "")
            h5_out.setPixelSize(h5g_in1.getPixelSize())
            h5_out.addMetadata(h5g_in1.getMetadata())

            for tracks in h5g_in1.tracksIterator():
                sys.stdout.write(".")
                sys.stdout.flush()
                h5_out.addTracks(tracks)

            sys.stdout.write("\n")

            im1_xy = h5g_in1.gridTracks2D()
            im1_xyz = h5g_in1.gridTracks3D(z_min, z_max)
                
        # Process second file.
        with saH5Py.SAH5Grid(filename = file2, scale = scale, z_bins = z_bins) as h5g_in2:

            # Determine X/Y offset.
            im2_xy = h5g_in2.gridTracks2D()
            [corr, offx, offy, xy_success] = imagecorrelation.xyOffset(im1_xy, im2_xy, scale,
                                                                       center = [dx * scale,
                                                                                 dy * scale])

            if False:
                tifffile.imsave("im1_xy.tif", im1_xy)
                tifffile.imsave("im2_xy.tif", im2_xy)

            assert xy_success, "Could not align images in X/Y."
            offx = offx/float(scale)
            offy = offy/float(scale)

            # Determine Z offset.
            im2_xyz = h5g_in2.gridTracks3D(z_min, z_max, dx = offx, dy = offy)

            [corr, fit, offz, z_success] = imagecorrelation.zOffset(im1_xyz, im2_xyz)
            
            assert z_success, "Could not align images in Z."
            offz = -offz * (z_max - z_min)/float(z_bins)

            for tracks in h5g_in2.tracksIterator():
                sys.stdout.write(".")
                sys.stdout.flush()
                tracks["x"] += offx
                tracks["y"] += offy
                tracks["z"] += offz
                h5_out.addTracks(tracks)

            sys.stdout.write("\n")

    return [offx, offy, offz]
Example #12
def rccDriftCorrection(hdf5_filename, drift_filename, step, scale, z_min,
                       z_max, correct_z):
    """
    hdf5_filename - The localizations file for drift estimation.
    drift_filename - A text file to save the estimated drift in.
    step - Number of frames to group together to create a single image.
    scale - Image upsampling factor, 2.0 = 2x upsampling.
    z_min - Minimum localization z value in microns.
    z_max - Maximum localization z value in microns.
    correct_z - Estimate drift in z as well as in x/y.
    """

    z_bins = int((z_max - z_min) / 0.05)
    h5_dc = driftUtils.SAH5DriftCorrection(filename=hdf5_filename,
                                           scale=scale,
                                           z_bins=z_bins)
    film_l = h5_dc.getMovieLength()

    max_err = 0.2

    # Sub-routines.
    def saveDriftData(fdx, fdy, fdz):
        driftUtils.saveDriftData(drift_filename, fdx, fdy, fdz)
        h5_dc.saveDriftData(fdx, fdy, fdz)

    def interpolateData(xvals, yvals):
        return driftUtils.interpolateData(xvals, yvals, film_l)

    # Don't analyze films that are empty.
    if (h5_dc.getNLocalizations() == 0):
        saveDriftData(numpy.zeros(film_l), numpy.zeros(film_l),
                      numpy.zeros(film_l))
        return ()

    # Don't analyze films that are too short.
    if (4 * step > film_l):
        saveDriftData(numpy.zeros(film_l), numpy.zeros(film_l),
                      numpy.zeros(film_l))
        return

    print("Performing XY correction.")

    # Figure out how to bin the movie.
    frame = 0
    bin_edges = [0]
    while (frame < film_l):
        if ((frame + 2 * step) > film_l):
            frame = film_l
        else:
            frame += step
        bin_edges.append(frame)

    # Estimate offsets between all pairs of sub images.
    centers = []
    pairs = []
    for i in range(len(bin_edges) - 1):
        centers.append((bin_edges[i + 1] + bin_edges[i]) / 2)
        for j in range(i + 1, len(bin_edges) - 1):
            h5_dc.setFrameRange(bin_edges[i], bin_edges[i + 1])
            sub1 = h5_dc.grid2D()

            h5_dc.setFrameRange(bin_edges[j], bin_edges[j + 1])
            sub2 = h5_dc.grid2D()

            [corr, dx, dy,
             success] = imagecorrelation.xyOffset(sub1, sub2, scale)

            dx = dx / float(scale)
            dy = dy / float(scale)

            print("offset between frame ranges", bin_edges[i], "-",
                  bin_edges[i + 1], "and", bin_edges[j], "-", bin_edges[j + 1])

            if success:
                print(" -> {0:0.3f} {1:0.3f} good".format(dx, dy))
            else:
                print(" -> {0:0.3f} {1:0.3f} bad".format(dx, dy))
            print("")

            pairs.append([i, j, dx, dy, success])

    print("--")

    #
    # For testing it is faster to not have to re-run the
    # XY drift correction calculations.
    #
    #with open("test.dat", "w") as fp:
    #    pickle.dump([centers, pairs], fp)
    #
    #with open("test.dat") as fp:
    #    [centers, pairs] = pickle.load(fp)
    #

    # Prepare rij_x, rij_y, A matrix.
    rij_x = numpy.zeros(len(pairs), dtype=numpy.float32)
    rij_y = numpy.zeros(len(pairs), dtype=numpy.float32)
    A = numpy.zeros((len(pairs), len(centers)), dtype=numpy.float32)
    for i, pair in enumerate(pairs):
        rij_x[i] = pair[2]
        rij_y[i] = pair[3]
        A[i, pair[0]:pair[1]] = 1.0

    # Calculate drift (pass1).
    # dx and dy contain the optimal offset between sub image i and sub image i+1 in x/y.
    pinv_A = numpy.linalg.pinv(A)
    dx = numpy.dot(pinv_A, rij_x)
    dy = numpy.dot(pinv_A, rij_y)

    # Calculate errors.
    err_x = numpy.dot(A, dx) - rij_x
    err_y = numpy.dot(A, dy) - rij_y

    err_d = numpy.sqrt(err_x * err_x + err_y * err_y)
    arg_sort_err = numpy.argsort(err_d)

    # Print errors before.
    if False:
        print("Before:")
        for i in range(err_d.size):
            print(i, rij_x[i], rij_y[i], A[i, :], err_d[i])
        print("")

    # Remove bad values.
    j = len(arg_sort_err) - 1

    while (j > 0) and (err_d[arg_sort_err[j]] > max_err):
        index = arg_sort_err[j]
        delA = numpy.delete(A, index, 0)
        if (numpy.linalg.matrix_rank(delA, tol=1.0) == (len(centers) - 1)):
            print(j, "removing", index, "with error", err_d[index])
            A = delA
            rij_x = numpy.delete(rij_x, index, 0)
            rij_y = numpy.delete(rij_y, index, 0)
            err_d = numpy.delete(err_d, index, 0)
            arg_sort_err[(arg_sort_err > index)] -= 1
        else:
            print("not removing", index, "with error", err_d[index])
        j -= 1

    # Print errors after.
    if False:
        print("")
        print("After:")
        for i in range(err_d.size):
            print(i, rij_x[i], rij_y[i], A[i, :], err_d[i])
        print("")

    # Calculate drift (pass2).
    pinv_A = numpy.linalg.pinv(A)
    dx = numpy.dot(pinv_A, rij_x)
    dy = numpy.dot(pinv_A, rij_y)

    # Integrate to get final drift.
    driftx = numpy.zeros((dx.size))
    drifty = numpy.zeros((dy.size))
    for i in range(dx.size):
        driftx[i] = numpy.sum(dx[0:i])
        drifty[i] = numpy.sum(dy[0:i])

    # Print out XY results.
    for i in range(driftx.size):
        print("{0:0.1f} {1:0.3f} {2:0.3f}".format(centers[i], driftx[i],
                                                  drifty[i]))

    # Create spline for interpolation.
    final_driftx = interpolateData(centers, driftx)
    final_drifty = interpolateData(centers, drifty)

    # Plot XY drift.
    if False:
        import matplotlib
        import matplotlib.pyplot as pyplot

        x = numpy.arange(film_l)
        pyplot.plot(x, final_driftx, color='blue')
        pyplot.plot(x, final_drifty, color='red')
        pyplot.show()

    # Z correction.
    if not correct_z:
        saveDriftData(final_driftx, final_drifty, numpy.zeros(film_l))
        h5_dc.close(verbose=False)
        return

    print("")
    print("Performing Z Correction.")

    # Initialized so the fall-back below still works if the first z correlation fails.
    old_dz = 0.0
    driftz = numpy.zeros((dx.size))
    xyz_master = None
    for i in range(len(bin_edges) - 1):
        h5_dc.setFrameRange(bin_edges[i], bin_edges[i + 1])
        h5_dc.setDriftCorrectionXY(driftx[i], drifty[i])
        h5_dc.setDriftCorrectionZ(0.0)

        if xyz_master is None:
            xyz_master = h5_dc.grid3D(z_min, z_max, drift_corrected=True)
            continue

        xyz_curr = h5_dc.grid3D(z_min, z_max, drift_corrected=True)

        # Do z correlation
        [corr, fit, dz,
         z_success] = imagecorrelation.zOffset(xyz_master, xyz_curr)

        # Update Values
        if z_success:
            old_dz = dz
        else:
            dz = old_dz

        dz = dz * (z_max - z_min) / float(z_bins)

        if z_success:
            h5_dc.setDriftCorrectionZ(-dz)
            xyz_master += h5_dc.grid3D(z_min, z_max, drift_corrected=True)

        print("{0:d} {1:d} {2:0.3f}".format(bin_edges[i], bin_edges[i + 1],
                                            dz))
        driftz[i] = -dz

    final_driftz = interpolateData(centers, driftz)

    saveDriftData(final_driftx, final_drifty, final_driftz)

    h5_dc.close(verbose=False)

    # Plot X,Y, Z drift.
    if True:
        import matplotlib
        import matplotlib.pyplot as pyplot

        pixel_size = 160.0  # pixel size in nm.
        x = numpy.arange(film_l)
        pyplot.plot(x, pixel_size * final_driftx, color='red')
        pyplot.plot(x, pixel_size * final_drifty, color='green')
        pyplot.plot(x, 1000.0 * final_driftz, color='blue')
        pyplot.show()
Example #13
def xyzDriftCorrection(hdf5_filename, drift_filename, step, scale, z_min,
                       z_max, correct_z):
    """
    hdf5_filename - The localizations file for drift estimation.
    drift_filename - A text file to save the estimated drift in.
    step - Number of frames to group together to create a single image.
    scale - Image upsampling factor, 2.0 = 2x upsampling.
    z_min - Minimum localization z value in microns.
    z_max - Maximum localization z value in microns.
    correct_z - Estimate drift in z as well as in x/y.
    """
    #
    # FIXME? This assumes that we also analyzed all the frames in the
    #        movie. If the user set the 'max_frame' parameter this
    #        might not actually be true. For now we're just skipping
    #        over all the empty frames, but it might make more sense
    #        not to do anything at all.
    #
    z_bins = int((z_max - z_min) / 0.05)
    h5_dc = driftUtils.SAH5DriftCorrection(filename=hdf5_filename,
                                           scale=scale,
                                           z_bins=z_bins)
    film_l = h5_dc.getMovieLength()

    # Check if we have z data for z drift correction.
    if correct_z:
        assert h5_dc.hasLocalizationsField(
            "z"
        ), "Cannot do z drift correction without 'z' position information. Set 'z_correction' parameter to 0."

    # Sub-routines.
    def saveDriftData(fdx, fdy, fdz):
        driftUtils.saveDriftData(drift_filename, fdx, fdy, fdz)
        h5_dc.saveDriftData(fdx, fdy, fdz)

    def interpolateData(xvals, yvals):
        return driftUtils.interpolateData(xvals, yvals, film_l)

    # Don't analyze films that are empty.
    if (h5_dc.getNLocalizations() == 0):
        saveDriftData(numpy.zeros(film_l), numpy.zeros(film_l),
                      numpy.zeros(film_l))
        return ()

    # Don't analyze films that are too short.
    if ((4 * step) >= film_l):
        saveDriftData(numpy.zeros(film_l), numpy.zeros(film_l),
                      numpy.zeros(film_l))
        return ()

    #
    # Drift correction (XY and Z are all done at the same time)
    #
    # Note that drift corrected localizations are added back into
    # the reference image in the hopes of improving the correction
    # for subsequent localizations.
    #

    #
    # Figure out how to bin the movie. It seemed easier to do
    # this at the beginning rather than dynamically as we
    # went through the movie.
    #
    frame = 0
    bin_edges = [0]
    while (frame < film_l):
        if ((frame + 2 * step) > film_l):
            frame = film_l
        else:
            frame += step
        bin_edges.append(frame)

    xy_master = None
    xyz_master = None
    t = []
    x = []
    y = []
    z = []
    old_dx = 0.0
    old_dy = 0.0
    old_dz = 0.0
    for i in range(len(bin_edges) - 1):

        # Load correct frame range.
        h5_dc.setFrameRange(bin_edges[i], bin_edges[i + 1])

        midp = (bin_edges[i + 1] + bin_edges[i]) / 2

        xy_curr = h5_dc.grid2D()

        #
        # This is to handle analysis that did not start at frame 0
        # of the movie. Basically we keep skipping ahead until we
        # find a group of frames that have some localizations.
        #
        # FIXME: There could still be problems if the movie does not
        #        start on a multiple of the step size.
        #
        if xy_master is None:
            if (numpy.sum(xy_curr) > 0):
                xy_master = xy_curr
                if correct_z:
                    xyz_master = h5_dc.grid3D(z_min, z_max)
            t.append(midp)
            x.append(0.0)
            y.append(0.0)
            z.append(0.0)
            print(bin_edges[i], bin_edges[i + 1], numpy.sum(xy_curr), 0.0, 0.0,
                  0.0)
            continue

        # Correlate to master image, skipping empty images.
        if (numpy.sum(xy_curr) > 0):
            [corr, dx, dy, xy_success] = imagecorrelation.xyOffset(
                xy_master,
                xy_curr,
                scale,
                center=[x[i - 1] * scale, y[i - 1] * scale])
        else:
            [corr, dx, dy, xy_success] = [0.0, 0.0, 0.0, False]

        #
        # Update values. If we failed, we just use the last successful
        # offset measurement and hope this is close enough.
        #
        if xy_success:
            old_dx = dx
            old_dy = dy
        else:
            dx = old_dx
            dy = old_dy

        # dx and dy from xyOffset() are measured in upsampled (scale x)
        # pixels, so convert them back to camera pixel units.
        dx = dx / float(scale)
        dy = dy / float(scale)

        t.append(midp)
        x.append(dx)
        y.append(dy)

        #
        # Apply the x/y drift correction to the current 'test'
        # localizations and add them into the master, but only
        # if the offset was measured successfully.
        #
        h5_dc.setDriftCorrectionXY(dx, dy)
        if xy_success:
            # Add current to master
            xy_master += h5_dc.grid2D(drift_corrected=True)

        #
        # Do Z correlation if requested.
        #
        # Default to the last measured z offset, converted from z bins to
        # microns so that it is in the same units as the values recorded below.
        dz = old_dz * (z_max - z_min) / float(z_bins)
        if correct_z and xy_success:

            # Create 3D image with XY corrections only. We set the z offset to 0.0 to
            # reset stale values from the previous cycle, if any.
            h5_dc.setDriftCorrectionZ(0.0)
            xyz_curr = h5_dc.grid3D(z_min, z_max, drift_corrected=True)

            # Do z correlation, skipping empty images.
            if (numpy.sum(xyz_curr) > 0):
                [corr, fit, dz,
                 z_success] = imagecorrelation.zOffset(xyz_master, xyz_curr)
            else:
                [corr, fit, dz, z_success] = [0.0, 0.0, 0.0, False]

            # Update values. If the fit failed, fall back to the last
            # successful z offset measurement.
            if z_success:
                old_dz = dz
            else:
                dz = old_dz

            # Convert the offset from z bins to microns (the bins are
            # approximately 0.05 microns wide).
            dz = dz * (z_max - z_min) / float(z_bins)

            if z_success:
                h5_dc.setDriftCorrectionZ(-dz)
                xyz_master += h5_dc.grid3D(z_min, z_max, drift_corrected=True)

        z.append(-dz)

        print("{0:d} {1:d} {2:d} {3:0.3f} {4:0.3f} {5:0.3f}".format(
            bin_edges[i], bin_edges[i + 1], numpy.sum(xy_curr), dx, dy, dz))

    #
    # Create numpy versions of the drift arrays. We estimated the drift
    # for groups of frames. We use interpolation to create an estimation
    # for each individual frame.
    #
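    # (driftUtils.interpolateData() presumably returns one value per frame,
    #  0 .. film_l - 1, interpolated from the per-group estimates at the
    #  group midpoints stored in t.)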
    nt = numpy.array(t)
    final_driftx = interpolateData(nt, numpy.array(x))
    final_drifty = interpolateData(nt, numpy.array(y))
    final_driftz = interpolateData(nt, numpy.array(z))

    saveDriftData(final_driftx, final_drifty, final_driftz)

    h5_dc.close(verbose=False)