# Imports for the functions below. Module paths are assumed to follow
# the storm-analysis package layout.
import numpy
import os
import pickle
import tifffile

import storm_analysis

import storm_analysis.daostorm_3d.find_peaks as find_peaks
import storm_analysis.psf_fft.find_peaks_std as findPeaksStd
import storm_analysis.sa_library.analysis_io as analysisIO
import storm_analysis.sa_library.parameters as params
import storm_analysis.sa_utilities.std_analysis as std_analysis
import storm_analysis.simulator.draw_gaussians_c as dg
import storm_analysis.spliner.find_peaks_fista as find_peaks_fista
import storm_analysis.spliner.find_peaks_std as find_peaks_std

# measure_psf_utils is referenced under two different aliases below.
import storm_analysis.spliner.measure_psf_utils as measurePSFUtils
import storm_analysis.spliner.measure_psf_utils as mPSFUtils


def analyze(movie_name, mlist_name, settings_name):

    # Load parameters.
    parameters = params.ParametersPSFFFT().initFromFile(settings_name, warnings = False)

    # Create finding and fitting object.
    finder = findPeaksStd.initFindAndFit(parameters)

    # Create appropriate reader. A "camera_offset" attribute means a
    # conventional camera; otherwise a per-pixel sCMOS calibration is assumed.
    if parameters.hasAttr("camera_offset"):
        frame_reader = analysisIO.FrameReaderStd(movie_file = movie_name,
                                                 parameters = parameters)
    else:
        frame_reader = analysisIO.FrameReaderSCMOS(movie_file = movie_name,
                                                   parameters = parameters)

    # Create movie reader (uses frame reader).
    movie_reader = analysisIO.MovieReader(frame_reader = frame_reader,
                                          parameters = parameters)

    # Create localization file writer.
    data_writer = analysisIO.DataWriterHDF5(data_file = mlist_name,
                                            parameters = parameters,
                                            sa_type = "PSF-FFT")

    std_analysis.standardAnalysis(finder,
                                  movie_reader,
                                  data_writer,
                                  parameters)
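# A minimal usage sketch for the PSF-FFT analyze() above. The file names
# are hypothetical; a real run needs an existing movie, an output path
# for the HDF5 localizations, and a settings XML file with PSF-FFT
# parameters.
if (__name__ == "__main__"):
    analyze("movie_01.tif", "movie_01_mlist.hdf5", "psf_fft_settings.xml")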
def test_mspb_3():
    """
    Test (single) PSF measurement with drift.
    """
    # Make test movie.
    im_max = 1000.0
    n_pts = 10
    x = 7.0
    y = 11.0
    drift_xy = numpy.random.uniform(size=(n_pts, 2))
    psf_movie = storm_analysis.getPathOutputTest("psf_movie.tif")
    with tifffile.TiffWriter(psf_movie) as tf:
        for i in range(n_pts):
            image = dg.drawGaussiansXY((20, 20),
                                       numpy.array([x + drift_xy[i][0]]),
                                       numpy.array([y + drift_xy[i][1]]))
            image = image * im_max
            tf.save(image.astype(numpy.float32))

    # Parameters.
    p = params.ParametersDAO()
    p.changeAttr("camera_gain", 1.0)
    p.changeAttr("camera_offset", 0.0)

    # Frame reader.
    frdr = analysisIO.FrameReaderStd(movie_file=psf_movie, parameters=p)

    # Measure the PSF from the first frame only, at the drifted position,
    # so no drift correction is needed. (numpy.int was removed from recent
    # NumPy releases, so numpy.int64 is used here.)
    z_index = numpy.zeros(n_pts).astype(numpy.int64) - 1
    z_index[0] = 0
    [psf0, samples] = mPSFUtils.measureSinglePSFBeads(frdr,
                                                      z_index,
                                                      6,
                                                      x + drift_xy[0][0],
                                                      y + drift_xy[0][1],
                                                      zoom=2)

    # Measure from each of the other frames at the nominal position with
    # drift correction; the results should closely match the first
    # measurement.
    for i in range(1, n_pts):
        z_index = numpy.zeros(n_pts).astype(numpy.int64) - 1
        z_index[i] = 0
        [psf, samples] = mPSFUtils.measureSinglePSFBeads(frdr,
                                                         z_index,
                                                         6,
                                                         x,
                                                         y,
                                                         drift_xy=drift_xy,
                                                         zoom=2)
        assert (numpy.max(numpy.abs(psf0 - psf)/numpy.max(psf)) < 0.05)
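# The drift test above relies on a simple convention: the bead drawn in
# frame i sits at the nominal position plus that frame's entry in
# drift_xy. A sketch of that bookkeeping (hypothetical helper, not part
# of measure_psf_utils):
def driftCorrectedXY(x, y, drift_xy, frame):
    # Nominal bead position plus the per-frame (dx, dy) offset.
    return [x + drift_xy[frame][0], y + drift_xy[frame][1]]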
def test_mspb_2():
    """
    Test (single) PSF measurement, no drift, recentering.

    The maximum relative difference is typically on the order of 2%.
    """
    # Make test movie.
    im_max = 1000.0
    x = 7.0 + numpy.random.uniform(size=10)
    y = 11.0 + numpy.random.uniform(size=10)
    psf_movie = storm_analysis.getPathOutputTest("psf_movie.tif")
    with tifffile.TiffWriter(psf_movie) as tf:
        for i in range(x.size):
            image = dg.drawGaussiansXY((20, 20),
                                       numpy.array([x[i]]),
                                       numpy.array([y[i]]))
            image = image * im_max
            tf.save(image.astype(numpy.float32))

    # Parameters.
    p = params.ParametersDAO()
    p.changeAttr("camera_gain", 1.0)
    p.changeAttr("camera_offset", 0.0)

    # Frame reader.
    frdr = analysisIO.FrameReaderStd(movie_file=psf_movie, parameters=p)

    # Measure the PSF from the first frame.
    z_index = numpy.zeros(x.size).astype(numpy.int64) - 1
    z_index[0] = 0
    [psf0, samples] = mPSFUtils.measureSinglePSFBeads(frdr, z_index, 6, x[0], y[0], zoom=2)

    # Measure from each of the other frames; recentering should make the
    # results closely match the first measurement.
    for i in range(1, x.size):
        z_index = numpy.zeros(x.size).astype(numpy.int64) - 1
        z_index[i] = 0
        [psf, samples] = mPSFUtils.measureSinglePSFBeads(frdr, z_index, 6, x[i], y[i], zoom=2)
        assert (numpy.max(numpy.abs(psf0 - psf)/numpy.max(psf)) < 0.05)
def analyze(movie_name, mlist_name, settings_name):

    # Load parameters.
    parameters = params.ParametersSpliner().initFromFile(settings_name, warnings=False)

    # Check for v1.0 parameters.
    if not (parameters.hasAttr("camera_gain") or parameters.hasAttr("camera_calibration")):
        raise Exception("Camera parameters are missing. Version 1.0 parameters?")

    # Create appropriate finding and fitting object.
    if (parameters.getAttr("use_fista", 0) != 0):
        parameters = params.ParametersSplinerFISTA().initFromFile(settings_name)
        finder = find_peaks_fista.initFindAndFit(parameters)
    else:
        parameters = params.ParametersSplinerSTD().initFromFile(settings_name)
        finder = find_peaks_std.initFindAndFit(parameters)

    # Create appropriate reader.
    if parameters.hasAttr("camera_offset"):
        frame_reader = analysisIO.FrameReaderStd(movie_file=movie_name,
                                                 parameters=parameters)
    else:
        frame_reader = analysisIO.FrameReaderSCMOS(movie_file=movie_name,
                                                   parameters=parameters)

    # Create movie reader (uses frame reader).
    movie_reader = analysisIO.MovieReader(frame_reader=frame_reader,
                                          parameters=parameters)

    # Create localization file writer.
    data_writer = analysisIO.DataWriterHDF5(data_file=mlist_name,
                                            parameters=parameters,
                                            sa_type="Spliner")

    std_analysis.standardAnalysis(finder,
                                  movie_reader,
                                  data_writer,
                                  parameters)
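# The FISTA/STD branch above is driven by a "use_fista" entry in the
# settings file. A sketch of what that entry might look like, assuming
# the XML layout that the parameter names suggest (check an actual
# settings file for the real schema):
#
# <use_fista type="int">1</use_fista>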
def test_mspb_1():
    """
    Test (single) PSF measurement, no drift.
    """
    # Make test movie.
    x = 7.2
    y = 9.8
    psf_movie = storm_analysis.getPathOutputTest("psf_movie.tif")
    image = 1000.0 * dg.drawGaussiansXY((20, 20),
                                        numpy.array([x]),
                                        numpy.array([y]))
    with tifffile.TiffWriter(psf_movie) as tf:
        for i in range(6):
            tf.save(image.astype(numpy.float32))

    # Parameters.
    p = params.ParametersDAO()
    p.changeAttr("camera_gain", 1.0)
    p.changeAttr("camera_offset", 0.0)

    # Frame reader.
    frdr = analysisIO.FrameReaderStd(movie_file=psf_movie, parameters=p)

    z_index = numpy.array([0, 1, 2, 2, -1, -1])
    [psf, samples] = mPSFUtils.measureSinglePSFBeads(frdr, z_index, 6, x, y, zoom=2)

    assert (numpy.allclose(samples, numpy.array([1, 1, 2])))
    for i in range(1, psf.shape[0]):
        assert (numpy.allclose(psf[0, :, :], psf[i, :, :]/samples[i]))

    if False:
        with tifffile.TiffWriter("psf.tif") as tf:
            for i in range(psf.shape[0]):
                tf.save(psf[i, :, :].astype(numpy.float32))
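# test_mspb_1() checks the per-section sample counts. A sketch of the
# bookkeeping the z_index convention implies (hypothetical helper, not
# the measure_psf_utils implementation): each entry of z_index maps a
# movie frame to a PSF z section, with -1 meaning "skip this frame".
def sectionSamples(z_index):
    samples = numpy.zeros(numpy.max(z_index) + 1, dtype=numpy.int64)
    for zi in z_index:
        if (zi >= 0):
            samples[zi] += 1
    return samples

# For z_index = [0, 1, 2, 2, -1, -1] this returns [1, 1, 2], matching
# the 'samples' assertion above.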
def analyze(movie_name, mlist_name, settings_name):

    # Load parameters.
    parameters = params.ParametersDAO().initFromFile(settings_name)

    # Check for possibly v1.0 parameters.
    if not parameters.hasAttr("background_sigma"):
        raise Exception("Parameter 'background_sigma' is missing. Version 1.0 parameters?")

    # Create finding and fitting object.
    finder = find_peaks.initFindAndFit(parameters)

    # Create object for reading (non sCMOS) camera frames.
    #
    # FrameReaderStdNd2 (Nikon nd2 support) is assumed to be defined
    # elsewhere in this module; it is not part of the imports above.
    movie_ext = os.path.splitext(movie_name)[1]
    if movie_ext == '.nd2':
        frame_reader = FrameReaderStdNd2(movie_file = movie_name,
                                         parameters = parameters)
    else:
        frame_reader = analysisIO.FrameReaderStd(movie_file = movie_name,
                                                 parameters = parameters)

    # Create movie reader (uses frame_reader).
    movie_reader = analysisIO.MovieReader(frame_reader = frame_reader,
                                          parameters = parameters)

    # Create localization file writer.
    data_writer = analysisIO.DataWriterHDF5(data_file = mlist_name,
                                            parameters = parameters,
                                            sa_type = '3D-DAOSTORM')

    # Run the analysis.
    std_analysis.standardAnalysis(finder,
                                  movie_reader,
                                  data_writer,
                                  parameters)
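# Usage sketch for the 3D-DAOSTORM analyze() above (hypothetical file
# names). An '.nd2' movie selects the Nikon reader branch; any other
# extension falls through to analysisIO.FrameReaderStd.
#
# analyze("cells.nd2", "cells_mlist.hdf5", "dao_settings.xml")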
def measurePSFBeads(movie_name, zfile_name, beads_file, psf_name,
                    aoi_size=12, pixel_size=0.1, refine=False,
                    z_range=0.6, z_step=0.05):
    """
    movie_name - The name of the movie, presumably a z stack for PSF measurement.
    zfile_name - The text file containing the z offsets (in microns) for each frame.
    beads_file - The text file containing the locations of the beads.
    psf_name - The name of the file to save the measured PSF in (as a pickled Python dictionary).

    aoi_size - The size of the AOI in pixels. The final AOI is 2x this number.
    pixel_size - The pixel size in microns.
    refine - Align the measured PSF for each bead to the average PSF.
    z_range - The range the PSF should cover in microns.
    z_step - The z step size of the PSF.
    """
    # Load the z-offset information for the dax file.
    #
    # This is a text file with one line per frame that contains the
    # z-offset (in microns) for that frame. Each line is a space separated
    # valid, z_pos pair. If valid is 0 the frame will be ignored,
    # otherwise it will be used.
    #
    z_offsets = numpy.loadtxt(zfile_name)

    # Create an array specifying which frame corresponds to which
    # z slice in the PSF.
    #
    z_index = measurePSFUtils.makeZIndexArray(z_offsets, z_range, z_step)

    # Load the locations of the beads.
    #
    # This is a text file that contains the locations of the beads that
    # will be used to construct the PSF. Each line is a space separated
    # x, y pair of bead locations (in pixels).
    #
    # One way to create this file is to look at the bead movie with
    # visualizer.py and record the center positions of several beads.
    #
    data = numpy.loadtxt(beads_file, ndmin=2)
    bead_x = data[:, 1] + 1
    bead_y = data[:, 0] + 1

    # Create a reader of the movie.
    #
    # We assume that the bead stack was measured with a camera
    # that does not have a large pixel to pixel variation in
    # gain and offset. The exact offset and magnitude are not that
    # important, as we will estimate and subtract the offset and
    # normalize to 1.
    #
    # Movie (frame) reader.
    frame_reader = analysisIO.FrameReaderStd(movie_file=movie_name,
                                             camera_gain=1.0,
                                             camera_offset=0.0)

    # Measure PSFs for each bead.
    #
    total_samples = None
    psfs = []
    for i in range(bead_x.size):
        [psf, samples] = measurePSFUtils.measureSinglePSFBeads(frame_reader,
                                                               z_index,
                                                               aoi_size,
                                                               bead_x[i],
                                                               bead_y[i],
                                                               zoom=2)

        # Verify that we have at least one sample per z section, because if
        # we don't this almost surely means something is wrong.
        if (i == 0):
            for j in range(samples.size):
                assert (samples[j] > 0), "No data for PSF z section " + str(j)

        # Normalize by the number of samples per z section.
        #for j in range(samples.size):
        #    psf[j,:,:] = psf[j,:,:]/samples[j]

        # Keep track of the total number of samples.
        if total_samples is None:
            total_samples = samples
        else:
            total_samples += samples

        psfs.append(psf)

    # Set the PSF to have zero average on the X/Y boundaries. We are
    # matching the behavior of spliner.measure_psf here.
    #
    sum_psf = measurePSFUtils.sumPSF(psfs)
    for i in range(sum_psf.shape[0]):
        mean_edge = measurePSFUtils.meanEdge(sum_psf[i, :, :])
        for j in range(len(psfs)):
            psfs[j][i, :, :] -= mean_edge/float(len(psfs))

    # Align the PSFs to each other. This should hopefully correct for
    # any small errors in the input locations, and also for fields of
    # view that are not completely flat.
    #
    if refine:
        print("Refining PSF alignment.")

        # Normalize each PSF by the number of samples per z section.
        for psf in psfs:
            for i in range(samples.size):
                psf[i, :, :] = psf[i, :, :]/samples[i]

        [average_psf, i_score] = measurePSFUtils.alignPSFs(psfs)
    else:
        average_psf = measurePSFUtils.averagePSF(psfs)

    # Normalize PSF.
    #
    # This normalizes the PSF so that the sum of the absolute values
    # of each z section is 1.0. This only makes sense if the AOI is
    # large enough to capture all the photons, which might not be
    # true. It is not clear how important this is, as Spliner will
    # fit for the height anyway.
    #
    for i in range(average_psf.shape[0]):
        print("z plane {0:0d} has {1:0d} samples".format(i, total_samples[i]))

        section_sum = numpy.sum(numpy.abs(average_psf[i, :, :]))

        # Do we need this test? We already check that we have at
        # least one sample per z section.
        if (section_sum > 0.0):
            average_psf[i, :, :] = average_psf[i, :, :]/section_sum

    # Normalize to unity maximum height.
    if (numpy.max(average_psf) > 0.0):
        average_psf = average_psf/numpy.max(average_psf)
    else:
        print("Warning! Measured PSF maximum is zero or negative!")

    # Save the PSF (in image form).
    if True:
        tif_name = os.path.splitext(psf_name)[0]
        with tifffile.TiffWriter(tif_name + "_beads.tif") as tf:
            for i in range(average_psf.shape[0]):
                tf.save(average_psf[i, :, :].astype(numpy.float32))

    # Save the PSF.
    #
    # For historical reasons all the PSF z values are in nanometers.
    # At some point this should be fixed.
    #
    z_range = 1.0e+3 * z_range
    z_step = 1.0e+3 * z_step

    cur_z = -z_range
    z_vals = []
    for i in range(average_psf.shape[0]):
        z_vals.append(cur_z)
        cur_z += z_step

    psf_dict = {"psf": average_psf,
                "pixel_size": 0.5 * pixel_size,  # Halved to account for zoom=2.
                "type": "3D",
                "version": 1.0,
                "zmin": -z_range,
                "zmax": z_range,
                "zvals": z_vals}

    with open(psf_name, 'wb') as fp:
        pickle.dump(psf_dict, fp)
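# Usage sketch for measurePSFBeads() above (hypothetical file names and
# values). Per the docstring, z_offsets.txt has one "valid z_pos" pair
# per frame, and beads.txt one bead location per line (see the loadtxt()
# indexing above for the column order):
#
# z_offsets.txt
#   1 -0.30
#   1 -0.25
#   0 -0.20
#   ...
#
# measurePSFBeads("beads_zstack.tif", "z_offsets.txt", "beads.txt",
#                 "beads_psf.psf", aoi_size=12, refine=True)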