def test_io_3():
    """
    Test FITS movie IO.
    """
    frame_h = 50
    frame_w = 40
    n_frames = 10

    frame = numpy.random.randint(0, 60000, (frame_h, frame_w)).astype(numpy.uint16)
    movie_name = storm_analysis.getPathOutputTest("test_dataio.fits")

    # Write the FITS movie, the same random frame repeated n_frames times.
    writer = datawriter.inferWriter(movie_name)
    for _ in range(n_frames):
        writer.addFrame(frame)
    writer.close()

    # Read the movie back and verify its geometry and first frame.
    reader = datareader.inferReader(movie_name)
    [mw, mh, ml] = reader.filmSize()
    assert (mh == frame_h)
    assert (mw == frame_w)
    assert (ml == n_frames)
    assert (numpy.allclose(frame, reader.loadAFrame(0)))
def test_io_3():
    """
    Test FITS movie IO.
    """
    height = 50
    width = 40
    length = 10

    image = numpy.random.randint(0, 60000, (height, width)).astype(numpy.uint16)
    path = storm_analysis.getPathOutputTest("test_dataio.fits")

    # Write FITS movie.
    fits_out = datawriter.inferWriter(path)
    for _ in range(length):
        fits_out.addFrame(image)
    fits_out.close()

    # Read & check.
    fits_in = datareader.inferReader(path)
    [mw, mh, ml] = fits_in.filmSize()
    assert(mh == height)
    assert(mw == width)
    assert(ml == length)
    assert(numpy.allclose(image, fits_in.loadAFrame(0)))
def rollingBallSub(movie_in, movie_out, radius, sigma, offset=100):
    """
    Perform rolling-ball background subtraction on a movie.

    movie_in - Input movie file name.
    movie_out - Output movie file name.
    radius - Rolling ball radius (passed to RollingBall).
    sigma - Gaussian smoothing sigma (passed to RollingBall).
    offset - Camera baseline counts; subtracted before background
             removal and added back afterwards (default 100).
    """
    input_movie = datareader.inferReader(movie_in)
    output_dax = datawriter.inferWriter(movie_out)

    rb = RollingBall(radius, sigma)
    for i in range(input_movie.filmSize()[2]):
        if ((i % 10) == 0):
            print("Processing frame", i)

        image = input_movie.loadAFrame(i) - offset
        # Removed an unreachable 'if False:' branch that implemented a
        # gaussian low-pass alternative; it was dead code and referenced
        # the numpy.float alias, which was removed in NumPy 1.24.
        sub = rb.removeBG(image)
        output_dax.addFrame(sub + offset)

    output_dax.close()
def rollingBallSub(movie_in, movie_out, radius, sigma, offset = 100):
    """
    Rolling-ball background subtraction, frame by frame.

    movie_in - Input movie file name.
    movie_out - Output movie file name.
    radius - Rolling ball radius (passed to RollingBall).
    sigma - Gaussian smoothing sigma (passed to RollingBall).
    offset - Camera baseline counts; removed before and restored
             after background subtraction (default 100).
    """
    input_movie = datareader.inferReader(movie_in)
    output_dax = datawriter.inferWriter(movie_out)

    rb = RollingBall(radius, sigma)
    for i in range(input_movie.filmSize()[2]):
        if((i%10) == 0):
            print("Processing frame", i)

        image = input_movie.loadAFrame(i) - offset
        # An unreachable 'if False:' gaussian low-pass branch was removed;
        # it was dead code and used the long-deprecated numpy.float alias.
        sub = rb.removeBG(image)
        output_dax.addFrame(sub + offset)

    output_dax.close()
def saveAsDax(file_name, A, measured_pixels):
    """
    Save A matrix in dax format for visualization purposes.

    file_name - Name of the dax movie to create.
    A - 2D matrix; each column becomes one movie frame.
    measured_pixels - Edge size of the (square) frame each column
                      is reshaped into; column length must equal
                      measured_pixels * measured_pixels.
    """
    import storm_analysis.sa_library.datawriter as datawriter
    dx = datawriter.inferWriter(file_name)
    for i in range(A.shape[1]):
        # numpy.dot(A, e_i) with a one-hot vector e_i is exactly column i
        # of A, so slice the column directly instead of performing a full
        # matrix-vector multiply per frame.
        b = A[:, i].reshape(measured_pixels, measured_pixels)
        # Scale up so the integer dax format retains dynamic range.
        dx.addFrame(10000.0 * b)
    dx.close()
def test_io_5():
    """
    Test TIF movie IO (1 frame, 1 page).
    """
    frame_h = 50
    frame_w = 40
    n_frames = 1
    movie_name = storm_analysis.getPathOutputTest("test_dataio.tif")

    ## Standard Tiff.
    frame = numpy.random.randint(0, 60000, (frame_h, frame_w)).astype(numpy.uint16)

    # Write tif movie.
    writer = datawriter.inferWriter(movie_name)
    for _ in range(n_frames):
        writer.addFrame(frame)
    writer.close()

    # Read & check.
    reader = datareader.inferReader(movie_name)
    [mw, mh, ml] = reader.filmSize()
    assert (mh == frame_h)
    assert (mw == frame_w)
    assert (ml == n_frames)
    assert (numpy.allclose(frame, reader.loadAFrame(0)))

    ## 'imagej' Tiff.
    stack = numpy.random.randint(0, 60000, (n_frames, frame_h, frame_w)).astype(
        numpy.uint16)
    movie_name = storm_analysis.getPathOutputTest("test_dataio.tif")

    # Write tif movie.
    with tifffile.TiffWriter(movie_name, imagej=True) as tf:
        tf.save(stack, truncate=True)

    # Read & check.
    reader = datareader.inferReader(movie_name)
    [mw, mh, ml] = reader.filmSize()
    assert (mh == frame_h)
    assert (mw == frame_w)
    assert (ml == n_frames)
    assert (numpy.allclose(stack[0, :, :], reader.loadAFrame(0)))
def test_io_5():
    """
    Test TIF movie IO (1 frame, 1 page).
    """
    height = 50
    width = 40
    length = 1
    tif_name = storm_analysis.getPathOutputTest("test_dataio.tif")

    ## Standard Tiff.
    image = numpy.random.randint(0, 60000, (height, width)).astype(numpy.uint16)

    # Write tif movie.
    tif_out = datawriter.inferWriter(tif_name)
    for _ in range(length):
        tif_out.addFrame(image)
    tif_out.close()

    # Read & check.
    tif_in = datareader.inferReader(tif_name)
    [mw, mh, ml] = tif_in.filmSize()
    assert(mh == height)
    assert(mw == width)
    assert(ml == length)
    assert(numpy.allclose(image, tif_in.loadAFrame(0)))

    ## 'imagej' Tiff.
    image = numpy.random.randint(0, 60000, (length, height, width)).astype(numpy.uint16)
    tif_name = storm_analysis.getPathOutputTest("test_dataio.tif")

    # Write tif movie.
    with tifffile.TiffWriter(tif_name, imagej = True) as tf:
        tf.save(image, truncate = True)

    # Read & check.
    tif_in = datareader.inferReader(tif_name)
    [mw, mh, ml] = tif_in.filmSize()
    assert(mh == height)
    assert(mw == width)
    assert(ml == length)
    assert(numpy.allclose(image[0,:,:], tif_in.loadAFrame(0)))
def waveletBGRSub(movie_in, movie_out, wavelet_type, wavelet_level, iterations, threshold, offset = 100):
    """
    Wavelet background subtraction, applied frame by frame.
    """
    reader = datareader.inferReader(movie_in)
    writer = datawriter.inferWriter(movie_out)

    wbgr = WaveletBGR(wavelet_type = wavelet_type)
    n_frames = reader.filmSize()[2]
    for frame_no in range(n_frames):
        if (frame_no % 10) == 0:
            print("Processing frame", frame_no)

        # Remove the camera baseline, subtract the estimated background,
        # then restore the baseline before writing the frame out.
        frame = reader.loadAFrame(frame_no) - offset
        cleaned = wbgr.removeBG(frame, iterations, threshold, wavelet_level)
        writer.addFrame(cleaned + offset)

    writer.close()
def waveletBGRSub(movie_in, movie_out, wavelet_type, wavelet_level, iterations, threshold, offset=100):
    """
    Remove the background of each frame of a movie using wavelets.
    """
    src = datareader.inferReader(movie_in)
    dst = datawriter.inferWriter(movie_out)

    bg_remover = WaveletBGR(wavelet_type=wavelet_type)
    for idx in range(src.filmSize()[2]):
        # Progress report every 10th frame.
        if (idx % 10) == 0:
            print("Processing frame", idx)

        img = src.loadAFrame(idx) - offset
        result = bg_remover.removeBG(img, iterations, threshold, wavelet_level)
        dst.addFrame(result + offset)

    dst.close()
def simulate(self, dax_file, bin_file, n_frames, verbosity=1):
    """
    Run the simulation, writing a movie and a reference HDF5 file.

    dax_file - Name of the movie file to create.
    bin_file - Input HDF5 localizations file (may contain none).
    n_frames - Number of frames to simulate.
    verbosity - Print a progress line every 'verbosity' frames; must
                be greater than zero (default 1).

    Side effects: creates dax_file, <base>_ref.hdf5 and
    <base>_sim_params.txt.
    """
    # The check is (verbosity > 0); the original message incorrectly
    # said "greater than 1".
    assert (verbosity > 0), "Verbosity must be greater than 0."

    #
    # Initialization.
    #
    movie_data = datawriter.inferWriter(dax_file,
                                        width=self.x_size,
                                        height=self.y_size)
    with saH5Py.SAH5Py(bin_file) as h5:
        h5_data_in = h5.getLocalizations()
    if not h5_data_in:
        print("Warning! No localizations input HDF5 file!")
    out_fname_base = dax_file[:-4]
    h5_data_out = saH5Py.SAH5Py(filename=out_fname_base + "_ref.hdf5",
                                is_existing=False,
                                overwrite=True)
    h5_data_out.setMovieInformation(self.x_size, self.y_size, n_frames, "")
    sim_settings = open(out_fname_base + "_sim_params.txt", "w")

    #
    # Create the user-specified class instances that will do
    # most of the actual work of the simulation.
    #
    bg = self.bg_factory(sim_settings, self.x_size, self.y_size, h5_data_in)
    cam = self.cam_factory(sim_settings, self.x_size, self.y_size, h5_data_in)

    drift = None
    if self.drift_factory is not None:
        drift = self.drift_factory(sim_settings, self.x_size, self.y_size,
                                   h5_data_in)

    if h5_data_in:
        pp = self.pphys_factory(sim_settings, self.x_size, self.y_size,
                                h5_data_in)
    psf = self.psf_factory(sim_settings, self.x_size, self.y_size, h5_data_in)

    sim_settings.write(
        json.dumps({
            "simulation": {
                "bin_file": bin_file,
                "x_size": str(self.x_size),
                "y_size": str(self.y_size)
            }
        }) + "\n")

    h5_data_out.setPixelSize(psf.nm_per_pixel)

    #
    # Generate the simulated movie.
    #
    for i in range(n_frames):

        # Generate the new image.
        image = numpy.zeros((self.x_size, self.y_size))

        if h5_data_in:
            # Get the emitters that are on in the current frame.
            cur_h5 = pp.getEmitters(i)

            if ((i % verbosity) == 0):
                print("Frame", i, cur_h5['x'].size, "emitters")

            # Dither points x,y values if requested. This is useful for things
            # like looking for pixel level biases in simulated data with gridded
            # localizations.
            #
            if self.dither:
                cur_h5['x'] += numpy.random.uniform(
                    size=cur_h5['x'].size) - 0.5
                cur_h5['y'] += numpy.random.uniform(
                    size=cur_h5['y'].size) - 0.5

        # Add background to image.
        image += bg.getBackground(i)

        if h5_data_in:
            # Set 'bg' parameter of the emitters.
            cur_h5 = bg.getEmitterBackground(cur_h5)

            # Apply drift to the localizations.
            if drift is not None:
                drift.drift(i, cur_h5)

            # Foreground
            image += psf.getPSFs(cur_h5)

        # Camera
        image = cam.readImage(image)

        # Save the image.
        movie_data.addFrame(numpy.transpose(image))

        if h5_data_in:
            # Save the molecule locations.
            h5_data_out.addLocalizations(cur_h5, i)

    movie_data.close()
    h5_data_out.close()
    sim_settings.close()
def simulate(self, dax_file, bin_file, n_frames):
    """
    Run the simulation, writing a movie and a reference HDF5 file.

    dax_file - Name of the movie file to create.
    bin_file - Input HDF5 localizations file.
    n_frames - Number of frames to simulate.

    Side effects: creates dax_file, <base>_ref.hdf5 and
    <base>_sim_params.txt on disk.
    """
    #
    # Initialization.
    #
    movie_data = datawriter.inferWriter(dax_file, width = self.x_size, height = self.y_size)
    with saH5Py.SAH5Py(bin_file) as h5:
        h5_data_in = h5.getLocalizations()
    # The output base name drops the input file's 4-character extension.
    out_fname_base = dax_file[:-4]
    h5_data_out = saH5Py.SAH5Py(filename = out_fname_base + "_ref.hdf5", is_existing = False, overwrite = True)
    h5_data_out.setMovieInformation(self.x_size, self.y_size, n_frames, "")
    sim_settings = open(out_fname_base + "_sim_params.txt", "w")

    #
    # Create the user-specified class instances that will do
    # most of the actual work of the simulation.
    #
    # NOTE(review): each factory also receives the settings file handle,
    # presumably so it can record its own parameters — confirm against
    # the factory implementations.
    bg = self.bg_factory(sim_settings, self.x_size, self.y_size, h5_data_in)
    cam = self.cam_factory(sim_settings, self.x_size, self.y_size, h5_data_in)
    # Drift is optional; the other simulation components are required.
    drift = None
    if self.drift_factory is not None:
        drift = self.drift_factory(sim_settings, self.x_size, self.y_size, h5_data_in)
    pp = self.pphys_factory(sim_settings, self.x_size, self.y_size, h5_data_in)
    psf = self.psf_factory(sim_settings, self.x_size, self.y_size, h5_data_in)

    # Record the top-level simulation parameters as a JSON line.
    sim_settings.write(json.dumps({"simulation" : {"bin_file" : bin_file,
                                                   "x_size" : str(self.x_size),
                                                   "y_size" : str(self.y_size)}}) + "\n")
    h5_data_out.setPixelSize(psf.nm_per_pixel)

    #
    # Generate the simulated movie.
    #
    for i in range(n_frames):

        # Generate the new image.
        image = numpy.zeros((self.x_size, self.y_size))

        # Get the emitters that are on in the current frame.
        cur_h5 = pp.getEmitters(i)

        print("Frame", i, cur_h5['x'].size, "emitters")

        # Dither points x,y values if requested. This is useful for things
        # like looking for pixel level biases in simulated data with gridded
        # localizations.
        #
        if self.dither:
            cur_h5['x'] += numpy.random.uniform(size = cur_h5['x'].size) - 0.5
            cur_h5['y'] += numpy.random.uniform(size = cur_h5['y'].size) - 0.5

        # Add background to image.
        image += bg.getBackground(i)

        # Set 'bg' parameter of the emitters.
        cur_h5 = bg.getEmitterBackground(cur_h5)

        # Apply drift to the localizations.
        if drift is not None:
            drift.drift(i, cur_h5)

        # Foreground
        image += psf.getPSFs(cur_h5)

        # Camera
        image = cam.readImage(image)

        # Save the image. The transpose swaps the x/y axes for the
        # movie writer's row/column convention.
        movie_data.addFrame(numpy.transpose(image))

        # Save the molecule locations.
        h5_data_out.addLocalizations(cur_h5, i)

    movie_data.close()
    h5_data_out.close()
    sim_settings.close()