def test_afLC():
    afc = afLC.AFLockC(offset=0.0)
    cx = 16.0
    cy = 32.0
    for i in range(10):
        x1_off = cx + 10.0 * (random.random() - 0.5)
        y1_off = cy + 40.0 * (random.random() - 0.5)
        x2_off = cx + 10.0 * (random.random() - 0.5)
        y2_off = cy + 40.0 * (random.random() - 0.5)

        im1 = dg.drawGaussiansXY((32, 64),
                                 numpy.array([x1_off]),
                                 numpy.array([y1_off]))
        im2 = dg.drawGaussiansXY((32, 64),
                                 numpy.array([x2_off]),
                                 numpy.array([y2_off]))

        [dx, dy, res, mag] = afc.findOffset(im1, im2)
        assert res.success
        assert numpy.allclose(numpy.array([dx, dy]),
                              numpy.array([x1_off - x2_off, y1_off - y2_off]),
                              atol=1.0e-3,
                              rtol=1.0e-3)
def test_afLC_ds():
    downsample = 4
    afc = afLC.AFLockC(offset=0.0, downsample=downsample)
    cx = 32.0
    cy = 64.0
    for i in range(10):
        x1_off = cx + 10.0 * (random.random() - 0.5)
        y1_off = cy + 40.0 * (random.random() - 0.5)
        x2_off = cx + 10.0 * (random.random() - 0.5)
        y2_off = cy + 40.0 * (random.random() - 0.5)

        im1 = dg.drawGaussiansXY((64, 128),
                                 numpy.array([x1_off]),
                                 numpy.array([y1_off]),
                                 sigma=downsample)
        im2 = dg.drawGaussiansXY((64, 128),
                                 numpy.array([x2_off]),
                                 numpy.array([y2_off]),
                                 sigma=downsample)

        [dx, dy, res, mag] = afc.findOffset(im1, im2)
        assert res.success
        assert numpy.allclose(numpy.array([downsample * dx, downsample * dy]),
                              numpy.array([x1_off - x2_off, y1_off - y2_off]),
                              atol=1.0e-2,
                              rtol=1.0e-2)
def test_afLC_ds_u16_nm():
    downsample = 4
    afc = afLC.AFLockC(offset=0.0, downsample=downsample)
    cx = 32.0
    cy = 64.0
    for i in range(10):
        x1_off = cx + 10.0 * (random.random() - 0.5)
        y1_off = cy + 40.0 * (random.random() - 0.5)
        x2_off = 3 * cx + 10.0 * (random.random() - 0.5)
        y2_off = cy + 40.0 * (random.random() - 0.5)

        im = dg.drawGaussiansXY((128, 128),
                                numpy.array([x1_off, x2_off]),
                                numpy.array([y1_off, y2_off]),
                                sigma=downsample)
        im = (100.0 * im).astype(numpy.uint16)

        [dx, dy, res, mag] = afc.findOffsetU16NM(im)
        assert res, "Fitting failed."
        assert numpy.allclose(numpy.array([downsample * dx, downsample * dy]),
                              numpy.array([x1_off - x2_off + 2.0 * cx, y1_off - y2_off]),
                              atol=1.0e-2,
                              rtol=1.0e-2)
def getPSF(self, z_value, shape=None, normalize=False):
    cx = numpy.array([0.5 * float(shape[0])])
    cy = numpy.array([0.5 * float(shape[1])])
    return dg.drawGaussiansXY(shape, cx, cy, sigma=self.sigma, res=self.res)
def test_dgxy_1():
    image = dg.drawGaussiansXY((20, 20), numpy.array([10.0]), numpy.array([10.0]))
    dx = numpy.arange(-10.0, 10.0, 1.0)
    g_slice = numpy.exp(-dx * dx / 2.0)
    assert numpy.allclose(g_slice, image[10, :], atol=1.0e-4)
    assert numpy.allclose(g_slice, image[:, 10], atol=1.0e-4)
def gaussianPSF(shape, sigma):
    """
    Return a normalized 2D Gaussian, usually used for creating
    MatchedFilter objects.
    """
    psf = dg.drawGaussiansXY(shape,
                             numpy.array([0.5 * shape[0]]),
                             numpy.array([0.5 * shape[1]]),
                             sigma=sigma)
    return psf / numpy.sum(psf)
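A minimal usage sketch for gaussianPSF, assuming the matchedFilterC module that the other snippets in this section already use; the shape, sigma, and test image below are illustrative values, not part of the original code:

# Hypothetical usage sketch; (16, 16) and 1.5 are example values.
psf = gaussianPSF((16, 16), 1.5)
flt = matchedFilterC.MatchedFilter(psf)

# Convolve a random test image with the matched filter, as the
# peak finders in this section do with their own filters.
image = numpy.random.uniform(size=(16, 16))
smoothed = flt.convolve(image)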
def __init__(self, sim_fp, x_size, y_size, h5_data, photons=100, sigma=100.0):
    super(GaussianBackground, self).__init__(sim_fp, x_size, y_size, h5_data)
    self.saveJSON({"background": {"class": "GaussianBackground",
                                  "photons": str(photons),
                                  "sigma": str(sigma)}})
    self.bg_image = dg.drawGaussiansXY((x_size, y_size),
                                       numpy.array([0.5 * x_size]),
                                       numpy.array([0.5 * y_size]),
                                       sigma=sigma)
    self.bg_image = photons * self.bg_image / numpy.max(self.bg_image)
def test_afLC_offset():
    afc = afLC.AFLockC(offset=0.0)
    cx = 16.0
    cy = 32.0
    for i in range(10):
        x1_off = cx + random.randint(-5, 5)
        y1_off = cy + random.randint(-20, 20)
        x2_off = cx + random.randint(-5, 5)
        y2_off = cy + random.randint(-20, 20)

        im1 = dg.drawGaussiansXY((32, 64), numpy.array([x1_off]), numpy.array([y1_off]))
        im2 = dg.drawGaussiansXY((32, 64), numpy.array([x2_off]), numpy.array([y2_off]))

        afc.findOffset(im1, im2)
        offset = afc.getOffset()
        assert numpy.allclose(offset, numpy.array([x1_off - x2_off, y1_off - y2_off]))
def translate(self, x):
    # Only redraw the Gaussian if there is no cached image or the
    # requested offset has changed since the last call.
    if (self.g_image is None) or (not numpy.allclose(self.g_x, x, atol=1.0e-12, rtol=1.0e-12)):
        self.g_image = dg.drawGaussiansXY((self.x_size, self.y_size),
                                          numpy.array([self.cx + x[0]]),
                                          numpy.array([self.cy + x[1]]),
                                          sigma=self.sigma)
        self.g_x = numpy.copy(x)
    return self.g_image
def newImage(self, new_image):
    fitting.PeakFinder.newImage(self, new_image)

    # If it does not already exist, create a gaussian filter object.
    if self.mfilter is None:
        psf = dg.drawGaussiansXY(new_image.shape,
                                 numpy.array([0.5 * new_image.shape[0]]),
                                 numpy.array([0.5 * new_image.shape[1]]),
                                 sigma=self.filter_sigma)
        psf = psf / numpy.sum(psf)
        self.mfilter = matchedFilterC.MatchedFilter(psf)
def test_c2dg():
    clf = cl2DG.CorrLockFitter(roi_size=8, sigma=1.0, threshold=0.1)
    for i in range(10):
        x = i / 10.0
        image1 = dg.drawGaussiansXY((50, 200),
                                    numpy.array([25 + x]),
                                    numpy.array([124 + x + 0.2]))
        [ox, oy, success] = clf.findFitPeak(image1)
        assert success
        assert numpy.allclose(numpy.array([ox, oy]),
                              numpy.array([25.0 + x, 124 + x + 0.2]),
                              atol=1.0e-3,
                              rtol=1.0e-3)
def test_hess():
    dx = 1.0e-6

    afc_py = afLC.AFLockPy(offset=0.0)
    afc = afLC.AFLockC(offset=0.0)

    x1_off = 8.0
    y1_off = 16.0
    x2_off = 8.0
    y2_off = 16.0

    im1 = dg.drawGaussiansXY((32, 64), numpy.array([x1_off]), numpy.array([y1_off]))
    im2 = dg.drawGaussiansXY((32, 64), numpy.array([x2_off]), numpy.array([y2_off]))

    # Initialize fitters.
    afc_py.findOffset(im1, im2)
    afc.findOffset(im1, im2)

    for i in range(10):
        v1 = numpy.random.normal(size=2)

        # Exact (analytic) Hessians.
        hce_py = afc_py.hessCost(v1)
        hce_c = afc.hessCost(v1)

        # Finite-difference approximation computed from the gradient.
        hca = numpy.zeros((2, 2))
        v2 = numpy.copy(v1)
        v2[0] += dx
        hca[0, :] = (afc_py.gradCost(v2) - afc_py.gradCost(v1)) / dx

        v2 = numpy.copy(v1)
        v2[1] += dx
        hca[1, :] = (afc_py.gradCost(v2) - afc_py.gradCost(v1)) / dx

        assert numpy.allclose(hca, hce_py, atol=1.0e-3, rtol=1.0e-3)
        assert numpy.allclose(hca, hce_c, atol=1.0e-3, rtol=1.0e-3)
def test_mspb_3():
    """
    Test (single) PSF measurement with drift.
    """
    # Make test movie.
    im_max = 1000.0
    n_pts = 10
    x = 7.0
    y = 11.0
    drift_xy = numpy.random.uniform(size=(n_pts, 2))
    psf_movie = storm_analysis.getPathOutputTest("psf_movie.tif")
    with tifffile.TiffWriter(psf_movie) as tf:
        for i in range(n_pts):
            image = dg.drawGaussiansXY((20, 20),
                                       numpy.array([x + drift_xy[i][0]]),
                                       numpy.array([y + drift_xy[i][1]]))
            image = image * im_max
            tf.save(image.astype(numpy.float32))

    # Parameters.
    p = params.ParametersDAO()
    p.changeAttr("camera_gain", 1.0)
    p.changeAttr("camera_offset", 0.0)

    # Frame reader.
    frdr = analysisIO.FrameReaderStd(movie_file=psf_movie, parameters=p)

    # numpy.int64 is used here as the numpy.int alias was removed from numpy.
    z_index = numpy.zeros(n_pts).astype(numpy.int64) - 1
    z_index[0] = 0
    [psf0, samples] = mPSFUtils.measureSinglePSFBeads(frdr, z_index, 6,
                                                      x + drift_xy[0][0],
                                                      y + drift_xy[0][1],
                                                      zoom=2)

    for i in range(1, n_pts):
        z_index = numpy.zeros(n_pts).astype(numpy.int64) - 1
        z_index[i] = 0
        [psf, samples] = mPSFUtils.measureSinglePSFBeads(frdr, z_index, 6, x, y,
                                                         drift_xy=drift_xy, zoom=2)
        assert numpy.max(numpy.abs(psf0 - psf) / numpy.max(psf)) < 0.05
def test_mspb_2():
    """
    Test (single) PSF measurement, no drift, recentering.

    The maximum relative difference is typically on the order of 2%.
    """
    # Make test movie.
    im_max = 1000.0
    x = 7.0 + numpy.random.uniform(size=10)
    y = 11.0 + numpy.random.uniform(size=10)
    psf_movie = storm_analysis.getPathOutputTest("psf_movie.tif")
    with tifffile.TiffWriter(psf_movie) as tf:
        for i in range(x.size):
            image = dg.drawGaussiansXY((20, 20),
                                       numpy.array([x[i]]),
                                       numpy.array([y[i]]))
            image = image * im_max
            tf.save(image.astype(numpy.float32))

    # Parameters.
    p = params.ParametersDAO()
    p.changeAttr("camera_gain", 1.0)
    p.changeAttr("camera_offset", 0.0)

    # Frame reader.
    frdr = analysisIO.FrameReaderStd(movie_file=psf_movie, parameters=p)

    z_index = numpy.zeros(x.size).astype(numpy.int64) - 1
    z_index[0] = 0
    [psf0, samples] = mPSFUtils.measureSinglePSFBeads(frdr, z_index, 6, x[0], y[0], zoom=2)

    for i in range(1, x.size):
        z_index = numpy.zeros(x.size).astype(numpy.int64) - 1
        z_index[i] = 0
        [psf, samples] = mPSFUtils.measureSinglePSFBeads(frdr, z_index, 6, x[i], y[i], zoom=2)
        assert numpy.max(numpy.abs(psf0 - psf) / numpy.max(psf)) < 0.05
def test_mspb_1():
    """
    Test (single) PSF measurement, no drift.
    """
    # Make test movie.
    x = 7.2
    y = 9.8
    psf_movie = storm_analysis.getPathOutputTest("psf_movie.tif")
    image = 1000.0 * dg.drawGaussiansXY((20, 20), numpy.array([x]), numpy.array([y]))
    with tifffile.TiffWriter(psf_movie) as tf:
        for i in range(6):
            tf.save(image.astype(numpy.float32))

    # Parameters.
    p = params.ParametersDAO()
    p.changeAttr("camera_gain", 1.0)
    p.changeAttr("camera_offset", 0.0)

    # Frame reader.
    frdr = analysisIO.FrameReaderStd(movie_file=psf_movie, parameters=p)

    z_index = numpy.array([0, 1, 2, 2, -1, -1])
    [psf, samples] = mPSFUtils.measureSinglePSFBeads(frdr, z_index, 6, x, y, zoom=2)

    assert numpy.allclose(samples, numpy.array([1, 1, 2]))
    for i in range(1, psf.shape[0]):
        assert numpy.allclose(psf[0, :, :], psf[i, :, :] / samples[i])

    if False:
        with tifffile.TiffWriter("psf.tif") as tf:
            for i in range(psf.shape[0]):
                tf.save(psf[i, :, :].astype(numpy.float32))
def test_cl_1():
    sigma = 2.0
    cl_fit = clf.CorrLockFitter(roi_size=8, sigma=sigma, threshold=10)

    im_size = (50, 200)
    reps = 10

    # Test.
    for i in range(reps):
        tx = 0.5 * im_size[0] + random.uniform(-5.0, 5.0)
        ty = 0.5 * im_size[1] + random.uniform(-5.0, 5.0)
        image = dg.drawGaussiansXY(im_size,
                                   numpy.array([tx]),
                                   numpy.array([ty]),
                                   sigma=sigma,
                                   height=50.0)
        [mx, my, success] = cl_fit.findFitPeak(image)
        assert success
        assert numpy.abs(mx - tx) < 1.0e-2
        assert numpy.abs(my - ty) < 1.0e-2

    cl_fit.cleanup()
def findOffsets(base_name, params_file, background_scale=4.0, foreground_scale=1.0, im_slice=None):
    """
    The 'main' function of this module.

    base_name - The basename for the group of movies.
    params_file - An analysis XML file containing the details for this experiment.
    background_scale - Features in the background change on this scale (in pixels)
                       or more slowly.
    foreground_scale - Features that change on this scale are likely foreground.
    im_slice - A slice object, created for example with numpy.s_, to limit the
               analysis to a smaller AOI.

    Notes:
      1. This only checks a limited range of offsets between the two channels.
      2. This assumes that the movies are longer than just a few frames.
    """
    n_tests = 10
    search_range = 5

    # Load parameters.
    parameters = params.ParametersMultiplane().initFromFile(params_file)

    # Load the movies from each camera.
    n_channels = 0
    movies = []
    for ext in mpUtil.getExtAttrs(parameters):
        movie_name = base_name + parameters.getAttr(ext)
        movies.append(datareader.inferReader(movie_name))
        n_channels += 1
    print("Found", n_channels, "movies.")

    # Load sCMOS calibration data.
    offsets = []
    gains = []
    for calib_name in mpUtil.getCalibrationAttrs(parameters):
        [offset, variance, gain, rqe] = analysisIO.loadCMOSCalibration(parameters.getAttr(calib_name))
        offsets.append(offset)
        gains.append(1.0 / gain)
    assert (len(offsets) == n_channels)

    # Load the plane to plane mapping data & create affine transform objects.
    mappings = {}
    with open(parameters.getAttr("mapping"), 'rb') as fp:
        mappings = pickle.load(fp)

    atrans = []
    for i in range(n_channels - 1):
        xt = mappings["0_" + str(i + 1) + "_x"]
        yt = mappings["0_" + str(i + 1) + "_y"]
        atrans.append(affineTransformC.AffineTransform(xt=xt, yt=yt))

    # Create background and foreground variance filters.
    #
    # FIXME: Is this right for movies that are not square?
    #
    [y_size, x_size] = movies[0].filmSize()[:2]
    if im_slice is not None:
        y_size = im_slice[0].stop - im_slice[0].start
        x_size = im_slice[1].stop - im_slice[1].start

    psf = dg.drawGaussiansXY((x_size, y_size),
                             numpy.array([0.5 * x_size]),
                             numpy.array([0.5 * y_size]),
                             sigma=background_scale)
    psf = psf / numpy.sum(psf)
    bg_filter = matchedFilterC.MatchedFilter(psf)

    psf = dg.drawGaussiansXY((x_size, y_size),
                             numpy.array([0.5 * x_size]),
                             numpy.array([0.5 * y_size]),
                             sigma=foreground_scale)
    psf = psf / numpy.sum(psf)
    fg_filter = matchedFilterC.MatchedFilter(psf)
    var_filter = matchedFilterC.MatchedFilter(psf * psf)

    # Check background estimation. Note: 'gains' (not 'gain') is the
    # list defined above; the original used an undefined name here.
    if False:
        frame = loadImage(movies[0], 0, offsets[0], gains[0])
        frame_bg = estimateBackground(frame, bg_filter, fg_filter, var_filter)
        with tifffile.TiffWriter("bg_estimate.tif") as tif:
            tif.save(frame.astype(numpy.float32))
            tif.save(frame_bg.astype(numpy.float32))
            tif.save((frame - frame_bg).astype(numpy.float32))

    votes = numpy.zeros((n_channels - 1, 2 * search_range + 1))
    for i in range(n_tests):
        print("Test", i)

        # Load reference frame.
        ref_frame = loadImage(movies[0], search_range + i, offsets[0], gains[0])
        if im_slice is not None:
            ref_frame = ref_frame[im_slice]
        ref_frame_bg = estimateBackground(ref_frame, bg_filter, fg_filter, var_filter)
        ref_frame -= ref_frame_bg

        # Load test frames and measure correlation.
        for j in range(n_channels - 1):
            best_corr = 0.0
            best_offset = 0
            for k in range(-search_range, search_range + 1):
                test_frame = loadImage(movies[j + 1],
                                       search_range + i + k,
                                       offsets[j + 1],
                                       gains[j + 1],
                                       transform=atrans[j])
                if im_slice is not None:
                    test_frame = test_frame[im_slice]
                test_frame_bg = estimateBackground(test_frame, bg_filter, fg_filter, var_filter)
                test_frame -= test_frame_bg
                test_frame_corr = numpy.sum(ref_frame * test_frame) / numpy.sum(test_frame)
                if (test_frame_corr > best_corr):
                    best_corr = test_frame_corr
                    best_offset = k + search_range
            votes[j, best_offset] += 1

    # Print results.
    print("Offset votes:")
    print(votes)

    frame_offsets = [0]
    frame_offsets += list(numpy.argmax(votes, axis=1) - search_range)
    print("Best offsets:")
    for i in range(n_channels):
        print(str(i) + ": " + str(frame_offsets[i]))

    # Create stacks with optimal offsets.
    print("Saving image stacks.")
    for i in range(n_channels):
        with tifffile.TiffWriter(base_name + "_offsets_ch" + str(i) + ".tif") as tif:
            for j in range(5):
                if (i == 0):
                    frame = loadImage(movies[i],
                                      search_range + frame_offsets[i] + j,
                                      offsets[i],
                                      gains[i])
                else:
                    frame = loadImage(movies[i],
                                      search_range + frame_offsets[i] + j,
                                      offsets[i],
                                      gains[i],
                                      transform=atrans[i - 1])
                if im_slice is not None:
                    frame = frame[im_slice]
                frame_bg = estimateBackground(frame, bg_filter, fg_filter, var_filter)
                frame -= frame_bg
                tif.save(frame.astype(numpy.float32))

    return frame_offsets
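A hedged sketch of how findOffsets might be invoked; the movie basename and XML file name are hypothetical, and the AOI slice simply demonstrates the im_slice parameter described in the docstring:

# Hypothetical call; "movie_01" and "multiplane.xml" are made-up names.
# numpy.s_ builds the slice object described in the docstring above.
frame_offsets = findOffsets("movie_01", "multiplane.xml",
                            im_slice=numpy.s_[100:300, 100:300])
print(frame_offsets)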
def fitROI(self, mx, my, roi):
    """
    Pass to aligner and find optimal offset.
    """
    self.c2dg.setImage(roi)
    [disp, success, fun, status] = self.c2dg.maximize()
    if success or (status == 2):
        return [my + disp[0] - 0.5, mx + disp[1] - 0.5, True]
    else:
        return [0, 0, False]


if (__name__ == "__main__"):

    #
    # The unit tests, if this was a unit. Not included in the official
    # tests due to the dependence on the storm-analysis project.
    #
    if True:
        clf = CorrLockFitter(roi_size=8, sigma=1.0, threshold=0.1)
        for i in range(10):
            x = i / 10.0
            image1 = dg.drawGaussiansXY((50, 200),
                                        numpy.array([25 + x]),
                                        numpy.array([124 + x + 0.2]))
            [ox, oy, success] = clf.findFitPeak(image1)
            assert success
            assert numpy.allclose(numpy.array([ox, oy]),
                                  numpy.array([25.0 + x, 124 + x + 0.2]),
                                  atol=1.0e-3,
                                  rtol=1.0e-3)
def findOffsets(base_name, params_file, background_scale=4.0, foreground_scale=1.0):
    """
    The 'main' function of this module.

    base_name - The basename for the group of movies.
    params_file - An analysis XML file containing the details for this experiment.
    background_scale - Features in the background change on this scale (in pixels)
                       or more slowly.
    foreground_scale - Features that change on this scale are likely foreground.

    Notes:
      1. This only checks a limited range of offsets between the two channels.
      2. This assumes that the movies are longer than just a few frames.
    """
    n_tests = 10
    search_range = 5

    # Load parameters.
    parameters = params.ParametersMultiplane().initFromFile(params_file)

    # Load the movies from each camera.
    n_channels = 0
    movies = []
    for ext in mpUtil.getExtAttrs(parameters):
        movie_name = base_name + parameters.getAttr(ext)
        movies.append(datareader.inferReader(movie_name))
        n_channels += 1
    print("Found", n_channels, "movies.")

    # Load sCMOS calibration data.
    offsets = []
    gains = []
    for calib_name in mpUtil.getCalibrationAttrs(parameters):
        [offset, variance, gain, rqe] = analysisIO.loadCMOSCalibration(parameters.getAttr(calib_name))
        offsets.append(offset)
        gains.append(1.0 / gain)
    assert (len(offsets) == n_channels)

    # Load the plane to plane mapping data & create affine transform objects.
    mappings = {}
    with open(parameters.getAttr("mapping"), 'rb') as fp:
        mappings = pickle.load(fp)

    atrans = []
    for i in range(n_channels - 1):
        xt = mappings["0_" + str(i + 1) + "_x"]
        yt = mappings["0_" + str(i + 1) + "_y"]
        atrans.append(affineTransformC.AffineTransform(xt=xt, yt=yt))

    # Create background and foreground variance filters.
    #
    # FIXME: Is this right for movies that are not square?
    #
    [y_size, x_size] = movies[0].filmSize()[:2]

    psf = dg.drawGaussiansXY((x_size, y_size),
                             numpy.array([0.5 * x_size]),
                             numpy.array([0.5 * y_size]),
                             sigma=background_scale)
    psf = psf / numpy.sum(psf)
    bg_filter = matchedFilterC.MatchedFilter(psf)

    psf = dg.drawGaussiansXY((x_size, y_size),
                             numpy.array([0.5 * x_size]),
                             numpy.array([0.5 * y_size]),
                             sigma=foreground_scale)
    psf = psf / numpy.sum(psf)
    fg_filter = matchedFilterC.MatchedFilter(psf)
    var_filter = matchedFilterC.MatchedFilter(psf * psf)

    # Check background estimation. Note: 'gains' (not 'gain') is the
    # list defined above; the original used an undefined name here.
    if False:
        frame = loadImage(movies[0], 0, offsets[0], gains[0])
        frame_bg = estimateBackground(frame, bg_filter, fg_filter, var_filter)
        with tifffile.TiffWriter("bg_estimate.tif") as tif:
            tif.save(frame.astype(numpy.float32))
            tif.save(frame_bg.astype(numpy.float32))
            tif.save((frame - frame_bg).astype(numpy.float32))

    votes = numpy.zeros((n_channels - 1, 2 * search_range + 1))
    for i in range(n_tests):
        print("Test", i)

        # Load reference frame.
        ref_frame = loadImage(movies[0], search_range + i, offsets[0], gains[0])
        ref_frame_bg = estimateBackground(ref_frame, bg_filter, fg_filter, var_filter)
        ref_frame -= ref_frame_bg

        # Load test frames and measure correlation.
        for j in range(n_channels - 1):
            best_corr = 0.0
            best_offset = 0
            for k in range(-search_range, search_range + 1):
                test_frame = loadImage(movies[j + 1],
                                       search_range + i + k,
                                       offsets[j + 1],
                                       gains[j + 1],
                                       transform=atrans[j])
                test_frame_bg = estimateBackground(test_frame, bg_filter, fg_filter, var_filter)
                test_frame -= test_frame_bg
                test_frame_corr = numpy.sum(ref_frame * test_frame) / numpy.sum(test_frame)
                if (test_frame_corr > best_corr):
                    best_corr = test_frame_corr
                    best_offset = k + search_range
            votes[j, best_offset] += 1

    # Print results.
    print("Offset votes:")
    print(votes)

    frame_offsets = [0]
    frame_offsets += list(numpy.argmax(votes, axis=1) - search_range)
    print("Best offsets:")
    for i in range(n_channels):
        print(str(i) + ": " + str(frame_offsets[i]))

    # Create stacks with optimal offsets.
    print("Saving image stacks.")
    for i in range(n_channels):
        with tifffile.TiffWriter("find_offsets_ch" + str(i) + ".tif") as tif:
            for j in range(5):
                if (i == 0):
                    frame = loadImage(movies[i],
                                      search_range + frame_offsets[i] + j,
                                      offsets[i],
                                      gains[i])
                else:
                    frame = loadImage(movies[i],
                                      search_range + frame_offsets[i] + j,
                                      offsets[i],
                                      gains[i],
                                      transform=atrans[i - 1])
                frame_bg = estimateBackground(frame, bg_filter, fg_filter, var_filter)
                frame -= frame_bg
                tif.save(frame.astype(numpy.float32))
def setVariances(self, variances):

    #
    # Make sure that the number of (sCMOS) variance arrays
    # matches the number of image planes.
    #
    assert (len(variances) == self.n_channels)

    #
    # We initialize the following here because at __init__ we
    # don't know how big the images are.
    #
    # Note the assumption that every frame in all the movies
    # is the same size.
    #

    # Create mask to limit peak finding to a user defined sub-region of the image.
    self.peak_mask = numpy.ones(variances[0].shape)
    if self.parameters.hasAttr("x_start"):
        self.peak_mask[0:self.parameters.getAttr("x_start") + self.margin, :] = 0.0
    if self.parameters.hasAttr("x_stop"):
        self.peak_mask[self.parameters.getAttr("x_stop") + self.margin:-1, :] = 0.0
    if self.parameters.hasAttr("y_start"):
        self.peak_mask[:, 0:self.parameters.getAttr("y_start") + self.margin] = 0.0
    if self.parameters.hasAttr("y_stop"):
        self.peak_mask[:, self.parameters.getAttr("y_stop") + self.margin:-1] = 0.0

    #
    # Create mpUtilC.MpUtil object that is used to do a lot of the
    # peak list manipulations.
    #
    self.mpu = mpUtilC.MpUtil(radius=self.new_peak_radius,
                              neighborhood=self.neighborhood,
                              im_size_x=variances[0].shape[1],
                              im_size_y=variances[0].shape[0],
                              n_channels=self.n_channels,
                              n_zplanes=len(self.z_values),
                              margin=self.margin)

    #
    # Load mappings file again so that we can set the transforms for
    # the MpUtil object.
    #
    # Use self.margin - 1, because we added 1 to the x,y coordinates
    # when we saved them, see sa_library.i3dtype.createFromMultiFit().
    #
    [xt, yt] = mpUtilC.loadMappings(self.mapping_filename, self.margin - 1)[:2]
    self.mpu.setTransforms(xt, yt)

    #
    # Now that we have the MpUtil object we can split the input peak
    # locations to create a list for each channel.
    #
    if self.peak_locations is not None:
        self.peak_locations = self.mpu.splitPeaks(self.peak_locations)

    #
    # Create "foreground" and "variance" filters, as well as the
    # height rescaling array.
    #
    # These are stored in a list indexed by z value, then by
    # channel / plane. So self.mfilters[1][2] is the filter
    # for z value 1, plane 2.
    #
    for i, mfilter_z in enumerate(self.mfilters_z):
        self.height_rescale.append([])
        self.mfilters.append([])
        self.vfilters.append([])

        for j, s_to_psf in enumerate(self.s_to_psfs):
            psf = s_to_psf.getPSF(mfilter_z,
                                  shape=variances[0].shape,
                                  normalize=False)

            #
            # We are assuming that the psf has no negative values,
            # or if it does that they are very small.
            #
            psf_norm = psf / numpy.sum(psf)
            self.mfilters[i].append(matchedFilterC.MatchedFilter(psf_norm))
            self.vfilters[i].append(matchedFilterC.MatchedFilter(psf_norm * psf_norm))
            self.height_rescale[i].append(1.0 / numpy.sum(psf * psf_norm))

            # Save a picture of the PSF for debugging purposes.
            if False:
                print("psf max", numpy.max(psf))
                filename = "psf_z{0:.3f}_c{1:d}.tif".format(mfilter_z, j)
                tifffile.imsave(filename, psf.astype(numpy.float32))

    # "background" filter.
    psf = dg.drawGaussiansXY(variances[0].shape,
                             numpy.array([0.5 * variances[0].shape[0]]),
                             numpy.array([0.5 * variances[0].shape[1]]),
                             sigma=self.bg_filter_sigma)
    psf = psf / numpy.sum(psf)
    self.bg_filter = matchedFilterC.MatchedFilter(psf)

    #
    # Process variance arrays now as they don't change from frame
    # to frame.
    #
    # This initializes the self.variances array with a list
    # of lists with the same organization as foreground and
    # psf / variance filters.
    #
    # Use PSF filter and not variance filter here as this is the
    # measured camera variance.
    #

    # Iterate over z values.
    for i in range(len(self.mfilters)):
        variance = numpy.zeros(variances[0].shape)

        # Iterate over channels / planes.
        for j in range(len(self.mfilters[i])):

            # Convolve variance with the appropriate PSF filter.
            conv_var = self.mfilters[i][j].convolve(variances[j])

            # Transform variance to the channel 0 frame.
            if self.atrans[j] is None:
                variance += conv_var
            else:
                variance += self.atrans[j].transform(conv_var)

        self.variances.append(variance)