def doFit(fit_function, fit_data, max_iterations):
    """Iteratively find and fit peaks in fit_data.

    Runs up to max_iterations rounds of peak finding (findPeaks),
    fitting (fit_function) and background/cutoff updating, stopping
    early once findPeaks reports that it is done. Finally shifts the
    peak X/Y centers back by pad_size so they are in the coordinate
    system of the original (un-padded) image.

    Args:
        fit_function: callable taking fit_data, fits the current peaks.
        fit_data: fit state object; provides .image, .peaks, .residual
            and .pad_size (see callers for the full contract).
        max_iterations: maximum number of find/fit rounds.

    Returns:
        [peaks, residual] list.
    """
    # Debugging aid: flip to True to save the residual image from each
    # iteration to "residual.dax".
    save_residual = False
    if save_residual:
        resid_dax = daxwriter.DaxWriter("residual.dax",
                                        fit_data.image.shape[0],
                                        fit_data.image.shape[1])

    for i in range(max_iterations):
        if save_residual:
            resid_dax.addFrame(fit_data.residual)

        done = findPeaks(fit_data)
        fit_function(fit_data)
        updateBackgroundCutoff(fit_data)
        if not done:
            break

    if save_residual:
        resid_dax.close()

    # Use isinstance() instead of comparing type() objects; this is the
    # idiomatic check and also accepts ndarray subclasses. fit_data.peaks
    # may legitimately not be an array (e.g. no peaks found), in which
    # case there is nothing to shift.
    if isinstance(fit_data.peaks, numpy.ndarray):
        fit_data.peaks[:, util_c.getXCenterIndex()] -= fit_data.pad_size
        fit_data.peaks[:, util_c.getYCenterIndex()] -= fit_data.pad_size

    # TODO: a final phase of refitting after removal of bad peaks?
    return [fit_data.peaks, fit_data.residual]
def analyzeImage(self, new_image, save_residual = False, verbose = False):
    """Find and fit the peaks in a single image.

    Pads the image, hands it to the peak finder and fitter, runs one
    find + fit pass, and returns the fit peaks with their coordinates
    shifted back into the un-padded image's coordinate system.

    Returns:
        [fit_peaks, residual] list.
    """
    # Pad the image out so that peaks near the edges can be handled.
    padded = fitting.padArray(new_image, self.peak_finder.margin)
    self.peak_finder.newImage(padded)
    self.peak_fitter.newImage(padded)

    # Unlike 3D-DAOSTORM this is a single pass: the compressed sensing
    # (FISTA) deconvolution is expected to locate all of the peaks,
    # which are then refined with one round of fitting.
    if True:
        found = self.peak_finder.findPeaks()
        [fit_peaks, residual] = self.peak_fitter.fitPeaks(found)
    else:
        # Test path: FISTA followed only by the center of mass
        # calculation, skipping the additional MLE spline fitting step.
        # Empirically this is not as good — about 1.3x worse in XY and
        # about 4x worse in Z — so it is disabled.
        fit_peaks = self.peak_finder.findPeaks()

        # Rescale z from the 0.0 - 1.0 range into spline coordinates.
        zi = utilC.getZCenterIndex()
        fit_peaks[:, zi] = (self.peak_fitter.spline.shape[2] - 1.0) * fit_peaks[:, zi]

        # Flag every peak as converged.
        fit_peaks[:, utilC.getStatusIndex()] = 1.0
        residual = None

    # Shift the peaks back so they are positioned correctly with
    # respect to the original (un-padded) image.
    shift = float(self.peak_finder.margin)
    fit_peaks[:, utilC.getXCenterIndex()] -= shift
    fit_peaks[:, utilC.getYCenterIndex()] -= shift

    return [fit_peaks, residual]
def getPeaks(self, threshold, margin):
    """Extract initial peak parameter estimates from the FISTA result.

    Locates candidate peaks in the deconvolved image and builds the
    parameter array (centers, widths, height, z, background) used to
    seed the subsequent fitting step.

    Args:
        threshold: minimum value for a candidate peak.
        margin: exclusion margin around the image edges.

    Returns:
        numpy array of shape (num_peaks, n_results_par).
    """
    fx = self.getXVector()

    # Candidate peaks: columns are [area, row, column, z-plane].
    raw = fdUtil.getPeaks(fx, threshold, margin)
    n_found = raw.shape[0]

    peaks = numpy.zeros((n_found, utilC.getNResultsPar()))
    peaks[:, utilC.getXWidthIndex()] = numpy.ones(n_found)
    peaks[:, utilC.getYWidthIndex()] = numpy.ones(n_found)
    peaks[:, utilC.getXCenterIndex()] = raw[:, 2]
    peaks[:, utilC.getYCenterIndex()] = raw[:, 1]

    # Starting height estimate, scaled by the PSF height at the
    # peak's z-plane.
    #
    # FIXME: Typically the starting value for the peak height will be
    #        under-estimated unless a large enough number of FISTA
    #        iterations is performed to completely de-convolve the
    #        image.
    #
    h_index = utilC.getHeightIndex()
    for j in range(n_found):
        zi = int(round(raw[j, 3]))
        peaks[j, h_index] = raw[j, 0] * self.psf_heights[zi]

    # Z center, normalized to the 0.0 - 1.0 range.
    peaks[:, utilC.getZCenterIndex()] = raw[:, 3] / (float(fx.shape[2]) - 1.0)

    # Background estimate sampled at each peak's (row, column).
    bg_index = utilC.getBackgroundIndex()
    for j in range(n_found):
        row = int(round(raw[j, 1]))
        col = int(round(raw[j, 2]))
        peaks[j, bg_index] = self.background[row, col]

    return peaks
# Walk through the movie frame by frame, collecting localizations until
# enough peaks (min_peaks) have been used for the spline measurement.
curf = 1
peaks_used = 0
total = 0.0
[dax_x, dax_y, dax_l] = dax_data.filmSize()
while (curf < dax_l) and (peaks_used < min_peaks):

    # Select localizations in current frame & not near the edges.
    #
    # NOTE(review): 'x' is bounded here by dax_y and 'y' by dax_x —
    # verify this matches the axis order returned by filmSize(); a
    # sibling copy of this loop uses the opposite pairing.
    mask = (i3_data['fr'] == curf) & (i3_data['x'] > aoi_size) & (i3_data['x'] < (dax_y - aoi_size - 1)) & (i3_data['y'] > aoi_size) & (i3_data['y'] < (dax_x - aoi_size - 1))
    xr = i3_data['x'][mask]
    yr = i3_data['y'][mask]
    ht = i3_data['h'][mask]

    # Remove localizations that are too close to each other.
    in_peaks = numpy.zeros((xr.size,util_c.getNResultsPar()))
    in_peaks[:,util_c.getXCenterIndex()] = xr
    in_peaks[:,util_c.getYCenterIndex()] = yr
    in_peaks[:,util_c.getHeightIndex()] = ht
    out_peaks = util_c.removeNeighbors(in_peaks, aoi_size)
    print curf, in_peaks.shape, out_peaks.shape

    # Use remaining localizations to calculate spline.
    # (Frames are 1-based in i3_data, 0-based in loadAFrame.)
    image = dax_data.loadAFrame(curf-1).astype(numpy.float64)
    xr = out_peaks[:,util_c.getXCenterIndex()]
    yr = out_peaks[:,util_c.getYCenterIndex()]
    ht = out_peaks[:,util_c.getHeightIndex()]
    for i in range(xr.size):
        # NOTE(review): loop body continues beyond this visible chunk.
        xf = xr[i]
# NOTE(review): this chunk starts mid-scope — curf, peaks_used, dax_data,
# i3_data, aoi_size and min_peaks are defined earlier (not visible here).
# Collect localizations frame by frame until min_peaks have been used.
total = 0.0
[dax_x, dax_y, dax_l] = dax_data.filmSize()
while (curf < dax_l) and (peaks_used < min_peaks):

    # Select localizations in current frame & not near the edges.
    mask = (i3_data['fr'] == curf) & (i3_data['x'] > aoi_size) & (
        i3_data['x'] < (dax_x - aoi_size - 1)) & (i3_data['y'] > aoi_size) & (
        i3_data['y'] < (dax_y - aoi_size - 1))
    xr = i3_data['x'][mask]
    yr = i3_data['y'][mask]
    ht = i3_data['h'][mask]

    # Remove localizations that are too close to each other.
    in_peaks = numpy.zeros((xr.size, util_c.getNResultsPar()))
    in_peaks[:, util_c.getXCenterIndex()] = xr
    in_peaks[:, util_c.getYCenterIndex()] = yr
    in_peaks[:, util_c.getHeightIndex()] = ht
    out_peaks = util_c.removeNeighbors(in_peaks, aoi_size)
    print curf, in_peaks.shape, out_peaks.shape

    # Use remaining localizations to calculate spline.
    # (Frames are 1-based in i3_data, 0-based in loadAFrame.)
    image = dax_data.loadAFrame(curf - 1).astype(numpy.float64)
    xr = out_peaks[:, util_c.getXCenterIndex()]
    yr = out_peaks[:, util_c.getYCenterIndex()]
    ht = out_peaks[:, util_c.getHeightIndex()]
    for i in range(xr.size):
        # NOTE(review): loop body continues beyond this visible chunk.
        xf = xr[i]