def analyzeImage(self, new_image, bg_estimate=None, save_residual=False, verbose=False):
    """
    Find and fit the peaks in a single image.

    new_image - The 2D image to analyze.
    bg_estimate - Optional estimate of the image background, or None.
    save_residual - Unused here; kept for interface compatibility with
        other analyzers.
    verbose - Unused here; kept for interface compatibility.

    Returns [fit_peaks, residual] where fit_peaks is the peak array
    (utilC column layout) and residual is the fit residual image.
    """
    # Pad image (and background, if any) so that peaks near the edge
    # can still be fit.
    image = fitting.padArray(new_image, self.peak_finder.margin)
    if bg_estimate is not None:
        bg_estimate = fitting.padArray(bg_estimate, self.peak_finder.margin)

    self.peak_finder.newImage(image, bg_estimate)
    self.peak_fitter.newImage(image)

    #
    # This is a lot simpler than 3D-DAOSTORM as we only do one pass,
    # hopefully the compressed sensing (FISTA) deconvolution finds all the
    # peaks and then we do a single pass of fitting.
    #
    if True:
        peaks = self.peak_finder.findPeaks()
        [fit_peaks, residual] = self.peak_fitter.fitPeaks(peaks)

    #
    # This is for testing if just using FISTA followed by the center
    # of mass calculation is basically as good as also doing the
    # additional MLE spline fitting step.
    #
    # The short answer is that it appears that it is not. It about
    # 1.3x worse in XY and about 4x worse in Z.
    #
    else:
        fit_peaks = self.peak_finder.findPeaks()

        # Adjust z scale.
        z_index = utilC.getZCenterIndex()
        z_size = (self.peak_fitter.spline.shape[2] - 1.0)
        status_index = utilC.getStatusIndex()
        fit_peaks[:, z_index] = z_size * fit_peaks[:, z_index]

        # Mark as converged.
        fit_peaks[:, status_index] = 1.0
        residual = None

    #
    # Subtract margin so that peaks are in the right
    # place with respect to the original image.
    #
    fit_peaks[:, utilC.getXCenterIndex()] -= float(self.peak_finder.margin)
    fit_peaks[:, utilC.getYCenterIndex()] -= float(self.peak_finder.margin)

    return [fit_peaks, residual]
def loadSCMOSData(calibration_filename, margin):
    """
    Load sCMOS camera calibration data and pad it out to the size of
    the (padded) analyzed image.

    calibration_filename - Name of a numpy file containing the
        [offset, variance, gain] camera calibration arrays.
    margin - Number of pixels of edge padding to add to each array.

    Returns [padded_offset, padded_variance, padded_gain].
    """
    # Additional sCMOS specific data & objects.
    #
    # Load camera calibrations & create smoother and regularizer.
    [offset, variance, gain] = numpy.load(calibration_filename)

    # Pad out sCMOS arrays.
    lg_offset = fitting.padArray(offset, margin)
    lg_variance = fitting.padArray(variance, margin)
    lg_gain = fitting.padArray(gain, margin)

    return [lg_offset, lg_variance, lg_gain]
def loadSCMOSData(calibration_filename, margin):
    """
    Load camera calibration data.

    Note: Gain is expected to be in units of ADU per photo-electron.

    calibration_filename - Name of a numpy file containing the
        [offset, variance, gain] camera calibration arrays.
    margin - Number of pixels of edge padding to add to each array.

    Returns [padded_offset, padded_variance, padded_gain].
    """
    [offset, variance, gain] = numpy.load(calibration_filename)

    # Pad out camera calibration data to the final image size.
    lg_offset = fitting.padArray(offset, margin)
    lg_variance = fitting.padArray(variance, margin)
    lg_gain = fitting.padArray(gain, margin)

    return [lg_offset, lg_variance, lg_gain]
def loadSCMOSData(calibration_filename, margin):
    """
    Load camera calibration data.

    Note: Gain is expected to be in units of ADU per photo-electron.
    """
    offset, variance, gain = numpy.load(calibration_filename)

    # Pad each calibration array out to the final image size and
    # return them in the same [offset, variance, gain] order.
    return [fitting.padArray(arr, margin) for arr in (offset, variance, gain)]
def analyzeImage(self, new_image, bg_estimate = None, save_residual = False, verbose = False):
    """
    Perform a single round of peak finding and fitting on an image.

    new_image - The 2D image to analyze.
    bg_estimate - An estimate of the image background, or None.
    save_residual - Ignored; present for interface compatibility.
    verbose - Ignored; present for interface compatibility.

    Returns [fit_peaks, residual].
    """
    # Pad out the image (and the background estimate, when provided)
    # by the peak finder's margin.
    image = fitting.padArray(new_image, self.peak_finder.margin)
    if bg_estimate is not None:
        bg_estimate = fitting.padArray(bg_estimate, self.peak_finder.margin)

    self.peak_finder.newImage(image, bg_estimate)
    self.peak_fitter.newImage(image)

    #
    # This is a lot simpler than 3D-DAOSTORM as we only do one pass,
    # hopefully the compressed sensing (FISTA) deconvolution finds all the
    # peaks and then we do a single pass of fitting.
    #
    if True:
        peaks = self.peak_finder.findPeaks()
        [fit_peaks, residual] = self.peak_fitter.fitPeaks(peaks)

    #
    # This is for testing if just using FISTA followed by the center
    # of mass calculation is basically as good as also doing the
    # additional MLE spline fitting step.
    #
    # The short answer is that it appears that it is not. It about
    # 1.3x worse in XY and about 4x worse in Z.
    #
    else:
        fit_peaks = self.peak_finder.findPeaks()

        # Adjust z scale.
        z_index = utilC.getZCenterIndex()
        z_size = (self.peak_fitter.spline.shape[2] - 1.0)
        status_index = utilC.getStatusIndex()
        fit_peaks[:,z_index] = z_size*fit_peaks[:,z_index]

        # Mark as converged.
        fit_peaks[:,status_index] = 1.0
        residual = None

    #
    # Subtract margin so that peaks are in the right
    # place with respect to the original image.
    #
    fit_peaks[:,utilC.getXCenterIndex()] -= float(self.peak_finder.margin)
    fit_peaks[:,utilC.getYCenterIndex()] -= float(self.peak_finder.margin)

    return [fit_peaks, residual]
def setVariances(self, variances): """ setVariances() customized for arbitrary PSFs. """ # Make sure that the number of (sCMOS) variance arrays # matches the number of image planes. # assert (len(variances) == self.n_channels) # Pad variances to correct size. # temp = [] for variance in variances: temp.append(fitting.padArray(variance, self.margin)) variances = temp # Create "foreground" and "variance" filters. # # These are stored in a list indexed by z value, then by # channel / plane. So self.mfilters[1][2] is the filter # for z value 1, plane 2. # for i, mfilter_z in enumerate(self.mfilters_z): self.mfilters.append([]) self.vfilters.append([]) for j, psf_object in enumerate(self.psf_objects): psf = psf_object.getPSF(mfilter_z, shape=variances[0].shape, normalize=False) # # We are assuming that the psf has no negative values, # or if it does that they are very small. # psf_norm = psf / numpy.sum(psf) self.mfilters[i].append( matchedFilterC.MatchedFilter(psf_norm, memoize=True, max_diff=1.0e-3)) self.vfilters[i].append( matchedFilterC.MatchedFilter(psf_norm * psf_norm, memoize=True, max_diff=1.0e-3)) # Save a pictures of the PSFs for debugging purposes. if self.check_mode: print("psf max", numpy.max(psf)) filename = "psf_z{0:.3f}_c{1:d}.tif".format(mfilter_z, j) tifffile.imsave(filename, psf.astype(numpy.float32)) # This handles the rest of the initialization. # super(MPPeakFinderArb, self).setVariances(variances) return variances
def setVariances(self, variances):
    """
    setVariances() customized for gaussian PSFs.

    variances - A list of sCMOS camera variance arrays, one per
        image plane / channel.

    Returns the variance arrays padded to the working image size.
    """
    # Make sure that the number of (sCMOS) variance arrays
    # matches the number of image planes.
    #
    assert (len(variances) == self.n_channels)

    # Pad variances to correct size.
    #
    variances = [fitting.padArray(variance, self.margin) for variance in variances]

    # Create "foreground" and "variance" filters. There is
    # only one z value here.
    #
    # These are stored in a list indexed by z value, then by
    # channel / plane. So self.mfilters[1][2] is the filter
    # for z value 1, plane 2.
    #
    self.mfilters.append([])
    self.vfilters.append([])

    psf_norm = fitting.gaussianPSF(
        variances[0].shape,
        self.parameters.getAttr("foreground_sigma"))
    # The variance filter uses the squared (normalized) PSF.
    var_norm = psf_norm * psf_norm

    for i in range(self.n_channels):
        self.mfilters[0].append(
            matchedFilterC.MatchedFilter(
                psf_norm,
                fftw_estimate=self.parameters.getAttr("fftw_estimate"),
                memoize=True,
                max_diff=1.0e-3))
        self.vfilters[0].append(
            matchedFilterC.MatchedFilter(
                var_norm,
                fftw_estimate=self.parameters.getAttr("fftw_estimate"),
                memoize=True,
                max_diff=1.0e-3))

    # Save a picture of the PSF for debugging purposes.
    #
    # Bug fix: this block previously referenced the undefined names
    # 'psf' and 'j' (copied from the arbitrary-PSF version) and used
    # format index {1:d} with a single argument, so enabling
    # check_mode raised NameError. The gaussian PSF is identical for
    # every channel, so it is saved just once.
    #
    if self.check_mode:
        print("psf max", numpy.max(psf_norm))
        filename = "psf_z0.0_c0.tif"
        tifffile.imsave(filename, psf_norm.astype(numpy.float32))

    # This handles the rest of the initialization.
    #
    super(MPPeakFinderDao, self).setVariances(variances)

    return variances
def setVariances(self, variances):
    """
    setVariances() customized for arbitrary PSFs.
    """
    # The number of (sCMOS) variance arrays must match the number
    # of image planes.
    #
    assert(len(variances) == self.n_channels)

    # Pad every variance array out to the working image size.
    #
    variances = [fitting.padArray(variance, self.margin) for variance in variances]

    # Build the "foreground" (mfilters) and "variance" (vfilters)
    # matched filters. These are stored in a list indexed by z value,
    # then by channel / plane. So self.mfilters[1][2] is the filter
    # for z value 1, plane 2.
    #
    filter_shape = variances[0].shape
    for zi, z_value in enumerate(self.mfilters_z):
        m_row = []
        v_row = []
        for ci, psf_object in enumerate(self.psf_objects):
            psf = psf_object.getPSF(z_value,
                                    shape = filter_shape,
                                    normalize = False)

            # The PSF is assumed to have no negative values, or at
            # most very small ones.
            #
            psf_norm = psf/numpy.sum(psf)
            m_row.append(matchedFilterC.MatchedFilter(psf_norm,
                                                      memoize = True,
                                                      max_diff = 1.0e-3))
            v_row.append(matchedFilterC.MatchedFilter(psf_norm * psf_norm,
                                                      memoize = True,
                                                      max_diff = 1.0e-3))

            # Save a picture of each PSF for debugging purposes.
            if self.check_mode:
                print("psf max", numpy.max(psf))
                filename = "psf_z{0:.3f}_c{1:d}.tif".format(z_value, ci)
                tifffile.imsave(filename, psf.astype(numpy.float32))

        self.mfilters.append(m_row)
        self.vfilters.append(v_row)

    # This handles the rest of the initialization.
    #
    super(MPPeakFinderArb, self).setVariances(variances)

    return variances
def setVariances(self, variances):
    """
    setVariances() customized for gaussian PSFs.

    variances - A list of sCMOS camera variance arrays, one per
        image plane / channel.

    Returns the variance arrays padded to the working image size.
    """
    # Make sure that the number of (sCMOS) variance arrays
    # matches the number of image planes.
    #
    assert(len(variances) == self.n_channels)

    # Pad variances to correct size.
    #
    variances = [fitting.padArray(variance, self.margin) for variance in variances]

    # Create "foreground" and "variance" filters. There is
    # only one z value here.
    #
    # These are stored in a list indexed by z value, then by
    # channel / plane. So self.mfilters[1][2] is the filter
    # for z value 1, plane 2.
    #
    self.mfilters.append([])
    self.vfilters.append([])

    psf_norm = fitting.gaussianPSF(variances[0].shape,
                                   self.parameters.getAttr("foreground_sigma"))
    # The variance filter uses the squared (normalized) PSF.
    var_norm = psf_norm * psf_norm

    for i in range(self.n_channels):
        self.mfilters[0].append(matchedFilterC.MatchedFilter(psf_norm,
                                                             memoize = True,
                                                             max_diff = 1.0e-3))
        self.vfilters[0].append(matchedFilterC.MatchedFilter(var_norm,
                                                             memoize = True,
                                                             max_diff = 1.0e-3))

    # Save a picture of the PSF for debugging purposes.
    #
    # Bug fix: this block previously referenced the undefined names
    # 'psf' and 'j' (copied from the arbitrary-PSF version) and used
    # format index {1:d} with a single argument, so enabling
    # check_mode raised NameError. The gaussian PSF is identical for
    # every channel, so it is saved just once.
    #
    if self.check_mode:
        print("psf max", numpy.max(psf_norm))
        filename = "psf_z0.0_c0.tif"
        tifffile.imsave(filename, psf_norm.astype(numpy.float32))

    # This handles the rest of the initialization.
    #
    super(MPPeakFinderDao, self).setVariances(variances)

    return variances
def loadImage(self, movie_reader):
    """
    Load one frame per channel / plane from the movie reader, pad
    each image, and create a matching empty "fit peaks" image.

    Returns [images, fit_peaks_images].
    """
    margin = self.peak_finder.margin
    images = []
    fit_peaks_images = []
    for channel in range(self.peak_finder.n_channels):
        # Load and edge-pad the image of a single channel / plane.
        padded = fitting.padArray(movie_reader.getFrame(channel), margin)
        images.append(padded)
        # Matching all-zeros fit image of the same shape.
        fit_peaks_images.append(numpy.zeros(padded.shape))
    return [images, fit_peaks_images]
def loadBackgroundEstimate(self, movie_reader):
    """
    Load the background estimate for each channel / plane and pad it.
    A None background is passed through unchanged as a placeholder.

    Returns the list of (padded) background estimates.
    """
    margin = self.peak_finder.margin
    bg_estimates = []
    for channel in range(self.peak_finder.n_channels):
        bg = movie_reader.getBackground(channel)
        # Only pad when a background is actually available.
        if bg is not None:
            bg = fitting.padArray(bg, margin)
        bg_estimates.append(bg)
    return bg_estimates
def loadBackgroundEstimates(self, movie_reader):
    """
    Load, pad and camera-correct the background estimate for each
    plane. A None background is passed through unchanged.

    Returns the list of background estimates in photo-electrons.
    """
    bg_estimates = []
    for plane in range(self.n_planes):
        bg = movie_reader.getBackground(plane)
        if bg is not None:
            # Edge-pad, then convert ADU to photo-electrons using the
            # per-plane camera offset and gain.
            bg = fitting.padArray(bg, self.margin)
            bg = (bg - self.offsets[plane]) * self.gains[plane]
        bg_estimates.append(bg)
    return bg_estimates
def loadImages(self, movie_reader):
    """
    Load one frame per plane, pad it, convert it to photo-electrons
    and clamp it to a 1.0 floor. Also create a matching empty
    "fit peaks" image for each plane.

    Returns [images, fit_peaks_images].
    """
    images = []
    fit_peaks_images = []
    for plane in range(self.n_planes):
        # Load and edge-pad the image of a single channel / plane.
        frame = fitting.padArray(movie_reader.getFrame(plane), self.margin)

        # Convert ADU to photo-electrons with the per-plane camera
        # offset and gain.
        frame = (frame - self.offsets[plane]) * self.gains[plane]

        # Clamp values below 1.0 (equivalent to masking values < 1.0
        # and setting them to 1.0; NaNs propagate in both forms).
        frame = numpy.maximum(frame, 1.0)

        images.append(frame)
        fit_peaks_images.append(numpy.zeros(frame.shape))
    return [images, fit_peaks_images]
def setVariance(self, camera_variance):
    """
    Just return the camera_variance array properly re-sized.
    """
    # No filters to build here; simply pad out to the working size.
    padded_variance = fitting.padArray(camera_variance, self.margin)
    return padded_variance