def plot_blob_grid(self, window=11, **kwargs):
    """Display a grid of blobs."""
    return display_grid(
        {
            i: self.data[slice_maker((y, x), window)]
            for i, (y, x, s, r) in enumerate(self.blobs)
        },
        **kwargs
    )

def fit_blobs(self, width=10, **kwargs):
    """Fit blobs to a Gaussian function."""
    # if we don't have blobs, find them
    if self._blobs is None:
        self.find_blobs()
    # set up the container for our fits
    peakfits = []
    # iterate through blobs
    for y, x, s, r in self.blobs:
        # make a fit window (tuple form, matching the other slice_maker
        # calls in this class)
        win = slice_maker((int(y), int(x)), width)
        # make a fit object with a subset of the data
        mypeak = Gauss2D(self.data[win])
        # optimize params
        mypeak.optimize_params(**kwargs)
        fit_coefs = mypeak.all_params_dict()
        # shift the fit coefs back into full-image coordinates
        fit_coefs['y0'] += win[0].start
        fit_coefs['x0'] += win[1].start
        # calc SNR for each peak
        fit_coefs['noise'] = mypeak.noise
        fit_coefs['SNR'] = fit_coefs['amp'] / fit_coefs['noise']
        # append to peakfits
        peakfits.append(fit_coefs)
    # construct DataFrame
    peakfits_df = pd.DataFrame(peakfits)
    # internalize DataFrame
    self._fits = peakfits_df
    # return it to the user
    return peakfits_df

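# Usage sketch (hypothetical; ``analyzer`` stands in for an instance of the
# enclosing class with ``data`` already loaded -- it is not defined here):
#
#     >>> fits = analyzer.fit_blobs(width=12)
#     >>> good = fits[fits["SNR"] > 5]   # keep only well-fit peaks
#     >>> good[["y0", "x0"]].head()      # centers in full-image coordinates
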
def plot_fits(self, window_width, residuals=False, **kwargs):
    """Generate a plot of the found peaks, individually."""
    # check if the fitting has been performed yet, raise if it hasn't
    if self._fits is None:
        raise RuntimeError('Blobs have not been fit yet, cannot show fits')
    else:
        fits = self._fits
    # pull the data from the object
    data = self.data
    # build a fit window around each fitted center
    my_objects = [
        slice_maker(center, window_width)
        for center in fits[["y0", "x0"]].values
    ]
    # generate a nice, roughly square layout
    nb_labels = len(my_objects)
    nrows = int(np.ceil(np.sqrt(nb_labels)))
    ncols = int(np.ceil(nb_labels / nrows))
    fig, axes = plt.subplots(nrows, ncols, figsize=(3 * ncols, 3 * nrows))
    for n, (obj, ax) in enumerate(zip(my_objects, axes.ravel())):
        ex = (obj[1].start, obj[1].stop - 1, obj[0].stop - 1, obj[0].start)
        ax.set_title(n)
        ax.grid(False)
        # generate the model fit to display, from parameters
        dict_params = dict(fits.loc[n].dropna())
        # recenter the parameters into window coordinates
        dict_params['x0'] -= obj[1].start
        dict_params['y0'] -= obj[0].start
        params = Gauss2D.dict_to_params(dict_params)
        fake_data = Gauss2D.gen_model(data[obj], *params)
        if residuals:
            ax.matshow(data[obj] - fake_data, extent=ex, **kwargs)
        else:
            ax.matshow(data[obj], extent=ex, **kwargs)
        ax.contour(fake_data, extent=ex, colors='w', origin='image')
    # remove empty plots
    for ax in axes.ravel():
        if not len(ax.images) and not len(ax.lines):
            fig.delaxes(ax)
    fig.tight_layout()
    # return the fig and axes handles to the user for later manipulation
    return fig, axes

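# Usage sketch (hypothetical ``analyzer`` as above, after ``fit_blobs`` has
# populated the fits). Extra keyword arguments go straight to ``matshow``:
#
#     >>> fig, axes = analyzer.plot_fits(15, cmap="inferno")
#     >>> fig, axes = analyzer.plot_fits(15, residuals=True)  # data - model
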
def pattern_params(my_pat, size=2):
    """Estimate the period, angle, phase and modulation depth of a periodic pattern."""
    # REAL FFT!
    # note the limited shifting, we don't want to shift the last axis
    my_pat_fft = fftshift(
        rfftn(ifftshift(my_pat)), axes=tuple(range(my_pat.ndim))[:-1]
    )
    my_abs_pat_fft = abs(my_pat_fft)
    # find the DC location, the center of the FFT after shifting
    sizeky, sizekx = my_abs_pat_fft.shape
    # remember we didn't shift the last axis!
    dc_loc = (sizeky // 2, 0)
    # mask the DC term and find the next biggest peak
    dc_power = my_abs_pat_fft[dc_loc]
    my_abs_pat_fft[dc_loc] = 0
    max_loc = np.unravel_index(my_abs_pat_fft.argmax(), my_abs_pat_fft.shape)
    # pull the 3x3 region around the peak and fit for a sub-pixel location
    max_shift = localize_peak(my_abs_pat_fft[slice_maker(max_loc, 3)])
    # calculate the precise peak location relative to the DC term
    peak = np.array(max_loc) + np.array(max_shift) - np.array(dc_loc)
    # normalize by the initial data shape to get spatial frequencies
    peak_corr = peak / np.array(my_pat.shape)
    # calc angle
    preciseangle = np.arctan2(*peak_corr)
    # calc period
    precise_period = 1 / norm(peak_corr)
    # calc phase
    phase = np.angle(my_pat_fft[max_loc[0], max_loc[1]])
    # calc modulation depth
    numerator = abs(my_pat_fft[slice_maker(max_loc, size)].sum())
    mod = numerator / dc_power
    return {
        "period": precise_period,
        "angle": preciseangle,
        "phase": phase,
        "fft": my_pat_fft,
        "mod": mod,
        "max_loc": max_loc,
    }

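# Worked example (a sketch, exercising only the function above; exact values
# depend on ``localize_peak``): a sinusoid with a period of ~8.5 px along x
# puts its first-order peak on the kx axis, so the recovered period should be
# close to 8.5 and the angle close to 0, with a modulation depth near 0.25
# (the positive-frequency peak of a 0.5-amplitude cosine carries half that
# amplitude):
#
#     >>> yy, xx = np.mgrid[:128, :128]
#     >>> pat = 1 + 0.5 * np.cos(2 * np.pi * xx / 8.5)
#     >>> p = pattern_params(pat)
#     >>> p["period"], p["angle"], p["mod"]   # ~8.5, ~0, ~0.25
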
def fit_blobs(self, width=10, poly_coefs_df=None, **kwargs):
    """Fit blobs to a Gaussian function.

    Parameters
    ----------
    width : int
        The size of the fitting window in pixels
    poly_coefs_df : pd.DataFrame, optional
        If given, each blob is fit with `Gauss2Dz` using these
        coefficients instead of a plain `Gauss2D`

    **kwargs is for Gauss2D optimize_params
    """
    # if we don't have blobs, find them
    if self._blobs is None:
        self.find_blobs()

    @dask.delayed
    def fitfunc(win, sub_data):
        # pick the fit model
        if poly_coefs_df is None:
            mypeak = Gauss2D(sub_data)
        else:
            mypeak = Gauss2Dz(sub_data, poly_coefs_df)
        # optimize params
        mypeak.optimize_params(**kwargs)
        fit_coefs = mypeak.all_params_dict()
        # shift the fit coefs back into full-image coordinates
        fit_coefs['y0'] += win[0].start
        fit_coefs['x0'] += win[1].start
        # calc SNR for each peak
        fit_coefs['noise'] = mypeak.noise
        fit_coefs['SNR'] = fit_coefs['amp'] / fit_coefs['noise']
        return fit_coefs

    # build a fit window and data subset for each blob
    windows = [
        slice_maker((int(y), int(x)), width) for y, x, s, r in self.blobs
    ]
    data_to_fit = [self.data[win] for win in tqdm.tqdm_notebook(windows)]
    peakfits = dask.delayed([
        fitfunc(win, sub_data) for win, sub_data in zip(windows, data_to_fit)
    ])
    # construct DataFrame
    with ProgressBar():
        peakfits_df = pd.DataFrame(peakfits.compute(scheduler="processes"))
    # internalize DataFrame
    self._fits = peakfits_df
    # return it to the user
    return peakfits_df

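# Usage sketch (hypothetical ``analyzer`` and calibration ``coefs`` -- neither
# is defined in this module). Each fit is wrapped in ``dask.delayed`` and the
# whole batch is evaluated at once; the "processes" scheduler is used because
# the fits are CPU-bound least-squares problems that would serialize under
# the GIL with threads:
#
#     >>> fits = analyzer.fit_blobs(width=12)                       # 2D Gaussian
#     >>> fits = analyzer.fit_blobs(width=12, poly_coefs_df=coefs)  # Gauss2Dz
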
def prep_data_for_PR(data, xysize=None, multiplier=1.5):
    """A utility to prepare data for phase retrieval.

    Will pad or crop to `xysize` and subtract `multiplier` times the mode
    of the data.

    Parameters
    ----------
    data : ndarray
        The PSF data to prepare for phase retrieval
    xysize : int
        Size to pad or crop `data` to along the y, x dimensions
    multiplier : float
        The amount by which to multiply the mode before subtracting

    Returns
    -------
    prepped_data : ndarray
        The data that has been prepped for phase retrieval.
    """
    # pull shape
    nz, ny, nx = data.shape
    # remove background
    data_without_bg = remove_bg(data, multiplier)
    # figure out padding or cropping
    if xysize is None:
        xysize = max(ny, nx)
    if xysize == ny == nx:
        pad_data = data_without_bg
    elif xysize >= max(ny, nx):
        pad_data = fft_pad(
            data_without_bg, (nz, xysize, xysize), mode="constant"
        )
    else:
        # if we need to crop, crop about the center and return
        # (index with a tuple, numpy no longer accepts a list of slices)
        my_slice = slice_maker(((ny + 1) // 2, (nx + 1) // 2), xysize)
        return center_data(data_without_bg)[(Ellipsis,) + tuple(my_slice)]
    # return centered data
    return center_data(pad_data)

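# Behavior sketch (shapes only; assumes ``remove_bg``, ``fft_pad`` and
# ``center_data`` from this package's utilities are importable, and that
# ``slice_maker`` windows are exactly ``xysize`` wide):
#
#     >>> psf = np.random.rand(64, 100, 120)          # stand-in PSF stack
#     >>> prep_data_for_PR(psf, xysize=256).shape     # pad branch
#     (64, 256, 256)
#     >>> prep_data_for_PR(psf, xysize=64).shape      # crop branch
#     (64, 64, 64)
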
def _fitPeaks_sim(fitwidth, blob, stack, **kwargs):
    """A subfunction that can be dispatched to multiple cores for processing.

    This function is specific to analyzing SIM data and is designed to fit
    substacks _without_ moving the fit window (i.e. it is assumed that
    drift is minimal).

    Parameters
    ----------
    fitwidth : int
        Size of the fitting window
    blob : list [int]
        A blob, as returned by the find peak function
    stack : ndarray
        The data to fit; if None it is pulled from `_fitPeaks_sim.stack`

    Returns
    -------
    df : DataFrame
        A pandas DataFrame that contains all the fit parameters for a
        full stack.
    """
    # fix stack
    if stack is None:
        # if stack is None we know we've been decorated
        stack = _fitPeaks_sim.stack
    # pull parameters from the blob
    y, x, w, amp = blob
    # generate a slice
    myslice = slice_maker(y, x, fitwidth)
    # save the upper left coordinates for later use
    ystart = myslice[0].start
    xstart = myslice[1].start
    # insert the equivalent of `:` at the beginning
    myslice.insert(0, slice(None, None, None))
    # pull the substack (index with a tuple, numpy no longer accepts lists)
    substack = stack[tuple(myslice)]
    # fit the max projection for a good initial guess
    max_z = Gauss2D(substack.max(0))
    max_z.optimize_params(**kwargs)
    # save the initial guess for later use
    guess_params = max_z.opt_params
    # check to see if the initial fit was successful, if so proceed
    if np.isfinite(guess_params).all():

        def get_params(sub_slice):
            """A helper for the list comprehension below.

            Fits a Gaussian to one z-slice and shifts the fit window
            coordinates back to full ROI coordinates.
            """
            # set up the fit object
            fit = Gauss2D(sub_slice)
            # do the fit, using the guess parameters
            fit.optimize_params(guess_params=guess_params, **kwargs)
            # get the optimized parameters as a dict
            opt = fit.all_params_dict()
            # update coordinates
            opt['x0'] += xstart
            opt['y0'] += ystart
            # add an estimate of the noise
            opt['noise'] = (sub_slice - fit.fit_model).std()
            # return updated coordinates
            return opt

        # fit every z-slice of the substack
        peakfits = [get_params(sub_slice) for sub_slice in substack]
        # turn everything into a data frame for easy manipulation
        peakfits_df = pd.DataFrame(peakfits)
        # convert sigmas to positive values
        peakfits_df[['sigma_x', 'sigma_y']] = \
            abs(peakfits_df[['sigma_x', 'sigma_y']])
        peakfits_df.index.name = 'slice'
        return peakfits_df
    else:
        # initial fit failed, return None
        return None

def _fitPeaks_psf(fitwidth, blob, stack, **kwargs):
    """Fitting subfunction for PSFStackAnalyzer."""
    # check if we're being dispatched from the multiprocessing pool
    if stack is None:
        stack = _fitPeaks_psf.stack
    # unpack peak variables
    y, x, w, amp = blob
    # make the slice around the blob
    myslice = slice_maker(y, x, fitwidth)
    # find the start
    ystart = myslice[0].start
    xstart = myslice[1].start
    # insert the equivalent of `:` at the beginning
    myslice.insert(0, slice(None, None, None))
    # make the substack (index with a tuple, numpy no longer accepts lists)
    substack = stack[tuple(myslice)]
    # we could do median filtering on the substack before attempting to
    # find the max slice, but that could still get messed up by salt and
    # pepper noise:
    # my_max = np.unravel_index(substack.argmax(), substack.shape)
    # so use the sum of each z-slice instead
    my_max = substack.sum((1, 2)).argmax()
    # now change the slice to be that z-slice only
    myslice[0] = my_max
    substack = stack[tuple(myslice)]
    # prep our container
    peakfits = []
    # initial fit
    max_z = Gauss2D(substack)
    max_z.optimize_params(**kwargs)
    if np.isfinite(max_z.opt_params).all():
        # recenter the coordinates and add a slice variable
        opt_params = max_z.all_params_dict()
        opt_params['slice'] = my_max
        opt_params['x0'] += xstart
        opt_params['y0'] += ystart
        # append to our list
        peakfits.append(opt_params.copy())
        # pop the slice parameter
        opt_params.pop('slice')
        # fit forwards and backwards from the brightest slice
        forwardrange = range(my_max + 1, stack.shape[0])
        backwardrange = reversed(range(0, my_max))
        peakfits += fitPeak(
            stack, forwardrange, fitwidth, opt_params.copy(), quiet=True)
        peakfits += fitPeak(
            stack, backwardrange, fitwidth, opt_params.copy(), quiet=True)
        # turn everything into a data frame for easy manipulation
        peakfits_df = pd.DataFrame(peakfits)
        # convert sigmas to positive values
        peakfits_df[['sigma_x', 'sigma_y']] = \
            abs(peakfits_df[['sigma_x', 'sigma_y']])
        return peakfits_df.set_index('slice').sort_index()
    else:
        print('blob {} is unfittable'.format(blob))
        return None

def fitPeak(stack, slices, width, startingfit, **kwargs):
    """Fit a peak through the stack.

    The peak is tracked through the stack, assuming that moves are
    relatively small from one slice to the next.

    Parameters
    ----------
    slices : iterator
        An iterator which dictates which slices to fit; should yield
        integers only
    width : integer
        Width of the fitting window
    startingfit : dict
        Fit coefficients

    Returns
    -------
    list : list of dicts
        A list of dictionaries containing the best fits. Easy to turn
        into a DataFrame.
    """
    # set up our variable to return
    toreturn = []
    # grab the starting fit parameters
    popt_d = startingfit.copy()
    y0 = int(round(popt_d['y0']))
    x0 = int(round(popt_d['x0']))
    # infer the model type from the number of entries in the dict
    if len(popt_d) == 6 * 2:
        modeltype = 'norot'
    elif len(popt_d) == 5 * 2:
        modeltype = 'sym'
    elif len(popt_d) == 7 * 2:
        modeltype = 'full'
    else:
        raise ValueError("Dictionary is the wrong size: {}".format(popt_d))
    for s in slices:
        # make the slice
        try:
            myslice = slice_maker(y0, x0, width)
        except RuntimeError:
            print('Fit window moved to edge of ROI')
            break
        else:
            # pull the starting values from it
            ystart = myslice[0].start
            xstart = myslice[1].start
            # insert the z-slice number
            myslice.insert(0, s)
            # set up the fit and perform it using the last best params
            sub_stack = stack[tuple(myslice)]
            fit = Gauss2D(sub_stack)
            # move our guess coefs into the window
            popt_d['x0'] -= xstart
            popt_d['y0'] -= ystart
            # leave this in for now for easier debugging in the future
            try:
                fit.optimize_params(popt_d, **kwargs)
            except TypeError:
                print(repr(myslice))
                raise
            # if there was an error performing the fit, try again
            # without a guess
            if fit.error:
                fit.optimize_params(modeltype=modeltype, **kwargs)
            # if there's no error, update the center of the fitting
            # window and move on to the next fit
            if not fit.error:
                popt_d = fit.all_params_dict()
                popt_d['x0'] += xstart
                popt_d['y0'] += ystart
                popt_d['slice'] = s
                # estimate the noise as the standard deviation of the
                # residuals of the fit
                popt_d['noise'] = (sub_stack - fit.fit_model).std()
                toreturn.append(popt_d.copy())
                y0 = int(round(popt_d['y0']))
                x0 = int(round(popt_d['x0']))
            else:
                # if the fit fails, make sure _not_ to update positions
                bad_fit = fit.all_params_dict()
                bad_fit['slice'] = s
                # the noise of a failed fit is not really useful
                bad_fit['noise'] = np.nan
                toreturn.append(bad_fit.copy())
                # restore the guess to full-ROI coordinates so the next
                # iteration doesn't shift it twice
                popt_d['x0'] += xstart
                popt_d['y0'] += ystart
    return toreturn

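# Usage sketch (hypothetical values; ``stack`` and ``opt_params`` are not
# defined here). ``startingfit`` is expected to be a full parameter dict as
# produced by ``all_params_dict`` -- apparently the optimized values plus
# their uncertainties, hence the factor of two in the length checks above:
#
#     >>> start = opt_params          # e.g. from _fitPeaks_psf's best slice
#     >>> trace = fitPeak(stack, range(10, 50), 12, start, quiet=True)
#     >>> pd.DataFrame(trace).set_index("slice")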