def bg_correct(raw, bg, df=None):
    """Correct for noisy images by dividing by a background.

    The calculation used is (raw - df) / (bg - df).

    Parameters
    ----------
    raw : xarray.DataArray
        Image to be background divided.
    bg : xarray.DataArray
        Background image recorded with the same optical setup.
    df : xarray.DataArray
        Dark field image recorded without illumination.

    Returns
    -------
    corrected_image : xarray.DataArray
        A copy of the background-divided input image. If its noise_sd
        is None, it is updated to match that of bg.
    """
    if df is None:
        df = raw.copy()
        df[:] = 0
    if not (raw.shape == bg.shape == df.shape
            and list(get_spacing(raw)) == list(get_spacing(bg))
            == list(get_spacing(df))):
        raise BadImage(
            "raw and background images must have the same shape and spacing")
    holo = (raw - df) / zero_filter(bg - df)
    holo = copy_metadata(raw, holo)
    if (hasattr(holo, 'noise_sd') and hasattr(bg, 'noise_sd')
            and holo.noise_sd is None):
        holo = update_metadata(holo, noise_sd=bg.noise_sd)
    return holo
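# Usage sketch for bg_correct (illustrative, not part of the library): build
# synthetic raw and background frames with data_grid, assumed here to be
# HoloPy's image constructor, and divide out the background.
# >>> import numpy as np
# >>> raw = data_grid(np.random.uniform(1, 2, (10, 10)), spacing=0.1)
# >>> bg = data_grid(np.full((10, 10), 1.5), spacing=0.1)
# >>> corrected = bg_correct(raw, bg)  # df omitted, so it defaults to zeros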
def make_center_priors(im, z_range_extents=5, xy_uncertainty_pixels=1,
                       z_range_units=None):
    """
    Make sensible default priors for the center of a sphere in a hologram.

    Parameters
    ----------
    im : xarray
        The image you wish to make priors for.
    z_range_extents : float (optional)
        Range to extend a uniform prior for z over, measured in multiples
        of the total extent of the image. The default is 5 extents. This
        is a large range, but since tempering is quite good at refining
        it, it is safer to choose a range sure to include the correct
        value.
    xy_uncertainty_pixels : float (optional)
        The number of pixels of uncertainty to assume for the
        centerfinder. The default of 1 pixel is probably correct for
        most images.
    z_range_units : tuple of float (optional)
        The (min, max) range of the z prior in your data units. If
        provided, z_range_extents is ignored.
    """
    if z_range_units is not None:
        z_range = z_range_units
    else:
        extents = get_extents(im)
        extent = max(extents['x'], extents['y'])
        z_range = 0, extent * z_range_extents
    spacing = get_spacing(im)
    center = center_find(im) * spacing + [im.x[0], im.y[0]]
    xy_sd = xy_uncertainty_pixels * spacing
    return ([Gaussian(c, s) for c, s in zip(center, xy_sd)]
            + [Uniform(*z_range)])
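# Usage sketch (hedged): default center priors for a sphere fit to a
# background-corrected hologram `holo`; Gaussian and Uniform are assumed to
# be the holopy.inference.prior classes this module imports.
# >>> priors = make_center_priors(holo)  # [Gaussian(x), Gaussian(y), Uniform(z)]
# >>> priors = make_center_priors(holo, z_range_units=(5, 15))  # explicit z range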
def test_on_same_spacing(self):
    true_spacing = 0.1
    detector = detector_grid((10, 10), spacing=true_spacing)
    spacing = get_spacing(detector)
    self.assertEqual(spacing[0], true_spacing)
    self.assertEqual(spacing[1], true_spacing)
def make_subset_data(data, random_subset=None, pixels=None,
                     return_selection=False):
    if random_subset is None and pixels is None:
        return data
    if random_subset is not None and pixels is not None:
        raise ValueError("You can only specify one of pixels or random_subset")
    tot_pix = len(data.x) * len(data.y)
    if pixels is not None:
        n_sel = pixels
    else:
        n_sel = int(np.ceil(tot_pix * random_subset))
    selection = np.random.choice(tot_pix, n_sel, replace=False)
    subset = flat(data).isel(flat=selection)
    subset = copy_metadata(data, subset, do_coords=False)
    shape = (len(data.x), len(data.y))
    spacing = get_spacing(data)
    # .item() replaces the removed np.asscalar for scalar conversion
    start = (data.x[0].item(), data.y[0].item())
    coords = {key: val.values
              for key, val in dict_without(dict(data.coords),
                                           ['x', 'y', 'z']).items()}
    subset.attrs['original_dims'] = yaml.dump((shape, spacing, start, coords))
    if return_selection:
        return subset, selection
    else:
        return subset
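# Usage sketch (hedged): fit against a random 1% of pixels, or an exact
# count, to speed up inference; the flat-index selection can be returned
# for reuse on other arrays. The original grid geometry survives in
# subset.attrs['original_dims'].
# >>> subset = make_subset_data(holo, random_subset=0.01)
# >>> subset, selection = make_subset_data(holo, pixels=500,
# ...                                      return_selection=True)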
def _make_center_priors(self, params):
    image_x_values = self.data.x.values
    image_min_x = image_x_values.min()
    image_max_x = image_x_values.max()

    image_y_values = self.data.y.values
    image_min_y = image_y_values.min()
    image_max_y = image_y_values.max()

    if ('x' not in params) or ('y' not in params):
        pixel_spacing = get_spacing(self.data)
        image_lower_left = np.array([image_min_x, image_min_y])
        center = center_find(self.data) * pixel_spacing + image_lower_left
    else:
        center = [params['x'], params['y']]

    xpar = prior.Uniform(image_min_x, image_max_x, guess=center[0])
    ypar = prior.Uniform(image_min_y, image_max_y, guess=center[1])

    extents = get_extents(self.data)
    extent = max(extents['x'], extents['y'])
    zextent = 5
    zpar = prior.Uniform(
        -extent * zextent, extent * zextent, guess=params['z'])
    return xpar, ypar, zpar
def test_on_different_spacings(self):
    xspacing = 0.1
    yspacing = 0.2
    detector = detector_grid((10, 10), spacing=(xspacing, yspacing))
    spacing = get_spacing(detector)
    self.assertEqual(spacing[0], xspacing)
    self.assertEqual(spacing[1], yspacing)
def test_no_metadata(self):
    filename = os.path.join(self.tempdir, 'image0007.tif')
    header = ifd2()
    header[270] = 'Dummy String'
    pilimage.fromarray(self.holo.values[0]).save(filename, tiffinfo=header)
    # load doesn't work
    self.assertRaises(NoMetadata, load, filename)
    # load_image does
    l = load_image(filename, spacing=get_spacing(self.holo))
    assert_obj_close(l, copy_metadata(l, self.holo))
def load_image_with_metadata(self, filename):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        loaded = load_image(
            filename,
            name=self.holo.name,
            medium_index=self.holo.medium_index,
            spacing=get_spacing(self.holo),
            illum_wavelen=self.holo.illum_wavelen,
            illum_polarization=self.holo.illum_polarization,
            noise_sd=self.holo.noise_sd)
    return loaded
def test_auto_scaling(self):
    filename = os.path.join(self.tempdir, 'image0001.tif')
    save_image(filename, self.holo, depth='float')
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        l = load_image(filename, name=self.holo.name,
                       spacing=get_spacing(self.holo))
    # Skip checking the full DataArray attrs because it is awkward to keep
    # them through arithmetic. Ideally we would figure out a way to
    # preserve them and switch back to testing fully.
    assert_allclose(l, (self.holo - self.holo.min())
                    / (self.holo.max() - self.holo.min()))
def pack_attrs(a, do_spacing=False):
    new_attrs = {attr_coords: {}}
    if a.name is not None:
        new_attrs['name'] = a.name
    if do_spacing:
        new_attrs['spacing'] = list(get_spacing(a))

    for attr, val in a.attrs.items():
        if isinstance(val, xr.DataArray):
            new_attrs[attr_coords][attr] = OrderedDict()
            for dim in val.dims:
                new_attrs[attr_coords][attr][str(dim)] = val[dim].values
            new_attrs[attr] = list(ensure_array(val.values))
        else:
            new_attrs[attr_coords][attr] = False
            if val is not None:
                new_attrs[attr] = yaml.dump(val)

    new_attrs[attr_coords] = yaml.dump(new_attrs[attr_coords],
                                       default_flow_style=True)
    return new_attrs
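# Usage sketch (hedged): pack_attrs flattens an image's metadata into a
# YAML-serializable dict before writing to disk; DataArray-valued attrs
# become plain lists plus a coordinate description under the module-level
# attr_coords key.
# >>> packed = pack_attrs(holo, do_spacing=True)
# >>> packed['spacing']
# [0.1, 0.1]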
def _make_center_priors(self, data, guess):
    image_x_values = data.x.values
    image_min_x = image_x_values.min()
    image_max_x = image_x_values.max()

    image_y_values = data.y.values
    image_min_y = image_y_values.min()
    image_max_y = image_y_values.max()

    if ('x' in guess) and ('y' in guess):
        x_guess = guess['x']
        y_guess = guess['y']
    elif ('center.0' in guess) and ('center.1' in guess):
        x_guess = guess['center.0']
        y_guess = guess['center.1']
    else:
        pixel_spacing = get_spacing(data)
        image_lower_left = np.array([image_min_x, image_min_y])
        x_guess, y_guess = (center_find(data) * pixel_spacing
                            + image_lower_left)

    extents = get_extents(data)
    # FIXME: 5 is a magic number.
    zextent = 5 * max(extents['x'], extents['y'])
    z_guess = guess['z'] if 'z' in guess else guess['center.2']

    x = Parameter(name='x', value=x_guess, min=image_min_x, max=image_max_x)
    y = Parameter(name='y', value=y_guess, min=image_min_y, max=image_max_y)
    z = Parameter(name='z', value=z_guess, min=-zextent, max=zextent)
    return x, y, z
def load_average(filepath, refimg=None, spacing=None, medium_index=None,
                 illum_wavelen=None, illum_polarization=None, normals=None,
                 noise_sd=None, channel=None, image_glob='*.tif'):
    """
    Average a set of images (usually as a background).

    Parameters
    ----------
    filepath : string or list(string)
        Directory or list of filenames or filepaths. If filepath is a
        directory, all images matching image_glob are averaged.
    refimg : xarray.DataArray
        Reference image to provide spacing and metadata for the new image.
    spacing : float
        Spacing between pixels in the images. Used preferentially over
        the refimg value if both are provided.
    medium_index : float
        Refractive index of the medium in the images. Used preferentially
        over the refimg value if both are provided.
    illum_wavelen : float
        Wavelength of illumination in the images. Used preferentially
        over the refimg value if both are provided.
    illum_polarization : list-like
        Polarization of illumination in the images. Used preferentially
        over the refimg value if both are provided.
    normals : deprecated
        Must be None; passing a value raises an error.
    noise_sd : float
        Noise standard deviation to attach to the result. If omitted and
        more than one image is averaged, it is estimated from the images.
    channel : list of int
        Colour channels to load. If omitted, channels are inferred from
        refimg where possible.
    image_glob : string
        Glob used to select images (if filepath is a directory).

    Returns
    -------
    averaged_image : xarray.DataArray
        Image which is an average of the input images. The noise_sd
        attribute contains the average pixel stdev normalized by the
        total image intensity.
    """
    if normals is not None:
        raise ValueError(NORMALS_DEPRECATION_MESSAGE)
    if isinstance(filepath, str):
        if os.path.isdir(filepath):
            filepath = glob.glob(os.path.join(filepath, image_glob))
        else:
            # only a single image
            filepath = [filepath]
    if len(filepath) < 1:
        raise LoadError(filepath, "No images found")

    # read spacing from refimg if none is provided
    if spacing is None:
        spacing = get_spacing(refimg)

    # read colour channels from refimg
    channel_dict = {'0': 'red', '1': 'green', '2': 'blue'}
    if channel is None and refimg is not None and illumination in refimg.dims:
        channel = [i for i, col in enumerate(['red', 'green', 'blue'])
                   if col in refimg[illumination].values]
    if np.isscalar(spacing):
        spacing = np.repeat(spacing, 2)

    # calculate the average
    accumulator = Accumulator()
    for path in filepath:
        accumulator.push(load_image(path, spacing, channel=channel))
    mean_image = accumulator.mean()

    # estimate average noise from the images
    if noise_sd is None and len(filepath) > 1:
        if channel:
            noise_sd = xr.DataArray(
                accumulator.cv(),
                [[channel_dict[str(ch)] for ch in channel]],
                ['illumination'])
        else:
            noise_sd = ensure_array(accumulator.cv())

    # crop according to refimg dimensions and copy its metadata
    if refimg is not None:
        def extent(i):
            name = ['x', 'y'][i]
            return np.around(refimg[name].values / spacing[i]).astype('int')
        mean_image = mean_image.isel(x=extent(0), y=extent(1))
        mean_image['x'] = refimg.x
        mean_image['y'] = refimg.y
        mean_image = copy_metadata(refimg, mean_image, do_coords=False)

    # overwrite metadata from refimg with explicitly provided values
    return update_metadata(mean_image, medium_index, illum_wavelen,
                           illum_polarization, normals, noise_sd)
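# Usage sketch (hedged): average every background frame in a directory,
# inheriting spacing and metadata from the raw hologram, then divide it
# out. The paths are placeholders.
# >>> bg = load_average('backgrounds/', refimg=raw, image_glob='*.tif')
# >>> holo = bg_correct(raw, bg)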
def ps_propagate_plane(data, d, L, beam_c, out_schema=None, old_Ip=False):
    '''
    Propagates light back through a hologram that was taken using a
    diverging reference beam. Propagation can be to one plane only. Only
    propagation through media with refractive index 1 is supported.

    Based on the algorithm described in Manfred H. Jericho and H. Jurgen
    Kreuzer, "Point Source Digital In-Line Holographic Microscopy,"
    Chapter 1 of Coherent Light Microscopy, Springer, 2010.
    http://link.springer.com/chapter/10.1007%2F978-3-642-15813-1_1

    data is a holopy Xarray. It is the hologram to reconstruct. Must be
    square, and the pixel spacing must also be square.
    d = distance from pinhole to reconstructed image, in meters (this is
    z in Jericho and Kreuzer). Must be a scalar.
    L = distance from screen to pinhole, in meters.
    beam_c = [x, y] coordinates of the beam center, in pixels.
    out_schema = size of output image and pixel spacing; the default is
    the schema of data.
    If old_Ip == True, returns Ip to be used in calculations on the stack.
    If old_Ip == False, computes the reconstructed image as normal.
    If old_Ip is an image, uses it to speed up calculations.

    Returns an image (volume) corresponding to the reconstruction at
    plane(s) d.
    '''
    npix0 = float(len(data.x))  # size of original image in pixels
    wavelen = float(data.illum_wavelen)  # laser wavelength in meters
    n_medium = float(data.medium_index)  # unused for now (assumes n_medium = 1)
    datavals = data.values.squeeze()
    Dx, Dy = get_spacing(data)  # size of pixels on camera

    if out_schema is None:
        out_spacing = Dx
    else:
        out_spacing = get_spacing(out_schema)[0]

    # get number of pixels to reconstruct given the desired output spacing
    def X0_f(npix):
        return -Dx * (beam_c[0] + (npix - npix0) * 0.5)

    def to_solve(npix):
        X0 = X0_f(npix)
        return ((X0 + (npix - 1) * Dx)
                / np.sqrt(L**2 + (X0 + (npix - 1) * Dx)**2)
                - X0 / np.sqrt(L**2 - X0**2)
                - wavelen / out_spacing)

    npix = int(fsolve(to_solve, npix0)[0])

    # center coordinates
    i_c = beam_c[0] + (npix - npix0) / 2
    j_c = beam_c[1] + (npix - npix0) / 2

    # set (X0, Y0) so the beam center is at index (i_c, j_c)
    X0 = -i_c * Dx
    Y0 = -j_c * Dy

    # scaling constants (eqn 1.32)
    X0p = X0 * L / np.sqrt(L * L + X0 * X0)
    Y0p = Y0 * L / np.sqrt(L * L + Y0 * Y0)
    con = X0 + (npix - 1) * Dx  # useful constant
    Dxp = (L * con / npix / np.sqrt(L * L + con * con)
           - L * X0 / npix / np.sqrt(L * L + X0 * X0))  # Delta_x^prime
    con = Y0 + (npix - 1) * Dy  # useful constant
    Dyp = (L * con / npix / np.sqrt(L * L + con * con)
           - L * Y0 / npix / np.sqrt(L * L + Y0 * Y0))  # Delta_y^prime

    # scale actually used in the reconstructed image:
    # the 'magic' spacing (eqn 1.34)
    spacing = wavelen * L / npix / np.array([Dxp, Dyp])

    ikz = 2j * np.pi * d / wavelen  # useful constant (this is ikz)

    # calculate I'(X, Y) (eqn 1.27)
    print('Calculating Ip')

    def Ip_calc(i, j):
        # (X', Y') coordinates corresponding to indices (i, j)
        Xp = X0p + i * Dxp
        Yp = Y0p + j * Dyp

        # useful constant (this is L/R')
        L_over_Rp = L**2 - Xp**2 - Yp**2
        L_over_Rp = np.where(L_over_Rp >= 0, L_over_Rp, 0.0)
        L_over_Rp = L / np.sqrt(L_over_Rp)
        L_over_Rp = np.where(L_over_Rp == np.inf, 0.000001, L_over_Rp)

        if isinstance(old_Ip, bool):
            # (X, Y) coordinates in the original image
            X = Xp * L_over_Rp
            Y = Yp * L_over_Rp

            # (X, Y) indices of the original image, but (npix, npix) in size
            i_X = np.array((X - X0) / Dx)
            i_Y = np.array((Y - Y0) / Dy)
            i_X = (i_X - (npix - npix0) / 2).astype(int)
            i_Y = (i_Y - (npix - npix0) / 2).astype(int)

            if old_Ip:
                # partially computed I'
                result = interpolate2D(datavals, i_X, i_Y, 0) * L_over_Rp**4
            else:
                # full I'
                result = (interpolate2D(datavals, i_X, i_Y, 0)
                          * L_over_Rp**4 * np.exp(ikz / L_over_Rp))
        else:
            result = old_Ip * np.exp(ikz / L_over_Rp)
        return result

    # get I'
    result = np.fromfunction(lambda i, j: Ip_calc(i, j), (npix, npix),
                             dtype=int)

    if isinstance(old_Ip, bool) and old_Ip:
        # return partially computed I' and uncropped size of reconstruction
        return result, npix

    # compute the final result, K_nm (eqn 1.33)
    i2Pi_over_N = 2j * np.pi / npix  # this is i*2*pi/N
    phase_factor = np.fromfunction(
        lambda i, j: np.exp(-i2Pi_over_N * (i * i_c + j * j_c)),
        (npix, npix), dtype=int)

    print('Taking FFT')
    result = fftpack.ifft2(
        fftpack.fftshift(result * phase_factor, axes=[0, 1]),
        axes=[0, 1], overwrite_x=True)

    print('Multiplying prefactor')
    phase_factor = np.fromfunction(
        lambda i, j: np.exp(i2Pi_over_N * ((i - i_c) * X0p / Dxp
                                           + (j - j_c) * Y0p / Dyp)),
        (npix, npix), dtype=int)
    result = Dxp * Dyp * phase_factor * result

    # crop to the correct size (indices must be ints for slicing)
    if npix > npix0:
        x_cen = int(npix / 2)
        y_cen = int(npix / 2)
        if out_schema is None:
            offset = int(npix0 / 2)
        else:
            offset = int(len(out_schema.x) / 2)
        result = result[x_cen - offset:x_cen + offset,
                        y_cen - offset:y_cen + offset]

    # return Image result
    return copy_metadata(data, data_grid(result, spacing=spacing, z=d))
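# Usage sketch (hedged): reconstruct a single plane, then reuse the partially
# computed I' to speed up neighbouring planes of a stack (this mirrors how a
# ps_propagate wrapper would loop over d). Distances are in meters and the
# values are placeholders.
# >>> plane = ps_propagate_plane(holo, d=50e-6, L=1e-3, beam_c=[50, 50])
# >>> Ip, npix = ps_propagate_plane(holo, d=50e-6, L=1e-3, beam_c=[50, 50],
# ...                               old_Ip=True)
# >>> plane2 = ps_propagate_plane(holo, d=60e-6, L=1e-3, beam_c=[50, 50],
# ...                             old_Ip=Ip)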