def add_noise(image, noise_mean=.1, smoothing=.01, poisson_lambda=1000):
    """Add simulated noise to images.

    Intended for use with exactly calculated images to make them look
    more like noisy 'real' measurements. Real image noise usually has
    correlation, so we smooth the raw random variable. The noise_mean
    can be controlled independently of the poisson_lambda that controls
    the shape of the distribution. In general, you can stick with our
    default of a large poisson_lambda (i.e. for imaging conditions not
    near the shot-noise limit).

    Defaults are set to give noise vaguely similar to what we tend to
    see in our holographic imaging.

    Parameters
    ----------
    image : xarray.DataArray
        The image to add noise to.
    noise_mean : float
        Mean of the added noise, expressed as a fraction of the mean
        image value.
    smoothing : float
        Fraction of the image size to smooth by. Should in general
        be << 1.
    poisson_lambda : float
        Used to compute the shape of the noise distribution. You can
        generally leave this at its default value unless you are
        simulating shot-noise-limited imaging.

    Returns
    -------
    noisy_image : xarray.DataArray
        A copy of the input image with noise added.
    """
    return copy_metadata(
        image,
        image + simulate_noise(image.shape, noise_mean, smoothing,
                               poisson_lambda) * get_values(image.mean()))
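# Illustrative usage sketch (not from the original source): add noise to a
# synthetic "exact" image. Assumes `simulate_noise`, `copy_metadata`, and
# `get_values` from this module are in scope alongside `add_noise`.
import numpy as np
import xarray as xr

clean = xr.DataArray(np.ones((100, 100)), dims=['x', 'y'])
noisy = add_noise(clean, noise_mean=0.05)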
def bg_correct(raw, bg, df=None):
    """Correct an image by dividing out a background.

    The calculation used is (raw - df) / (bg - df).

    Parameters
    ----------
    raw : xarray.DataArray
        Image to be background divided.
    bg : xarray.DataArray
        Background image recorded with the same optical setup.
    df : xarray.DataArray
        Dark-field image recorded without illumination.

    Returns
    -------
    corrected_image : xarray.DataArray
        A copy of the background-divided input image. A None value of
        noise_sd is updated to match bg.
    """
    if df is None:
        df = raw.copy()
        df[:] = 0
    if not (raw.shape == bg.shape == df.shape and
            list(get_spacing(raw)) ==
            list(get_spacing(bg)) ==
            list(get_spacing(df))):
        raise BadImage(
            "raw and background images must have the same shape and spacing")

    holo = (raw - df) / zero_filter(bg - df)
    holo = copy_metadata(raw, holo)
    if (hasattr(holo, 'noise_sd') and hasattr(bg, 'noise_sd')
            and holo.noise_sd is None):
        holo = update_metadata(holo, noise_sd=bg.noise_sd)
    return holo
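# Illustrative usage sketch: background-divide a raw hologram. The file
# names and spacing below are placeholders, not from the original source;
# `hp.load_image` is holopy's image loader.
import holopy as hp

raw = hp.load_image('image0001.tif', spacing=0.0851)
bg = hp.load_image('bg0001.tif', spacing=0.0851)
dark = hp.load_image('dark0001.tif', spacing=0.0851)
holo = bg_correct(raw, bg, df=dark)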
def test_copies_metadata_keys(self):
    metadata = make_metadata()
    data = make_data()
    copied = copy_metadata(metadata, data)
    for key in METADATA_VALUES.keys():
        self.assertIn(key, copied.attrs)
        self.assertTrue(hasattr(copied, key))
def make_subset_data(data, random_subset=None, pixels=None,
                     return_selection=False):
    if random_subset is None and pixels is None:
        return data
    if random_subset is not None and pixels is not None:
        raise ValueError("You can only specify one of pixels or random_subset")

    tot_pix = len(data.x) * len(data.y)
    if pixels is not None:
        n_sel = pixels
    else:
        n_sel = int(np.ceil(tot_pix * random_subset))
    selection = np.random.choice(tot_pix, n_sel, replace=False)
    subset = flat(data).isel(flat=selection)
    subset = copy_metadata(data, subset, do_coords=False)

    # record the original grid so it can be reconstructed later
    shape = (len(data.x), len(data.y))
    spacing = get_spacing(data)
    # .item() rather than np.asscalar, which was removed from numpy
    start = (data.x[0].item(), data.y[0].item())
    coords = {key: val.values for key, val in
              dict_without(dict(data.coords), ['x', 'y', 'z']).items()}
    subset.attrs['original_dims'] = yaml.dump((shape, spacing, start, coords))

    if return_selection:
        return subset, selection
    else:
        return subset
def best_fit(self):
    shape, spacing, start, coords = yaml.load(
        self.dataset.data.original_dims)
    schema = detector_grid(shape, spacing, extra_dims=coords)
    schema['x'] = schema['x'] + start[0]
    schema['y'] = schema['y'] + start[1]
    schema = copy_metadata(self.dataset.data, schema, do_coords=False)
    return self.model._forward(self.values(), schema)
def test_copies_coords(self):
    metadata = make_metadata()
    data = make_data()
    copied = copy_metadata(metadata, data)
    for coordinate in data.coords.keys():
        old_coords = data.coords[coordinate].values
        copied_coords = copied.coords[coordinate].values
        self.assertTrue(np.all(old_coords == copied_coords))
def test_no_metadata(self):
    filename = os.path.join(self.tempdir, 'image0007.tif')
    header = ifd2()
    header[270] = 'Dummy String'
    pilimage.fromarray(self.holo.values[0]).save(filename, tiffinfo=header)
    # load doesn't work
    self.assertRaises(NoMetadata, load, filename)
    # load_image does
    l = load_image(filename, spacing=get_spacing(self.holo))
    assert_obj_close(l, copy_metadata(l, self.holo))
def test_copies_metadata_values(self):
    metadata = make_metadata()
    data = make_data()
    copied = copy_metadata(metadata, data)

    # we check illum_polarization separately:
    illum_polarization = METADATA_VALUES['illum_polarization']
    self.assertTrue(
        np.all(illum_polarization == copied.illum_polarization.values[:2]))
    # then we check the rest:
    for key, value in METADATA_VALUES.items():
        if key != 'illum_polarization':
            self.assertEqual(value, getattr(copied, key))
def normalize(image):
    """
    Normalize an image by dividing by the pixel average.
    This gives the image a mean value of 1.

    Parameters
    ----------
    image : xarray.DataArray
        The array to normalize

    Returns
    -------
    normalized_image : xarray.DataArray
        The normalized image
    """
    return copy_metadata(image, image * 1.0 / image.sum() * image.size)
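# Quick illustrative check (not from the original source): after
# normalize, the pixel mean is 1.
import numpy as np
import xarray as xr

img = xr.DataArray(np.random.rand(64, 64) + 0.5, dims=['x', 'y'])
assert np.isclose(float(normalize(img).mean()), 1.0)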
def forward(self, pars):
    if hasattr(self.data, 'original_dims'):
        # dealing with subset data
        original_dims = self.data.original_dims
        # can't currently handle non-0 values of z, as in detector_grid
        x = original_dims['x']
        y = original_dims['y']
        shape = (len(x), len(y))
        spacing = (np.diff(x)[0], np.diff(y)[0])
        extra_dims = dict_without(original_dims, ['x', 'y', 'z'])
        schema = detector_grid(shape, spacing, extra_dims=extra_dims)
        schema = copy_metadata(self.data, schema, do_coords=False)
        schema['x'] = x
        schema['y'] = y
    else:
        schema = self.data
    return self.model.forward(pars, schema)
def detrend(image):
    '''
    Remove linear trends from an image.

    Performs a 2-axis linear detrend using scipy.signal.detrend.

    Parameters
    ----------
    image : xarray.DataArray
        Image to process

    Returns
    -------
    image : xarray.DataArray
        Image with linear trends removed
    '''
    return copy_metadata(
        image, dt(dt(image, image.dims.index('x')), image.dims.index('y')))
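# Illustrative check (not from the original source): a pure linear ramp
# detrends to (numerically) zero. Assumes `dt` is scipy.signal.detrend, as
# the docstring above states.
import numpy as np
import xarray as xr

xx, yy = np.meshgrid(np.arange(32.), np.arange(32.), indexing='ij')
ramp = xr.DataArray(0.5 * xx + 0.25 * yy, dims=['x', 'y'])
assert np.allclose(detrend(ramp).values, 0, atol=1e-8)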
def zero_filter(image):
    '''
    Search for and interpolate pixels equal to 0.
    This is to avoid NaNs when a hologram is divided by a BG with 0's.

    Parameters
    ----------
    image : xarray.DataArray
        Image to process

    Returns
    -------
    image : xarray.DataArray
        Image where pixels equal to 0 are instead given values equal to
        the average of their neighbors. dtype is the same as the input
        image.
    '''
    zero_pix = np.where(image == 0)
    output = image.copy()

    # check to see if adjacent pixels are 0, if more than 1 dead pixel
    if len(zero_pix[0]) > 1:
        delta_rows = zero_pix[0] - np.roll(zero_pix[0], 1)
        delta_cols = zero_pix[1] - np.roll(zero_pix[1], 1)
        if ((1 in delta_rows[np.where(delta_cols == 0)]) or
                (1 in delta_cols[np.where(delta_rows == 0)])):
            raise BadImage(
                'Image has adjacent dead pixels, cannot remove dead pixels')

    for row, col in zip(zero_pix[0], zero_pix[1]):
        if ((row > 0) and (row < (image.shape[0] - 1)) and
                (col > 0) and (col < image.shape[1] - 1)):
            # in the bulk: average the 8 neighbors (the center is 0)
            output[row, col] = np.sum(
                image[row - 1:row + 2, col - 1:col + 2]) / 8.
        else:
            # deal with edges by padding with the image average
            im_avg = image.sum() / (image.size - len(zero_pix[0]))
            padded_im = np.ones(
                (image.shape[0] + 2, image.shape[1] + 2)) * im_avg
            padded_im[1:-1, 1:-1] = image
            output[row, col] = np.sum(
                padded_im[row:row + 3, col:col + 3]) / 8.
        print('Pixel with value 0 reset to nearest neighbor average')
    return copy_metadata(image, output)
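# Illustrative check (not from the original source): a single interior
# dead pixel is replaced by the mean of its eight neighbors (here, 2.0).
import numpy as np
import xarray as xr

img = xr.DataArray(np.full((5, 5), 2.0), dims=['x', 'y'])
img[2, 2] = 0
fixed = zero_filter(img)
assert float(fixed[2, 2]) == 2.0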
def make_subset_data(data, random_subset=None, pixels=None,
                     return_selection=False):
    if random_subset is None and pixels is None:
        return data
    if random_subset is not None and pixels is not None:
        raise ValueError("You can only specify one of pixels or random_subset")

    if pixels is not None:
        n_sel = pixels
    else:
        n_sel = int(np.ceil(data.size * random_subset))
    selection = np.random.choice(data.size, n_sel, replace=False)
    subset = flat(data)[selection]
    subset = copy_metadata(data, subset, do_coords=False)

    if return_selection:
        return subset, selection
    else:
        return subset
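# Illustrative usage sketch: select a random 10% of pixels, e.g. to speed
# up fitting. The detector_grid schema here is stand-in data; detector_grid
# is holopy's grid constructor.
import holopy as hp

schema = hp.detector_grid(shape=100, spacing=0.1)
subset, sel = make_subset_data(schema, random_subset=0.1,
                               return_selection=True)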
def subimage(arr, center, shape):
    """
    Pick out a region of an image or other array.

    Parameters
    ----------
    arr : xarray.DataArray
        The array to subimage
    center : tuple of ints or floats
        The desired center of the region; should have the same number of
        elements as arr has dimensions. Floats will be rounded.
    shape : int or tuple of ints
        Desired shape of the region. If a single int is given, the
        region will have that size along every axis. Shape should be
        even.

    Returns
    -------
    sub : xarray.DataArray
        Subset of shape `shape` centered at `center`. DataArray coords
        will be set such that the upper left corner of the output has
        coordinates relative to the input.
    """
    center = np.round(center).astype(int)
    if np.isscalar(shape):
        shape = np.repeat(shape, arr.ndim)
    assert len(shape) == arr.ndim

    extent = [
        slice(int(np.round(c - s / 2)), int(np.round(c + s / 2)))
        for c, s in zip(center, shape)
    ]
    return copy_metadata(arr, arr.isel(x=extent[0], y=extent[1]))
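# Illustrative check (not from the original source): crop a 64x64 window
# centered at (50, 50).
import numpy as np
import xarray as xr

img = xr.DataArray(np.random.rand(100, 100), dims=['x', 'y'],
                   coords={'x': np.arange(100), 'y': np.arange(100)})
win = subimage(img, center=(50, 50), shape=64)
assert win.shape == (64, 64)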
def finalize(detector, result):
    if not hasattr(detector, 'flat'):
        result = from_flat(result)
    return copy_metadata(detector, result, do_coords=False)
def ps_propagate_plane(data, d, L, beam_c, out_schema=None, old_Ip=False):
    '''
    Propagates light back through a hologram that was taken using a
    diverging reference beam. Propagation is to one plane only. Only
    propagation through media with refractive index 1 is supported.

    Based on the algorithm described in Manfred H. Jericho and H. Jurgen
    Kreuzer, "Point Source Digital In-Line Holographic Microscopy",
    Chapter 1 of Coherent Light Microscopy, Springer, 2010.
    http://link.springer.com/chapter/10.1007%2F978-3-642-15813-1_1

    data is a holopy Xarray. It is the hologram to reconstruct. Must be
    square. The pixel spacing must also be square.
    d = distance from pinhole to reconstructed image, in meters (this is
    z in Jericho and Kreuzer). Must be a scalar.
    L = distance from screen to pinhole, in meters
    beam_c = [x, y] coordinates of beam center, in pixels
    out_schema = size of output image and pixel spacing; default is the
    schema of data.
    if old_Ip == True, returns Ip to be used in calculations on the stack
    if old_Ip == False, compute reconstructed image as normal
    if old_Ip is an image, use it to speed up calculations

    returns an image (volume) corresponding to the reconstruction at
    plane(s) d.
    '''
    npix0 = float(len(data.x))  # size of original image in pixels
    wavelen = float(data.illum_wavelen)  # laser wavelength in meters
    n_medium = float(data.medium_index)  # unused for now (assumes n_medium = 1)
    datavals = data.values.squeeze()

    Dx, Dy = get_spacing(data)  # size of pixels on camera

    if out_schema is None:
        out_spacing = Dx
    else:
        out_spacing = get_spacing(out_schema)[0]

    # get number of pixels to reconstruct given the desired output spacing
    def X0_f(npix):
        return -Dx * (beam_c[0] + (npix - npix0) * 0.5)

    def to_solve(npix):
        X0 = X0_f(npix)
        edge = X0 + (npix - 1) * Dx
        return (edge / np.sqrt(L**2 + edge**2)
                - X0 / np.sqrt(L**2 - X0**2)
                - wavelen / out_spacing)

    npix = int(fsolve(to_solve, npix0)[0])

    # center coordinates
    i_c = beam_c[0] + (npix - npix0) / 2
    j_c = beam_c[1] + (npix - npix0) / 2

    # set (X0, Y0) so beam center is at index (i_c, j_c)
    X0 = -i_c * Dx
    Y0 = -j_c * Dy

    # scaling constants (eqn 1.32)
    X0p = X0 * L / np.sqrt(L * L + X0 * X0)
    Y0p = Y0 * L / np.sqrt(L * L + Y0 * Y0)

    con = X0 + (npix - 1) * Dx  # useful constant
    Dxp = (L * con / npix / np.sqrt(L * L + con * con)
           - L * X0 / npix / np.sqrt(L * L + X0 * X0))  # Delta_x^prime
    con = Y0 + (npix - 1) * Dy  # useful constant
    Dyp = (L * con / npix / np.sqrt(L * L + con * con)
           - L * Y0 / npix / np.sqrt(L * L + Y0 * Y0))  # Delta_y^prime

    # 'magic' spacing actually used in the reconstructed image (eqn 1.34)
    spacing = wavelen * L / npix / np.array([Dxp, Dyp])

    ikz = 2j * np.pi * d / wavelen  # this is (ikz)

    # calculate I'(X, Y) (eqn 1.27)
    print('Calculating Ip')

    def Ip_calc(i, j):
        # (X', Y') coordinates corresponding to indices (i, j)
        Xp = X0p + i * Dxp
        Yp = Y0p + j * Dyp

        # useful constant (this is L/R')
        L_over_Rp = L**2 - Xp**2 - Yp**2
        L_over_Rp = np.where(L_over_Rp >= 0, L_over_Rp, 0.0)
        L_over_Rp = L / np.sqrt(L_over_Rp)
        L_over_Rp = np.where(L_over_Rp == np.inf, 0.000001, L_over_Rp)

        if isinstance(old_Ip, bool):
            # (X, Y) coordinates in original image
            X = Xp * L_over_Rp
            Y = Yp * L_over_Rp

            # (X, Y) indices of original image, but (npix, npix) in size
            i_X = np.array((X - X0) / Dx)
            i_Y = np.array((Y - Y0) / Dy)
            i_X = (i_X - (npix - npix0) / 2).astype(int)
            i_Y = (i_Y - (npix - npix0) / 2).astype(int)

            if old_Ip:
                # returns partially computed I'
                result = interpolate2D(datavals, i_X, i_Y, 0) * L_over_Rp**4
            else:
                # returns full I'
                result = (interpolate2D(datavals, i_X, i_Y, 0)
                          * L_over_Rp**4 * np.exp(ikz / L_over_Rp))
        else:
            result = old_Ip * np.exp(ikz / L_over_Rp)

        return result

    # get I'
    result = np.fromfunction(Ip_calc, (npix, npix), dtype=int)

    if isinstance(old_Ip, bool) and old_Ip:
        # return partially computed I' and uncropped size of reconstruction
        return result, npix

    # compute final result, K_nm (eqn 1.33)
    i2Pi_over_N = 2j * np.pi / npix  # this is i*2pi/N

    phase_factor = np.fromfunction(
        lambda i, j: np.exp(-i2Pi_over_N * (i * i_c + j * j_c)),
        (npix, npix), dtype=int)

    print('Taking FFT')
    result = fftpack.ifft2(
        fftpack.fftshift(result * phase_factor, axes=[0, 1]),
        axes=[0, 1], overwrite_x=True)

    print('Multiplying prefactor')
    phase_factor = np.fromfunction(
        lambda i, j: np.exp(
            i2Pi_over_N * ((i - i_c) * X0p / Dxp + (j - j_c) * Y0p / Dyp)),
        (npix, npix), dtype=int)
    result = Dxp * Dyp * phase_factor * result

    # crop to correct size
    if npix > npix0:
        x_cen = int(npix / 2)
        y_cen = int(npix / 2)
        if out_schema is None:
            offset = int(npix0 / 2)
        else:
            offset = int(len(out_schema.x) / 2)
        result = result[x_cen - offset:x_cen + offset,
                        y_cen - offset:y_cen + offset]

    # return Image result
    return copy_metadata(data, data_grid(result, spacing=spacing, z=d))
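# Illustrative usage sketch: reconstruct one plane of a point-source
# hologram. Every numeric value and the file name below are placeholders,
# not from the original source.
import holopy as hp

holo = hp.load_image('ps_hologram.tif', spacing=6.8e-6,
                     illum_wavelen=405e-9, medium_index=1.0)
rec = ps_propagate_plane(holo, d=20e-6, L=3e-3, beam_c=[256, 256])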
def test_copies_name(self):
    metadata = make_metadata()
    data = make_data()
    copied = copy_metadata(metadata, data)
    self.assertEqual(metadata.name, copied.name)
def load_average(filepath, refimg=None, spacing=None, medium_index=None,
                 illum_wavelen=None, illum_polarization=None, normals=None,
                 noise_sd=None, channel=None, image_glob='*.tif'):
    """
    Average a set of images (usually as a background).

    Parameters
    ----------
    filepath : string or list(string)
        Directory or list of filenames or filepaths. If filepath is a
        directory, it will average all images matching image_glob.
    refimg : xarray.DataArray
        Reference image to provide spacing and metadata for the new
        image.
    spacing : float
        Spacing between pixels in the images. Used preferentially over
        the refimg value if both are provided.
    medium_index : float
        Refractive index of the medium in the images. Used
        preferentially over the refimg value if both are provided.
    illum_wavelen : float
        Wavelength of illumination in the images. Used preferentially
        over the refimg value if both are provided.
    illum_polarization : list-like
        Polarization of illumination in the images. Used preferentially
        over the refimg value if both are provided.
    normals : deprecated
        Must be None; any other value raises a ValueError.
    noise_sd : float
        Noise standard deviation to assign to the output. If None and
        more than one image is averaged, it is estimated from the stack.
    channel : int or list of ints
        Color channel(s) to load from multichannel images.
    image_glob : string
        Glob used to select images (if filepath is a directory).

    Returns
    -------
    averaged_image : xarray.DataArray
        Image which is an average of images; the noise_sd attribute
        contains the average pixel stdev normalized by the total image
        intensity.
    """
    if normals is not None:
        raise ValueError(NORMALS_DEPRECATION_MESSAGE)
    if isinstance(filepath, str):
        if os.path.isdir(filepath):
            filepath = glob.glob(os.path.join(filepath, image_glob))
        else:
            # only a single image
            filepath = [filepath]
    if len(filepath) < 1:
        raise LoadError(filepath, "No images found")

    # read spacing from refimg if none provided
    if spacing is None:
        spacing = get_spacing(refimg)

    # read colour channels from refimg
    channel_dict = {'0': 'red', '1': 'green', '2': 'blue'}
    if channel is None and refimg is not None and illumination in refimg.dims:
        channel = [
            i for i, col in enumerate(['red', 'green', 'blue'])
            if col in refimg[illumination].values
        ]

    if np.isscalar(spacing):
        spacing = np.repeat(spacing, 2)

    # calculate the average
    accumulator = Accumulator()
    for path in filepath:
        accumulator.push(load_image(path, spacing, channel=channel))
    mean_image = accumulator.mean()

    # calculate average noise from the image stack
    if noise_sd is None and len(filepath) > 1:
        if channel:
            noise_sd = xr.DataArray(
                accumulator.cv(),
                [[channel_dict[str(ch)] for ch in channel]],
                ['illumination'])
        else:
            noise_sd = ensure_array(accumulator.cv())

    # crop according to refimg dimensions and copy its metadata
    if refimg is not None:
        def extent(i):
            name = ['x', 'y'][i]
            return np.around(refimg[name].values / spacing[i]).astype('int')
        mean_image = mean_image.isel(x=extent(0), y=extent(1))
        mean_image['x'] = refimg.x
        mean_image['y'] = refimg.y
        mean_image = copy_metadata(refimg, mean_image, do_coords=False)

    # overwrite metadata from refimg with any provided values
    return update_metadata(mean_image, medium_index, illum_wavelen,
                           illum_polarization, normals, noise_sd)
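# Illustrative usage sketch tying load_average to bg_correct: average a
# directory of background frames, then divide them out. The paths and
# spacing are placeholders, not from the original source.
import holopy as hp

raw = hp.load_image('image0001.tif', spacing=0.0851)
bg = load_average('backgrounds/', refimg=raw)
holo = bg_correct(raw, bg)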
def test_does_not_change_data(self):
    metadata = make_metadata()
    data = make_data()
    copied = copy_metadata(metadata, data)
    self.assertTrue(np.all(data.values == copied.values))