Example #1
def test_subimage():
    i = detector_grid(shape=(10, 10), spacing=1)
    s = subimage(i, (5, 5), 2)
    assert s.shape == (1, 2, 2)

    i2 = data_grid(i, 1)
    s2 = subimage(i2, (5, 5), 2)
Example #2
def image(self):
    img = np.zeros([4, 4])
    img[:3, 1:] = np.pad(np.zeros([1, 1]),
                         1,
                         'constant',
                         constant_values=1)
    return data_grid(img, spacing=2)
Example #3
def convert_ndarray_to_xarray(array, extra_dims=None):
    # FIXME: extra_dims needs to be an OrderedDict, since the creation of
    # an xarray assumes that iteration over extra_dims occurs in
    # insertion order.
    # FIXME: however, passing ``extra_dims`` as an OrderedDict does not
    # let the tests pass, as holopy.core.metadata.data_grid and
    # holopy.core.metadata.make_coords both assume that dicts iterate
    # in a fixed order.
    if array.ndim > 2:
        z = range(len(array))
    else:
        z = 0
    array = data_grid(array, spacing=1, z=z, extra_dims=extra_dims)
    array.attrs['_image_scaling'] = None
    return array
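
A minimal usage sketch (not part of the original example; it assumes the helper above and its data_grid import are in scope):

import numpy as np

stack = convert_ndarray_to_xarray(np.zeros((3, 10, 10)))  # 3D input: z becomes range(3)
single = convert_ndarray_to_xarray(np.zeros((10, 10)))    # 2D input: z is set to 0
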
Example #4
def load_image(inf,
               spacing=None,
               medium_index=None,
               illum_wavelen=None,
               illum_polarization=None,
               normals=None,
               noise_sd=None,
               channel=None,
               name=None):
    """
    Load data or results

    Parameters
    ----------
    inf : string
        File to load.
    spacing : float or (float, float) (optional)
        Pixel size of images in each dimension; assumes square pixels if a
        single value is given. Set equal to 1 (with a warning) if not passed in.
    medium_index : float (optional)
        Refractive index of the medium.
    illum_wavelen : float (optional)
        Wavelength (in vacuum) of the illuminating light.
    illum_polarization : (float, float) (optional)
        (x, y) polarization vector of the illuminating light.
    noise_sd : float (optional)
        Noise level in the image, normalized to the image intensity.
    channel : int or tuple of ints (optional)
        Number(s) of the channel to load for a color image (in general 0=red,
        1=green, 2=blue).
    name : str (optional)
        Name to assign to the xr.DataArray object returned by load_image.

    Returns
    -------
    obj : xarray.DataArray
        Representation of the image with associated metadata.

    """
    if normals is not None:
        raise ValueError(NORMALS_DEPRECATION_MESSAGE)
    if name is None:
        name = os.path.splitext(os.path.split(inf)[-1])[0]

    with open(inf, 'rb') as pi_raw:
        pi = pilimage.open(pi_raw)
        arr = np.asarray(pi).astype('d')
        try:
            if isinstance(yaml.safe_load(pi.tag[270][0]), dict):
                warnings.warn(
                    "Metadata detected but ignored. Use hp.load to read it.")
        except (AttributeError, KeyError):
            pass

    extra_dims = None
    if channel is None:
        if arr.ndim > 2:
            raise BadImage(
                'Not a greyscale image. You must specify which channel(s) to use'
            )
    elif arr.ndim == 2:
        if channel != 'all':
            warnings.warn("Not a color image (channel number ignored)")
    else:
        # color image with specified channel(s)
        if channel == 'all':
            channel = range(arr.shape[2])
        channel = ensure_array(channel)
        if channel.max() >= arr.shape[2]:
            raise LoadError(
                inf, "The image doesn't have a channel number {0}".format(
                    channel.max()))
        else:
            arr = arr[:, :, channel].squeeze()

            if len(channel) > 1:
                # multiple channels. increase output dimensionality
                if channel.max() <= 2:
                    channel = [['red', 'green', 'blue'][c] for c in channel]
                extra_dims = {illumination: channel}
                if illum_wavelen is not None and not isinstance(
                        illum_wavelen, dict) and len(
                            ensure_array(illum_wavelen)) == len(channel):
                    illum_wavelen = xr.DataArray(ensure_array(illum_wavelen),
                                                 dims=illumination,
                                                 coords=extra_dims)
                if not isinstance(illum_polarization, dict) and np.array(
                        illum_polarization).ndim == 2:
                    pol_index = xr.DataArray(channel,
                                             dims=illumination,
                                             name=illumination)
                    illum_polarization = xr.concat(
                        [to_vector(pol) for pol in illum_polarization],
                        pol_index)

    image = data_grid(arr,
                      spacing=spacing,
                      medium_index=medium_index,
                      illum_wavelen=illum_wavelen,
                      illum_polarization=illum_polarization,
                      noise_sd=noise_sd,
                      name=name,
                      extra_dims=extra_dims)
    return image
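
A minimal usage sketch (not part of the original example; the file name and optical parameters are placeholder values):

import holopy as hp

holo = hp.load_image('image01.tif', spacing=0.0851,
                     medium_index=1.33, illum_wavelen=0.660,
                     illum_polarization=(1, 0))
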
Example #5
def ps_propagate_plane(data, d, L, beam_c, out_schema=None, old_Ip=False):
    '''
    Propagates light back through a hologram that was taken using a diverging
    reference beam. Propagation can be to one plane only.
    Only propagation through media with refractive index 1 is supported.

    Based on the algorithm described in Manfred H. Jericho and H. Jurgen Kreuzer,
    "Point Source Digital In-Line Holographic Microscopy," Chapter 1 of
    Coherent Light Microscopy, Springer, 2010.
    http://link.springer.com/chapter/10.1007%2F978-3-642-15813-1_1

    data is a holopy xarray. It is the hologram to reconstruct. Must be square,
        and the pixel spacing must also be square.
    d = distance from pinhole to reconstructed image, in meters (this is z in
        Jericho and Kreuzer). Must be a scalar.
    L = distance from screen to pinhole, in meters
    beam_c = [x, y] coordinates of beam center, in pixels
    out_schema = size of output image and pixel spacing; default is the schema
        of data.
    old_Ip: if True, return Ip to be used in calculations on the stack;
        if False, compute the reconstructed image as normal;
        if old_Ip is an image, use it to speed up calculations.

    Returns an image (volume) corresponding to the reconstruction at plane(s) d.
    '''

    npix0 = float(len(data.x)) # size of original image in pixels
    wavelen = float(data.illum_wavelen) #laser wavelength in meters
    n_medium = float(data.medium_index) #not used for now (assumes n_medium = 1)
    datavals = data.values.squeeze()
    
    Dx,Dy = get_spacing(data) #size of pixels on camera
    
    if out_schema is None:
        #mag = 1
        out_spacing = Dx
    else:
        #mag = Dx/get_spacing(out_schema)[0]
        out_spacing = get_spacing(out_schema)[0]
    
    #get number of pixels to reconstruct given the desired output spacing
    def X0_f(npix):
        result = -Dx * (beam_c[0] + (npix - npix0)*0.5)
        return result
      
    def to_solve(npix):
        x_max = X0_f(npix) + (npix - 1)*Dx
        result = (x_max/np.sqrt(L**2 + x_max**2)
                  - X0_f(npix)/np.sqrt(L**2 - X0_f(npix)**2)
                  - wavelen/out_spacing)
        return result
        
    npix = int(fsolve(to_solve, npix0)[0])
    
    #npix = npix0*mag #number of pixels to reconstruct (this is an older way of doing the magnification)

    #center coordinates
    i_c = beam_c[0] + (npix - npix0)/2
    j_c = beam_c[1] + (npix - npix0)/2

    #set (X0,Y0) so beam center is at index (i_c,j_c)
    X0=-i_c*Dx
    Y0=-j_c*Dy
    
    #Scaling constants (eqn 1.32)
    X0p = X0*L/np.sqrt(L*L+X0*X0)
    Y0p = Y0*L/np.sqrt(L*L+Y0*Y0)    
    con = X0+(npix-1)*Dx #useful constant
    Dxp = L*con/npix/np.sqrt(L*L+con*con) - L*X0/npix/np.sqrt(L*L+X0*X0) #Delta_x^prime
    con = Y0+(npix-1)*Dy #useful constant
    Dyp = L*con/npix/np.sqrt(L*L+con*con) - L*Y0/npix/np.sqrt(L*L+Y0*Y0) #Delta_y^prime

    #scale actually used in reconstructed image
    spacing = wavelen*L/npix/np.array([Dxp,Dyp]) #calculate 'magic' spacing (eqn 1.34). 

    #useful constant
    ikz = 2j*np.pi*d/wavelen # this is (ikz)

    #Calculate I'(X,Y) (eqn 1.27)
    print('Calculating Ip')

    def Ip_calc(i, j):
        # (X', Y') coordinates corresponding to indices (i, j)
        Xp = X0p + i*Dxp
        Yp = Y0p + j*Dyp
        
        #Useful constant (this is L/R')
        L_over_Rp = L**2-Xp**2-Yp**2
        L_over_Rp = np.where(L_over_Rp >= 0, L_over_Rp, 0.0)
        L_over_Rp = L/np.sqrt(L_over_Rp)
        L_over_Rp = np.where(L_over_Rp == np.inf, 0.000001, L_over_Rp)

        if isinstance(old_Ip, bool):
            # (X,Y) coordinate in original image
            X = Xp*L_over_Rp 
            Y = Yp*L_over_Rp         

            # (X, Y) indices of original image, but (npix, npix) in size
            i_X = np.array( (X-X0)/Dx )
            i_Y = np.array( (Y-Y0)/Dy )
        
            i_X = i_X - (npix - npix0)/2
            i_Y = i_Y - (npix - npix0)/2
        
            i_X = i_X.astype(int)
            i_Y = i_Y.astype(int)
            
            if old_Ip:  # returns partially computed I'        
                result = interpolate2D(datavals,i_X,i_Y,0) * L_over_Rp**4
            else: #returns full I'
                result = interpolate2D(datavals,i_X,i_Y,0) * L_over_Rp**4 * np.exp(ikz/L_over_Rp)

        else:
            result = old_Ip * np.exp(ikz/L_over_Rp)      
        
        return result
        
    #get I'
    result = np.fromfunction(lambda i,j: Ip_calc(i,j), (npix, npix), dtype=int) #result is I'
   
    if isinstance(old_Ip,bool) and old_Ip: # returns partially computed I' and uncropped size of reconstruction
        return result, npix
        
    #compute final result, K_nm (eqn 1.33)
    i2Pi_over_N = 2j*np.pi/npix # this is i*2pi/N
    phase_factor = np.fromfunction(lambda i,j: np.exp( -i2Pi_over_N * (i*i_c + j*j_c) ), (npix, npix), dtype=int)
    print('Taking FFT')
    result = fftpack.ifft2(fftpack.fftshift(result*phase_factor, axes=[0,1]), axes=[0, 1], overwrite_x=True)
    
    #result = ifft(result*phase_factor, shift =1, overwrite = True)
    
    print('Multiplying prefactor')
    phase_factor = np.fromfunction(lambda i,j: np.exp( i2Pi_over_N * ((i-i_c)*X0p/Dxp + (j-j_c)*Y0p/Dyp) ), (npix, npix), dtype=int)
        
    result = Dxp*Dyp*phase_factor*result

    # crop to correct size
    if npix > npix0:
        x_cen = int(npix/2)
        y_cen = int(npix/2)

        if out_schema is None:
            offset = int(npix0/2)
        else:
            offset = int(len(out_schema.x)/2)
        result = result[x_cen - offset:x_cen + offset, y_cen - offset:y_cen + offset]

    # return Image result
    return copy_metadata(data, data_grid(result, spacing=spacing, z=d))
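
A hypothetical call (not from the original source), assuming holo is a square hologram carrying illum_wavelen and medium_index metadata; the distances and beam-center pixel below are placeholder values:

rec = ps_propagate_plane(holo, d=3.7e-5, L=2.8e-2, beam_c=[50, 50])
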
Example #6
def test_subimage_floats():
    i = data_grid(np.zeros((100, 100)), .1)
    s1 = subimage(i, (5.2, 5.6), 2)
    s2 = subimage(i, (5, 6), 2)
    assert_obj_close(s1, s2)
Example #7
import unittest

import numpy as np
from nose.plugins.attrib import attr

from holopy.core.metadata import data_grid
from holopy.scattering import Sphere
from holopy.inference import sample, fit, prior, AlphaModel, EmceeStrategy
from holopy.inference.interface import (make_default_model,
                                        parameterize_scatterer, rename_xyz,
                                        make_uniform)
from holopy.inference.result import SamplingResult
from holopy.inference.tests.common import SimpleModel

DATA = data_grid(np.ones((2, 2)),
                 spacing=1,
                 medium_index=1,
                 illum_wavelen=0.5,
                 illum_polarization=[0, 1])
SPHERE = Sphere(n=1, center=[2, 2, 2])
GUESSES = {'n': 1, 'r': 2, 'center.0': 3}


class TestUserFacingFunctions(unittest.TestCase):
    @attr('fast')
    def test_cannot_sample_without_model(self):
        self.assertRaises(ValueError, sample, DATA, Sphere())

    @attr('fast')
    def test_sample_function_calls_model_sample(self):
        result = sample(DATA, SimpleModel())
        self.assertTrue(isinstance(result, SamplingResult))
Example #8
def display_image(im,
                  scaling='auto',
                  vert_axis='x',
                  horiz_axis='y',
                  depth_axis='z',
                  colour_axis='illumination'):
    im = im.copy()
    if isinstance(im, xr.DataArray):
        if hasattr(im, 'z') and len(im['z']) == 1 and depth_axis != 'z':
            im = im[{'z': 0}]
        if depth_axis == 'z' and 'z' not in im.dims:
            im = im.expand_dims('z')
        if im.ndim > 3 + (colour_axis in im.dims):
            raise BadImage("Too many dims on DataArray to output properly.")
        attrs = im.attrs
    else:
        attrs = {}
        im = ensure_array(im)
        if im.ndim > 3:
            raise BadImage("Too many dims on ndarray to output properly.")
        elif im.ndim == 2:
            im = np.array([im])
        elif im.ndim < 2:
            raise BadImage("Too few dims on ndarray to output properly.")
        axes = [0, 1, 2]
        for axis in [vert_axis, horiz_axis, depth_axis]:
            if isinstance(axis, int):
                try:
                    axes.remove(axis)
                except ValueError:  # list.remove raises ValueError if axis is absent
                    raise ValueError("Cannot interpret axis specifications.")
        if len(axes) > 0:
            if not isinstance(depth_axis, int):
                depth_axis = axes[np.argmin([im.shape[i] for i in axes])]
                axes.remove(depth_axis)
            if not isinstance(vert_axis, int):
                vert_axis = axes[0]
                axes.pop(0)
            if not isinstance(horiz_axis, int):
                horiz_axis = axes[0]
        im = im.transpose([depth_axis, vert_axis, horiz_axis])
        depth_axis = 'z'
        vert_axis = 'x'
        horiz_axis = 'y'
        im = data_grid(im, spacing=1, z=range(len(im)))
    if np.iscomplex(im).any():
        warn("Image contains complex values. Taking image magnitude.")
        im = np.abs(im)
    if scaling == 'auto':
        scaling = (ensure_scalar(im.min()), ensure_scalar(im.max()))
    if scaling is not None:
        im = np.maximum(im, scaling[0])
        im = np.minimum(im, scaling[1])
        im = (im - scaling[0]) / (scaling[1] - scaling[0])
    im.attrs = attrs
    im.attrs['_image_scaling'] = scaling

    if colour_axis in im.dims:
        cols = [
            col[0].capitalize() if isinstance(col, str) else ' '
            for col in im[colour_axis].values
        ]
        RGB_names = np.all([letter in 'RGB' for letter in cols])
        if len(im[colour_axis]) == 1:
            im = im.squeeze(dim=colour_axis)
        elif len(im[colour_axis]) > 3:
            raise BadImage('Cannot output more than 3 colour channels')
        elif RGB_names:
            channels = {
                col: im[{
                    colour_axis: i
                }]
                for i, col in enumerate(cols)
            }
            if len(channels) == 2:
                dummy = im[{colour_axis: 0}].copy()
                dummy[:] = im.min()
                for i, col in enumerate('RGB'):
                    if col not in cols:
                        dummy[colour_axis] = col
                        channels[col] = dummy
                        channels['R'].attrs['_dummy_channel'] = i
                        break
            channels = [channels[col] for col in 'RGB']
            im = clean_concat(channels, colour_axis)
        elif len(im[colour_axis]) == 2:
            dummy = xr.full_like(im[{colour_axis: 0}], fill_value=im.min())
            dummy = dummy.expand_dims({colour_axis: [np.NaN]})
            im.attrs['_dummy_channel'] = -1
            im = clean_concat([im, dummy], colour_axis)
    dim_order = [depth_axis, vert_axis, horiz_axis, colour_axis][:im.ndim]
    return im.transpose(*dim_order)
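
A minimal illustration (not part of the original example, and assuming display_image and its holopy helpers are in scope): a plain 2D ndarray is wrapped in a single z slice and rescaled to the [0, 1] range.

import numpy as np

scaled = display_image(np.random.rand(100, 100))  # returns a (z, x, y) DataArray with values in [0, 1]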