Example #1
def imreg(im0, imk):
    import numpy as np
    import imreg_dft as ird
    from image_registration import chi2_shift
    xoff, yoff, exoff, eyoff = chi2_shift(im0, imk)
    timg = ird.transform_img(imk, tvec=np.array([-yoff, -xoff]))
    return timg
def cross_correlation_HST_diff_NDR():
    # Cross-correlate the differential non-destructive reads from HST scanning mode with WFC3 and G141
    import image_registration as ir
    from glob import glob
    from pylab import *; ion()
    from astropy.io import fits

    fitsfiles = glob("*ima*fits")

    ylow   = 50
    yhigh  = 90
    nExts  = 36
    extGap = 5
    shifts_ndr = zeros((len(fitsfiles), (nExts - 1) // extGap, 2))
    fitsfile0 = fits.open(fitsfiles[0])
    for kf, fitsfilenow in enumerate(fitsfiles):
        fitsnow = fits.open(fitsfilenow)
        for kndr in range(extGap + 1, nExts + 1)[::extGap][::-1]:
            fits0_dndrnow = fitsfile0[kndr - extGap].data[ylow:yhigh] - fitsfile0[kndr].data[ylow:yhigh]
            fitsN_dndrnow = fitsnow[kndr - extGap].data[ylow:yhigh] - fitsnow[kndr].data[ylow:yhigh]
            shifts_ndr[kf][(kndr - 1) // extGap - 1] = ir.chi2_shift(fits0_dndrnow, fitsN_dndrnow)[:2]
            #ax.clear()
            #plt.imshow(fitsN_dndrnow)
            #ax.set_aspect('auto')
            #plt.pause(1e-3)

    plot(shifts_ndr[:, :-1, 0], 'o')  # x-shifts
    plot(shifts_ndr[:, :-1, 1], 'o')  # y-shifts
Example #3
def calcDrift2D(im1, im2, m, n, n_files):
    # `imr` is image_registration, expected to be imported at module level inside a try/except guard
    if 'image_registration' not in sys.modules:
        raise ModuleNotFoundError("The image-registration package was not installed with Eureka and is required for HST analyses.\n"
                                  "You can install all HST-related dependencies with `pip install .[hst]`")
    drift2D = imr.chi2_shift(im1, im2, boundary='constant', nthreads=1,
                             zeromean=False, return_error=False)
    return (drift2D, m, n)
Example #4
def _check_registration(reference, images):
    xo, yo = [], []
    for i in images:
        x_off, y_off, x_err, y_err = image_registration.chi2_shift(
            reference, i)
        xo.append(x_off)
        yo.append(y_off)
    xo = np.array(xo)
    yo = np.array(yo)
    return xo, yo
Example #5
def create_chi2_shift_list(image_list):
    """Calculate the shift between images using chi2 minimization.

    Uses image_registration.chi2_shift module.
    """
    from image_registration import chi2_shift

    shifts = [(0.0, 0.0)] * len(image_list)
    for i in range(len(image_list) - 1):
        im = image_list[i + 1]
        err = np.nanstd(im)
        dx, dy, _, _ = chi2_shift(image_list[0], im, err)
        shifts[i + 1] = (-dx, -dy)

    return shifts
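# The helper above returns the correction shifts but does not apply them. A minimal usage
# sketch (an assumption, not part of the original source): synthetic frames stand in for
# real data, and scipy.ndimage.shift applies each (dx, dy) correction in (y, x) axis order.
import numpy as np
from scipy.ndimage import shift as nd_shift

frames = [np.random.rand(64, 64) for _ in range(3)]   # hypothetical image list
shifts = create_chi2_shift_list(frames)

aligned = [nd_shift(im, (sy, sx), mode='constant')    # scipy expects (row, col) = (y, x)
           for im, (sx, sy) in zip(frames, shifts)]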
Example #6
def calculate_shift(target_image, reference_image):
    '''Calculates the displacement between two images acquired at the same
    site in different cycles based on fast Fourier transform.

    Parameters
    ----------
    target_image: numpy.ndarray
        image that should be registered
    reference_image: numpy.ndarray
        image that should be used as a reference

    Returns
    -------
    Tuple[int]
        shift in y and x direction
    '''
    logger.debug('calculate shift between target and reference image')
    x, y, a, b = image_registration.chi2_shift(target_image, reference_image)
    return (int(np.round(y)), int(np.round(x)))
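# A short usage sketch for calculate_shift (hypothetical arrays; numpy assumed; a logger is
# defined here only because the function above logs through a module-level logger). The
# returned values are integer (y, x) displacements; the sign convention for applying them
# depends on whether you move the target onto the reference or vice versa.
import logging
import numpy as np

logger = logging.getLogger(__name__)

target = np.random.rand(128, 128)
reference = np.roll(target, (5, -3), axis=(0, 1))   # shifted copy standing in for real data

dy, dx = calculate_shift(target, reference)
print(dy, dx)   # integer displacement between the two frames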
def register(cube, ref):
    """
    runs through each layer of a data cube and determines how far to shift it
    to match a given reference layer, and then shifts it.
    INPUTS:
        cube - data cube in 3 dimensional array form.
        ref - number of the reference layer.
    OUTPUTS:
        cube - 3 dimensional array in which each layer is shifted to match 
             the reference. Will have same dimensions as the original.
    """
    size = cube.shape[0]
    for z in range(size):
        #determines how far to shift to match ref image
        shift = ir.chi2_shift(cube[ref], cube[z])
        shiftX, shiftY = shift[0], shift[1]

        #actually shifts
        shifted_image = ir.fft_tools.shift2d(cube[z], -shiftX, -shiftY)
        cube[z] = shifted_image
    return cube
Example #8
def chi2(location):
    x = 0
    template = glob.glob(location[:-4] + '/templates/*.fits')
    images = glob.glob(location + '/*_a_.fits')
    if len(template) == 1:
        ref_data = fits.getdata(template[0])
        ref_data = np.array(ref_data, dtype='float64')
        print("\n-> Aligning images with chi2...")
        for i in images:
            data = fits.getdata(i)
            data = np.array(data, dtype='float64')
            dx, dy, edx, edy = chi2_shift(ref_data,
                                          data,
                                          upsample_factor='auto')
            corrected_image = shift.shiftnd(data, (-dx, -dy))
            hdu = fits.PrimaryHDU(corrected_image)
            hdu.writeto(i[:-8] + '_A_.fits')
            os.remove(i)
            x += 1
            print("-> %.1f%% aligned..." %
                  (float(x) / float(len(images)) * 100))
    else:
        print("-> Alignment failed: Template missing")
Example #9
def calc_offset(imgpair, thresh=4):
    columns = ('chan', 'gbt_snr', 'vla_snr', 'xoff', 'yoff', 'e_xoff',
            'e_yoff')
    df = pd.DataFrame(columns=columns)
    df = df.set_index('chan')
    gbt_thresh = thresh * imgpair.gbt_rms
    vla_thresh = thresh * imgpair.vla_rms
    for ii in range(imgpair.nchan):
        gbt_slice = imgpair.gbt[ii]
        vla_slice = imgpair.vla[ii]
        gbt_snr = gbt_slice.max() / imgpair.gbt_rms
        vla_snr = vla_slice.max() / imgpair.vla_rms
        if gbt_snr < thresh or vla_snr < thresh:
            continue
        df.loc[ii, ['gbt_snr', 'vla_snr']] = gbt_snr, vla_snr
        offset_pars = image_registration.chi2_shift(
                vla_slice.value, gbt_slice.value,
                err=imgpair.gbt_errmap.value,
                nthreads=imgpair.nthreads, return_error=True,
                upsample_factor='auto')
        df.loc[ii, ['xoff', 'yoff', 'e_xoff', 'e_yoff']] = offset_pars
    df['w_xoff'] = 1 / df.e_xoff**2
    df['w_yoff'] = 1 / df.e_yoff**2
    return df
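# The per-channel weights added above are the usual inverse-variance weights. A hedged
# sketch (numpy assumed, column names as defined in calc_offset) of collapsing the
# returned DataFrame into a single weighted-mean offset:
import numpy as np

def weighted_mean_offset(df):
    # keep only channels where a shift was actually measured
    good = df.dropna(subset=['xoff', 'yoff', 'e_xoff', 'e_yoff']).astype(float)
    x_mean = np.average(good.xoff, weights=good.w_xoff)
    y_mean = np.average(good.yoff, weights=good.w_yoff)
    # uncertainty of the weighted mean: 1 / sqrt(sum of weights)
    x_err = 1.0 / np.sqrt(good.w_xoff.sum())
    y_err = 1.0 / np.sqrt(good.w_yoff.sum())
    return x_mean, y_mean, x_err, y_err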
Example #10
def image_chisqr(modelresults, observations, wavelength=None, write=True,
                 normalization='total', registration='sub_pixel',
                 inclinationflag=True, convolvepsf=True, background=0.0):
    """
    Not written yet - this is just a placeholder

    Parameters
    ------------
    wavelength : float
        Wavelength of the image to compute the chi squared of.
    write : bool
        If set, write output to a file, in addition to displaying
        on screen.

    """

    if inclinationflag:
        mod_inclinations = modelresults.parameters.inclinations
    else:
        mod_inclinations = ['0.0']

    im = observations.images
    mask = im[wavelength].mask
    image = im[wavelength].image
    noise = im[wavelength].uncertainty
    if convolvepsf:
        psf = im[wavelength].psf
    model = modelresults.images[wavelength].data
    
    #mask[:,:]=1
    sz = len(mod_inclinations)
    chisqr = np.zeros(sz)

    for n in np.arange(sz):
        if inclinationflag:
            model_n = np.asarray(model[0,0,n,:,:])
        else:
            model_n = np.asarray(model)

        # Convolve the model image with the appropriate psf
        if convolvepsf:
            model_n = np.asarray(image_registration.fft_tools.convolve_nd.convolvend(model_n,psf))
        # Determine the shift between model image and observations via fft cross correlation

        # Normalize model to observed image and calculate chisqrd
        background=np.min(noise)
        background=0.0
        model_n+=background
        if normalization == 'total':
            weightgd=image.sum()/model_n.sum()
        elif normalization == 'peak':
            weightgd=image.max()/model_n.max()
        else:
            weightgd = 1.0
        model_n*=weightgd
        subgd=image-model_n
        print('subgd  ', np.sum(np.square(subgd)))
        print('normalization = ', weightgd)
       

        #model_n=np.multiply(model_n,mask)
        #image=np.multiply(image,mask)
        dy,dx,xerr,yerr = image_registration.chi2_shift(model_n,image)

        if registration == 'integer_pixel':
            dx = np.round(dx)
            dy = np.round(dy)
        #if registration == 'sub_pixel':
            #print dx, dy
        # Shift the model image to the same location as observations
        model_n = sciim.interpolation.shift(model_n,np.asarray((dx,dy)))
        print('subgd  ', np.max(image - model_n))

        chisquared=(image-model_n)**2.0/noise**2.0
        chisqr[n]=chisquared[mask !=0].sum()#/2500.0
        if dx == 0 or dy == 0:
            chisqr[n]=chisqr[n-1]+1.0

#        modelresults.images.closeimage
        _log.info( "inclination {0} : {1:4.1f} deg has chi2 = {2:5g}".format(n, mod_inclinations[n], chisqr[n]))

    return chisqr
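# Written out, the statistic accumulated above is chi^2 = sum over unmasked pixels of
# (data - model)^2 / sigma^2. A tiny standalone helper (numpy only; an illustrative
# restatement of the lines above, not part of the original source):
import numpy as np

def masked_chi2(image, model, noise, mask):
    """Chi-square summed over unmasked (mask != 0) pixels."""
    resid = (image - model)**2 / noise**2
    return resid[mask != 0].sum()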
Example #11
def stack_images(_files_list,
                 _path_out='./',
                 cx0=None,
                 cy0=None,
                 _win=None,
                 _obs=None,
                 _nthreads=4,
                 _interactive_plot=True,
                 _v=True):
    """

    :param _files_list:
    :param _path_out:
    :param _obs:
    :param _nthreads:
    :param _interactive_plot:
    :param _v:
    :return:
    """

    if _obs is None:
        _obs = os.path.split(_files_list[0])[1]

    if _interactive_plot:
        plt.axes([0., 0., 1., 1.])
        plt.ion()
        plt.grid(False)
        plt.axis('off')
        plt.show()

    numFrames = len(_files_list)

    # use first image as pivot:
    with fits.open(_files_list[0]) as _hdulist:
        im1 = np.array(_hdulist[0].data, dtype=float)  # do proper casting (np.float is removed in modern numpy)
        image_size = _hdulist[0].shape
        # get fits header for output:
        header = _hdulist[0].header
        if cx0 is None:
            cx0 = header.get('NAXIS1') // 2
        if cy0 is None:
            cy0 = header.get('NAXIS2') // 2
        if _win is None:
            _win = int(np.min([cx0, cy0]))
        im1 = im1[cy0 - _win:cy0 + _win, cx0 - _win:cx0 + _win]

    # Sum of all frames (with not too large a shift and chi**2)
    summed_frame = np.zeros(image_size)

    # frame_num x y ex ey:
    shifts = np.zeros((numFrames, 5))

    # set up frequency grid for shift2d
    ny, nx = image_size
    xfreq_0 = np.fft.fftfreq(nx)[np.newaxis, :]
    yfreq_0 = np.fft.fftfreq(ny)[:, np.newaxis]

    fftn, ifftn = image_registration.fft_tools.fast_ffts.get_ffts(
        nthreads=_nthreads, use_numpy_fft=False)

    if _v:
        bar = pyprind.ProgBar(numFrames - 1,
                              stream=1,
                              title='Registering frames')

    fn = 0
    for jj, _file in enumerate(_files_list[1:]):
        with fits.open(_file) as _hdulist:
            for ii, _ in enumerate(_hdulist):
                img = np.array(_hdulist[ii].data,
                               dtype=float)  # do proper casting (np.float is removed in modern numpy)

                # tic = _time()
                # img_comp = gaussian_filter(img, sigma=5)
                img_comp = img
                img_comp = img_comp[cy0 - _win:cy0 + _win,
                                    cx0 - _win:cx0 + _win]
                # print(_time() - tic)

                # tic = _time()
                # chi2_shift -> chi2_shift_iterzoom
                dy2, dx2, edy2, edx2 = image_registration.chi2_shift(
                    im1,
                    img_comp,
                    nthreads=_nthreads,
                    upsample_factor='auto',
                    zeromean=True)
                img = shift2d(fftn, ifftn, img, -dy2, -dx2, xfreq_0, yfreq_0)
                # print(_time() - tic, '\n')

                if np.sqrt(dx2**2 + dy2**2) > 0.8 * _win:
                    # skip frames with too large a shift
                    pass
                else:
                    # otherwise store the shift values and add to the 'integrated' image
                    shifts[fn, :] = [fn, -dx2, -dy2, edx2, edy2]
                    summed_frame += img

                if _interactive_plot:
                    plt.imshow(summed_frame,
                               cmap='gray',
                               origin='lower',
                               interpolation='nearest')
                    plt.draw()
                    plt.pause(0.001)

                if _v:
                    bar.update()

                # increment frame number
                fn += 1

    if _interactive_plot:
        input('press any key to close plot')

    if _v:
        print('Largest move was {:.2f} pixels for frame {:d}'.format(
            np.max(np.sqrt(shifts[:, 1]**2 + shifts[:, 2]**2)),
            np.argmax(np.sqrt(shifts[:, 1]**2 + shifts[:, 2]**2))))

    # output
    if not os.path.exists(os.path.join(_path_out)):
        os.makedirs(os.path.join(_path_out))

    export_fits(os.path.join(_path_out, _obs + '.stacked.fits'), summed_frame,
                header)
Example #12
def extended_align(ref,toshift):
    '''Align with fft xcorr'''
    xoff,yoff,_,_ = chi2_shift(ref,toshift,boundary='constant')
    return xoff,yoff
Example #13
def cube_recenter_dft_upsampling(array, subimage=False, ref_y=None, ref_x=None,
                                 fwhm=4, full_output=False, verbose=True,
                                 save_shifts=False, debug=False):                          
    """ Recenters a cube of frames using the DFT upsampling method as 
    proposed in Guizar et al. 2008 (see Notes) plus a chi^2, for determining
    automatically the upsampling factor, as implemented in the package 
    'image_registration' (see Notes).
    
    The algorithm (DFT upsampling) obtains an initial estimate of the 
    cross-correlation peak by an FFT and then refines the shift estimation by 
    upsampling the DFT only in a small neighborhood of that estimate by means 
    of a matrix-multiply DFT.
    
    Parameters
    ----------
    array : array_like
        Input cube.
    subimage : {False, True}, bool optional
        Whether to use a subimage instead of the full frame.
    ref_y, ref_x : int
        Coordinates of the center of the subimage.    
    fwhm : float
        FWHM size in pixels.
    full_output : {False, True}, bool optional
        Whether to return 2 1d arrays of shifts along with the recentered cube 
        or not.
    verbose : {True, False}, bool optional
        Whether to print to stdout the timing or not.
    save_shifts : {False, True}, bool optional
        Whether to save the shifts to a file in disk.
    debug : {False, True}, bool optional
        Whether to print to stdout the shifts or not. 
    
    Returns
    -------
    array_recentered : array_like
        The recentered cube.
    If full_output is True:
    y, x : array_like
        1d arrays with the shifts in y and x.     
    
    Notes
    -----
    Package documentation for "Image Registration Methods for Astronomy":
    https://github.com/keflavich/image_registration
    http://image-registration.rtfd.org
    
    Guizar-Sicairos et al. "Efficient subpixel image registration algorithms," 
    Opt. Lett. 33, 156-158 (2008). 
    The algorithm registers two images (2-D rigid translation) within a fraction 
    of a pixel specified by the user. 
    Instead of computing a zero-padded FFT (fast Fourier transform), this code 
    uses selective upsampling by a matrix-multiply DFT (discrete FT) to 
    dramatically reduce computation time and memory without sacrificing 
    accuracy. With this procedure all the image points are used to compute the 
    upsampled cross-correlation in a very small neighborhood around its peak. 
    
    """
    if not array.ndim == 3:
        raise TypeError('Input array is not a cube or 3d array')
    
    if verbose:  start_time = timeInit()
    
    n_frames = array.shape[0]
    x = np.zeros((n_frames))
    y = np.zeros((n_frames))
    array_rec = array.copy()
    if subimage: 
        size = int(fwhm*3)
        sub_image_1 = get_square(array_rec[0], size=size, y=ref_y, x=ref_x)
        
    for i in range(1, n_frames):
        if subimage:
            size = int(fwhm*3)
            sub_image = get_square(array[i], size=size, y=ref_y, x=ref_x)
            dx, dy, edx, edy = chi2_shift(sub_image_1, sub_image, 
                                      upsample_factor='auto')
        else:
            dx, dy, edx, edy = chi2_shift(array_rec[0], array[i], 
                                      upsample_factor='auto')
        x[i] = -dx
        y[i] = -dy
        if debug:  print(y[i], x[i])
        array_rec[i] = frame_shift(array[i], y[i], x[i])
        
    if verbose:  timing(start_time)
        
    if save_shifts: 
        np.savetxt('recent_dft_shifts.txt', np.transpose([y, x]), fmt='%f')
    if full_output:
        return array_rec, y, x
    else:
        return array_rec
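# As a sanity check on the DFT-upsampling approach described in the Notes above, here is a
# minimal standalone sketch (independent of the cube machinery; assumes only numpy and
# image_registration, whose test helpers generate a pair with a known sub-pixel offset):
import numpy as np
from image_registration import chi2_shift
from image_registration.fft_tools import shift
from image_registration.tests import make_extended, make_offset_extended

true_xsh, true_ysh = 3.7, -1.2
ref = make_extended(128)
off = make_offset_extended(ref, true_xsh, true_ysh, noise=0.1)

dx, dy, edx, edy = chi2_shift(ref, off, 0.1, return_error=True,
                              upsample_factor='auto')
print(dx, dy)                               # should be close to true_xsh, true_ysh
aligned = shift.shiftnd(off, (-dy, -dx))    # shift back; axis order here is (y, x)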
Example #14
noise_taper = True
imsize=100
xsh=3.75
ysh=1.2
image = image_registration.tests.make_extended(imsize)
offset_image_taper = image_registration.tests.make_offset_extended(image, xsh, ysh, noise=0.5, noise_taper=True)
offset_image = image_registration.tests.make_offset_extended(image, xsh, ysh, noise=0.5, noise_taper=False)
noise = 0.5/image_registration.tests.edge_weight(imsize)

im1 = image
im2 = offset_image_taper

print "SCALAR"
xoff, yoff, exoff, eyoff, (x, y, c2a) = image_registration.chi2_shift(image,
        offset_image, 0.1, return_error=True, verbose=2,
        upsample_factor='auto', return_chi2array=True)
print "SCALAR error: ",xoff,yoff,exoff,eyoff
print
xoff, yoff, exoff, eyoff, (x, y, c2) = \
        image_registration.chi2_shift(image, offset_image_taper, noise,
            return_error=True, verbose=2, upsample_factor='auto',
            return_chi2array=True)
c2map,term1,term2,term3 = image_registration.chi2n_map(image,offset_image_taper,noise,return_all=True)

xoff3,yoff3,exoff3,eyoff3,(x3,y3,c3) = image_registration.chi2_shifts.chi2_shift_iterzoom(image, offset_image_taper, noise, return_chi2array=True, return_error=True,verbose=True,mindiff=0.1)
xoff4,yoff4,exoff4,eyoff4,(x4,y4,c4) = image_registration.chi2_shifts.chi2_shift_iterzoom(image, offset_image, 0.5, return_chi2array=True, return_error=True,verbose=True,mindiff=0.1)

c2mapA,term1A,term2A,term3A = image_registration.chi2n_map(image,offset_image,0.5,return_all=True)
print "TAPERED error: ",xoff,yoff,exoff,eyoff
print "TAPERED error absolute difference: ",abs(xoff-xsh),abs(yoff-ysh)
Example #15
def image_chisqr(modelresults,
                 observations,
                 wavelength=None,
                 write=True,
                 normalization='total',
                 registration='sub_pixel',
                 inclinationflag=True,
                 convolvepsf=True,
                 background=0.0):
    """
    Not written yet - this is just a placeholder

    Parameters
    ------------
    wavelength : float
        Wavelength of the image to compute the chi squared of.
    write : bool
        If set, write output to a file, in addition to displaying
        on screen.

    """

    if inclinationflag:
        mod_inclinations = modelresults.parameters.inclinations
    else:
        mod_inclinations = ['0.0']

    im = observations.images
    mask = im[wavelength].mask
    image = im[wavelength].image
    noise = im[wavelength].uncertainty
    if convolvepsf:
        psf = im[wavelength].psf
    model = modelresults.images[wavelength].data

    #mask[:,:]=1
    sz = len(mod_inclinations)
    chisqr = np.zeros(sz)

    for n in np.arange(sz):
        if inclinationflag:
            model_n = np.asarray(model[0, 0, n, :, :])
        else:
            model_n = np.asarray(model)

        # Convolve the model image with the appropriate psf
        if convolvepsf:
            model_n = np.asarray(
                image_registration.fft_tools.convolve_nd.convolvend(
                    model_n, psf))
        # Determine the shift between model image and observations via fft cross correlation

        # Normalize model to observed image and calculate chisqrd
        background = np.min(noise)
        background = 0.0
        model_n += background
        if normalization == 'total':
            weightgd = image.sum() / model_n.sum()
        elif normalization == 'peak':
            weightgd = image.max() / model_n.max()
        else:
            weightgd = 1.0
        model_n *= weightgd
        subgd = image - model_n
        print('subgd  ', np.sum(np.square(subgd)))
        print('normalization = ', weightgd)

        #model_n=np.multiply(model_n,mask)
        #image=np.multiply(image,mask)
        dy, dx, xerr, yerr = image_registration.chi2_shift(model_n, image)

        if registration == 'integer_pixel':
            dx = np.round(dx)
            dy = np.round(dy)
        #if registration == 'sub_pixel':
        #print dx, dy
        # Shift the model image to the same location as observations
        model_n = sciim.interpolation.shift(model_n, np.asarray((dx, dy)))
        print('subgd  ', np.max(image - model_n))

        chisquared = (image - model_n)**2.0 / noise**2.0
        chisqr[n] = chisquared[mask != 0].sum()  #/2500.0
        if dx == 0 or dy == 0:
            chisqr[n] = chisqr[n - 1] + 1.0


#        modelresults.images.closeimage
        _log.info("inclination {0} : {1:4.1f} deg has chi2 = {2:5g}".format(
            n, mod_inclinations[n], chisqr[n]))

    return chisqr
Example #16
def extended_align(ref, toshift):
    '''Align with fft xcorr'''
    xoff, yoff, _, _ = chi2_shift(ref, toshift, boundary='constant')
    return xoff, yoff
Example #17
wcs1 = wcs.WCS(epoch1[0].header).sub([wcs.WCSSUB_CELESTIAL])
epoch3 = fits.open(paths.dpath("W51C_ACarray_continuum_4096_both_uniform_contsplit.clean.image.fits"))
beam3 = Beam.from_fits_header(epoch3[0].header)
epoch3[0].data = epoch3[0].data.squeeze()
wcs3 = wcs.WCS(epoch3[0].header).sub([wcs.WCSSUB_CELESTIAL])
epoch3header = wcs3.to_header()
epoch3header['NAXIS'] = 2
epoch3header['NAXIS1'] = epoch3[0].data.shape[1]
epoch3header['NAXIS2'] = epoch3[0].data.shape[0]

#epoch1reproj = FITS_tools.hcongrid.hcongrid(epoch1[0].data, epoch1header, epoch3header)
epoch1reproj, footprint = reproject.reproject(epoch1[0], epoch3header)

scalefactor = (beam3.sr/beam1.sr).value

xshift,yshift, ex, ey = image_registration.chi2_shift(epoch3[0].data, epoch1reproj*scalefactor, err=0.001)

center = coordinates.SkyCoord(290.92443*u.deg, 14.515755*u.deg, frame='fk5')
xx, yy = np.round(wcs1.wcs_world2pix([[center.fk4.ra.deg, center.fk4.dec.deg]], 0)[0]).astype(int)  # cast to int so the slices below are valid
subim1 = epoch1[0].data[yy-10:yy+10, xx-10:xx+10]
wcs1sub = wcs1[yy-10:yy+10, xx-10:xx+10]
xx, yy = np.round(wcs3.wcs_world2pix([[center.fk5.ra.deg, center.fk5.dec.deg]], 0)[0]).astype(int)
wcs3sub = wcs3[yy-10:yy+10, xx-10:xx+10]
subim3 = epoch3[0].data[yy-10:yy+10, xx-10:xx+10]
gf1 = gaussfitter.gaussfit(subim1)
gf3 = gaussfitter.gaussfit(subim3)

dx1,dy1 = gf1[2:4]
dx3,dy3 = gf3[2:4]
dx_gf, dy_gf = dx3-dx1, dy3-dy1
Example #18
def align2(location, method='standard'):
    x = 1
    y = 0
    images = glob.glob(location + "/*_N_.fits")
    ref = glob.glob(location + "/*_ref_A_.fits")
    hdu2 = fits.open(ref[0])
    data2 = hdu2[0].data
    data2 = np.array(data2, dtype="float64")
    if images != []:
        if method == 'fakes':
            intensity_match.int_match_to_ref(location[:-5])
        else:
            print("\n-> Aligning images with astroalign...")
            for i in tqdm(images):
                worked = True
                hdu1 = fits.open(i)
                data1 = hdu1[0].data
                data1 = np.array(data1, dtype="float64")
                hdr1 = hdu1[0].header
                mask1 = (hdu1[1].data).astype(bool)
                data1 = np.ma.array(data1, mask=mask1)
                try:
                    try:
                        aligned = astroalign.register(data1, data2)
                        astroalign_data = (aligned.data).round(3)
                        astroalign_mask = (aligned.mask).astype(int)
                    except Exception:
                        astroalign_data = data1
                        astroalign_mask = mask1.astype(int)
                        print(
                            "\n-> WARNING: astroalign failed, image rotation will not be accounted for\n"
                        )
                    dx, dy, edx, edy = chi2_shift(data2,
                                                  astroalign_data,
                                                  upsample_factor='auto')
                    alignedData = shift(astroalign_data, [-1 * dy, -1 * dx])
                    alignedMask = shift(astroalign_mask, [-1 * dy, -1 * dx],
                                        cval=1)
                except Exception as e:
                    print(
                        "\n-> Alignment failed: Moving trouble image to OASIS archive...\n"
                    )
                    print(e, '\n')
                    os.system(
                        "mkdir -p %s/OASIS/archive/failed_alignments ; mv %s %s/OASIS/archive/failed_alignments"
                        % (loc, i, loc))
                    worked = False
                    x += 1
                    y += 1
                if worked == True:
                    aligned_name = i[:-8] + "_A_.fits"
                    #write aligned array and mask to original image location
                    hduData = fits.PrimaryHDU(alignedData, header=hdr1)
                    hduMask = fits.ImageHDU(alignedMask)
                    hduList = fits.HDUList([hduData, hduMask])
                    hduList.writeto(aligned_name, overwrite=True)
                    hdu1.close()
                    os.system("mv %s %s/OASIS/archive/data" % (i, loc))
                    x += 1
            hdu2.close()
            print(
                "-> Sucessfuly aligned %d images \n-> Moved %d failed alignment(s) to archive"
                % (len(images) - y, y))
            intensity_match.int_match_to_ref(location[:-5])
    else:
        print("-> Images already aligned...")
Example #19
def align(im, ref, method=None, **kargs):
    """Use one of a variety of algroithms to align two images.

    Args:
        im (ndarray) image to align
        ref (ndarray) reference array

    Keyword Args:
        method (str or None):
            If given specifies which module to try and use.
            Options: 'scharr', 'chi2_shift', 'imreg_dft', 'cv2'
        **kargs (various): All other keyword arguments are passed to the specific algorithm.

    Returns
        (ImageArray or ndarray) aligned image

    Notes:
        Currently three algorithms are supported:
            - image_registration module's chi^2 shift: This uses a DFT with an automatic
              up-sampling of the Fourier transform for sub-pixel alignment. The metadata
              key *chi2_shift* contains the translation vector and errors.
            - imreg_dft module's similarity function. This implements a full scale, rotation, translation
              algorithm (by default constrained to just translation). It's unclear how much sub-pixel translation
              is accommodated.
            - cv2 module based affine transform on a gray scale image.
              from: http://www.learnopencv.com/image-alignment-ecc-in-opencv-c-python/
    """
    # To be consistent with x-y co-ordinate systems
    if all([m is None for m in [imreg_dft, chi2_shift, cv2]]):
        raise ImportError("align requires one of imreg_dft, chi2_shift or cv2 modules to be available.")
    if method == "scharr" and imreg_dft is not None:
        im = im.T
        ref = ref.T
        scale = np.ceil(np.max(im.shape) / 500.0)
        ref1 = ref.gaussian_filter(sigma=scale, mode="wrap").scharr()
        im1 = im.gaussian_filter(sigma=scale, mode="wrap").scharr()
        im1 = im1.align(ref1, method="imreg_dft")
        tvec = np.array(im1["tvec"])
        new_im = im.shift(tvec)
        new_im["tvec"] = tuple(-tvec)
        new_im = new_im.T
    elif (method is None and chi2_shift is not None) or method == "chi2_shift":
        kargs["zeromean"] = kargs.get("zeromean", True)
        result = np.array(chi2_shift(ref, im, **kargs))
        new_im = im.__class__(fft_tools.shiftnd(im, -result[0:2]))
        new_im.metadata.update(im.metadata)
        new_im.metadata["chi2_shift"] = result
    elif (method is None and imreg_dft is not None) or method == "imreg_dft":
        constraints = kargs.pop("constraints", {"angle": [0.0, 0.0], "scale": [1.0, 0.0]})
        cls = im.__class__
        with warnings.catch_warnings():  # This causes a warning due to the masking
            warnings.simplefilter("ignore")
            result = imreg_dft.similarity(ref, im, constraints=constraints)
        new_im = (result.pop("timg")).view(type=cls)
        new_im.metadata.update(im.metadata)
        new_im.metadata.update(result)
    elif (method is None and cv2 is not None) or method == "cv2":
        im1_gray = cv2.cvtColor(ref, cv2.COLOR_BGR2GRAY)
        im2_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)

        # Find size of image1
        sz = im.shape

        # Define the motion model
        warp_mode = cv2.MOTION_TRANSLATION
        warp_matrix = np.eye(2, 3, dtype=np.float32)

        # Specify the number of iterations.
        number_of_iterations = 5000

        # Specify the threshold of the increment
        # in the correlation coefficient between two iterations
        termination_eps = 1e-10

        # Define termination criteria
        criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)

        # Run the ECC algorithm. The results are stored in warp_matrix.
        (_, warp_matrix) = cv2.findTransformECC(im1_gray, im2_gray, warp_matrix, warp_mode, criteria)

        # Use warpAffine for Translation, Euclidean and Affine
        new_im = cv2.warpAffine(im, warp_matrix, (sz[1], sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)

    else:  # No cv2 available so don't do anything.
        raise RuntimeError("Couldn't find an image alignment algorithm to use")
    return new_im.T
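# The imreg_dft branch above constrains the similarity transform to pure translation; a
# minimal standalone sketch of that call (an assumption for illustration: imreg_dft and
# numpy installed, plain ndarrays rather than the ImageArray class used by align()):
import numpy as np
import imreg_dft

ref = np.random.rand(128, 128)
im = np.roll(ref, (2, -3), axis=(0, 1))     # translated copy of the reference

# lock angle and scale so only translation is fitted, as in align() above
constraints = {"angle": [0.0, 0.0], "scale": [1.0, 0.0]}
result = imreg_dft.similarity(ref, im, constraints=constraints)

print(result["tvec"])       # recovered (y, x) translation vector
aligned = result["timg"]    # the transformed (registered) image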
Example #20
def _align_chi2_shift(im, ref, **kargs):
    """Return the translation vector to shirt im to align to ref using the chi^2 shift method."""
    results = np.array(chi2_shift(ref, im, **kargs))
    return -results[1::-1], {}
Example #21
    # read file
    targetimgfile = fits.open(imgfile)
    targetimg = targetimgfile[0].data
    targetimgfile.close()
    # divide image
    targetimgtotal = np.sum(targetimg)
    print(targetimgtotal)
    if targetimgtotal >= args.mincounts:
        targetimg_norm = targetimg/targetimgtotal


#---------------------------------------
#           Align Image
#---------------------------------------

        final_dx,final_dy,dxerr,dyerr = imgreg.chi2_shift(targetimg_norm,refimg,upsample_factor=1000)
        print('dx, dy, = ', final_dx, final_dy)
        print('dxerr, dyerr, = ', dxerr, dyerr)

#---------------------------------------
#        Save Translated Image
#---------------------------------------

    # Translate Image
        img_trans = np.roll(targetimg,int(final_dx),axis=1)
        img_trans = np.roll(img_trans,int(final_dy),axis=0)

    # Truncate lower limit to zero (eliminate negative values)
        img_trans[img_trans < 0.0] = 0.0

    # Suppress very faint edge emission to zero (soft removal of 
Example #22
            proj_image1[~ok] = np.nan
            proj_image2[~ok] = np.nan
            proj_image1[~mask] = np.nan
            proj_image2[~mask] = np.nan

            # to make the xcorr stuff go faster...
            slices = ndimage.find_objects(ok)[0]

            proj_image1 = proj_image1[slices]
            proj_image2 = proj_image2[slices]

        #raise ValueError()

        xcorr = image_registration.chi2_shift(proj_image1,
                                              proj_image2,
                                              zeromean=False,
                                              return_error=True,
                                              upsample_factor=100.)

        offset[regname][survey][
            'nomeansub'] = xcorr * correction_sign, xcorr * pixscale.to(
                u.arcsec) * correction_sign

        print(
            f"{regname}: MAGPIS {survey} = {xcorr} = {xcorr*pixscale.to(u.arcsec)}"
        )
        #dx,dy,wdx,wdy,edx,edy,ewdx,ewdy,cr1,cr2,shf = xcorr
        #raise ValueError("STOP HERE")
        #dx,dy,wdx,wdy,edx,edy,ewdx,ewdy = xcorr
        xcorr = image_registration.chi2_shift(proj_image1,
                                              proj_image2,
# In[24]:


refproj_out     = reproject_exact(ref_frame_adn[0], sci_frame_adn[0].header)


# **Image Registration**
# 
# Register (i.e. interpolate) the reference frame into the science frame pixel coordinates

# In[25]:


# Determine the magnitude of the shift in X and Y pixel coordinates
xshift, yshift, xshifterr, yshifterr  = chi2_shift(refproj_out[0], sci_frame_adn[0].data)

# Perform the shift in X and Y pixel coordinates
refproj_imReg_out = fft_tools.shift2d(refproj_out[0], xshift, yshift)


# In[61]:


imshow(refproj_imReg_out - np.median(refproj_imReg_out) + 2*np.std(refproj_imReg_out), norm=LogNorm())

sub_image_half = 200 # pixels
xlim(xc_0 - sub_image_half, xc_0 + sub_image_half) # we transposed the image, so the x location goes with `ylim`
ylim(yc_0 - sub_image_half, yc_0 + sub_image_half) # we transposed the image, so the y location goes with `xlim`

colorbar();
                      wcs=wcs.WCS(cont3mm[0].header).celestial)
cutout_7mm = Cutout2D(cont7mm[0].data.squeeze(),
                      cutout_center,
                      size,
                      wcs=wcs.WCS(cont7mm[0].header).celestial)
proj_7mmto3mm, _ = reproject.reproject_interp(
    (cutout_7mm.data, cutout_7mm.wcs),
    cutout_3mm.wcs,
    shape_out=cutout_3mm.shape)

pixscale = wcsutils.proj_plane_pixel_area(cutout_3mm.wcs)**0.5 * u.deg

errest = stats.mad_std(cutout_3mm.data)

chi2shift = image_registration.chi2_shift(proj_7mmto3mm,
                                          cutout_3mm.data,
                                          err=errest,
                                          upsample_factor=1000)
print(chi2shift)
print(chi2shift[:2] * cutout_3mm.wcs.wcs.cdelt * 3600)
print(chi2shift[:2] * cutout_3mm.wcs.wcs.cdelt)
print((((chi2shift[:2] * cutout_3mm.wcs.wcs.cdelt * 3600)**2).sum())**0.5)
"""
[-4.295500000000004, 3.9625000000000057, 0.0034999999999999892, 0.003500000000000003]
[0.0214775 0.0198125]
[5.96597222e-06 5.50347222e-06]
"""
ichi2shift = image_registration.chi2_shift_iterzoom(proj_7mmto3mm,
                                                    cutout_3mm.data,
                                                    err=errest,
                                                    upsample_factor=1000)
print(ichi2shift)
Example #25
def image(source_s, reference=None, method="astroalign"):
    """
    Aligns the source astronomical image(s) to the reference astronomical image
    ARGUMENTS
        source_s -- the image(s) to align; fitsio HDU object, numpy array,
            or a list of either one of the above
    KEYWORD ARGUMENTS
        reference -- the image against which to align the source image;
            fitsio HDU object or numpy array. If None, the best option is chosen
            from among the sources.
        method -- the library to use to align the images. options are:
            astroalign (default), skimage, imreg, chi2
    RETURNS
        a transformed copy of the source image[s] in the same data type
        which was passed in
    """
    # make sure that we can handle source as a list
    sources = []
    outputs = []
    if isinstance(source_s, list):
        sources = source_s
    else:
        sources.append(source_s)

    if reference is None:
        reference = ref_image(sources)
    print(reference.header["ORIGNAME"])
    np_ref = to_np(
        reference,
        "Cannot align to unexpected type {}; expected numpy array or FITS HDU")

    for source in sources:
        np_src = to_np(
            source,
            "Cannot align unexpected type {}; expected numpy array or FITS HDU"
        )
        # possibly unneccessary but unsure about scoping
        output = np.array([])

        if method == "astroalign":
            try:
                output = astroalign.register(np_src, np_ref)[0]
            except NameError:
                raise ValueError(DISABLED.format(method, "astroalign"))
        elif method == "skimage":
            try:
                shift = register_translation(np_ref, np_src, 100)[0]
                output_fft = fourier_shift(np.fft.fftn(np_src), shift)
                output = np.fft.ifftn(output_fft)
            except NameError:
                raise ValueError(DISABLED.format(method, "scipy or numpy"))
        elif method == "chi2":
            try:
                dx, dy = chi2_shift(np_ref, np_src, upsample_factor='auto')[:2]
                output = fft_tools.shift.shiftnd(np_src, (-dx, -dy))
            except NameError:
                raise ValueError(DISABLED.format(method, "image_registration"))
        elif method == "imreg":
            try:
                output = imreg_dft.similarity(np_ref, np_src)["timg"]
            except NameError:
                raise ValueError(DISABLED.format(method, "imreg_dft"))
        else:
            raise ValueError("Unexpected alignment method {}!".format(method))

        if isinstance(source, HDU_TYPES):
            output = PrimaryHDU(output, source.header)
        outputs.append(output)

    return outputs if isinstance(source_s, list) else outputs[0]
    filenames = []

    for item in files_to_merge:
        x, y = item
        filenames.append(x)

    print(filenames)

    image_concat = []
    for image in filenames:
        image_data = fits.getdata(path_to_files + image)
        # print(image_data)
        if len(image_concat) != 0:
            xoff, yoff, exoff, eyoff = chi2_shift(image_concat[0],
                                                  image_data,
                                                  return_error=True,
                                                  upsample_factor='auto')

            corrected = np.roll(image_data, int(-xoff),
                                axis=1)  # x axis roll delta
            corrected2 = np.roll(corrected, int(-yoff),
                                 axis=0)  # y axis roll delta

            image_concat.append(corrected2)
        else:
            image_concat.append(image_data)

    final_image = np.zeros(shape=image_concat[0].shape)

    for image in image_concat:
        final_image += image
Example #27
def cube_recenter_dft_upsampling(array, cy_1, cx_1, fwhm=4, 
                                 subi_size=2, full_output=False, verbose=True,
                                 save_shifts=False, debug=False):                          
    """ Recenters a cube of frames using the DFT upsampling method as 
    proposed in Guizar et al. 2008 (see Notes) plus a chi^2, for determining
    automatically the upsampling factor, as implemented in the package 
    'image_registration' (see Notes).
    
    The algorithm (DFT upsampling) obtains an initial estimate of the 
    cross-correlation peak by an FFT and then refines the shift estimation by 
    upsampling the DFT only in a small neighborhood of that estimate by means 
    of a matrix-multiply DFT.
    
    Parameters
    ----------
    array : array_like
        Input cube.
    cy_1, cx_1 : int
        Coordinates of the center of the subimage for centroiding the 1st frame.    
    fwhm : float, optional
        FWHM size in pixels.
    subi_size : int, optional
        Size of the square subimage sides in terms of FWHM.
    full_output : {False, True}, bool optional
        Whether to return 2 1d arrays of shifts along with the recentered cube 
        or not.
    verbose : {True, False}, bool optional
        Whether to print to stdout the timing or not.
    save_shifts : {False, True}, bool optional
        Whether to save the shifts to a file in disk.
    debug : {False, True}, bool optional
        Whether to print to stdout the shifts or not. 
    
    Returns
    -------
    array_recentered : array_like
        The recentered cube. Frames have now odd size.
    If full_output is True:
    y, x : array_like
        1d arrays with the shifts in y and x.     
    
    Notes
    -----
    Package documentation for "Image Registration Methods for Astronomy":
    https://github.com/keflavich/image_registration
    http://image-registration.rtfd.org
    
    Guizar-Sicairos et al. "Efficient subpixel image registration algorithms," 
    Opt. Lett. 33, 156-158 (2008). 
    The algorithm registers two images (2-D rigid translation) within a fraction 
    of a pixel specified by the user. 
    Instead of computing a zero-padded FFT (fast Fourier transform), this code 
    uses selective upsampling by a matrix-multiply DFT (discrete FT) to 
    dramatically reduce computation time and memory without sacrificing 
    accuracy. With this procedure all the image points are used to compute the 
    upsampled cross-correlation in a very small neighborhood around its peak. 
    
    """
    if not array.ndim == 3:
        raise TypeError('Input array is not a cube or 3d array')
    
    # If frame size is even we drop a row and a column
    if array.shape[1]%2==0:
        array = array[:,1:,:].copy()
    if array.shape[2]%2==0:
        array = array[:,:,1:].copy()
    
    if verbose:  start_time = timeInit()
    
    n_frames = array.shape[0]
    x = np.zeros((n_frames))
    y = np.zeros((n_frames))
    array_rec = array.copy()
    
    # Centroiding first frame with 2d gaussian and shifting
    size = int(fwhm*subi_size)
    cy, cx = frame_center(array[0])
    y1, x1 = _centroid_2dg_frame(array_rec, 0, size, cy_1, cx_1)
    array_rec[0] = frame_shift(array_rec[0], shift_y=cy-y1, shift_x=cx-x1)
    x[0] = cx-x1
    y[0] = cy-y1
    
    # Finding the shifts with DTF upsampling of each frame wrt the first
    bar = pyprind.ProgBar(n_frames, stream=1, title='Looping through frames')
    for i in range(1, n_frames):
        dx, dy, _, _ = chi2_shift(array_rec[0], array[i], upsample_factor='auto')
        x[i] = -dx
        y[i] = -dy
        array_rec[i] = frame_shift(array[i], y[i], x[i])
        bar.update()
    print()
    
    if debug:
        print()
        for i in range(n_frames):
            print(y[i], x[i])
        
    if verbose:  timing(start_time)
        
    if save_shifts: 
        np.savetxt('recent_dft_shifts.txt', np.transpose([y, x]), fmt='%f')
    if full_output:
        return array_rec, y, x
    else:
        return array_rec
    b = crpb3 * (pixb4 / pixb3)**2 - (almaimf_b3.spectral_axis * m)
    return (m * nu + b).decompose().value


alma_b4_interp = interp_almaimf(
    pdbi_b4.with_spectral_unit(u.GHz).spectral_axis[0])

fits.writeto('ALMAIMF_B4interp_IRS2_proj_to_PBDI.fits',
             data=alma_b4_interp.decompose().value,
             header=pdbi_b4[0].header,
             overwrite=True)

slc = (slice(310, 329), slice(528, 553))

im1 = alma_b4_interp[slc]
im2 = pdbi_b4[0].value[slc]

print(image_registration.chi2_shift_iterzoom(im1, im2))
print(
    image_registration.chi2_shift_iterzoom(im1,
                                           im2,
                                           verbose=True,
                                           zeromean=True))
print(image_registration.chi2_shift(im1, im2))
print(image_registration.chi2_shift(im1, im2, zeromean=True))
print(image_registration.chi2_shift(alma_b4_interp, pdbi_b4[0].value))
print(
    image_registration.chi2_shift(alma_b4_interp,
                                  pdbi_b4[0].value,
                                  zeromean=True))
Example #29
def stack_images(_files_list, _path_out='./', cx0=None, cy0=None, _win=None,
                 _obs=None, _nthreads=4, _interactive_plot=True, _v=True):
    """

    :param _files_list:
    :param _path_out:
    :param _obs:
    :param _nthreads:
    :param _interactive_plot:
    :param _v:
    :return:
    """

    if _obs is None:
        _obs = os.path.split(_files_list[0])[1]

    if _interactive_plot:
        plt.axes([0., 0., 1., 1.])
        plt.ion()
        plt.grid(False)
        plt.axis('off')
        plt.show()

    numFrames = len(_files_list)

    # use first image as pivot:
    with fits.open(_files_list[0]) as _hdulist:
        im1 = np.array(_hdulist[0].data, dtype=float)  # do proper casting (np.float is removed in modern numpy)
        image_size = _hdulist[0].shape
        # get fits header for output:
        header = _hdulist[0].header
        if cx0 is None:
            cx0 = header.get('NAXIS1') // 2
        if cy0 is None:
            cy0 = header.get('NAXIS2') // 2
        if _win is None:
            _win = int(np.min([cx0, cy0]))
        im1 = im1[cy0 - _win: cy0 + _win, cx0 - _win: cx0 + _win]

    # Sum of all frames (with not too large a shift and chi**2)
    summed_frame = np.zeros(image_size)

    # frame_num x y ex ey:
    shifts = np.zeros((numFrames, 5))

    # set up frequency grid for shift2d
    ny, nx = image_size
    xfreq_0 = np.fft.fftfreq(nx)[np.newaxis, :]
    yfreq_0 = np.fft.fftfreq(ny)[:, np.newaxis]

    fftn, ifftn = image_registration.fft_tools.fast_ffts.get_ffts(nthreads=_nthreads, use_numpy_fft=False)

    if _v:
        bar = pyprind.ProgBar(numFrames-1, stream=1, title='Registering frames')

    fn = 0
    for jj, _file in enumerate(_files_list[1:]):
        with fits.open(_file) as _hdulist:
            for ii, _ in enumerate(_hdulist):
                img = np.array(_hdulist[ii].data, dtype=float)  # do proper casting (np.float is removed in modern numpy)

                # tic = _time()
                # img_comp = gaussian_filter(img, sigma=5)
                img_comp = img
                img_comp = img_comp[cy0 - _win: cy0 + _win, cx0 - _win: cx0 + _win]
                # print(_time() - tic)

                # tic = _time()
                # chi2_shift -> chi2_shift_iterzoom
                dy2, dx2, edy2, edx2 = image_registration.chi2_shift(im1, img_comp, nthreads=_nthreads,
                                                                     upsample_factor='auto', zeromean=True)
                img = shift2d(fftn, ifftn, img, -dy2, -dx2, xfreq_0, yfreq_0)
                # print(_time() - tic, '\n')

                if np.sqrt(dx2 ** 2 + dy2 ** 2) > 0.8 * _win:
                    # skip frames with too large a shift
                    pass
                else:
                    # otherwise store the shift values and add to the 'integrated' image
                    shifts[fn, :] = [fn, -dx2, -dy2, edx2, edy2]
                    summed_frame += img

                if _interactive_plot:
                    plt.imshow(summed_frame, cmap='gray', origin='lower', interpolation='nearest')
                    plt.draw()
                    plt.pause(0.001)

                if _v:
                    bar.update()

                # increment frame number
                fn += 1

    if _interactive_plot:
        input('press any key to close plot')

    if _v:
        print('Largest move was {:.2f} pixels for frame {:d}'.
              format(np.max(np.sqrt(shifts[:, 1] ** 2 + shifts[:, 2] ** 2)),
                     np.argmax(np.sqrt(shifts[:, 1] ** 2 + shifts[:, 2] ** 2))))

    # output
    if not os.path.exists(os.path.join(_path_out)):
        os.makedirs(os.path.join(_path_out))

    export_fits(os.path.join(_path_out, _obs + '.stacked.fits'),
                summed_frame, header)
Example #30
def align(im, ref, method=None, **kargs):
    """Use one of a variety of algroithms to align two images.

    Args:
        im (ndarray) image to align
        ref (ndarray) reference array

    Keyword Args:
        method (str or None): 
            If given specifies which module to try and use.
            Options: 'scharr', 'chi2_shift', 'imreg_dft', 'cv2'
        **kargs (various): All other keyword arguments are passed to the specific algorithm.

    Returns
        (ImageArray or ndarray) aligned image

    Notes:
        Currently three algorithms are supported:
            - image_registration module's chi^2 shift: This uses a DFT with an automatic
              up-sampling of the Fourier transform for sub-pixel alignment. The metadata
              key *chi2_shift* contains the translation vector and errors.
            - imreg_dft module's similarity function. This implements a full scale, rotation, translation
              algorithm (by default constrained to just translation). It's unclear how much sub-pixel translation
              is accommodated.
            - cv2 module based affine transform on a gray scale image.
              from: http://www.learnopencv.com/image-alignment-ecc-in-opencv-c-python/
    """
    #To be consistent with x-y co-ordinate systems
    if all([m is None for m in [imreg_dft, chi2_shift, cv2]]):
        raise ImportError(
            'align requires one of imreg_dft, chi2_shift or cv2 modules to be available.'
        )
    if method == "scharr" and imreg_dft is not None:
        im = im.T
        ref = ref.T
        scale = np.ceil(np.max(im.shape) / 500.0)
        ref1 = ref.gaussian_filter(sigma=scale, mode="wrap").scharr()
        im1 = im.gaussian_filter(sigma=scale, mode="wrap").scharr()
        im1 = im1.align(ref1, method="imreg_dft")
        tvec = np.array(im1["tvec"])
        new_im = im.shift(tvec)
        new_im["tvec"] = tuple(-tvec)
        new_im = new_im.T
    elif (method is None and chi2_shift is not None) or method == "chi2_shift":
        kargs["zeromean"] = kargs.get("zeromean", True)
        result = np.array(chi2_shift(ref, im, **kargs))
        new_im = im.__class__(fft_tools.shiftnd(im, -result[0:2]))
        new_im.metadata.update(im.metadata)
        new_im.metadata["chi2_shift"] = result
    elif (method is None and imreg_dft is not None) or method == "imreg_dft":
        constraints = kargs.pop("constraints", {
            "angle": [0.0, 0.0],
            "scale": [1.0, 0.0]
        })
        result = imreg_dft.similarity(ref, im, constraints=constraints)
        new_im = result.pop("timg").view(type=ImageArray)
        new_im.metadata.update(im.metadata)
        new_im.metadata.update(result)
    elif (method is None and cv2 is not None) or method == "cv2":
        im1_gray = cv2.cvtColor(ref, cv2.COLOR_BGR2GRAY)
        im2_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)

        # Find size of image1
        sz = im.shape

        # Define the motion model
        warp_mode = cv2.MOTION_TRANSLATION
        warp_matrix = np.eye(2, 3, dtype=np.float32)

        # Specify the number of iterations.
        number_of_iterations = 5000

        # Specify the threshold of the increment
        # in the correlation coefficient between two iterations
        termination_eps = 1e-10

        # Define termination criteria
        criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                    number_of_iterations, termination_eps)

        # Run the ECC algorithm. The results are stored in warp_matrix.
        (_, warp_matrix) = cv2.findTransformECC(im1_gray, im2_gray,
                                                warp_matrix, warp_mode,
                                                criteria)

        # Use warpAffine for Translation, Euclidean and Affine
        new_im = cv2.warpAffine(im,
                                warp_matrix, (sz[1], sz[0]),
                                flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)

    else:  # No cv2 available so don't do anything.
        raise RuntimeError("Couldn't find an image alignment algorithm to use")
    return new_im.T
Example #31
def find_shift(im,
               refpos=None,
               refim=None,
               guesspos=None,
               searchbox=None,
               fitbox=None,
               method='fast',
               guessmeth='max',
               searchsmooth=3,
               refsign=1,
               guessFWHM=None,
               guessamp=None,
               guessbg=None,
               fixFWHM=False,
               minamp=0.01,
               maxamp=None,
               maxFWHM=None,
               minFWHM=None,
               verbose=False,
               plot=False,
               silent=False):
    """
    Detect and determine the shift or offset of one image either with respect
    to a reference image or a reference position. In the first case, the
    shift is determined with cross-correlation, while in the second a Gaussian
    fit is performed.
    The convention of the shift is foundpos - refpos.
    OPTIONAL INPUT
    - refpos: tuple, given (approx.) the reference position of a source that
    can be used for fine centering before cropping
    - method: string(max, mpfitgauss, fastgauss, skimage, ginsberg), giving the method that
    should be used for the fine centering using the reference source.
    'skimage' and 'ginsberg' do 2D cross-correlation
    - refim: 2D array, giving a reference image if the method for the
    centering is cross-correlation
    - fitbox: scalar, giving the box length in x and y for the fitting of the
    reference source

    """

    s = np.array(np.shape(im))

    if guesspos is None and refpos is not None:
        guesspos = refpos
    elif refpos is None and guesspos is not None:
        refpos = guesspos
    elif guesspos is None and refpos is None:
        guesspos = 0.5 * np.array(s)
        refpos = guesspos

    if verbose:
        print("GET_SHIFT: guesspos: ", guesspos)
        print("GET_SHIFT: refpos: ", refpos)

    # --- if a reference image was provided then perform a cross-correlation
    if method in ['cross', 'skimage', 'ginsberg']:

        sr = np.array(np.shape(refim))
        if verbose:
            print("GET_SHIFT: input image dimension: ", s)
            print("GET_SHIFT: reference image dimension: ", sr)

        # --- for the cross-correlation, the image and reference must have the
        #     the same size
        if (s[0] > sr[0]) | (s[1] > sr[1]):
            cim = _crop_image(im, box=sr, cenpos=guesspos)

            cenpos = guesspos

            #            # --- adjust the ref and guesspos
            #            refpos = refpos - guesspos + 0.5 * sr
            #            guesspos = 0.5 * sr

            if verbose:
                print("GET_SHIFT: refim smaller than im --> cut im")
                print("GET_SHIFT: adjusted guesspos: ", guesspos)
                print("GET_SHIFT: adjusted refpos: ", refpos)

        elif (sr[0] > s[0]) | (sr[1] > s[1]):
            cim = im
            refim = _crop_image(refim, box=s)

            cenpos = 0.5 * s

        else:
            cim = im

            cenpos = 0.5 * s

        # --- which cross-correlation algorithm should it be?
        if (method == 'cross') | (method == 'skimage'):
            from skimage.feature import register_translation

            shift, error, diffphase = register_translation(cim,
                                                           refim,
                                                           upsample_factor=100)

            #print(shift,error)
            error = [error,
                     error]  # apparently only one error value is returned?

        elif method == 'ginsberg':
            import warnings
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                import image_registration
            np.seterr(
                all='ignore')  # silence warning messages about div-by-zero

            dx, dy, ex, ey = image_registration.chi2_shift(
                refim,
                cim,
                return_error=True,
                zeromean=True,
                upsample_factor='auto')

            shift = [dy, dx]
            error = [ey, ex]

        else:
            print("GET_SHIFT: ERROR: requested method not available: ", method)
            sys.exit(0)

        fitim = None
        params = None
        perrs = None

        # --- correct shift for any difference between guesspos and refpos:
        shift = [
            shift[0] + cenpos[0] - refpos[0], shift[1] + cenpos[1] - refpos[1]
        ]

    # --- if no reference image is provided then perform a Gaussian fit
    else:

        # --- fit a Gaussian to find the center for the crop
        params, perrs, fitim = _find_source(im,
                                            searchbox=searchbox,
                                            fitbox=fitbox,
                                            method=method,
                                            verbose=verbose,
                                            guesspos=guesspos,
                                            sign=refsign,
                                            plot=plot,
                                            guessFWHM=guessFWHM,
                                            guessamp=guessamp,
                                            guessbg=guessbg,
                                            searchsmooth=searchsmooth,
                                            fixFWHM=fixFWHM,
                                            minamp=minamp,
                                            maxFWHM=maxFWHM,
                                            minFWHM=minFWHM,
                                            silent=silent)

        # --- compute the shift from the fit results:
        shift = np.array([params[2] - refpos[0], params[3] - refpos[1]])

        # --- error on the shift from the fit results:
        error = np.array([perrs[2], perrs[3]])

    if verbose:
        print('GET_SHIFT: Found shift: ', shift)
        print('GET_SHIFT: Uncertainty: ', error)
        print('GET_SHIFT: Fit Params: ', params)

    return (shift, error, [params, perrs, fitim])
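The cross-correlation branch above has to reconcile two different output conventions: skimage's register_translation (renamed phase_cross_correlation in newer releases) reports the shift in array order (y, x) and uses the opposite sign convention to image_registration.chi2_shift, which reports (x, y) plus per-axis errors; this appears to be why the routine swaps the argument order between the two backends and re-orders the result to [dy, dx]. The sketch below is not part of the routine, it only makes the two conventions visible on a synthetic, known offset (the Gaussian blob, sizes and variable names are illustrative).

# Hedged sketch: compare the output conventions of the two backends on a
# known synthetic offset. Assumes scikit-image >= 0.19 (phase_cross_correlation)
# and the image_registration package.
import numpy as np
from scipy.ndimage import shift as nd_shift
from skimage.registration import phase_cross_correlation
from image_registration import chi2_shift

yy, xx = np.mgrid[0:128, 0:128]
ref = np.exp(-((yy - 64.0)**2 + (xx - 64.0)**2) / (2 * 5.0**2))  # Gaussian blob
img = nd_shift(ref, shift=(3.0, -5.0), mode='constant')          # known (y, x) offset

# skimage reports, in (y, x) order, the shift to apply to the second image
shift_yx = phase_cross_correlation(ref, img, upsample_factor=100)[0]

# chi2_shift reports the measured offset of the second image in (x, y) order
dx, dy, edx, edy = chi2_shift(ref, img, return_error=True, upsample_factor='auto')

print('applied offset (y, x):           ', (3.0, -5.0))
print('phase_cross_correlation (y, x):  ', shift_yx)
print('chi2_shift re-ordered to (y, x): ', (dy, dx))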
Exemple #32
0
"""
from skimage import io
from image_registration import chi2_shift

image = io.imread("images/Osteosarcoma_01.tif", as_gray=True)
offset_image = io.imread("images/Osteosarcoma_01_transl.tif", as_gray=True)
# offset image is translated by (-17, 18) in y and x

# Method 1: chi-squared shift
# Find the offsets between image 1 and image 2 using the DFT upsampling method
# (2D rigid translation)

noise = 0.1
xoff, yoff, exoff, eyoff = chi2_shift(image, offset_image, noise, 
                                      return_error=True, upsample_factor='auto')

print("Offset image was translated by: 18, -17")
print("Pixels shifted by: ", xoff, yoff)

from scipy.ndimage import shift
# scipy.ndimage.shift expects the shift in array order (y, x); negate the
# measured offsets to undo the translation
corrected_image = shift(offset_image, shift=(-yoff, -xoff), mode='constant')

from matplotlib import pyplot as plt
fig = plt.figure(figsize=(10, 10))
ax1 = fig.add_subplot(2,2,1)
ax1.imshow(image, cmap='gray')
ax1.title.set_text('Input Image')
ax2 = fig.add_subplot(2,2,2)
ax2.imshow(offset_image, cmap='gray')
ax2.title.set_text('Offset image')
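The snippet above stops after displaying the input and offset images. A possible continuation, assuming the variables defined above, adds the corrected frame to the figure and also shows the package's own FFT-based subpixel shifter, image_registration.fft_tools.shift.shiftnd, which likewise takes the shift in array order (y, x).

# Sketch of a continuation, reusing image, offset_image, xoff, yoff, fig and plt
# from the snippet above.
ax3 = fig.add_subplot(2, 2, 3)
ax3.imshow(corrected_image, cmap='gray')
ax3.title.set_text('Corrected (scipy.ndimage.shift)')

# alternative: FFT-based subpixel shift shipped with image_registration
from image_registration.fft_tools import shift as fft_shift
corrected_fft = fft_shift.shiftnd(offset_image, (-yoff, -xoff))
ax4 = fig.add_subplot(2, 2, 4)
ax4.imshow(corrected_fft, cmap='gray')
ax4.title.set_text('Corrected (fft_tools.shiftnd)')
plt.show()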
Exemple #33
0
def cube_recenter_dft_upsampling(array,
                                 cy_1,
                                 cx_1,
                                 fwhm=4,
                                 subi_size=None,
                                 full_output=False,
                                 verbose=True,
                                 save_shifts=False,
                                 debug=False):
    """ Recenters a cube of frames using the DFT upsampling method as 
    proposed in Guizar et al. 2008 (see Notes) plus a chi^2, for determining
    automatically the upsampling factor, as implemented in the package 
    'image_registration' (see Notes).
    
    The algorithm (DFT upsampling) obtains an initial estimate of the 
    cross-correlation peak by an FFT and then refines the shift estimation by 
    upsampling the DFT only in a small neighborhood of that estimate by means 
    of a matrix-multiply DFT.
    
    Parameters
    ----------
    array : array_like
        Input cube.
    cy_1, cx_1 : int
        Coordinates of the center of the subimage for centroiding the 1st frame.    
    fwhm : float, optional
        FWHM size in pixels.
    subi_size : int or None, optional
        Size of the square subimage sides in terms of FWHM that will be used
        to centroid the first frame. If subi_size is None then the first frame
        is assumed to be centered already.
    full_output : {False, True}, bool optional
        Whether to return 2 1d arrays of shifts along with the recentered cube 
        or not.
    verbose : {True, False}, bool optional
        Whether to print to stdout the timing or not.
    save_shifts : {False, True}, bool optional
        Whether to save the shifts to a file in disk.
    debug : {False, True}, bool optional
        Whether to print to stdout the shifts or not. 
    
    Returns
    -------
    array_recentered : array_like
        The recentered cube. Frames now have an odd size.
    If full_output is True:
    y, x : array_like
        1d arrays with the shifts in y and x.     
    
    Notes
    -----
    Package documentation for "Image Registration Methods for Astronomy":
    https://github.com/keflavich/image_registration
    http://image-registration.rtfd.org
    
    Guizar-Sicairos et al. "Efficient subpixel image registration algorithms," 
    Opt. Lett. 33, 156-158 (2008). 
    The algorithm registers two images (2-D rigid translation) within a fraction 
    of a pixel specified by the user. 
    Instead of computing a zero-padded FFT (fast Fourier transform), this code 
    uses selective upsampling by a matrix-multiply DFT (discrete FT) to 
    dramatically reduce computation time and memory without sacrificing 
    accuracy. With this procedure all the image points are used to compute the 
    upsampled cross-correlation in a very small neighborhood around its peak. 
    
    """
    if not array.ndim == 3:
        raise TypeError('Input array is not a cube or 3d array')

    # If frame size is even we drop a row and a column
    if array.shape[1] % 2 == 0:
        array = array[:, 1:, :].copy()
    if array.shape[2] % 2 == 0:
        array = array[:, :, 1:].copy()

    if verbose: start_time = timeInit()

    n_frames = array.shape[0]
    x = np.zeros((n_frames))
    y = np.zeros((n_frames))
    array_rec = array.copy()

    # Centroiding the first frame with a 2d Gaussian and shifting
    cy, cx = frame_center(array[0])
    if subi_size is not None:
        size = int(fwhm * subi_size)
        y1, x1 = _centroid_2dg_frame(array_rec, 0, size, cy_1, cx_1)
        array_rec[0] = frame_shift(array_rec[0],
                                   shift_y=cy - y1,
                                   shift_x=cx - x1)
        x[0] = cx - x1
        y[0] = cy - y1
    else:
        # first frame is assumed to be centered already, so no shift is applied
        x[0] = 0
        y[0] = 0

    # Finding the shifts with DFT upsampling of each frame wrt the first
    bar = pyprind.ProgBar(n_frames, stream=1, title='Looping through frames')
    for i in range(1, n_frames):
        dx, dy, _, _ = chi2_shift(array_rec[0],
                                  array[i],
                                  upsample_factor='auto')
        x[i] = -dx
        y[i] = -dy
        array_rec[i] = frame_shift(array[i], y[i], x[i])
        bar.update()
    print()

    if debug:
        print()
        for i in range(n_frames):
            print(y[i], x[i])

    if verbose: timing(start_time)

    if save_shifts:
        np.savetxt('recent_dft_shifts.txt', np.transpose([y, x]), fmt='%f')
    if full_output:
        return array_rec, y, x
    else:
        return array_rec
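The Notes above describe how the matrix-multiply DFT upsampling reaches subpixel accuracy without a zero-padded FFT. A minimal, self-contained check of that claim, independent of the helper functions used above (frame_shift, frame_center, pyprind, ...), could look like the sketch below; the synthetic frame and offsets are illustrative only.

# Sketch: apply known subpixel offsets to a synthetic frame and confirm that
# chi2_shift with DFT upsampling recovers them to a fraction of a pixel.
import numpy as np
from scipy.ndimage import shift as nd_shift
from image_registration import chi2_shift

yy, xx = np.mgrid[0:101, 0:101]
frame0 = np.exp(-((yy - 50.0)**2 + (xx - 50.0)**2) / (2 * 4.0**2))

for dy_true, dx_true in [(0.3, -1.7), (-2.25, 0.6)]:   # (y, x) offsets in pixels
    frame = nd_shift(frame0, shift=(dy_true, dx_true), mode='constant')
    dx, dy, _, _ = chi2_shift(frame0, frame, upsample_factor='auto')
    print('applied (y, x): (%.2f, %.2f)   recovered: (%.2f, %.2f)'
          % (dy_true, dx_true, dy, dx))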
Exemple #34
0
    # read file
    targetimgfile = fits.open(imgfile)
    targetimg = targetimgfile[0].data
    targetimgfile.close()
    # divide image
    targetimgtotal = np.sum(targetimg)
    print(targetimgtotal)
    if targetimgtotal >= args.mincounts:
        targetimg_norm = targetimg / targetimgtotal

        #---------------------------------------
        #           Align Image
        #---------------------------------------

        final_dx, final_dy, dxerr, dyerr = imgreg.chi2_shift(
            targetimg_norm, refimg, upsample_factor=1000)
        print('dx, dy = ', final_dx, final_dy)
        print('dxerr, dyerr = ', dxerr, dyerr)

        #---------------------------------------
        #        Save Translated Image
        #---------------------------------------

        # Translate Image
        img_trans = np.roll(targetimg, int(final_dx), axis=1)
        img_trans = np.roll(img_trans, int(final_dy), axis=0)

        # Truncate lower limit to zero (eliminate negative values)
        img_trans[img_trans < 0.0] = 0.0

        # Suppress very faint edge emission to zero (soft removal of
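The np.roll translation above applies only the integer part of the measured shift and wraps pixels around the array edges. A hedged alternative, reusing the targetimg, final_dx and final_dy values from above, applies the full subpixel shift without wrap-around (scipy.ndimage.shift takes the shift in array order (y, x)).

# Sketch: subpixel translation without edge wrap-around
from scipy.ndimage import shift as nd_shift

img_trans = nd_shift(targetimg, shift=(final_dy, final_dx),
                     order=1, mode='constant', cval=0.0)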
Exemple #35
0
"""

import glob
from astropy.io import fits
from image_registration import chi2_shift
from image_registration.fft_tools import shift
import numpy as np

image = '/home/andrew/sdi/targets/NGC6744/19:09:46.104_-63:51:27.00/rp/20.0/data/NORM_17:53:30.954_ref_A_.fits'
data = fits.getdata(image)
data = np.array(data, dtype='float64')
#image = '/home/andrew/sdi/targets/NGC6744/19:09:46.104_-63:51:27.00/rp/20.0/data/17:34:05.966_N_.fits'
#data = fits.getdata(image)
#Min = np.min(data)
#Max = np.max(data)
#new_min = 1
#new_max = -1
#data = (data-Min)*((new_max-new_min)/(Max-Min))
#data += new_min
#data -= np.mean(data)
#hdu = fits.PrimaryHDU(data)
#hdu.writeto('/home/andrew/sdi/targets/NGC6744/19:09:46.104_-63:51:27.00/rp/20.0/data/NORM3_17:34:05.966_N_.fits')
image2 = '/home/andrew/sdi/targets/NGC6744/19:09:46.104_-63:51:27.00/rp/20.0/data/NORM3_17:34:05.966_N_.fits'
data2 = fits.getdata(image2)
data2 = np.array(data2, dtype='float64')
dx, dy, edx, edy = chi2_shift(data, data2, upsample_factor='auto')
corrected_image = shift.shiftnd(data2, (-dy, -dx))
hdu2 = fits.PrimaryHDU(corrected_image)
hdu2.writeto(
    '/home/andrew/sdi/targets/NGC6744/19:09:46.104_-63:51:27.00/rp/20.0/data/NORM5_align.fits'
)
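A quick way to validate the alignment, reusing the arrays from the snippet above, is to re-measure the shift between the reference and the corrected frame; the residual offsets should be close to zero.

# Sketch: residual shift after correction (should be ~0 in both axes)
rdx, rdy, redx, redy = chi2_shift(data, corrected_image, upsample_factor='auto')
print('residual shift after correction (x, y):', rdx, rdy)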