Example #1
    def matching_kernel(psf1,
                        psf2,
                        window_type='TukeyWindow',
                        alpha=None,
                        beta=None):
        """Use photutils to create a matching kernel given two PSFs and
        the window type and parameters

        Parameters
        ----------
        psf1 : numpy.ndarray
            2D array containing the first PSF

        psf2 : numpy.ndarray
            2D array containing the second PSF

        window_type : str
            Name of the window function to use when filtering the matching kernel

        alpha : float
            Alpha parameter used by the CosineBell, SplitCosineBell, and
            Tukey windows; ignored otherwise

        beta : float
            Beta parameter used by the TopHat and SplitCosineBell windows;
            ignored otherwise

        Returns
        -------
        matched_kernel : numpy.ndarray
            2D array containing the matching PSF kernel
        """
        # Create the filtering window
        orig_window_type = copy.deepcopy(window_type)
        window_type = window_type.lower()
        if window_type == 'tophatwindow':
            window = TopHatWindow(beta=beta)
        elif window_type == 'cosinebellwindow':
            window = CosineBellWindow(alpha=alpha)
        elif window_type == 'splitcosinebellwindow':
            window = SplitCosineBellWindow(alpha=alpha, beta=beta)
        elif window_type == 'tukeywindow':
            window = TukeyWindow(alpha=alpha)
        elif window_type == 'hanningwindow':
            window = HanningWindow()
        else:
            raise ValueError(
                "ERROR: Unrecognized window_type: {}".format(orig_window_type))

        # Create the matching kernel
        matched_kernel = create_matching_kernel(psf1, psf2, window=window)

        return matched_kernel
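
# --- Hedged usage sketch (added for illustration, not part of the original) ---
# A minimal example of how matching_kernel above might be called, assuming two
# synthetic Gaussian PSFs built with astropy; the stddev values and the Tukey
# alpha are arbitrary illustrative choices.
#
#     import numpy as np
#     from astropy.convolution import Gaussian2DKernel
#
#     psf_sharp = Gaussian2DKernel(x_stddev=2.0, x_size=51, y_size=51).array
#     psf_broad = Gaussian2DKernel(x_stddev=4.0, x_size=51, y_size=51).array
#     kernel = matching_kernel(psf_sharp, psf_broad,
#                              window_type='TukeyWindow', alpha=0.4)
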
def run_lc_psf(field, CCD, FILTER, row_pix, col_pix, seeing_limit=1.5,
               verbose=False):
    name = '%s_%s_%04i_%04i_%s' % (field, CCD, col_pix, row_pix, FILTER)
    epochs = np.loadtxt('%s/info/%s/%s_epochs_%s.txt' %
                        (jorgepath, field, field, FILTER),
                        dtype={'names': ('EPOCH', 'MJD'),
                               'formats': ('S2', 'f4')}, comments='#')

    images, airmass, seeing, dqmask, gain, exptime = [], [], [], [], [], []

    print('Loading catalogues...')
    print('\tEpoch |      MJD     |  SEEING  | AIRMASS')
    for ll, epoch in enumerate(epochs):
        # fits
        imag_file = "%s/DATA/%s/%s/%s_%s_%s_image_crblaster.fits" % (astropath,
                                                                     field,
                                                                     CCD,
                                                                     field,
                                                                     CCD,
                                                                     epoch[0])
        if not os.path.exists(imag_file):
            if verbose:
                print('\t\tNo image file: %s' % (imag_file))
            continue
        hdu = fits.open(imag_file)
        seeing.append(hdu[0].header['FWHM'] * hdu[0].header['PIXSCAL1'])
        airmass.append(hdu[0].header['AIRMASS'])
        gain.append(hdu[0].header['GAINA'])
        exptime.append(hdu[0].header['EXPTIME'])
        data = hdu[0].data
        images.append(data)
        if verbose:
            print('\t   %s | %f | %f | %f' % (epoch[0], epoch[1],
                                              seeing[-1], airmass[-1]))

        dqm_file = "%s/DATA/%s/%s/%s_%s_%s_dqmask.fits.fz" % (astropath, field,
                                                              CCD, field, CCD,
                                                              epoch[0])
        if not os.path.exists(dqm_file):
            if verbose:
                print('\t\tNo dqmask file: %s' % (dqm_file))
            # append the placeholder even when not verbose, so dqmask stays
            # aligned with the other per-epoch lists
            dqmask.append(False)
        else:
            hdu = fits.open(dqm_file)
            dqmask.append(hdu[0].data)

    # clean bad observation conditions
    seeing = np.array(seeing)
    airmass = np.array(airmass)
    images = np.array(images)
    dqmask = np.array(dqmask)
    # NOTE: the seeing cut is disabled in favour of a fixed MJD cut, so the
    # seeing_limit argument is currently unused
    # mask_good = (seeing <= seeing_limit)
    mask_good = (epochs['MJD'] <= 57073.)
    if verbose:
        print('Total number of images     : ', len(images))
        print('Epochs with good conditions: ', len(images[mask_good]))

    epochs = epochs[mask_good]
    images = images[mask_good]
    seeing = seeing[mask_good]
    airmass = airmass[mask_good]
    dqmask = dqmask[mask_good]

    # selecting worst epoch
    idx_worst = np.argmax(seeing)
    if verbose:
        print('Worst epoch  : ', epochs[idx_worst])
        print('Worst seeing : ', seeing[idx_worst])
        print('Worst airmass: ', airmass[idx_worst])

    # select the nearest, cleanest stars to the requested position
    if verbose:
        print('Searching for nearest & cleanest stars near position...')
    ref_cat_file = "%s/catalogues/%s/%s/%s_%s_%s_image_crblaster_thresh%s_minarea%s_backsize64_final-scamp.dat" % \
        (jorgepath, field, CCD, field, CCD,
         epochs[idx_worst][0], str(thresh), str(minarea))
    if not os.path.exists(ref_cat_file):
        if verbose:
            print('\t\tNo catalog file for worst epoch: %s' % (ref_cat_file))
        return

    ref_cata = Table.read(ref_cat_file, format='ascii')
    ref_tree_XY = cKDTree(np.transpose(np.array((ref_cata['X_IMAGE_REF'],
                                                 ref_cata['Y_IMAGE_REF']))))
    # query the requested position in the worst image
    XY_worst = np.transpose(np.array((col_pix, row_pix)))
    dist_w, indx_w = ref_tree_XY.query(XY_worst, k=1, distance_upper_bound=5)
    print(dist_w, indx_w)
    row_pix_w = int(np.around(ref_cata['Y_IMAGE'][indx_w]))
    col_pix_w = int(np.around(ref_cata['X_IMAGE'][indx_w]))
    print(col_pix_w, row_pix_w)
    stamp_worst = images[idx_worst][row_pix_w - dx_stamp:
                                    row_pix_w + dx_stamp + 1,
                                    col_pix_w - dx_stamp:
                                    col_pix_w + dx_stamp + 1].copy()

    nearest_cata = get_filter_nearest_stars(ref_cata, col_pix_w, row_pix_w)
    if nearest_cata is None:
        return
    if verbose:
        print('Creating kernel...')
    psf_worst = get_kernel(images[idx_worst], nearest_cata)
    # psf_worst1 = get_kernel(images[idx_worst],
    #                         nearest_cata[:int(len(nearest_cata)/2)])
    # psf_worst2 = get_kernel(images[idx_worst],
    #                         nearest_cata[int(len(nearest_cata)/2):])
    if verbose:
        print('Kernel done!')
        print('Going into time series...')

    # aperture radii for photometry, in pixels (0.27 arcsec/pix pixel scale)
    ap_radii = seeing[idx_worst] / 0.27 * np.array([0.5, 0.75, 1., 1.25, 1.5])
    print('Aperture radii: ', ap_radii)
    lc_yes, lc_no = [], []
    stamps_lc = []
    airmass2, exptime2, ZP = [], [], []

    for k, epoch in enumerate(epochs):

        if verbose:
            print('Working in epoch %s...' % (epoch[0]))

        # loading catalogs
        cat_file = "%s/catalogues/%s/%s/%s_%s_%s_image_crblaster_thresh%s_minarea%s_backsize64_final-scamp.dat" % \
            (jorgepath, field, CCD, field, CCD,
             epoch[0], str(thresh), str(minarea))
        if not os.path.exists(cat_file):
            if verbose:
                print('\t\tNo catalog file for epoch %s: %s'
                      % (epoch[0], cat_file))
            continue
        cata = Table.read(cat_file, format='ascii')
        cata_XY = np.transpose(np.array((cata['X_IMAGE_REF'],
                                         cata['Y_IMAGE_REF'])))
        tree_XY = cKDTree(cata_XY)

        # query the requested position
        XY_obj = np.transpose(np.array((col_pix, row_pix)))
        dist, indx = tree_XY.query(XY_obj, k=1, distance_upper_bound=5)
        if np.isinf(dist):
            if verbose:
                print('\t\tNo match in epoch %s' % epoch[0])
            # record the missed epoch and skip it regardless of verbosity
            lc_no.append(epoch)
            continue

        # position in non-projected coordinates, i.e. in the loaded image
        row_pix2 = int(np.around(cata['Y_IMAGE'][indx]))
        col_pix2 = int(np.around(cata['X_IMAGE'][indx]))
        err_mag = cata['MAGERR_AUTO_ZP'][indx]
        print(row_pix2, col_pix2, err_mag)
        stamp = images[k][row_pix2 - dx_stamp: row_pix2 + dx_stamp + 1,
                          col_pix2 - dx_stamp: col_pix2 + dx_stamp + 1].copy()
        stamp_mask = dqmask[k][row_pix2 - dx_stamp:
                               row_pix2 + dx_stamp + 1,
                               col_pix2 - dx_stamp:
                               col_pix2 + dx_stamp + 1].copy()

        if epoch[0] != epochs[idx_worst][0]:
            nearest_cata2 = get_filter_nearest_stars(cata, col_pix2, row_pix2)
            if nearest_cata2 is None:
                continue
            psf = get_kernel(images[k], nearest_cata2)
            part = 2
            matched_kernel = []
            for p in range(part):
                aux_psf = get_kernel(images[k],
                                     nearest_cata2[int(p * len(nearest_cata2) /
                                                       part):
                                                   int((p+1) *
                                                       len(nearest_cata2) /
                                                       part)])

                aux_kernel = create_matching_kernel(aux_psf, psf_worst,
                                                    window=TopHatWindow(0.62))
                matched_kernel.append(aux_kernel)
                # aux_kernel2 = create_matching_kernel(aux_psf, psf_worst2,
                #                                      window=TopHatWindow(0.7))
                # matched_kernel.append(aux_kernel2)

            matched_kernel = np.array(matched_kernel)
            print(matched_kernel.shape)
            matched_kernel = np.median(matched_kernel, axis=0)
            matched_kernel /= matched_kernel.sum()
            print('\tMatching kernel sum: ', matched_kernel.sum())
            convolved_kernel = filter_data(psf, matched_kernel,
                                           mode='nearest')

            convolved_stamp = filter_data(stamp, matched_kernel,
                                          mode='nearest')
        else:
            print('\tWorst epoch')
            matched_kernel = psf_worst.copy()
            stamp = stamp_worst.copy()
            convolved_stamp = stamp_worst.copy()
            # also define convolved_kernel here so the append below does not
            # fail (or reuse a stale value) on the worst epoch
            convolved_kernel = psf_worst.copy()
        stamps_lc.append([stamp, convolved_stamp, matched_kernel,
                          float(epoch[1]), convolved_kernel])

        data_point = get_photometry(convolved_stamp, mask=stamp_mask,
                                    gain=gain[k], pos=(dx_stamp, dx_stamp),
                                    radii=ap_radii)
        if data_point is None:
            print('\t\tNo photometry for epoch %s' % epoch[0])
            lc_no.append(epoch)
            continue
        data_point['mjd'] = float(epoch[1])
        data_point['epoch'] = epoch[0]
        data_point['aperture_mag_err_0_cat'] = err_mag

        lc_yes.append(data_point)
        airmass2.append(airmass[k])
        exptime2.append(exptime[k])

        ZP_PS = np.load('%s/info/%s/%s/ZP_%s_PS_%s_%s_%s.npy' %
                        (jorgepath, field, CCD, 'AUTO', field, CCD, epoch[0]))
        ZP.append([ZP_PS[0][0], ZP_PS[2][0]])

        if False:

            fig, ax = plt.subplots(nrows=2, ncols=4, figsize=(18, 10))

            im2 = ax[0, 0].imshow(stamp,
                                  cmap='viridis', interpolation='nearest')
            ax[0, 0].set_title(epoch[0])
            fig.colorbar(im2, ax=ax[0, 0])

            im1 = ax[0, 1].imshow(stamp_worst,
                                  cmap='viridis', interpolation='nearest')
            ax[0, 1].set_title('Worst')
            fig.colorbar(im1, ax=ax[0, 1])

            im3 = ax[0, 2].imshow(convolved_stamp,
                                  cmap='viridis', interpolation='nearest')
            ax[0, 2].set_title('%s convolved' % epoch[0])
            for rad in ap_radii:  # 'rad' avoids shadowing the epoch index k
                circle = plt.Circle([data_point['xcenter'][0],
                                     data_point['ycenter'][0]],
                                    rad, color='r', fill=False)
                ax[0, 2].add_artist(circle)
            fig.colorbar(im3, ax=ax[0, 2])

            im4 = ax[0, 3].imshow(psf_worst - convolved_kernel,
                                  cmap='viridis', interpolation='nearest')
            ax[0, 3].set_title('kernel subtraction')
            fig.colorbar(im4, ax=ax[0, 3])

            im2 = ax[1, 0].imshow(psf, cmap='viridis',
                                  interpolation='nearest')
            ax[1, 0].set_title('%s' % (epoch[0]))
            fig.colorbar(im2, ax=ax[1, 0])

            im1 = ax[1, 1].imshow(psf_worst, cmap='viridis',
                                  interpolation='nearest')
            ax[1, 1].set_title('Worst')
            fig.colorbar(im1, ax=ax[1, 1])

            im3 = ax[1, 2].imshow(matched_kernel, cmap='viridis',
                                  interpolation='nearest')
            ax[1, 2].set_title('matching kernel')
            fig.colorbar(im3, ax=ax[1, 2])

            im4 = ax[1, 3].imshow(convolved_kernel, cmap='viridis',
                                  interpolation='nearest')
            ax[1, 3].set_title('convolved %s' % (epoch[0]))
            fig.colorbar(im4, ax=ax[1, 3])
            fig.tight_layout()
            # plt.savefig('%s/lightcurves/galaxy/%s/%s_%s_psf.png' %
            #             (jorgepath, field, name, epoch[0]),
            #             tight_layout=True, pad_inches=0.01,
            #             bbox_inches='tight')
            plt.show()

        # if k == 3: break

    if len(lc_yes) <= 10:
        print('No LC for this source...')
        return None
    lc = vstack(lc_yes)
    # lc_raw = vstack(lc_raw)
    airmass2 = np.array(airmass2)
    exptime2 = np.array(exptime2)
    ZP = np.array(ZP)

    # magnitudes
    for k in range(len(ap_radii)):
        (mags, e1_mags) = ADU2mag_PS(lc['aperture_sum_%i' % k],
                                     lc['aperture_flx_err_%i' % k],
                                     exptime2, ZP[:, 0], ZP[:, 1])
        lc['aperture_mag_%i' % k] = mags
        lc['aperture_mag_err_%i' % k] = e1_mags

    lc_df = lc.to_pandas()
    lc_df.drop('id', axis=1, inplace=True)
    for k in range(len(ap_radii)):
        lc_df.rename(columns={'aperture_sum_%i' % k: 'aperture_flx_%i' % k},
                     inplace=True)
    print(lc_df.columns.values)
    print(lc_df.shape)
    f = open('%s/lightcurves/galaxy/%s/%s_psf.csv' %
             (jorgepath, field, name), 'w')
    f.write('# Worst epoch    : %s\n' % epochs[idx_worst][0])
    f.write('# Worst MJD      : %s\n' % epochs[idx_worst][1])
    f.write('# Worst seeing   : %s\n' % seeing[idx_worst])
    f.write('# Aperture radii : seeing * [0.5, 0.75, 1., 1.25, 1.5]\n')
    f.write('# Aperture radii : %s\n' % str(ap_radii))
    lc_df.to_csv(f)
    f.close()
    # print lc_df[['aperture_flx_0', 'aperture_flx_err_0',
    #              'aperture_mag_0', 'aperture_mag_err_0',
    #              'aperture_mag_0_cat']]

    if True:
        brightest_idx = np.argmax(lc_df.aperture_flx_0)
        cmin = np.percentile(stamps_lc[brightest_idx][1].flatten(), 50)
        cmax = stamps_lc[brightest_idx][1].flatten().max()

        fig, ax = plt.subplots(ncols=len(stamps_lc), nrows=4,
                               figsize=(2. * len(stamps_lc), 10))
        for i in range(len(stamps_lc)):
            ax[0, i].imshow(stamp_worst, interpolation="nearest",
                            cmap='gray', origin='lower')
            ax[1, i].imshow(stamps_lc[i][0], interpolation="nearest",
                            cmap='gray', origin='lower')
            ax[2, i].imshow(stamps_lc[i][1], interpolation="nearest",
                            cmap='gray', origin='lower', clim=(cmin, cmax))
            circle = plt.Circle([lc_df['xcenter'][i],
                                 lc_df['ycenter'][i]],
                                ap_radii[0], color='r', lw=.5, fill=False)
            ax[2, i].add_artist(circle)
            circle = plt.Circle([lc_df['xcenter'][i],
                                 lc_df['ycenter'][i]],
                                ap_radii[1], color='r', lw=.5, fill=False)
            ax[2, i].add_artist(circle)
            cmin_k = np.percentile(stamps_lc[i][2].flatten(), 50)
            cmax_k = stamps_lc[i][2].flatten().max()
            ax[3, i].imshow(stamps_lc[i][2], interpolation="nearest",
                            cmap='gray', origin='lower', clim=(cmin_k, cmax_k))
            ax[0, i].text(1, 1, "%8.2f" % epochs[idx_worst][1],
                          fontsize=14, color='orange')
            ax[1, i].text(1, 1, "%8.2f" % stamps_lc[i][3],
                          fontsize=14, color='orange')
            for j in range(4):
                ax[j, i].axes.get_xaxis().set_visible(False)
                ax[j, i].axes.get_yaxis().set_visible(False)

        fig.subplots_adjust(wspace=0, hspace=0)
        plt.savefig('%s/lightcurves/galaxy/%s/%s_psf_series.png' %
                    (jorgepath, field, name),
                    tight_layout=True, pad_inches=0.01, facecolor='black',
                    bbox_inches='tight')
        # plt.show()
        plt.close(fig)

    if False:
        plt.errorbar(lc_df['mjd'], lc_df['aperture_mag_0'],
                     yerr=lc_df['aperture_mag_err_0'], lw=0, label='psf',
                     elinewidth=1, c='r', marker='.', markersize=15)
        plt.xlabel('mjd')
        plt.ylabel('g flux')
        plt.legend(loc='best')
        plt.show()
Example #3
import numpy as np
from astropy.io import fits
from matplotlib import pyplot as plt
from scipy.signal import convolve as scipy_convolve

from photutils import create_matching_kernel
from photutils import TopHatWindow
from photutils import CosineBellWindow

# Two candidate windows; note that the create_matching_kernel call below does
# not actually pass either one (the window argument is commented out there).
window = CosineBellWindow(alpha=0.35)
window = TopHatWindow(0.35)

dir1 = '/home/sourabh/ULIRG_package/data/OPTICAL_PSF/'
data_ref = fits.getdata(dir1 + 'f165psf.fits')
data_psf = fits.getdata(dir1 + 'PSF_775_gal4_rotate_cut.fits')
kernel = create_matching_kernel(data_psf, data_ref)  # , window = window )
fits.writeto('ker.fits', data=kernel, overwrite=True)
plt.imshow(kernel, cmap='Greys_r', origin='lower')
filename = '/home/sourabh/ULIRG_package/data/IRASF10594+3818/gal1_HA.fits'
fileout = '/home/sourabh/ULIRG_package/data/IRASF10594+3818/gal1_HA_psfmatch.fits'
ker = 'ker.fits'
#ker_shift = np.pad(kernel, ((0, 1), (0, 1)), mode='constant')
data1 = scipy_convolve(data_psf, kernel, mode='same')
fits.writeto('test2.fits', data=data1, overwrite=True)
data3 = data1 - data_ref
fits.writeto('test3.fits', data=data3, overwrite=True)


def psf_match(filename, fileout, ker):
    hdulist = fits.open(filename)
Example #4
# The high-resolution PSF does not have the same size as the target PSF, so we
# need to extrapolate it onto the target grid. First try: simply cut it out
# (padding beyond the edges) and replace the NaNs with 0.
source_psf = Cutout2D(
    data=hires_psf.copy(),
    position=(hires_psf.shape[0] // 2, hires_psf.shape[0] // 2),
    size=target_psf.shape,  # currently, this is hard coded. Change later!
    mode="partial",
    copy=True).data
source_psf[np.isnan(source_psf)] = 0
source_psf = source_psf / np.nansum(source_psf)

## Now compute the Kernel
kernel_HIRES_to_LOWRES = create_matching_kernel(
    source_psf=source_psf.copy(),
    target_psf=target_psf.copy(),
    window=TopHatWindow(0.4))  # this is currently hard coded. Change later?
kernel_HIRES_to_LOWRES = kernel_HIRES_to_LOWRES / np.nansum(
    kernel_HIRES_to_LOWRES)

## Save
hdu = fits.PrimaryHDU(data=kernel_HIRES_to_LOWRES)
hdul = fits.HDUList([hdu])
hdul.writeto(os.path.join(this_work_dir, "kernel.fits"), overwrite=True)
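
# --- Hedged sanity check (added; not in the original) ---
# Convolving the source PSF with the matching kernel should approximately
# reproduce the target PSF, which is a quick way to validate the kernel
# before applying it to the high-resolution science image.
from astropy.convolution import convolve

check_psf = convolve(source_psf, kernel_HIRES_to_LOWRES, normalize_kernel=True)
print("Max |check - target| PSF residual:",
      np.max(np.abs(check_psf - target_psf)))
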

## Plot the PSFs for checking
if userinput["make_plots"] == "true":
    print("Making some Figures.")
    LOG.append("Making some Figures.")

    fig = plt.figure(figsize=(15, 5))
    ax1 = fig.add_subplot(1, 3, 1)
Example #5
aimsem = sems[aimind]
psf = {}

for semester in sems:
    psf[semester] = fits.open('PSFs/small_' + semester + '_K_PSF.fits')[0].data

aimpsf = psf[aimsem]

for semester in sems:
    if semester == aimsem:
        #        plt.figure()
        #        plt.imshow(np.log(psf[semester]))
        continue
    kernel = create_matching_kernel(psf[semester],
                                    aimpsf,
                                    window=TopHatWindow(0.5))

    plt.figure()
    plt.subplot(121)
    #    plt.imshow(kernel)
    plt.imshow(np.log(kernel))
    plt.subplot(122)
    #    plt.imshow(psf[semester])
    plt.imshow(np.log(psf[semester]))
#    ### Open image ###
#    im05Bfull = fits.open('UDS_'+semester+'_K.fits', memmap=True)
#    im05B = im05Bfull[0].data
#    hdr = im05Bfull[0].header
#
#    ### Convolve Image ###
#    newim05B = convolve(im05B, kernel)
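#    ### Save matched image (hedged sketch, added; not in the original) ###
#    ### The output file name below is an assumption for illustration.
#    fits.writeto('UDS_' + semester + '_K_matched.fits', data=newim05B,
#                 header=hdr, overwrite=True)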
Example #6
mindata, maxdata = adjustimage(Cutoutimg)
mindata1, maxdata1 = adjustimage(Cutoutimg1)

plt.figure(0)
plt.imshow(Cutoutimg, vmin=mindata, vmax=maxdata, cmap='gray')

plt.figure(1)
plt.imshow(Cutoutimg1, vmin=mindata1, vmax=maxdata1, cmap='gray')

plt.figure(2)
doubleimg = np.float64(Cutoutimg) - np.float64(Cutoutimg1)
doubleimg = np.abs(doubleimg)
mindata2, maxdata2 = adjustimage(doubleimg)
plt.imshow(doubleimg, vmin=mindata2, vmax=maxdata2, cmap='gray')

window = TopHatWindow(0.8)
fimg = np.float64(Cutoutimg)
fimg1 = np.float64(Cutoutimg1)
kernel = create_matching_kernel(fimg, fimg1, window)

bluimage = make_blurred(Cutoutimg, kernel)
plt.figure(3)
mindata3, maxdata3 = adjustimage(bluimage)
plt.imshow(bluimage, vmin=mindata3, vmax=maxdata3, cmap='gray')

plt.figure(4)
doubleimg1 = np.float64(Cutoutimg1) - np.float64(bluimage)
doubleimg1 = np.abs(doubleimg1)
mindata4, maxdata4 = adjustimage(doubleimg1)
plt.imshow(doubleimg1, vmin=mindata4, vmax=maxdata4, cmap='gray')
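
# --- Hedged sketch (added; not part of the original example) ---
# 'adjustimage' and 'make_blurred' are helpers that are not shown above.
# Plausible minimal versions are given below, assuming adjustimage returns
# percentile-based display limits and make_blurred convolves the image with
# the PSF-matching kernel; in the real script they would be defined before use.
import numpy as np
from scipy.signal import convolve as scipy_convolve


def adjustimage(img, lo=1.0, hi=99.0):
    """Return display limits taken from low/high percentiles of the image."""
    return np.percentile(img, lo), np.percentile(img, hi)


def make_blurred(img, kernel):
    """Convolve an image with the PSF-matching kernel, preserving its shape."""
    return scipy_convolve(np.float64(img), kernel, mode='same')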
Example #7
    def get_kernel(self,
                   from_filter,
                   to_filter,
                   pixelscale,
                   from_fwhm=None,
                   to_fwhm=None,
                   from_model="gaussian",
                   to_model="gaussian",
                   size=51):
        """
        This function ...
        :param from_filter:
        :param to_filter:
        :param pixelscale:
        :param from_fwhm:
        :param to_fwhm:
        :param from_model:
        :param to_model:
        :param size:
        :return:
        """

        # Make sure from_fwhm and to_fwhm is defined
        if from_fwhm is None: from_fwhm = get_fwhm(from_filter)
        if to_fwhm is None: to_fwhm = get_fwhm(to_filter)

        # Make sure that size is odd number
        if size % 2 == 0: size += 1

        # Determine center based on size
        center = int((size - 1) / 2)

        # Create window
        window = TopHatWindow(0.35)

        y, x = np.mgrid[0:size, 0:size]

        # Determine FWHMs in number of pixels and convert them to Gaussian
        # standard deviations (FWHM = 2 * sqrt(2 * ln 2) * sigma ~ 2.3548 * sigma)
        from_sigma_pix = from_fwhm / pixelscale.average / 2.3548
        to_sigma_pix = to_fwhm / pixelscale.average / 2.3548

        # amplitude, x_mean, y_mean, x_stddev, y_stddev
        gm1 = Gaussian2D(1, center, center, from_sigma_pix, from_sigma_pix)
        gm2 = Gaussian2D(1, center, center, to_sigma_pix, to_sigma_pix)

        # Generate
        g1 = gm1(x, y)
        g2 = gm2(x, y)

        # Normalize
        g1 /= g1.sum()
        g2 /= g2.sum()

        # Create the kernel, set FWHM, from_filter, to_filter, prepared=True, and pixelscale
        data = create_matching_kernel(g1, g2, window=window)
        kernel = ConvolutionKernel(data,
                                   fwhm=to_fwhm,
                                   from_filter=from_filter,
                                   to_filter=to_filter,
                                   prepared=True,
                                   pixelscale=pixelscale)

        # Return the kernel
        return kernel
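
    # --- Hedged usage sketch (added; not part of the original) ---
    # get_kernel only needs the two filter names for bookkeeping, a pixel scale
    # object exposing an '.average' attribute, and FWHMs in the same angular
    # units as that pixel scale. The names and values below are illustrative
    # assumptions:
    #
    #     from types import SimpleNamespace
    #     pixelscale = SimpleNamespace(average=1.6)   # e.g. arcsec / pixel
    #     kernel = maker.get_kernel("Pacs blue", "SPIRE 250", pixelscale,
    #                               from_fwhm=5.6, to_fwhm=18.0, size=51)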
Example #8
from SynthObs.Morph import PSF

# --- own, this package

sys.path.insert(0, os.path.abspath('../../'))

import FLARE
import FLARE.obs
import FLARE.surveys
import FLARE.photom

survey = FLARE.surveys.XDF
field = survey.fields['XDF']
ndim = 21
window = TopHatWindow(1.0)
plot = False

target_filter = field.filters[-1]
psf = PSF.PSF(target_filter)
pixel_scale = field.pixel_scale
native_pixel_scale = FLARE.filters.pixel_scale[target_filter]
x = y = np.linspace(-(ndim / 2.) * (pixel_scale / native_pixel_scale),
                    (ndim / 2.) * (pixel_scale / native_pixel_scale),
                    ndim)  # in original pixels
target_kernel = psf.f(x, y)
target_kernel /= np.sum(target_kernel)

for filter in field.filters:

    # --- calculate transfer kernel
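    # Hedged sketch (added; the original is truncated here): one would
    # typically evaluate this filter's PSF on the same grid, normalize it,
    # and derive the transfer kernel to the target PSF, e.g.:
    #
    #     source_kernel = PSF.PSF(filter).f(x, y)
    #     source_kernel /= np.sum(source_kernel)
    #     transfer_kernel = create_matching_kernel(source_kernel, target_kernel,
    #                                              window=window)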