Example #1
def simple_psf_options():
    # NOTE: sigma_psf, xcent, ycent, iraffind, daogroup, mmm_bkg and imdata are
    # assumed to be defined at module level (see the setup sketch after this example).
    psf_model = IntegratedGaussianPRF(sigma=sigma_psf)

    # Hold the source positions fixed and fit only the fluxes.
    psf_model.x_0.fixed = True
    psf_model.y_0.fixed = True
    pos = Table(names=['x_0', 'y_0'], data=[xcent, ycent])

    # Option 1: single-pass photometry at the positions given in pos.
    photometry = BasicPSFPhotometry(group_maker=daogroup,
                                    bkg_estimator=mmm_bkg,
                                    psf_model=psf_model,
                                    fitter=LevMarLSQFitter(),
                                    fitshape=(11, 11))
    result_tab = photometry(image=imdata, init_guesses=pos)

    # Option 2: iterative find/fit/subtract photometry driven by the star finder.
    photometry = IterativelySubtractedPSFPhotometry(finder=iraffind,
                                                    group_maker=daogroup,
                                                    bkg_estimator=mmm_bkg,
                                                    psf_model=psf_model,
                                                    fitter=LevMarLSQFitter(),
                                                    niters=1,
                                                    fitshape=(11, 11))
    result_tab = photometry(image=imdata)
    residual_image = photometry.get_residual_image()

    plt.figure()
    plt.imshow(residual_image, vmin=-0.2, vmax=2.0, cmap='plasma')
    plt.show()

    return
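Example #1 leans on names defined outside the function. Below is a minimal setup sketch for those names, assuming the legacy photutils PSF-photometry API used throughout these examples (BasicPSFPhotometry and friends, removed in recent photutils releases); the image and numeric values are placeholders.

# Minimal setup sketch for Example #1 (illustrative values only).
import matplotlib.pyplot as plt
import numpy as np
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.stats import gaussian_sigma_to_fwhm
from astropy.table import Table
from photutils.background import MADStdBackgroundRMS, MMMBackground
from photutils.detection import IRAFStarFinder
from photutils.psf import (BasicPSFPhotometry, DAOGroup, IntegratedGaussianPRF,
                           IterativelySubtractedPSFPhotometry)

imdata = np.random.normal(100.0, 5.0, (256, 256))    # placeholder image
sigma_psf = 2.0                                      # assumed PSF sigma in pixels
xcent, ycent = [128.0], [131.0]                      # assumed initial positions

std = MADStdBackgroundRMS()(imdata)                  # robust background RMS estimate
iraffind = IRAFStarFinder(threshold=3.5 * std,
                          fwhm=sigma_psf * gaussian_sigma_to_fwhm)
daogroup = DAOGroup(2.0 * sigma_psf * gaussian_sigma_to_fwhm)
mmm_bkg = MMMBackground()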
Example #2
def LCO_PSF_PHOT(hdu,init_guesses):
	# hdu.data ~ 2D image array; init_guesses ~ (x0, y0) float pixel position; sigma_psf ~ LCO PSF sigma (computed below)
	x0,y0=init_guesses
	im = hdu.data
	hdr = hdu.header
	
	fwhm = hdr['L1FWHM']/hdr['PIXSCALE'] # PSF FWHM in pixels, roughly ~ 5 pixels, ~ 2 arcsec 
	sigma_psf = fwhm*gaussian_fwhm_to_sigma # PSF sigma in pixels
	
	psf_model = IntegratedGaussianPRF(sigma=sigma_psf)
	daogroup = DAOGroup(2.0*sigma_psf*gaussian_sigma_to_fwhm)
	mmm_bkg = MMMBackground()
	fitter = LevMarLSQFitter()

	psf_model.x_0.fixed = True
	psf_model.y_0.fixed = True
	pos = Table(names=['x_0', 'y_0'], data=[[x0],[y0]]) # flux_0 could also be supplied; otherwise the aperture-based initial guess is used

	photometry = BasicPSFPhotometry(group_maker=daogroup,
									 bkg_estimator=mmm_bkg,
									 psf_model=psf_model,
									 fitter=LevMarLSQFitter(),
									 fitshape=(11,11))
	result_tab = photometry(image=im, init_guesses=pos)
	residual_image = photometry.get_residual_image()
	
	return result_tab
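A usage sketch for LCO_PSF_PHOT, assuming an LCO-reduced frame whose header carries the L1FWHM and PIXSCALE keywords the function expects; the file name and pixel coordinates below are hypothetical.

# Usage sketch (hypothetical file and coordinates).
from astropy.io import fits

with fits.open('lco_frame.fits') as hdulist:     # hypothetical LCO data product
    hdu = hdulist[0]                             # use the extension holding the image data
    result = LCO_PSF_PHOT(hdu, init_guesses=(1024.3, 996.8))

print(result['x_fit', 'y_fit', 'flux_fit'])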
Example #3
def psf_photometry(filepath, filename, show, sn_ra, sn_dec):

    start = time.time()

    warnings.simplefilter('ignore', category=FITSFixedWarning)
    warnings.simplefilter('ignore', category=AstropyUserWarning)

    print(f'Working on {filename}')

    full_filepath = filepath + filename
    image_data = getdata(full_filepath)
    hdr = getheader(full_filepath)
    fwhm = hdr['L1FWHM'] / hdr['PIXSCALE']
    exptime = hdr['EXPTIME']

    metadata = load_pickle(filename)
    epsf_data = np.array(metadata['epsf'])
    epsf = EPSFModel(epsf_data, fwhm=fwhm, oversampling=2)
    # epsf.x_0.fixed = True
    # epsf.y_0.fixed = True
    daogroup = DAOGroup(2.0 * fwhm)
    bkg = MMMBackground()
    fitter = LevMarLSQFitter()
    fitshape = 25
    photometry = BasicPSFPhotometry(group_maker=daogroup,
                                    bkg_estimator=bkg,
                                    psf_model=epsf,
                                    fitter=fitter,
                                    fitshape=fitshape,
                                    aperture_radius=fitshape)

    psfmags = []

    print('\tExtracting other stars . . .')
    counter = 0
    for star in metadata['psf_fitted_stars']:
        counter += 1
        (x, y) = star
        psfmags = _do_phot(x, y, image_data, exptime, fitshape, photometry,
                           psfmags)
        print(
            f'\t\tStars extracted: {counter}/{len(metadata["psf_fitted_stars"])}',
            end='\r')
    print()

    print('\tExtracting supernova . . .')
    x, y = _get_sn_xy(filepath, filename, sn_ra, sn_dec)
    psfmags = _do_phot(x, y, image_data, exptime, fitshape, photometry,
                       psfmags)

    create_or_update_pickle(filename, key='psfmags', val=psfmags)

    end = time.time()
    print(f'Time to perform photometry (s): {end-start:.3f}')

    if show:
        checkmag(image_data, photometry.get_residual_image(), x, y, fitshape)
    print()
Example #4
def astropy_psf_photometry(img,
                           sigma_psf,
                           aperture=3,
                           x0=None,
                           y0=None,
                           filter=True,
                           sigma_filter=1):
    """
    performs PSF photometry on an image. If x0 and y0 are None will attempt to locate the target by searching for the
    brightest PSF in the field

    :param img: 2D array, image on which to perform PSF photometry
    :param sigma_psf: float, standard deviation of the PSF
    :param aperture: int, size of the paerture (pixels)
    :param x0: x position of the target (pixels)
    :param y0: y position of the target (pixels)
    :param filter: If True will apply a gaussian filter to the image with standard deviation sigma_filter before
     performing PSF photometry
    :param sigma_filter: standard deviation of gaussian filter to apply to the image
    :return: x0 column of photometry table, y0 column of photometry table, flux column of photometry table
    """
    if filter:
        image = ndimage.gaussian_filter(img, sigma=sigma_filter, order=0)
    else:
        image = img
    bkgrms = MADStdBackgroundRMS()
    std = bkgrms(image[image != 0])
    iraffind = IRAFStarFinder(threshold=2 * std,
                              fwhm=sigma_psf * gaussian_sigma_to_fwhm,
                              minsep_fwhm=0.01,
                              roundhi=5.0,
                              roundlo=-5.0,
                              sharplo=0.0,
                              sharphi=2.0)
    daogroup = DAOGroup(2.0 * sigma_psf * gaussian_sigma_to_fwhm)
    mmm_bkg = MMMBackground()
    fitter = LevMarLSQFitter()
    psf_model = IntegratedGaussianPRF(sigma=sigma_psf)
    if x0 is not None and y0 is not None:
        pos = Table(names=['x_0', 'y_0'], data=[[x0], [y0]])
        photometry = BasicPSFPhotometry(group_maker=daogroup,
                                        bkg_estimator=mmm_bkg,
                                        psf_model=psf_model,
                                        fitter=fitter,
                                        fitshape=(11, 11))
        res = photometry(image=image, init_guesses=pos)
        return res['x_fit'], res['y_fit'], res['flux_fit']
    photometry = BasicPSFPhotometry(finder=iraffind,
                                    group_maker=daogroup,
                                    bkg_estimator=mmm_bkg,
                                    psf_model=psf_model,
                                    fitter=fitter,
                                    fitshape=(11, 11),
                                    aperture_radius=aperture)
    res = photometry(image=image)
    return res['x_fit'], res['y_fit'], res['flux_fit']
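A self-contained usage sketch for astropy_psf_photometry on a synthetic single-star image; the values are illustrative only, and the scipy/photutils imports used inside the function are assumed to exist at module level.

# Usage sketch on a synthetic image (illustrative values only).
import numpy as np

rng = np.random.default_rng(0)
yy, xx = np.mgrid[:64, :64]
sigma_true = 2.0
star = 500.0 * np.exp(-((xx - 30.0)**2 + (yy - 33.0)**2) / (2.0 * sigma_true**2))
img = star + rng.normal(0.0, 1.0, star.shape)

# Forced fit at a known position ...
xf, yf, flux = astropy_psf_photometry(img, sigma_true, x0=30.0, y0=33.0)
# ... or let IRAFStarFinder locate the brightest source:
xf, yf, flux = astropy_psf_photometry(img, sigma_true, aperture=3)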
Example #5
def photomyPSF(imgdata, position, sigma):
    PSFdata = np.copy(imgdata)
    sigma_psf = sigma
    daogroup = DAOGroup(2.0 * sigma_psf * gaussian_sigma_to_fwhm)
    mmm_bkg = MMMBackground()
    #fitter = LevMarLSQFitter()
    psf_model = IntegratedGaussianPRF(sigma=sigma_psf)

    sources = Table()

    sources['x_mean'] = position[:, 0].T
    sources['y_mean'] = position[:, 1].T

    psf_model.x_0.fixed = True
    psf_model.y_0.fixed = True
    pos = Table(names=['x_0', 'y_0'],
                data=[sources['x_mean'], sources['y_mean']])
    photometry = BasicPSFPhotometry(group_maker=daogroup,
                                    bkg_estimator=mmm_bkg,
                                    psf_model=psf_model,
                                    fitter=LevMarLSQFitter(),
                                    fitshape=(11, 11))

    result_tab = photometry(image=PSFdata, init_guesses=pos)
    positionflux = np.transpose(
        (result_tab['x_fit'], result_tab['y_fit'], result_tab['flux_fit']))

    magstar = 25 - 2.5 * np.log10(abs(result_tab['flux_fit']))  # instrumental magnitude, assuming a zero point of 25
    return positionflux, magstar
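The last line converts each fitted flux to an instrumental magnitude with an assumed zero point of 25; a quick worked check of that relation:

# Worked check of mag = ZP - 2.5*log10(flux) with ZP = 25 (illustrative flux value).
import numpy as np

flux_fit = 1.0e4                        # example fitted flux in counts
mag = 25 - 2.5 * np.log10(flux_fit)     # = 25 - 10 = 15.0 mag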
Example #6
def do_photometry_basic(image: np.ndarray,
                        σ_psf: float) -> Tuple[Table, np.ndarray]:
    """
    Find stars in an image with IRAFStarFinder

    :param image: The image data you want to find stars in
    :param σ_psf: expected deviation of PSF
    :return: tuple result table, residual image
    """
    bkgrms = MADStdBackgroundRMS()

    std = bkgrms(image)

    iraffind = IRAFStarFinder(threshold=3 * std,
                              sigma_radius=σ_psf,
                              fwhm=σ_psf * gaussian_sigma_to_fwhm,
                              minsep_fwhm=2,
                              roundhi=5.0,
                              roundlo=-5.0,
                              sharplo=0.0,
                              sharphi=2.0)
    daogroup = DAOGroup(0.1 * σ_psf * gaussian_sigma_to_fwhm)

    mmm_bkg = MMMBackground()

    # my_psf = AiryDisk2D(x_0=0., y_0=0.,radius=airy_minimum)
    # psf_model = prepare_psf_model(my_psf, xname='x_0', yname='y_0', fluxname='amplitude',renormalize_psf=False)
    psf_model = IntegratedGaussianPRF(sigma=σ_psf)
    # psf_model = AiryDisk2D(radius = airy_minimum)#prepare_psf_model(AiryDisk2D,xname ="x_0",yname="y_0")
    # psf_model = Moffat2D([amplitude, x_0, y_0, gamma, alpha])

    # photometry = IterativelySubtractedPSFPhotometry(finder=iraffind, group_maker=daogroup,
    #                                                bkg_estimator=mmm_bkg, psf_model=psf_model,
    #                                                fitter=LevMarLSQFitter(),
    #                                                niters=2, fitshape=(11,11))
    photometry = BasicPSFPhotometry(finder=iraffind,
                                    group_maker=daogroup,
                                    bkg_estimator=mmm_bkg,
                                    psf_model=psf_model,
                                    fitter=LevMarLSQFitter(),
                                    aperture_radius=11.0,
                                    fitshape=(11, 11))

    result_table = photometry.do_photometry(image)
    return result_table, photometry.get_residual_image()
Example #7
    def run(self, stars_coords):
        stack_path = self.fits_explorer.get("stack")[0]
        stack_fwhm = np.mean(self.fwhm_fit.run(fits.getdata(stack_path), stars_coords)[0:2])

        print("{} global psf FWHM: {:.2f} (pixels)".format(INFO_LABEL, np.mean(stack_fwhm)))

        n_stars = np.shape(stars_coords)[0]
        n_images = len(self.files)

        fluxes = np.zeros((n_stars, n_images))

        pos = Table(
            names=["x_0", "y_0"], data=[stars_coords[:, 0], stars_coords[:, 1]]
        )

        daogroup = DAOGroup(2.0 * stack_fwhm * gaussian_sigma_to_fwhm)

        mmm_bkg = MMMBackground()

        psf_model = IntegratedGaussianPRF(sigma=stack_fwhm)
        psf_model.sigma.fixed = False

        sky = []

        psf_model.x_0.fixed = True
        psf_model.y_0.fixed = True

        photometry = BasicPSFPhotometry(
            group_maker=daogroup,
            bkg_estimator=mmm_bkg,
            psf_model=psf_model,
            fitter=LevMarLSQFitter(),
            fitshape=(17, 17)
        )

        for i, image in enumerate(
                tqdm(
                    self.files[0::],
                    desc="Photometry extraction",
                    unit="files",
                    ncols=80,
                    bar_format=TQDM_BAR_FORMAT,
                )
        ):
            image = fits.getdata(image)

            result_tab = photometry(image=image, init_guesses=pos)

            fluxes[:, i] = result_tab["flux_fit"]
            sky.append(1)

        return fluxes, np.ones_like(fluxes), {"sky": sky}
Example #8
    def dophot(self, modelname, fitpix=11, apradpix=3, **kwargs):
        """Do photometry of the target: 
        set up a photometry object, do the photometry and store the results.
        """
        if modelname not in self.photometry:
            print("Model {} not loaded. Use load_psfmodel()".format(modelname))
            return
        hstpsfmodel = self.photometry[modelname].psfmodel

        # Make the photometry object
        hstphotobject = BasicPSFPhotometry(
            psf_model=hstpsfmodel.psfmodel,
            group_maker=hstpsfmodel.grouper,
            bkg_estimator=hstpsfmodel.bkg_estimator,
            fitter=hstpsfmodel.fitter,
            fitshape=(fitpix, fitpix),
            aperture_radius=apradpix)
        self.photometry[modelname].psfphotobject = hstphotobject

        phot_results_table = hstphotobject.do_photometry(
            image=self.imdat, init_guesses=self.target_table)
        self.photometry[modelname].psfphotresults = phot_results_table
Example #9
    def phot_sources(self, sources=None, peak=True, psf=True):

        if sources is None:
            sources = self.sources

        xx, yy = self.wcs.world_to_pixel_values(sources["ra"], sources["dec"])

        x_idx = np.floor(xx + 0.5).astype(int)
        y_idx = np.floor(yy + 0.5).astype(int)

        if peak:
            # Crude Peak Photometry
            # From pixel indexes to array indexing

            sources["flux_peak"] = Column(self.data[y_idx, x_idx], unit=self.unit * u.beam).to(u.mJy)
            sources["eflux_peak"] = Column(self.uncertainty.array[y_idx, x_idx], unit=self.unit * u.beam).to(u.mJy)

        if psf:
            # BasicPSFPhotometry with fixed positions

            sigma_psf = self.beam.sigma_pix.value

            # Using an IntegratedGaussianPRF can cause bias in the photometry
            # TODO: Check the NIKA2 calibration scheme
            # from photutils.psf import IntegratedGaussianPRF
            # psf_model = IntegratedGaussianPRF(sigma=sigma_psf)
            psf_model = CircularGaussianPSF(sigma=sigma_psf)

            psf_model.x_0.fixed = True
            psf_model.y_0.fixed = True

            daogroup = DAOGroup(3 * self.beam.fwhm_pix.value)
            mmm_bkg = MedianBackground()

            photometry = BasicPSFPhotometry(group_maker=daogroup, bkg_estimator=mmm_bkg, psf_model=psf_model, fitter=LevMarLSQFitter(), fitshape=9)

            positions = Table([Column(xx, name="x_0"), Column(yy, name="y_0"), Column(self.data[y_idx, x_idx], name="flux_0")])

            # Fill the mask with nan to perform correct photometry on the edge
            # of the mask, and catch numpy & astropy warnings
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", AstropyWarning)
                warnings.simplefilter("ignore", RuntimeWarning)
                result_tab = photometry(image=np.ma.array(self.data, mask=self.mask).filled(np.nan), init_guesses=positions)

            result_tab.sort("id")
            for _source, _tab in zip(["flux_psf", "eflux_psf"], ["flux_fit", "flux_unc"]):
                sources[_source] = Column(result_tab[_tab] * psf_model(0, 0), unit=self.unit * u.beam).to(u.mJy)
            sources["group_id"] = result_tab["group_id"]

        self.sources = sources
Example #10
    def _basic_psf_flux(self,
                        image,
                        fwhm,
                        x=None,
                        y=None,
                        return_residual_image=False):

        w, h = image.shape
        if x is None:
            x = w / 2
        if y is None:
            y = h / 2

        wfit = w if w % 2 == 1 else w - 1
        hfit = h if h % 2 == 1 else h - 1
        fitshape = (wfit, hfit)

        daogroup = DAOGroup(2.0 * fwhm)
        psf_model = IntegratedGaussianPRF(sigma=fwhm / 2.35)

        photometry = BasicPSFPhotometry(group_maker=daogroup,
                                        bkg_estimator=MMMBackground(),
                                        psf_model=psf_model,
                                        fitshape=fitshape)

        psf_model.x_0.fixed = True
        psf_model.y_0.fixed = True
        pos = Table(names=['x_0', 'y_0'], data=[[x], [y]])

        result = photometry(image=image, init_guesses=pos)
        flux = result["flux_fit"].data[0]

        self.results = result

        if return_residual_image:
            return flux, photometry.get_residual_image()
        else:
            return flux
Example #11
    def dopsfphot(self, modelname, fitpix=11, apradpix=3):
        """Do photometry of the target: 
        set up a photometry object, do the photometry and store the results.
        fitpix : int or length-2 array-like
          Rectangular shape around the center of a star which will be used
          to collect the data to do the fitting. Can be an integer to be
          the same along both axes. E.g., 5 is the same as (5, 5), which
          means to fit only at the following relative pixel positions:
          [-2, -1, 0, 1, 2].  Each element of ``fitshape`` must be an odd
          number.
        apradpix : float or None
          The radius (in units of pixels) used to compute initial
          estimates for the fluxes of sources. If ``None``, one FWHM will
          be used if it can be determined from the ``psf_model``.
        
        """
        if modelname not in self._photutils_output_dict:
            print("Model {} not loaded. Use load_psfmodel()".format(modelname))
            return
        if self.skyvalperpix is None:
            self.get_sky_from_annulus()
        hstpsfmodel = self._photutils_output_dict[modelname].psfmodel

        # Make the photometry object
        hstphotobject = BasicPSFPhotometry(
            psf_model=hstpsfmodel.psfmodel, group_maker=hstpsfmodel.grouper,
            bkg_estimator=hstpsfmodel.bkg_estimator,
            fitter=hstpsfmodel.fitter, fitshape=fitpix,
            aperture_radius=apradpix)
        self._photutils_output_dict[modelname].photobject = hstphotobject

        phot_results_table = hstphotobject.do_photometry(
            image=self.imdat, init_guesses=self.target_table)
        self._photutils_output_dict[modelname].photresultstable = phot_results_table

        self._photutils_output_dict[modelname].get_flux_and_mag(
            self.zpt, self.camera, self.filter)
Example #12
    def __init__(self, fwhm, **kwargs):
        super().__init__(**kwargs)

        daogroup = DAOGroup(2.0 * fwhm * gaussian_sigma_to_fwhm)
        mmm_bkg = MMMBackground()
        psf_model = IntegratedGaussianPRF(sigma=fwhm)  # note: the ``fwhm`` argument is used directly as the Gaussian sigma
        psf_model.sigma.fixed = False
        psf_model.x_0.fixed = True
        psf_model.y_0.fixed = True

        self.photometry = BasicPSFPhotometry(group_maker=daogroup,
                                             bkg_estimator=mmm_bkg,
                                             psf_model=psf_model,
                                             fitter=LevMarLSQFitter(),
                                             fitshape=(17, 17))
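Example #12 only builds and stores the photometry object. Below is a standalone sketch, with placeholder data, of constructing the same object and calling it the way the class presumably does elsewhere; it mirrors what is stored on self.photometry rather than the original author's calling code.

# Standalone sketch of building and calling the same photometry object (illustrative values).
import numpy as np
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.stats import gaussian_sigma_to_fwhm
from astropy.table import Table
from photutils.background import MMMBackground
from photutils.psf import BasicPSFPhotometry, DAOGroup, IntegratedGaussianPRF

sigma = 2.5                                        # plays the role of the ``fwhm`` argument above
psf_model = IntegratedGaussianPRF(sigma=sigma)
psf_model.sigma.fixed = False
psf_model.x_0.fixed = True
psf_model.y_0.fixed = True

photometry = BasicPSFPhotometry(group_maker=DAOGroup(2.0 * sigma * gaussian_sigma_to_fwhm),
                                bkg_estimator=MMMBackground(),
                                psf_model=psf_model,
                                fitter=LevMarLSQFitter(),
                                fitshape=(17, 17))

image = np.random.normal(10.0, 1.0, (64, 64))      # placeholder image with one fake source
image[28:33, 30:35] += 50.0
pos = Table(names=['x_0', 'y_0'], data=[[32.0], [30.0]])
result = photometry(image=image, init_guesses=pos)
print(result['flux_fit'])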
Example #13
def run_photometry(img_file, epsf, fwhm, x, y, subtract_back=False,
    forced=False):

    img_hdu = fits.open(img_file)
    if subtract_back:
        bkg = Background2D(img_hdu[0].data, (21,21), filter_size=(3,3))
        image = img_hdu[0].data - bkg.background
        ndimage = NDData(data=image)  # use the background-subtracted image
    else:
        image = img_hdu[0].data
        ndimage = NDData(data=img_hdu[0].data)

    psf = copy.copy(epsf)

    stars_tbl = Table()
    stars_tbl['x'] = x
    stars_tbl['y'] = y
    stars = extract_stars(ndimage, stars_tbl, size=51)

    stars_tbl['flux'] = np.array([stars[0].estimate_flux()])

    targets = Table()
    targets['x_0'] = stars_tbl['x']
    targets['y_0'] = stars_tbl['y']
    targets['flux_0'] = stars_tbl['flux']

    if forced:
        psf.x_0.fixed = True
        psf.y_0.fixed = True

    daogroup = DAOGroup(fwhm)
    mmm_bkg = MMMBackground()     # assumes MMMBackground is imported at module level
    fitter = LevMarLSQFitter()    # assumes LevMarLSQFitter is imported at module level
    photometry = BasicPSFPhotometry(group_maker=daogroup,
                                    bkg_estimator=mmm_bkg,
                                    psf_model=psf,
                                    fitter=fitter,
                                    fitshape=(51,51))

    result_tab = photometry(image=image, init_guesses=targets)

    return(result_tab)
Example #14
    def forced_psf_photometry(self, forced_cat, save_residual_images=False):

        catalog = LsstStruct()
        positions = Table(names=['x_0', 'y_0'],
                          data=[forced_cat['x_fit'], forced_cat['y_fit']])
        if save_residual_images:
            self.residual_image_forced = LsstStruct()

        for band in self.mbi.bands:
            daogroup = DAOGroup(self.crit_separation * self.psf_fwhm[band])
            aperture_radius = self.aperture_radius * self.psf_fwhm[band]

            logger.info('performing forced photometry for ' + band + ' band')

            self.psf_model[band].x_0.fixed = True
            self.psf_model[band].y_0.fixed = True

            photometry = BasicPSFPhotometry(
                finder=None,
                group_maker=daogroup,
                fitshape=self.phot_opts['fitshape'],
                psf_model=self.psf_model[band],
                bkg_estimator=self.bkg,
                aperture_radius=aperture_radius)

            catalog[band] = photometry(image=self.phot_image[band],
                                       init_guesses=positions)

            # turns out the order might be different for each band!!!
            catalog[band].sort('x_fit')

            if save_residual_images:
                logger.info('generating residual image')
                self.residual_image_forced[band] = subtract_psf(
                    self.mbi.image[band], self.psf_model[band], catalog[band])

        return catalog
Example #15
def psfphotometry(imagefile,
                  ra=None,
                  dec=None,
                  x=None,
                  y=None,
                  fwhm=5.0,
                  zp=0.0,
                  gain=1.0,
                  doDifferential=False,
                  xfield=None,
                  yfield=None,
                  xfirst=None,
                  yfirst=None):

    hdulist = fits.open(imagefile)
    header = fits.getheader(imagefile)

    if x is None:
        w = WCS(header)
        x0, y0 = w.wcs_world2pix(ra, dec, 1)
        gain = 1.0
    else:
        x0, y0 = x, y

    if len(hdulist) > 3:
        image = hdulist[1].data
    else:
        image = hdulist[0].data
    image_shape = image.shape

    #daogroup = DAOGroup(crit_separation=8)
    daogroup = DAOGroup(crit_separation=25)

    mmm_bkg = MMMBackground()
    #iraffind = IRAFStarFinder(threshold=2.0*mmm_bkg(image),
    #                          fwhm=4.0)
    fitter = LevMarLSQFitter()
    gaussian_prf = IntegratedGaussianPRF(flux=1, sigma=1.7)
    gaussian_prf.sigma.fixed = False
    gaussian_prf.flux.fixed = False

    psffile = imagefile.replace(".fits", ".psf")
    fid = open(psffile, 'w')

    if len(image_shape) == 3:

        nhdu, xshape, yshape = image.shape
        dateobs = utcparser(hdulist[0].header["UTCSTART"])
        mjd = dateobs.mjd

        if "KINCYCTI" in hdulist[0].header:
            mjdall = mjd + np.arange(
                nhdu) * hdulist[0].header["KINCYCTI"] / 86400.0
        else:
            mjdall = mjd + np.arange(
                nhdu) * hdulist[0].header["EXPTIME"] / 86400.0

        mjds, mags, magerrs, fluxes, fluxerrs = [], [], [], [], []
        for jj in range(nhdu):
            if np.mod(jj, 10) == 0:
                print('PSF fitting: %d/%d' % (jj, nhdu))

            image = hdulist[0].data[jj, :, :]
            mjd = mjdall[jj]

            n, median, std = sigma_clipped_stats(image, sigma=3.0)
            daofind = DAOStarFinder(fwhm=2.0, threshold=2. * std)

            #phot_obj = IterativelySubtractedPSFPhotometry(finder=daofind,
            #                                              group_maker=daogroup,
            #                                              bkg_estimator=mmm_bkg,
            #                                              psf_model=gaussian_prf,
            #                                              fitter=fitter,
            #                                              fitshape=(21, 21),
            #                                              niters=10)

            image = image - np.median(image)
            image_slice = np.zeros(image.shape)

            slsize = 25
            xmin = np.max([0, int(x0 - slsize)])
            xmax = np.min([int(x0 + slsize), image.shape[0]])
            ymin = np.max([0, int(y0 - slsize)])
            ymax = np.min([int(y0 + slsize), image.shape[1]])
            image_slice[ymin:ymax, xmin:xmax] = 1

            if doDifferential:
                xmin_f = np.max([0, int(xfield - slsize)])
                xmax_f = np.min([int(xfield + slsize), image.shape[0]])
                ymin_f = np.max([0, int(yfield - slsize)])
                ymax_f = np.min([int(yfield + slsize), image.shape[1]])
                image_slice[ymin_f:ymax_f, xmin_f:xmax_f] = 1

            image = image * image_slice

            if (xfirst is None) or (yfirst is None):
                phot_obj = BasicPSFPhotometry(finder=daofind,
                                              group_maker=daogroup,
                                              psf_model=gaussian_prf,
                                              fitter=fitter,
                                              fitshape=(21, 21),
                                              bkg_estimator=mmm_bkg)
                phot_results = phot_obj(image)
            else:
                gaussian_prf = IntegratedGaussianPRF(flux=1, sigma=1.7)
                gaussian_prf.sigma.fixed = False
                gaussian_prf.flux.fixed = False
                gaussian_prf.x_0.fixed = False
                gaussian_prf.y_0.fixed = False

                phot_obj = BasicPSFPhotometry(group_maker=daogroup,
                                              psf_model=gaussian_prf,
                                              fitter=fitter,
                                              fitshape=(21, 21),
                                              bkg_estimator=mmm_bkg)

                pos = Table(names=['x_0', 'y_0'],
                            data=[[xfirst, xfield], [yfirst, yfield]])
                phot_results_tmp = phot_obj(image, init_guesses=pos)
                resimage = phot_obj.get_residual_image()

                pos = Table(names=['x_0', 'y_0'], data=[[x0], [y0]])

                gaussian_prf = IntegratedGaussianPRF(flux=1, sigma=1.7)
                gaussian_prf.sigma.fixed = False
                gaussian_prf.flux.fixed = False
                gaussian_prf.x_0.fixed = True
                gaussian_prf.y_0.fixed = True

                phot_obj = BasicPSFPhotometry(group_maker=daogroup,
                                              psf_model=gaussian_prf,
                                              fitter=fitter,
                                              fitshape=(7, 7),
                                              bkg_estimator=mmm_bkg)

                phot_results = phot_obj(resimage, init_guesses=pos)

                phot_results = vstack([phot_results_tmp, phot_results])

            #if True:
            if False:
                #sources = iraffind(image)
                sources = daofind(image)
                import matplotlib.pyplot as plt

                positions = np.transpose(
                    (sources['ycentroid'], sources['xcentroid']))
                apertures = CircularAperture(positions, r=4.)
                fig, axs = plt.subplots(1, 2)
                plt.axes(axs[0])
                plt.imshow(image.T,
                           origin='lower',
                           cmap='viridis',
                           aspect=1,
                           interpolation='nearest',
                           vmin=np.percentile(image[image > 0], 10),
                           vmax=np.percentile(image[image > 0], 90))
                apertures.plot(color='red')
                plt.xlim([ymin, ymax])
                plt.ylim([xmin, xmax])

                resimage = phot_obj.get_residual_image()
                plt.axes(axs[1])
                plt.imshow(resimage.T,
                           origin='lower',
                           cmap='viridis',
                           aspect=1,
                           interpolation='nearest',
                           vmin=0,
                           vmax=np.percentile(resimage[resimage > 0], 90))
                apertures.plot(color='red')
                plt.xlim([ymin, ymax])
                plt.ylim([xmin, xmax])
                plt.savefig('test_%d.png' % jj)
                plt.close()

                fig, axs = plt.subplots(1, 2)
                plt.axes(axs[0])
                plt.imshow(image.T,
                           origin='lower',
                           cmap='viridis',
                           aspect=1,
                           interpolation='nearest',
                           vmin=np.percentile(image[image > 0], 10),
                           vmax=np.percentile(image[image > 0], 90))
                apertures.plot(color='red')
                plt.xlim([ymin_f, ymax_f])
                plt.ylim([xmin_f, xmax_f])

                resimage = phot_obj.get_residual_image()
                plt.axes(axs[1])
                plt.imshow(resimage.T,
                           origin='lower',
                           cmap='viridis',
                           aspect=1,
                           interpolation='nearest',
                           vmin=np.percentile(resimage[resimage > 0], 10),
                           vmax=np.percentile(resimage[resimage > 0], 90))
                apertures.plot(color='red')
                plt.xlim([ymin_f, ymax_f])
                plt.ylim([xmin_f, xmax_f])
                plt.savefig('test_f_%d.png' % jj)
                plt.close()

            #phot_results.pprint_all()

            #print(stop)

            dist = np.sqrt((phot_results["x_fit"] - x0)**2 +
                           (phot_results["y_fit"] - y0)**2)
            idx = np.argmin(dist)
            flux = phot_results[idx]["flux_fit"]
            fluxerr = phot_results[idx]["flux_unc"]
            magerr = 1.0857 * fluxerr / flux  #1.0857 = 2.5/log(10)
            mag = zp - 2.5 * np.log10(flux)

            if doDifferential:
                dist = np.sqrt((phot_results["x_fit"] - xfield)**2 +
                               (phot_results["y_fit"] - yfield)**2)
                idy = np.argmin(dist)
                flux_field = phot_results[idy]["flux_fit"]
                fluxerr_field = phot_results[idy]["flux_unc"]
                magerr_field = 1.0857 * fluxerr_field / flux_field  #1.0857 = 2.5/log(10)
                mag_field = zp - 2.5 * np.log10(flux_field)

                mag = mag - mag_field
                magerr = np.sqrt(magerr**2 + magerr_field**2)
                fluxerr = np.sqrt((fluxerr / flux)**2 +
                                  (fluxerr_field / flux_field)**2)
                flux = flux / flux_field
                fluxerr = flux * fluxerr

            #print(phot_results[idy]["flux_fit"], phot_results[idx]["flux_fit"])

            mjds.append(mjd)
            mags.append(mag)
            magerrs.append(magerr)
            fluxes.append(flux)
            fluxerrs.append(fluxerr)

            fid.write('%.5f %.5f %.5f %.5f %.5f\n' %
                      (dateobs.mjd, mag, magerr, flux, fluxerr))
        fid.close()

        return np.array(mjds), np.array(mags), np.array(magerrs), np.array(
            fluxes), np.array(fluxerrs)

    else:
        mjds, mags, magerrs, fluxes, fluxerrs = [], [], [], [], []
        for ii, hdu in enumerate(hdulist):
            if ii == 0: continue
            header = hdulist[ii].header
            image = hdulist[ii].data
            if not "DATE" in header:
                print("Warning: 'DATE missing from %s hdu %d/%d" %
                      (imagefile, ii, len(hdulist)))
                continue

            dateobs = Time(header["DATE"])

            # Set up the star finder and photometry object for this extension
            # (phot_obj is not defined earlier on this code path).
            _, median, std = sigma_clipped_stats(image, sigma=3.0)
            daofind = DAOStarFinder(fwhm=2.0, threshold=2. * std)
            phot_obj = BasicPSFPhotometry(finder=daofind,
                                          group_maker=daogroup,
                                          psf_model=gaussian_prf,
                                          fitter=fitter,
                                          fitshape=(21, 21),
                                          bkg_estimator=mmm_bkg)

            phot_results = phot_obj(image)

            dist = np.sqrt((phot_results["x_fit"] - x0)**2 +
                           (phot_results["y_fit"] - y0)**2)
            idx = np.argmin(dist)
            flux = phot_results[idx]["flux_fit"]
            fluxerr = phot_results[idx]["flux_unc"]
            magerr = 1.0857 * fluxerr / flux  #1.0857 = 2.5/log(10)
            mag = zp - 2.5 * np.log10(flux)

            mjds.append(dateobs.mjd)
            mags.append(mag)
            magerrs.append(magerr)
            fluxes.append(flux)
            fluxerrs.append(fluxerr)

            fid.write('%.5f %.5f %.5f %.5f %.5f\n' %
                      (dateobs.mjd, mag, magerr, flux, fluxerr))
        fid.close()

        return np.array(mjds), np.array(mags), np.array(magerrs), np.array(
            fluxes), np.array(fluxerrs)
Example #16
def photometry(fileid):
    """
	Run photometry.

	.. codeauthor:: Rasmus Handberg <*****@*****.**>
	"""

    # Settings:
    ref_mag_limit = 17  # Lower limit on reference target brightness
    ref_target_dist_limit = 30  # Reference star must be further than this away to be included

    logger = logging.getLogger(__name__)
    tic = default_timer()

    # Use local copy of archive if configured to do so:
    config = load_config()

    # Get datafile dict from API:
    datafile = api.get_datafile(fileid)
    logger.debug("Datafile: %s", datafile)
    targetid = datafile['targetid']
    photfilter = datafile['photfilter']

    archive_local = config.get('photometry', 'archive_local', fallback=None)
    if archive_local is not None:
        datafile['archive_path'] = archive_local
    if not os.path.isdir(datafile['archive_path']):
        raise FileNotFoundError("ARCHIVE is not available")

    # Get the catalog containing the target and reference stars:
    # TODO: Include proper-motion to the time of observation
    catalog = api.get_catalog(targetid, output='table')
    target = catalog['target'][0]

    # Extract information about target:
    target_name = str(target['target_name'])
    target_coord = coords.SkyCoord(ra=target['ra'],
                                   dec=target['decl'],
                                   unit='deg',
                                   frame='icrs')

    # Folder to save output:
    # TODO: Change this!
    output_folder_root = config.get('photometry', 'output', fallback='.')
    output_folder = os.path.join(output_folder_root, target_name,
                                 '%04d' % fileid)
    os.makedirs(output_folder, exist_ok=True)

    # Also write any logging output to a log file in the output folder:
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    _filehandler = logging.FileHandler(os.path.join(output_folder,
                                                    'photometry.log'),
                                       mode='w')
    _filehandler.setFormatter(formatter)
    _filehandler.setLevel(logging.INFO)
    logger.addHandler(_filehandler)

    # The paths to the science image:
    filepath = os.path.join(datafile['archive_path'], datafile['path'])

    # TODO: Download datafile using API to local drive:
    # TODO: Is this a security concern?
    #if archive_local:
    #	api.download_datafile(datafile, archive_local)

    # Translate photometric filter into table column:
    if photfilter == 'gp':
        ref_filter = 'g_mag'
    elif photfilter == 'rp':
        ref_filter = 'r_mag'
    elif photfilter == 'ip':
        ref_filter = 'i_mag'
    elif photfilter == 'zp':
        ref_filter = 'z_mag'
    elif photfilter == 'B':
        ref_filter = 'B_mag'
    elif photfilter == 'V':
        ref_filter = 'V_mag'
    else:
        logger.warning(
            "Could not find filter '%s' in catalogs. Using default gp filter.",
            photfilter)
        ref_filter = 'g_mag'

    references = catalog['references']
    references.sort(ref_filter)

    # Load the image from the FITS file:
    image = load_image(filepath)

    # Calculate pixel-coordinates of references:
    row_col_coords = image.wcs.all_world2pix(
        np.array([[ref['ra'], ref['decl']] for ref in references]), 0)
    references['pixel_column'] = row_col_coords[:, 0]
    references['pixel_row'] = row_col_coords[:, 1]

    # Calculate the target's position in the image:
    target_pixel_pos = image.wcs.all_world2pix(
        [[target['ra'], target['decl']]], 0)[0]

    # Clean out the references:
    hsize = 10
    x = references['pixel_column']
    y = references['pixel_row']
    references = references[
        (np.sqrt((x - target_pixel_pos[0])**2 +
                 (y - target_pixel_pos[1])**2) > ref_target_dist_limit)
        & (references[ref_filter] < ref_mag_limit)
        & (x > hsize) & (x < (image.shape[1] - 1 - hsize))
        & (y > hsize) & (y < (image.shape[0] - 1 - hsize))]

    #==============================================================================================
    # BARYCENTRIC CORRECTION OF TIME
    #==============================================================================================

    ltt_bary = image.obstime.light_travel_time(target_coord, ephemeris='jpl')
    image.obstime = image.obstime.tdb + ltt_bary

    #==============================================================================================
    # BACKGROUND ESTIMATION
    #==============================================================================================

    fig, ax = plt.subplots(1, 2, figsize=(20, 18))
    plot_image(image.clean, ax=ax[0], scale='log', cbar='right', title='Image')
    plot_image(image.mask,
               ax=ax[1],
               scale='linear',
               cbar='right',
               title='Mask')
    fig.savefig(os.path.join(output_folder, 'original.png'),
                bbox_inches='tight')
    plt.close(fig)

    # Estimate image background:
    # Not using image.clean here, since we are redefining the mask anyway
    bkg = Background2D(
        image.clean,
        (128, 128),
        filter_size=(5, 5),
        #mask=image.mask | (image.clean > background_cutoff),
        sigma_clip=SigmaClip(sigma=3.0),
        bkg_estimator=SExtractorBackground(),
        exclude_percentile=50.0)
    image.background = bkg.background

    # Create background-subtracted image:
    image.subclean = image.clean - image.background

    # Plot background estimation:
    fig, ax = plt.subplots(1, 3, figsize=(20, 6))
    plot_image(image.clean, ax=ax[0], scale='log', title='Original')
    plot_image(image.background, ax=ax[1], scale='log', title='Background')
    plot_image(image.subclean,
               ax=ax[2],
               scale='log',
               title='Background subtracted')
    fig.savefig(os.path.join(output_folder, 'background.png'),
                bbox_inches='tight')
    plt.close(fig)

    # TODO: Is this correct?!
    image.error = calc_total_error(image.clean, bkg.background_rms, 1.0)

    #==============================================================================================
    # DETECTION OF STARS AND MATCHING WITH CATALOG
    #==============================================================================================

    logger.info("References:\n%s", references)

    radius = 10
    fwhm_guess = 6.0
    fwhm_min = 3.5
    fwhm_max = 13.5

    # Extract stars sub-images:
    #stars = extract_stars(
    #	NDData(data=image.subclean, mask=image.mask),
    #	stars_for_epsf,
    #	size=size
    #)

    # Set up 2D Gaussian model for fitting to reference stars:
    g2d = models.Gaussian2D(amplitude=1.0,
                            x_mean=radius,
                            y_mean=radius,
                            x_stddev=fwhm_guess * gaussian_fwhm_to_sigma)
    g2d.amplitude.bounds = (0.1, 2.0)
    g2d.x_mean.bounds = (0.5 * radius, 1.5 * radius)
    g2d.y_mean.bounds = (0.5 * radius, 1.5 * radius)
    g2d.x_stddev.bounds = (fwhm_min * gaussian_fwhm_to_sigma,
                           fwhm_max * gaussian_fwhm_to_sigma)
    g2d.y_stddev.tied = lambda model: model.x_stddev
    g2d.theta.fixed = True

    gfitter = fitting.LevMarLSQFitter()

    fwhms = np.full(len(references), np.NaN)
    for i, (x, y) in enumerate(
            zip(references['pixel_column'], references['pixel_row'])):
        x = int(np.round(x))
        y = int(np.round(y))
        x0, y0, width, height = x - radius, y - radius, 2 * radius, 2 * radius
        cutout = slice(y0 - 1, y0 + height), slice(x0 - 1, x0 + width)

        curr_star = image.subclean[cutout] / np.max(image.subclean[cutout])
        npix = len(curr_star)

        ypos, xpos = np.mgrid[:npix, :npix]
        gfit = gfitter(g2d, x=xpos, y=ypos, z=curr_star)

        fwhms[i] = gfit.x_fwhm

    mask = ~np.isfinite(fwhms) | (fwhms <= fwhm_min) | (fwhms >= fwhm_max)
    masked_fwhms = np.ma.masked_array(fwhms, mask)

    fwhm = np.mean(sigma_clip(masked_fwhms, maxiters=20, sigma=2.0))
    logger.info("FWHM: %f", fwhm)

    # Use DAOStarFinder to search the image for stars, and only keep reference stars where a
    # star was actually detected close to the reference-star coordinate:
    cleanout_references = (len(references) > 50)

    if cleanout_references:
        daofind_tbl = DAOStarFinder(100, fwhm=fwhm, roundlo=-0.5,
                                    roundhi=0.5).find_stars(image.subclean,
                                                            mask=image.mask)
        indx_good = np.zeros(len(references), dtype='bool')
        for k, ref in enumerate(references):
            dist = np.sqrt((daofind_tbl['xcentroid'] -
                            ref['pixel_column'])**2 +
                           (daofind_tbl['ycentroid'] - ref['pixel_row'])**2)
            if np.any(dist <= fwhm / 4):  # Cutoff set somewhat arbitrary
                indx_good[k] = True

        references = references[indx_good]

    fig, ax = plt.subplots(1, 1, figsize=(20, 18))
    plot_image(image.subclean,
               ax=ax,
               scale='log',
               cbar='right',
               title=target_name)
    ax.scatter(references['pixel_column'],
               references['pixel_row'],
               c='r',
               alpha=0.3)
    if cleanout_references:
        ax.scatter(daofind_tbl['xcentroid'],
                   daofind_tbl['ycentroid'],
                   c='g',
                   alpha=0.3)
    ax.scatter(target_pixel_pos[0], target_pixel_pos[1], marker='+', c='r')
    fig.savefig(os.path.join(output_folder, 'positions.png'),
                bbox_inches='tight')
    plt.close(fig)

    #==============================================================================================
    # CREATE EFFECTIVE PSF MODEL
    #==============================================================================================

    # Make cutouts of stars using extract_stars:
    # Scales with FWHM
    size = int(np.round(29 * fwhm / 6))
    if size % 2 == 0:
        size += 1  # Make sure it's an odd number
    size = max(size, 15)  # Never go below 15 pixels
    hsize = (size - 1) / 2

    x = references['pixel_column']
    y = references['pixel_row']
    mask_near_edge = ((x > hsize) & (x < (image.shape[1] - 1 - hsize))
                      & (y > hsize) & (y < (image.shape[0] - 1 - hsize)))

    stars_for_epsf = Table()
    stars_for_epsf['x'] = x[mask_near_edge]
    stars_for_epsf['y'] = y[mask_near_edge]

    # Store which stars were used in ePSF in the table:
    logger.info("Number of stars used for ePSF: %d", len(stars_for_epsf))
    references['used_for_epsf'] = mask_near_edge

    # Extract stars sub-images:
    stars = extract_stars(NDData(data=image.subclean, mask=image.mask),
                          stars_for_epsf,
                          size=size)

    # Plot the stars being used for ePSF:
    nrows = 5
    ncols = 5
    imgnr = 0
    for k in range(int(np.ceil(len(stars_for_epsf) / (nrows * ncols)))):
        fig, ax = plt.subplots(nrows=nrows,
                               ncols=ncols,
                               figsize=(20, 20),
                               squeeze=True)
        ax = ax.ravel()
        for i in range(nrows * ncols):
            if imgnr > len(stars_for_epsf) - 1:
                ax[i].axis('off')
            else:
                plot_image(stars[imgnr], ax=ax[i], scale='log', cmap='viridis')
            imgnr += 1

        fig.savefig(os.path.join(output_folder,
                                 'epsf_stars%02d.png' % (k + 1)),
                    bbox_inches='tight')
        plt.close(fig)

    # Build the ePSF:
    epsf = EPSFBuilder(oversampling=1.0,
                       maxiters=500,
                       fitter=EPSFFitter(fit_boxsize=2 * fwhm),
                       progress_bar=True)(stars)[0]

    logger.info('Successfully built PSF model')

    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 15))
    plot_image(epsf.data, ax=ax1, cmap='viridis')

    fwhms = []
    for a, ax in ((0, ax3), (1, ax2)):
        # Collapse the ePSF along this axis:
        profile = epsf.data.sum(axis=a)
        itop = profile.argmax()
        poffset = profile[itop] / 2

        # Run a spline through the points, but subtract half of the peak value, and find the roots:
        # We have to use a cubic spline, since roots() is not supported for other splines
        # for some reason
        profile_intp = UnivariateSpline(np.arange(0, len(profile)),
                                        profile - poffset,
                                        k=3,
                                        s=0,
                                        ext=3)
        lr = profile_intp.roots()
        axis_fwhm = lr[1] - lr[0]

        fwhms.append(axis_fwhm)

        x_fine = np.linspace(-0.5, len(profile) - 0.5, 500)

        ax.plot(profile, 'k.-')
        ax.plot(x_fine, profile_intp(x_fine) + poffset, 'g-')
        ax.axvline(itop)
        ax.axvspan(lr[0], lr[1], facecolor='g', alpha=0.2)
        ax.set_xlim(-0.5, len(profile) - 0.5)

    # Let's make the final FWHM the largest one we found:
    fwhm = np.max(fwhms)
    logger.info("Final FWHM based on ePSF: %f", fwhm)

    #ax2.axvspan(itop - fwhm/2, itop + fwhm/2, facecolor='b', alpha=0.2)
    #ax3.axvspan(itop - fwhm/2, itop + fwhm/2, facecolor='b', alpha=0.2)
    ax4.axis('off')
    fig.savefig(os.path.join(output_folder, 'epsf.png'), bbox_inches='tight')
    plt.close(fig)

    #==============================================================================================
    # COORDINATES TO DO PHOTOMETRY AT
    #==============================================================================================

    coordinates = np.array([[ref['pixel_column'], ref['pixel_row']]
                            for ref in references])

    # Add the main target position as the first entry:
    if datafile.get('template') is None:
        coordinates = np.concatenate(([target_pixel_pos], coordinates), axis=0)

    #==============================================================================================
    # APERTURE PHOTOMETRY
    #==============================================================================================

    # Define apertures for aperture photometry:
    apertures = CircularAperture(coordinates, r=fwhm)
    annuli = CircularAnnulus(coordinates, r_in=1.5 * fwhm, r_out=2.5 * fwhm)

    apphot_tbl = aperture_photometry(image.subclean, [apertures, annuli],
                                     mask=image.mask,
                                     error=image.error)

    logger.debug("Aperture Photometry Table:\n%s", apphot_tbl)
    logger.info('Aperture Photometry Success')

    #==============================================================================================
    # PSF PHOTOMETRY
    #==============================================================================================

    # Are we fixing the positions?
    epsf.fixed.update({'x_0': False, 'y_0': False})

    # Create photometry object:
    photometry = BasicPSFPhotometry(group_maker=DAOGroup(fwhm),
                                    bkg_estimator=SExtractorBackground(),
                                    psf_model=epsf,
                                    fitter=fitting.LevMarLSQFitter(),
                                    fitshape=size,
                                    aperture_radius=fwhm)

    psfphot_tbl = photometry(image=image.subclean,
                             init_guesses=Table(coordinates,
                                                names=['x_0', 'y_0']))

    logger.debug("PSF Photometry Table:\n%s", psfphot_tbl)
    logger.info('PSF Photometry Success')

    #==============================================================================================
    # TEMPLATE SUBTRACTION AND TARGET PHOTOMETRY
    #==============================================================================================

    if datafile.get('template') is not None:
        # Find the pixel-scale of the science image:
        pixel_area = proj_plane_pixel_area(image.wcs.celestial)
        pixel_scale = np.sqrt(pixel_area) * 3600  # arcsec/pixel
        #print(image.wcs.celestial.cunit) % Doesn't work?
        logger.info("Science image pixel scale: %f", pixel_scale)

        # Run the template subtraction, and get back
        # the science image where the template has been subtracted:
        diffimage = run_imagematch(datafile,
                                   target,
                                   star_coord=coordinates,
                                   fwhm=fwhm,
                                   pixel_scale=pixel_scale)

        # Include mask from original image:
        diffimage = np.ma.masked_array(diffimage, image.mask)

        # Create apertures around the target:
        apertures = CircularAperture(target_pixel_pos, r=fwhm)
        annuli = CircularAnnulus(target_pixel_pos,
                                 r_in=1.5 * fwhm,
                                 r_out=2.5 * fwhm)

        # Create two plots of the difference image:
        fig, ax = plt.subplots(1, 1, squeeze=True, figsize=(20, 20))
        plot_image(diffimage, ax=ax, cbar='right', title=target_name)
        ax.plot(target_pixel_pos[0],
                target_pixel_pos[1],
                marker='+',
                color='r')
        fig.savefig(os.path.join(output_folder, 'diffimg.png'),
                    bbox_inches='tight')
        apertures.plot(color='r')
        annuli.plot(color='k')
        ax.set_xlim(target_pixel_pos[0] - 50, target_pixel_pos[0] + 50)
        ax.set_ylim(target_pixel_pos[1] - 50, target_pixel_pos[1] + 50)
        fig.savefig(os.path.join(output_folder, 'diffimg_zoom.png'),
                    bbox_inches='tight')
        plt.close(fig)

        # Run aperture photometry on subtracted image:
        target_apphot_tbl = aperture_photometry(diffimage, [apertures, annuli],
                                                mask=image.mask,
                                                error=image.error)

        # Run PSF photometry on template subtracted image:
        target_psfphot_tbl = photometry(diffimage,
                                        init_guesses=Table(
                                            target_pixel_pos,
                                            names=['x_0', 'y_0']))

        # Combine the output tables from the target and the reference stars into one:
        apphot_tbl = vstack([target_apphot_tbl, apphot_tbl], join_type='exact')
        psfphot_tbl = vstack([target_psfphot_tbl, psfphot_tbl],
                             join_type='exact')

    # Build results table:
    tab = references.copy()
    tab.insert_row(
        0, {
            'starid': 0,
            'ra': target['ra'],
            'decl': target['decl'],
            'pixel_column': target_pixel_pos[0],
            'pixel_row': target_pixel_pos[1]
        })
    for key in ('pm_ra', 'pm_dec', 'gaia_mag', 'gaia_bp_mag', 'gaia_rp_mag',
                'H_mag', 'J_mag', 'K_mag', 'g_mag', 'r_mag', 'i_mag', 'z_mag'):
        tab[0][key] = np.NaN

    # Subtract background estimated from annuli:
    flux_aperture = apphot_tbl['aperture_sum_0'] - (
        apphot_tbl['aperture_sum_1'] / annuli.area()) * apertures.area()
    flux_aperture_error = np.sqrt(apphot_tbl['aperture_sum_err_0']**2 +
                                  (apphot_tbl['aperture_sum_err_1'] /
                                   annuli.area() * apertures.area())**2)

    # Add table columns with results:
    tab['flux_aperture'] = flux_aperture / image.exptime
    tab['flux_aperture_error'] = flux_aperture_error / image.exptime
    tab['flux_psf'] = psfphot_tbl['flux_fit'] / image.exptime
    tab['flux_psf_error'] = psfphot_tbl['flux_unc'] / image.exptime
    tab['pixel_column_psf_fit'] = psfphot_tbl['x_fit']
    tab['pixel_row_psf_fit'] = psfphot_tbl['y_fit']
    tab['pixel_column_psf_fit_error'] = psfphot_tbl['x_0_unc']
    tab['pixel_row_psf_fit_error'] = psfphot_tbl['y_0_unc']

    # Check that we got valid photometry:
    if not np.isfinite(tab[0]['flux_psf']) or not np.isfinite(
            tab[0]['flux_psf_error']):
        raise Exception("Target magnitude is undefined.")

    #==============================================================================================
    # CALIBRATE
    #==============================================================================================

    # Convert PSF fluxes to magnitudes:
    mag_inst = -2.5 * np.log10(tab['flux_psf'])
    mag_inst_err = (2.5 / np.log(10)) * (tab['flux_psf_error'] /
                                         tab['flux_psf'])

    # Corresponding magnitudes in catalog:
    mag_catalog = tab[ref_filter]

    # Mask out things that should not be used in calibration:
    use_for_calibration = np.ones_like(mag_catalog, dtype='bool')
    use_for_calibration[0] = False  # Do not use target for calibration
    use_for_calibration[~np.isfinite(mag_inst)
                        | ~np.isfinite(mag_catalog)] = False

    # Just creating some short-hands:
    x = mag_catalog[use_for_calibration]
    y = mag_inst[use_for_calibration]
    yerr = mag_inst_err[use_for_calibration]

    # Fit linear function with fixed slope, using sigma-clipping:
    model = models.Linear1D(slope=1, fixed={'slope': True})
    fitter = fitting.FittingWithOutlierRemoval(fitting.LinearLSQFitter(),
                                               sigma_clip,
                                               sigma=3.0)
    best_fit, sigma_clipped = fitter(model, x, y, weights=1.0 / yerr**2)

    # Extract zero-point and estimate its error:
    # I don't know why there is not an error-estimate attached directly to the Parameter?
    zp = -1 * best_fit.intercept.value  # Negative, because that is the way zeropoints are usually defined
    zp_error = nanstd(y[~sigma_clipped] - best_fit(x[~sigma_clipped]))

    # Add calibrated magnitudes to the photometry table:
    tab['mag'] = mag_inst + zp
    tab['mag_error'] = np.sqrt(mag_inst_err**2 + zp_error**2)

    fig, ax = plt.subplots(1, 1)
    ax.errorbar(x, y, yerr=yerr, fmt='k.')
    ax.scatter(x[sigma_clipped], y[sigma_clipped], marker='x', c='r')
    ax.plot(x, best_fit(x), color='g', linewidth=3)
    ax.set_xlabel('Catalog magnitude')
    ax.set_ylabel('Instrumental magnitude')
    fig.savefig(os.path.join(output_folder, 'calibration.png'),
                bbox_inches='tight')
    plt.close(fig)

    #==============================================================================================
    # SAVE PHOTOMETRY
    #==============================================================================================

    # Descriptions of columns:
    tab['flux_aperture'].unit = u.count / u.second
    tab['flux_aperture_error'].unit = u.count / u.second
    tab['flux_psf'].unit = u.count / u.second
    tab['flux_psf_error'].unit = u.count / u.second
    tab['pixel_column'].unit = u.pixel
    tab['pixel_row'].unit = u.pixel
    tab['pixel_column_psf_fit'].unit = u.pixel
    tab['pixel_row_psf_fit'].unit = u.pixel
    tab['pixel_column_psf_fit_error'].unit = u.pixel
    tab['pixel_row_psf_fit_error'].unit = u.pixel

    # Meta-data:
    tab.meta['version'] = __version__
    tab.meta['fileid'] = fileid
    tab.meta['template'] = None if datafile.get(
        'template') is None else datafile['template']['fileid']
    tab.meta['photfilter'] = photfilter
    tab.meta['fwhm'] = fwhm
    tab.meta['obstime-bmjd'] = float(image.obstime.mjd)
    tab.meta['zp'] = zp
    tab.meta['zp_error'] = zp_error

    # Filepath where to save photometry:
    photometry_output = os.path.join(output_folder, 'photometry.ecsv')

    # Write the final table to file:
    tab.write(photometry_output,
              format='ascii.ecsv',
              delimiter=',',
              overwrite=True)

    toc = default_timer()
    logger.info("Photometry took: %f seconds", toc - tic)

    return photometry_output
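The ECSV table written above can be read straight back with astropy; a minimal sketch, assuming the file sits in the working directory (the path is a placeholder, not taken from the source):

from astropy.table import Table

phot = Table.read('photometry.ecsv', format='ascii.ecsv')
print(phot.meta['zp'], phot.meta['zp_error'])  # calibration zero-point and its error
print(phot['mag'][0], phot['mag_error'][0])    # calibrated magnitude of the target (first row)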
Example #17
def extractFlux(cnam, ccd, rccd, read, gain, ccdwin, rfile, store):
    """This extracts the flux of all apertures of a given CCD.

    The steps are (1) creation of PSF model, (2) PSF fitting, (3)
    flux extraction. The apertures are assumed to be correctly positioned.

    It returns the results as a dictionary keyed on the aperture label. Each
    entry returns a list:

    [x, ex, y, ey, fwhm, efwhm, beta, ebeta, counts, countse, sky, esky,
    nsky, nrej, flag]

    flag = bitmask. See hipercam.core to see all the options which are
    referred to by name in the code e.g. ALL_OK. The various flags can
    signal that there are no sky pixels (NO_SKY), that the sky aperture was off
    the edge of the window (SKY_AT_EDGE), etc.

    This code::

       >> bset = flag & TARGET_SATURATED

    determines whether the data saturation flag is set for example.

    Arguments::

       cnam     : string
          CCD identifier label

       ccd       : CCD
           the debiassed, flat-fielded CCD.

       rccd : CCD
          corresponding raw CCD, used to work out whether data are
          saturated in target aperture.

       read      : CCD
           readnoise divided by the flat-field

       gain      : CCD
           gain multiplied by the flat field

       ccdwin   : dictionary of strings
           the Window label corresponding to each Aperture

       rfile     : Rfile
           reduce file configuration parameters

       store     : dict of dicts
           see moveApers for what this contains.

    """

    # initialise flag
    flag = hcam.ALL_OK

    ccdaper = rfile.aper[cnam]

    results = {}
    # get profile params from aperture store
    mfwhm = store["mfwhm"]
    mbeta = store["mbeta"]
    method = "m" if mbeta > 0.0 else "g"

    if mfwhm <= 0:
        # bail out immediately, as there is nothing we can do without a FWHM
        print((" *** WARNING: CCD {:s}: no measured FWHM to create PSF model"
               "; no extraction possible").format(cnam))
        # set flag to indicate no FWHM
        flag = hcam.NO_FWHM

        for apnam, aper in ccdaper.items():
            info = store[apnam]
            results[apnam] = {
                "x": aper.x,
                "xe": info["xe"],
                "y": aper.y,
                "ye": info["ye"],
                "fwhm": info["fwhm"],
                "fwhme": info["fwhme"],
                "beta": info["beta"],
                "betae": info["betae"],
                "counts": 0.0,
                "countse": -1,
                "sky": 0.0,
                "skye": 0.0,
                "nsky": 0,
                "nrej": 0,
                "flag": flag,
            }
        return results

    # all apertures have to be in the same window, or we can't easily make a
    # postage stamp of the data
    wnames = set(ccdwin.values())
    if len(wnames) != 1:
        print((" *** WARNING: CCD {:s}: not all apertures"
               " lie within the same window; no extraction possible"
               ).format(cnam))

        # set flag to indicate no extraction
        flag = hcam.NO_EXTRACTION

        # return empty results
        for apnam, aper in ccdaper.items():
            info = store[apnam]
            results[apnam] = {
                "x": aper.x,
                "xe": info["xe"],
                "y": aper.y,
                "ye": info["ye"],
                "fwhm": info["fwhm"],
                "fwhme": info["fwhme"],
                "beta": info["beta"],
                "betae": info["betae"],
                "counts": 0.0,
                "countse": -1,
                "sky": 0.0,
                "skye": 0.0,
                "nsky": 0,
                "nrej": 0,
                "flag": flag,
            }
        return results
    wnam = wnames.pop()

    # PSF params are in binned pixels, so find binning
    bin_fac = ccd[wnam].xbin

    # create PSF model
    if method == "m":
        psf_model = MoffatPSF(beta=mbeta, fwhm=mfwhm / bin_fac)
    else:
        psf_model = IntegratedGaussianPRF(sigma=mfwhm *
                                          gaussian_fwhm_to_sigma / bin_fac)

    # force photometry only at aperture positions
    # this means PSF shape and positions are fixed, we are only fitting flux
    if rfile["psf_photom"]["positions"] == "fixed":
        psf_model.x_0.fixed = True
        psf_model.y_0.fixed = True

    # create instances for PSF photometry
    gfac = float(rfile["psf_photom"]["gfac"])
    sclip = float(rfile["sky"]["thresh"])
    daogroup = DAOGroup(gfac * mfwhm / bin_fac)
    mmm_bkg = MMMBackground(sigma_clip=SigmaClip(sclip))
    fitter = LevMarLSQFitter()
    fitshape_box_size = int(2 * int(rfile["psf_photom"]["fit_half_width"]) + 1)
    fitshape = (fitshape_box_size, fitshape_box_size)
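    # note: photutils requires an odd fitshape, which 2*fit_half_width + 1 guarantees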

    photometry_task = BasicPSFPhotometry(
        group_maker=daogroup,
        bkg_estimator=mmm_bkg,
        psf_model=psf_model,
        fitter=fitter,
        fitshape=fitshape,
    )

    # initialise flag
    flag = hcam.ALL_OK

    # extract Windows relevant for these apertures
    wdata = ccd[wnam]
    wraw = rccd[wnam]

    # extract sub-windows that include all of the apertures, plus a little
    # extra around the edges.
    x1 = min([ap.x - ap.rsky2 - wdata.xbin for ap in ccdaper.values()])
    x2 = max([ap.x + ap.rsky2 + wdata.xbin for ap in ccdaper.values()])
    y1 = min([ap.y - ap.rsky2 - wdata.ybin for ap in ccdaper.values()])
    y2 = max([ap.y + ap.rsky2 + wdata.ybin for ap in ccdaper.values()])

    # extract sub-Windows
    swdata = wdata.window(x1, x2, y1, y2)
    swraw = wraw.window(x1, x2, y1, y2)

    # compute pixel positions of apertures in windows
    xpos, ypos = zip(*((swdata.x_pixel(ap.x), swdata.y_pixel(ap.y))
                       for ap in ccdaper.values()))
    positions = Table(names=["x_0", "y_0"], data=(xpos, ypos))

    # do the PSF photometry
    photom_results = photometry_task(swdata.data, init_guesses=positions)
    slevel = mmm_bkg(swdata.data)

    # unpack the results and check apertures
    for apnam, aper in ccdaper.items():
        try:
            # reset flag
            flag = hcam.ALL_OK

            result_row = photom_results[photom_results["id"] == int(apnam)]
            if len(result_row) == 0:
                flag |= hcam.NO_DATA
                raise hcam.HipercamError(
                    "no source in PSF photometry for this aperture")
            elif len(result_row) > 1:
                flag |= hcam.NO_EXTRACTION
                raise hcam.HipercamError(
                    "ambiguous lookup for this aperture in PSF photometry")
            else:
                result_row = result_row[0]

            # compute X, Y arrays over the sub-window relative to the centre
            # of the aperture and the distance squared from the centre (Rsq)
            # to save a little effort.
            x = swdata.x(np.arange(swdata.nx)) - aper.x
            y = swdata.y(np.arange(swdata.ny)) - aper.y
            X, Y = np.meshgrid(x, y)
            Rsq = X**2 + Y**2

            # size of a pixel which is used to taper pixels as they approach
            # the edge of the aperture to reduce pixellation noise
            size = np.sqrt(wdata.xbin * wdata.ybin)

            # target selection, accounting for extra apertures and allowing
            # pixels to contribute if their centres are as far as size/2 beyond
            # the edge of the circle (but with a tapered weight)
            dok = Rsq < (aper.rtarg + size / 2.0)**2
            if not dok.any():
                # check there are some valid pixels
                flag |= hcam.NO_DATA
                raise hcam.HipercamError("no valid pixels in aperture")

            # check for saturation and nonlinearity
            if cnam in rfile.warn:
                if swraw.data[dok].max() >= rfile.warn[cnam]["saturation"]:
                    flag |= hcam.TARGET_SATURATED

                if swraw.data[dok].max() >= rfile.warn[cnam]["nonlinear"]:
                    flag |= hcam.TARGET_NONLINEAR
            else:
                warnings.warn(
                    "CCD {:s} has no nonlinearity or saturation levels set".format(cnam))

            counts = result_row["flux_fit"]
            countse = result_row["flux_unc"]
            info = store[apnam]

            results[apnam] = {
                "x": aper.x,
                "xe": info["xe"],
                "y": aper.y,
                "ye": info["ye"],
                "fwhm": info["fwhm"],
                "fwhme": info["fwhme"],
                "beta": info["beta"],
                "betae": info["betae"],
                "counts": counts,
                "countse": countse,
                "sky": slevel,
                "skye": 0,
                "nsky": 0,
                "nrej": 0,
                "flag": flag,
            }

        except hcam.HipercamError as err:

            info = store[apnam]
            flag |= hcam.NO_EXTRACTION

            results[apnam] = {
                "x": aper.x,
                "xe": info["xe"],
                "y": aper.y,
                "ye": info["ye"],
                "fwhm": info["fwhm"],
                "fwhme": info["fwhme"],
                "beta": info["beta"],
                "betae": info["betae"],
                "counts": 0.0,
                "countse": -1,
                "sky": 0.0,
                "skye": 0.0,
                "nsky": 0,
                "nrej": 0,
                "flag": flag,
            }

    # finally, we are done
    return results
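As the docstring notes, the flag entries in the returned dictionary are bitmasks; a minimal sketch of how a caller might test them, reusing the hcam constants from the function above (the call arguments come from its own signature):

results = extractFlux(cnam, ccd, rccd, read, gain, ccdwin, rfile, store)
for apnam, res in results.items():
    if res["flag"] & hcam.TARGET_SATURATED:
        print(f"aperture {apnam}: target saturated")
    if res["flag"] & hcam.NO_EXTRACTION:
        print(f"aperture {apnam}: no flux extracted")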
Example #18
def epsf_phot(target, centroided_sources, plots=False):
    def hmsm_to_days(hour=0,min=0,sec=0,micro=0):
        """
        Convert hours, minutes, seconds, and microseconds to fractional days.
        
        """
        days = sec + (micro / 1.e6)
        days = min + (days / 60.)
        days = hour + (days / 60.)
        return days / 24.
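    # e.g. hmsm_to_days(12) == 0.5, i.e. noon is half a day past midnight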
    
    def date_to_jd(year,month,day):
        """
        Convert a date to Julian Day.
        
        Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet', 
            4th ed., Duffet-Smith and Zwart, 2011.
        
        """
        if month == 1 or month == 2:
            yearp = year - 1
            monthp = month + 12
        else:
            yearp = year
            monthp = month
        
        # this checks where we are in relation to October 15, 1582, the beginning
        # of the Gregorian calendar.
        if ((year < 1582) or
            (year == 1582 and month < 10) or
            (year == 1582 and month == 10 and day < 15)):
            # before start of Gregorian calendar
            B = 0
        else:
            # after start of Gregorian calendar
            A = math.trunc(yearp / 100.)
            B = 2 - A + math.trunc(A / 4.)
            
        if yearp < 0:
            C = math.trunc((365.25 * yearp) - 0.75)
        else:
            C = math.trunc(365.25 * yearp)
            
        D = math.trunc(30.6001 * (monthp + 1))
        
        jd = B + C + D + day + 1720994.5
        
        return jd
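    # Worked check (not in the source): the textbook example 1985 February 17.25
    # gives date_to_jd(1985, 2, 17.25) == 2446113.75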

    pines_path = pines_dir_check()
    short_name = short_name_creator(target)
    reduced_path = pines_path/('Objects/'+short_name+'/reduced/')
    reduced_filenames = natsort.natsorted([x.name for x in reduced_path.glob('*.fits')])
    reduced_files = np.array([reduced_path/i for i in reduced_filenames])

    centroided_sources.columns = centroided_sources.columns.str.strip()
    source_names = natsort.natsorted(list(set([i.split(' ')[0]+' '+i.split(' ')[1] for i in centroided_sources.keys() if (i[0] == '2') or (i[0] == 'R')])))
    
    #Create output plot directories for each source.
    if plots:
        for name in source_names:
            #If the folders are already there, delete them. 
            source_path = (pines_path/('Objects/'+short_name+'/psf_phot/'+name+'/'))
            if source_path.exists():
                shutil.rmtree(source_path)
            #Create folders.
            os.mkdir(source_path)

    #Declare a new dataframe to hold the photometry of every source across the reduced images.
    columns = ['Filename', 'Time UT', 'Time JD', 'Airmass', 'Seeing']
    for i in range(0, len(source_names)):
        columns.append(source_names[i]+' Flux')
        columns.append(source_names[i]+' Flux Error')
    psf_df = pd.DataFrame(index=range(len(reduced_files)), columns=columns)
    output_filename = pines_path/('Objects/'+short_name+'/psf_phot/'+short_name+'_psf_phot.csv')

    for i in range(0, len(reduced_files)):
        #Read in image data/header. 
        file = reduced_files[i]
        data = fits.open(file)[0].data
        header = fits.open(file)[0].header
        print('{}, image {} of {}.'.format(file.name, i+1, len(reduced_files)))

        #Read in some supporting information.
        log_path = pines_path/('Logs/'+file.name.split('.')[0]+'_log.txt')
        log = pines_log_reader(log_path)
        date_obs = header['DATE-OBS']
        #Catch a case that can cause datetime strptime to crash; Mimir headers sometimes have DATE-OBS with seconds specified as 010.xx seconds, when it should be 10.xx seconds. 
        if len(date_obs.split(':')[-1].split('.')[0]) == 3:
            date_obs = date_obs.split(':')[0] + ':' + date_obs.split(':')[1] + ':' + date_obs.split(':')[-1][1:]
        #Keep a try/except clause here in case other unknown DATE-OBS formats pop up. 
        try:
            date = datetime.datetime.strptime(date_obs, '%Y-%m-%dT%H:%M:%S.%f')
        except ValueError:
            print('Header DATE-OBS format does not match the format code in strptime! Inspect/correct the DATE-OBS value.')
            pdb.set_trace()
        
        days = date.day + hmsm_to_days(date.hour,date.minute,date.second,date.microsecond)
        jd = date_to_jd(date.year,date.month,days)
        psf_df['Filename'][i] = file.name
        psf_df['Time UT'][i] = header['DATE-OBS']
        psf_df['Time JD'][i] = jd
        psf_df['Airmass'][i] = header['AIRMASS']
        psf_df['Seeing'][i] = log['X seeing'][np.where(log['Filename'] == file.name.split('_')[0]+'.fits')[0][0]]
        
        #Read in source centroids for this image
        x = np.zeros(len(source_names))
        y = np.zeros(len(source_names))
        for j in range(len(source_names)):
            source = source_names[j]
            x[j] = centroided_sources[source+' X'][i]
            y[j] = centroided_sources[source+' Y'][i]

        #Extract pixel cutouts of our stars. Stars too close to the image boundaries cannot be extracted and would have to be excluded explicitly.
        size = 13
        hsize = (size - 1) / 2
        #mask = ((x > hsize) & (x < (data.shape[1] -1 - hsize)) & (y > hsize) & (y < (data.shape[0] -1 - hsize)) & (y > 100) & (y < 923))

        #Create table of good star positions
        stars_tbl = Table()
        stars_tbl['x'] = x
        stars_tbl['y'] = y
        
        #Subtract background (star cutouts from which we build the ePSF must have background subtracted).
        mean_val, median_val, std_val = sigma_clipped_stats(data, sigma=2.)  
        data -= median_val
        
        #Replace nans in data using Gaussian. 
        # kernel = Gaussian2DKernel(x_stddev=0.5)
        # data = interpolate_replace_nans(data, kernel)

        #The extract_stars() function requires the input data as an NDData object. 
        nddata = NDData(data=data)  

        #Extract star cutouts.
        stars = extract_stars(nddata, stars_tbl, size=size)  
                        

        #Plot. 
        nrows = 5
        ncols = 5
        fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(10, 10), squeeze=True)
        ax = ax.ravel()
        for j in range(len(stars)):           
            norm = simple_norm(stars[j], 'log', percent=99.)
            ax[j].imshow(stars[j].data, norm=norm, origin='lower', cmap='viridis')

        pdb.set_trace()

        #Construct the ePSF using the star cutouts.
        epsf_fitter = EPSFFitter()
        epsf_builder = EPSFBuilder(maxiters=4, progress_bar=False, fitter=epsf_fitter)   

        try:
            epsf, fitted_stars = epsf_builder(stars)
            output_filename = pines_path/('Objects/'+short_name+'/psf_phot/'+short_name+'_psf_phot.csv')

            for j in range(len(stars)):
                star = stars[j]
                source_name = source_names[j]
                sigma_psf = 1.85

                dtype = [('x_0', 'f8'), ('y_0', 'f8')]
                pos = Table(data=np.zeros(1, dtype=dtype))
                source_x = stars_tbl['x'][j]
                source_y = stars_tbl['y'][j]
                pos['x_0'] = source_x - int(source_x - size/2 + 1)
                pos['y_0'] = source_y - int(source_y - size/2 + 1)

                daogroup = DAOGroup(4.0*sigma_psf*gaussian_sigma_to_fwhm)
                mmm_bkg = MMMBackground()
                photometry = BasicPSFPhotometry(group_maker=daogroup,
                                    bkg_estimator=mmm_bkg,
                                    psf_model=epsf,
                                    fitter=LevMarLSQFitter(),
                                    fitshape=(13,13),
                                    aperture_radius=4.)
                

                result_tab = photometry(image=star, init_guesses=pos)
                residual_image = photometry.get_residual_image()
                psf_df[source_name+' Flux'][i] = result_tab['flux_fit'][0]
                psf_df[source_name+' Flux Error'][i] = result_tab['flux_unc'][0]

                if plots:
                    fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(12,4))
                    im = ax[0].imshow(star, origin='lower')
                    divider = make_axes_locatable(ax[0])
                    cax = divider.append_axes('right', size='5%', pad=0.05)
                    fig.colorbar(im, cax=cax, orientation='vertical')
                    ax[0].plot(result_tab['x_fit'][0], result_tab['y_fit'][0], 'rx')
                    ax[0].set_title('Data')

                    im2 = ax[1].imshow(epsf.data, origin='lower')
                    ax[1].set_title('EPSF Model')
                    divider = make_axes_locatable(ax[1])
                    cax = divider.append_axes('right', size='5%', pad=0.05)
                    fig.colorbar(im2, cax=cax, orientation='vertical')

                    im3 = ax[2].imshow(residual_image, origin='lower')
                    ax[2].set_title('Residual Image')
                    divider = make_axes_locatable(ax[2])
                    cax = divider.append_axes('right', size='5%', pad=0.05)
                    fig.colorbar(im3, cax=cax, orientation='vertical')
                    plt.suptitle(source_name+'\n'+reduced_files[i].name+', image '+str(i+1)+' of '+str(len(reduced_files)))
                    plt.subplots_adjust(wspace=0.5, top=0.95, bottom = 0.05)
                    plot_output_name = pines_path/('Objects/'+short_name+'/psf_phot/'+source_name+'/'+str(i).zfill(4)+'.jpg')
                    plt.savefig(plot_output_name)
                    plt.close()
        except:
            print('')
            print('EPSF BUILDER FAILED, SKIPPING IMAGE.')
            print('')
        #Plot the ePSF. 
        # plt.figure()
        # norm = simple_norm(epsf.data, 'log', percent=99.)
        # plt.imshow(epsf.data, norm=norm, origin='lower', cmap='viridis')
        # cb = plt.colorbar()
        # plt.tight_layout()   

        

    print('Saving psf photometry output to {}.'.format(output_filename))
    with open(output_filename, 'w') as f:
        for j in range(len(psf_df)):
            if j == 0:
                f.write('{:>21s}, {:>22s}, {:>17s}, {:>7s}, {:>7s}, '.format('Filename', 'Time UT', 'Time JD', 'Airmass', 'Seeing'))
                for i in range(len(source_names)):
                    if i != len(source_names) - 1:
                        f.write('{:>20s}, {:>26s}, '.format(source_names[i]+' Flux', source_names[i]+' Flux Error'))
                    else:
                        f.write('{:>20s}, {:>26s}\n'.format(source_names[i]+' Flux', source_names[i]+' Flux Error'))

            format_string = '{:21s}, {:22s}, {:17.9f}, {:7.2f}, {:7.1f}, '

            #If the seeing value for this image is 'nan' (a string), convert it to a float. 
            #TODO: Not sure why it's being read in as a string, fix that. 
            if type(psf_df['Seeing'][j]) == str:
                psf_df['Seeing'][j] = float(psf_df['Seeing'][j])

            #Do a try/except clause for writeout, in case it breaks in the future. 
            try:
                f.write(format_string.format(psf_df['Filename'][j], psf_df['Time UT'][j], psf_df['Time JD'][j], psf_df['Airmass'][j], psf_df['Seeing'][j]))
            except:
                print('Writeout failed! Inspect quantities you are trying to write out.')
                pdb.set_trace()
            for i in range(len(source_names)):                    
                if i != len(source_names) - 1:
                    format_string = '{:20.11f}, {:26.11f}, '
                else:
                    format_string = '{:20.11f}, {:26.11f}\n'
                
                f.write(format_string.format(psf_df[source_names[i]+' Flux'][j], psf_df[source_names[i]+' Flux Error'][j]))
    print('')    
    return
           
Example #19
plt.imshow(residual_image,
           cmap='viridis',
           aspect=1,
           interpolation='nearest',
           origin='lower')
plt.title('Residual Image')
plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04)
plt.show()

# Performing PSF Photometry with Fixed Centroids
psf_model.x_0.fixed = True
psf_model.y_0.fixed = True
pos = Table(names=['x_0', 'y_0'], data=[sources['x_mean'], sources['y_mean']])

from photutils.psf import BasicPSFPhotometry

photometry = BasicPSFPhotometry(group_maker=daogroup,
                                bkg_estimator=mmm_bkg,
                                psf_model=psf_model,
                                fitter=LevMarLSQFitter(),
                                fitshape=(11, 11))
result_tab = photometry(image=image, init_guesses=pos)
residual_image = photometry.get_residual_image()

plt.subplot(1, 2, 1)
plt.imshow(image,
           cmap='viridis',
           aspect=1,
           interpolation='nearest',
           origin='lower')
plt.title('Simulated data')
plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04)
plt.subplot(1, 2, 2)
plt.imshow(residual_image,
           cmap='viridis',
           aspect=1,
           interpolation='nearest',
           origin='lower')
plt.title('Residual Image')
plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04)
plt.show()
Example #20
def __fit_PSF(image_file, mask_file=None, nstars=40,                
              thresh_sigma=5.0, pixelmin=20, elongation_lim=1.4, area_max=500,             
              cutout=35, 
              astrom_sigma=5.0, psf_sigma=5.0, alim=10000, clean=True, 
              source_lim=None, 
              write_ePSF=False, ePSF_output=None, 
              plot_ePSF=True, ePSF_plotname=None, 
              plot_residuals=False, resid_plotname=None,
              verbose=False):
    """    
    Input: 
        general:
        - filename for a **BACKGROUND-SUBTRACTED** image
        - filename for a mask image (optional; default None)
        - maximum number of stars to use (optional; default 40; set to None
          to impose no limit)
          
        source detection:
        - sigma threshold for source detection with image segmentation 
          (optional; default 5.0)
        - *minimum* number of isophotal pixels (optional; default 20)
        - *maximum* allowed elongation for sources found by image segmentation 
          (optional; default 1.4)
        - *maximum* allowed area for sources found by image segmentation 
          (optional; default 500 pix**2)
        - cutout size around each star in pix (optional; default 35 pix; must 
          be ODD, rounded down if even)
        
        astrometry.net:
        - sigma threshold for astrometry.net source detection image (optional; 
          default 5.0)
        - sigma of the Gaussian PSF of the image (optional; default 5.0)
        - maximum allowed source area in pix**2 for astrometry.net for 
          deblending (optional; default 10000; only relevant if no source list 
          file is provided)
        - whether to remove files output by image2xy once finished with them 
          (optional; default True)

        misc:
        - limit on number of sources to fit with ePSF (optional; default None 
          which imposes no limit)        
                
        writing, plotting, verbosity:
        - whether to write the derived ePSF to a fits file (optional; default 
          False)
        - name for output ePSF fits file (optional; default set below)
        - whether to plot the derived ePSF (optional; default True)
        - name for output ePSF plot (optional; default set below)
        - whether to plot the residuals of the iterative PSF fitting (optional;
          default False)
        - name for output residuals plot (optional; default set below)
        - be verbose (optional; default False)
    
    Uses image segmentation to obtain a list of sources in the image with their 
    x, y coordinates. Uses EPSFBuilder to empirically obtain the ePSF of these 
    stars. Optionally writes and/or plots the obtained ePSF. Finally, uses 
    astrometry.net to find all sources in the image, and fits them with the 
    empirically obtained ePSF.
    
    The ePSF obtained here should NOT be used in convolutions. Instead, it can 
    serve as a tool for estimating the seeing of an image. 
    
    Output: table containing the coordinates and instrumental magnitudes of the 
    detected, ePSF-fit sources
    """

    # load in data 
    image_data = fits.getdata(image_file)
    image_header = fits.getheader(image_file) 
    try:
        instrument = image_header["INSTRUME"]
    except KeyError:
        instrument = "Unknown"
    pixscale = image_header["PIXSCAL1"]
    
    ### SOURCE DETECTION

    ### use image segmentation to find sources with an area > pixelmin pix**2 
    ### which are above the threshold sigma*std 
    image_data = fits.getdata(image_file) # subfile data
    image_data = np.ma.masked_where(image_data==0.0, 
                                    image_data) # mask bad pixels
    
    ## build an actual mask
    mask = (image_data==0)
    if mask_file:
        mask = np.logical_or(mask, fits.getdata(mask_file))

    ## set detection standard deviation
    try:
        std = image_header["BKGSTD"] # header written by amakihi.bkgsub fn
    except KeyError:
        # make crude source mask, get standard deviation of background
        source_mask = make_source_mask(image_data, snr=3, npixels=5, 
                                       dilate_size=15, mask=mask)
        final_mask = np.logical_or(mask, source_mask)
        std = np.std(np.ma.masked_where(final_mask, image_data))
    
    ## use the segmentation image to get the source properties 
    # use <mask>, which does not mask sources
    segm = detect_sources(image_data, thresh_sigma*std, npixels=pixelmin,
                          mask=mask) 
    cat = source_properties(image_data, segm, mask=mask)

    ## get the catalog and coordinates for sources
    try:
        tbl = cat.to_table()
    except ValueError:
        print("SourceCatalog contains no sources. Exiting.")
        return
    
    # restrict elongation and area to obtain only unsaturated stars 
    tbl = tbl[(tbl["elongation"] <= elongation_lim)]
    tbl = tbl[(tbl["area"].value <= area_max)]

    sources = Table() # build a table 
    sources['x'] = tbl['xcentroid'] # for EPSFBuilder 
    sources['y'] = tbl['ycentroid']
    sources['flux'] = tbl['source_sum'].data/tbl["area"].data   
    sources.sort("flux")
    sources.reverse()
    
    if nstars:
        sources = sources[:min(nstars, len(sources))]

    ## setup: get WCS coords for all sources 
    w = wcs.WCS(image_header)
    sources["ra"], sources["dec"] = w.all_pix2world(sources["x"],
                                                    sources["y"], 1)
     
    ## mask out edge sources: 
    # a bounding circle for WIRCam, rectangle for MegaPrime
    xsize = image_data.shape[1]
    ysize = image_data.shape[0]
    if "WIRCam" in instrument:
        rad_limit = xsize/2.0
        dist_to_center = np.sqrt((sources['x']-xsize/2.0)**2 + 
                                 (sources['y']-ysize/2.0)**2)
        dmask = dist_to_center <= rad_limit
        sources = sources[dmask]
    else: 
        x_lims = [int(0.05*xsize), int(0.95*xsize)] 
        y_lims = [int(0.05*ysize), int(0.95*ysize)]
        dmask = (sources['x']>x_lims[0]) & (sources['x']<x_lims[1]) & (
                 sources['y']>y_lims[0]) & (sources['y']<y_lims[1])
        sources = sources[dmask]
        
    ## empirically obtain the effective Point Spread Function (ePSF)  
    nddata = NDData(image_data) # NDData object
    if mask_file: # supply a mask if needed 
        nddata.mask = fits.getdata(mask_file)
    if cutout%2 == 0: # if cutout even, subtract 1
        cutout -= 1
    stars = extract_stars(nddata, sources, size=cutout) # extract stars

    ## build the ePSF
    nstars_epsf = len(stars.all_stars) # no. of stars used in ePSF building
    
    if nstars_epsf == 0:
        print("\nNo valid sources were found to build the ePSF with the given"+
              " conditions. Exiting.")
        return
    
    if verbose:
        print(f"\n{nstars_epsf} stars used in building the ePSF")
        
    start = timer()
    epsf_builder = EPSFBuilder(oversampling=1, maxiters=7, # build it
                               progress_bar=False)
    epsf, fitted_stars = epsf_builder(stars)
    epsf_data = epsf.data
    
    end = timer() # timing 
    time_elaps = end-start
    
    # print ePSF FWHM, if desired
    print(f"Time required for ePSF building {time_elaps:.2f} s\n")
    if verbose: 
        ePSF_FWHM(epsf_data, True)

    epsf_hdu = fits.PrimaryHDU(data=epsf_data)
    if write_ePSF: # write, if desired
        if not(ePSF_output):
            ePSF_output = image_file.replace(".fits", "_ePSF.fits")
            
        epsf_hdu.writeto(ePSF_output, overwrite=True, output_verify="ignore")
    
    psf_model = epsf # set the model
    psf_model.x_0.fixed = True # fix centroids (known beforehand) 
    psf_model.y_0.fixed = True
 
    ### USE ASTROMETRY.NET TO FIND SOURCES TO FIT  
    # -b --> no background-subtraction
    # -O --> overwrite
    # -p <astrom_sigma> --> significance
    # -w <psf_sigma> --> estimated PSF sigma 
    # -m <alim> --> max object size for deblending is <alim>      
    options = f"-O -b -p {astrom_sigma} -w {psf_sigma}"
    options += f" -m {alim}"
    run(f"image2xy {options} {image_file}", shell=True)
    image_sources_file = image_file.replace(".fits", ".xy.fits")
    image_sources = fits.getdata(image_sources_file)
    if clean:
        run(f"rm {image_sources_file}", shell=True) # this file is not needed

    print(f'\n{len(image_sources)} stars at >{astrom_sigma}'+
          f' sigma found in image {re.sub(".*/", "", image_file)}'+
          ' with astrometry.net')   

    astrom_sources = Table() # build a table 
    astrom_sources['x_mean'] = image_sources['X'] # for BasicPSFPhotometry
    astrom_sources['y_mean'] = image_sources['Y']
    astrom_sources['flux'] = image_sources['FLUX']
    
    # initial guesses for centroids, fluxes
    pos = Table(names=['x_0', 'y_0','flux_0'], 
                data=[astrom_sources['x_mean'], astrom_sources['y_mean'], 
                      astrom_sources['flux']]) 

    ### FIT THE ePSF TO ALL DETECTED SOURCES 
    start = timer() # timing the fit 
    
    # sources separated by less than this critical separation are grouped 
    # together when fitting the PSF via the DAOGROUP algorithm
    sigma_psf = 2.0 # 2 pix
    crit_sep = 2.0*sigma_psf*gaussian_sigma_to_fwhm  # twice the PSF FWHM
    daogroup = DAOGroup(crit_sep) 

    # an astropy fitter, does Levenberg-Marquardt least-squares fitting
    fitter_tool = LevMarLSQFitter()
    
    # if we have a limit on the number of sources to fit
    if source_lim:
        try: 
            import random # pick a given no. of random sources 
            source_rows = random.sample(list(astrom_sources), k=source_lim)
            astrom_sources = Table(names=['x_mean', 'y_mean', 'flux'], 
                                   rows=source_rows)
            pos = Table(names=['x_0', 'y_0','flux_0'], 
                        data=[astrom_sources['x_mean'], 
                              astrom_sources['y_mean'], 
                              astrom_sources['flux']])
            
            
        except ValueError:
            print("The input source limit exceeds the number of sources"+
                  " detected by astrometry, so no limit is imposed.\n")
    
    photometry = BasicPSFPhotometry(group_maker=daogroup,
                            bkg_estimator=None, # bg subtract already done
                            psf_model=psf_model,
                            fitter=fitter_tool,
                            fitshape=(11,11))
    
    result_tab = photometry(image=image_data, init_guesses=pos) # results
    residual_image = photometry.get_residual_image() # residuals of PSF fit
    residual_image = np.ma.masked_where(mask, residual_image)
    residual_image.fill_value = 0 # set to zero
    residual_image = residual_image.filled()

    
    end = timer() # timing 
    time_elaps = end - start
    print(f"Time required fit ePSF to all sources {time_elaps:.2f} s\n")
    
    # include WCS coordinates
    pos["ra"], pos["dec"] = w.all_pix2world(pos["x_0"], pos["y_0"], 1)
    result_tab.add_column(pos['ra'])
    result_tab.add_column(pos['dec'])
    
    # mask out negative flux_fit values in the results 
    mask_flux = (result_tab['flux_fit'] >= 0.0)
    psf_sources = result_tab[mask_flux] # PSF-fit sources 
    
    # compute magnitudes and their errors and add to the table
    # error = (2.5/(ln(10)*flux_fit))*flux_unc
    mag_fit = -2.5*np.log10(psf_sources['flux_fit']) # instrumental mags
    mag_fit.name = 'mag_fit'
    mag_unc = 2.5/(psf_sources['flux_fit']*np.log(10))
    mag_unc *= psf_sources['flux_unc']
    mag_unc.name = 'mag_unc' 
    psf_sources['mag_fit'] = mag_fit
    psf_sources['mag_unc'] = mag_unc
    
    # mask entries with large magnitude uncertainties 
    mask_unc = psf_sources['mag_unc'] < 0.4
    psf_sources = psf_sources[mask_unc]
    
    if plot_ePSF: # if we wish to see the ePSF
        plt.figure(figsize=(10,9))
        plt.imshow(epsf_data, origin='lower', aspect=1, cmap='magma',
                   interpolation="nearest")
        plt.xlabel("Pixels", fontsize=16)
        plt.ylabel("Pixels", fontsize=16)
        plt.title("Effective Point-Spread Function (1 pixel = "
                                                    +str(pixscale)+
                                                    '")', fontsize=16)
        plt.colorbar(orientation="vertical", fraction=0.046, pad=0.08)
        plt.rc("xtick",labelsize=16) # not working?
        plt.rc("ytick",labelsize=16)
        
        if not(ePSF_plotname):
            ePSF_plotname = image_file.replace(".fits", "_ePSF.png")
        plt.savefig(ePSF_plotname, bbox_inches="tight")
        plt.close()
    
    if plot_residuals: # if we wish to see a plot of the residuals
        if "WIRCam" in instrument:
            plt.figure(figsize=(10,9))
        else:
            plt.figure(figsize=(12,14))
        ax = plt.subplot(projection=w)
        plt.imshow(residual_image, cmap='magma', aspect=1, 
                   interpolation='nearest', origin='lower')
        plt.xlabel("RA (J2000)", fontsize=16)
        plt.ylabel("Dec (J2000)", fontsize=16)
        plt.title("PSF residuals", fontsize=16)
        cb = plt.colorbar(orientation='vertical', fraction=0.046, pad=0.08) 
        cb.set_label(label="ADU", fontsize=16)
        ax.coords["ra"].set_ticklabel(size=15)
        ax.coords["dec"].set_ticklabel(size=15)
        
        if not(resid_plotname):
            resid_plotname = image_file.replace(".fits", "_ePSFresiduals.png")
        plt.savefig(resid_plotname, bbox_inches="tight")
        plt.close()
    
    return psf_sources     
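A minimal usage sketch for the routine above; the file name is a placeholder (not from the source) and the keyword values simply restate the documented defaults:

psf_sources = __fit_PSF("image_bkgsub.fits",  # background-subtracted image (placeholder)
                        mask_file=None,
                        nstars=40,
                        thresh_sigma=5.0,
                        plot_ePSF=True,
                        verbose=True)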
Example #21
def cheating_astrometry(image,
                        input_table,
                        psf: np.ndarray,
                        filename: str = '?',
                        config: Config = Config.instance()):
    """
    Evaluate the maximum achievable precision of the EPSF fitting approach by using a hand-defined psf
    :param input_table:
    :param image:
    :param filename:
    :param psf:
    :param config:
    :return:
    """
    try:
        print(f'starting job on image {filename} with {config}')
        origin = np.array(psf.shape) / 2
        # type: ignore
        epsf = photutils.psf.EPSFModel(psf,
                                       flux=1,
                                       origin=origin,
                                       oversampling=1,
                                       normalize=False)
        epsf = photutils.psf.prepare_psf_model(epsf, renormalize_psf=False)
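        # prepare_psf_model wraps the EPSF so that position and flux become fittable
        # parameters suitable for BasicPSFPhotometry (a helper from the older
        # photutils API used throughout these examples)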

        finder = get_finder(image, config)

        #fwhm = estimate_fwhm(epsf.psfmodel)
        fwhm = config.fwhm_guess
        grouper = DAOGroup(config.separation_factor * fwhm)

        epsf.fwhm = astropy.modeling.Parameter(
            'fwhm', 'this is not the way to add this I think')
        epsf.fwhm.value = fwhm
        bkgrms = MADStdBackgroundRMS()

        photometry = BasicPSFPhotometry(finder=finder,
                                        group_maker=grouper,
                                        bkg_estimator=bkgrms,
                                        psf_model=epsf,
                                        fitter=LevMarLSQFitter(),
                                        fitshape=config.fitshape)

        guess_table = input_table.copy()
        guess_table = cut_edges(guess_table, 101, image.shape[0])
        guess_table.rename_columns(['x', 'y'], ['x_0', 'y_0'])

        guess_table['x_0'] += np.random.uniform(-0.1,
                                                +0.1,
                                                size=len(guess_table['x_0']))
        guess_table['y_0'] += np.random.uniform(-0.1,
                                                +0.1,
                                                size=len(guess_table['y_0']))

        result_table = photometry(image, guess_table)

        return PhotometryResult(image, input_table, result_table, epsf, None,
                                config, filename)
    except Exception as ex:
        import traceback
        print(f'error in cheating_astrometry({filename}, {psf}, {config})')
        error = ''.join(
            traceback.format_exception(type(ex), ex, ex.__traceback__))
        print(error)
        return error