Example #1
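The examples below are excerpts and omit their imports. A typical preamble, assuming a photutils release in which BasicPSFPhotometry, IterativelySubtractedPSFPhotometry, DAOGroup and IntegratedGaussianPRF are still available (they have since been removed or renamed in newer photutils), might look roughly like this:

import numpy as np
from astropy.io import fits
from astropy.table import Table
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.stats import (gaussian_sigma_to_fwhm, gaussian_fwhm_to_sigma,
                           sigma_clipped_stats)
from photutils.psf import (BasicPSFPhotometry, IterativelySubtractedPSFPhotometry,
                           DAOGroup, IntegratedGaussianPRF, extract_stars,
                           EPSFBuilder, EPSFFitter)
from photutils.background import MMMBackground, MADStdBackgroundRMS
from photutils.detection import DAOStarFinder, IRAFStarFinder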
def LCO_PSF_PHOT(hdu,init_guesses):
	# im ~ np array of image data, [x0, y0] ~ float pixel position of the target, sigma_psf ~ LCO PSF sigma
	x0,y0=init_guesses
	im = hdu.data
	hdr = hdu.header
	
	fwhm = hdr['L1FWHM']/hdr['PIXSCALE'] # PSF FWHM in pixels, roughly ~ 5 pixels, ~ 2 arcsec 
	sigma_psf = fwhm*gaussian_fwhm_to_sigma # PSF sigma in pixels
	
	psf_model = IntegratedGaussianPRF(sigma=sigma_psf)
	daogroup = DAOGroup(2.0*sigma_psf*gaussian_sigma_to_fwhm)
	mmm_bkg = MMMBackground()
	fitter = LevMarLSQFitter()

	psf_model.x_0.fixed = True
	psf_model.y_0.fixed = True
	pos = Table(names=['x_0', 'y_0'], data=[[x0],[y0]]) # optionally supply a flux_0 column; otherwise the initial flux is estimated from aperture photometry

	photometry = BasicPSFPhotometry(group_maker=daogroup,
									 bkg_estimator=mmm_bkg,
									 psf_model=psf_model,
									 fitter=LevMarLSQFitter(),
									 fitshape=(11,11))
	result_tab = photometry(image=im, init_guesses=pos)
	residual_image = photometry.get_residual_image()
	
	return result_tab
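A minimal call sketch for the function above, assuming a reduced LCO frame whose header carries the L1FWHM and PIXSCALE keywords it reads (the file name and pixel position are placeholders):

with fits.open('lco_frame.fits') as hdul:                  # placeholder file name
    hdu = hdul[0]                                          # or the 'SCI' extension, depending on the product
    tab = LCO_PSF_PHOT(hdu, init_guesses=(512.3, 487.9))   # rough pixel position of the target
print(tab['x_fit', 'y_fit', 'flux_fit'])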
Example #2
def do_psf(image_num, Letter):

    epsf = build_epsf(image_num)

    daofind = DAOStarFinder(fwhm=8, threshold=3. * Std[image_num])

    sigma_psf = 2.0
    daogroup = DAOGroup(2.0 * sigma_psf * gaussian_sigma_to_fwhm)

    photometry = IterativelySubtractedPSFPhotometry(finder=daofind,
                                                    group_maker=daogroup,
                                                    bkg_estimator=None,
                                                    psf_model=epsf,
                                                    fitter=LevMarLSQFitter(),
                                                    aperture_radius=5,
                                                    niters=1,
                                                    fitshape=(11, 11))

    result_tab = photometry(image=Reduced_Image_Data[image_num])
    #residual_image = photometry.get_residual_image()

    X_0 = result_tab['x_fit']
    Y_0 = result_tab['y_fit']
    Flux = result_tab['flux_fit']
    Flux_Err = np.sqrt(np.array(Flux))
    PSF_Mags_1 = -2.5 * np.log10(np.array(Flux))  # Convert fitted fluxes to instrumental magnitudes
    PSF_Mags_Err_1 = (2.5 * Flux_Err) / (np.log(10) * Flux)  # sigma_m = 2.5/ln(10) * sigma_F/F

    return PSF_Mags_1, PSF_Mags_Err_1, Flux, Flux_Err, X_0, Y_0
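For reference, the uncertainty above comes from propagating the flux error through m = -2.5 log10(F), i.e. sigma_m = (2.5 / ln 10) * sigma_F / F ≈ 1.0857 * sigma_F / F. A one-line check with made-up numbers:

flux, flux_err = 1.2e4, 1.1e2                      # purely illustrative values
mag_err = 2.5 * flux_err / (np.log(10) * flux)     # ≈ 1.0857 * (flux_err / flux)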
Example #3
def photomyPSF(imgdata, position, sigma):
    PSFdata = np.copy(imgdata)
    sigma_psf = sigma
    daogroup = DAOGroup(2.0 * sigma_psf * gaussian_sigma_to_fwhm)
    mmm_bkg = MMMBackground()
    #fitter = LevMarLSQFitter()
    psf_model = IntegratedGaussianPRF(sigma=sigma_psf)

    sources = Table()

    sources['x_mean'] = position[:, 0].T
    sources['y_mean'] = position[:, 1].T

    psf_model.x_0.fixed = True
    psf_model.y_0.fixed = True
    pos = Table(names=['x_0', 'y_0'],
                data=[sources['x_mean'], sources['y_mean']])
    photometry = BasicPSFPhotometry(group_maker=daogroup,
                                    bkg_estimator=mmm_bkg,
                                    psf_model=psf_model,
                                    fitter=LevMarLSQFitter(),
                                    fitshape=(11, 11))

    result_tab = photometry(image=PSFdata, init_guesses=pos)
    positionflux = np.transpose(
        (result_tab['x_fit'], result_tab['y_fit'], result_tab['flux_fit']))

    magstar = 25 - 2.5 * np.log10(np.abs(result_tab['flux_fit']))  # instrumental magnitude with a zero point of 25
    return positionflux, magstar
Example #4
def PSF_photometry(data, coord_table, sigma_psf=10, scale=0.67, step=0.5):
    FLUX = []
    bkgrms = MADStdBackgroundRMS()
    std = bkgrms(data)
    iraffind = IRAFStarFinder(threshold=3.5 * std,
                              fwhm=sigma_psf * gaussian_sigma_to_fwhm,
                              minsep_fwhm=0.01,
                              roundhi=5.0,
                              roundlo=-5.0,
                              sharplo=0.0,
                              sharphi=2.0)
    daogroup = DAOGroup(2.0 * sigma_psf * gaussian_sigma_to_fwhm)
    mmm_bkg = MMMBackground()
    fitter = LevMarLSQFitter()
    psf_model = IntegratedGaussianPRF(sigma=sigma_psf)
    # psf_model.x_0.fixed = True
    # psf_model.y_0.fixed = True

    pos = Table(names=['x_0', 'y_0'],
                data=[coord_table['X'],
                      coord_table['Y']])[coord_table['good_star']]

    photometry = IterativelySubtractedPSFPhotometry(finder=iraffind,
                                                    group_maker=daogroup,
                                                    bkg_estimator=mmm_bkg,
                                                    psf_model=psf_model,
                                                    fitter=LevMarLSQFitter(),
                                                    niters=1,
                                                    fitshape=(41, 41))
    result_tab = photometry(image=data, init_guesses=pos)
    return result_tab['flux_fit']
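A usage sketch for PSF_photometry above; the coordinate table only needs the 'X', 'Y' and boolean 'good_star' columns that the function indexes (all values below are placeholders, and data is assumed to be the 2D image array):

coords = Table({'X': [120.4, 333.1, 58.7],
                'Y': [210.9, 47.2, 301.5],
                'good_star': [True, True, False]})     # placeholder positions and quality flags
fluxes = PSF_photometry(data, coords, sigma_psf=10)    # fitted fluxes of the good stars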
Example #5
def psf_photometry(filepath, filename, show, sn_ra, sn_dec):

    start = time.time()

    warnings.simplefilter('ignore', category=FITSFixedWarning)
    warnings.simplefilter('ignore', category=AstropyUserWarning)

    print(f'Working on {filename}')

    full_filepath = filepath + filename
    image_data = getdata(full_filepath)
    hdr = getheader(full_filepath)
    fwhm = hdr['L1FWHM'] / hdr['PIXSCALE']
    exptime = hdr['EXPTIME']

    metadata = load_pickle(filename)
    epsf_data = np.array(metadata['epsf'])
    epsf = EPSFModel(epsf_data, fwhm=fwhm, oversampling=2)
    # epsf.x_0.fixed = True
    # epsf.y_0.fixed = True
    daogroup = DAOGroup(2.0 * fwhm)
    bkg = MMMBackground()
    fitter = LevMarLSQFitter()
    fitshape = 25
    photometry = BasicPSFPhotometry(group_maker=daogroup,
                                    bkg_estimator=bkg,
                                    psf_model=epsf,
                                    fitter=fitter,
                                    fitshape=fitshape,
                                    aperture_radius=fitshape)

    psfmags = []

    print('\tExtracting other stars . . .')
    counter = 0
    for star in metadata['psf_fitted_stars']:
        counter += 1
        (x, y) = star
        psfmags = _do_phot(x, y, image_data, exptime, fitshape, photometry,
                           psfmags)
        print(
            f'\t\tStars extracted: {counter}/{len(metadata["psf_fitted_stars"])}',
            end='\r')
    print()

    print('\tExtracting supernova . . .')
    x, y = _get_sn_xy(filepath, filename, sn_ra, sn_dec)
    psfmags = _do_phot(x, y, image_data, exptime, fitshape, photometry,
                       psfmags)

    create_or_update_pickle(filename, key='psfmags', val=psfmags)

    end = time.time()
    print(f'Time to perform photometry (s): {end-start:.3f}')

    if show:
        checkmag(image_data, photometry.get_residual_image(), x, y, fitshape)
    print()
Example #6
def astropy_psf_photometry(img,
                           sigma_psf,
                           aperture=3,
                           x0=None,
                           y0=None,
                           filter=True,
                           sigma_filter=1):
    """
    performs PSF photometry on an image. If x0 and y0 are None, it will attempt to locate the target by searching
    for the brightest PSF in the field

    :param img: 2D array, image on which to perform PSF photometry
    :param sigma_psf: float, standard deviation of the PSF
    :param aperture: int, size of the aperture (pixels)
    :param x0: x position of the target (pixels)
    :param y0: y position of the target (pixels)
    :param filter: If True will apply a gaussian filter to the image with standard deviation sigma_filter before
     performing PSF photometry
    :param sigma_filter: standard deviation of gaussian filter to apply to the image
    :return: x0 column of photometry table, y0 column of photometry table, flux column of photometry table
    """
    if filter:
        image = ndimage.gaussian_filter(img, sigma=sigma_filter, order=0)
    else:
        image = img
    bkgrms = MADStdBackgroundRMS()
    std = bkgrms(image[image != 0])
    iraffind = IRAFStarFinder(threshold=2 * std,
                              fwhm=sigma_psf * gaussian_sigma_to_fwhm,
                              minsep_fwhm=0.01,
                              roundhi=5.0,
                              roundlo=-5.0,
                              sharplo=0.0,
                              sharphi=2.0)
    daogroup = DAOGroup(2.0 * sigma_psf * gaussian_sigma_to_fwhm)
    mmm_bkg = MMMBackground()
    fitter = LevMarLSQFitter()
    psf_model = IntegratedGaussianPRF(sigma=sigma_psf)
    if x0 is not None and y0 is not None:
        pos = Table(names=['x_0', 'y_0'], data=[[x0], [y0]])
        photometry = BasicPSFPhotometry(group_maker=daogroup,
                                        bkg_estimator=mmm_bkg,
                                        psf_model=psf_model,
                                        fitter=LevMarLSQFitter(),
                                        fitshape=(11, 11))
        res = photometry(image=image, init_guesses=pos)
        return res['x_fit'], res['y_fit'], res['flux_fit']
    photometry = BasicPSFPhotometry(finder=iraffind,
                                    group_maker=daogroup,
                                    bkg_estimator=mmm_bkg,
                                    psf_model=psf_model,
                                    fitter=fitter,
                                    fitshape=(11, 11),
                                    aperture_radius=aperture)
    res = photometry(image=image)
    return res['x_0'], res['y_0'], res['flux_fit']
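Two call patterns for astropy_psf_photometry above, one blind (star-finder driven) and one forced at a known position; frame is a placeholder 2D image array:

# blind photometry: let IRAFStarFinder locate the sources
xs, ys, fluxes = astropy_psf_photometry(frame, sigma_psf=2.0)

# forced photometry at a known target position, with the Gaussian pre-filter disabled
x, y, flux = astropy_psf_photometry(frame, sigma_psf=2.0, x0=141.2, y0=98.6, filter=False)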
Example #7
    def __init__(self,
                 psfimfilename,
                 targetpixscale,
                 psfpixscale=0.03,
                 x_0=None,
                 y_0=None,
                 psf_recenter=False,
                 fix_target_pos=True,
                 fwhm_arcsec=_HST_WFC3_PSF_FWHM_ARCSEC):
        """ 
        :param psfimfilename: full path for a .fits image file
        :param targetpixscale: arcseconds per pixel of the target image        
        :param psfpixscale: arcseconds per pixel of the psf image (if not 
        provided in the image header). Defaults to 0.03" per pixel
        :param x_0, y_0: pixel coordinates of the star. If not 
        provided, we assume it is at the center of the image.
        :param psf_recenter: execute a centroiding algorithm to locate the
        center of the psf
        :param fix_target_pos: execute "forced photometry" with the center of 
        the target fixed, and only the psf flux scaling as a free parameter.
        """
        psfimage = fits.open(psfimfilename)
        self.header = psfimage[0].header
        self.psfimdat = psfimage[0].data

        # TODO : check the header for pixel scale info

        if x_0 is None:
            self.xpsf = self.psfimdat.shape[0] / 2.
            self.ypsf = self.psfimdat.shape[1] / 2.
        else:
            self.xpsf, self.ypsf = x_0, y_0

        if psf_recenter:
            self.xpsf, self.ypsf = centroid(self.psfimdat, x_0, y_0)

        self.psfmodel = photutils.psf.models.FittableImageModel(
            self.psfimdat,
            x_0=self.xpsf,
            y_0=self.ypsf,
            oversampling=targetpixscale / psfpixscale)

        # Fix the center of the psf for "forced photometry" -- no recentering
        if fix_target_pos:
            self.psfmodel.x_0.fixed = True
            self.psfmodel.y_0.fixed = True
        else:
            self.psfmodel.x_0.fixed = False
            self.psfmodel.y_0.fixed = False

        # Set up the grouper, background estimator, and fitter objects:
        self.grouper = DAOGroup(2.0 * fwhm_arcsec / targetpixscale)  # grouping radius converted from arcsec to target-image pixels
        self.bkg_estimator = MMMBackground()
        self.fitter = LevMarLSQFitter()
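The rest of this class is not shown. A plausible companion method, mirroring how the other examples combine these pieces, would do forced photometry on a target image with the stored model, grouper, background estimator and fitter (the method name and its arguments are assumptions):

    def dophot(self, targetimdat, x_target, y_target, fitshape=(11, 11)):
        # forced PSF photometry at the target position, reusing the objects set up in __init__
        self.psfmodel.x_0 = x_target
        self.psfmodel.y_0 = y_target
        positions = Table(names=['x_0', 'y_0'], data=[[x_target], [y_target]])
        photometry = BasicPSFPhotometry(group_maker=self.grouper,
                                        bkg_estimator=self.bkg_estimator,
                                        psf_model=self.psfmodel,
                                        fitter=self.fitter,
                                        fitshape=fitshape,
                                        aperture_radius=fitshape[0])
        return photometry(image=targetimdat, init_guesses=positions)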
Example #8
    def run(self, stars_coords):
        stack_path = self.fits_explorer.get("stack")[0]
        stack_fwhm = np.mean(self.fwhm_fit.run(fits.getdata(stack_path), stars_coords)[0:2])

        print("{} global psf FWHM: {:.2f} (pixels)".format(INFO_LABEL, np.mean(stack_fwhm)))

        n_stars = np.shape(stars_coords)[0]
        n_images = len(self.files)

        fluxes = np.zeros((n_stars, n_images))

        pos = Table(
            names=["x_0", "y_0"], data=[stars_coords[:, 0], stars_coords[:, 1]]
        )

        daogroup = DAOGroup(2.0 * stack_fwhm * gaussian_sigma_to_fwhm)

        mmm_bkg = MMMBackground()

        psf_model = IntegratedGaussianPRF(sigma=stack_fwhm * gaussian_fwhm_to_sigma)  # initial sigma from the measured FWHM
        psf_model.sigma.fixed = False

        sky = []

        psf_model.x_0.fixed = True
        psf_model.y_0.fixed = True

        photometry = BasicPSFPhotometry(
            group_maker=daogroup,
            bkg_estimator=mmm_bkg,
            psf_model=psf_model,
            fitter=LevMarLSQFitter(),
            fitshape=(17, 17)
        )

        for i, image in enumerate(
                tqdm(
                    self.files[0::],
                    desc="Photometry extraction",
                    unit="files",
                    ncols=80,
                    bar_format=TQDM_BAR_FORMAT,
                )
        ):
            image = fits.getdata(image)

            result_tab = photometry(image=image, init_guesses=pos)

            fluxes[:, i] = result_tab["flux_fit"]
            sky.append(1)

        return fluxes, np.ones_like(fluxes), {"sky": sky}
Example #9
    def phot_sources(self, sources=None, peak=True, psf=True):

        if sources is None:
            sources = self.sources

        xx, yy = self.wcs.world_to_pixel_values(sources["ra"], sources["dec"])

        x_idx = np.floor(xx + 0.5).astype(int)
        y_idx = np.floor(yy + 0.5).astype(int)

        if peak:
            # Crude Peak Photometry
            # From pixel indexes to array indexing

            sources["flux_peak"] = Column(self.data[y_idx, x_idx], unit=self.unit * u.beam).to(u.mJy)
            sources["eflux_peak"] = Column(self.uncertainty.array[y_idx, x_idx], unit=self.unit * u.beam).to(u.mJy)

        if psf:
            # BasicPSFPhotometry with fixed positions

            sigma_psf = self.beam.sigma_pix.value

            # Using an IntegratedGaussianPRF can cause bias in the photometry
            # TODO: Check the NIKA2 calibration scheme
            # from photutils.psf import IntegratedGaussianPRF
            # psf_model = IntegratedGaussianPRF(sigma=sigma_psf)
            psf_model = CircularGaussianPSF(sigma=sigma_psf)

            psf_model.x_0.fixed = True
            psf_model.y_0.fixed = True

            daogroup = DAOGroup(3 * self.beam.fwhm_pix.value)
            mmm_bkg = MedianBackground()

            photometry = BasicPSFPhotometry(group_maker=daogroup, bkg_estimator=mmm_bkg, psf_model=psf_model, fitter=LevMarLSQFitter(), fitshape=9)

            positions = Table([Column(xx, name="x_0"), Column(yy, name="y_0"), Column(self.data[y_idx, x_idx], name="flux_0")])

            # Fill the mask with nan to perform correct photometry on the edge
            # of the mask, and catch numpy & astropy warnings
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", AstropyWarning)
                warnings.simplefilter("ignore", RuntimeWarning)
                result_tab = photometry(image=np.ma.array(self.data, mask=self.mask).filled(np.nan), init_guesses=positions)

            result_tab.sort("id")
            for _source, _tab in zip(["flux_psf", "eflux_psf"], ["flux_fit", "flux_unc"]):
                sources[_source] = Column(result_tab[_tab] * psf_model(0, 0), unit=self.unit * u.beam).to(u.mJy)
            sources["group_id"] = result_tab["group_id"]

        self.sources = sources
Example #10
    def __init__(self, fwhm, **kwargs):
        super().__init__(**kwargs)

        daogroup = DAOGroup(2.0 * fwhm)  # critical separation of two FWHM, in pixels
        mmm_bkg = MMMBackground()
        psf_model = IntegratedGaussianPRF(sigma=fwhm * gaussian_fwhm_to_sigma)  # convert FWHM to an initial Gaussian sigma
        psf_model.sigma.fixed = False
        psf_model.x_0.fixed = True
        psf_model.y_0.fixed = True

        self.photometry = BasicPSFPhotometry(group_maker=daogroup,
                                             bkg_estimator=mmm_bkg,
                                             psf_model=psf_model,
                                             fitter=LevMarLSQFitter(),
                                             fitshape=(17, 17))
Example #11
def do_photometry_epsf(
    image: np.ndarray,
    epsf: photutils.psf.EPSFModel,
    star_finder: Optional[photutils.StarFinderBase],
    initial_guess: Optional[Table] = None,
    config: Config = Config()) -> Table:
    """
    Given an image and an ePSF model, perform photometry and return star positions (and more) in a table
    :param image: input image
    :param epsf: EPSF model to use in photometry
    :param star_finder: which starfinder to use?
    :param initial_guess: initial estimates for star positions
    :param config:

    :return: Table with results
    """

    separation_factor = config.separation_factor
    clip_sigma = config.clip_sigma
    photometry_iterations = config.photometry_iterations

    epsf = photutils.psf.prepare_psf_model(
        epsf, renormalize_psf=False)  # renormalize is super slow...

    background_rms = MADStdBackgroundRMS()

    _, img_median, img_stddev = sigma_clipped_stats(image, sigma=clip_sigma)
    fwhm_guess = estimate_fwhm(epsf.psfmodel)

    grouper = DAOGroup(separation_factor * fwhm_guess)

    epsf.fwhm = astropy.modeling.Parameter(
        'fwhm', 'this is not the way to add this I think')
    epsf.fwhm.value = fwhm_guess

    photometry = IterativelySubtractedPSFPhotometry(
        finder=star_finder,
        group_maker=grouper,
        bkg_estimator=background_rms,
        psf_model=epsf,
        fitter=LevMarLSQFitter(),
        niters=photometry_iterations,
        fitshape=config.fitshape)

    return photometry.do_photometry(image, init_guesses=initial_guess)
Example #12
def do_photometry_basic(image: np.ndarray,
                        σ_psf: float) -> Tuple[Table, np.ndarray]:
    """
    Find stars in an image with IRAFStarFinder and measure them with basic PSF photometry

    :param image: The image data you want to find stars in
    :param σ_psf: expected standard deviation of the PSF (pixels)
    :return: tuple result table, residual image
    """
    bkgrms = MADStdBackgroundRMS()

    std = bkgrms(image)

    iraffind = IRAFStarFinder(threshold=3 * std,
                              sigma_radius=σ_psf,
                              fwhm=σ_psf * gaussian_sigma_to_fwhm,
                              minsep_fwhm=2,
                              roundhi=5.0,
                              roundlo=-5.0,
                              sharplo=0.0,
                              sharphi=2.0)
    daogroup = DAOGroup(0.1 * σ_psf * gaussian_sigma_to_fwhm)

    mmm_bkg = MMMBackground()

    # my_psf = AiryDisk2D(x_0=0., y_0=0.,radius=airy_minimum)
    # psf_model = prepare_psf_model(my_psf, xname='x_0', yname='y_0', fluxname='amplitude',renormalize_psf=False)
    psf_model = IntegratedGaussianPRF(sigma=σ_psf)
    # psf_model = AiryDisk2D(radius = airy_minimum)#prepare_psf_model(AiryDisk2D,xname ="x_0",yname="y_0")
    # psf_model = Moffat2D([amplitude, x_0, y_0, gamma, alpha])

    # photometry = IterativelySubtractedPSFPhotometry(finder=iraffind, group_maker=daogroup,
    #                                                bkg_estimator=mmm_bkg, psf_model=psf_model,
    #                                                fitter=LevMarLSQFitter(),
    #                                                niters=2, fitshape=(11,11))
    photometry = BasicPSFPhotometry(finder=iraffind,
                                    group_maker=daogroup,
                                    bkg_estimator=mmm_bkg,
                                    psf_model=psf_model,
                                    fitter=LevMarLSQFitter(),
                                    aperture_radius=11.0,
                                    fitshape=(11, 11))

    result_table = photometry.do_photometry(image)
    return result_table, photometry.get_residual_image()
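A quick synthetic check of do_photometry_basic above: build a single Gaussian star plus noise with numpy and run the routine on it (all numbers are illustrative):

yy, xx = np.mgrid[:64, :64]
sig = 2.0
star = 5000.0 * np.exp(-((xx - 30.0)**2 + (yy - 33.0)**2) / (2 * sig**2))
img = star + np.random.normal(10.0, 2.0, star.shape)

tab, resid = do_photometry_basic(img, σ_psf=sig)
print(tab['x_fit', 'y_fit', 'flux_fit'])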
Example #13
def run_photometry(img_file, epsf, fwhm, x, y, subtract_back=False,
    forced=False):

    img_hdu = fits.open(img_file)
    if subtract_back:
        bkg = Background2D(img_hdu[0].data, (21,21), filter_size=(3,3))
        image = img_hdu[0].data - bkg.background
        ndimage = NDData(data=image)
    else:
        image = img_hdu[0].data
        ndimage = NDData(data=img_hdu[0].data)

    psf = copy.copy(epsf)

    stars_tbl = Table()
    stars_tbl['x'] = x
    stars_tbl['y'] = y
    stars = extract_stars(ndimage, stars_tbl, size=51)

    stars_tbl['flux'] = np.array([stars[0].estimate_flux()])

    targets = Table()
    targets['x_0'] = stars_tbl['x']
    targets['y_0'] = stars_tbl['y']
    targets['flux_0'] = stars_tbl['flux']

    if forced:
        psf.x_0.fixed = True
        psf.y_0.fixed = True

    daogroup = DAOGroup(fwhm)
    mmm_bkg = MMMBackground()
    fitter = LevMarLSQFitter()
    photometry = BasicPSFPhotometry(group_maker=daogroup,
                                    bkg_estimator=mmm_bkg,
                                    psf_model=psf,
                                    fitter=fitter,
                                    fitshape=(51,51))

    result_tab = photometry(image=image, init_guesses=targets)

    return(result_tab)
Example #14
def init_setup():

    fitimage = fits.open('Serpens3/idxq28010_drz.fits')
    imdata = fitimage[1].data
    head = fitimage[0].header

    bkgrms = MADStdBackgroundRMS()
    std = bkgrms(imdata)
    mean = np.mean(imdata)
    sigma_psf = 2.0
    iraffind = IRAFStarFinder(threshold=3.5 * std,
                              fwhm=sigma_psf * gaussian_sigma_to_fwhm,
                              minsep_fwhm=0.01,
                              roundhi=5.0,
                              roundlo=-5.0,
                              sharplo=0.0,
                              sharphi=2.0)

    daogroup = DAOGroup(2.0 * sigma_psf * gaussian_sigma_to_fwhm)
    mmm_bkg = MMMBackground()

    return imdata, bkgrms, std, sigma_psf, iraffind, daogroup, mmm_bkg, mean
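init_setup() only prepares the ingredients; a plausible continuation, following the same pattern as the other examples here, wires them into an iterative PSF photometry run:

imdata, bkgrms, std, sigma_psf, iraffind, daogroup, mmm_bkg, mean = init_setup()

photometry = IterativelySubtractedPSFPhotometry(finder=iraffind,
                                                group_maker=daogroup,
                                                bkg_estimator=mmm_bkg,
                                                psf_model=IntegratedGaussianPRF(sigma=sigma_psf),
                                                fitter=LevMarLSQFitter(),
                                                niters=1,
                                                fitshape=(11, 11))
result_tab = photometry(image=imdata)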
Example #15
    def _basic_psf_flux(self,
                        image,
                        fwhm,
                        x=None,
                        y=None,
                        return_residual_image=False):

        w, h = image.shape
        if x is None:
            x = w / 2
        if y is None:
            y = h / 2

        wfit = w if w % 2 == 1 else w - 1
        hfit = h if h % 2 == 1 else h - 1
        fitshape = (wfit, hfit)

        daogroup = DAOGroup(2.0 * fwhm)
        psf_model = IntegratedGaussianPRF(sigma=fwhm / 2.35)

        photometry = BasicPSFPhotometry(group_maker=daogroup,
                                        bkg_estimator=MMMBackground(),
                                        psf_model=psf_model,
                                        fitshape=fitshape)

        psf_model.x_0.fixed = True
        psf_model.y_0.fixed = True
        pos = Table(names=['x_0', 'y_0'], data=[[x], [y]])

        result = photometry(image=image, init_guesses=pos)
        flux = result["flux_fit"].data[0]

        self.results = result

        if return_residual_image:
            return flux, photometry.get_residual_image()
        else:
            return flux
Example #16
    def forced_psf_photometry(self, forced_cat, save_residual_images=False):

        catalog = LsstStruct()
        positions = Table(names=['x_0', 'y_0'],
                          data=[forced_cat['x_fit'], forced_cat['y_fit']])
        if save_residual_images:
            self.residual_image_forced = LsstStruct()

        for band in self.mbi.bands:
            daogroup = DAOGroup(self.crit_separation * self.psf_fwhm[band])
            aperture_radius = self.aperture_radius * self.psf_fwhm[band]

            logger.info('performing forced photometry for ' + band + ' band')

            self.psf_model[band].x_0.fixed = True
            self.psf_model[band].y_0.fixed = True

            photometry = BasicPSFPhotometry(
                finder=None,
                group_maker=daogroup,
                fitshape=self.phot_opts['fitshape'],
                psf_model=self.psf_model[band],
                bkg_estimator=self.bkg,
                aperture_radius=aperture_radius)

            catalog[band] = photometry(image=self.phot_image[band],
                                       init_guesses=positions)

            # turns out the order might be different for each band!
            catalog[band].sort('x_fit')

            if save_residual_images:
                logger.info('generating residual image')
                self.residual_image_forced[band] = subtract_psf(
                    self.mbi.image[band], self.psf_model[band], catalog[band])

        return catalog
Example #17
## This is the input file; change this path to run on a different image
with fits.open(
        r'/Users/phystastical/Desktop/BB2/script/twomass-j.fits') as hdul:
    image = hdul[0].data

bkgrms = MADStdBackgroundRMS()
std = bkgrms(image)
sigma_psf = 2.0  # assumed value; the PSF sigma (in pixels) is not defined in the original snippet

iraffind = IRAFStarFinder(threshold=3.5 * std,
                          fwhm=sigma_psf * gaussian_sigma_to_fwhm,
                          minsep_fwhm=0.01,
                          roundhi=5.0,
                          roundlo=-5.0,
                          sharplo=0.0,
                          sharphi=2.0)
daogroup = DAOGroup(2.0 * sigma_psf * gaussian_sigma_to_fwhm)

mmm_bkg = MMMBackground()
fitter = LevMarLSQFitter()
psf_model = IntegratedGaussianPRF(sigma=sigma_psf)
photometry = IterativelySubtractedPSFPhotometry(finder=iraffind,
                                                group_maker=daogroup,
                                                bkg_estimator=mmm_bkg,
                                                psf_model=psf_model,
                                                fitter=LevMarLSQFitter(),
                                                niters=1,
                                                fitshape=(11, 11))
result_tab = photometry(image=image)
residual_image = photometry.get_residual_image()

#Plot images made#
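The snippet stops at the '#Plot images made#' marker; a minimal continuation (assuming matplotlib) that displays the data next to the residuals would be:

import matplotlib.pyplot as plt

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
ax1.imshow(image, origin='lower', cmap='viridis')
ax1.set_title('Data')
ax2.imshow(residual_image, origin='lower', cmap='viridis')
ax2.set_title('Residual image')
plt.show()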
Example #18
def epsf_phot(target, centroided_sources, plots=False):
    def hmsm_to_days(hour=0,min=0,sec=0,micro=0):
        """
        Convert hours, minutes, seconds, and microseconds to fractional days.
        
        """
        days = sec + (micro / 1.e6)
        days = min + (days / 60.)
        days = hour + (days / 60.)
        return days / 24.
    
    def date_to_jd(year,month,day):
        """
        Convert a date to Julian Day.
        
        Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet', 
            4th ed., Duffet-Smith and Zwart, 2011.
        
        """
        if month == 1 or month == 2:
            yearp = year - 1
            monthp = month + 12
        else:
            yearp = year
            monthp = month
        
        # this checks where we are in relation to October 15, 1582, the beginning
        # of the Gregorian calendar.
        if ((year < 1582) or
            (year == 1582 and month < 10) or
            (year == 1582 and month == 10 and day < 15)):
            # before start of Gregorian calendar
            B = 0
        else:
            # after start of Gregorian calendar
            A = math.trunc(yearp / 100.)
            B = 2 - A + math.trunc(A / 4.)
            
        if yearp < 0:
            C = math.trunc((365.25 * yearp) - 0.75)
        else:
            C = math.trunc(365.25 * yearp)
            
        D = math.trunc(30.6001 * (monthp + 1))
        
        jd = B + C + D + day + 1720994.5
        
        return jd

    pines_path = pines_dir_check()
    short_name = short_name_creator(target)
    reduced_path = pines_path/('Objects/'+short_name+'/reduced/')
    reduced_filenames = natsort.natsorted([x.name for x in reduced_path.glob('*.fits')])
    reduced_files = np.array([reduced_path/i for i in reduced_filenames])

    centroided_sources.columns = centroided_sources.columns.str.strip()
    source_names = natsort.natsorted(list(set([i.split(' ')[0]+' '+i.split(' ')[1] for i in centroided_sources.keys() if (i[0] == '2') or (i[0] == 'R')])))
    
    #Create output plot directories for each source.
    if plots:
        for name in source_names:
            #If the folders are already there, delete them. 
            source_path = (pines_path/('Objects/'+short_name+'/psf_phot/'+name+'/'))
            if source_path.exists():
                shutil.rmtree(source_path)
            #Create folders.
            os.mkdir(source_path)

    #Declare a new dataframe to hold the information for all targets.
    columns = ['Filename', 'Time UT', 'Time JD', 'Airmass', 'Seeing']
    for i in range(0, len(source_names)):
        columns.append(source_names[i]+' Flux')
        columns.append(source_names[i]+' Flux Error')
    psf_df = pd.DataFrame(index=range(len(reduced_files)), columns=columns)
    output_filename = pines_path/('Objects/'+short_name+'/psf_phot/'+short_name+'_psf_phot.csv')

    for i in range(0, len(reduced_files)):
        #Read in image data/header. 
        file = reduced_files[i]
        data = fits.open(file)[0].data
        header = fits.open(file)[0].header
        print('{}, image {} of {}.'.format(file.name, i+1, len(reduced_files)))

        #Read in some supporting information.
        log_path = pines_path/('Logs/'+file.name.split('.')[0]+'_log.txt')
        log = pines_log_reader(log_path)
        date_obs = header['DATE-OBS']
        #Catch a case that can cause datetime strptime to crash; Mimir headers sometimes have DATE-OBS with seconds specified as 010.xx seconds, when it should be 10.xx seconds. 
        if len(date_obs.split(':')[-1].split('.')[0]) == 3:
            date_obs = date_obs.split(':')[0] + ':' + date_obs.split(':')[1] + ':' + date_obs.split(':')[-1][1:]
        #Keep a try/except clause here in case other unknown DATE-OBS formats pop up. 
        try:
            date = datetime.datetime.strptime(date_obs, '%Y-%m-%dT%H:%M:%S.%f')
        except:
            print('Header DATE-OBS format does not match the format code in strptime! Inspect/correct the DATE-OBS value.')
            pdb.set_trace()
        
        days = date.day + hmsm_to_days(date.hour,date.minute,date.second,date.microsecond)
        jd = date_to_jd(date.year,date.month,days)
        psf_df['Filename'][i] = file.name
        psf_df['Time UT'][i] = header['DATE-OBS']
        psf_df['Time JD'][i] = jd
        psf_df['Airmass'][i] = header['AIRMASS']
        psf_df['Seeing'][i] = log['X seeing'][np.where(log['Filename'] == file.name.split('_')[0]+'.fits')[0][0]]
        
        #Read in source centroids for this image
        x = np.zeros(len(source_names))
        y = np.zeros(len(source_names))
        for j in range(len(source_names)):
            source = source_names[j]
            x[j] = centroided_sources[source+' X'][i]
            y[j] = centroided_sources[source+' Y'][i]

        #We will extract pixel cutouts of our stars, so explicitly exclude stars that are too close to the image boundaries (they cannot be extracted).
        size = 13
        hsize = (size - 1) / 2
        #mask = ((x > hsize) & (x < (data.shape[1] -1 - hsize)) & (y > hsize) & (y < (data.shape[0] -1 - hsize)) & (y > 100) & (y < 923))

        #Create table of good star positions
        stars_tbl = Table()
        stars_tbl['x'] = x
        stars_tbl['y'] = y
        
        #Subtract background (star cutouts from which we build the ePSF must have background subtracted).
        mean_val, median_val, std_val = sigma_clipped_stats(data, sigma=2.)  
        data -= median_val
        
        #Replace nans in data using Gaussian. 
        # kernel = Gaussian2DKernel(x_stddev=0.5)
        # data = interpolate_replace_nans(data, kernel)

        #The extract_stars() function requires the input data as an NDData object. 
        nddata = NDData(data=data)  

        #Extract star cutouts.
        stars = extract_stars(nddata, stars_tbl, size=size)  
                        

        #Plot. 
        nrows = 5
        ncols = 5
        fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(10, 10), squeeze=True)
        ax = ax.ravel()
        for j in range(len(stars)):           
            norm = simple_norm(stars[j], 'log', percent=99.)
            ax[j].imshow(stars[j].data, norm=norm, origin='lower', cmap='viridis')

        # pdb.set_trace()  # leftover debugging breakpoint; uncomment to inspect the star cutouts interactively

        #Construct the ePSF using the star cutouts.
        epsf_fitter = EPSFFitter()
        epsf_builder = EPSFBuilder(maxiters=4, progress_bar=False, fitter=epsf_fitter)   

        try:
            epsf, fitted_stars = epsf_builder(stars)
            output_filename = pines_path/('Objects/'+short_name+'/psf_phot/'+short_name+'_psf_phot.csv')

            for j in range(len(stars)):
                star = stars[j]
                source_name = source_names[j]
                sigma_psf = 1.85

                dtype = [('x_0', 'f8'), ('y_0', 'f8')]
                pos = Table(data=np.zeros(1, dtype=dtype))
                source_x = stars_tbl['x'][j]
                source_y = stars_tbl['y'][j]
                pos['x_0'] = source_x - int(source_x - size/2 + 1)
                pos['y_0'] = source_y - int(source_y - size/2 + 1)

                daogroup = DAOGroup(4.0*sigma_psf*gaussian_sigma_to_fwhm)
                mmm_bkg = MMMBackground()
                photometry = BasicPSFPhotometry(group_maker=daogroup,
                                    bkg_estimator=mmm_bkg,
                                    psf_model=epsf,
                                    fitter=LevMarLSQFitter(),
                                    fitshape=(13,13),
                                    aperture_radius=4.)
                

                result_tab = photometry(image=star, init_guesses=pos)
                residual_image = photometry.get_residual_image()
                psf_df[source_name+' Flux'][i] = result_tab['flux_fit'][0]
                psf_df[source_name+' Flux Error'][i] = result_tab['flux_unc'][0]

                if plots:
                    fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(12,4))
                    im = ax[0].imshow(star, origin='lower')
                    divider = make_axes_locatable(ax[0])
                    cax = divider.append_axes('right', size='5%', pad=0.05)
                    fig.colorbar(im, cax=cax, orientation='vertical')
                    ax[0].plot(result_tab['x_fit'][0], result_tab['y_fit'][0], 'rx')
                    ax[0].set_title('Data')

                    im2 = ax[1].imshow(epsf.data, origin='lower')
                    ax[1].set_title('EPSF Model')
                    divider = make_axes_locatable(ax[1])
                    cax = divider.append_axes('right', size='5%', pad=0.05)
                    fig.colorbar(im2, cax=cax, orientation='vertical')

                    im3 = ax[2].imshow(residual_image, origin='lower')
                    ax[2].set_title('Residual Image')
                    divider = make_axes_locatable(ax[2])
                    cax = divider.append_axes('right', size='5%', pad=0.05)
                    fig.colorbar(im3, cax=cax, orientation='vertical')
                    plt.suptitle(source_name+'\n'+reduced_files[i].name+', image '+str(i+1)+' of '+str(len(reduced_files)))
                    plt.subplots_adjust(wspace=0.5, top=0.95, bottom = 0.05)
                    plot_output_name = pines_path/('Objects/'+short_name+'/psf_phot/'+source_name+'/'+str(i).zfill(4)+'.jpg')
                    plt.savefig(plot_output_name)
                    plt.close()
        except:
            print('')
            print('EPSF BUILDER FAILED, SKIPPING IMAGE.')
            print('')
        #Plot the ePSF. 
        # plt.figure()
        # norm = simple_norm(epsf.data, 'log', percent=99.)
        # plt.imshow(epsf.data, norm=norm, origin='lower', cmap='viridis')
        # cb = plt.colorbar()
        # plt.tight_layout()   

        

    print('Saving psf photometry output to {}.'.format(output_filename))
    with open(output_filename, 'w') as f:
        for j in range(len(psf_df)):
            if j == 0:
                f.write('{:>21s}, {:>22s}, {:>17s}, {:>7s}, {:>7s}, '.format('Filename', 'Time UT', 'Time JD', 'Airmass', 'Seeing'))
                for i in range(len(source_names)):
                    if i != len(source_names) - 1:
                        f.write('{:>20s}, {:>26s}, '.format(source_names[i]+' Flux', source_names[i]+' Flux Error'))
                    else:
                        f.write('{:>20s}, {:>26s}\n'.format(source_names[i]+' Flux', source_names[i]+' Flux Error'))

            format_string = '{:21s}, {:22s}, {:17.9f}, {:7.2f}, {:7.1f}, '

            #If the seeing value for this image is 'nan' (a string), convert it to a float. 
            #TODO: Not sure why it's being read in as a string, fix that. 
            if type(psf_df['Seeing'][j]) == str:
                psf_df['Seeing'][j] = float(psf_df['Seeing'][j])

            #Do a try/except clause for writeout, in case it breaks in the future. 
            try:
                f.write(format_string.format(psf_df['Filename'][j], psf_df['Time UT'][j], psf_df['Time JD'][j], psf_df['Airmass'][j], psf_df['Seeing'][j]))
            except:
                print('Writeout failed! Inspect quantities you are trying to write out.')
                pdb.set_trace()
            for i in range(len(source_names)):                    
                if i != len(source_names) - 1:
                    format_string = '{:20.11f}, {:26.11f}, '
                else:
                    format_string = '{:20.11f}, {:26.11f}\n'
                
                f.write(format_string.format(psf_df[source_names[i]+' Flux'][j], psf_df[source_names[i]+' Flux Error'][j]))
    print('')    
    return
           
Example #19
        def SubmitEvent(self):

            #Not a fan of globals but this is the easiest way to grab the file location
            global fileLocation
            #sigma_psf = 2.88
            #Grab the Sigma from the Entry box in the GUI
            SigmaPSF = SigmaPSFentry.get()
            #Turn the string into a float
            sigma_psf = float(SigmaPSF)
            #Grab the number of iterations from Entry box in GUI
            N_iters1 = nitersEntry.get()
            #Turn the string into a float
            N_iters = float(N_iters1)
            #Test cases to make sure that information was flowing from the GUI to the program
            #print(SigmaPSF)
            #print(N_iters)

            #Open the file as a fits (allows us to handle it) then turn that into readable data.
            with fits.open(fileLocation) as hdul:
                image = hdul[0].data

            #automatically gathered information needed to run the Star Finder
            bkgrms = MADStdBackgroundRMS()
            std = bkgrms(image)

            #Find the stars
            iraffind = IRAFStarFinder(threshold=3.5 * std,
                                      fwhm=sigma_psf * gaussian_sigma_to_fwhm,
                                      minsep_fwhm=0.01,
                                      roundhi=5.0,
                                      roundlo=-5.0,
                                      sharplo=0.0,
                                      sharphi=2.0)
            #Group the stars
            daogroup = DAOGroup(2.0 * sigma_psf * gaussian_sigma_to_fwhm)

            #More automatically gathered info needed for IS-PSFPhotometry to take place
            mmm_bkg = MMMBackground()
            fitter = LevMarLSQFitter()
            #Grabbed from the user input
            psf_model = IntegratedGaussianPRF(sigma=sigma_psf)
            #Run IS-PSFPhotometry
            photometry = IterativelySubtractedPSFPhotometry(
                finder=iraffind,
                group_maker=daogroup,
                bkg_estimator=mmm_bkg,
                psf_model=psf_model,
                fitter=LevMarLSQFitter(),
                niters=N_iters,
                fitshape=(11, 11))
            #Do photometry on the image
            result_tab = photometry(image=image)
            #grab the residual image
            residual_image = photometry.get_residual_image()

            #Get the results of the photometry and print the aspects we want.
            phot_results = result_tab  # reuse the results computed above rather than re-running the photometry
            with open("output.txt", "w") as text_file:
                print(phot_results['x_fit', 'y_fit', 'flux_fit'],
                      file=text_file)
            print(phot_results['x_fit', 'y_fit', 'flux_fit'])
            print("Sum of pixels: {}".format(sum(sum(residual_image))))
            #Plot images made#
            #Start by creating plots.
            plt.subplot(1, 5, 1)
            #Show the first plot (which is just the raw image)
            plt.imshow(image,
                       cmap='viridis',
                       aspect=1,
                       interpolation='nearest',
                       origin='lower')
            plt.title('Raw')
            plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04)
            #Create the second plot
            plt.subplot(1, 5, 2)
            #Show the residual_image
            plt.imshow(residual_image,
                       cmap='viridis',
                       aspect=1,
                       interpolation='nearest',
                       origin='lower')
            plt.title('PSF')
            plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04)
            #Draw in the sum of pixels.
            plt.text(0,
                     65,
                     "Sum of pixels: {}".format(sum(sum(residual_image))),
                     fontsize=7)
            #Create the third plot which is the subtracted images combined.
            sb = image - residual_image
            plt.subplot(1, 5, 3)
            plt.imshow(sb,
                       cmap='viridis',
                       aspect=1,
                       interpolation='nearest',
                       origin='lower')
            plt.title('PSF-S')
            plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04)

            with open("AP_RI.txt", "w") as f:
                for _ in range(len(residual_image)):
                    f.write(str(residual_image[_]))
            with open("AP_BS.txt", "w") as f:
                for _ in range(len(sb)):
                    f.write(str(sb[_]))

            print("Starting creation of CSV")
            subprocess.run(['py', 'create_CSV.py'], shell=False)

            print("Starting creation of Stats")
            subprocess.run(['py', 'create_info.py'], shell=False)

            print("Starting Threshold")
            subprocess.run(['py', 'threshold.py'], shell=False)

            with open("APC_Res.csv", "r") as f:
                APC_Res = f.read()
            APC_Res = APC_Res.split(",")
            APC_Res = [float(i) for i in APC_Res]

            #Every sqrt(N_pixels) data points start a new row, rebuilding the residuals as a 2D array.
            #Corrected_Res is the main array; temp_list is appended to it every sqrt(N_pixels) entries,
            #then reset, and this continues until the full pixel count is reached.
            #An internal counter tracks the position within each row and resets every sqrt(N_pixels).
            temp_list = np.array([])
            SqrPixels = math.sqrt(len(APC_Res))
            internal_counter = 0
            #print(SqrPixels)
            #print(len(APC_Res))
            Corrected_Res = np.array([[]])

            for _ in range(len(APC_Res)):
                if internal_counter <= SqrPixels - 2:
                    try:
                        temp_list = np.append(temp_list, APC_Res[_ - 1])
                        #print(_)
                        if _ + 1 == (int(SqrPixels) * int(SqrPixels)):
                            Corrected_Res = np.append(Corrected_Res, temp_list)
                    except:
                        print("Not right 2.0")
                    internal_counter = internal_counter + 1
                else:
                    internal_counter = 0
                    #print(temp_list)
                    Corrected_Res = np.append(Corrected_Res, temp_list)
                    temp_list = []
                    temp_list = np.append(temp_list, APC_Res[_ - 1])
                    #print("Resetting Counter & List {}".format(_))
                    if _ + 1 == (int(SqrPixels) * int(SqrPixels)):
                        Corrected_Res = np.append(Corrected_Res, temp_list)
                        #print(_+1)
                    #print("Iteration {}".format(_))
            #print(residual_image)
            #print("\n")
            #print(Corrected_Res)
            Corrected_Res = np.reshape(Corrected_Res,
                                       (int(SqrPixels), int(SqrPixels)))

            Correct_BS = image - Corrected_Res
            plt.subplot(1, 5, 4)
            plt.imshow(Corrected_Res,
                       cmap='viridis',
                       aspect=1,
                       interpolation='nearest',
                       origin='lower')
            plt.title('CPSF')
            plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04)

            plt.subplot(1, 5, 5)
            plt.imshow(Correct_BS,
                       cmap='viridis',
                       aspect=1,
                       interpolation='nearest',
                       origin='lower')
            plt.title('CPSF-S')
            plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04)

            #Number of bins
            n_bins = 20
            #Not entirely sure why this works the way it does; it took tinkering and a lot of documentation examples to get right.
            fig, axs = plt.subplots(1, 2)

            # We can set the number of bins with the `bins` kwarg
            axs[0].hist(residual_image, bins=n_bins)
            plt.title('Residual Image Hist')
            axs[1].hist(sb, bins=n_bins)
            plt.title('Background Subtracted Hist')
            #plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04)
            #All Pixels from residual image

            fig = plt.figure()
            ax = fig.add_subplot(111, projection='3d')
            delta = (6 * (1 / len(sb)))

            nx = ny = np.arange(-3.0, 3.0, delta)
            X, Y = np.meshgrid(nx, ny)
            #print(X)
            #print(Y)
            x, y, z = X * len(sb), Y * len(sb), sb
            ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap='viridis')

            figi = plt.figure()
            axi = figi.add_subplot(111, projection='3d')
            deltai = (6 * (1 / len(sb)))

            nxi = nyi = np.arange(-3.0, 3.0, deltai)
            Xi, Yi = np.meshgrid(nxi, nyi)
            #print(X)
            #print(Y)
            xi, yi, zi = Xi * len(Correct_BS), Yi * len(Correct_BS), Correct_BS
            axi.plot_surface(xi, yi, zi, rstride=1, cstride=1, cmap='viridis')

            plt.show()
Example #20
def extractFlux(cnam, ccd, rccd, read, gain, ccdwin, rfile, store):
    """This extracts the flux of all apertures of a given CCD.

    The steps are (1) creation of PSF model, (2) PSF fitting, (3)
    flux extraction. The apertures are assumed to be correctly positioned.

    It returns the results as a dictionary keyed on the aperture label. Each
    entry returns a list:

    [x, ex, y, ey, fwhm, efwhm, beta, ebeta, counts, countse, sky, esky,
    nsky, nrej, flag]

    flag = bitmask. See hipercam.core to see all the options which are
    referred to by name in the code e.g. ALL_OK. The various flags can
    signal that there no sky pixels (NO_SKY), the sky aperture was off
    the edge of the window (SKY_AT_EDGE), etc.

    This code::

       >> bset = flag & TARGET_SATURATED

    determines whether the data saturation flag is set for example.

    Arguments::

       cnam     : string
          CCD identifier label

       ccd       : CCD
           the debiassed, flat-fielded CCD.

       rccd : CCD
          corresponding raw CCD, used to work out whether data are
          saturated in target aperture.

       read      : CCD
           readnoise divided by the flat-field

       gain      : CCD
           gain multiplied by the flat field

       ccdwin   : dictionary of strings
           the Window label corresponding to each Aperture

       rfile     : Rfile
           reduce file configuration parameters

       store     : dict of dicts
           see moveApers for what this contains.

    """

    # initialise flag
    flag = hcam.ALL_OK

    ccdaper = rfile.aper[cnam]

    results = {}
    # get profile params from aperture store
    mfwhm = store["mfwhm"]
    mbeta = store["mbeta"]
    method = "m" if mbeta > 0.0 else "g"

    if mfwhm <= 0:
        # die hard, die soon as there's nothing we can do.
        print((" *** WARNING: CCD {:s}: no measured FWHM to create PSF model"
               "; no extraction possible").format(cnam))
        # set flag to indicate no FWHM
        flag = hcam.NO_FWHM

        for apnam, aper in ccdaper.items():
            info = store[apnam]
            results[apnam] = {
                "x": aper.x,
                "xe": info["xe"],
                "y": aper.y,
                "ye": info["ye"],
                "fwhm": info["fwhm"],
                "fwhme": info["fwhme"],
                "beta": info["beta"],
                "betae": info["betae"],
                "counts": 0.0,
                "countse": -1,
                "sky": 0.0,
                "skye": 0.0,
                "nsky": 0,
                "nrej": 0,
                "flag": flag,
            }
        return results

    # all apertures have to be in the same window, or we can't easily make a
    # postage stamp of the data
    wnames = set(ccdwin.values())
    if len(wnames) != 1:
        print((" *** WARNING: CCD {:s}: not all apertures"
               " lie within the same window; no extraction possible"
               ).format(cnam))

        # set flag to indicate no extraction
        flag = hcam.NO_EXTRACTION

        # return empty results
        for apnam, aper in ccdaper.items():
            info = store[apnam]
            results[apnam] = {
                "x": aper.x,
                "xe": info["xe"],
                "y": aper.y,
                "ye": info["ye"],
                "fwhm": info["fwhm"],
                "fwhme": info["fwhme"],
                "beta": info["beta"],
                "betae": info["betae"],
                "counts": 0.0,
                "countse": -1,
                "sky": 0.0,
                "skye": 0.0,
                "nsky": 0,
                "nrej": 0,
                "flag": flag,
            }
        return results
    wnam = wnames.pop()

    # PSF params are in binned pixels, so find binning
    bin_fac = ccd[wnam].xbin

    # create PSF model
    if method == "m":
        psf_model = MoffatPSF(beta=mbeta, fwhm=mfwhm / bin_fac)
    else:
        psf_model = IntegratedGaussianPRF(sigma=mfwhm *
                                          gaussian_fwhm_to_sigma / bin_fac)

    # force photometry only at aperture positions
    # this means PSF shape and positions are fixed, we are only fitting flux
    if rfile["psf_photom"]["positions"] == "fixed":
        psf_model.x_0.fixed = True
        psf_model.y_0.fixed = True

    # create instances for PSF photometry
    gfac = float(rfile["psf_photom"]["gfac"])
    sclip = float(rfile["sky"]["thresh"])
    daogroup = DAOGroup(gfac * mfwhm / bin_fac)
    mmm_bkg = MMMBackground(sigma_clip=SigmaClip(sclip))
    fitter = LevMarLSQFitter()
    fitshape_box_size = int(2 * int(rfile["psf_photom"]["fit_half_width"]) + 1)
    fitshape = (fitshape_box_size, fitshape_box_size)

    photometry_task = BasicPSFPhotometry(
        group_maker=daogroup,
        bkg_estimator=mmm_bkg,
        psf_model=psf_model,
        fitter=fitter,
        fitshape=fitshape,
    )

    # initialise flag
    flag = hcam.ALL_OK

    # extract Windows relevant for these apertures
    wdata = ccd[wnam]
    wraw = rccd[wnam]

    # extract sub-windows that include all of the apertures, plus a little
    # extra around the edges.
    x1 = min([ap.x - ap.rsky2 - wdata.xbin for ap in ccdaper.values()])
    x2 = max([ap.x + ap.rsky2 + wdata.xbin for ap in ccdaper.values()])
    y1 = min([ap.y - ap.rsky2 - wdata.ybin for ap in ccdaper.values()])
    y2 = max([ap.y + ap.rsky2 + wdata.ybin for ap in ccdaper.values()])

    # extract sub-Windows
    swdata = wdata.window(x1, x2, y1, y2)
    swraw = wraw.window(x1, x2, y1, y2)

    # compute pixel positions of apertures in windows
    xpos, ypos = zip(*((swdata.x_pixel(ap.x), swdata.y_pixel(ap.y))
                       for ap in ccdaper.values()))
    positions = Table(names=["x_0", "y_0"], data=(xpos, ypos))

    # do the PSF photometry
    photom_results = photometry_task(swdata.data, init_guesses=positions)
    slevel = mmm_bkg(swdata.data)

    # unpack the results and check apertures
    for apnam, aper in ccdaper.items():
        try:
            # reset flag
            flag = hcam.ALL_OK

            result_row = photom_results[photom_results["id"] == int(apnam)]
            if len(result_row) == 0:
                flag |= hcam.NO_DATA
                raise hcam.HipercamError(
                    "no source in PSF photometry for this aperture")
            elif len(result_row) > 1:
                flag |= hcam.NO_EXTRACTION
                raise hcam.HipercamError(
                    "ambiguous lookup for this aperture in PSF photometry")
            else:
                result_row = result_row[0]

            # compute X, Y arrays over the sub-window relative to the centre
            # of the aperture and the distance squared from the centre (Rsq)
            # to save a little effort.
            x = swdata.x(np.arange(swdata.nx)) - aper.x
            y = swdata.y(np.arange(swdata.ny)) - aper.y
            X, Y = np.meshgrid(x, y)
            Rsq = X**2 + Y**2

            # size of a pixel which is used to taper pixels as they approach
            # the edge of the aperture to reduce pixellation noise
            size = np.sqrt(wdata.xbin * wdata.ybin)

            # target selection, accounting for extra apertures and allowing
            # pixels to contribute if their centres are as far as size/2 beyond
            # the edge of the circle (but with a tapered weight)
            dok = Rsq < (aper.rtarg + size / 2.0)**2
            if not dok.any():
                # check there are some valid pixels
                flag |= hcam.NO_DATA
                raise hcam.HipercamError("no valid pixels in aperture")

            # check for saturation and nonlinearity
            if cnam in rfile.warn:
                if swraw.data[dok].max() >= rfile.warn[cnam]["saturation"]:
                    flag |= hcam.TARGET_SATURATED

                if swraw.data[dok].max() >= rfile.warn[cnam]["nonlinear"]:
                    flag |= hcam.TARGET_NONLINEAR
            else:
                warnings.warn(
                    "CCD {:s} has no nonlinearity or saturation levels set".format(cnam))

            counts = result_row["flux_fit"]
            countse = result_row["flux_unc"]
            info = store[apnam]

            results[apnam] = {
                "x": aper.x,
                "xe": info["xe"],
                "y": aper.y,
                "ye": info["ye"],
                "fwhm": info["fwhm"],
                "fwhme": info["fwhme"],
                "beta": info["beta"],
                "betae": info["betae"],
                "counts": counts,
                "countse": countse,
                "sky": slevel,
                "skye": 0,
                "nsky": 0,
                "nrej": 0,
                "flag": flag,
            }

        except hcam.HipercamError as err:

            info = store[apnam]
            flag |= hcam.NO_EXTRACTION

            results[apnam] = {
                "x": aper.x,
                "xe": info["xe"],
                "y": aper.y,
                "ye": info["ye"],
                "fwhm": info["fwhm"],
                "fwhme": info["fwhme"],
                "beta": info["beta"],
                "betae": info["betae"],
                "counts": 0.0,
                "countse": -1,
                "sky": 0.0,
                "skye": 0.0,
                "nsky": 0,
                "nrej": 0,
                "flag": flag,
            }

    # finally, we are done
    return results
Example #21
def __fit_PSF(image_file, mask_file=None, nstars=40,                
              thresh_sigma=5.0, pixelmin=20, elongation_lim=1.4, area_max=500,             
              cutout=35, 
              astrom_sigma=5.0, psf_sigma=5.0, alim=10000, clean=True, 
              source_lim=None, 
              write_ePSF=False, ePSF_output=None, 
              plot_ePSF=True, ePSF_plotname=None, 
              plot_residuals=False, resid_plotname=None,
              verbose=False):
    """    
    Input: 
        general:
        - filename for a **BACKGROUND-SUBTRACTED** image
        - filename for a mask image (optional; default None)
        - maximum number of stars to use (optional; default 40; set to None
          to impose no limit)
          
        source detection:
        - sigma threshold for source detection with image segmentation 
          (optional; default 5.0)
        - *minimum* number of isophotal pixels (optional; default 20)
        - *maximum* allowed elongation for sources found by image segmentation 
          (optional; default 1.4)
        - *maximum* allowed area for sources found by image segmentation 
          (optional; default 500 pix**2)
        - cutout size around each star in pix (optional; default 35 pix; must 
          be ODD, rounded down if even)
        
        astrometry.net:
        - sigma threshold for source detection with astrometry.net (optional; 
          default 5.0)
        - sigma of the Gaussian PSF of the image (optional; default 5.0)
        - maximum allowed source area in pix**2 for astrometry.net for 
          deblending (optional; default 10000; only relevant if no source list 
          file is provided)
        - whether to remove files output by image2xy once finished with them 
          (optional; default True)

        misc:
        - limit on number of sources to fit with ePSF (optional; default None 
          which imposes no limit)        
                
        writing, plotting, verbosity:
        - whether to write the derived ePSF to a fits file (optional; default 
          False)
        - name for output ePSF fits file (optional; default set below)
        - whether to plot the derived ePSF (optional; default True)
        - name for output ePSF plot (optional; default set below)
        - whether to plot the residuals of the iterative PSF fitting (optional;
          default False)
        - name for output residuals plot (optional; default set below)
        - be verbose (optional; default False)
    
    Uses image segmentation to obtain a list of sources in the image with their 
    x, y coordinates. Uses EPSFBuilder to empirically obtain the ePSF of these 
    stars. Optionally writes and/or plots the obtained ePSF. Finally, uses 
    astrometry.net to find all sources in the image, and fits them with the 
    empirically obtained ePSF.
    
    The ePSF obtained here should NOT be used in convolutions. Instead, it can 
    serve as a tool for estimating the seeing of an image. 
    
    Output: table containing the coordinates and instrumental magnitudes of the 
    detected, ePSF-fit sources
    """

    # load in data 
    image_data = fits.getdata(image_file)
    image_header = fits.getheader(image_file) 
    try:
        instrument = image_header["INSTRUME"]
    except KeyError:
        instrument = "Unknown"
    pixscale = image_header["PIXSCAL1"]
    
    ### SOURCE DETECTION

    ### use image segmentation to find sources with an area > pixelmin pix**2 
    ### which are above the threshold sigma*std 
    image_data = np.ma.masked_where(image_data==0.0, 
                                    image_data) # mask bad pixels (data already loaded above)
    
    ## build an actual mask
    mask = (image_data==0)
    if mask_file:
        mask = np.logical_or(mask, fits.getdata(mask_file))

    ## set detection standard deviation
    try:
        std = image_header["BKGSTD"] # header written by amakihi.bkgsub fn
    except KeyError:
        # make crude source mask, get standard deviation of background
        source_mask = make_source_mask(image_data, snr=3, npixels=5, 
                                       dilate_size=15, mask=mask)
        final_mask = np.logical_or(mask, source_mask)
        std = np.std(np.ma.masked_where(final_mask, image_data))
    
    ## use the segmentation image to get the source properties 
    # use <mask>, which does not mask sources
    segm = detect_sources(image_data, thresh_sigma*std, npixels=pixelmin,
                          mask=mask) 
    cat = source_properties(image_data, segm, mask=mask)

    ## get the catalog and coordinates for sources
    try:
        tbl = cat.to_table()
    except ValueError:
        print("SourceCatalog contains no sources. Exiting.")
        return
    
    # restrict elongation and area to obtain only unsaturated stars 
    tbl = tbl[(tbl["elongation"] <= elongation_lim)]
    tbl = tbl[(tbl["area"].value <= area_max)]

    sources = Table() # build a table 
    sources['x'] = tbl['xcentroid'] # for EPSFBuilder 
    sources['y'] = tbl['ycentroid']
    sources['flux'] = tbl['source_sum'].data/tbl["area"].data   
    sources.sort("flux")
    sources.reverse()
    
    if nstars:
        sources = sources[:min(nstars, len(sources))]

    ## setup: get WCS coords for all sources 
    w = wcs.WCS(image_header)
    sources["ra"], sources["dec"] = w.all_pix2world(sources["x"],
                                                    sources["y"], 1)
     
    ## mask out edge sources: 
    # a bounding circle for WIRCam, rectangle for MegaPrime
    xsize = image_data.shape[1]
    ysize = image_data.shape[0]
    if "WIRCam" in instrument:
        rad_limit = xsize/2.0
        dist_to_center = np.sqrt((sources['x']-xsize/2.0)**2 + 
                                 (sources['y']-ysize/2.0)**2)
        dmask = dist_to_center <= rad_limit
        sources = sources[dmask]
    else: 
        x_lims = [int(0.05*xsize), int(0.95*xsize)] 
        y_lims = [int(0.05*ysize), int(0.95*ysize)]
        dmask = (sources['x']>x_lims[0]) & (sources['x']<x_lims[1]) & (
                 sources['y']>y_lims[0]) & (sources['y']<y_lims[1])
        sources = sources[dmask]
        
    ## empirically obtain the effective Point Spread Function (ePSF)  
    nddata = NDData(image_data) # NDData object
    if mask_file: # supply a mask if needed 
        nddata.mask = fits.getdata(mask_file)
    if cutout%2 == 0: # if cutout even, subtract 1
        cutout -= 1
    stars = extract_stars(nddata, sources, size=cutout) # extract stars

    ## build the ePSF
    nstars_epsf = len(stars.all_stars) # no. of stars used in ePSF building
    
    if nstars_epsf == 0:
        print("\nNo valid sources were found to build the ePSF with the given"+
              " conditions. Exiting.")
        return
    
    if verbose:
        print(f"\n{nstars_epsf} stars used in building the ePSF")
        
    start = timer()
    epsf_builder = EPSFBuilder(oversampling=1, maxiters=7, # build it
                               progress_bar=False)
    epsf, fitted_stars = epsf_builder(stars)
    epsf_data = epsf.data
    
    end = timer() # timing 
    time_elaps = end-start
    
    print(f"Time required for ePSF building {time_elaps:.2f} s\n")
    if verbose: # print the ePSF FWHM, if desired
        ePSF_FWHM(epsf_data, True)

    epsf_hdu = fits.PrimaryHDU(data=epsf_data)
    if write_ePSF: # write, if desired
        if not(ePSF_output):
            ePSF_output = image_file.replace(".fits", "_ePSF.fits")
            
        epsf_hdu.writeto(ePSF_output, overwrite=True, output_verify="ignore")
    
    psf_model = epsf # set the model
    psf_model.x_0.fixed = True # fix centroids (known beforehand) 
    psf_model.y_0.fixed = True
 
    ### USE ASTROMETRY.NET TO FIND SOURCES TO FIT  
    # -b --> no background-subtraction
    # -O --> overwrite
    # -p <astrom_sigma> --> significance
    # -w <psf_sigma> --> estimated PSF sigma 
    # -m <alim> --> max object size for deblending is <alim>      
    options = f"-O -b -p {astrom_sigma} -w {psf_sigma}"
    options += f" -m {alim}"
    run(f"image2xy {options} {image_file}", shell=True)
    image_sources_file = image_file.replace(".fits", ".xy.fits")
    image_sources = fits.getdata(image_sources_file)
    if clean:
        run(f"rm {image_sources_file}", shell=True) # this file is not needed

    print(f'\n{len(image_sources)} stars at >{astrom_sigma}'+
          f' sigma found in image {re.sub(".*/", "", image_file)}'+
          ' with astrometry.net')   

    astrom_sources = Table() # build a table 
    astrom_sources['x_mean'] = image_sources['X'] # for BasicPSFPhotometry
    astrom_sources['y_mean'] = image_sources['Y']
    astrom_sources['flux'] = image_sources['FLUX']
    
    # initial guesses for centroids, fluxes
    pos = Table(names=['x_0', 'y_0','flux_0'], 
                data=[astrom_sources['x_mean'], astrom_sources['y_mean'], 
                      astrom_sources['flux']]) 

    ### FIT THE ePSF TO ALL DETECTED SOURCES 
    start = timer() # timing the fit 
    
    # sources separated by less than this critical separation are grouped 
    # together when fitting the PSF via the DAOGROUP algorithm
    sigma_psf = 2.0 # 2 pix
    crit_sep = 2.0*sigma_psf*gaussian_sigma_to_fwhm  # twice the PSF FWHM
    daogroup = DAOGroup(crit_sep) 

    # an astropy fitter, does Levenberg-Marquardt least-squares fitting
    fitter_tool = LevMarLSQFitter()
    
    # if we have a limit on the number of sources to fit
    if source_lim:
        try: 
            import random # pick a given no. of random sources (without replacement)
            source_rows = random.sample(list(astrom_sources), k=source_lim)
            astrom_sources = Table(names=['x_mean', 'y_mean', 'flux'], 
                                   rows=source_rows)
            pos = Table(names=['x_0', 'y_0','flux_0'], 
                        data=[astrom_sources['x_mean'], 
                              astrom_sources['y_mean'], 
                              astrom_sources['flux']])
            
            
        except ValueError:
            print("The input source limit exceeds the number of sources"+
                  " detected by astrometry, so no limit is imposed.\n")
    
    photometry = BasicPSFPhotometry(group_maker=daogroup,
                            bkg_estimator=None, # bg subtract already done
                            psf_model=psf_model,
                            fitter=fitter_tool,
                            fitshape=(11,11))
    
    result_tab = photometry(image=image_data, init_guesses=pos) # results
    residual_image = photometry.get_residual_image() # residuals of PSF fit
    residual_image = np.ma.masked_where(mask, residual_image)
    residual_image.fill_value = 0 # set to zero
    residual_image = residual_image.filled()

    
    end = timer() # timing 
    time_elaps = end - start
    print(f"Time required to fit ePSF to all sources {time_elaps:.2f} s\n")
    
    # include WCS coordinates
    pos["ra"], pos["dec"] = w.all_pix2world(pos["x_0"], pos["y_0"], 1)
    result_tab.add_column(pos['ra'])
    result_tab.add_column(pos['dec'])
    
    # mask out negative flux_fit values in the results 
    mask_flux = (result_tab['flux_fit'] >= 0.0)
    psf_sources = result_tab[mask_flux] # PSF-fit sources 
    
    # compute magnitudes and their errors and add to the table
    # error = (2.5/(ln(10)*flux_fit))*flux_unc
    mag_fit = -2.5*np.log10(psf_sources['flux_fit']) # instrumental mags
    mag_fit.name = 'mag_fit'
    mag_unc = 2.5/(psf_sources['flux_fit']*np.log(10))
    mag_unc *= psf_sources['flux_unc']
    mag_unc.name = 'mag_unc' 
    psf_sources['mag_fit'] = mag_fit
    psf_sources['mag_unc'] = mag_unc
    
    # mask entries with large magnitude uncertainties 
    mask_unc = psf_sources['mag_unc'] < 0.4
    psf_sources = psf_sources[mask_unc]
    
    if plot_ePSF: # if we wish to see the ePSF
        plt.figure(figsize=(10,9))
        plt.imshow(epsf_data, origin='lower', aspect=1, cmap='magma',
                   interpolation="nearest")
        plt.xlabel("Pixels", fontsize=16)
        plt.ylabel("Pixels", fontsize=16)
        plt.title("Effective Point-Spread Function (1 pixel = "
                                                    +str(pixscale)+
                                                    '")', fontsize=16)
        plt.colorbar(orientation="vertical", fraction=0.046, pad=0.08)
        plt.rc("xtick",labelsize=16) # note: rc settings only affect figures created after they are set
        plt.rc("ytick",labelsize=16)
        
        if not(ePSF_plotname):
            ePSF_plotname = image_file.replace(".fits", "_ePSF.png")
        plt.savefig(ePSF_plotname, bbox_inches="tight")
        plt.close()
    
    if plot_residuals: # if we wish to see a plot of the residuals
        if "WIRCam" in instrument:
            plt.figure(figsize=(10,9))
        else:
            plt.figure(figsize=(12,14))
        ax = plt.subplot(projection=w)
        plt.imshow(residual_image, cmap='magma', aspect=1, 
                   interpolation='nearest', origin='lower')
        plt.xlabel("RA (J2000)", fontsize=16)
        plt.ylabel("Dec (J2000)", fontsize=16)
        plt.title("PSF residuals", fontsize=16)
        cb = plt.colorbar(orientation='vertical', fraction=0.046, pad=0.08) 
        cb.set_label(label="ADU", fontsize=16)
        ax.coords["ra"].set_ticklabel(size=15)
        ax.coords["dec"].set_ticklabel(size=15)
        
        if not(resid_plotname):
            resid_plotname = image_file.replace(".fits", "_ePSFresiduals.png")
        plt.savefig(resid_plotname, bbox_inches="tight")
        plt.close()
    
    return psf_sources     
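A minimal usage sketch for the function above, called from the same module; the filenames are hypothetical, the image header is assumed to carry PIXSCAL1, and astrometry.net's image2xy is assumed to be on the PATH:

psf_sources = __fit_PSF("target_bkgsub.fits",
                        mask_file="target_mask.fits",
                        nstars=40,          # cap on stars used to build the ePSF
                        thresh_sigma=5.0,   # detection threshold in units of the background std
                        write_ePSF=True,    # also save the ePSF to target_bkgsub_ePSF.fits
                        verbose=True)
if psf_sources is not None:
    # instrumental magnitudes of every ePSF-fit source
    print(psf_sources["x_fit", "y_fit", "mag_fit", "mag_unc"])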
Example #22
################################## SUM BV ################################

# Start by determining the standard deviation of the sky background.
bkgrms = MADStdBackgroundRMS()
std = bkgrms(imageSumBV)

# Set the finder parameters used to detect the reference stars in the image
iraffind = IRAFStarFinder(threshold=10 * std,
                          fwhm=fwhm,
                          minsep_fwhm=0.01, roundhi=1.0, roundlo=-1.0,
                          sharplo=0.1, sharphi=0.8)

# Define a grouping criterion for the stars
daogroup = DAOGroup(2.0 * fwhm)

# Set up the sky background estimator and the fitting procedure
mmm_bkg = MMMBackground()
fitter = LevMarLSQFitter()
psf_model = IntegratedGaussianPRF(sigma=fwhm / 2.35)

# Put everything into a black box that performs iterative subtractions
# (note: fitshape must be a pair of odd integers, so fwhm is assumed to be an integer here)
photometry = IterativelySubtractedPSFPhotometry(finder=iraffind,
                                                group_maker=daogroup,
                                                bkg_estimator=mmm_bkg,
                                                psf_model=psf_model,
                                                fitter=LevMarLSQFitter(),
                                                niters=1,
                                                fitshape=(2 * fwhm - 1, 2 * fwhm - 1))

# Run everything and extract the results!
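# A hedged sketch of this execution step (the original snippet breaks off here);
# imageSumBV is assumed to be the summed B+V image defined earlier in the script.
result_tab = photometry(image=imageSumBV)
residual_image = photometry.get_residual_image()
print(result_tab['x_fit', 'y_fit', 'flux_fit'])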
def align_coordsystems(starlist,
                       psf_stars_x,
                       psf_stars_y,
                       shift_stars_x,
                       shift_stars_y,
                       wl_image,
                       plot_flag=True):
    epsf, gauss_std, n_resample = get_psf(wl_image,
                                          psf_stars_x,
                                          psf_stars_y,
                                          do_plot='yes')
    aper_rad = 4 * gauss_std / n_resample

    phot_psf = photometry.BasicPSFPhotometry(group_maker=DAOGroup(7.),
                                             psf_model=epsf,
                                             bkg_estimator=None,
                                             fitter=LevMarLSQFitter(),
                                             fitshape=(21),
                                             aperture_radius=aper_rad)

    pos = Table(names=['x_0', 'y_0'], data=[shift_stars_x, shift_stars_y])

    # determine their positions in MUSE image by fitting their PSFs
    result_tab = phot_psf.do_photometry(image=wl_image, init_guesses=pos)
    shift_phot_x = [i for i in result_tab['x_fit']]
    shift_phot_y = [i for i in result_tab['y_fit']]

    # find closest HST star
    shift_starlist = [0] * len(shift_stars_x)

    for i in range(len(shift_phot_x)):
        distance = 1000.
        x_muse, y_muse = shift_phot_x[i], shift_phot_y[i]
        for star in starlist:
            dist = ((x_muse - star.xcoord)**2 + (y_muse - star.ycoord)**2)**0.5
            if dist < distance:
                shift_starlist[i] = star
                distance = dist

    # get array of coordinates
    x_hst_list = [i.xcoord for i in shift_starlist]
    y_hst_list = [i.ycoord for i in shift_starlist]

    # coordinate transformation parameters including translation and rotation
    params = lmfit.Parameters()
    params.add('delta_x', 1., min=-10, max=10, vary=True)
    params.add('delta_y', 1., min=-10, max=10, vary=True)
    params.add('theta', 0., min=-5., max=5., vary=True)

    # shift the stars and minimize their distance
    minimizer = lmfit.Minimizer(get_coordshift,
                                params,
                                fcn_args=(shift_phot_x, shift_phot_y,
                                          x_hst_list, y_hst_list))
    result = minimizer.minimize()
    print("Done with fitting the coordinate shift.")

    # best-fit parameters
    x_shift = result.params['delta_x']
    y_shift = result.params['delta_y']
    theta = result.params['theta']

    # adjust the coordinates of all stars
    for star in starlist:
        new_x, new_y = trans_rot(star.xcoord, star.ycoord, x_shift, y_shift,
                                 theta)
        star.xcoord, star.ycoord = new_x, new_y

    if plot_flag:
        plot_spatial(wl_image, plotfname='adjusted_coords.pdf', stars=starlist)

    # sort stars by x and y coordinate
    starlist.sort(key=lambda x: x.xcoord)
    starlist.sort(key=lambda x: x.ycoord)

    return starlist
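# The helpers get_psf, get_coordshift, plot_spatial and trans_rot are project
# functions that are not shown in this snippet. A minimal sketch of what a
# rotation-plus-translation transform of the kind fitted above might look like
# (hypothetical implementation; theta is treated as degrees here, an assumption):
import numpy as np

def trans_rot(x, y, delta_x, delta_y, theta):
    """Rotate (x, y) about the origin by theta degrees, then shift by (delta_x, delta_y)."""
    t = np.deg2rad(float(theta))
    new_x = x * np.cos(t) - y * np.sin(t) + float(delta_x)
    new_y = x * np.sin(t) + y * np.cos(t) + float(delta_y)
    return new_x, new_y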
#    z = -((x-50*np.random.random())**2+(y-50*np.random.random())**2)/7
#    image += 50*np.random.random()*np.exp(z)

#plt.imshow(image)
#plt.show()

fitsStars = fits.open("Images/FitsImages/L_2019-04-08_22-59-51_c.fits")
imgStars = fitsStars[0].data.astype(np.float64)

#bkg = MMMBackground()
#background = bkg(imgStars)
#gaussian_prf = PRF()
#gaussian_prf.sigma.fixed = False
#photTester = DAOP(8,background,5,gaussian_prf,(11,11))

daogroup = DAOGroup(crit_separation=8)
mmm_bkg = MMMBackground()
iraffind = DAOStarFinder(threshold=2 * mmm_bkg(imgStars), fwhm=4.5)
fitter = LevMarLSQFitter()
gaussian_prf = IntegratedGaussianPRF(sigma=2.05)
gaussian_prf.sigma.fixed = False
photTester = IterativelySubtractedPSFPhotometry(finder=iraffind,
                                                group_maker=daogroup,
                                                bkg_estimator=mmm_bkg,
                                                psf_model=gaussian_prf,
                                                fitter=fitter,
                                                fitshape=(11, 11),
                                                niters=2)

photResults = photTester(imgStars)
print(photResults['x_fit', 'y_fit', 'flux_fit'])
Example #25
    def psf_photometry(self,
                       mbi,
                       label=None,
                       subtract_galaxy=True,
                       sersic_model=None,
                       save_residual_images=False,
                       **kwargs):

        self.mbi = mbi
        self._setup_psf()

        if subtract_galaxy:
            self.phot_image = self.subtract_galaxy(label, sersic_model,
                                                   **kwargs)
        else:
            self.phot_image = mbi.image

        if self.use_hsc_bright_mask['phot']:
            logger.info('applying hsc bright object mask')
            for b in mbi.bands:
                mask = mbi.get_hsc_bright_object_mask(b).astype(bool)
                self.phot_image[b][mask] = 0

        catalog = LsstStruct()
        self.stddev = LsstStruct()

        if save_residual_images:
            self.residual_image = LsstStruct()

        for band in mbi.bands:
            if len(mbi.stats) > 0:
                self.stddev[band] = mbi.stats[band].stdev
            else:
                self.stddev[band] = self.bkgrms(self.phot_image[band])
            daogroup = DAOGroup(self.crit_separation * self.psf_fwhm[band])
            self.daofinder_opt['fwhm'] = self.psf_fwhm[band]
            self.phot_opts['aperture_radius'] = self.aperture_radius
            self.phot_opts['aperture_radius'] *= self.psf_fwhm[band]

            daofind = DAOStarFinder(self.threshold * self.stddev[band],
                                    exclude_border=True,
                                    **self.daofinder_opt)

            logger.info('performing ' + band + '-band psf photometry')

            photometry = IterativelySubtractedPSFPhotometry(
                finder=daofind,
                group_maker=daogroup,
                psf_model=self.psf_model[band],
                **self.phot_opts)

            with warnings.catch_warnings():
                message = '.*The fit may be unsuccessful;.*'
                warnings.filterwarnings('ignore',
                                        message=message,
                                        category=AstropyUserWarning)
                catalog[band] = photometry(image=self.phot_image[band])

            if save_residual_images:
                logger.info('generating residual image')
                self.residual_image[band] = subtract_psf(
                    mbi.image[band], self.psf_model[band], catalog[band])
        return catalog
    finder = IRAFStarFinder(threshold=threshold, fwhm=4, minsep_fwhm=5, peakmax=image.max() / 0.8)

    star_table = finder(image)
    star_table.rename_columns(('xcentroid', 'ycentroid'),('x','y'))

    sigma_clip = SigmaClip(sigma=5.0)
    bkg_estimator = MMMBackground()
    bkg = Background2D(image, 5, filter_size=(3, 3),
                       sigma_clip=sigma_clip, bkg_estimator=bkg_estimator)

    nddata = NDData(image-bkg.background)
    stars = extract_stars(nddata, star_table, size=51)

    epsf, fitted_stars = EPSFBuilder(oversampling=4, maxiters=3, progress_bar=True, smoothing_kernel='quadratic')(stars)
    epsf_model = prepare_psf_model(epsf, renormalize_psf=False)

    with open(epsf_file,'wb') as f:
        pickle.dump([epsf_model, finder], f)
else:
    with open(epsf_file, 'rb') as f:
        epsf_model, finder = pickle.load(f)

phot = IterativelySubtractedPSFPhotometry(group_maker=DAOGroup(5),
                                          bkg_estimator=MMMBackground(),
                                          psf_model=epsf_model,
                                          fitshape=[31,31],
                                          finder=finder,
                                          aperture_radius=5,
                                          niters=2)
phot(image)
Example #27
def psfphotometry(imagefile,
                  ra=None,
                  dec=None,
                  x=None,
                  y=None,
                  fwhm=5.0,
                  zp=0.0,
                  gain=1.0,
                  doDifferential=False,
                  xfield=None,
                  yfield=None,
                  xfirst=None,
                  yfirst=None):

    hdulist = fits.open(imagefile)
    header = fits.getheader(imagefile)

    if x is None:
        w = WCS(header)
        x0, y0 = w.wcs_world2pix(ra, dec, 1)
        gain = 1.0
    else:
        x0, y0 = x, y

    if len(hdulist) > 3:
        image = hdulist[1].data
    else:
        image = hdulist[0].data
    image_shape = image.shape

    #daogroup = DAOGroup(crit_separation=8)
    daogroup = DAOGroup(crit_separation=25)

    mmm_bkg = MMMBackground()
    #iraffind = IRAFStarFinder(threshold=2.0*mmm_bkg(image),
    #                          fwhm=4.0)
    fitter = LevMarLSQFitter()
    gaussian_prf = IntegratedGaussianPRF(flux=1, sigma=1.7)
    gaussian_prf.sigma.fixed = False
    gaussian_prf.flux.fixed = False

    psffile = imagefile.replace(".fits", ".psf")
    fid = open(psffile, 'w')

    if len(image_shape) == 3:

        nhdu, xshape, yshape = image.shape
        dateobs = utcparser(hdulist[0].header["UTCSTART"])
        mjd = dateobs.mjd

        if "KINCYCTI" in hdulist[0].header:
            mjdall = mjd + np.arange(
                nhdu) * hdulist[0].header["KINCYCTI"] / 86400.0
        else:
            mjdall = mjd + np.arange(
                nhdu) * hdulist[0].header["EXPTIME"] / 86400.0

        mjds, mags, magerrs, fluxes, fluxerrs = [], [], [], [], []
        for jj in range(nhdu):
            if np.mod(jj, 10) == 0:
                print('PSF fitting: %d/%d' % (jj, nhdu))

            image = hdulist[0].data[jj, :, :]
            mjd = mjdall[jj]

            n, median, std = sigma_clipped_stats(image, sigma=3.0)
            daofind = DAOStarFinder(fwhm=2.0, threshold=2. * std)

            #phot_obj = IterativelySubtractedPSFPhotometry(finder=daofind,
            #                                              group_maker=daogroup,
            #                                              bkg_estimator=mmm_bkg,
            #                                              psf_model=gaussian_prf,
            #                                              fitter=fitter,
            #                                              fitshape=(21, 21),
            #                                              niters=10)

            image = image - np.median(image)
            image_slice = np.zeros(image.shape)

            slsize = 25
            xmin = np.max([0, int(x0 - slsize)])
            xmax = np.min([int(x0 + slsize), image.shape[0]])
            ymin = np.max([0, int(y0 - slsize)])
            ymax = np.min([int(y0 + slsize), image.shape[1]])
            image_slice[ymin:ymax, xmin:xmax] = 1

            if doDifferential:
                xmin_f = np.max([0, int(xfield - slsize)])
                xmax_f = np.min([int(xfield + slsize), image.shape[0]])
                ymin_f = np.max([0, int(yfield - slsize)])
                ymax_f = np.min([int(yfield + slsize), image.shape[1]])
                image_slice[ymin_f:ymax_f, xmin_f:xmax_f] = 1

            image = image * image_slice

            if (xfirst is None) or (yfirst is None):
                phot_obj = BasicPSFPhotometry(finder=daofind,
                                              group_maker=daogroup,
                                              psf_model=gaussian_prf,
                                              fitter=fitter,
                                              fitshape=(21, 21),
                                              bkg_estimator=mmm_bkg)
                phot_results = phot_obj(image)
            else:
                gaussian_prf = IntegratedGaussianPRF(flux=1, sigma=1.7)
                gaussian_prf.sigma.fixed = False
                gaussian_prf.flux.fixed = False
                gaussian_prf.x_0.fixed = False
                gaussian_prf.y_0.fixed = False

                phot_obj = BasicPSFPhotometry(group_maker=daogroup,
                                              psf_model=gaussian_prf,
                                              fitter=fitter,
                                              fitshape=(21, 21),
                                              bkg_estimator=mmm_bkg)

                pos = Table(names=['x_0', 'y_0'],
                            data=[[xfirst, xfield], [yfirst, yfield]])
                phot_results_tmp = phot_obj(image, init_guesses=pos)
                resimage = phot_obj.get_residual_image()

                pos = Table(names=['x_0', 'y_0'], data=[[x0], [y0]])

                gaussian_prf = IntegratedGaussianPRF(flux=1, sigma=1.7)
                gaussian_prf.sigma.fixed = False
                gaussian_prf.flux.fixed = False
                gaussian_prf.x_0.fixed = True
                gaussian_prf.y_0.fixed = True

                phot_obj = BasicPSFPhotometry(group_maker=daogroup,
                                              psf_model=gaussian_prf,
                                              fitter=fitter,
                                              fitshape=(7, 7),
                                              bkg_estimator=mmm_bkg)

                phot_results = phot_obj(resimage, init_guesses=pos)

                phot_results = vstack([phot_results_tmp, phot_results])

            #if True:
            if False:
                #sources = iraffind(image)
                sources = daofind(image)
                import matplotlib.pyplot as plt

                positions = np.transpose(
                    (sources['ycentroid'], sources['xcentroid']))
                apertures = CircularAperture(positions, r=4.)
                fig, axs = plt.subplots(1, 2)
                plt.axes(axs[0])
                plt.imshow(image.T,
                           origin='lower',
                           cmap='viridis',
                           aspect=1,
                           interpolation='nearest',
                           vmin=np.percentile(image[image > 0], 10),
                           vmax=np.percentile(image[image > 0], 90))
                apertures.plot(color='red')
                plt.xlim([ymin, ymax])
                plt.ylim([xmin, xmax])

                resimage = phot_obj.get_residual_image()
                plt.axes(axs[1])
                plt.imshow(resimage.T,
                           origin='lower',
                           cmap='viridis',
                           aspect=1,
                           interpolation='nearest',
                           vmin=0,
                           vmax=np.percentile(resimage[resimage > 0], 90))
                apertures.plot(color='red')
                plt.xlim([ymin, ymax])
                plt.ylim([xmin, xmax])
                plt.savefig('test_%d.png' % jj)
                plt.close()

                fig, axs = plt.subplots(1, 2)
                plt.axes(axs[0])
                plt.imshow(image.T,
                           origin='lower',
                           cmap='viridis',
                           aspect=1,
                           interpolation='nearest',
                           vmin=np.percentile(image[image > 0], 10),
                           vmax=np.percentile(image[image > 0], 90))
                apertures.plot(color='red')
                plt.xlim([ymin_f, ymax_f])
                plt.ylim([xmin_f, xmax_f])

                resimage = phot_obj.get_residual_image()
                plt.axes(axs[1])
                plt.imshow(resimage.T,
                           origin='lower',
                           cmap='viridis',
                           aspect=1,
                           interpolation='nearest',
                           vmin=np.percentile(resimage[resimage > 0], 10),
                           vmax=np.percentile(resimage[resimage > 0], 90))
                apertures.plot(color='red')
                plt.xlim([ymin_f, ymax_f])
                plt.ylim([xmin_f, xmax_f])
                plt.savefig('test_f_%d.png' % jj)
                plt.close()

            #phot_results.pprint_all()

            #print(stop)

            dist = np.sqrt((phot_results["x_fit"] - x0)**2 +
                           (phot_results["y_fit"] - y0)**2)
            idx = np.argmin(dist)
            flux = phot_results[idx]["flux_fit"]
            fluxerr = phot_results[idx]["flux_unc"]
            magerr = 1.0857 * fluxerr / flux  # 1.0857 = 2.5/ln(10)
            mag = zp - 2.5 * np.log10(flux)

            if doDifferential:
                dist = np.sqrt((phot_results["x_fit"] - xfield)**2 +
                               (phot_results["y_fit"] - yfield)**2)
                idy = np.argmin(dist)
                flux_field = phot_results[idy]["flux_fit"]
                fluxerr_field = phot_results[idy]["flux_unc"]
                magerr_field = 1.0857 * fluxerr_field / flux_field  # 1.0857 = 2.5/ln(10)
                mag_field = zp - 2.5 * np.log10(flux_field)

                mag = mag - mag_field
                magerr = np.sqrt(magerr**2 + magerr_field**2)
                fluxerr = np.sqrt((fluxerr / flux)**2 +
                                  (fluxerr_field / flux_field)**2)
                flux = flux / flux_field
                fluxerr = flux * fluxerr

            #print(phot_results[idy]["flux_fit"], phot_results[idx]["flux_fit"])

            mjds.append(mjd)
            mags.append(mag)
            magerrs.append(magerr)
            fluxes.append(flux)
            fluxerrs.append(fluxerr)

            fid.write('%.5f %.5f %.5f %.5f %.5f\n' %
                      (dateobs.mjd, mag, magerr, flux, fluxerr))
        fid.close()

        return np.array(mjds), np.array(mags), np.array(magerrs), np.array(
            fluxes), np.array(fluxerrs)

    else:
        mjds, mags, magerrs, fluxes, fluxerrs = [], [], [], [], []
        for ii, hdu in enumerate(hdulist):
            if ii == 0: continue
            header = hdulist[ii].header
            image = hdulist[ii].data
            if "DATE" not in header:
                print("Warning: 'DATE' missing from %s hdu %d/%d" %
                      (imagefile, ii, len(hdulist)))
                continue

            dateobs = Time(header["DATE"])

            phot_results = phot_obj(image)

            dist = np.sqrt((phot_results["x_fit"] - x0)**2 +
                           (phot_results["y_fit"] - y0)**2)
            idx = np.argmin(dist)
            flux = phot_results[idx]["flux_fit"]
            fluxerr = phot_results[idx]["flux_unc"]
            magerr = 1.0857 * fluxerr / flux  # 1.0857 = 2.5/ln(10)
            mag = zp - 2.5 * np.log10(flux)

            mjds.append(dateobs.mjd)
            mags.append(mag)
            magerrs.append(magerr)
            fluxes.append(flux)
            fluxerrs.append(fluxerr)

            fid.write('%.5f %.5f %.5f %.5f %.5f\n' %
                      (dateobs.mjd, mag, magerr, flux, fluxerr))
        fid.close()

        return np.array(mjds), np.array(mags), np.array(magerrs), np.array(
            fluxes), np.array(fluxerrs)
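A hedged usage sketch for the function above; the FITS filename, target coordinates and zero point are hypothetical placeholders, and the image header is assumed to carry a valid WCS:

mjds, mags, magerrs, fluxes, fluxerrs = psfphotometry("cube.fits",
                                                      ra=150.123, dec=2.345,
                                                      fwhm=5.0, zp=25.0)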
Example #28
from photutils.detection import IRAFStarFinder
from photutils.psf import IntegratedGaussianPRF, DAOGroup
from photutils.background import MMMBackground, MADStdBackgroundRMS
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.stats import gaussian_sigma_to_fwhm

bkgrms = MADStdBackgroundRMS()
std = bkgrms(image)
iraffind = IRAFStarFinder(threshold=3.5 * std,
                          fwhm=sigma_psf * gaussian_sigma_to_fwhm,
                          minsep_fwhm=0.01,
                          roundhi=5.0,
                          roundlo=-5.0,
                          sharplo=0.0,
                          sharphi=2.0)
daogroup = DAOGroup(2.0 * sigma_psf * gaussian_sigma_to_fwhm)
mmm_bkg = MMMBackground()
fitter = LevMarLSQFitter()
psf_model = IntegratedGaussianPRF(sigma=sigma_psf)
from photutils.psf import IterativelySubtractedPSFPhotometry
photometry = IterativelySubtractedPSFPhotometry(finder=iraffind,
                                                group_maker=daogroup,
                                                bkg_estimator=mmm_bkg,
                                                psf_model=psf_model,
                                                fitter=LevMarLSQFitter(),
                                                niters=1,
                                                fitshape=(11, 11))
result_tab = photometry(image=image)
residual_image = photometry.get_residual_image()

plt.subplot(1, 2, 1)
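# The snippet above (truncated at the plotting step) assumes that `image` and
# `sigma_psf` were defined beforehand; a minimal hedged setup might look like:
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits

sigma_psf = 2.0                                             # assumed PSF sigma in pixels
image = fits.getdata("example_image.fits").astype(float)    # hypothetical input frame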
Example #29
def run_iterative_PSF_photometry(setup,
                                 reduction_metadata,
                                 image_path,
                                 log,
                                 diagnostics=False):
    """Function to perform PSF-fitting photometry to all objects detected
    in an image, using DAOphot-standard routines from photutils.
    """

    iterate = False

    log.info('Performing PSF-fitting photometry on ' +
             os.path.basename(image_path))

    image_data = fits.getdata(image_path)

    psf_size = reduction_metadata.reduction_parameters[1]['PSF_SIZE'][0]

    image_idx = reduction_metadata.images_stats[1]['IM_NAME'].tolist().index(
        os.path.basename(image_path))

    fwhm = reduction_metadata.images_stats[1]['FWHM_X'][image_idx]

    log.info('Applying psf size = ' + str(psf_size))
    log.info('         fwhm = ' + str(fwhm))

    psf_radius = psf_size * fwhm

    log.info('         psf size = ' + str(psf_radius))

    star_finder = starfind.build_star_finder(reduction_metadata, image_path,
                                             log)

    daogroup = DAOGroup(2.0 * fwhm)

    log.info(' -> Build star grouping object')

    sigma_clip = SigmaClip(sigma=3.0)

    mmm_bkg = MMMBackground(sigma_clip=sigma_clip)

    log.info(' -> Build sky background model')

    fitter = LevMarLSQFitter()

    psf_model = IntegratedGaussianPRF(sigma=fwhm)

    log.info(' -> Build PSF model')

    psf_x = calc_psf_dimensions(psf_size, fwhm, log)

    if iterate:
        photometer = IterativelySubtractedPSFPhotometry(finder=star_finder,
                                                        group_maker=daogroup,
                                                        bkg_estimator=mmm_bkg,
                                                        psf_model=psf_model,
                                                        fitter=fitter,
                                                        niters=3,
                                                        fitshape=(psf_x,
                                                                  psf_x))
    else:
        photometer = BasicPSFPhotometry(finder=star_finder,
                                        group_maker=daogroup,
                                        bkg_estimator=mmm_bkg,
                                        psf_model=psf_model,
                                        fitter=fitter,
                                        fitshape=(psf_x, psf_x))
    photometry = photometer(image=image_data)
    print(photometry.colnames)
    print(photometry['flux_unc'].data)

    log.info('Photometry warnings, if any: ' +
             repr(fitter.fit_info['message']))

    log.info('Executed photometry of ' + str(len(photometry)) + ' stars')

    if diagnostics:
        store_residual_image(setup, photometer, image_path, log)

    return photometry
Example #30
def jwst_camera_fpa_data(data_dir,
                         pattern,
                         standardized_data_dir,
                         parameters,
                         overwrite_source_extraction=False):
    """Generate standardized focal plane alignment (fpa) data
       based on a JWST camera image.
    """

    save_plot = parameters['save_plot']

    file_list = glob.glob(os.path.join(data_dir, '*{}'.format(pattern)))

    if len(file_list) == 0:
        raise RuntimeError('No data found')

    file_list.sort()
    for f in file_list:

        plt.close('all')

        print()
        print('Data directory: {}'.format(data_dir))
        print('Image being processed: {}'.format(f))

        im = datamodels.open(f)
        if hasattr(im, 'data') is False:
            im.data = fits.getdata(f)
            #im.dq    = np.zeros(im.data.shape)

        header_info = OrderedDict()

        for attribute in 'telescope'.split():
            header_info[attribute] = getattr(im.meta, attribute)

        # observations
        for attribute in 'date time visit_number visit_id visit_group activity_id program_number'.split(
        ):
            header_info['observation_{}'.format(attribute)] = getattr(
                im.meta.observation, attribute)

        header_info['epoch_isot'] = '{}T{}'.format(
            header_info['observation_date'], header_info['observation_time'])

        #  instrument
        for attribute in 'name filter pupil detector'.split():
            header_info['instrument_{}'.format(attribute)] = getattr(
                im.meta.instrument, attribute)

        # subarray
        for attribute in 'name'.split():
            header_info['subarray_{}'.format(attribute)] = getattr(
                im.meta.subarray, attribute)

        # aperture
        for attribute in 'name position_angle pps_name'.split():
            try:
                value = getattr(im.meta.aperture, attribute)
            except AttributeError:
                value = None

            header_info['aperture_{}'.format(attribute)] = value

        header_info['INSTRUME'] = header_info['instrument_name']
        header_info['SIAFAPER'] = header_info['aperture_name']

        instrument_name = getattr(im.meta.instrument, 'name')
        instrument_detector = getattr(im.meta.instrument, 'detector')
        instrument_filter = getattr(im.meta.instrument, 'filter')

        # temporary solution, this should come from populated aperture attributes
        #if header_info['subarray_name'] == 'FULL':
        #    master_apertures = pysiaf.read.read_siaf_detector_layout()
        #    if header_info['instrument_name'].lower() in ['niriss', 'miri']:
        #        header_info['SIAFAPER'] = master_apertures['AperName'][np.where(master_apertures['InstrName']==header_info['instrument_name'])[0][0]]
        #    elif header_info['instrument_name'].lower() in ['fgs']:
        #        header_info['SIAFAPER'] = 'FGS{}_FULL'.format(header_info['instrument_detector'][-1])
        #    elif header_info['instrument_name'].lower() in ['nircam']:
        #        header_info['SIAFAPER'] = header_info['aperture_name']
        #else:
        #    sys.exit('Only FULL arrays are currently supported.')

        # target
        for attribute in 'ra dec catalog_name proposer_name'.split():
            header_info['target_{}'.format(attribute)] = getattr(
                im.meta.target, attribute)

        # pointing
        for attribute in 'ra_v1 dec_v1 pa_v3'.split():
            try:
                value = getattr(im.meta.pointing, attribute)
            except AttributeError:
                value = None
            header_info['pointing_{}'.format(attribute)] = value

        # add HST style keywords
        header_info['PROGRAM_VISIT'] = '{}_{}'.format(
            header_info['observation_program_number'],
            header_info['observation_visit_id'])
        header_info['PROPOSID'] = header_info['observation_program_number']
        header_info['DATE-OBS'] = header_info['observation_date']
        header_info['TELESCOP'] = header_info['telescope']
        header_info['INSTRUME'] = header_info['instrument_name']
        try:
            header_info['APERTURE'] = header_info['SIAFAPER']
        except KeyError:
            header_info['APERTURE'] = None
        header_info['CHIP'] = 0

        # TBD: Need to remove making yet another directory
        #extracted_sources_dir = os.path.join(standardized_data_dir, 'extraction')
        #if os.path.isdir(extracted_sources_dir) is False:
        #    os.makedirs(extracted_sources_dir)
        extracted_sources_file = os.path.join(
            standardized_data_dir,  #extracted_sources_dir,
            '{}_extracted_sources.fits'.format(
                os.path.basename(f).split('.')[0]))

        mask_extreme_slope_values = False
        parameters['maximum_slope_value'] = 1000.

        # Check if extracted_sources_file exists, or overwrite_source_extraction is set to True
        if (not os.path.isfile(extracted_sources_file)) or (
                overwrite_source_extraction):
            data = copy.deepcopy(im.data)
            #dq = copy.deepcopy(im.dq)

            # Convert image data to counts per second
            photmjsr = getattr(im.meta.photometry, 'conversion_megajanskys')
            data_cps = data / photmjsr

            if mask_extreme_slope_values:
                # clean up extreme slope values
                bad_index = np.where(
                    np.abs(data) > parameters['maximum_slope_value'])
                data[bad_index] = 0.
                dq[bad_index] = -1  # NOTE: requires dq = copy.deepcopy(im.dq) above, currently commented out

            bkgrms = MADStdBackgroundRMS()
            mmm_bkg = MMMBackground()
            bgrms = bkgrms(data_cps)
            bgavg = mmm_bkg(data_cps)

            # Default parameters that generally works for NIRCam/NIRISS images
            sigma_factor = 10
            round_lo, round_hi = 0.0, 0.6
            sharp_lo, sharp_hi = 0.3, 1.4
            fwhm_lo, fwhm_hi = 1.0, 20.0
            fwhm = 2.0
            minsep_fwhm = 7  # NOTE: minsep_fwhm>5 to reject artifacts around saturated stars
            flux_percent_lo, flux_percent_hi = 10, 99

            # if 'sharp_lo' in parameters:
            #    sharp_lo = parameters['sharp_lo']

            ###
            ### TBD1: Relocate params below to config parts/files
            ###
            # Use different criteria for selecting good stars
            if parameters['nominalpsf']:
                # If using Nominal PSF models
                if instrument_name == 'NIRISS':
                    #fwhm_lo, fwhm_hi = 1.0, 2.0
                    sharp_lo, sharp_hi = 0.6, 1.4
                elif instrument_name == 'FGS':
                    #fwhm_lo, fwhm_hi = 1.0, 1.4
                    sharp_lo, sharp_hi = 0.6, 1.4
                elif instrument_name == 'NIRCAM':
                    sharp_lo, sharp_hi = 0.6, 1.4
                elif instrument_name == 'MIRI':
                    sharp_lo, sharp_hi = 0.8, 1.0
                    fwhm_lo, fwhm_hi = 1.5, 2.2
                    sigma_factor = 3
                elif instrument_name == 'NIRSPEC':
                    sharp_lo, sharp_hi = 0.6, 0.8
                    round_lo, round_hi = 0.0, 0.3
                    fwhm_lo, fwhm_hi = 1.0, 1.75
            else:
                ###
                ### For OTE commissioning, tweak the params below after finding
                ### the correct ranges by running the photometry notebook.
                ###

                # If using Commissioning (non-phased) PSF models
                if instrument_name == 'NIRISS':
                    sharp_lo, sharp_hi = 0.6, 1.4
                    fwhm_lo, fwhm_hi = 1.4, 2.4

################################################################################
################################################################################
################################################################################

                elif instrument_name == 'FGS':
                    sigma_factor = 10
                    minsep_fwhm = 2.5
                    sharp_lo, sharp_hi = 0.45, 0.7
                    round_lo, round_hi = 0.0, 0.3
                    flux_percent_lo, flux_percent_hi = 2, 99
                    fwhm = 4

################################################################################
################################################################################
################################################################################

# Below works well for F200W and F356W images

                elif instrument_name == 'NIRCAM':
                    sigma_factor = 3
                    minsep_fwhm = 2.5
                    sharp_lo, sharp_hi = 0.5, 0.7
                    round_lo, round_hi = 0.0, 0.2
                    flux_percent_lo, flux_percent_hi = 2, 99
                    if 'F200W' in instrument_filter:
                        fwhm = 10
                    elif 'F356W' in instrument_filter:
                        fwhm = 8
                    elif 'F090W' in instrument_filter:
                        fwhm = 5.5
                    elif 'F277W' in instrument_filter:
                        fwhm = 6.5
                    else:
                        fwhm = 3


################################################################################
################################################################################
################################################################################

                elif instrument_name == 'MIRI':
                    sharp_lo, sharp_hi = 0.5, 1.0
                    fwhm_lo, fwhm_hi = 1.5, 2.2
                    sigma_factor = 3
                elif instrument_name == 'NIRSPEC':
                    sharp_lo, sharp_hi = 0.5, 0.8
                    round_lo, round_hi = 0.0, 0.3
                    fwhm_lo, fwhm_hi = 1.0, 1.75

            # Use IRAFStarFinder for source detection
            iraffind = IRAFStarFinder(threshold=sigma_factor * bgrms + bgavg,
                                      fwhm=fwhm,
                                      minsep_fwhm=minsep_fwhm,
                                      roundlo=round_lo,
                                      roundhi=round_hi,
                                      sharplo=sharp_lo,
                                      sharphi=sharp_hi)

            # Create default mask with all False values
            datamask = np.zeros(
                data_cps.shape,
                dtype=bool)  # This creates an array with all False

            # Mask the left (for NRS1) and right regions (for NRS2) for NIRSpec
            if instrument_detector == 'NRS1':
                datamask[:, :1023] = True  # Mask everything on the left side
            elif instrument_detector == 'NRS2':
                datamask[:, 1024:] = True  # Mask everything on the right side

            iraf_extracted_sources = iraffind(data_cps, mask=datamask)

            # Perform some basic filtering

            # Remove sources based on flux percentile
            # 10-99% works well for filtering out too faint or saturated sources
            flux_min = np.percentile(iraf_extracted_sources['flux'],
                                     flux_percent_lo)
            flux_max = np.percentile(iraf_extracted_sources['flux'],
                                     flux_percent_hi)
            iraf_extracted_sources.remove_rows(
                np.where(iraf_extracted_sources['flux'] < flux_min))
            iraf_extracted_sources.remove_rows(
                np.where(iraf_extracted_sources['flux'] > flux_max))

            # Also remove sources based on fwhm
            ###
            ### Don't use below for now - 2/23/2022 (Don't use it unless we get lots of bad sources)
            ###
            #iraf_extracted_sources.remove_rows(np.where(iraf_extracted_sources['fwhm']<fwhm_lo))
            #iraf_extracted_sources.remove_rows(np.where(iraf_extracted_sources['fwhm']>fwhm_hi))

            # Now improve the positions by re-running centroiding algorithm if necessary.
            # NOTE: For now, re-centroiding will be turned off

            ###
            ### TBD2: Add re-centroiding algorithm adopted from Paul here
            ###
            #xarr = sources_masked['xcentroid']
            #yarr = sources_masked['ycentroid']
            #newx, newy = centroid_sources(data_cps, xarr, yarr, box_size=5, centroid_func=centroid_2dg)
            #coords = np.column_stack((newx, newy))
            #srcaper = CircularAnnulus(coords, r_in=1, r_out=3)
            #srcaper_masks = srcaper.to_mask(method='center')
            #satflag = np.zeros((len(newx),),dtype=int)
            #i = 0
            #for mask in srcaper_masks:
            #    srcaper_dq = mask.multiply(dqarr)
            #    srcaper_dq_1d = srcaper_dq[mask.data>0]
            #    badpix = np.logical_and(srcaper_dq_1d>2, srcaper_dq_1d<7)
            #    reallybad = np.where(srcaper_dq_1d==1)
            #    if ((len(srcaper_dq_1d[badpix]) > 1) or (len(srcaper_dq_1d[reallybad]) > 0)):
            #        satflag[i] = 1
            #        i =+1
            #goodx = newx[np.where(satflag==0)]
            #goody = newy[np.where(satflag==0)]
            #print('Number of sources before removing saturated or bad pixels: ', len(xarr))
            #print('Number of sources without saturated or bad pixels: ', len(goodx))
            #print(' ')
            #coords = np.column_stack((goodx,goody))

            print('Number of extracted sources after filtering: {} sources'.
                  format(len(iraf_extracted_sources)))

            if parameters['use_epsf'] is True:
                size = 25
                hsize = (size - 1) / 2
                x = iraf_extracted_sources['xcentroid']
                y = iraf_extracted_sources['ycentroid']
                mask = ((x > hsize) & (x < (data_cps.shape[1] - 1 - hsize)) &
                        (y > hsize) & (y < (data_cps.shape[0] - 1 - hsize)))
                stars_tbl = Table()
                stars_tbl['x'] = x[mask]
                stars_tbl['y'] = y[mask]
                print('Using {} stars to build epsf'.format(len(stars_tbl)))

                data_cps_bkgsub = data_cps.copy()
                data_cps_bkgsub -= bgavg
                nddata = NDData(data=data_cps_bkgsub)
                stars = extract_stars(nddata, stars_tbl, size=size)

                #
                # Figure - PSF stars
                #
                nrows = 10
                ncols = 10
                fig, ax = plt.subplots(nrows=nrows,
                                       ncols=ncols,
                                       figsize=(20, 20),
                                       squeeze=True)
                ax = ax.ravel()
                for i in range(nrows * ncols):
                    if i <= len(stars) - 1:
                        norm = simple_norm(stars[i], 'log', percent=99.)
                        ax[i].imshow(stars[i],
                                     norm=norm,
                                     origin='lower',
                                     cmap='viridis')
                plt.title('{} sample stars for epsf'.format(
                    header_info['APERTURE']))
                if save_plot:
                    figname = os.path.join(
                        standardized_data_dir, '{}_sample_psfs.pdf'.format(
                            os.path.basename(f).split('.')[0]))
                    plt.savefig(figname)
                if parameters['show_extracted_sources']:
                    plt.show()

                #
                # Timer for ePSF construction
                #
                tic = time.perf_counter()
                epsf_builder = EPSFBuilder(oversampling=4,
                                           maxiters=3,
                                           progress_bar=False)
                print("Building ePSF ...")
                epsf, fitted_stars = epsf_builder(stars)
                toc = time.perf_counter()
                print("Time elapsed for building ePSF:", toc - tic)

                #
                # Figure - ePSF plot
                #
                norm_epsf = simple_norm(epsf.data, 'log', percent=99.)
                plt.figure()
                plt.imshow(epsf.data,
                           norm=norm_epsf,
                           origin='lower',
                           cmap='viridis')
                plt.colorbar()
                plt.title('{} epsf using {} stars'.format(
                    header_info['APERTURE'], len(stars_tbl)))
                if save_plot:
                    figname = os.path.join(
                        standardized_data_dir, '{}_epsf.pdf'.format(
                            os.path.basename(f).split('.')[0]))
                    plt.savefig(figname)
                if parameters['show_extracted_sources']:
                    plt.show()

                daogroup = DAOGroup(5.0 * 2.0)
                psf_model = epsf.copy()

                tic = time.perf_counter()
                photometry = IterativelySubtractedPSFPhotometry(
                    finder=iraffind,
                    group_maker=daogroup,
                    bkg_estimator=mmm_bkg,
                    psf_model=psf_model,
                    fitter=LevMarLSQFitter(),
                    niters=1,
                    fitshape=(11, 11),
                    aperture_radius=5)
                print('Performing source extraction and photometry ...')
                epsf_extracted_sources = photometry(data_cps)
                toc = time.perf_counter()
                print("Time elapsed for PSF photometry:", toc - tic)
                print('Final source extraction with epsf: {} sources'.format(
                    len(epsf_extracted_sources)))

                epsf_extracted_sources['xcentroid'] = epsf_extracted_sources[
                    'x_fit']
                epsf_extracted_sources['ycentroid'] = epsf_extracted_sources[
                    'y_fit']
                extracted_sources = epsf_extracted_sources
                extracted_sources.write(extracted_sources_file, overwrite=True)

                norm = simple_norm(data_cps, 'sqrt', percent=99.)
                diff = photometry.get_residual_image()
                plt.figure()
                ax1 = plt.subplot(1, 2, 1)
                plt.xlabel("X [pix]")
                plt.ylabel("Y [pix]")
                ax1.imshow(data_cps, norm=norm, cmap='Greys')
                ax2 = plt.subplot(1, 2, 2)
                plt.xlabel("X [pix]")
                plt.ylabel("Y [pix]")
                ax2.imshow(diff, norm=norm, cmap='Greys')
                plt.title('PSF subtracted image for {}'.format(
                    os.path.basename(f)))
                if save_plot:
                    figname = os.path.join(
                        extracted_sources_dir,
                        '{}_psfsubtracted_image.pdf'.format(
                            os.path.basename(f).split('.')[0]))
                    plt.savefig(figname)
                if parameters['show_psfsubtracted_image']:
                    plt.show()

            else:

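                # No ePSF fitting requested: keep the IRAF star-finder catalog as the
                # extracted sources.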
                extracted_sources = iraf_extracted_sources
                extracted_sources.write(extracted_sources_file, overwrite=True)

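            # Overlay r = 10 pixel apertures on the extracted positions for visual inspection.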
            positions = np.transpose((extracted_sources['xcentroid'],
                                      extracted_sources['ycentroid']))
            apertures = CircularAperture(positions, r=10)
            norm = simple_norm(data_cps, 'sqrt', percent=99.)

            plt.figure(figsize=(12, 12))
            plt.xlabel("X [pix]")
            plt.ylabel("Y [pix]")
            plt.imshow(data_cps, norm=norm, cmap='Greys', origin='lower')
            apertures.plot(color='blue', lw=1.5, alpha=0.5)
            title_string = '{}: {} selected sources'.format(
                os.path.basename(f), len(extracted_sources))
            plt.title(title_string)
            plt.tight_layout()
            if save_plot:
                figname = os.path.join(
                    standardized_data_dir, '{}_extracted_sources.pdf'.format(
                        os.path.basename(f).split('.')[0]))
                plt.savefig(figname)
            if parameters['show_extracted_sources']:
                plt.show()
            plt.close()

        else:
            extracted_sources = Table.read(extracted_sources_file)

        print('Extracted {} sources from {}'.format(len(extracted_sources), f))
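        # PSF fits can converge to negative fluxes for spurious detections; optionally drop them.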
        impose_positive_flux = True
        if impose_positive_flux and parameters['use_epsf']:
            extracted_sources.remove_rows(
                np.where(extracted_sources['flux_fit'] < 0)[0])
            print('Only {} sources have positive flux'.format(
                len(extracted_sources)))

        astrometry_uncertainty_mas = 5

        if len(extracted_sources) > 0:
            # Cal images are in DMS coordinates which correspond to the SIAF Science (SCI) frame
            extracted_sources['x_SCI'] = extracted_sources['xcentroid']
            extracted_sources['y_SCI'] = extracted_sources['ycentroid']

            # For now, astrometric uncertainty defaults to 5 mas for each source.
            extracted_sources['sigma_x_mas'] = np.ones(
                len(extracted_sources)) * astrometry_uncertainty_mas
            extracted_sources['sigma_y_mas'] = np.ones(
                len(extracted_sources)) * astrometry_uncertainty_mas

        # transfer info to astropy table header
        for key, value in header_info.items():
            extracted_sources.meta[key] = value

        extracted_sources.meta['DATAFILE'] = os.path.basename(f)
        extracted_sources.meta['DATAPATH'] = os.path.dirname(f)
        extracted_sources.meta['EPOCH'] = header_info['epoch_isot']

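        # Write the standardized extracted-source catalog as <datafile>_FPA_data.fits.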
        out_file = os.path.join(
            standardized_data_dir, '{}_FPA_data.fits'.format(
                extracted_sources.meta['DATAFILE'].split('.')[0]))

        print('Writing {}'.format(out_file))
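        # Suppress Astropy warnings raised while writing the table (e.g. from metadata serialization).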
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', AstropyWarning, append=True)
            extracted_sources.write(out_file, overwrite=True)

    return im