Example #1
def nddata_cutout2d(nddata, position, size, mode='trim', fill_value=np.nan):
    """
    Create a 2D cutout of a `~astropy.nddata.NDData` object.

    Specifically, cutouts will be made for the ``nddata.data`` and
    ``nddata.mask`` (if present) arrays.  If ``nddata.wcs`` exists, then
    it will also be updated.

    Note that a cutout will not be made for ``nddata.uncertainty`` (if
    present) because it is a general object and not necessarily an array.

    Parameters
    ----------
    nddata : `~astropy.nddata.NDData`
        The 2D `~astropy.nddata.NDData` from which the cutout is taken.

    position : tuple or `~astropy.coordinates.SkyCoord`
        The position of the cutout array's center with respect to the
        ``nddata.data`` array.  The position can be specified either as
        a ``(x, y)`` tuple of pixel coordinates or a
        `~astropy.coordinates.SkyCoord`, in which case ``nddata.wcs``
        must exist.

    size : int, array-like, `~astropy.units.Quantity`
        The size of the cutout array along each axis.  If ``size`` is a
        scalar number or a scalar `~astropy.units.Quantity`, then a
        square cutout of ``size`` will be created.  If ``size`` has two
        elements, they should be in ``(ny, nx)`` order.  Scalar numbers
        in ``size`` are assumed to be in units of pixels.  ``size`` can
        also be a `~astropy.units.Quantity` object or contain
        `~astropy.units.Quantity` objects.  Such
        `~astropy.units.Quantity` objects must be in pixel or angular
        units.  For all cases, ``size`` will be converted to an integer
        number of pixels, rounding to the nearest integer.  See the
        ``mode`` keyword for additional details on the final cutout
        size.

    mode : {'trim', 'partial', 'strict'}, optional
        The mode used for creating the cutout data array.  For the
        ``'partial'`` and ``'trim'`` modes, a partial overlap of the
        cutout array and the input ``nddata.data`` array is sufficient.
        For the ``'strict'`` mode, the cutout array has to be fully
        contained within the ``nddata.data`` array, otherwise an
        `~astropy.nddata.utils.PartialOverlapError` is raised.   In all
        modes, non-overlapping arrays will raise a
        `~astropy.nddata.utils.NoOverlapError`.  In ``'partial'`` mode,
        positions in the cutout array that do not overlap with the
        ``nddata.data`` array will be filled with ``fill_value``.  In
        ``'trim'`` mode only the overlapping elements are returned, thus
        the resulting cutout array may be smaller than the requested
        ``size``.

    fill_value : number, optional
        If ``mode='partial'``, the value to fill pixels in the cutout
        array that do not overlap with the input ``nddata.data``.
        ``fill_value`` must have the same ``dtype`` as the input
        ``nddata.data`` array.

    Returns
    -------
    result : `~astropy.nddata.NDData`
        A `~astropy.nddata.NDData` object with cutouts for the data and
        mask, if input.

    Examples
    --------
    >>> import numpy as np
    >>> from astropy.nddata import NDData
    >>> import astropy.units as u
    >>> from astroimtools import nddata_cutout2d
    >>> data = np.random.random((500, 500))
    >>> unit = u.electron / u.s
    >>> mask = (data > 0.7)
    >>> meta = {'exptime': 1234 * u.s}
    >>> nddata = NDData(data, mask=mask, unit=unit, meta=meta)
    >>> cutout = nddata_cutout2d(nddata, (100, 100), (10, 10))
    >>> cutout.data.shape
    (10, 10)
    >>> cutout.mask.shape
    (10, 10)
    >>> cutout.unit
    Unit("electron / s")
    """

    from astropy.nddata.utils import Cutout2D

    if not isinstance(nddata, NDData):
        raise ValueError('nddata input must be an NDData object')

    if isinstance(position, SkyCoord):
        if nddata.wcs is None:
            raise ValueError('nddata must contain WCS if the input '
                             'position is a SkyCoord')
        position = skycoord_to_pixel(position, nddata.wcs, mode='all')

    data_cutout = Cutout2D(np.asanyarray(nddata.data), position, size,
                           wcs=nddata.wcs, mode=mode, fill_value=fill_value)
    # need to create a new NDData instead of copying/replacing
    nddata_out = NDData(data_cutout.data, unit=nddata.unit,
                        uncertainty=nddata.uncertainty, meta=nddata.meta)

    if nddata.wcs is not None:
        nddata_out.wcs = data_cutout.wcs

    if nddata.mask is not None:
        mask_cutout = Cutout2D(np.asanyarray(nddata.mask), position, size,
                               mode=mode, fill_value=fill_value)
        nddata_out.mask = mask_cutout.data

    return nddata_out
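
A minimal sketch (not part of the original listing) of the SkyCoord-based usage the docstring describes, assuming `nddata_cutout2d` and its module-level imports are in scope; the WCS, coordinates, and sizes below are made up for illustration:

import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.nddata import NDData
from astropy.wcs import WCS

# toy TAN WCS: reference pixel at the array center, 0.1"/pixel
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crval = [30.0, 45.0]          # (RA, Dec) at the reference pixel, deg
wcs.wcs.crpix = [250.0, 250.0]
wcs.wcs.cdelt = np.array([-0.1, 0.1]) / 3600.0

nddata = NDData(np.random.random((500, 500)), wcs=wcs)

# a SkyCoord position and an angular size both require nddata.wcs
position = SkyCoord(30.0 * u.deg, 45.0 * u.deg)
cutout = nddata_cutout2d(nddata, position, 3.0 * u.arcsec, mode='partial',
                         fill_value=0.0)
print(cutout.data.shape)  # (30, 30) for the 0.1"/pixel toy WCS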
Example #2
def nddata_cutout2d(nddata, position, size, mode='trim', fill_value=np.nan):
    """
    Create a 2D cutout of a `~astropy.nddata.NDData` object.

    Specifically, cutouts will be made for the ``nddata.data`` and
    ``nddata.mask`` (if present) arrays.  If ``nddata.wcs`` exists, then
    it will also be updated.

    Note that a cutout will not be made for ``nddata.uncertainty`` (if
    present) because it is a general object and not necessarily an array.

    Parameters
    ----------
    nddata : `~astropy.nddata.NDData`
        The 2D `~astropy.nddata.NDData` from which the cutout is taken.

    position : tuple or `~astropy.coordinates.SkyCoord`
        The position of the cutout array's center with respect to the
        ``nddata.data`` array.  The position can be specified either as
        a ``(x, y)`` tuple of pixel coordinates or a
        `~astropy.coordinates.SkyCoord`, in which case ``nddata.wcs``
        must exist.

    size : int, array-like, `~astropy.units.Quantity`
        The size of the cutout array along each axis.  If ``size`` is a
        scalar number or a scalar `~astropy.units.Quantity`, then a
        square cutout of ``size`` will be created.  If ``size`` has two
        elements, they should be in ``(ny, nx)`` order.  Scalar numbers
        in ``size`` are assumed to be in units of pixels.  ``size`` can
        also be a `~astropy.units.Quantity` object or contain
        `~astropy.units.Quantity` objects.  Such
        `~astropy.units.Quantity` objects must be in pixel or angular
        units.  For all cases, ``size`` will be converted to an integer
        number of pixels, rounding to the nearest integer.  See the
        ``mode`` keyword for additional details on the final cutout
        size.

    mode : {'trim', 'partial', 'strict'}, optional
        The mode used for creating the cutout data array.  For the
        ``'partial'`` and ``'trim'`` modes, a partial overlap of the
        cutout array and the input ``nddata.data`` array is sufficient.
        For the ``'strict'`` mode, the cutout array has to be fully
        contained within the ``nddata.data`` array, otherwise an
        `~astropy.nddata.utils.PartialOverlapError` is raised.   In all
        modes, non-overlapping arrays will raise a
        `~astropy.nddata.utils.NoOverlapError`.  In ``'partial'`` mode,
        positions in the cutout array that do not overlap with the
        ``nddata.data`` array will be filled with ``fill_value``.  In
        ``'trim'`` mode only the overlapping elements are returned, thus
        the resulting cutout array may be smaller than the requested
        ``size``.

    fill_value : number, optional
        If ``mode='partial'``, the value to fill pixels in the cutout
        array that do not overlap with the input ``nddata.data``.
        ``fill_value`` must have the same ``dtype`` as the input
        ``nddata.data`` array.

    Returns
    -------
    result : `~astropy.nddata.NDData`
        A `~astropy.nddata.NDData` object with cutouts for the data and
        mask, if input.

    Examples
    --------
    >>> import numpy as np
    >>> from astropy.nddata import NDData
    >>> import astropy.units as u
    >>> from astroimtools import nddata_cutout2d
    >>> data = np.random.random((500, 500))
    >>> unit = u.electron / u.s
    >>> mask = (data > 0.7)
    >>> meta = {'exptime': 1234 * u.s}
    >>> nddata = NDData(data, mask=mask, unit=unit, meta=meta)
    >>> cutout = nddata_cutout2d(nddata, (100, 100), (10, 10))
    >>> cutout.data.shape
    (10, 10)
    >>> cutout.mask.shape
    (10, 10)
    >>> cutout.unit
    Unit("electron / s")
    """

    from astropy.nddata.utils import Cutout2D

    if not isinstance(nddata, NDData):
        raise TypeError('nddata input must be an NDData object')

    if isinstance(position, SkyCoord):
        if nddata.wcs is None:
            raise ValueError('nddata must contain WCS if the input '
                             'position is a SkyCoord')
        position = skycoord_to_pixel(position, nddata.wcs, mode='all')

    data_cutout = Cutout2D(np.asanyarray(nddata.data), position, size,
                           wcs=nddata.wcs, mode=mode, fill_value=fill_value)
    # need to create a new NDData instead of copying/replacing
    nddata_out = NDData(data_cutout.data, unit=nddata.unit,
                        uncertainty=nddata.uncertainty, meta=nddata.meta)

    if nddata.wcs is not None:
        nddata_out.wcs = data_cutout.wcs

    if nddata.mask is not None:
        mask_cutout = Cutout2D(np.asanyarray(nddata.mask), position, size,
                               mode=mode, fill_value=fill_value)
        nddata_out.mask = mask_cutout.data

    return nddata_out
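
And a small sketch contrasting the `'trim'` and `'partial'` modes documented above, under the same assumption that `nddata_cutout2d` is in scope:

import numpy as np
from astropy.nddata import NDData

nddata = NDData(np.arange(100.0).reshape(10, 10))

# 'trim' keeps only the overlapping pixels, so the cutout shrinks at the edge
small = nddata_cutout2d(nddata, (0, 0), 5, mode='trim')
print(small.data.shape)   # (3, 3)

# 'partial' keeps the requested shape and pads with fill_value
padded = nddata_cutout2d(nddata, (0, 0), 5, mode='partial', fill_value=0.0)
print(padded.data.shape)  # (5, 5)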
Example #3
def build_ePSF_astrometry(image_file,
                          mask_file=None,
                          nstars=40,
                          image_source_file=None,
                          astrom_sigma=5.0,
                          psf_sigma=5.0,
                          alim=10000,
                          lowper=0.6,
                          highper=0.9,
                          keep=False,
                          cutout=35,
                          write=True,
                          output=None,
                          plot=False,
                          output_plot=None,
                          verbose=False):
    """Build the effective Point-Spread Function using a sample of stars from
    some image acquired via the `image2xy` tool of `astrometry.net`.

    Arguments
    ---------
    image_file : str
        Filename for a **background-subtracted** image
    mask_file : str, optional
        Filename for a mask file (default None)
    nstars : int, optional
        *Maximum* number of stars to use in building the ePSF (default 40;
        set to None to impose no limit)
    image_source_file : str, optional
        Filename for a `.xy.fits` file containing detected sources with their 
        pixel coordinates and **background-subtracted** flux (default None, in 
        which case a new such file is produced)
    astrom_sigma : float, optional
        Detection significance when using `image2xy` in `astrometry.net` to 
        find sources (default 5.0)
    psf_sigma : float, optional
        Sigma of the approximate Gaussian PSF of the images (default 5.0)
    alim : int, optional
        *Maximum* allowed source area in square pixels for `astrometry.net`, 
        above which sources will be deblended (default 10000)
    lowper, highper : float, optional
        Lower and upper flux percentiles (as a fraction between 0 and 1) such
        that sources outside the corresponding flux range will be excluded from 
        ePSF building (default 0.6 and 0.9, respectively)
    keep : bool, optional
        Whether to keep the source list file (`.xy.fits` files; default False)
    cutout : int, optional
        Cutout size around each star in pixels (default 35; must be **odd**; 
        rounded **down** if even)
    write : bool, optional
        Whether to write the ePSF to a new fits file (default True)
    output : str, optional
        Name for the output ePSF data fits file (default
        `image_file.replace(".fits", "_ePSF.fits")`)
    plot : bool, optional
        Whether to plot the newly-built ePSF (default False)
    output_plot : str, optional
        Name for the output figure (default 
        `image_file.replace(".fits", "_ePSF.png")`)
    verbose : bool, optional
        Whether to be verbose (default False)

    Returns
    -------
    np.ndarray
        The ePSF data in a 2D array
    
    Notes
    -----
    Uses `astrometry.net` to obtain a list of sources in the image with their
    x, y coordinates, flux, and background at their location. (If a list of
    sources has already been obtained with `solve-field` or `image2xy`, it can
    be passed in via `image_source_file`.) Then selects stars with fluxes
    between the `lowper` and `highper` percentiles.
    
    Finally, uses `EPSFBuilder` to empirically obtain the ePSF of these stars. 
    Optionally writes and/or plots the obtained ePSF.
    
    **The ePSF obtained here should not be used in convolutions.** Instead, it 
    can serve as a tool for estimating the seeing of an image. 
    """

    # ignore annoying warnings from photutils
    from astropy.utils.exceptions import AstropyWarning
    warnings.simplefilter('ignore', category=AstropyWarning)

    from astropy.nddata import NDData
    from photutils.psf import extract_stars
    from photutils import EPSFBuilder

    # load in data
    image_data = fits.getdata(image_file)
    image_header = fits.getheader(image_file)
    try:
        instrument = image_header["INSTRUME"]
    except KeyError:
        instrument = "Unknown"

    ### source detection
    ## use a pre-existing source list obtained by astrometry.net, if supplied
    if image_source_file:
        image_sources = fits.getdata(image_source_file)

    ## otherwise, use astrometry.net to find the sources
    # -b --> no background-subtraction
    # -O --> overwrite
    # -p <astrom_sigma> --> significance
    # -w <psf_sigma> --> estimated PSF sigma
    # -m <alim> --> max object size for deblending is <alim>
    else:
        options = f" -b -O -p {astrom_sigma} -w {psf_sigma} -m {alim}"
        run(f"image2xy {options} {image_file}", shell=True)
        image_sources_file = image_file.replace(".fits", ".xy.fits")
        image_sources = fits.getdata(image_sources_file)
        if not keep:
            run(f"rm {image_sources_file}", shell=True)  # file is not needed
        print(f'\n{len(image_sources)} stars at >{astrom_sigma} sigma found ' +
              f'in image {re.sub(".*/", "", image_file)} with astrometry.net')

    # build a table of sources for EPSFBuilder; both branches provide
    # 'X', 'Y' and 'FLUX' columns
    sources = Table()
    sources['x'] = image_sources['X']
    sources['y'] = image_sources['Y']
    sources['flux'] = image_sources['FLUX']

    if nstars:
        sources = sources[:min(nstars, len(sources))]

    ## get WCS coords for all sources
    w = wcs.WCS(image_header)
    sources["ra"], sources["dec"] = w.all_pix2world(sources["x"], sources["y"],
                                                    1)
    ## mask out edge sources:
    # a bounding circle for WIRCam, rectangle for MegaPrime
    xsize = image_data.shape[1]
    ysize = image_data.shape[0]
    if "WIRCam" in instrument:
        rad_limit = xsize / 2.0
        dist_to_center = np.sqrt((sources['x'] - xsize / 2.0)**2 +
                                 (sources['y'] - ysize / 2.0)**2)
        mask = dist_to_center <= rad_limit
        sources = sources[mask]
    else:
        x_lims = [int(0.05 * xsize), int(0.95 * xsize)]
        y_lims = [int(0.05 * ysize), int(0.95 * ysize)]
        mask = (sources['x'] > x_lims[0]) & (sources['x'] < x_lims[1]) & (
            sources['y'] > y_lims[0]) & (sources['y'] < y_lims[1])
        sources = sources[mask]

    ## empirically obtain the effective Point Spread Function (ePSF)
    nddata = NDData(image_data)  # NDData object
    if mask_file:  # supply a mask if needed
        nddata.mask = fits.getdata(mask_file)
    if cutout % 2 == 0:  # if cutout even, subtract 1
        cutout -= 1
    stars = extract_stars(nddata, sources, size=cutout)  # extract stars

    ## use only the stars with fluxes between two percentiles if using
    ## astrometry.net
    if image_source_file:
        stars_tab = Table()  # temporary table
        stars_col = Column(data=range(len(stars.all_stars)), name="stars")
        stars_tab["stars"] = stars_col  # column of indices of each star
        fluxes = [s.flux for s in stars]
        fluxes_col = Column(data=fluxes, name="flux")
        stars_tab["flux"] = fluxes_col  # column of fluxes

        # get percentiles
        per_low = np.percentile(fluxes, lowper * 100)  # lower percentile flux
        per_high = np.percentile(fluxes,
                                 highper * 100)  # upper percentile flux
        mask = (stars_tab["flux"] >= per_low) & (stars_tab["flux"] <= per_high)
        stars_tab = stars_tab[mask]  # include only stars between these fluxes
        idx_stars = (stars_tab["stars"]).data  # indices of these stars

        # update stars object
        # have to manually update all_stars AND _data attributes
        stars.all_stars = [stars[i] for i in idx_stars]
        stars._data = stars.all_stars

    ## build the ePSF
    nstars_epsf = len(stars.all_stars)  # no. of stars used in ePSF building

    if nstars_epsf == 0:
        print(
            "\nNo valid sources were found to build the ePSF with the given" +
            " conditions. Exiting.")
        return
    if verbose:
        print(f"{nstars_epsf} stars used in building the ePSF")

    epsf_builder = EPSFBuilder(
        oversampling=1,
        maxiters=7,  # build it
        progress_bar=False)
    epsf, fitted_stars = epsf_builder(stars)
    epsf_data = epsf.data

    if write:  # write, if desired
        epsf_hdu = fits.PrimaryHDU(data=epsf_data)
        if not (output):
            output = image_file.replace(".fits", "_ePSF.fits")

        epsf_hdu.writeto(output, overwrite=True, output_verify="ignore")

    if plot:  # plot, if desired
        if not (output_plot):  # set output name if not given
            output_plot = image_file.replace(".fits", "_ePSF.png")
        __plot_ePSF(epsf_data=epsf_data, output=output_plot)

    return epsf_data
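
A hedged call sketch for `build_ePSF_astrometry`; the file name below is hypothetical, and it assumes a background-subtracted FITS image with astrometry.net's `image2xy` available on the PATH:

# hypothetical inputs; build the ePSF and write/plot it alongside the image
epsf = build_ePSF_astrometry("image_bkgsub.fits", mask_file=None,
                             nstars=40, astrom_sigma=5.0, psf_sigma=5.0,
                             lowper=0.6, highper=0.9, cutout=35,
                             write=True, plot=True, verbose=True)
if epsf is not None:  # None is returned when no usable stars are found
    print(epsf.shape)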
Example #4
def build_ePSF_imsegm(image_file,
                      mask_file=None,
                      nstars=40,
                      thresh_sigma=5.0,
                      pixelmin=20,
                      etamax=1.4,
                      areamax=500,
                      cutout=35,
                      write=True,
                      output=None,
                      plot=False,
                      output_plot=None,
                      verbose=False):
    """Build the effective Point-Spread Function using a sample of stars from
    some image acquired via image segmentation.

    Arguments
    ---------
    image_file : str
        Filename for a **background-subtracted** image
    mask_file : str, optional
        Filename for a mask file (default None)
    nstars : int, optional
        *Maximum* number of stars to use in building the ePSF (default 40;
        set to None to impose no limit)
    thresh_sigma : float, optional
        Sigma threshold for source detection with image segmentation (default
        5.0)
    pixelmin : float, optional
        *Minimum* pixel area of an isophote to be considered a good source for 
        building the ePSF (default 20)
    etamax : float, optional
        *Maximum* allowed elongation for an isophote to be considered a good 
        source for building the ePSF (default 1.4)
    areamax : float, optional
        *Maximum* allowed area (in square pixels) for an isophote to be 
        considered a good source for building the ePSF (default 500)
    cutout : int, optional
        Cutout size around each star in pixels (default 35; must be **odd**; 
        rounded **down** if even)
    write : bool, optional
        Whether to write the ePSF to a new fits file (default True)
    output : str, optional
        Name for the output ePSF data fits file (default
        `image_file.replace(".fits", "_ePSF.fits")`)
    plot : bool, optional
        Whether to plot the newly-built ePSF (default False)
    output_plot : str, optional
        Name for the output figure (default 
        `image_file.replace(".fits", "_ePSF.png")`)
    verbose : bool, optional
        Whether to be verbose (default False)

    Returns
    -------
    np.ndarray
        The ePSF data in a 2D array
    
    Notes
    -----
    Uses image segmentation via `photutils` to obtain a list of sources in the 
    image with their x, y coordinates, flux, and background at their 
    location. Then uses `EPSFBuilder` to empirically obtain the ePSF of these 
    stars. Optionally writes and/or plots the obtained ePSF.
    
    **The ePSF obtained here should not be used in convolutions.** Instead, it 
    can serve as a tool for estimating the seeing of an image. 
    """

    # ignore annoying warnings from photutils
    from astropy.utils.exceptions import AstropyWarning
    warnings.simplefilter('ignore', category=AstropyWarning)

    # imports
    from astropy.nddata import NDData
    from photutils.psf import extract_stars
    from photutils import EPSFBuilder

    # load in data
    image_data = fits.getdata(image_file)
    image_header = fits.getheader(image_file)
    try:
        instrument = image_header["INSTRUME"]
    except KeyError:
        instrument = "Unknown"

    ## source detection
    # mask bad (zero-valued) pixels
    image_data = np.ma.masked_where(image_data == 0.0, image_data)

    # build an actual mask
    mask = (image_data == 0)
    if mask_file:
        mask = np.logical_or(mask, fits.getdata(mask_file))

    # set detection standard deviation
    try:
        std = image_header["BKGSTD"]  # header written by bkgsub function
    except KeyError:
        # make crude source mask, get standard deviation of background
        source_mask = make_source_mask(image_data,
                                       snr=3,
                                       npixels=5,
                                       dilate_size=15,
                                       mask=mask)
        final_mask = np.logical_or(mask, source_mask)
        std = np.std(np.ma.masked_where(final_mask, image_data))

    # use the segmentation image to get the source properties
    segm = detect_sources(image_data,
                          thresh_sigma * std,
                          npixels=pixelmin,
                          mask=mask)
    cat = source_properties(image_data, segm, mask=mask)

    ## get the catalog and coordinate/fluxes for sources, do some filtering
    try:
        tbl = cat.to_table()
    except ValueError:
        print("SourceCatalog contains no sources. Exiting.")
        return

    # restrict elongation and area to obtain only unsaturated stars
    tbl = tbl[(tbl["elongation"] <= etamax)]
    tbl = tbl[(tbl["area"].value <= areamax)]
    # build a table
    sources = Table()  # build a table
    sources['x'] = tbl['xcentroid']  # for EPSFBuilder
    sources['y'] = tbl['ycentroid']
    sources['flux'] = tbl['source_sum'].data / tbl["area"].data
    sources.sort("flux")
    sources.reverse()
    # restrict number of stars (if requested)
    if nstars: sources = sources[:min(nstars, len(sources))]

    ## get WCS coords for all sources
    w = wcs.WCS(image_header)
    sources["ra"], sources["dec"] = w.all_pix2world(sources["x"], sources["y"],
                                                    1)
    ## mask out edge sources:
    # a bounding circle for WIRCam, rectangle for MegaPrime
    xsize = image_data.shape[1]
    ysize = image_data.shape[0]
    if "WIRCam" in instrument:  # bounding circle
        rad_limit = xsize / 2.0
        dist_to_center = np.sqrt((sources['x'] - xsize / 2.0)**2 +
                                 (sources['y'] - ysize / 2.0)**2)
        mask = dist_to_center <= rad_limit
        sources = sources[mask]
    else:  # rectangle
        x_lims = [int(0.05 * xsize), int(0.95 * xsize)]
        y_lims = [int(0.05 * ysize), int(0.95 * ysize)]
        mask = (sources['x'] > x_lims[0]) & (sources['x'] < x_lims[1]) & (
            sources['y'] > y_lims[0]) & (sources['y'] < y_lims[1])
        sources = sources[mask]

    ## empirically obtain the effective Point Spread Function (ePSF)
    nddata = NDData(image_data)  # NDData object
    if mask_file:  # supply a mask if needed
        nddata.mask = fits.getdata(mask_file)
    if cutout % 2 == 0:  # if cutout even, subtract 1
        cutout -= 1
    stars = extract_stars(nddata, sources, size=cutout)  # extract stars

    ## build the ePSF
    nstars_epsf = len(stars.all_stars)  # no. of stars used in ePSF building

    if nstars_epsf == 0:
        print(
            "\nNo valid sources were found to build the ePSF with the given" +
            " conditions. Exiting.")
        return
    if verbose:
        print(f"{nstars_epsf} stars used in building the ePSF")

    epsf_builder = EPSFBuilder(
        oversampling=1,
        maxiters=7,  # build it
        progress_bar=False)
    epsf, fitted_stars = epsf_builder(stars)
    epsf_data = epsf.data

    if write:  # write, if desired
        epsf_hdu = fits.PrimaryHDU(data=epsf_data)
        if not (output):
            output = image_file.replace(".fits", "_ePSF.fits")

        epsf_hdu.writeto(output, overwrite=True, output_verify="ignore")

    if plot:  # plot, if desired
        if not (output_plot):  # set output name if not given
            output_plot = image_file.replace(".fits", "_ePSF.png")
        __plot_ePSF(epsf_data=epsf_data, output=output_plot)

    return epsf_data
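
As the Notes suggest, the ePSF is mainly useful for estimating the seeing; a sketch of that follow-up step (hypothetical file name, Gaussian fit done with astropy.modeling rather than anything from this module):

import numpy as np
from astropy.modeling import models, fitting
from astropy.stats import gaussian_sigma_to_fwhm

# hypothetical file name; skip writing/plotting, just grab the ePSF array
epsf_data = build_ePSF_imsegm("image_bkgsub.fits", thresh_sigma=5.0,
                              etamax=1.4, areamax=500, write=False)

# fit a 2D Gaussian and convert the mean sigma to a FWHM in pixels
y, x = np.mgrid[:epsf_data.shape[0], :epsf_data.shape[1]]
g_init = models.Gaussian2D(amplitude=epsf_data.max(),
                           x_mean=epsf_data.shape[1] / 2,
                           y_mean=epsf_data.shape[0] / 2)
g_fit = fitting.LevMarLSQFitter()(g_init, x, y, epsf_data)
fwhm_pix = gaussian_sigma_to_fwhm * 0.5 * (g_fit.x_stddev.value +
                                           g_fit.y_stddev.value)
print(f"approximate ePSF FWHM: {fwhm_pix:.2f} pixels")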
Example #5
def __fit_PSF(image_file, mask_file=None, nstars=40,                
              thresh_sigma=5.0, pixelmin=20, elongation_lim=1.4, area_max=500,             
              cutout=35, 
              astrom_sigma=5.0, psf_sigma=5.0, alim=10000, clean=True, 
              source_lim=None, 
              write_ePSF=False, ePSF_output=None, 
              plot_ePSF=True, ePSF_plotname=None, 
              plot_residuals=False, resid_plotname=None,
              verbose=False):
    """    
    Input: 
        general:
        - filename for a **BACKGROUND-SUBTRACTED** image
        - filename for a mask image (optional; default None)
        - maximum number of stars to use (optional; default 40; set to None
          to impose no limit)
          
        source detection:
        - sigma threshold for source detection with image segmentation 
          (optional; default 5.0)
        - *minimum* number of isophotal pixels (optional; default 20)
        - *maximum* allowed elongation for sources found by image segmentation 
          (optional; default 1.4)
        - *maximum* allowed area for sources found by image segmentation 
          (optional; default 500 pix**2)
        - cutout size around each star in pix (optional; default 35 pix; must 
          be ODD, rounded down if even)
        
        astrometry.net:
        - sigma threshold for source detection with astrometry.net (optional;
          default 5.0)
        - sigma of the Gaussian PSF of the image (optional; default 5.0)
        - maximum allowed source area in pix**2 for astrometry.net for 
          deblending (optional; default 10000; only relevant if no source list 
          file is provided)
        - whether to remove files output by image2xy once finished with them 
          (optional; default True)

        misc:
        - limit on number of sources to fit with ePSF (optional; default None 
          which imposes no limit)        
                
        writing, plotting, verbosity:
        - whether to write the derived ePSF to a fits file (optional; default 
          False)
        - name for output ePSF fits file (optional; default set below)
        - whether to plot the derived ePSF (optional; default True)
        - name for output ePSF plot (optional; default set below)
        - whether to plot the residuals of the iterative PSF fitting (optional;
          default False)
        - name for output residuals plot (optional; default set below)
        - be verbose (optional; default False)
    
    Uses image segmentation to obtain a list of sources in the image with their 
    x, y coordinates. Uses EPSFBuilder to empirically obtain the ePSF of these 
    stars. Optionally writes and/or plots the obtained ePSF. Finally, uses 
    astrometry.net to find all sources in the image, and fits them with the 
    empirically obtained ePSF.
    
    The ePSF obtained here should NOT be used in convolutions. Instead, it can 
    serve as a tool for estimating the seeing of an image. 
    
    Output: table containing the coordinates and instrumental magnitudes of the 
    detected, ePSF-fit sources
    """

    # load in data 
    image_data = fits.getdata(image_file)
    image_header = fits.getheader(image_file) 
    try:
        instrument = image_header["INSTRUME"]
    except KeyError:
        instrument = "Unknown"
    pixscale = image_header["PIXSCAL1"]
    
    ### SOURCE DETECTION

    ### use image segmentation to find sources with an area > pixelmin pix**2 
    ### which are above the threshold sigma*std 
    image_data = fits.getdata(image_file) # subfile data
    image_data = np.ma.masked_where(image_data==0.0, 
                                    image_data) # mask bad pixels
    
    ## build an actual mask
    mask = (image_data==0)
    if mask_file:
        mask = np.logical_or(mask, fits.getdata(mask_file))

    ## set detection standard deviation
    try:
        std = image_header["BKGSTD"] # header written by amakihi.bkgsub fn
    except KeyError:
        # make crude source mask, get standard deviation of background
        source_mask = make_source_mask(image_data, snr=3, npixels=5, 
                                       dilate_size=15, mask=mask)
        final_mask = np.logical_or(mask, source_mask)
        std = np.std(np.ma.masked_where(final_mask, image_data))
    
    ## use the segmentation image to get the source properties 
    # use <mask>, which does not mask sources
    segm = detect_sources(image_data, thresh_sigma*std, npixels=pixelmin,
                          mask=mask) 
    cat = source_properties(image_data, segm, mask=mask)

    ## get the catalog and coordinates for sources
    try:
        tbl = cat.to_table()
    except ValueError:
        print("SourceCatalog contains no sources. Exiting.")
        return
    
    # restrict elongation and area to obtain only unsaturated stars 
    tbl = tbl[(tbl["elongation"] <= elongation_lim)]
    tbl = tbl[(tbl["area"].value <= area_max)]

    sources = Table() # build a table 
    sources['x'] = tbl['xcentroid'] # for EPSFBuilder 
    sources['y'] = tbl['ycentroid']
    sources['flux'] = tbl['source_sum'].data/tbl["area"].data   
    sources.sort("flux")
    sources.reverse()
    
    if nstars:
        sources = sources[:min(nstars, len(sources))]

    ## setup: get WCS coords for all sources 
    w = wcs.WCS(image_header)
    sources["ra"], sources["dec"] = w.all_pix2world(sources["x"],
                                                    sources["y"], 1)
     
    ## mask out edge sources: 
    # a bounding circle for WIRCam, rectangle for MegaPrime
    xsize = image_data.shape[1]
    ysize = image_data.shape[0]
    if "WIRCam" in instrument:
        rad_limit = xsize/2.0
        dist_to_center = np.sqrt((sources['x']-xsize/2.0)**2 + 
                                 (sources['y']-ysize/2.0)**2)
        dmask = dist_to_center <= rad_limit
        sources = sources[dmask]
    else: 
        x_lims = [int(0.05*xsize), int(0.95*xsize)] 
        y_lims = [int(0.05*ysize), int(0.95*ysize)]
        dmask = (sources['x']>x_lims[0]) & (sources['x']<x_lims[1]) & (
                 sources['y']>y_lims[0]) & (sources['y']<y_lims[1])
        sources = sources[dmask]
        
    ## empirically obtain the effective Point Spread Function (ePSF)  
    nddata = NDData(image_data) # NDData object
    if mask_file: # supply a mask if needed 
        nddata.mask = fits.getdata(mask_file)
    if cutout%2 == 0: # if cutout even, subtract 1
        cutout -= 1
    stars = extract_stars(nddata, sources, size=cutout) # extract stars

    ## build the ePSF
    nstars_epsf = len(stars.all_stars) # no. of stars used in ePSF building
    
    if nstars_epsf == 0:
        print("\nNo valid sources were found to build the ePSF with the given"+
              " conditions. Exiting.")
        return
    
    if verbose:
        print(f"\n{nstars_epsf} stars used in building the ePSF")
        
    start = timer()
    epsf_builder = EPSFBuilder(oversampling=1, maxiters=7, # build it
                               progress_bar=False)
    epsf, fitted_stars = epsf_builder(stars)
    epsf_data = epsf.data
    
    end = timer() # timing 
    time_elaps = end-start
    
    # print ePSF FWHM, if desired
    print(f"Time required for ePSF building {time_elaps:.2f} s\n")
    if verbose: 
        ePSF_FWHM(epsf_data, True)

    epsf_hdu = fits.PrimaryHDU(data=epsf_data)
    if write_ePSF: # write, if desired
        if not(ePSF_output):
            ePSF_output = image_file.replace(".fits", "_ePSF.fits")
            
        epsf_hdu.writeto(ePSF_output, overwrite=True, output_verify="ignore")
    
    psf_model = epsf # set the model
    psf_model.x_0.fixed = True # fix centroids (known beforehand) 
    psf_model.y_0.fixed = True
 
    ### USE ASTROMETRY.NET TO FIND SOURCES TO FIT  
    # -b --> no background-subtraction
    # -O --> overwrite
    # -p <astrom_sigma> --> significance
    # -w <psf_sigma> --> estimated PSF sigma 
    # -m <alim> --> max object size for deblending is <alim>      
    options = f"-O -b -p {astrom_sigma} -w {psf_sigma}"
    options += f" -m {alim}"
    run(f"image2xy {options} {image_file}", shell=True)
    image_sources_file = image_file.replace(".fits", ".xy.fits")
    image_sources = fits.getdata(image_sources_file)
    if clean:
        run(f"rm {image_sources_file}", shell=True) # this file is not needed

    print(f'\n{len(image_sources)} stars at >{astrom_sigma}'+
          f' sigma found in image {re.sub(".*/", "", image_file)}'+
          ' with astrometry.net')   

    astrom_sources = Table() # build a table 
    astrom_sources['x_mean'] = image_sources['X'] # for BasicPSFPhotometry
    astrom_sources['y_mean'] = image_sources['Y']
    astrom_sources['flux'] = image_sources['FLUX']
    
    # initial guesses for centroids, fluxes
    pos = Table(names=['x_0', 'y_0','flux_0'], 
                data=[astrom_sources['x_mean'], astrom_sources['y_mean'], 
                      astrom_sources['flux']]) 

    ### FIT THE ePSF TO ALL DETECTED SOURCES 
    start = timer() # timing the fit 
    
    # sources separated by less than this critical separation are grouped 
    # together when fitting the PSF via the DAOGROUP algorithm
    sigma_psf = 2.0 # 2 pix
    crit_sep = 2.0*sigma_psf*gaussian_sigma_to_fwhm  # twice the PSF FWHM
    daogroup = DAOGroup(crit_sep) 

    # an astropy fitter, does Levenberg-Marquardt least-squares fitting
    fitter_tool = LevMarLSQFitter()
    
    # if we have a limit on the number of sources to fit
    if source_lim:
        try: 
            import random # pick a given no. of random sources 
            source_rows = random.choices(astrom_sources, k=source_lim)
            astrom_sources = Table(names=['x_mean', 'y_mean', 'flux'], 
                                   rows=source_rows)
            pos = Table(names=['x_0', 'y_0','flux_0'], 
                        data=[astrom_sources['x_mean'], 
                              astrom_sources['y_mean'], 
                              astrom_sources['flux']])
            
            
        except IndexError:
            print("The input source limit exceeds the number of sources"+
                  " detected by astrometry, so no limit is imposed.\n")
    
    photometry = BasicPSFPhotometry(group_maker=daogroup,
                            bkg_estimator=None, # bg subtract already done
                            psf_model=psf_model,
                            fitter=fitter_tool,
                            fitshape=(11,11))
    
    result_tab = photometry(image=image_data, init_guesses=pos) # results
    residual_image = photometry.get_residual_image() # residuals of PSF fit
    residual_image = np.ma.masked_where(mask, residual_image)
    residual_image.fill_value = 0 # set to zero
    residual_image = residual_image.filled()

    
    end = timer() # timing 
    time_elaps = end - start
    print(f"Time required fit ePSF to all sources {time_elaps:.2f} s\n")
    
    # include WCS coordinates
    pos["ra"], pos["dec"] = w.all_pix2world(pos["x_0"], pos["y_0"], 1)
    result_tab.add_column(pos['ra'])
    result_tab.add_column(pos['dec'])
    
    # mask out negative flux_fit values in the results 
    mask_flux = (result_tab['flux_fit'] >= 0.0)
    psf_sources = result_tab[mask_flux] # PSF-fit sources 
    
    # compute magnitudes and their errors and add to the table
    # error = (2.5/(ln(10)*flux_fit))*flux_unc
    mag_fit = -2.5*np.log10(psf_sources['flux_fit']) # instrumental mags
    mag_fit.name = 'mag_fit'
    mag_unc = 2.5/(psf_sources['flux_fit']*np.log(10))
    mag_unc *= psf_sources['flux_unc']
    mag_unc.name = 'mag_unc' 
    psf_sources['mag_fit'] = mag_fit
    psf_sources['mag_unc'] = mag_unc
    
    # mask entries with large magnitude uncertainties 
    mask_unc = psf_sources['mag_unc'] < 0.4
    psf_sources = psf_sources[mask_unc]
    
    if plot_ePSF: # if we wish to see the ePSF
        plt.figure(figsize=(10,9))
        plt.imshow(epsf_data, origin='lower', aspect=1, cmap='magma',
                   interpolation="nearest")
        plt.xlabel("Pixels", fontsize=16)
        plt.ylabel("Pixels", fontsize=16)
        plt.title("Effective Point-Spread Function (1 pixel = "
                                                    +str(pixscale)+
                                                    '")', fontsize=16)
        plt.colorbar(orientation="vertical", fraction=0.046, pad=0.08)
        plt.rc("xtick",labelsize=16) # not working?
        plt.rc("ytick",labelsize=16)
        
        if not(ePSF_plotname):
            ePSF_plotname = image_file.replace(".fits", "_ePSF.png")
        plt.savefig(ePSF_plotname, bbox_inches="tight")
        plt.close()
    
    if plot_residuals: # if we wish to see a plot of the residuals
        if "WIRCam" in instrument:
            plt.figure(figsize=(10,9))
        else:
            plt.figure(figsize=(12,14))
        ax = plt.subplot(projection=w)
        plt.imshow(residual_image, cmap='magma', aspect=1, 
                   interpolation='nearest', origin='lower')
        plt.xlabel("RA (J2000)", fontsize=16)
        plt.ylabel("Dec (J2000)", fontsize=16)
        plt.title("PSF residuals", fontsize=16)
        cb = plt.colorbar(orientation='vertical', fraction=0.046, pad=0.08) 
        cb.set_label(label="ADU", fontsize=16)
        ax.coords["ra"].set_ticklabel(size=15)
        ax.coords["dec"].set_ticklabel(size=15)
        
        if not(resid_plotname):
            resid_plotname = image_file.replace(".fits", "_ePSFresiduals.png")
        plt.savefig(resid_plotname, bbox_inches="tight")
        plt.close()
    
    return psf_sources
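
A quick numerical check of the magnitude conversion used above (m = -2.5 log10(F), hence sigma_m = 2.5 sigma_F / (F ln 10)), with made-up flux values:

import numpy as np

flux_fit, flux_unc = 1.0e4, 1.0e2      # made-up instrumental flux and error
mag_fit = -2.5 * np.log10(flux_fit)    # -10.0
mag_unc = 2.5 / (flux_fit * np.log(10)) * flux_unc
print(mag_fit, round(mag_unc, 4))      # -10.0 0.0109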