Example #1
def test_epsf_build_oversampling(oversamp):
    offsets = np.arange(oversamp) * 1. / oversamp - 0.5 + 1. / (2. * oversamp)
    xydithers = np.array(list(itertools.product(offsets, offsets)))
    xdithers = np.transpose(xydithers)[0]
    ydithers = np.transpose(xydithers)[1]

    nstars = oversamp**2
    sigma = 3.0
    sources = Table()
    offset = 50
    size = oversamp * offset + offset
    y, x = np.mgrid[0:oversamp, 0:oversamp] * offset + offset
    sources['amplitude'] = np.full((nstars, ), 100.0)
    sources['x_0'] = x.ravel() + xdithers
    sources['y_0'] = y.ravel() + ydithers
    sources['sigma'] = np.full((nstars, ), sigma)

    data = make_gaussian_prf_sources_image((size, size), sources)
    nddata = NDData(data=data)
    stars_tbl = Table()
    stars_tbl['x'] = sources['x_0']
    stars_tbl['y'] = sources['y_0']
    stars = extract_stars(nddata, stars_tbl, size=25)
    epsf_builder = EPSFBuilder(oversampling=oversamp,
                               maxiters=15,
                               progress_bar=False,
                               recentering_maxiters=20)
    epsf, fitted_stars = epsf_builder(stars)

    # input PSF shape
    size = epsf.data.shape[0]
    cen = (size - 1) / 2
    sigma2 = oversamp * sigma
    m = IntegratedGaussianPRF(sigma2, x_0=cen, y_0=cen, flux=1)
    yy, xx = np.mgrid[0:size, 0:size]
    psf = m(xx, yy)

    assert_allclose(epsf.data, psf * epsf.data.sum(), atol=2.5e-4)
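# A minimal sketch of the imports this test assumes; the module paths below
# are from recent photutils/astropy releases and may differ in older versions.
import itertools

import numpy as np
from numpy.testing import assert_allclose
from astropy.nddata import NDData
from astropy.table import Table
from photutils.datasets import make_gaussian_prf_sources_image
from photutils.psf import EPSFBuilder, IntegratedGaussianPRF, extract_stars

# the test is parametrized over the oversampling factor, e.g.:
# test_epsf_build_oversampling(2)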
Example #2
    def test_make_cutouts_inputs(self):
        # Construct image:
        image_hdu = self.construct_test_image()

        # Construct catalog
        ra = [0, 1] * u.deg
        dec = [45, 46] * u.deg
        ids = ["Target_1", "Target_2"]
        cutout_width = cutout_height = [4.0, 4.0] * u.pix

        catalog = Table(
            data=[ids, ra, dec, cutout_width, cutout_height],
            names=['id', 'ra', 'dec', 'cutout_width', 'cutout_height'])

        # From NumPy Array and WCS:
        array = image_hdu.data
        w = WCS(image_hdu.header)
        assert None not in make_cutouts(array, catalog, wcs=w)

        # From NDData Array Only:
        w = WCS(image_hdu.header)
        array = NDData(data=image_hdu.data, wcs=w)
        assert None not in make_cutouts(array, catalog)
Example #3
def test_epsf_star_residual_image():
    """
    Test to ensure ``compute_residual_image`` gives correct residuals.
    """

    size = 100
    yy, xx = np.mgrid[0:size + 1, 0:size + 1] / 4
    gmodel = IntegratedGaussianPRF().evaluate(xx, yy, 1, 12.5, 12.5, 2.5)
    epsf = EPSFModel(gmodel, oversampling=4, norm_radius=100)
    _size = 25
    data = np.zeros((_size, _size))
    _yy, _xx = np.mgrid[0:_size, 0:_size]
    data += epsf.evaluate(x=_xx, y=_yy, flux=16, x_0=12, y_0=12)
    tbl = Table()
    tbl['x'] = [12]
    tbl['y'] = [12]
    stars = extract_stars(NDData(data), tbl, size=23)
    residual = stars[0].compute_residual_image(epsf)
    # As current EPSFStar instances cannot accept IntegratedGaussianPRF as
    # input, we have to accept some loss of precision from the conversion to
    # ePSF and the spline fitting (twice), so assert_allclose cannot be more
    # precise than 0.001 currently.
    assert_allclose(np.sum(residual), 0., atol=1.e-3, rtol=1e-3)
Example #4
def build_epsf(image_num):

    size = 7
    hsize = (size - 1) / 2
    peaks_tbl = find_peaks(Reduced_Image_Data[image_num], threshold=750.)
    x = peaks_tbl['x_peak']
    y = peaks_tbl['y_peak']
    mask = ((x > hsize) &
            (x < (Reduced_Image_Data[image_num].shape[1] - 1 - hsize)) &
            (y > hsize) &
            (y < (Reduced_Image_Data[image_num].shape[0] - 1 - hsize)))

    stars_tbl = Table()
    stars_tbl['x'] = x[mask]
    stars_tbl['y'] = y[mask]

    nddata = NDData(data=Reduced_Image_Data[image_num])

    stars = extract_stars(nddata, stars_tbl, size=10)

    #nrows = 5
    #ncols = 5
    #fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize = (15, 15), squeeze = True)
    #ax = ax.ravel()

    #for i in range(nrows*ncols):
    #    norm = simple_norm(stars[i], 'log', percent = 99.)
    #    ax[i].imshow(stars[i], norm = norm, origin = 'lower', cmap = 'gray')

    epsf_builder = EPSFBuilder(oversampling=8, maxiters=20, progress_bar=False)
    epsf, fitted_stars = epsf_builder(stars)

    #norm = simple_norm(epsf.data, 'log', percent = 99.)
    #plt.imshow(epsf.data, norm = norm, origin = 'lower', cmap = 'viridis')
    #plt.colorbar()

    return epsf
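# Usage sketch: build_epsf assumes a module-level list Reduced_Image_Data of
# background-subtracted 2D arrays (hypothetical data, loaded elsewhere), e.g.
#
# epsf = build_epsf(0)
# norm = simple_norm(epsf.data, 'log', percent=99.)
# plt.imshow(epsf.data, norm=norm, origin='lower')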
Example #5
    def test_parse_nddata_with_everything(self, imviz_helper):
        ndd = NDData([[1, 2], [3, 4]],
                     mask=[[True, False], [False, False]],
                     uncertainty=StdDevUncertainty([[0.1, 0.2], [0.3, 0.4]]),
                     unit=u.MJy / u.sr,
                     wcs=WCS(naxis=2),
                     meta={'name': 'my_ndd'})
        parse_data(imviz_helper.app,
                   ndd,
                   data_label='some_data',
                   show_in_viewer=False)
        for i, attrib in enumerate(['DATA', 'MASK', 'UNCERTAINTY']):
            data = imviz_helper.app.data_collection[i]
            comp = data.get_component(attrib)
            assert data.label == f'some_data[{attrib}]'
            assert data.shape == (2, 2)
            assert data.meta['name'] == 'my_ndd'
            assert isinstance(data.coords, WCS)
            assert comp.data.shape == (2, 2)
            if attrib == 'MASK':
                assert comp.units == ''
            else:
                assert comp.units == 'MJy / sr'
        assert len(imviz_helper.app.data_collection) == 3
Example #6
    def to_model(data, meta):
        """
        Create a photutils GriddedPSFModel object from input data and meta information.

        Parameters
        ----------
        data : ndarray
            3D numpy array of PSFs at different points across the detector
        meta : dict
            Dictionary containing metadata

        Returns
        -------
        model : GriddedPSFModel
            Photutils object with 3D data array and metadata with specified grid_xypos
            and oversampling keys
        """
        try:
            from photutils import GriddedPSFModel
        except ImportError:
            raise ImportError("This method requires photutils >= 0.6")

        ndd = NDData(data, meta=meta, copy=True)

        ndd.meta['grid_xypos'] = [
            ((float(ndd.meta[key][0].split(',')[1].split(')')[0])),
             (float(ndd.meta[key][0].split(',')[0].split('(')[1])))
            for key in ndd.meta.keys() if "DET_YX" in key
        ]

        ndd.meta['oversampling'] = meta["OVERSAMP"][0]  # just pull the value
        ndd.meta = {key.lower(): ndd.meta[key] for key in ndd.meta}

        model = GriddedPSFModel(ndd)

        return model
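# A usage sketch with synthetic metadata. The DET_YX* entries mimic FITS
# header cards of the form (value, comment) whose value string is '(y, x)';
# the key names and array shapes here are illustrative assumptions, and
# to_model is assumed to be reachable as a plain function.
import numpy as np

meta = {'DET_YX0': ('(0, 0)', 'detector (y, x) of PSF 0'),
        'DET_YX1': ('(0, 1024)', 'detector (y, x) of PSF 1'),
        'DET_YX2': ('(1024, 0)', 'detector (y, x) of PSF 2'),
        'DET_YX3': ('(1024, 1024)', 'detector (y, x) of PSF 3'),
        'OVERSAMP': (4, 'oversampling factor')}
psfs = np.ones((4, 25, 25))  # one PSF per grid position
model = to_model(psfs, meta)
print(model.grid_xypos)  # (x, y) pairs recovered from the (y, x) strings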
Example #7
    def test_gridded_psf_model_invalid_inputs(self):
        data = np.ones((4, 3, 3))

        # check if NDData
        with pytest.raises(TypeError):
            GriddedPSFModel(data)

        # check PSF data dimension
        with pytest.raises(ValueError):
            GriddedPSFModel(NDData(np.ones((3, 3))))

        # check that grid_xypos is in meta
        meta = {'oversampling': 4}
        nddata = NDData(data, meta=meta)
        with pytest.raises(ValueError):
            GriddedPSFModel(nddata)

        # check grid_xypos length
        meta = {'grid_xypos': [[0, 0], [1, 0], [1, 0]],
                'oversampling': 4}
        nddata = NDData(data, meta=meta)
        with pytest.raises(ValueError):
            GriddedPSFModel(nddata)

        # check if grid_xypos is a regular grid
        meta = {'grid_xypos': [[0, 0], [1, 0], [1, 0], [3, 4]],
                'oversampling': 4}
        nddata = NDData(data, meta=meta)
        with pytest.raises(ValueError):
            GriddedPSFModel(nddata)

        # check that oversampling is in meta
        meta = {'grid_xypos': [[0, 0], [0, 1], [1, 0], [1, 1]]}
        nddata = NDData(data, meta=meta)
        with pytest.raises(ValueError):
            GriddedPSFModel(nddata)

        # check oversampling is a scalar
        meta = {'grid_xypos': [[0, 0], [0, 1], [1, 0], [1, 1]],
                'oversampling': [4, 4]}
        nddata = NDData(data, meta=meta)
        with pytest.raises(ValueError):
            GriddedPSFModel(nddata)
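    # For contrast, a minimal *valid* construction sketch (not in the original
    # suite): four PSFs on a regular 2x2 grid with a scalar oversampling
    # factor. The import path is for recent photutils; older releases exposed
    # GriddedPSFModel at the top level.
    def test_gridded_psf_model_valid_inputs_sketch(self):
        from photutils.psf import GriddedPSFModel

        meta = {'grid_xypos': [[0, 0], [0, 1], [1, 0], [1, 1]],
                'oversampling': 4}
        model = GriddedPSFModel(NDData(np.ones((4, 3, 3)), meta=meta))
        assert len(model.grid_xypos) == 4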
Example #8
def find_pinholes_regular(fname,
                          sname,
                          fdarkff,
                          fdark,
                          fff,
                          files,
                          ref_shape,
                          size,
                          threshold,
                          fwhm,
                          fitshape,
                          range_psf,
                          sigma=2.,
                          oversampling=4,
                          maxiters=3):
    """Finds and fits regullary spread pinhole positions with a ePSF in a FITS image.
    
    Parameters
    ----------
    fname : str
        Folder name of the input fits files.
    sname : str
        Folder name of the returned found and matched pinhole positions (txt files) 
    fdarkff : string
        Location of the dark images for the flat field images.
    fdark : string
        Location of the dark images for the raw images.
    fff : string
        Location of the flat field images.
    files : (1, 2)-shaped int array
        File range to create a median image
    ref_shape : (1,2)-shaped array
        Number of reference stars in x and y direction [x, y].
    size : int
        Rectangular size of the ePSF. Size must be an odd number.
    threshold : float
        The absolute image value above which to select sources.
    fwhm : float
        The full-width half-maximum (FWHM) of the major axis of the Gaussian kernel in units of pixels.
    fitshape : int or length-2 array-like
        Rectangular shape around the center of a star which will be used to collect the data to do the fitting. 
        Can be an integer to be the same along both axes. E.g., 5 is the same as (5, 5), which means to fit only at the following 
        relative pixel positions: [-2, -1, 0, 1, 2]. Each element of fitshape must be an odd number.
    range_psf : (1, 4)-shaped int array
        Position range to compute epsf [xmin,xmax,ymin,ymax]
    sigma : float
        Number of standard deviations used to perform sigma clip with a astropy.stats.SigmaClip object.
    oversampling : int or tuple of two int
        The oversampling factor(s) of the ePSF relative to the input stars along the x and y axes. 
        The oversampling can either be a single float or a tuple of two floats of the form (x_oversamp, y_oversamp). 
        If oversampling is a scalar then the oversampling will be the same for both the x and y axes.
    maxiters : int
        The maximum number of iterations to perform.
    Returns
    -------
    positions_sort : (N,2)-shaped array
        Found and matched positions of the pinholes.
    ref_positions : (N,2)-shaped array
        Matched reference grid positions.
    """

    #Load the sample of fits images
    entries = os.listdir(fname)

    data_col = np.array([fits.getdata(fname + '/' + entries[files[0]], ext=0)])
    for k in range(files[0] + 1, files[1]):  # files[0] is already loaded above
        data_col = np.append(data_col,
                             [fits.getdata(fname + '/' + entries[k], ext=0)],
                             axis=0)

    #Data reduction: dark current + flat field + bias
    data_col = data_correction(data_col, fdarkff, fdark, fff)

    #Calculate median image
    data_full = np.median(data_col, axis=0)

    data = data_full[range_psf[2]:range_psf[3], range_psf[0]:range_psf[1]]

    #Find peaks in data
    peaks_tbl = find_peaks(data, threshold=threshold)
    peaks_tbl['peak_value'].info.format = '%.8g'

    #Load data around found peaks
    hsize = (size - 1) / 2
    x = peaks_tbl['x_peak']
    y = peaks_tbl['y_peak']
    mask = ((x > hsize) & (x < (data.shape[1] - 1 - hsize)) & (y > hsize) &
            (y < (data.shape[0] - 1 - hsize)))

    stars_tbl = Table()
    stars_tbl['x'] = x[mask]
    stars_tbl['y'] = y[mask]

    #Calculate mean, median, std
    mean_val, median_val, std_val = sigma_clipped_stats(data, sigma=sigma)
    data = data - median_val

    #Find pinholes and create ePSF
    nddata = NDData(data=data)

    stars = extract_stars(nddata, stars_tbl, size=size)

    epsf_builder = EPSFBuilder(oversampling=oversampling,
                               maxiters=maxiters,
                               progress_bar=False)
    epsf, fitted_stars = epsf_builder(stars)

    #Use ePSF to find precise locations of pinholes
    daofind = DAOPhotPSFPhotometry(crit_separation=30,
                                   threshold=threshold,
                                   fwhm=fwhm,
                                   psf_model=epsf,
                                   fitshape=fitshape,
                                   aperture_radius=12,
                                   niters=1)

    #Get positions
    sources = daofind(data_full)

    for col in sources.colnames:
        sources[col].info.format = '%.8g'

    pos_full = np.transpose((sources['x_fit'], sources['y_fit']))

    #Plot found pinholes
    apertures = CircularAperture(pos_full, r=10)

    norm = ImageNormalize(stretch=SqrtStretch())

    fig, ax = plt.subplots()
    ax.set_title('Pinhole Positions')
    ax.set(xlabel='x [pixel]', ylabel='y [pixel]')
    ax.imshow(data_full, cmap='Greys', origin='lower', norm=norm)
    apertures.plot(color='blue', lw=1.5, alpha=0.5)
    ax.legend(['#pinholes = ' + str(len(pos_full[:, 0]))],
              loc='lower left',
              prop={'size': 12})
    plt.show()

    #Find central position
    xcent = (np.max(pos_full[:, 0]) + np.min(pos_full[:, 0])) / 2
    ycent = (np.max(pos_full[:, 1]) + np.min(pos_full[:, 1])) / 2

    #Find positions at the edges to set base positions for linear transformation to match pinholes with reference grid
    distance = (pos_full[:, 0] - xcent)**2 + (pos_full[:, 1] - ycent)**2
    pins = len(distance)
    sort_distance = np.partition(distance,
                                 (pins - 4, pins - 3, pins - 2, pins - 1))
    maxpos = pos_full[distance == sort_distance[pins - 1]]
    maxpos = np.append(maxpos,
                       pos_full[distance == sort_distance[pins - 2]],
                       axis=0)
    maxpos = np.append(maxpos,
                       pos_full[distance == sort_distance[pins - 3]],
                       axis=0)
    maxpos = np.append(maxpos,
                       pos_full[distance == sort_distance[pins - 4]],
                       axis=0)

    b01 = maxpos[maxpos[:, 1] < ycent]
    b23 = maxpos[maxpos[:, 1] > ycent]

    posbase = np.array(b01[b01[:, 0] < xcent])

    posbase = np.append(posbase, b01[b01[:, 0] > xcent], axis=0)
    posbase = np.append(posbase, b23[b23[:, 0] < xcent], axis=0)

    print(posbase)

    #Sort positions by matching with reference grid
    positions_sort, ref_positions = sort_positions_regular(
        pos_full, posbase, ref_shape)

    text = np.array([
        positions_sort[:, 0], positions_sort[:, 1], ref_positions[:, 0],
        ref_positions[:, 1]
    ])
    #Transpose text matrix
    text_trans = np.transpose(text)

    #Save data as txt file
    np.savetxt(
        sname + '.txt',
        text_trans,
        fmt='%1.9E',
        delimiter='\t',
        header=
        ' x-measured         y-measured         x-reference          y-reference',
        comments='')

    return positions_sort, ref_positions
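#Usage sketch with hypothetical folder names and parameters (the folder
#layout and calibration files below are assumptions, not part of the code):
#
# positions, ref = find_pinholes_regular(
#     'raw_images', 'results/pinholes', 'darks_ff', 'darks', 'flats',
#     files=(0, 10), ref_shape=(21, 21), size=25, threshold=5000.,
#     fwhm=9., fitshape=(11, 11), range_psf=(100, 1000, 100, 1000))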
Example #9
    def to_object(self, data_or_subset, attribute=None):
        """
        Convert a glue Data object to a CCDData object.

        Parameters
        ----------
        data_or_subset : `glue.core.data.Data` or `glue.core.subset.Subset`
            The data to convert to a CCDData object
        attribute : `glue.core.component_id.ComponentID`
            The attribute to use for the CCDData data
        """

        if isinstance(data_or_subset, Subset):
            data = data_or_subset.data
            subset_state = data_or_subset.subset_state
        else:
            data = data_or_subset
            subset_state = None

        if isinstance(data.coords, WCS):
            has_fitswcs = True
            wcs = data.coords
        elif isinstance(data.coords, BaseHighLevelWCS):
            has_fitswcs = False
            wcs = data.coords
        elif type(data.coords) is Coordinates or data.coords is None:
            has_fitswcs = True  # For backward compatibility
            wcs = None
        else:
            raise TypeError(
                'data.coords should be an instance of Coordinates or WCS')

        if isinstance(attribute, str):
            attribute = data.id[attribute]
        elif len(data.main_components) == 0:
            raise ValueError('Data object has no attributes.')
        elif attribute is None:
            if len(data.main_components) == 1:
                attribute = data.main_components[0]
            else:
                raise ValueError(
                    "Data object has more than one attribute, so "
                    "you will need to specify which one to use as "
                    "the flux for the spectrum using the "
                    "attribute= keyword argument.")

        component = data.get_component(attribute)

        if data.ndim != 2:
            raise ValueError(
                "Only 2-dimensional datasets can be converted to CCDData")

        values = data.get_data(attribute)

        if subset_state is None:
            mask = None
        else:
            mask = data.get_mask(subset_state=subset_state)
            values = values.copy()
            # Flip mask to match astropy.nddata formalism
            mask = ~mask

        values = values * u.Unit(component.units)

        if has_fitswcs:
            result = CCDData(values, mask=mask, wcs=wcs, meta=data.meta)
        else:
            # https://github.com/astropy/astropy/issues/11727
            result = NDData(values, mask=mask, wcs=wcs, meta=data.meta)

        return result
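    # Usage sketch, assuming an instance of the surrounding translator class
    # (hypothetical name `translator`) and a glue Data with a 2D component:
    #
    # from glue.core import Data
    # d = Data(flux=np.ones((10, 10)), label='image')
    # ccd = translator.to_object(d, attribute='flux')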
Example #10
# the original catalogue from Francesco
with fits.open(basedir / 'data' / 'interim' /
               'Nebulae_Catalogue_v2p1.fits') as hdul:
    nebulae = Table(hdul[1].data)
nebulae['age_mw'] = np.nan
nebulae['age_lw'] = np.nan

for gal_name in tqdm(np.unique(nebulae['gal_name'])):

    # read in the stellar pops ages
    filename = next(
        (data_ext / 'MUSE' / 'DR2.1' / 'copt').glob(f'{gal_name}*.fits'))
    copt_res = float(filename.stem.split('-')[1].split('asec')[0])
    with fits.open(filename) as hdul:
        age_mw = NDData(data=hdul['AGE_MW'].data,
                        meta=hdul['AGE_MW'].header,
                        wcs=WCS(hdul['AGE_MW'].header))
        age_lw = NDData(data=hdul['AGE_LW'].data,
                        meta=hdul['AGE_LW'].header,
                        wcs=WCS(hdul['AGE_LW'].header))

    tmp = nebulae[nebulae['gal_name'] == gal_name]
    positions = np.transpose((tmp['cen_x'], tmp['cen_y']))
    apertures = CircularAperture(positions, r=2)
    ages_mw = aperture_photometry(age_mw,
                                  apertures)['aperture_sum'] / apertures.area
    ages_lw = aperture_photometry(age_lw,
                                  apertures)['aperture_sum'] / apertures.area

    nebulae['age_mw'][nebulae['gal_name'] == gal_name] = ages_mw
    nebulae['age_lw'][nebulae['gal_name'] == gal_name] = ages_lw
Example #11
try:
    import matplotlib  # noqa
    HAS_MATPLOTLIB = True
except ImportError:
    HAS_MATPLOTLIB = False

DATA = np.ones((100, 100))
BKG_RMS = np.zeros((100, 100))
BKG_MESH = np.ones((4, 4))
BKG_RMS_MESH = np.zeros((4, 4))
PADBKG_MESH = np.ones((5, 5))
PADBKG_RMS_MESH = np.zeros((5, 5))
FILTER_SIZES = [(1, 1), (3, 3)]
INTERPOLATORS = [BkgZoomInterpolator(), BkgIDWInterpolator()]

DATA1 = DATA << u.ct
DATA2 = NDData(DATA, unit=None)
DATA3 = NDData(DATA, unit=u.ct)
DATA4 = CCDData(DATA, unit=u.ct)


@pytest.mark.skipif('not HAS_SCIPY')
class TestBackground2D:
    @pytest.mark.parametrize(
        ('filter_size', 'interpolator'),
        list(itertools.product(FILTER_SIZES, INTERPOLATORS)))
    def test_background(self, filter_size, interpolator):
        b = Background2D(DATA, (25, 25),
                         filter_size=filter_size,
                         interpolator=interpolator)
        assert_allclose(b.background, DATA)
        assert_allclose(b.background_rms, BKG_RMS)
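    # A quick sketch (not in the original suite): with the constant 100x100
    # DATA and a (25, 25) box, the low-resolution background mesh is 4x4,
    # matching BKG_MESH above.
    def test_background_mesh_shape_sketch(self):
        b = Background2D(DATA, (25, 25))
        assert b.background_mesh.shape == BKG_MESH.shape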
Example #12
def build_nddata(image, group_id, source_catalog=None):
    """ Return a list of NDData objects for all chips in an image.

    Parameters
    ----------
    image : filename, HDUList
        Either filename or HDUList of a single HST observation

    group_id : int
        Integer ID for group this image should be associated with; primarily
        used when separate chips are in separate files to treat them all as one
        exposure.

    source_catalog : dict, optional
        If provided (default:None), these catalogs will be attached as `catalog`
        entries in each chip's NDData.meta.  It should be provided as a
        dict of astropy Tables identified by chip number with
        each table containing sources from image extension `('sci',chip)` as
        generated by `generate_source_catalog()`.

    Returns
    -------
    ndlist : list
        List of astropy NDData defined for all chips in input image

    """
    open_file = False
    if isinstance(image, str):
        hdulist = pf.open(image)
        open_file = True
    elif isinstance(image, pf.HDUList):
        hdulist = image
    else:
        raise ValueError("Wrong type of input, {}, for build_nddata".format(
            type(image)))

    images = []
    numsci = countExtn(hdulist)
    for chip in range(1, numsci + 1):
        im_data = hdulist[('SCI', chip)].data
        dq_data = hdulist[('DQ', chip)].data
        w = wcsutil.HSTWCS(hdulist, ('SCI', chip))

        # Below, simply consider non-zero DQ data as invalid.
        # A more sophisticated approach would use bitmask module.
        # Also, here we set group ID to a different number for each image,
        # but for ACS images, for example, we likely would assign
        # the same group ID to the images corresponding to different
        # SCI extensions *of the same FITS file* so that they can be
        # aligned together.
        img = NDData(data=im_data,
                     mask=dq_data != 0,
                     wcs=w,
                     meta={
                         'chip': chip,
                         'group_id': group_id
                     })
        # append source catalog, if provided
        if source_catalog:
            imcat = source_catalog[chip]
            # rename xcentroid/ycentroid columns, if necessary, to be consistent with tweakwcs
            if 'xcentroid' in imcat.colnames:
                imcat.rename_column('xcentroid', 'x')
                imcat.rename_column('ycentroid', 'y')
            imcat.meta['name'] = 'im{:d} sources'.format(group_id)
            img.meta['catalog'] = imcat
        images.append(img)

    if open_file:
        hdulist.close()

    return images
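# Usage sketch (hypothetical file name; pf/fits, wcsutil.HSTWCS and countExtn
# come from the imports of the original module):
#
# ndlist = build_nddata('j8bt06nyq_flt.fits', group_id=1)
# print(len(ndlist), ndlist[0].meta['chip'])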
Example #13
def build_ePSF_astrometry(image_file,
                          mask_file=None,
                          nstars=40,
                          image_source_file=None,
                          astrom_sigma=5.0,
                          psf_sigma=5.0,
                          alim=10000,
                          lowper=0.6,
                          highper=0.9,
                          keep=False,
                          cutout=35,
                          write=True,
                          output=None,
                          plot=False,
                          output_plot=None,
                          verbose=False):
    """Build the effective Point-Spread Function using a sample of stars from
    some image acquired via the `image2xy` tool of `astrometry.net`.

    Arguments
    ---------
    image_file : str
        Filename for a **background-subtracted** image
    mask_file : str, optional
        Filename for a mask file (default None)
    nstars : int, optional
        *Maximum* number of stars to use in building the ePSF (default 40;
        set to None to impose no limit)
    image_source_file : str, optional
        Filename for a `.xy.fits` file containing detected sources with their 
        pixel coordinates and **background-subtracted** flux (default None, in 
        which case a new such file is produced)
    astrom_sigma : float, optional
        Detection significance when using `image2xy` in `astrometry.net` to 
        find sources (default 5.0)
    psf_sigma : float, optional
        Sigma of the approximate Gaussian PSF of the images (default 5.0)
    alim : int, optional
        *Maximum* allowed source area in square pixels for `astrometry.net`, 
        above which sources will be deblended (default 10000)
    lowper, highper : float, optional
        Lower and upper flux percentiles (as a fraction between 0 and 1) such
        that sources outside the corresponding flux range will be excluded from 
        ePSF building (default 0.6 and 0.9, respectively)
    keep : bool, optional
        Whether to keep the source list file (`.xy.fits` files; default False)
    cutout : int, optional
        Cutout size around each star in pixels (default 35; must be **odd**; 
        rounded **down** if even)
    write : bool, optional
        Whether to write the ePSF to a new fits file (default True)
    output : str, optional
        Name for the output ePSF data fits file (default
        `image_file.replace(".fits", "_ePSF.fits")`)
    plot : bool, optional
        Whether to plot the newly-built ePSF (default False)
    output_plot : str, optional
        Name for the output figure (default 
        `image_file.replace(".fits", "_ePSF.png")`)
    verbose : bool, optional
        Whether to be verbose (default False)

    Returns
    -------
    np.ndarray
        The ePSF data in a 2D array
    
    Notes
    -----
    Uses `astrometry.net` to obtain a list of sources in the image with their 
    x, y coordinates, flux, and background at their location. (If a list of 
    sources has already been obtained via `solve-field` or `image2xy`, it can 
    be passed in.) Then selects stars between the `lowper`-th and `highper`-th 
    percentile fluxes.
    
    Finally, uses `EPSFBuilder` to empirically obtain the ePSF of these stars. 
    Optionally writes and/or plots the obtained ePSF.
    
    **The ePSF obtained here should not be used in convolutions.** Instead, it 
    can serve as a tool for estimating the seeing of an image. 
    """

    # ignore annoying warnings from photutils
    from astropy.utils.exceptions import AstropyWarning
    warnings.simplefilter('ignore', category=AstropyWarning)

    from astropy.nddata import NDData
    from photutils.psf import extract_stars
    from photutils import EPSFBuilder

    # load in data
    image_data = fits.getdata(image_file)
    image_header = fits.getheader(image_file)
    try:
        instrument = image_header["INSTRUME"]
    except KeyError:
        instrument = "Unknown"

    ### source detection
    ## use pre-existing file obtained by astrometry.net, if supplied
    if image_source_file:
        image_sources = fits.getdata(image_source_file)

    ## use astrometry.net to find the sources
    # -b --> no background-subtraction
    # -O --> overwrite
    # -p <astrom_sigma> --> significance
    # -w <psf_sigma> --> estimated PSF sigma
    # -m <alim> --> max object size for deblending is <alim>
    else:
        options = f" -b -O -p {astrom_sigma} -w {psf_sigma} -m {alim}"
        run(f"image2xy {options} {image_file}", shell=True)
        image_sources_file = image_file.replace(".fits", ".xy.fits")
        image_sources = fits.getdata(image_sources_file)
        if not (keep):
            run(f"rm {image_sources_file}", shell=True)  # file is not needed
        print(f'\n{len(image_sources)} stars at >{astrom_sigma} sigma found ' +
              f'in image {re.sub(".*/", "", image_file)} with astrometry.net')

    sources = Table()  # build a table (for either source of image_sources)
    sources['x'] = image_sources['X']  # for EPSFBuilder
    sources['y'] = image_sources['Y']
    sources['flux'] = image_sources['FLUX']

    if nstars:
        sources = sources[:min(nstars, len(sources))]

    ## get WCS coords for all sources
    w = wcs.WCS(image_header)
    sources["ra"], sources["dec"] = w.all_pix2world(sources["x"], sources["y"],
                                                    1)
    ## mask out edge sources:
    # a bounding circle for WIRCam, rectangle for MegaPrime
    xsize = image_data.shape[1]
    ysize = image_data.shape[0]
    if "WIRCam" in instrument:
        rad_limit = xsize / 2.0
        dist_to_center = np.sqrt((sources['x'] - xsize / 2.0)**2 +
                                 (sources['y'] - ysize / 2.0)**2)
        mask = dist_to_center <= rad_limit
        sources = sources[mask]
    else:
        x_lims = [int(0.05 * xsize), int(0.95 * xsize)]
        y_lims = [int(0.05 * ysize), int(0.95 * ysize)]
        mask = (sources['x'] > x_lims[0]) & (sources['x'] < x_lims[1]) & (
            sources['y'] > y_lims[0]) & (sources['y'] < y_lims[1])
        sources = sources[mask]

    ## empirically obtain the effective Point Spread Function (ePSF)
    nddata = NDData(image_data)  # NDData object
    if mask_file:  # supply a mask if needed
        nddata.mask = fits.getdata(mask_file)
    if cutout % 2 == 0:  # if cutout even, subtract 1
        cutout -= 1
    stars = extract_stars(nddata, sources, size=cutout)  # extract stars

    ## use only the stars with fluxes between two percentiles if using
    ## astrometry.net
    if image_source_file:
        stars_tab = Table()  # temporary table
        stars_col = Column(data=range(len(stars.all_stars)), name="stars")
        stars_tab["stars"] = stars_col  # column of indices of each star
        fluxes = [s.flux for s in stars]
        fluxes_col = Column(data=fluxes, name="flux")
        stars_tab["flux"] = fluxes_col  # column of fluxes

        # get percentiles
        per_low = np.percentile(fluxes, lowper * 100)  # lower percentile flux
        per_high = np.percentile(fluxes,
                                 highper * 100)  # upper percentile flux
        mask = (stars_tab["flux"] >= per_low) & (stars_tab["flux"] <= per_high)
        stars_tab = stars_tab[mask]  # include only stars between these fluxes
        idx_stars = (stars_tab["stars"]).data  # indices of these stars

        # update stars object
        # have to manually update all_stars AND _data attributes
        stars.all_stars = [stars[i] for i in idx_stars]
        stars._data = stars.all_stars

    ## build the ePSF
    nstars_epsf = len(stars.all_stars)  # no. of stars used in ePSF building

    if nstars_epsf == 0:
        print(
            "\nNo valid sources were found to build the ePSF with the given" +
            " conditions. Exiting.")
        return
    if verbose:
        print(f"{nstars_epsf} stars used in building the ePSF")

    epsf_builder = EPSFBuilder(
        oversampling=1,
        maxiters=7,  # build it
        progress_bar=False)
    epsf, fitted_stars = epsf_builder(stars)
    epsf_data = epsf.data

    if write:  # write, if desired
        epsf_hdu = fits.PrimaryHDU(data=epsf_data)
        if not (output):
            output = image_file.replace(".fits", "_ePSF.fits")

        epsf_hdu.writeto(output, overwrite=True, output_verify="ignore")

    if plot:  # plot, if desired
        if not (output_plot):  # set output name if not given
            output_plot = image_file.replace(".fits", "_ePSF.png")
        __plot_ePSF(epsf_data=epsf_data, output=output_plot)

    return epsf_data
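# Usage sketch with a hypothetical background-subtracted image:
#
# epsf_data = build_ePSF_astrometry("image_bkgsub.fits", astrom_sigma=5.0,
#                                   psf_sigma=5.0, cutout=35, write=False,
#                                   verbose=True)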
Example #14
def combine_arrays(
    data,
    mask=None,
    variance=None,
    clipping_limits=(3, 3),
    clipping_method='none',
    max_iters=100,
    method='mean',
    num_threads=0,
    # weights=None,
):
    """
    Parameters
    ----------
    data : list of ndarray or list of NDData
        Data arrays.
    mask : list of ndarray, optional
        Mask arrays.
    variance : list of ndarray, optional
        Variance arrays.
    clipping_limits : tuple of int
        For sigma clipping, the lower and upper bounds: (sigma_lower,
        sigma_upper).
    clipping_method : str, {'sigclip', 'varclip', 'none'}
        Clipping method.
    max_iters : int
        Maximum number of iterations (for sigma clipping).
    method : str, {'mean', 'median', 'sum'}
        Combination method.
    num_threads : int
        Number of threads.

    """

    def flatten_arr(arr, dtype):
        return arr.astype(dtype, order='C', copy=False).ravel()

    if isinstance(data[0], NDData):
        ndds = data
        input_shape = ndds[0].data.shape
        data, mask, variance = [], [], []

        for nd in ndds:
            data.append(flatten_arr(nd.data, DATA_t))
            if nd.mask is not None:
                mask.append(flatten_arr(nd.mask, MASK_t))
            if nd.uncertainty is not None:
                if not isinstance(nd.uncertainty, VarianceUncertainty):
                    raise ValueError('uncertainty must be a VarianceUncertainty')
                variance.append(flatten_arr(nd.uncertainty.array, DATA_t))

        # Ensure mask and variance are set to None if empty
        mask = mask or None
        variance = variance or None
    else:
        input_shape = data[0].shape
        data = [flatten_arr(arr, DATA_t) for arr in data]
        if mask is not None:
            mask = [flatten_arr(arr, MASK_t) for arr in mask]
        if variance is not None:
            variance = [flatten_arr(arr, DATA_t) for arr in variance]

    if mask is None:
        mask = list(np.zeros_like(data, dtype=MASK_t))

    lsigma, hsigma = clipping_limits

    outdata, outvar, outmask = ndcombine(
        data,
        mask,
        list_of_var=variance,
        combine_method=method,
        hsigma=hsigma,
        lsigma=lsigma,
        max_iters=max_iters,
        num_threads=num_threads,
        reject_method=clipping_method,
    )

    outdata = outdata.reshape(input_shape)
    if outvar is not None:
        outvar = VarianceUncertainty(outvar.reshape(input_shape))

    out = NDData(outdata, uncertainty=outvar)
    out.meta['REJMAP'] = len(data) - outmask.reshape(input_shape)
    return out
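# Usage sketch: median-combine three NDData frames with variance clipping.
# DATA_t, MASK_t and ndcombine are module-level details assumed by
# combine_arrays; the frame values here are arbitrary.
import numpy as np
from astropy.nddata import NDData, VarianceUncertainty

frames = [NDData(np.full((5, 5), val),
                 uncertainty=VarianceUncertainty(np.ones((5, 5))))
          for val in (1.0, 2.0, 30.0)]
out = combine_arrays(frames, method='median', clipping_method='varclip')
print(out.data[0, 0])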
Example #15
def read_associations(folder,target,scalepc,HSTband='nuv',version='v1p2',data='all'):
    '''read the catalogue and spatial mask for the associations

    Parameters
    ----------

    folder : pathlib.Path object
        the parent folder (HST/)

    target : string
        the name of the target

    scalepc : float
        the scale in pc used for the association masks

    HSTband : string (nuv/v)

    version : string

    data : string
        'all', 'catalogue' or 'mask'
    '''
    
    folder = folder/f'associations_{version}'
    
    # define basefolder and check if file exists 
    if not (folder/f'{target}_{HSTband}').is_dir():
        msg = f'target not available. Use\n{",".join([x.stem for x in folder.iterdir()])}'
        raise FileNotFoundError(msg)
    target_folder = folder/f'{target}_{HSTband}'
    if not (target_folder/f'{scalepc}pc').is_dir():
        msg = f'scalepc={scalepc}pc not available. Use: '
        msg += ','.join([x.stem for x in target_folder.iterdir() if x.stem.endswith('pc')])
        raise FileNotFoundError(msg)
    folder = target_folder/f'{scalepc}pc'

    if data=='all' or data=='catalogue':
        # first the association catalogue
        catalogue_file = folder / f'{target}_phangshst_associations_{HSTband}_ws{scalepc}pc_{version}.fits'
        with fits.open(catalogue_file) as hdul:
            associations = Table(hdul[1].data)

        # modify table (rename the columns such that the clusters and associations are identical)
        associations['SkyCoord'] = SkyCoord(associations['reg_ra']*u.degree,associations['reg_dec']*u.degree)
        associations.rename_columns(['reg_id','reg_ra','reg_dec','reg_x','reg_y',
                                    'reg_dolflux_Age_MinChiSq','reg_dolflux_Mass_MinChiSq','reg_dolflux_Ebv_MinChiSq',
                                    'reg_dolflux_Age_MinChiSq_err','reg_dolflux_Mass_MinChiSq_err','reg_dolflux_Ebv_MinChiSq_err'],
                                    ['assoc_ID','RA','DEC','X','Y','age','mass','EBV','age_err','mass_err','EBV_err'])
        #for col in list(associations.columns):
        #    if col.endswith('mjy'):
        #        associations[f'{col.split("_")[0]}_FLUX'] = 1e20*associations[col]*u.mJy.to(u.erg/u.s/u.cm**2/u.Hz)
        #    if col.endswith('mjy_err'):
        #        associations[f'{col.split("_")[0]}_FLUX_ERR'] = 1e20*associations[col]*u.mJy.to(u.erg/u.s/u.cm**2/u.Hz)

        for col in list(associations.columns):
            if col.endswith('mjy'):
                band = col.split("_")[0]
                associations[f'{band}_FLUX'] = associations[col]*freq_to_wave(band)
                associations[f'{band}_FLUX_ERR'] = associations[col+'_err']*freq_to_wave(band)

        if data=='catalogue':
            return associations

    if data=='all' or data=='mask':
        # next the spatial masks for the associations
        mask_file = folder / f'{target}_phangshst_associations_{HSTband}_ws{scalepc}pc_idmask_{version}.fits'
        with fits.open(mask_file) as hdul:
            mask = hdul[0].data.astype(float)
            mask[mask==0] = np.nan
            associations_mask = NDData(mask,
                                    mask=np.isnan(mask),
                                    meta=hdul[0].header,
                                    wcs=WCS(hdul[0].header))
        if data=='mask':
            return associations_mask

    return associations, associations_mask
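# Usage sketch with hypothetical target/scale values and folder layout:
#
# from pathlib import Path
# associations, associations_mask = read_associations(
#     Path('HST'), target='ngc0628', scalepc=32, HSTband='nuv',
#     version='v1p2', data='all')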
Example #16
    # open fits file
    image_hdu = fits.open(imageFolder + image[i])

    # Recover header
    hdr = image_hdu[hdu].header

    # Recover data
    image_data = image_hdu[hdu].data

    # Recover WCS
    wcs_orig = WCS(image_hdu[hdu].header)
    image_hdu.close()

    # Numpy array where the data and the sky coordinates wcs are saved
    nddata = NDData(data=image_data, wcs=wcs_orig)

    #########################
    ######### STAMP #########
    #########################

    # Check if needed
    if saveFits or savePDF or stacking:

        # Define the units
        size = sizeStamp * u.arcsec

        # Cut the image into stamps at the coordinates of objects
        cutout[i] = [
            Cutout2D(nddata.data, tbl['skycoord'][j], size, wcs=nddata.wcs)
            for j in range(np.size(tbl))
        ]
Example #17
def flux(nddata, tbl, zeroPoint, gain, radius):
    """
    Derive the flux and the magnitude within circular aperture
    
    Parameters
    ----------
    nddata: numpy array
        Numpy array where is saved the data and the sky coordinates wcs.
    tbl: table
        Table where the first column is the id, the second the ra coordinate, the third
        is dec coordinate and the four the skycoordinate. 
    zeroPoint: float
        Zero point of your image
    gain: float
        The gain of your image
    radius: float 
        The radius of your circle in arcsec to derive the flux
    Return
    ------
    Table with id, skycoord, flux, flux error, magnitude, magnitude error       
    """

    # by SExtractor convention, gain == 0 means effectively infinite gain
    if gain == 0.:
        gain = 1e27

    result = tbl['id', 'skycoord']
    result['flux'] = float(np.size(tbl))
    result['flux_err'] = float(np.size(tbl))

    for i in range(np.size(tbl)):

        # Recover the position for each object
        position = tbl['skycoord'][i]

        if hdr['NAXIS1'] >= 50 and hdr['NAXIS2'] >= 50:

            # size of the background map
            sizeBkg = 50
            # cut the mosaic in stamp to the wcs coordinate of your objects
            cutout = Cutout2D(nddata.data,
                              position, (sizeBkg, sizeBkg),
                              wcs=nddata.wcs)

            # Recover new data and wcs of the stamp
            data = cutout.data
            wcs = cutout.wcs

        else:

            # size of the background map
            sizeBkg = min(hdr['NAXIS1'], hdr['NAXIS2'])

            # Keep data and wcs of the initial image
            data = nddata.data
            wcs = nddata.wcs

        #########################
        ####### Background ######
        #########################

        # Mask sources
        mask = make_source_mask(data, snr=1, npixels=3, dilate_size=3)

        # Derive the background and the rms image
        bkg = Background2D(
            data,
            int(sizeBkg / 10),
            filter_size=1,
            sigma_clip=None,
            bkg_estimator=SExtractorBackground(SigmaClip(sigma=2.5)),
            bkgrms_estimator=StdBackgroundRMS(SigmaClip(sigma=2.5)),
            exclude_percentile=60,
            mask=mask)

        ###########################
        ###### Aperture Flux ######
        ###########################

        nddataStamp = NDData(data=data - bkg.background, wcs=wcs)

        # Calculate the total error
        error = calc_total_error(data, bkg.background_rms, gain)

        # Define a cicularAperture in the wcs position of your objects
        apertures = SkyCircularAperture(position, r=radius * u.arcsec)

        # Derive the flux and error flux
        phot_table = aperture_photometry(nddataStamp, apertures, error=error)
        phot_table['aperture_sum'].info.format = '%.8g'
        phot_table['aperture_sum_err'].info.format = '%.8g'

        # Recover data
        result['flux'][i] = phot_table['aperture_sum'][0]
        result['flux_err'][i] = phot_table['aperture_sum_err'][0]

        ###########################
        ######## Magnitude ########
        ###########################

    # convert flux into magnitude
    result['mag'] = -2.5 * np.log10(result['flux']) + zeroPoint

    # convert flux error into magnitude error
    result['mag_err'] = 1.0857 * (result['flux_err'] / result['flux'])

    return result
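# Usage sketch (hypothetical values); note that flux() also relies on a
# global FITS header hdr being defined in the calling module:
#
# result = flux(nddata, tbl, zeroPoint=25.0, gain=1.5, radius=1.0)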
Example #18
# functions should be provided to add and remove PSFs from images (e.g. for
# completeness studies):

result = add_psf(data, (x, y), psf, flux)

result = remove_psf(data, (x, y), psf, flux)

# Integration with NDData
# -----------------------
#
# While the photometry can be performed on data in the form of a Numpy
# array, one can also pass an astropy.nddata.NDData object (or
# anything initializing an NDData object):

from astropy.nddata import NDData
data = NDData(image, mask=mask)
results = psf_photometry(data, (x, y), psf)

# which allows masks and uncertainties to be set on the NDData object
# (the behavior of the mask when doing photometry will need to be well
# documented). Not all types of uncertainties will be supported by
# psf_photometry and aperture_photometry, so if a type of uncertainty is
# passed which is not supported, an exception will be raised:

data = NDData(image, uncertainty=CustomUncertainty(unc_image))
results = psf_photometry(data, (x, y), psf)

# UnsupportedUncertaintyError: cannot perform PSF photometry on data
# with uncertainties of type CustomUncertainty.
#
# Once photutils is integrated into astropy.photometry, one could
Example #19
def build_ePSF_imsegm(image_file,
                      mask_file=None,
                      nstars=40,
                      thresh_sigma=5.0,
                      pixelmin=20,
                      etamax=1.4,
                      areamax=500,
                      cutout=35,
                      write=True,
                      output=None,
                      plot=False,
                      output_plot=None,
                      verbose=False):
    """Build the effective Point-Spread Function using a sample of stars from
    some image acquired via image segmentation.

    Arguments
    ---------
    image_file : str
        Filename for a **background-subtracted** image
    mask_file : str, optional
        Filename for a mask file (default None)
    nstars : int, optional
        *Maximum* number of stars to use in building the ePSF (default 40;
        set to None to impose no limit)
    thresh_sigma : float, optional
        Sigma threshold for source detection with image segmentation (default
        5.0)
    pixelmin : float, optional
        *Minimum* pixel area of an isophote to be considered a good source for 
        building the ePSF (default 20)
    etamax : float, optional
        *Maximum* allowed elongation for an isophote to be considered a good 
        source for building the ePSF (default 1.4)
    areamax : float, optional
        *Maximum* allowed area (in square pixels) for an isophote to be 
        considered a good source for building the ePSF (default 500)
    cutout : int, optional
        Cutout size around each star in pixels (default 35; must be **odd**; 
        rounded **down** if even)
    write : bool, optional
        Whether to write the ePSF to a new fits file (default True)
    output : str, optional
        Name for the output ePSF data fits file (default
        `image_file.replace(".fits", "_ePSF.fits")`)
    plot : bool, optional
        Whether to plot the newly-built ePSF (default False)
    output_plot : str, optional
        Name for the output figure (default 
        `image_file.replace(".fits", "_ePSF.png")`)
    verbose : bool, optional
        Whether to be verbose (default False)

    Returns
    -------
    np.ndarray
        The ePSF data in a 2D array
    
    Notes
    -----
    Uses image segmentation via `photutils` to obtain a list of sources in the 
    image with their x, y coordinates, flux, and background at their 
    location. Then uses `EPSFBuilder` to empirically obtain the ePSF of these 
    stars. Optionally writes and/or plots the obtained ePSF.
    
    **The ePSF obtained here should not be used in convolutions.** Instead, it 
    can serve as a tool for estimating the seeing of an image. 
    """

    # ignore annoying warnings from photutils
    from astropy.utils.exceptions import AstropyWarning
    warnings.simplefilter('ignore', category=AstropyWarning)

    # imports
    from astropy.nddata import NDData
    from photutils.psf import extract_stars
    from photutils import EPSFBuilder

    # load in data
    image_data = fits.getdata(image_file)
    image_header = fits.getheader(image_file)
    try:
        instrument = image_header["INSTRUME"]
    except KeyError:
        instrument = "Unknown"

    ## source detection
    # add mask to image_data
    image_data = np.ma.masked_where(image_data == 0.0, image_data)

    # build an actual mask
    mask = (image_data == 0)
    if mask_file:
        mask = np.logical_or(mask, fits.getdata(mask_file))

    # set detection standard deviation
    try:
        std = image_header["BKGSTD"]  # header written by bkgsub function
    except KeyError:
        # make crude source mask, get standard deviation of background
        source_mask = make_source_mask(image_data,
                                       snr=3,
                                       npixels=5,
                                       dilate_size=15,
                                       mask=mask)
        final_mask = np.logical_or(mask, source_mask)
        std = np.std(np.ma.masked_where(final_mask, image_data))

    # use the segmentation image to get the source properties
    segm = detect_sources(image_data,
                          thresh_sigma * std,
                          npixels=pixelmin,
                          mask=mask)
    cat = source_properties(image_data, segm, mask=mask)

    ## get the catalog and coordinate/fluxes for sources, do some filtering
    try:
        tbl = cat.to_table()
    except ValueError:
        print("SourceCatalog contains no sources. Exiting.")
        return

    # restrict elongation and area to obtain only unsaturated stars
    tbl = tbl[(tbl["elongation"] <= etamax)]
    tbl = tbl[(tbl["area"].value <= areamax)]
    # build a table
    sources = Table()
    sources['x'] = tbl['xcentroid']  # for EPSFBuilder
    sources['y'] = tbl['ycentroid']
    sources['flux'] = tbl['source_sum'].data / tbl["area"].data
    sources.sort("flux")
    sources.reverse()
    # restrict number of stars (if requested)
    if nstars:
        sources = sources[:min(nstars, len(sources))]

    ## get WCS coords for all sources
    w = wcs.WCS(image_header)
    sources["ra"], sources["dec"] = w.all_pix2world(sources["x"], sources["y"],
                                                    1)
    ## mask out edge sources:
    # a bounding circle for WIRCam, rectangle for MegaPrime
    xsize = image_data.shape[1]
    ysize = image_data.shape[0]
    if "WIRCam" in instrument:  # bounding circle
        rad_limit = xsize / 2.0
        dist_to_center = np.sqrt((sources['x'] - xsize / 2.0)**2 +
                                 (sources['y'] - ysize / 2.0)**2)
        mask = dist_to_center <= rad_limit
        sources = sources[mask]
    else:  # rectangle
        x_lims = [int(0.05 * xsize), int(0.95 * xsize)]
        y_lims = [int(0.05 * ysize), int(0.95 * ysize)]
        mask = (sources['x'] > x_lims[0]) & (sources['x'] < x_lims[1]) & (
            sources['y'] > y_lims[0]) & (sources['y'] < y_lims[1])
        sources = sources[mask]

    ## empirically obtain the effective Point Spread Function (ePSF)
    nddata = NDData(image_data)  # NDData object
    if mask_file:  # supply a mask if needed
        nddata.mask = fits.getdata(mask_file)
    if cutout % 2 == 0:  # if cutout even, subtract 1
        cutout -= 1
    stars = extract_stars(nddata, sources, size=cutout)  # extract stars

    ## build the ePSF
    nstars_epsf = len(stars.all_stars)  # no. of stars used in ePSF building

    if nstars_epsf == 0:
        print(
            "\nNo valid sources were found to build the ePSF with the given" +
            " conditions. Exiting.")
        return
    if verbose:
        print(f"{nstars_epsf} stars used in building the ePSF")

    epsf_builder = EPSFBuilder(
        oversampling=1,
        maxiters=7,  # build it
        progress_bar=False)
    epsf, fitted_stars = epsf_builder(stars)
    epsf_data = epsf.data

    if write:  # write, if desired
        epsf_hdu = fits.PrimaryHDU(data=epsf_data)
        if not (output):
            output = image_file.replace(".fits", "_ePSF.fits")

        epsf_hdu.writeto(output, overwrite=True, output_verify="ignore")

    if plot:  # plot, if desired
        if not (output_plot):  # set output name if not given
            output_plot = image_file.replace(".fits", "_ePSF.png")
        __plot_ePSF(epsf_data=epsf_data, output=output_plot)

    return epsf_data
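# Usage sketch with a hypothetical image; an even cutout is rounded down
# (36 -> 35) inside the function:
#
# epsf_data = build_ePSF_imsegm("image_bkgsub.fits", thresh_sigma=5.0,
#                               pixelmin=20, etamax=1.4, areamax=500,
#                               cutout=36, write=False, verbose=True)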
Example #20
def gen_data(dims=(256, 1, 1, 1, 256),
             noise=1,
             signal=1,
             width=0.02,
             seed=None,
             mask=True):
    """
    generate data from scratch so we can fill an SDFITS file from scratch

    data[ntime, nbeam, npol, nband, nchan]
        (ntime,nbeam) are multiplexed for OTF - usually CRVAL3,CRVAL4 - but nbeam can be 1
                      even though beam can cycle over 16 in the case of SEQ
        npol usually in CRVAL2 - but npol can be 1
        (nband,nchan) are both under control of CRVAL1 - but band can be 1

    noise:    gaussian noise level if > 0
    signal:   peak of (gaussian) line in middle of channel ; 0 will add no line
    width:    FWHM width of (gaussian) line in fraction of band width
    seed:     set it to some integer if you want a fixed seed
    mask:     set to False if you don't need the mask (saves a bit of memory)
    
    """

    if len(dims) != 5:
        print("We currently only do 5D arrays: ", dims)
        return None

    ntime = dims[axis_time]
    nbeam = dims[axis_beam]
    npol = dims[axis_pol]
    nband = dims[axis_band]
    nchan = dims[axis_chan]

    # start by creating an np.float32 multi-dim numpy array
    if noise > 0:
        if seed is not None:
            np.random.seed(seed)
        print("random")
        data = np.random.normal(0, noise, dims)
        # why on earth we cannot get float32 random numbers
        print("astype")
        data = data.astype(np.float32)
    else:
        data = np.zeros(dims, dtype=np.float32)

    if False:
        print("Using NDData now")
        data = NDData(data, unit=u.K)

    if signal != 0:
        print("signal")
        w2 = 11.1 * (nchan * width)**2
        c = nchan / 2.0
        n2 = dimsize(dims[:-1])
        data2 = data.reshape(n2, nchan)
        # add the lines
        x2 = (np.arange(nchan) - c)**2 / w2
        g = np.exp(-x2)
        if True:
            # counterintuitive; this is a bit faster
            # print("looping data2 + g")
            for i in range(n2):
                data2[i, :] = data2[i, :] + g
        else:
            # print("single data2 + g")
            #data2[:,:] = data2[:,:] + g
            data2 = data2 + g

    if mask:
        # adding a bool mask seems to take up 4bytes per value....????
        print("mask")
        data = ma.masked_invalid(data, copy=False)

    print("done")
    return data
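# Usage sketch; the axis_* index constants and dimsize() helper are assumed
# to be defined at module level as in the original file:
#
# data = gen_data(dims=(4, 1, 1, 1, 128), noise=1, signal=1, seed=42)
# print(data.shape)   # (4, 1, 1, 1, 128), a masked float32 array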
Example #21
def main(name,version,HSTband,scalepc):
    '''match nebulae and association catalogue

    '''

    print(f'parameters: {name} {version} {HSTband} {scalepc}')


    # =====================================================================
    # Read in the data
    # =====================================================================

    #p = {x:sample_table.loc[name][x] for x in sample_table.columns}

    # DAP linemaps (Halpha and OIII)
    filename = data_ext / 'MUSE' / 'DR2.1' / 'copt' / 'MUSEDAP'
    filename = [x for x in filename.iterdir() if x.stem.startswith(name)][0]

    with fits.open(filename) as hdul:
        Halpha = NDData(data=hdul['HA6562_FLUX'].data,
                        uncertainty=StdDevUncertainty(hdul['HA6562_FLUX_ERR'].data),
                        mask=np.isnan(hdul['HA6562_FLUX'].data),
                        meta=hdul['HA6562_FLUX'].header,
                        wcs=WCS(hdul['HA6562_FLUX'].header))
        OIII = NDData(data=hdul['OIII5006_FLUX'].data,
                        uncertainty=StdDevUncertainty(hdul['OIII5006_FLUX_ERR'].data),
                        mask=np.isnan(hdul['OIII5006_FLUX'].data),
                        meta=hdul['OIII5006_FLUX'].header,
                        wcs=WCS(hdul['OIII5006_FLUX'].header))

    # the original catalogue from Francesco
    with fits.open(nebulae_file) as hdul:
        nebulae = Table(hdul[1].data)
    nebulae['SkyCoord'] = SkyCoord(nebulae['cen_ra']*u.deg,nebulae['cen_dec']*u.deg,frame='icrs')

    '''
    with fits.open(basedir/'data'/'interim'/f'Nebulae_Catalogue_v2p1_dig.fits') as hdul:
        dig = Table(hdul[1].data)

    with fits.open(basedir/'data'/'interim'/f'Nebulae_Catalogue_v2p1_fuv.fits') as hdul:
        fuv = Table(hdul[1].data)

    with fits.open(basedir/'data'/'interim'/f'Nebulae_Catalogue_v2p1_eq.fits') as hdul:
        eq_width = Table(hdul[1].data)

    nebulae = join(nebulae,fuv,keys=['gal_name','region_ID'])
    nebulae = join(nebulae,eq_width,keys=['gal_name','region_ID'])
    nebulae = join(nebulae,dig,keys=['gal_name','region_ID'])'
    '''

    nebulae.rename_columns(['cen_x','cen_y'],['x','y'])

    with np.errstate(divide='ignore',invalid='ignore'):
        nebulae['[SIII]/[SII]'] = np.nan
        SII = nebulae['SII6716_FLUX_CORR']+nebulae['SII6730_FLUX_CORR']
        SIII = nebulae['SIII6312_FLUX_CORR']+nebulae['SIII9068_FLUX_CORR']
        nebulae['[SIII]/[SII]'][SII>0] = SIII[SII>0]/SII[SII>0]
        #nebulae['HA/FUV'] = nebulae['HA6562_FLUX_CORR']/nebulae['FUV_FLUX_CORR']
        #nebulae['HA/FUV_err'] = nebulae['HA/FUV']*np.sqrt((nebulae['HA6562_FLUX_CORR_ERR']/nebulae['HA6562_FLUX_CORR'])**2+(nebulae['FUV_FLUX_CORR_ERR']/nebulae['FUV_FLUX_CORR'])**2)

    nebulae = nebulae[nebulae['gal_name']==name]
    nebulae.add_index('region_ID')

    filename = data_ext / 'Products' / 'Nebulae_catalogs'/'Nebulae_catalogue_v2' /'spatial_masks'/f'{name}_nebulae_mask_V2.fits'
    with fits.open(filename) as hdul:
        nebulae_mask = NDData(hdul[0].data.astype(float),mask=Halpha.mask,meta=hdul[0].header,wcs=WCS(hdul[0].header))
        nebulae_mask.data[nebulae_mask.data==-1] = np.nan

    #print(f'{name}: {len(nebulae)} HII-regions in final catalogue')

    # the association catalogue and mask
    target  = name.lower()
    associations, associations_mask = read_associations(folder=association_folder,
                                                        target=target,scalepc=scalepc,
                                                        HSTband=HSTband,version=version,data='all')
    if not associations:
        return 0
    
    # environmental masks
    with fits.open(env_masks_folder / f'{name}_simple.fits') as hdul:
        mask = reproject_interp(hdul[0],Halpha.meta,order='nearest-neighbor',return_footprint=False)
        env_masks_neb = NDData(data=mask,
                           meta=hdul[0].header,
                           wcs=Halpha.wcs)
    
    #print(f'{name}: {len(associations)} associations in catalogue')

    # =====================================================================
    # reproject and match catalogues
    # =====================================================================

    nebulae_hst, _  = reproject_interp(nebulae_mask,
                                    output_projection=associations_mask.wcs,
                                    shape_out=associations_mask.data.shape,
                                    order='nearest-neighbor')    

    # we scale the association ids so that they fit into the decimal (fractional) part
    scale = 10**np.ceil(np.log10(max(associations_mask.data[~np.isnan(associations_mask.data)])))
    s_arr = associations_mask.data/scale+nebulae_hst
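    # e.g. with scale=1000: association 17 inside nebula 42 encodes to 42.017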

    #print(f'masks reprojected')

    # ids of associations, nebulae and combination (sum) of both
    a_id = np.unique(associations_mask.data[~np.isnan(associations_mask.data)]).astype(int)
    n_id = np.unique(nebulae_mask.data[~np.isnan(nebulae_mask.data)]).astype(int)
    s_id = np.unique(s_arr[~np.isnan(s_arr)])

    # this splits the sum into two parts (nebulae and associations)
    a_modf,n_modf = np.modf(s_id)
    n_modf = n_modf.astype(int)
    a_modf = np.round(a_modf*scale).astype(int)
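    # e.g. np.modf(42.017) -> (0.017, 42.0): nebula 42, round(0.017*1000) = association 17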

    unique_a, count_a = np.unique(a_modf,return_counts=True)
    unique_n, count_n = np.unique(n_modf,return_counts=True)

    nebulae_dict = {int(n) : a_modf[n_modf==n].tolist() for n in n_id}     
    associations_dict = {int(a) : n_modf[a_modf==a].tolist() for a in a_id}     


    # so far we ensured that the nebulae in unique_n have only one association,
    # but it is possible that this association extends beyond the nebula and into
    # a second nebula. Those objects are excluded here
    isolated_nebulae = set()
    isolated_assoc   = set()
    for n,v in nebulae_dict.items():
        if len(v)==1:
            if len(associations_dict[v[0]])==1:
                isolated_nebulae.add(n)
                isolated_assoc.add(v[0])

    #print(f'n_associations = {len(associations_dict)}')
    #print(f'n_nebulae      = {len(nebulae_dict)}')
    #print(f'1to1 match     = {len(isolated_nebulae)}')


    # we save those two dicts so we do not have to redo this every time
    with open(basedir/version/HSTband/f'{scalepc}pc'/f'{name}_{HSTband}_{scalepc}pc_nebulae.yml','w+') as f:
        yaml.dump(nebulae_dict,f)
    with open(basedir/version/HSTband/f'{scalepc}pc'/f'{name}_{HSTband}_{scalepc}pc_associations.yml','w+') as f:
        yaml.dump(associations_dict,f)
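    # each YAML file maps an id to its list of overlapping counterparts, e.g. `42: [17, 23]`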


    # find all assoc that have at least one pixel outside of the nebulae masks
    mask = associations_mask.data.copy()
    mask[~np.isnan(nebulae_hst)] = np.nan
    outside = np.unique(mask[~np.isnan(mask)].astype(int))

    # find all assoc that have at least one pixel inside of the nebulae masks
    mask = associations_mask.data.copy()
    mask[np.isnan(nebulae_hst)] = np.nan
    inside = np.unique(mask[~np.isnan(mask)].astype(int))

    contained = np.setdiff1d(inside,outside)
    partial   = np.intersect1d(inside,outside)
    isolated  = np.setdiff1d(outside,inside)

    #print(f'contained: {len(contained)}\npartial: {len(partial)}\nisolated: {len(isolated)}')

    assoc_tmp = associations[['assoc_ID']].copy()
    assoc_tmp.add_index('assoc_ID')

    x_asc,y_asc = associations['SkyCoord'].to_pixel(env_masks_neb.wcs)
    outside = ((x_asc < 0) | (x_asc >= env_masks_neb.data.shape[1]) |
               (y_asc < 0) | (y_asc >= env_masks_neb.data.shape[0]))
    x_asc[outside] = 0
    y_asc[outside] = 0
    assoc_tmp['env_asc'] = [environment_dict[env_masks_neb.data[y,x]] for 
                            x,y in zip(x_asc.astype(int),y_asc.astype(int))]
    # index the column first so the assignment modifies the table in place
    assoc_tmp['env_asc'][outside] = ''
    
    assoc_tmp['overlap'] = np.empty(len(associations),dtype='U9')
    assoc_tmp['overlap'][np.isin(assoc_tmp['assoc_ID'],contained)] = 'contained'
    assoc_tmp['overlap'][np.isin(assoc_tmp['assoc_ID'],partial)]   = 'partial'
    assoc_tmp['overlap'][np.isin(assoc_tmp['assoc_ID'],isolated)]  = 'isolated'
    assoc_tmp['1to1'] = False
    assoc_tmp['1to1'][np.isin(assoc_tmp['assoc_ID'],list(isolated_assoc))] = True
    assoc_tmp['Nnebulae'] = [len(associations_dict[k]) for k in assoc_tmp['assoc_ID']]

    assoc_tmp['region_ID'] = np.nan
    assoc_tmp['region_ID'][assoc_tmp['1to1']] = [associations_dict[k][0] for k in assoc_tmp[assoc_tmp['1to1']]['assoc_ID']]

    overlap = join(
        Table(np.unique(associations_mask.data[~np.isnan(associations_mask.data)],return_counts=True),names=['assoc_ID','size']),
        Table(np.unique(associations_mask.data[~np.isnan(nebulae_hst) & ~np.isnan(associations_mask.data)],return_counts=True),names=['assoc_ID','overlap_size']),
        keys=['assoc_ID'],join_type='outer')
    overlap = overlap.filled(0)
    overlap['overlap_asc'] = overlap['overlap_size']/overlap['size']
    overlap['overlap_asc'].info.format = '%.2f'
    assoc_tmp = join(assoc_tmp,overlap[['assoc_ID','overlap_asc']],keys='assoc_ID')

    #print('write to file')
    hdu = fits.BinTableHDU(assoc_tmp,name='joined catalogue')
    hdu.writeto(basedir/version/HSTband/f'{scalepc}pc'/f'{name}_{HSTband}_{scalepc}pc_associations.fits',overwrite=True)


    nebulae_tmp = nebulae[['region_ID','x','y']].copy()
    nebulae_tmp.add_index('region_ID')

    nebulae_tmp['env_neb'] = [environment_dict[env_masks_neb.data[y,x]] for 
                              x,y in zip(nebulae_tmp['x'].astype(int),nebulae_tmp['y'].astype(int))]

    nebulae_tmp['neighbors'] = np.nan
    for row in nebulae_tmp:
        row['neighbors'] = len(find_neighbors(nebulae_mask.data,tuple(row[['x','y']]),row['region_ID'],plot=False))
    del nebulae_tmp[['x','y']]

    nebulae_tmp['1to1'] = False
    nebulae_tmp['1to1'][np.isin(nebulae_tmp['region_ID'],list(isolated_nebulae))] = True
    nebulae_tmp['Nassoc'] = [len(nebulae_dict[k]) for k in nebulae_tmp['region_ID']]
    nebulae_tmp['assoc_ID'] = np.nan
    nebulae_tmp['assoc_ID'][nebulae_tmp['1to1']] = [nebulae_dict[k][0] for k in nebulae_tmp[nebulae_tmp['1to1']]['region_ID']]


    overlap = join(
        Table(np.unique(nebulae_hst[~np.isnan(nebulae_hst)],return_counts=True),names=['region_ID','size']),
        Table(np.unique(nebulae_hst[~np.isnan(nebulae_hst) & ~np.isnan(associations_mask.data)],return_counts=True),names=['region_ID','overlap_size']),
        keys=['region_ID'],join_type='outer')
    overlap = overlap.filled(0)
    overlap['overlap_neb'] = overlap['overlap_size']/overlap['size']
    overlap['overlap_neb'].info.format = '%.2f'
    nebulae_tmp = join(nebulae_tmp,overlap[['region_ID','overlap_neb']],keys='region_ID')

    hdu = fits.BinTableHDU(nebulae_tmp,name='joined catalogue')
    hdu.writeto(basedir/version/HSTband/f'{scalepc}pc'/f'{name}_{HSTband}_{scalepc}pc_nebulae.fits',overwrite=True)
    #del nebulae_tmp['1to1']

    #print(f'{np.sum(nebulae_tmp["neighbors"]==0)} nebulae have no neighbors')

    catalogue = join(assoc_tmp,nebulae_tmp,keys=['assoc_ID','region_ID'])
    catalogue = join(catalogue,nebulae,keys='region_ID')
    catalogue = join(catalogue,associations,keys='assoc_ID')

    # pay attention to the order of assoc, neb
    catalogue.rename_columns(['X','Y','x','y','RA','DEC','cen_ra','cen_dec',
                              'reg_area','region_area',
                              'EBV_1','EBV_2','EBV_err','EBV_ERR',
                              'SkyCoord_1','SkyCoord_2'],
                             ['x_asc','y_asc','x_neb','y_neb','ra_asc','dec_asc','ra_neb','dec_neb',
                              'area_asc','area_neb',
                              'EBV_balmer','EBV_stars','EBV_balmer_err','EBV_stars_err',
                              'SkyCoord_asc','SkyCoord_neb'])

    # separation to other associations and nebulae
    idx,sep_asc,_= match_coordinates_sky(catalogue['SkyCoord_asc'],associations['SkyCoord'],nthneighbor=2)
    idx,sep_neb,_= match_coordinates_sky(catalogue['SkyCoord_neb'],nebulae['SkyCoord'],nthneighbor=2)
    catalogue['sep_asc'] = sep_asc.to(u.arcsec)
    catalogue['sep_neb'] = sep_neb.to(u.arcsec)

    # select the columns of the joined catalogue
    columns = ['assoc_ID','region_ID','x_asc','y_asc','x_neb','y_neb',
               'ra_asc','dec_asc','ra_neb','dec_neb','SkyCoord_asc','SkyCoord_neb',
               'env_asc','env_neb','area_asc','area_neb',
               'sep_asc','sep_neb','neighbors','Nassoc','overlap','overlap_asc','overlap_neb',
               'age','age_err','mass','mass_err','EBV_stars','EBV_stars_err','EBV_balmer','EBV_balmer_err',
               'met_scal','met_scal_err','logq_D91','logq_D91_err',] + \
                [x for x in nebulae.columns if x.endswith('_FLUX_CORR')] + \
                [x for x in nebulae.columns if x.endswith('_FLUX_CORR_ERR')] + \
                ['NUV_FLUX','NUV_FLUX_ERR','U_FLUX','U_FLUX_ERR','B_FLUX','B_FLUX_ERR',
                 'V_FLUX','V_FLUX_ERR','I_FLUX','I_FLUX_ERR'] 
    catalogue = catalogue[columns]
            
    catalogue.rename_columns([col for col in catalogue.columns if col.endswith('FLUX_CORR')],
                          [col.replace('FLUX_CORR','flux') for col in catalogue.columns if col.endswith('FLUX_CORR')])
    catalogue.rename_columns([col for col in catalogue.columns if col.endswith('FLUX_CORR_ERR')],
                          [col.replace('FLUX_CORR_ERR','flux_err') for col in catalogue.columns if col.endswith('FLUX_CORR_ERR')])
    catalogue['assoc_ID'] = catalogue['assoc_ID'].astype('int')
    catalogue['region_ID'] = catalogue['region_ID'].astype('int')

    catalogue.info.description = 'Joined catalogue between associations and nebulae'
    mean_sep = np.mean(catalogue['SkyCoord_asc'].separation(catalogue['SkyCoord_neb']))
    #print(f'{len(catalogue)} objects in catalogue')
    #print(f'the mean separation between cluster and association center is {mean_sep.to(u.arcsecond):.2f}')


    export = catalogue.copy() #[catalogue['contained']]
    #export.add_column(export['SkyCoord_asc'].to_string(style='hmsdms',precision=2),index=6,name='RaDec_asc')
    #export.add_column(export['SkyCoord_neb'].to_string(style='hmsdms',precision=2),index=8,name='RaDec_neb')

    RA_asc ,DEC_asc = zip(*[x.split(' ') for x in export['SkyCoord_asc'].to_string(style='hmsdms',precision=2)])
    RA_neb ,DEC_neb = zip(*[x.split(' ') for x in export['SkyCoord_neb'].to_string(style='hmsdms',precision=2)])

    export.add_column(RA_asc,index=6,name='Ra_asc')
    export.add_column(DEC_asc,index=8,name='Dec_asc')
    export.add_column(RA_neb,index=10,name='Ra_neb')
    export.add_column(DEC_neb,index=12,name='Dec_neb')

    for col in export.columns:
        if col not in ['Ra_asc','Dec_asc','Ra_neb','Dec_neb','region_ID','cluster_ID','overlap','env_asc','env_neb']:
            export[col].info.format = '%.2f'

    del export[['ra_asc','dec_asc','ra_neb','dec_neb','SkyCoord_neb','SkyCoord_asc']]

    hdu = fits.BinTableHDU(export,name='joined catalogue')
    hdu.writeto(basedir/version/HSTband/f'{scalepc}pc'/f'{name}_{HSTband}_{scalepc}pc_associations_and_nebulae_joined.fits',overwrite=True)
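
# A minimal, self-contained sketch of the encode/split trick used in the
# function above (the toy 2x3 label maps are illustrative assumptions):
import numpy as np

nebulae_ids = np.array([[1., 1., np.nan],
                        [1., 2., 2.]])
assoc_ids = np.array([[7., 7., np.nan],
                      [np.nan, 8., 8.]])

# scale so the association id fits entirely into the fractional part
scale = 10**np.ceil(np.log10(np.nanmax(assoc_ids)))
combined = assoc_ids / scale + nebulae_ids

# np.modf splits each value back into a (nebula, association) id pair
frac, whole = np.modf(combined[~np.isnan(combined)])
pairs = [(int(w), int(round(f * scale))) for w, f in zip(whole, frac)]
print(pairs)  # [(1, 7), (1, 7), (2, 8), (2, 8)]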
Example No. 22
 def setup_class(self):
     self.nd1 = NDData([0, 1, 2, 3, 4])
     self.nd2 = NDData([1, 7, 5, 4, 2])
Example No. 23
def nddata_cutout2d(nddata, position, size, mode='trim', fill_value=np.nan):
    """
    Create a 2D cutout of a `~astropy.nddata.NDData` object.

    Specifically, cutouts will be made for the ``nddata.data`` and
    ``nddata.mask`` (if present) arrays.  If ``nddata.wcs`` exists, then
    it will also be updated.

    Note that a cutout will not be made for ``nddata.uncertainty`` (if
    present) because uncertainties are general objects, not arrays.

    Parameters
    ----------
    nddata : `~astropy.nddata.NDData`
        The 2D `~astropy.nddata.NDData` from which the cutout is taken.

    position : tuple or `~astropy.coordinates.SkyCoord`
        The position of the cutout array's center with respect to the
        ``nddata.data`` array.  The position can be specified either as
        a ``(x, y)`` tuple of pixel coordinates or a
        `~astropy.coordinates.SkyCoord`, in which case ``nddata.wcs``
        must exist.

    size : int, array-like, `~astropy.units.Quantity`
        The size of the cutout array along each axis.  If ``size`` is a
        scalar number or a scalar `~astropy.units.Quantity`, then a
        square cutout of ``size`` will be created.  If ``size`` has two
        elements, they should be in ``(ny, nx)`` order.  Scalar numbers
        in ``size`` are assumed to be in units of pixels.  ``size`` can
        also be a `~astropy.units.Quantity` object or contain
        `~astropy.units.Quantity` objects.  Such
        `~astropy.units.Quantity` objects must be in pixel or angular
        units.  For all cases, ``size`` will be converted to an integer
        number of pixels, rounding to the nearest integer.  See the
        ``mode`` keyword for additional details on the final cutout
        size.

    mode : {'trim', 'partial', 'strict'}, optional
        The mode used for creating the cutout data array.  For the
        ``'partial'`` and ``'trim'`` modes, a partial overlap of the
        cutout array and the input ``nddata.data`` array is sufficient.
        For the ``'strict'`` mode, the cutout array has to be fully
        contained within the ``nddata.data`` array, otherwise an
        `~astropy.nddata.utils.PartialOverlapError` is raised.   In all
        modes, non-overlapping arrays will raise a
        `~astropy.nddata.utils.NoOverlapError`.  In ``'partial'`` mode,
        positions in the cutout array that do not overlap with the
        ``nddata.data`` array will be filled with ``fill_value``.  In
        ``'trim'`` mode only the overlapping elements are returned, thus
        the resulting cutout array may be smaller than the requested
        ``size``.

    fill_value : number, optional
        If ``mode='partial'``, the value to fill pixels in the cutout
        array that do not overlap with the input ``nddata.data``.
        ``fill_value`` must have the same ``dtype`` as the input
        ``nddata.data`` array.

    Returns
    -------
    result : `~astropy.nddata.NDData`
        A `~astropy.nddata.NDData` object with cutouts for the data and
        mask, if input.

    Examples
    --------
    >>> import numpy as np
    >>> from astropy.nddata import NDData
    >>> import astropy.units as u
    >>> from astroimtools import nddata_cutout2d
    >>> data = np.random.random((500, 500))
    >>> unit = u.electron / u.s
    >>> mask = (data > 0.7)
    >>> meta = {'exptime': 1234 * u.s}
    >>> nddata = NDData(data, mask=mask, unit=unit, meta=meta)
    >>> cutout = nddata_cutout2d(nddata, (100, 100), (10, 10))
    >>> cutout.data.shape
    (10, 10)
    >>> cutout.mask.shape
    (10, 10)
    >>> cutout.unit
    Unit("electron / s")
    """

    from astropy.nddata.utils import Cutout2D

    if not isinstance(nddata, NDData):
        raise TypeError('nddata input must be an NDData object')

    if isinstance(position, SkyCoord):
        if nddata.wcs is None:
            raise ValueError('nddata must contain WCS if the input '
                             'position is a SkyCoord')
        position = skycoord_to_pixel(position, nddata.wcs, mode='all')

    data_cutout = Cutout2D(np.asanyarray(nddata.data), position, size,
                           wcs=nddata.wcs, mode=mode, fill_value=fill_value)
    # need to create a new NDData instead of copying/replacing
    nddata_out = NDData(data_cutout.data, unit=nddata.unit,
                        uncertainty=nddata.uncertainty, meta=nddata.meta)

    if nddata.wcs is not None:
        nddata_out.wcs = data_cutout.wcs

    if nddata.mask is not None:
        mask_cutout = Cutout2D(np.asanyarray(nddata.mask), position, size,
                               mode=mode, fill_value=fill_value)
        nddata_out.mask = mask_cutout.data

    return nddata_out
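
# A hedged follow-up to the doctest above, exercising the SkyCoord branch
# (the WCS numbers here are illustrative assumptions, not from the docs):
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.nddata import NDData
from astropy.wcs import WCS

w = WCS(naxis=2)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.crpix = [250, 250]
w.wcs.crval = [10.0, 20.0]
w.wcs.cdelt = [-1 / 3600, 1 / 3600]  # ~1 arcsec per pixel

nddata = NDData(np.random.random((500, 500)), wcs=w)
position = SkyCoord(10.0 * u.deg, 20.0 * u.deg)  # maps onto the reference pixel
cutout = nddata_cutout2d(nddata, position, 21)
print(cutout.data.shape)  # (21, 21)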
Example No. 24
 def test_invalid_nddata_shapes(self):
     nd2 = NDData(np.arange(10))
     with pytest.raises(ValueError):
         nddata_arith(self.nd1, nd2, '+')
Example No. 25
def test_load_nddata():
    image = ImageWidget()
    data = np.random.random([100, 100])
    nddata = NDData(data)
    image.load_nddata(nddata)
Example No. 26
from photutils.centroids import centroid_quadratic

img_grid, tab_grid = read_or_generate_image(
    'scopesim_grid_16_perturb2_mag18_24_subpixel')

epsf_sources = tab_grid.copy()
epsf_sources = epsf_sources[(epsf_sources['m'] > 19.5)
                            & ((1024 - 100) > epsf_sources['x']) &
                            (epsf_sources['x'] > 100)
                            & ((1024 - 100) > epsf_sources['y']) &
                            (epsf_sources['y'] > 100)]
epsf_sources.sort('m', reverse=False)

fitshape = 41

epsf_stars = extract_stars(NDData(img_grid),
                           epsf_sources[:100],
                           size=(fitshape + 2, fitshape + 2))
builder = EPSFBuilder(oversampling=4,
                      smoothing_kernel=make_gauss_kernel(2.3, N=21),
                      maxiters=5)
pre_epsf, _ = cached(lambda: builder.build_epsf(epsf_stars),
                     cache_dir / 'epsf_synthetic',
                     rerun=False)
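# trim the padded border, then rescale so that np.sum(data) equals
# np.sum(pre_epsf.oversampling) (the normalization this script assumes)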
data = pre_epsf.data[9:-9, 9:-9].copy()
data /= np.sum(data) / np.sum(pre_epsf.oversampling)
epsf = FittableImageModel(data=data, oversampling=pre_epsf.oversampling)
epsf.origin = centroid_quadratic(epsf.data)


def grid_photometry_epsf():
Example No. 27
 def generate_PSFs(self,
                   equivalent_radius=2.,
                   size=20.,
                   oversampling=1,
                   plot=None,
                   filepaths=None):
     '''
     Generate effective point spread functions (ePSFs) for each image.

     Parameters
     ----------
     equivalent_radius : float, unit arcsec
                         radius criterion used to identify stars
     size : float, unit pixel
            size of the box used to extract stars
     oversampling : int
                    oversampling factor of the ePSF
     plot : None or list
            None to skip plotting; a list like [1,2,3] selects the
            images used to plot an RGB image of the stars & ePSF
     filepaths : list of str, optional
                 file paths used to store the ePSFs
     '''
     stars = self.common_catalog.copy()
     remolist = []
     for loop in range(len(stars)):
         for loop2 in range(self.__length):
             a = (
                 self.image_list[loop2].sources_catalog['equivalent_radius']
                 [stars['sloop_{0}'.format(loop2 + 1)][loop]]
             ) * self.image_list[loop2].pixel_scales[0].value
             if (a > equivalent_radius):
                 remolist.append(loop)
                 break
     stars.remove_rows(remolist)
     star_images = []
     PSFs = []
     for loop2 in range(self.__length):
         newsc = self.image_list[loop2].sources_catalog.copy()
         indexes = np.delete(
             np.arange(len(self.image_list[loop2].sources_catalog)),
             stars['sloop_{0}'.format(loop2 + 1)])
         newsc.remove_rows(indexes)
         stars_tbl = Table()
         stars_tbl['x'] = np.array(newsc['maxval_xpos'])
         stars_tbl['y'] = np.array(newsc['maxval_ypos'])
         nddata = NDData(data=np.array(self.image_list[loop2].ss_data))
         Tstar = extract_stars(nddata, stars_tbl, size=size)
         epsf_builder = EPSFBuilder(oversampling=oversampling,
                                    maxiters=15,
                                    progress_bar=False)
         epsf, fitted_stars = epsf_builder(Tstar)
         self.image_list[loop2].PSF = epsf.data
         if filepaths is not None:
             hdu = fits.PrimaryHDU(epsf.data.astype('float32'))
             After = fits.HDUList([hdu])
             After.writeto(filepaths[loop2], overwrite=True)
         if plot is not None:
             star_images.append(Tstar)
             PSFs.append(epsf.data)
     if plot is not None:
         tlens = len(stars)
         if (((tlens // 5) + 1) * 5 - tlens) < ((
             (tlens // 4) + 1) * 4 - tlens):
             ncols = 5
             nrows = (tlens // 5) + 1
         else:
             ncols = 4
             nrows = (tlens // 4) + 1
         fig, ax = plt.subplots(nrows=nrows,
                                ncols=ncols,
                                figsize=(3 * ncols, 3 * nrows),
                                squeeze=True)
         ax = ax.ravel()
         for i in range(tlens):
             if len(plot) > 2:
                 star_b = star_images[plot[0]][i].data * 100. / np.sum(
                     star_images[plot[0]][i].data)
                 star_g = star_images[plot[1]][i].data * 100. / np.sum(
                     star_images[plot[1]][i].data)
                 star_r = star_images[plot[2]][i].data * 100. / np.sum(
                     star_images[plot[2]][i].data)
                 norm = simple_norm(star_b, 'log', percent=99.)
                 image = make_lupton_rgb(star_r, star_g, star_b, Q=10)
             else:
                 image = star_images[plot[0]][i].data
                 norm = simple_norm(image, 'log', percent=99.)
             ax[i].imshow(image, norm=norm, origin='lower')
         plt.show()
         fig = plt.figure(figsize=(10, 10))
         if len(plot) > 2:
             star_b = PSFs[plot[0]] * 100. / np.sum(PSFs[plot[0]])
             star_g = PSFs[plot[1]] * 100. / np.sum(PSFs[plot[1]])
             star_r = PSFs[plot[2]] * 100. / np.sum(PSFs[plot[2]])
             norm = simple_norm(star_b, 'log', percent=99.)
             image = make_lupton_rgb(star_r, star_g, star_b, Q=10)
         else:
             image = PSFs[plot[0]]
             norm = simple_norm(image, 'log', percent=99.)
         plt.imshow(image, norm=norm, origin='lower')
         plt.show()
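
 # Hedged usage sketch (the host object and file paths are assumptions):
 # stack.generate_PSFs(equivalent_radius=2., size=21, oversampling=2,
 #                     plot=[0, 1, 2],
 #                     filepaths=[f'psf_{i}.fits' for i in range(3)])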
Example No. 28
recipe_group = lambda:\
    generators.scopesim_groups(N1d=1,
                               jitter=8.,
                               border=400,
                               magnitude=lambda N: np.random.normal(19, 1.5, N),
                               group_size=9,
                               group_radius=7
                               )

img, input_table = generators.read_or_generate_image(recipe_group, name_group)

grid_img_no_background = img_grid - np.median(img_grid)


stars = extract_stars(NDData(grid_img_no_background), input_table_grid, size=cutout_size)

epsf, _ = EPSFBuilder(oversampling=2,
                      maxiters=6,
                      progress_bar=True,
                      smoothing_kernel=util.make_gauss_kernel(0.5)).build_epsf(stars)

mean, median, std = sigma_clipped_stats(img)

grouper = DAOGroup(60)
finder = DAOStarFinder(threshold=median-2*std, fwhm=2.)

phot = BasicPSFPhotometry(grouper, MADStdBackgroundRMS(), epsf, cutout_size+2, finder=finder)
phot_nogroup = BasicPSFPhotometry(DAOGroup(0.001), MADStdBackgroundRMS(), epsf, cutout_size+2, finder=finder)

init_guess = input_table.copy()
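
# Hedged usage sketch: BasicPSFPhotometry expects init guesses with x_0/y_0
# columns (the original column names in input_table are an assumption):
# init_guess.rename_columns(['x', 'y'], ['x_0', 'y_0'])
# result = phot(image=img, init_guesses=init_guess)
# result_nogroup = phot_nogroup(image=img, init_guesses=init_guess)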
Example No. 29
def find_pinholes_irregular(fname,
                            freference,
                            sname,
                            fdarkff,
                            fdark,
                            fff,
                            files,
                            size,
                            threshold,
                            fwhm,
                            fitshape,
                            MAX_CONTROL_POINTS,
                            PIXEL_TOL,
                            range_psf,
                            sigma=2.,
                            oversampling=4,
                            maxiters=3,
                            MIN_MATCHES_FRACTION=0.8,
                            NUM_NEAREST_NEIGHBORS=5):
    """Finds and fits irregularly spread pinhole positions with a ePSF in a FITS image. Then matches them to the reference positions.
    
    Parameters
    ----------
    fname : str
        Folder name of the input fits files.
    freference : str
        File name of the reference positions (txt file).
    sname : str
        Folder name of the returned found and matched pinhole positions (txt files).
    fdarkff : string
        Location of the dark images for the flat field images.
    fdark : string
        Location of the dark images for the raw images.
    fff : string
        Location of the flat field images.
    files : (1, 2)-shaped int array
        File range to create a median image
    size : int
        Rectangular size of the ePSF. Size must be an odd number.
    threshold : float
        The absolute image value above which to select sources.
    fwhm : float
        The full-width half-maximum (FWHM) of the major axis of the Gaussian kernel in units of pixels.
    fitshape : int or length-2 array-like
        Rectangular shape around the center of a star which will be used to collect the data to do the fitting. 
        Can be an integer to be the same along both axes. 
        E.g., 5 is the same as (5, 5), which means to fit only at the following 
        relative pixel positions: [-2, -1, 0, 1, 2]. Each element of fitshape must be an odd number.
    MAX_CONTROL_POINTS : int
        The maximum control points (stars) to use to build the invariants.    
    PIXEL_TOL : int
        The pixel distance tolerance to assume two invariant points are the same.
    range_psf : (1, 4)-shaped int array
        Position range to compute epsf [xmin,xmax,ymin,ymax]
    sigma : float
        Number of standard deviations used to perform sigma clip with a astropy.stats.SigmaClip object.
    oversampling : int or tuple of two int
        The oversampling factor(s) of the ePSF relative to the input stars along the x and y axes. 
        The oversampling can either be a single int or a tuple of two ints of the form (x_oversamp, y_oversamp). 
        If oversampling is a scalar then the oversampling will be the same for both the x and y axes.
    maxiters : int
        The maximum number of iterations to perform.
    MIN_MATCHES_FRACTION : float (0,1]
        The minimum fraction of triangle matches to accept a transformation.
        If the minimum fraction yields more than 10 triangles, 10 is used instead.
    NUM_NEAREST_NEIGHBORS : int
        The number of nearest neighbors of a given star (including itself) to construct the triangle invariants.
    Returns
    -------
    s_list : (N,2)-shaped array
        Found and matched positions of the pinholes.
    t_list : (N,2)-shaped array
        Matched reference grid positions.
    """

    #Load the sample of fits images
    entries = os.listdir(fname)

    data_col = np.array([fits.getdata(fname + '/' + entries[files[0]], ext=0)])
    for k in range(files[0] + 1, files[1] + 1):
        data_col = np.append(data_col,
                             [fits.getdata(fname + '/' + entries[k], ext=0)],
                             axis=0)

    #Data reduction: dark current + flat field
    data_col = data_correction(data_col, fdarkff, fdark, fff)

    #Calculate median image
    data_full = np.median(data_col, axis=0)
    pos_full = np.array([[0, 0]])

    data = data_full[range_psf[2]:range_psf[3], range_psf[0]:range_psf[1]]

    #Find peaks in data
    peaks_tbl = find_peaks(data, threshold=threshold)
    peaks_tbl['peak_value'].info.format = '%.8g'

    #Load data around found peaks
    hsize = (size - 1) / 2
    x = peaks_tbl['x_peak']
    y = peaks_tbl['y_peak']
    mask = ((x > hsize) & (x < (data.shape[1] - 1 - hsize)) & (y > hsize) &
            (y < (data.shape[0] - 1 - hsize)))

    stars_tbl = Table()
    stars_tbl['x'] = x[mask]
    stars_tbl['y'] = y[mask]

    #Calculate mean, median, std
    mean_val, median_val, std_val = sigma_clipped_stats(data, sigma=sigma)
    data = data - median_val

    #Find pinholes and create ePSF
    nddata = NDData(data=data)

    stars = extract_stars(nddata, stars_tbl, size=size)

    epsf_builder = EPSFBuilder(oversampling=oversampling,
                               maxiters=maxiters,
                               progress_bar=False)
    epsf, fitted_stars = epsf_builder(stars)

    #Use ePSF to find precise locations of pinholes
    daofind = DAOPhotPSFPhotometry(crit_separation=30,
                                   threshold=threshold,
                                   fwhm=fwhm,
                                   psf_model=epsf,
                                   fitshape=fitshape,
                                   aperture_radius=12,
                                   niters=1)

    #Get positions
    sources = daofind(data_full)

    for col in sources.colnames:
        sources[col].info.format = '%.8g'

    pos = np.transpose((sources['x_fit'], sources['y_fit']))
    pos_full = np.append(pos_full, pos, axis=0)

    pos_full = pos_full[1:]

    #Create apertures at the found pinhole positions
    apertures = CircularAperture(pos_full, r=10)

    norm = ImageNormalize(stretch=SqrtStretch())

    #Plot found pinholes
    fig, ax = plt.subplots()
    ax.set_title('Pinhole Positions')
    ax.set(xlabel='x [pixel]', ylabel='y [pixel]')
    ax.imshow(data_full, cmap='Greys', origin='lower', norm=norm)
    apertures.plot(color='blue', lw=1.5, alpha=0.5)
    ax.legend(['#pinholes = ' + str(len(pos_full[:, 0]))],
              loc='lower left',
              prop={'size': 12})
    plt.show()

    #Sort positions by matching with reference grid
    positions_sort = pos_full

    ref_positions = np.genfromtxt(freference, skip_header=0)

    transf, (s_list, t_list) = find_transform(positions_sort, ref_positions,
                                              MAX_CONTROL_POINTS, PIXEL_TOL,
                                              MIN_MATCHES_FRACTION,
                                              NUM_NEAREST_NEIGHBORS)

    #Each output row: x-measured, y-measured, x-reference, y-reference
    text_trans = np.column_stack((s_list[:, 0], s_list[:, 1],
                                  t_list[:, 0], t_list[:, 1]))

    #Save data as txt file
    np.savetxt(sname + '.txt',
               text_trans,
               fmt='%1.9E',
               delimiter='\t',
               comments='',
               header='x-measured   y-measured   x-reference   y-reference')

    return s_list, t_list
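
# Hedged usage sketch (all paths and tuning values are hypothetical):
# s_list, t_list = find_pinholes_irregular(
#     fname='raw_frames', freference='reference_grid.txt', sname='matched',
#     fdarkff='darks_for_flats', fdark='darks', fff='flats',
#     files=(0, 9), size=25, threshold=500., fwhm=4., fitshape=11,
#     MAX_CONTROL_POINTS=100, PIXEL_TOL=5, range_psf=(100, 900, 100, 900))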
Example No. 30
bpl.set_style()

plot_path = Path(sys.argv[1]).resolve()
size_home_dir = plot_path.parent
home_dir = size_home_dir.parent
oversampling_factor = int(sys.argv[2])
psf_width = int(sys.argv[3])

# ======================================================================================
#
# Load the data - image and star catalog
#
# ======================================================================================
image_data, _, _ = utils.get_drc_image(home_dir)
# the extract_stars thing below requires the input as a NDData object
nddata = NDData(data=image_data)


# load the input star list.
def get_star_list_and_psf(source):
    name_base = f"_{source}_stars_{psf_width}_pixels_{oversampling_factor}x_oversampled"

    table_name = "psf_star_centers" + name_base + ".txt"
    psf_name = "psf" + name_base + ".fits"

    star_table = table.Table.read(str(size_home_dir / table_name),
                                  format="ascii.ecsv")
    psf = fits.open(size_home_dir / psf_name)["PRIMARY"].data

    return star_table, psf
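
# Hedged usage sketch (the source label is an assumption; the original
# script presumably passes one of its own catalog names):
# star_table, psf = get_star_list_and_psf("my_source")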