Example #1
def get_sn_for_aperture_range(hdu,
                              find_peak=False,
                              r_list=np.arange(0.5, 3.1, 0.1),
                              dtype=float):

    im = hdu[0].data
    im_mask = im == 0  # flag blank (zero-valued) pixels
    # robust image statistics from the unmasked pixels
    mean, median, std = sigma_clipped_stats(im[~im_mask], sigma=3.0)
    threshold = 3 * std

    if find_peak:
        r = np.sqrt(hdu[2].data**2 + hdu[3].data**2)
        searchregion = r < 3
        tbl = find_peaks(
            im,
            threshold,
            footprint=searchregion,
            mask=im_mask,
            wcs=wcs.WCS(hdu[0].header),
        )
        if tbl is None:
            coords_peak = None
        else:
            coords_peak = tbl["skycoord_peak"][np.argmax(tbl["peak_value"])]
    else:
        coords_peak = None

    if coords_peak is None:
        coords_center = SkyCoord(ra=hdu[0].header["CRVAL1"],
                                 dec=hdu[0].header["CRVAL2"],
                                 unit="deg")
    else:
        coords_center = coords_peak

    flux_list = []
    flux_err_list = []
    bkg_list = []
    apcor_list = []

    for r in r_list:

        flux, flux_err, bkg_stddev, apcor = FitCircularAperture(
            hdu=hdu,
            coords=coords_center,
            plot=False,
            radius=r * u.arcsec,
            annulus=[r * 2, r * 3] * u.arcsec,
        )
        flux_list.append(flux.value)
        flux_err_list.append(flux_err.value)
        bkg_list.append(bkg_stddev.value)
        apcor_list.append(apcor)

    flux_arr = np.array(flux_list)
    flux_err_arr = np.array(flux_err_list)  # collected but not currently used
    bkg_arr = np.array(bkg_list)
    apcor_arr = np.array(apcor_list)  # collected but not currently used
    sn = flux_arr / bkg_arr

    return r_list, sn, coords_center
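For context, a minimal, self-contained sketch of the `find_peaks` pattern this example builds on; the synthetic image and parameter values are illustrative only, and the FITS extensions and `FitCircularAperture` helper from the original code are not reproduced:

import numpy as np
from astropy.stats import sigma_clipped_stats
from photutils.detection import find_peaks

rng = np.random.default_rng(42)
im = rng.normal(0.0, 1.0, size=(50, 50))
im[25, 25] = 20.0  # inject one bright pixel

mean, median, std = sigma_clipped_stats(im, sigma=3.0)
tbl = find_peaks(im, threshold=3 * std, box_size=5)  # astropy Table, or None
if tbl is not None:
    brightest = tbl[np.argmax(tbl['peak_value'])]
    print(brightest['x_peak'], brightest['y_peak'])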
Example #2
def cross_correlation_sim(hms):
    shft_0 = np.array([19, 19])
    for k1, hm1 in hms.items():
        print('################################################')
        print(k1)
        print(hm1)
        print('################################################')
    for k1, hm1 in hms.items():
        for k2, hm2 in hms.items():
            if k1 != k2:
                print('Distance between ', k1, " and ", k2, end=': ')

                X = signal.correlate2d(hm1, hm2)  # boundary='fill', mode='full' (defaults)

                peak = find_peaks(data=X, threshold=0, box_size=1, npeaks=1)

                if peak is not None:
                    for p in peak:
                        dist = np.linalg.norm(
                            np.array([p['x_peak'], p['y_peak']]) - shft_0)
                        peak_value = p['peak_value']
                    print(dist, peak_value)
                else:
                    print("NO PEAKS FOUND")
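To make the geometry concrete: for two 20x20 maps the full correlation matrix is 39x39 and the zero-shift position is (19, 19), which is why `shft_0` is hard-coded above. A small sketch recovering a known shift (the sign convention follows scipy's correlation definition):

import numpy as np
from scipy import signal
from photutils.detection import find_peaks

hm1 = np.zeros((20, 20))
hm1[10, 10] = 1.0
hm2 = np.roll(hm1, shift=(2, 3), axis=(0, 1))  # hm1 shifted by (+2, +3)

X = signal.correlate2d(hm1, hm2)  # full 39x39 correlation matrix
peak = find_peaks(data=X, threshold=0, box_size=1, npeaks=1)
if peak is not None:
    p = peak[0]
    offset = np.array([p['x_peak'], p['y_peak']]) - np.array([19, 19])
    print(offset)  # the (x, y) offset encodes the shift, here [-3 -2]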
Example #3
def make_source_catalog():
    """Make source catalog from images.

    TODO: use other images to do measurements for the sources,
    e.g. excess, npred, flux.
    """
    significance_threshold = 7

    hdu = fits.open(TS_IMAGES)['sqrt_ts']
    header = fits.getheader(REF_IMAGE)
    wcs = WCS(header)

    print('Running find_peaks ...')
    table = find_peaks(data=hdu.data,
                       threshold=significance_threshold,
                       wcs=wcs)
    print('Number of sources detected: {}'.format(len(table)))

    # Add some useful columns
    icrs = SkyCoord(table['icrs_ra_peak'], table['icrs_dec_peak'], unit='deg')
    galactic = icrs.galactic
    table['Source_Name'] = coordinate_iau_format(icrs, ra_digits=5, prefix='J')
    table['GLON'] = galactic.l.deg
    table['GLAT'] = galactic.b.deg

    # table.show_in_browser(jsviewer=True)
    print('Writing {}'.format(SOURCE_CATALOG))
    table.write(SOURCE_CATALOG, overwrite=True)

    ds9_string = to_ds9_region(table, label='Source_Name')
    print('Writing {}'.format(SOURCE_REGIONS))
    with open(SOURCE_REGIONS, 'w') as fh:
        fh.write(ds9_string)

    # TODO: move this to `make_significance_image`.
    # At the moment the TS map output images don't have WCS info in the header
    hdu = fits.PrimaryHDU(data=hdu.data, header=header)
    print('Writing {}'.format(SIGNIFICANCE_IMAGE))
    hdu.writeto(SIGNIFICANCE_IMAGE, overwrite=True)  # 'clobber' is deprecated in astropy
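One portability note: the columns `find_peaks` adds when a `wcs` is passed changed between photutils releases. This example uses the older `icrs_ra_peak`/`icrs_dec_peak` columns, while newer versions return a single `skycoord_peak` column (as used in Example #1); the exact version boundary is not pinned down here. A hedged adapter, reusing the names from the function above:

table = find_peaks(data=hdu.data, threshold=significance_threshold, wcs=wcs)
if 'skycoord_peak' in table.colnames:   # newer photutils
    icrs = table['skycoord_peak'].icrs
else:                                   # older photutils
    icrs = SkyCoord(table['icrs_ra_peak'], table['icrs_dec_peak'], unit='deg')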
Example #5
def compute_cross_correlation_distance_normalized(h1, h2, peak_thresh=1, dimension=20, max_peaks=1):
    # zero-shift position in the full (2*dimension - 1) x (2*dimension - 1) correlation matrix
    shft_0 = np.array([dimension - 1, dimension - 1])
    # maximum possible shift distance; note ** is exponentiation (the original
    # `19 ^ 2` was a bitwise XOR, which evaluates to 17, not 361)
    max_dist = np.linalg.norm(shft_0)

    # compute the cross-correlation matrix
    X = signal.correlate2d(h1, h2)

    # find peaks in the matrix (find_peaks returns None when nothing is found)
    peak = find_peaks(data=X, threshold=0, box_size=1, npeaks=max_peaks)

    dist = max_dist
    peak_value = 1.0  # fallback so the normalization below stays well-defined
    if peak is not None:
        for p in peak:
            peak_value = p['peak_value']
            # mean of all correlation values excluding the peak itself
            X[p['y_peak'], p['x_peak']] = 0  # numpy indexing is [row (y), col (x)]
            mean = np.sum(X) / ((2 * dimension - 1) ** 2 - 1)

            if peak_value > peak_thresh * mean:
                # peak is significant: derive the distance from the shift
                dist = np.linalg.norm(np.array([p['x_peak'], p['y_peak']]) - shft_0)
            else:
                # otherwise fall back to the maximum shift distance
                dist = max_dist
    else:
        print("NO PEAKS FOUND")

    return dist / peak_value
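A quick usage sketch under the function's own assumptions (20x20 input histograms, zero shift at (19, 19)); values are illustrative:

import numpy as np

h = np.zeros((20, 20))
h[5:8, 5:8] = 1.0

# identical histograms: the correlation peak sits at zero shift, so dist = 0
print(compute_cross_correlation_distance_normalized(h, h))  # -> 0.0

# shifted copy: nonzero shift distance, normalized by the peak value
h_shifted = np.roll(h, shift=(2, 3), axis=(0, 1))
print(compute_cross_correlation_distance_normalized(h, h_shifted))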
Example #6
fig_iter = plt.figure(figsize=(18, 4))
ax_iter = fig_iter.add_axes([0.1, 0.1, 0.9, 0.9],
                            projection=images['counts'].wcs)
ax_iter.imshow(result['niter'], cmap='afmhot', origin='lower', vmin=0, vmax=20)
ax_iter.coords['glon'].set_axislabel('Galactic Longitude')
ax_iter.coords['glat'].set_axislabel('Galactic Latitude')

# ## Source catalog
#
# Let's run a peak finder on the `sqrt_ts` image to get a list of sources (positions and peak `sqrt_ts` values).

# In[8]:

sources = find_peaks(
    data=result['sqrt_ts'].data,
    threshold=10,
    wcs=result['sqrt_ts'].wcs,
)
sources

# In[9]:

# Plot sources on top of significance sky image
result['sqrt_ts'].cutout(position=SkyCoord(0, 0, unit='deg', frame='galactic'),
                         size=(8 * u.deg, 20 * u.deg)).plot()

plt.gca().scatter(
    sources['icrs_ra_peak'],
    sources['icrs_dec_peak'],
    transform=plt.gca().get_transform('icrs'),
    color='none',
    edgecolor='white',
)

# cut out a smaller piece of the PSF image to save computing time;
# for convenience we're "misusing" the SkyImage class to represent the PSF on the sky.
kernel = images['psf'].cutout(target_position, size=1.1 * u.deg)
kernel.show()

# In[20]:

ts_image_estimator = TSImageEstimator()
images_ts = ts_image_estimator.run(images, kernel.data)
print(images_ts.names)

# In[21]:

# find pointlike sources with sqrt(TS) > 5
sources = find_peaks(data=images_ts['sqrt_ts'].data,
                     threshold=5,
                     wcs=images_ts['sqrt_ts'].wcs)
sources

# In[22]:

# Plot sources on top of significance sky image
images_ts['sqrt_ts'].cutout(position=SkyCoord(0,
                                              0,
                                              unit='deg',
                                              frame='galactic'),
                            size=(3 * u.deg, 4 * u.deg)).plot(add_cbar=True)

plt.gca().scatter(
    sources['icrs_ra_peak'],
    sources['icrs_dec_peak'],
    transform=plt.gca().get_transform('icrs'),
    color='none',
    edgecolor='white',
)
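The overlay pattern above relies on gammapy's SkyImage cutouts; the same idea in plain astropy/matplotlib terms (a WCSAxes projection plus a sky-coordinate scatter) might look like the following sketch, where `data` and `wcs` are assumed to come from one of the images above:

import matplotlib.pyplot as plt
from photutils.detection import find_peaks

sources = find_peaks(data=data, threshold=10, wcs=wcs)

ax = plt.subplot(projection=wcs)
ax.imshow(data, origin='lower', cmap='afmhot')
if sources is not None and 'skycoord_peak' in sources.colnames:
    coords = sources['skycoord_peak'].icrs
    ax.scatter(coords.ra.deg, coords.dec.deg,
               transform=ax.get_transform('icrs'),
               facecolor='none', edgecolor='white')
plt.show()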
Example #8
def find_point_sources(drzname,
                       data=None,
                       mask=None,
                       def_fwhm=2.0,
                       box_size=11,
                       block_size=(1024, 1024),
                       diagnostic_mode=False):
    """ Identify point sources most similar to TinyTim PSFs

    Primary user-interface to identifying point-sources in the
    drizzle product image most similar to the TinyTim PSF for the
    filter-combination closest to that found in the drizzled image.
    The PSFs are pulled, by default, from those installed with the
    code as created using the TinyTim PSF modelling software for
    every direct image filter used by the ACS and WFC3 cameras on HST.

    .. note::
        Sources identified by this function will only have integer pixel
        positions.

    Parameters
    -----------
    drzname : `str`
        Filename of the drizzled image which should be used to find
        point sources.  This will provide the information on the filters
        used on all the input exposures.

    data : `numpy.ndarray`, optional
        If provided, will be used as the image to be evaluated instead
        of opening the file specified in `drzname`.

    mask : `numpy.ndarray`, optional
        If provided, this mask will be used to eliminate regions in the
        input array from being searched for point sources.  Pixels with
        a value of 0 in the mask indicate what pixels should be ignored.

    def_fwhm : `float`, optional
        Default FWHM to use in case the model PSF can not be accurately
        measured by `photutils`.

    box_size : `int`, optional
        Size of the box used to recognize each point source.

    block_size : `tuple`, optional
        (Y, X) size of the block used by the FFT to process the drizzled image.

    diagnostic_mode : `bool`, optional
        Specify whether or not to provide additional diagnostic messages
        and output while processing.

    Returns
    -------
    peaks : `astropy.table.Table`
        Output from `photutils.detection.find_peaks` for all identified sources
        with columns `x_peak`, `y_peak` and `peak_value`.

    psf_fwhm : `float`
        FWHM (in pixels) of PSF used to identify the sources.

    """
    # determine the name of at least 1 input exposure
    calname = determine_input_image(drzname)
    sep = box_size // 2

    if not isinstance(block_size, tuple):
        block_size = tuple(block_size)

    if data is None:
        # load image
        drzhdu = fits.open(drzname)

        sciext = 0 if len(drzhdu) == 1 else ('sci', 1)
        drz = drzhdu[sciext].data.copy()
        drzhdr = drzhdu[sciext].header.copy()
        drzhdu.close()
        del drzhdu

        if mask is not None:
            # Apply any user-specified mask
            drz *= mask
    else:
        drz = data
        drzhdr = None

    if mask is not None:
        # invert the mask
        invmask = np.invert(mask)
    else:
        invmask = None

    # Identify PSF for image
    psfnames = find_psf(drzname)
    # Load PSF and convert to be consistent (orientation) with image
    clean_psfs = not diagnostic_mode

    drzpsfname = convert_library_psf(calname,
                                     drzname,
                                     psfnames,
                                     pixfrac=1.5,
                                     clean_psfs=clean_psfs)
    drzpsf = fits.getdata(drzpsfname)
    # try to measure just the core of the PSF
    # This will be a lot less likely to result in invalid/impossible FWHM values
    max_y, max_x = np.where(drzpsf == drzpsf.max())
    xc = max_x[0]
    yc = max_y[0]
    psf_core = drzpsf[yc - box_size:yc + box_size, xc - box_size:xc + box_size]
    psf_fwhm = amutils.find_fwhm(psf_core, def_fwhm)

    # check value
    if psf_fwhm < 0 or psf_fwhm > 2.0 * def_fwhm:
        # Try a different starting guess for the FWHM
        psf_fwhm = amutils.find_fwhm(psf_core, def_fwhm + 1)

        if psf_fwhm < 0 or psf_fwhm > 2.0 * def_fwhm:
            log.debug(
                "FWHM computed as {}.  Reverting to using default FWHM of {}".
                format(psf_fwhm, def_fwhm))
            psf_fwhm = def_fwhm

    log.info("Library PSF FWHM computed as {}.".format(psf_fwhm))

    # deconvolve the image with the PSF
    decdrz = fft_deconv_img(drz, drzpsf, block_size=block_size)

    if mask is not None:
        decmask = ndimage.binary_erosion(mask, iterations=box_size)
        decdrz *= decmask

    if diagnostic_mode:
        fits.PrimaryHDU(data=decdrz, header=drzhdr).writeto(drzname.replace(
            '.fits', '_deconv.fits'),
                                                            overwrite=True)
        if mask is not None:
            fits.PrimaryHDU(data=decmask.astype(np.uint16)).writeto(
                drzname.replace('.fits', '_deconv_mask.fits'), overwrite=True)
    # find sources in deconvolved image
    dec_peaks = find_peaks(decdrz,
                           threshold=0.0,
                           mask=invmask,
                           box_size=box_size)

    # Use these positions as an initial guess for the final position
    peak_mask = np.zeros_like(drz, dtype=np.uint8)
    # Do this by creating a mask for the original input that only
    # includes pixels within `sep` pixels of each peak from the
    # deconvolved image.
    for peak in dec_peaks:
        x = peak['x_peak']
        y = peak['y_peak']
        peak_mask[y - sep:y + sep + 1, x - sep:x + sep + 1] = 1
    drz *= peak_mask
    if diagnostic_mode:
        fits.PrimaryHDU(data=drz).writeto(drzname.replace(
            '.fits', '_peak_mask.fits'),
                                          overwrite=True)

    # Use this new mask to find the actual peaks in the original input
    # but only to integer pixel precision.
    peaks = find_peaks(drz, threshold=0., box_size=box_size // 2)
    if len(peaks) == 0:
        peaks = None

    # Remove PSF used, unless running in diagnostic_mode
    if not diagnostic_mode:
        if os.path.exists(drzpsfname):
            os.remove(drzpsfname)
    del peak_mask

    return peaks, psf_fwhm
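A hedged usage sketch; the filename is a placeholder, and the helper modules used internally (`find_psf`, `convert_library_psf`, `amutils`, ...) must be importable as in the original package:

peaks, psf_fwhm = find_point_sources('sample_drz.fits',  # placeholder filename
                                     box_size=11,
                                     diagnostic_mode=False)
if peaks is not None:
    print('Found {} point sources (PSF FWHM = {:.2f} pix)'.format(len(peaks), psf_fwhm))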
Example #9
def find(image,
         fwhm,
         method='daophot',
         background='1D',
         frame='diff',
         diag=False):
    '''
    Find all stars above the sky background level using a DAOFind-like algorithm.

    Required inputs:
    image = 2D array of image on which to perform find
    fwhm = FWHM in pixels (1)

    Optional inputs:
    method = Either 'daophot' or 'peaks' to select different finding algorithms
    background = '2D' or '1D' to select 2- or 1-D background estimators
    frame = 'diff' or 'single' to set background behaviour for difference or single frames
    diag = print diagnostic output if True

    Example
    -------
    >>> np.random.seed(0)
    >>> im = np.ones((10,10)) + np.random.uniform(size=(10,10))
    >>> im *= u.ph
    >>> im[5,5] += 5 * u.ph
    >>> star_tbl, bkg_image, threshold = find(im, 1, method='peaks', background='1D', frame='single')
    >>> np.equal(len(star_tbl), 1)
    True
    '''
    from photutils.detection import DAOStarFinder, find_peaks
    from astropy.stats import sigma_clipped_stats

    if frame == 'diff':
        # Determine background RMS:
        bkg_image, sky = estimate_background(image,
                                             method=background,
                                             sigma=5,
                                             diag=diag)
        find_image = image
    elif frame == 'single':
        # Create and subtract a background image and determine background RMS:
        bkg_image, sky = estimate_background(image,
                                             method=background,
                                             sigma=2,
                                             diag=diag)
        find_image = image - bkg_image
    else:
        raise ValueError("frame must be 'diff' or 'single'")

    # Look for sources at twice the background RMS level
    threshold = 2 * sky

    # Make sure the image and threshold units are the same
    threshold = threshold.to(image.unit)

    # Find stars
    if method == 'daophot':
        finder = DAOStarFinder(threshold.value, fwhm)
        star_tbl = finder.find_stars(find_image.value)
        star_tbl['x'], star_tbl['y'] = \
            star_tbl['xcentroid'], star_tbl['ycentroid']
    elif method == 'peaks':
        star_tbl = find_peaks(find_image.value, threshold.value, box_size=3)
        star_tbl['x'], star_tbl['y'] = \
            star_tbl['x_peak'], star_tbl['y_peak']
    else:
        raise ValueError("method must be 'daophot' or 'peaks'")

    # Remove entries outside the image frame (mainly an issue with daophot), then reset the ID column.
    # Note: numpy image shape is (ny, nx), so x is bounded by shape[1] and y by shape[0].
    index = ((star_tbl['x'] < 0) | (star_tbl['y'] < 0) |
             (star_tbl['x'] > image.shape[1]) |
             (star_tbl['y'] > image.shape[0]))
    star_tbl.remove_rows(index)
    star_tbl['id'] = np.arange(len(star_tbl)) + 1

    if diag:
        print("Sky background rms: {}".format(sky))
        print("Found {} stars".format(len(star_tbl)))

    return star_tbl, bkg_image, threshold
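For reference, a minimal sketch of the two finder paths selected by `method` above, stripped of the units and background machinery (threshold and FWHM values are illustrative):

import numpy as np
from photutils.detection import DAOStarFinder, find_peaks

rng = np.random.default_rng(0)
image = rng.normal(10.0, 1.0, size=(50, 50))
image[20, 30] += 50.0

# 'daophot' path: matched-filter detection with sub-pixel centroids
finder = DAOStarFinder(threshold=5.0, fwhm=2.0)
tbl_dao = finder(image - np.median(image))  # columns include xcentroid, ycentroid

# 'peaks' path: simple local maxima with integer pixel positions
tbl_peaks = find_peaks(image, np.median(image) + 5.0, box_size=3)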
def _find_stars(data,
                kernel,
                threshold_eff,
                min_separation=None,
                mask=None,
                exclude_border=False):
    """
    Find stars in an image.

    Parameters
    ----------
    data : 2D array_like
        The 2D array of the image.

    kernel : `_StarFinderKernel`
        The convolution kernel.

    threshold_eff : float
        The absolute image value above which to select sources.  This
        threshold should be the threshold input to the star finder class
        multiplied by the kernel relerr.

    min_separation : float, optional
        The minimum separation for detected objects in pixels.

    mask : 2D bool array, optional
        A boolean mask with the same shape as ``data``, where a `True`
        value indicates the corresponding element of ``data`` is masked.
        Masked pixels are ignored when searching for stars.

    exclude_border : bool, optional
        Set to `True` to exclude sources found within half the size of
        the convolution kernel from the image borders.  The default is
        `False`, which is the mode used by IRAF's `DAOFIND`_ and
        `starfind`_ tasks.

    Returns
    -------
    objects : list of `_StarCutout`
        A list of `_StarCutout` objects containing the image cutout for
        each source.

    .. _DAOFIND: https://iraf.net/irafhelp.php?val=daofind

    .. _starfind: https://iraf.net/irafhelp.php?val=starfind
    """
    convolved_data = _filter_data(data,
                                  kernel.data,
                                  mode='constant',
                                  fill_value=0.0,
                                  check_normalization=False)

    # define a local footprint for the peak finder
    if min_separation is None:  # daofind
        footprint = kernel.mask.astype(bool)
    else:
        # define a circular footprint
        idx = np.arange(-min_separation, min_separation + 1)
        xx, yy = np.meshgrid(idx, idx)
        footprint = np.array((xx**2 + yy**2) <= min_separation**2, dtype=int)

    # pad the data and convolved image by the kernel x/y radius to allow
    # for detections near the edges
    if not exclude_border:
        ypad = kernel.yradius
        xpad = kernel.xradius
        pad = ((ypad, ypad), (xpad, xpad))
        pad_mode = 'constant'
        data = np.pad(data, pad, mode=pad_mode, constant_values=[0.])
        if mask is not None:
            mask = np.pad(mask, pad, mode=pad_mode, constant_values=[0.])
        convolved_data = np.pad(convolved_data,
                                pad,
                                mode=pad_mode,
                                constant_values=[0.])

    # find local peaks in the convolved data
    with warnings.catch_warnings():
        # suppress any NoDetectionsWarning from find_peaks
        warnings.filterwarnings('ignore', category=NoDetectionsWarning)
        tbl = find_peaks(convolved_data,
                         threshold_eff,
                         footprint=footprint,
                         mask=mask)

    if tbl is None:
        return None

    coords = np.transpose([tbl['y_peak'], tbl['x_peak']])

    star_cutouts = []
    for (ypeak, xpeak) in coords:
        # now extract the object from the data, centered on the peak
        # pixel in the convolved image, with the same size as the kernel
        x0 = xpeak - kernel.xradius
        x1 = xpeak + kernel.xradius + 1
        y0 = ypeak - kernel.yradius
        y1 = ypeak + kernel.yradius + 1

        if x0 < 0 or x1 > data.shape[1]:
            continue  # pragma: no cover
        if y0 < 0 or y1 > data.shape[0]:
            continue  # pragma: no cover

        slices = (slice(y0, y1), slice(x0, x1))
        data_cutout = data[slices]
        convdata_cutout = convolved_data[slices]

        # correct pixel values for the previous image padding
        if not exclude_border:
            x0 -= kernel.xradius
            x1 -= kernel.xradius
            y0 -= kernel.yradius
            y1 -= kernel.yradius
            xpeak -= kernel.xradius
            ypeak -= kernel.yradius
            slices = (slice(y0, y1), slice(x0, x1))

        star_cutouts.append(
            _StarCutout(data_cutout, convdata_cutout, slices, xpeak, ypeak,
                        kernel, threshold_eff))

    return star_cutouts
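The circular footprint built above is the detail that enforces `min_separation`; stand-alone, the construction and its effect look like this (the footprint marks the neighborhood within which each detected peak must be the maximum):

import numpy as np
from photutils.detection import find_peaks

min_separation = 2
idx = np.arange(-min_separation, min_separation + 1)
xx, yy = np.meshgrid(idx, idx)
footprint = (xx**2 + yy**2) <= min_separation**2  # 5x5 boolean disk

data = np.zeros((20, 20))
data[5, 5] = 10.0
data[5, 7] = 8.0  # within min_separation of the brighter peak -> suppressed
tbl = find_peaks(data, threshold=1.0, footprint=footprint)  # one detection at (5, 5)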