Example #1
def make_catalog(data, segm, border_width=10, background=None):
    """
    Measure source properties from the data and segmentation image and return a
    trimmed catalog of source measurements.

    data : 2D `~numpy.ndarray`
        Image containing sources to extract.

    segm : `~photutils.segmentation.SegmentationImage`
        Segmentation image created from data.

    border_width : int (default: 10)
        Remove source labels within border_width from the edges of the data.

    background : None or `~photutils.Background2D`
        Background to pass into photutils.source_properties()
    """
    segm.remove_border_labels(border_width=border_width)
    if background is not None:
        prop_cat = photutils.source_properties(
            data, segm, background=background.background)
    else:
        prop_cat = photutils.source_properties(data, segm)
    cat = prop_cat.to_table()
    cat['obs_mag'] = -2.5 * np.log10(cat['source_sum'])
    cat.keep_columns([
        'id', 'xcentroid', 'ycentroid', 'source_sum', 'background_mean',
        'obs_mag'
    ])
    return cat
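A minimal usage sketch for make_catalog (not part of the original example): the file name and detection settings are placeholders, and the legacy photutils interface used above (detect_threshold / detect_sources / source_properties, photutils < 1.1) is assumed.

import photutils
from astropy.io import fits

data = fits.getdata('field.fits')  # placeholder image file
threshold = photutils.detect_threshold(data, nsigma=3.0)
segm = photutils.detect_sources(data, threshold, npixels=10)  # SegmentationImage expected by make_catalog
cat = make_catalog(data, segm, border_width=10)
print(cat)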
Example #2
def find_all_sources(
    image: CCDData,
    snr: float = 3.,  # Threshold SNR for segmentation
    fwhm: float = 5.,  # Kernel FWHM for segmentation
    ksize: int = 5,  # Kernel size
    npixels: int = 10  # Number of connected pixels required to be considered a source
):
    """
    Find extended sources in image with default parameters tuned for expected donut size.
    """
    binning = image.header['BINNING']
    fwhm = int(fwhm / binning)
    ksize = int(ksize / binning)
    npixels = int(npixels / binning)
    threshold = photutils.detect_threshold(image, nsigma=snr)
    sigma = fwhm * stats.gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=ksize, y_size=ksize)
    kernel.normalize()
    segm = photutils.detect_sources(image.data,
                                    threshold,
                                    npixels=npixels,
                                    filter_kernel=kernel)
    cat = photutils.source_properties(image.data, segm, wcs=image.wcs)
    return segm, cat
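A possible call, assuming a FITS file whose header carries the BINNING keyword the function reads; the file name is a placeholder and the same legacy photutils API is assumed.

from astropy.nddata import CCDData

image = CCDData.read('donut_frame.fits', unit='adu')  # placeholder file with a BINNING header keyword
segm, cat = find_all_sources(image, snr=3.0, fwhm=5.0)
print(len(cat), 'extended sources found')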
Example #3
    def set_segment_properties(self):
        """
        Calculate the segment-based source photometry and morphologies.

        The values are set as dynamic attributes.
        """
        source_props = source_properties(self.model.data.astype(float),
                                         self.segment_img,
                                         error=self.error,
                                         filter_kernel=self.kernel,
                                         wcs=self.wcs)

        self._xpeak = source_props.maxval_xpos.value.astype(int)
        self._ypeak = source_props.maxval_ypos.value.astype(int)

        # rename some columns in the output catalog
        prop_names = {}
        prop_names['isophotal_flux'] = 'source_sum'
        prop_names['isophotal_flux_err'] = 'source_sum_err'
        prop_names['isophotal_area'] = 'area'
        prop_names['semimajor_sigma'] = 'semimajor_axis_sigma'
        prop_names['semiminor_sigma'] = 'semiminor_axis_sigma'

        for column in self.segment_colnames:
            # define the property name
            prop_name = prop_names.get(column, column)

            try:
                value = getattr(source_props, prop_name)
            except AttributeError:
                value = getattr(self, prop_name)

            setattr(self, column, value)

        return
Example #4
def find_contour(box, mask, sigma_level):

    """
    This function ...
    :param box:
    :param mask:
    :param sigma_level:
    :return:
    """

    props = source_properties(box, mask)
    #tbl = properties_table(props)

    x_shift = box.x_min
    y_shift = box.y_min

    # Since there is only one segment in the self.source.mask (the center segment), the props
    # list contains only one entry (one galaxy)
    if len(props) == 0: return None
    properties = props[0]

    # Obtain the position, orientation and extent
    position = Position(properties.xcentroid.value + x_shift, properties.ycentroid.value + y_shift)
    a = properties.semimajor_axis_sigma.value * sigma_level
    b = properties.semiminor_axis_sigma.value * sigma_level
    angle = properties.orientation.value # in radians
    angle = Angle(angle, u.rad)

    radius = Extent(a, b)

    # Create and return the elliptical contour
    return Ellipse(position, radius, angle)
Example #5
def snr(hduls, name="SCI"):

	for hdul in hduls:
		data = hdul[name].data

		# identify background rms
		boxsize=(data.shape)
		bkg = Background2D(data, boxsize)
		bkg_mean_rms = np.mean(bkg.background_rms)

		# subtract bkg from image
		new_data = data - bkg.background

		# set threshold and detect sources, threshold 5*std above background
		threshold = detect_threshold(data=new_data, nsigma=5.0, background=0.0)
		SegmentationImage = detect_sources(data=new_data, threshold=threshold, npixels=10)

		SourceCatalog = source_properties(new_data, SegmentationImage)
		columns = ['id', 'xcentroid', 'ycentroid', 'source_sum']

		source_max_values = SourceCatalog.max_value
		avg_source_max_values = np.mean(source_max_values)

		# calculate signal to noise ratio
		signal = avg_source_max_values
		noise = bkg_mean_rms
		SNR = (signal)/(noise)
		hdul["CAT"].header.append(('SNR',SNR,"signal to noise ratio" ))

	return (hdul for hdul in hduls)
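A usage sketch for the snr generator above: the file names are placeholders, and each HDUList is assumed to already contain the "SCI" and "CAT" extensions the function expects.

from astropy.io import fits

hduls = [fits.open(path) for path in ('obs1.fits', 'obs2.fits')]  # placeholder inputs
for hdul in snr(hduls):
    print(hdul['CAT'].header['SNR'])  # SNR keyword appended by the function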
Example #6
    def detect_sources(self):

        self.AllSourceProperties = source_properties(
            self.DetectionImage.sig, self.DeblendedSegmentationImage)

        if self.verbose:
            print(f'{len(self.AllSourceProperties)} objects detected')
Example #7
    def _find_star_properties(self, data, median, mask, star_coo):
        self.info('Finding star properties started')
        sigma = self.config_section.get('fwhm') * gaussian_fwhm_to_sigma
        kernel = Gaussian2DKernel(sigma,
                                  x_size=self.config_section.get('kernel_x'),
                                  y_size=self.config_section.get('kernel_y'))
        kernel.normalize()
        data[mask] = 0
        segm = detect_sources(data,
                              median *
                              self.config_section.get('detect_threshold'),
                              npixels=self.config_section.get('npixels'),
                              filter_kernel=kernel)
        properties = properties_table(
            source_properties(data - np.uint64(median), segm),
            columns=['id', 'xcentroid', 'ycentroid', 'source_sum',
                     'semimajor_axis_sigma', 'semiminor_axis_sigma',
                     'orientation'])

        self.info('Found star properties')
        self.info(properties)

        if len(properties) > 1:
            self.warning('More than one object has been found')
            properties = self._find_nearest_object(
                star_coo, properties, data.shape)
            return properties
        else:
            self.info('Finding star properties finished')
            return properties[0]
Example #8
    def detect(self):

        # Source detection using segmentation
        kernel = astro.convolution.Gaussian2DKernel(self.sigma,
                                                    x_size=3,
                                                    y_size=3)
        kernel.normalize()
        segm = phot.detect_sources(self.data,
                                   self.threshold,
                                   npixels=5,
                                   filter_kernel=kernel)

        # Deblending sources
        segm_deblend = phot.deblend_sources(self.data,
                                            segm,
                                            npixels=5,
                                            filter_kernel=kernel,
                                            nlevels=32,
                                            contrast=0.001)

        cat = phot.source_properties(self.data, segm_deblend, wcs=self.wcs)
        sources = cat.to_table()
        sources['xcentroid'].info.format = '.2f'  # optional format
        sources['ycentroid'].info.format = '.2f'
        sources['cxx'].info.format = '.2f'
        sources['cxy'].info.format = '.2f'
        sources['cyy'].info.format = '.2f'
        sources['gini'].info.format = '.2f'

        return sources.to_pandas().sort_values('max_value', ascending=True)
Example #9
def find_contour(box, mask, sigma_level):

    """
    This function ...
    :param box:
    :param mask:
    :param sigma_level:
    :return:
    """

    props = source_properties(box, mask)
    #tbl = properties_table(props)

    x_shift = box.x_min
    y_shift = box.y_min

    # Since there is only one segment in the self.source.mask (the center segment), the props
    # list contains only one entry (one galaxy)
    if len(props) == 0: return None
    properties = props[0]

    # Obtain the position, orientation and extent
    position = Position(properties.xcentroid.value + x_shift, properties.ycentroid.value + y_shift)
    a = properties.semimajor_axis_sigma.value * sigma_level
    b = properties.semiminor_axis_sigma.value * sigma_level
    angle = properties.orientation.value # in radians
    angle = Angle(angle, u.rad)

    radius = Extent(a, b)

    # Create and return the elliptical contour
    return Ellipse(position, radius, angle)
Example #10
 def source_detection_individual(self, psfFWHM, nsigma=3.0, sc_key=''):
     '''
     Parameters
     ----------
     psfFWHM : float
         FWHM of the imaging point spread function
     nsigma : float
         source detection threshold
     '''
     data = np.array(self.data.copy())
     psfFWHMpix = psfFWHM / self.pixel_scales[0].value
     thresholder = detect_threshold(data, nsigma=nsigma)
     sigma = psfFWHMpix * gaussian_fwhm_to_sigma
     kernel = Gaussian2DKernel(sigma, x_size=5, y_size=5)
     kernel.normalize()
     segm = detect_sources(data,
                           thresholder,
                           npixels=5,
                           filter_kernel=kernel)
     props = source_properties(data, segm)
     tab = Table(props.to_table())
     self.sources_catalog = tab
     srcPstradec = self.data.wcs.all_pix2world(tab['xcentroid'],
                                               tab['ycentroid'], 1)
     sc = SkyCoord(srcPstradec[0], srcPstradec[1], unit='deg')
     sctab = Table([sc, np.arange(len(sc))],
                   names=['sc', 'sloop_{0}'.format(sc_key)])
     self.sources_skycord = sctab
Example #11
def ds9_region(image_path, image, segm, wcs, ds9_region):
    """"Creates ds9 region file.

    This function creates a ds9 region file to display the sources
    detected by the segmentation function. This file is written to
    the same directory the fits files are in.

        Args:
            image_path(str, required):    Image path to a particular FITS file
            image(array, required):       This is the image data
            segm:                         The segmentation image
            wcs:                          World Coordinate System object
            ds9_region(boolean, opt):     If true, creates region file
            """
    if ds9_region is True:
        data_path = os.path.splitext(image_path)
        region_path = str(data_path[0]) + '_ds9region'
        scale = proj_plane_pixel_scales(wcs)
        image_scale = scale[0]
        reg = source_properties(image, segm, wcs=wcs)
        with open(region_path + '.reg', 'w') as f:
            f.write('# Region file format: DS9 version 7.6\n\n')
            f.write('global color=#ff7733\n')
            f.write('global width=2\n')
            f.write('fk5\n\n')
            for i in range(0, len(reg.id)):
                x = reg[i].sky_centroid_icrs.ra.to(u.deg)
                y = reg[i].sky_centroid_icrs.dec
                r = image_scale * reg[i].equivalent_radius
                f.write('circle(' + str(x.value) + ',' + str(y.value) + ',' +
                        str(r.value) + ')' + '   # Source Number:' +
                        str(reg[i].id) + '\n')
Example #12
 def make_properties(self):
     """
     characterize source properties given a segmentation map (from Segmentation.make_segmentation)
     """
     from photutils import source_properties
     data = self.data
     segmentation = self.segmentation.value
     self.properties = source_properties(data, segmentation)
Example #13
def source_find(img, ota, inst, nbg_std=10.0):
    """
    This function will find sources on an OTA using the detect_sources module
    from photutils. This will return a csv file of the sources found with the
    x, y, RA, Dec, source_sum, max_value, and elongation of each source. The
    elongation parameter is semimajor_axis / semiminor_axis.
    This output is needed for the source_xy function. This function is set
    to work on the reprojected otas.

    Parameters
    ----------
    img : str
        Name of image
    ota : str
        Name of OTA
    inst : str
        Version of ODI used, ``podi`` or ``5odi``
    nbg_std : float
        Multiplier to the standard deviation of the background. It has a default
        value of ``10`` to only detect bright sources

    Note
    ----
    This function produces a ``csv`` file in ``odi.sourcepath`` with the
    following naming convention ``'source_'+ota+'.'+img.base()+'.csv'``.

    """
    image = odi.reprojpath + 'reproj_' + ota + '.' + img.stem()
    QR_raw = odi.fits.open(image)
    # hdu_ota = QR_raw[0]

    hdu_ota = odi.tan_header_fix(QR_raw[0])

    w = odi.WCS(hdu_ota.header)
    # needed to remind astropy that the header says RADESYS=ICRS
    # your mileage may vary (logic probably needed here to handle cases)
    w.wcs.radesys = 'ICRS'
    # if inst == '5odi':
    #     w.wcs.ctype = ["RA---TPV", "DEC--TPV"]
    bg_mean, bg_median, bg_std = odi.mask_ota(img, ota, reproj=True)
    threshold = bg_median + (bg_std * nbg_std)
    print(bg_mean, bg_median, bg_std)
    segm_img = detect_sources(hdu_ota.data, threshold, npixels=20)
    source_props = source_properties(hdu_ota.data, segm_img, wcs=w)

    columns = [
        'id', 'xcentroid', 'ycentroid', 'ra_icrs_centroid',
        'dec_icrs_centroid', 'source_sum', 'max_value', 'elongation'
    ]
    source_tbl = properties_table(source_props, columns=columns)
    source_tbl_df = source_tbl.to_pandas()

    outputfile = odi.sourcepath + 'source_' + ota + '.' + img.base() + '.csv'

    source_tbl_df.to_csv(outputfile, index=False)
    QR_raw.close()
Example #14
    def measure_sources(self):
        """Use the positions of the sources identified in the white light image to
        measure properties of these sources in the filter images

        An instrument/detector combination may have multiple filter-level products.
        This routine is called for each filter image which is then measured to generate
        a filter-level source catalog based on object positions measured in the total
        detection product image.

        Parameters
        ----------
        segm : `~photutils.segmentation.SegmentationImage`
            Two-dimensional image of labeled source regions based on the "white light" drizzled product

        kernel : `~astropy.convolution`
            Two dimensional function of a specified FWHM used to smooth the image and
            used in the detection of sources as well as for the determination of the
            source properties (this routine)

        catalog_filename : string
            Name of the output source catalog for the filter detection product

        Returns
        -------

        """
        # TODO: Finish up and optimize HAPSegmentCatalog.measure_sources()

        # get filter-level science data
        imgarr = self.image.data.copy()

        # Report configuration values to log
        log.info("{}".format("=" * 80))
        log.info("")
        log.info(
            "SExtractor-like source property measurements based on Photutils segmentation"
        )
        log.info("Filter Level Product - Input Parameters")
        log.info("FWHM: {}".format(self.fwhm))
        log.info("size_source_box: {}".format(self.size_source_box))
        log.info("")
        log.info("{}".format("=" * 80))

        # The data needs to be background subtracted when computing the source properties
        bkg = self.image.bkg

        imgarr_bkgsub = imgarr - bkg.background

        # Compute source properties...
        self.source_cat = source_properties(imgarr_bkgsub,
                                            self.sources,
                                            background=bkg.background,
                                            filter_kernel=self.kernel,
                                            wcs=self.image.imgwcs)
        log.info("Found {} sources from segmentation map".format(
            len(self.source_cat)))
Example #15
def detect_obj(img, snr=2.8, exp_sz= 1.2, plt_show = True):
    threshold = detect_threshold(img, snr=snr)
    center_img = len(img)/2
    sigma = 3.0 * gaussian_fwhm_to_sigma  # FWHM = 3.
    kernel = Gaussian2DKernel(sigma, x_size=5, y_size=5)
    kernel.normalize()
    segm = detect_sources(img, threshold, npixels=10, filter_kernel=kernel)
    npixels = 20
    segm_deblend = deblend_sources(img, segm, npixels=npixels,
                                    filter_kernel=kernel, nlevels=25,
                                    contrast=0.001)
    #Number of objects segm_deblend.data.max()
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12.5, 10))
    import copy, matplotlib
    my_cmap = copy.copy(matplotlib.cm.get_cmap('gist_heat')) # copy the default cmap
    my_cmap.set_bad('black')
    vmin = 1.e-3
    vmax = 2.1 
    ax1.imshow(img, origin='lower', cmap=my_cmap, norm=LogNorm(), vmin=vmin, vmax=vmax)
    ax1.set_title('Data')
    ax2.imshow(segm_deblend, origin='lower', cmap=segm_deblend.cmap(random_state=12345))
    ax2.set_title('Segmentation Image')
    plt.show()
    
    columns = ['id', 'xcentroid', 'ycentroid', 'source_sum', 'area']
    cat = source_properties(img, segm_deblend)
    tbl = cat.to_table(columns=columns)
    tbl['xcentroid'].info.format = '.2f'  # optional format
    tbl['ycentroid'].info.format = '.2f'
    print(tbl)
    cat = source_properties(img, segm_deblend)
    objs = []
    for obj in cat:
        position = (obj.xcentroid.value-center_img, obj.ycentroid.value-center_img)
        a_o = obj.semimajor_axis_sigma.value
        b_o = obj.semiminor_axis_sigma.value
        Re = np.pi * a_o * b_o /2.
        q = 1 - obj.ellipticity.to_value()
        objs.append((position,Re,q))
    dis_sq = [np.sqrt((objs[i][0][0])**2+(objs[i][0][1])**2) for i in range(len(objs))]
    dis_sq = np.array(dis_sq)
    c_index= np.where(dis_sq == dis_sq.min())[0][0]
    return objs, c_index
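A short usage sketch: the cutout file is a placeholder, and the module-level imports detect_obj relies on (matplotlib, photutils, astropy) are assumed to be in place.

from astropy.io import fits

img = fits.getdata('lens_cutout.fits')  # placeholder square cutout centred on the target
objs, c_index = detect_obj(img, snr=2.8)
position, Re, q = objs[c_index]  # position offset, size, and axis ratio of the central object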
Example #16
def deblend_sources(in_image, segm_obj, kernel, errmap, ext_name):
    fo = fits.open(in_image, "append")
    hdu = fo[ext_name]

    if segm_obj is None:
        nhdu = fits.ImageHDU()

        # save segmap and info
        nhdu.header["EXTNAME"] = "DEBLEND"

        thdu = fits.BinTableHDU()
        thdu.header["EXTNAME"] = "DEBLEND_PROPS"

        fo.append(nhdu)
        fo.append(thdu)

        fo.flush()
        fo.close()
        return None

    segm_obj = photutils.deblend_sources(hdu.data,
                                         segm_obj,
                                         npixels=5,
                                         filter_kernel=kernel)
    segmap = segm_obj.data

    props = photutils.source_properties(hdu.data, segmap, errmap)
    props_table = astropy.table.Table(props.to_table())
    # these give problems given their format/NoneType objects
    props_table.remove_columns([
        "sky_centroid",
        "sky_centroid_icrs",
        "source_sum_err",
        "background_sum",
        "background_mean",
        "background_at_centroid",
    ])
    nhdu = fits.ImageHDU(segmap)

    # save segmap and info
    nhdu.header["EXTNAME"] = "DEBLEND"

    thdu = fits.BinTableHDU(props_table)
    thdu.header["EXTNAME"] = "DEBLEND_PROPS"

    fo.append(nhdu)
    fo.append(thdu)

    fo.flush()
    fo.close()
    return segm_obj
Example #17
    def detect_sources(self, threshold=False, npixels=10):

        if not threshold:

            # threshold = np.min(self.img[self.img>0])

            threshold = np.sum(self.img.img) / 1000.

        self.segm = detect_sources(self.img.img, threshold, npixels=npixels)

        self.cat = source_properties(self.img.img, self.segm)

        for i, o in enumerate(self.cat):
            print(i, o.centroid, o.source_sum / np.sum(self.img.img))
Example #18
    def create_seg_map(self):
        '''
        Creates segmentation map, from original FLT file, that is used in 
        background subtraction and to fix cosmic rays.

        Parameters
        ----------
        self : object
            DashData object created from an individual IMA file.

        Output
        ------
        Segmentation Image : fits file
            Segmentation map
        Source List : .dat file
            List of sources and their properties
        '''

        flt = fits.open(self.flt_file_name)
        data = flt[1].data

        threshold = detect_threshold(data, nsigma=3.)

        sigma = 3.0 * gaussian_fwhm_to_sigma  # FWHM = 3.
        kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
        kernel.normalize()
        segm = detect_sources(data,
                              threshold,
                              npixels=10,
                              filter_kernel=kernel)

        hdu = fits.PrimaryHDU(segm.data)
        if not os.path.exists('segmentation_maps'):
            os.mkdir('segmentation_maps')
        hdu.writeto(('segmentation_maps/{}_seg.fits').format(self.root),
                    overwrite=True)

        # Create source list
        cat = source_properties(data, segm)

        tbl = cat.to_table()
        tbl['xcentroid'].info.format = '.2f'
        tbl['ycentroid'].info.format = '.2f'
        tbl['cxx'].info.format = '.2f'
        tbl['cxy'].info.format = '.2f'
        tbl['cyy'].info.format = '.2f'

        ascii.write(tbl,
                    'segmentation_maps/{}_source_list.dat'.format(self.root))
Example #19
def find_centroid(data):
    """
    find the centroid again after the image was rotated
    """

    sigma_clip = SigmaClip(sigma=3., iters=10)
    bkg_estimator = MedianBackground()
    bkg = Background2D(data, (25, 25),
                       filter_size=(3, 3),
                       sigma_clip=sigma_clip,
                       bkg_estimator=bkg_estimator)

    threshold = bkg.background + (3. * bkg.background_rms)

    sigma = 2.0 * gaussian_fwhm_to_sigma  # FWHM = 2.
    kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
    kernel.normalize()
    segm = detect_sources(data, threshold, npixels=5, filter_kernel=kernel)

    props = source_properties(data, segm)
    tbl = properties_table(props)

    my_min = 100000.

    x_shape = np.float(data.shape[0])
    y_shape = np.float(data.shape[1])

    r = 3.  # approximate isophotal extent
    apertures = []
    for prop in props:
        position = (prop.xcentroid.value, prop.ycentroid.value)
        #print(position)
        a = prop.semimajor_axis_sigma.value * r
        b = prop.semiminor_axis_sigma.value * r
        theta = prop.orientation.value
        apertures.append(EllipticalAperture(position, a, b, theta=theta))
        my_dist = np.sqrt((prop.xcentroid.value - x_shape / 2.)**2 +
                          (prop.ycentroid.value - y_shape / 2.)**2)
        if (my_dist < my_min):
            my_label = prop.id - 1
            my_min = my_dist

    mytheta = props[my_label].orientation.value
    mysize = np.int(np.round(r * props[my_label].semimajor_axis_sigma.value))
    my_x = props[my_label].xcentroid.value
    my_y = props[my_label].ycentroid.value
    return my_x, my_y, mytheta, mysize
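A usage sketch for find_centroid: the file name is a placeholder, and the legacy photutils properties_table and astropy SigmaClip(iters=...) interfaces used above are assumed to be available.

from astropy.io import fits

data = fits.getdata('rotated_frame.fits')  # placeholder rotated image
my_x, my_y, mytheta, mysize = find_centroid(data)
print(my_x, my_y, mytheta, mysize)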
Example #20
def detect_sources_segmap(data,
                          threshold,
                          npixels,
                          kernel_fwhm=1.8,
                          show=False):
    """
    Runs image segmentation to detect sources in `data`.

    Parameters
    ----------
    data : array
        Image array.
    threshold : float or array
        Detection threshold value, or pixel-wise threshold image (must be same
        shape as `data`.)
    npixels : int
        Positive integer number of connected pixels, each greater than
        `threshold` that an object must have to be detected.
    kernel_fwhm : float
        FWHM of gaussian kernel used to smooth image before segmentation.
    show : bool
        Show a plot of detected source(s).

    Returns
    -------
    coo_tab : `astropy.table.Table` or int
        Table with detected source(s). Returns '0' if no sources are detected.

    """

    sigma = kernel_fwhm * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
    kernel.normalize()
    segm = detect_sources(data,
                          threshold=threshold,
                          npixels=npixels,
                          filter_kernel=kernel)
    if segm:
        coo_tab = source_properties(data, segm).to_table()
        if show:
            show_source_detection_plot(data, coo_tab)
        return coo_tab

    if not segm:
        if show:
            show_source_detection_plot(data, None)
        return 0
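A hedged usage sketch: the image file and the sigma-clipped scalar threshold are placeholder choices, not from the original code.

from astropy.io import fits
from astropy.stats import sigma_clipped_stats

data = fits.getdata('stamp.fits')  # placeholder image
mean, median, std = sigma_clipped_stats(data, sigma=3.0)
coo_tab = detect_sources_segmap(data, threshold=median + 5.0 * std, npixels=10)
print(coo_tab)  # astropy Table of detections, or 0 if nothing was found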
Example #21
def getsegmentation(image):
    '''
    Pass in an image name.
    Returns the segmentation map data and a table of source properties.
    '''
    # read in image
    data = fits.getdata(image)
    threshold = detect_threshold(data, nsigma=2.)
    # create segmentation map
    segm = detect_sources(data, threshold, npixels=25)

    # skipping deblending for now
    
    # create catalog
    cat = source_properties(data, segm)
    tbl = cat.to_table()    
    return segm.data, tbl
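Possible usage (the file name is a placeholder):

segmap, tbl = getsegmentation('galaxy_image.fits')  # placeholder FITS image
print(tbl['xcentroid', 'ycentroid', 'source_sum'])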
Example #22
def extract_sources(img, fwhm=2.0, threshold=None, source_box=7,
                    sharp=None, output=None, plot=False, vmax=None):
    """Use photutils to find sources in image based on segmentation."""
    if threshold is None:
        bkg_estimator = MedianBackground()
        bkg = Background2D(img, (50, 50), filter_size=(3, 3),
                           bkg_estimator=bkg_estimator)
        threshold = bkg.background + (3. * bkg.background_rms)
    sigma = fwhm * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=source_box, y_size=source_box)
    kernel.normalize()
    segm = detect_sources(img, threshold, npixels=source_box,
                          filter_kernel=kernel)
    cat = source_properties(img, segm)
    print("Total Number of detected sources: {}".format(len(cat)))
    if sharp:
        # Remove sources that do not fall within specified sharpness limit
        newcat = photutils.segmentation.properties.SourceCatalog([])
        for obj in cat:
            src_peak = (obj.max_value - (obj.source_sum/obj.area.value))
            kern_peak = (kernel*obj.source_sum).array.max()
            sharpness = src_peak/kern_peak
            if sharpness < sharp[0] or sharpness > sharp[1]:
                newcat._data.append(obj)
    else:
        newcat = cat
    print("Final Number of selected sources: {}".format(len(newcat)))
    if output:
        tbl = newcat.to_table()
        tbl['xcentroid'].info.format = '.10f'  # optional format
        tbl['ycentroid'].info.format = '.10f'
        tbl['cxy'].info.format = '.10f'
        tbl['cyy'].info.format = '.10f'
        tbl.write(output, format='ascii.ecsv')

    if plot:
        norm = None
        if vmax is None:
            norm = ImageNormalize(stretch=SqrtStretch())
        fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
        ax1.imshow(img, origin='lower', cmap='Greys_r', norm=norm, vmax=vmax)
        ax2.imshow(segm, origin='lower', cmap=segm.cmap(random_state=12345))

    return newcat, segm
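A usage sketch for extract_sources above (placeholder file names; the legacy photutils segmentation API assumed throughout this page):

from astropy.io import fits

img = fits.getdata('drizzled.fits')  # placeholder drizzled image
cat, segm = extract_sources(img, fwhm=2.0, source_box=7, output='sources.ecsv')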
Example #23
    def find_sources(data_sub, x_size=5, y_size=5, npixels=10, connectivity=8):
        '''
        Using photutils to detect the sources within the full fits files

        Arguments:
            data_sub: Background subtracted fits file

        Optional Arguments:
            x_size: x extent of the kernel which slides over the image to detect the sources
                    -- defaults to 5 pixels
            y_size: y extent of the kernel which slides over the image to detect the sources
                    -- defaults to 5 pixels
            npixels: number of connected pixels that are greater than the threshold to count a source
                    -- defaults to 10 pixels
            connectivity: the type of pixel connectivity used in determining how pixels are grouped into a detected source
                    -- defaults to 8 (pixels which touch along their edges or corners)
        '''

        print('Finding sources using photutils')
        start = time.time()

        median = np.median(data_sub)
        std = mad_std(data_sub)

        # threshold 5 sigma above the median background
        threshold = median + (5.0 * std)
        sigma = 5.0 * gaussian_fwhm_to_sigma
        kernel = Gaussian2DKernel(sigma, x_size=x_size, y_size=y_size)  # Kernel defaults to 8*stddev
        kernel.normalize()

        segmented_image = detect_sources(data_sub, threshold, npixels=npixels, filter_kernel=kernel, connectivity=connectivity)
        segmented_image_deblend = deblend_sources(data_sub, segmented_image, npixels=npixels, filter_kernel=kernel, connectivity=connectivity)

        cat = source_properties(data_sub, segmented_image_deblend)

        # Getting values of the individual stars to place into a table
        x_pos = cat.xcentroid.value
        y_pos = cat.ycentroid.value
        area = cat.area.value
        max_pixel_val = cat.max_value
        ids = cat.id

        return ids, x_pos, y_pos, area, max_pixel_val
Example #24
 def get_dimensions(mask):
     """Get dimensions of labeled regions in labeled mask."""
     properties = photutils.source_properties(mask, mask)
     if not properties:
         return None
     tbl = properties.to_table()  # Convert to table
     lbl = np.array(tbl['min_value'], dtype=np.int16)
     reg_x = tbl['xcentroid']
     reg_y = tbl['ycentroid']
     reg_r = tbl['equivalent_radius']
     reg_area = tbl['area']
     perimeter = tbl['perimeter']
     eccentricity = tbl['eccentricity']
     pdata = np.array(
         [lbl, reg_x, reg_y, reg_r, reg_area, perimeter, eccentricity]).T
     dims = pd.DataFrame(data=pdata,
                         columns=[
                             'label', 'x_centroid', 'y_centroid', 'radius',
                             'area', 'perimeter', 'eccentricity'
                         ])
     return dims
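A usage sketch, assuming a labeled mask built with scipy.ndimage.label; scipy is not used by the original snippet, and the data and threshold are placeholders.

import numpy as np
from scipy import ndimage

rng = np.random.default_rng(0)
image = rng.normal(size=(64, 64))                    # placeholder data
labeled_mask, nlabels = ndimage.label(image > 2.0)   # hypothetical detection threshold
dims = get_dimensions(labeled_mask)                  # DataFrame of per-region geometry, or None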
Example #25
def grid_smooth(i_ra_f, i_dec_f, fwhm, width, height):
    # bin the filtered stars into a grid with pixel size XXX
    # print "Binning for m-M =",dm
    # bins = 165
    # width = 30
    bins_h = int(height * 60. / 8.)
    bins_w = int(width * 60. / 8.)

    grid, xedges, yedges = np.histogram2d(i_dec_f, i_ra_f, bins=[bins_h,bins_w], range=[[0,height],[0,width]])
    hist_points = zip(xedges,yedges)

    sig = ((bins_w/width)*fwhm)/2.355
    pltsig = fwhm/2.0

    # convolve the grid with a gaussian
    grid_gaus = ndimage.filters.gaussian_filter(grid, sig, mode='constant', cval=0)
    S = np.array(grid_gaus*0)
    S_th = 3.0

    grid_mean = np.mean(grid_gaus)
    grid_sigma = np.std(grid_gaus)
    S = (grid_gaus-grid_mean)/grid_sigma

    above_th = [(int(i),int(j)) for i in range(len(S)) for j in range(len(S[i])) if (S[i][j] >= S_th)]

    segm = detect_sources(S, 2.0, npixels=5)
    props = source_properties(S, segm)
    columns = ['id', 'maxval_xpos', 'maxval_ypos', 'max_value', 'area']
    tbl = properties_table(props, columns=columns)
    # print tbl
    # rand_cmap = random_cmap(segm.max + 1, random_state=12345)

    # find the maximum point in the grid and center the circle there
    x_cent, y_cent = np.unravel_index(grid_gaus.argmax(),grid_gaus.shape)
    x_cent_S, y_cent_S = np.unravel_index(S.argmax(),S.shape)
    # print 'Max of S located at:','('+'{0:6.3f}'.format(y_cent_S)+','+'{0:6.3f}'.format(x_cent_S)+')'
    # print 'Value of S at above:','{0:6.3f}'.format(S[x_cent_S][y_cent_S])
    # print 'Number of bins above S_th: {0:4d}'.format(len(above_th))
    return xedges, x_cent, yedges, y_cent, S, x_cent_S, y_cent_S, pltsig, tbl, segm
Example #26
def calc_fwhm_on_bright_star(image_file, print=True, fwhm_init=2.0):
    """Calculate the FWHM on a single bright star (either open or closed loops).

    image_file -- either FITS or PGM
    fwhm_init -- (def=2) pixels for FWHM initial guess

    """

    img = load_image(image_file)

    # Calculate the background
    bkg = photutils.Background(img,
                               img.shape,
                               filter_shape=(1, 1),
                               method='median')

    threshold = bkg.background + (30.0 * bkg.background_rms)

    sigma = 2.0 * gaussian_fwhm_to_sigma  # FWHM = 2. pixels
    kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
    kernel.normalize()
    segm = detect_sources(img, threshold, npixels=5, filter_kernel=kernel)

    props = source_properties(img, segm)
    tbl = properties_table(props)

    # Check for junk stars (cosmic rays)
    idx = np.where((tbl['semimajor_axis_sigma'] > 1)
                   & (tbl['semiminor_axis_sigma'] > 1))[0]
    tbl = tbl[idx]

    tbl['image_name'] = image_file

    if print == True:
        reformat_source_table(tbl)
        print_source_table(tbl)

    return tbl
Example #27
def find_contours(data, segments, sigma_level):

    """
    This function ...
    :param data:
    :param segments:
    :param sigma_level:
    :return:
    """

    # Initialize a list for the contours
    contours = []

    # Get the segment properties
    # Since there is only one segment in the source.mask (the center segment), the props
    # list contains only one entry (one galaxy)
    properties_list = source_properties(data, segments)

    for properties in properties_list:

        # Obtain the position, orientation and extent
        position = Position(properties.xcentroid.value, properties.ycentroid.value)
        a = properties.semimajor_axis_sigma.value * sigma_level
        b = properties.semiminor_axis_sigma.value * sigma_level
        angle = properties.orientation.value # in radians
        angle = Angle(angle, u.rad)

        radius = Extent(a, b)

        meta = {"text": str(properties.label)}

        # Create the contour
        contours.append(Ellipse(position, radius, angle, meta=meta))

    # Return the contours
    return contours
Example #28
def source_find(img, ota, inst):
    """
    This function will find sources on an OTA
    using the detect_sources module from photutils.
    This will return a csv file of the sources found
    with the x, y, RA, Dec, source_sum, max_value, and
    elongation of the source. The elongation parameter is
    semimajor_axis / semiminor_axis. This output is needed
    for the source_xy function.
    """
    image = odi.reprojpath+'reproj_'+ota+'.'+str(img[16:])
    QR_raw = odi.fits.open(image)
    hdu_ota = QR_raw[0]

    if inst == 'podi':
        pvlist = hdu_ota.header['PV*']
        for pv in pvlist:
            tpv = 'T'+pv
            hdu_ota.header.rename_keyword(pv, tpv, force=False)
    w = odi.WCS(hdu_ota.header)
    #w.wcs.ctype = ["RA---TPV", "DEC--TPV"]
    bg_mean,bg_median,bg_std = odi.mask_ota(img,ota,reproj=True)
    threshold = bg_median + (bg_std * 5.)
    print(bg_mean, bg_median, bg_std)
    segm_img = detect_sources(hdu_ota.data, threshold, npixels=20)
    source_props = source_properties(hdu_ota.data,segm_img,wcs=w)

    columns = ['id', 'xcentroid', 'ycentroid', 'ra_icrs_centroid',
               'dec_icrs_centroid', 'source_sum', 'max_value', 'elongation']
    source_tbl = properties_table(source_props, columns=columns)
    source_tbl_df = source_tbl.to_pandas()

    outputfile = odi.sourcepath+'source_'+ota+'.'+str(img[16:-5])+'.csv'

    source_tbl_df.to_csv(outputfile,index=False)
    QR_raw.close()
Example #29
def find_contours(data, segments, sigma_level):

    """
    This function ...
    :param data:
    :param segments:
    :param sigma_level:
    :return:
    """

    # Initialize a list for the contours
    contours = []

    # Get the segment properties
    # Since there is only one segment in the source.mask (the center segment), the props
    # list contains only one entry (one galaxy)
    properties_list = source_properties(data, segments)

    for properties in properties_list:

        # Obtain the position, orientation and extent
        position = Position(properties.xcentroid.value, properties.ycentroid.value)
        a = properties.semimajor_axis_sigma.value * sigma_level
        b = properties.semiminor_axis_sigma.value * sigma_level
        angle = properties.orientation.value # in radians
        angle = Angle(angle, u.rad)

        radius = Extent(a, b)

        meta = {"text": str(properties.label)}

        # Create the contour
        contours.append(Ellipse(position, radius, angle, meta=meta))

    # Return the contours
    return contours
Example #30
def run_single_source(imfile, errfile, filter, true_pos, find_source=True, pclass='E',
                      extent=3.5, plot=True, name='Source'):

    print('Aperture photometry for', name, 'in Herschel filter', filter)
    
    # Load the data
    print('Loading the data...')
    if imfile != errfile:
        hdu_image = pyf.open(imfile)[0]
        hdu_err = pyf.open(errfile)[0]
    else:
        data = pyf.getdata(imfile)
        hdr = pyf.getheader(imfile)
        hdr.remove('NAXIS3')
        hdr['NAXIS'] = 2
        hdu_image = pyf.PrimaryHDU(data=data[0], header=hdr)
        hdu_err = pyf.PrimaryHDU(data=data[1], header=hdr)
    
    # Convert to Jy/pixel
    if ((filter == 'PSW') | (filter == 'PMW') | (filter == 'PLW')):
        print('SPIRE image units being converted to Jy/pixel...')
        hdu_image = prep_image(hdu_image, filter)
        hdu_err = prep_image(hdu_err, filter) 

    # Calculate the global background
    print('Calculating the global background...')
    im = hdu_image.data
    im_med, im_std = estimate_bkg(im)
    print('Background median =', im_med)
    print('Background rms =', im_std)
    
    # Use a 1.5 sigma threshold to detect the BAT source
    thresh = im_med + 1.5*im_std
    segm_img = detect_sources(im, thresh, npixels=5)
    props = source_properties(im-im_med, segm_img, wcs=wcs.WCS(hdu_image.header))
    
    # Find the source in the image
    if (find_source) & ((pclass != 'U') & (pclass != 'C')) & (len(props) > 0):
        print('Finding the source in the FOV...')
        ind_src = find_bat_source(true_pos, props, 2*FWHM[filter])
    else:
        ind_src = None
    
    # Create the source aperture            
    if ind_src is None:
        print('Source not found or user fixed position of source aperture.')
        print('Creating the source aperture centered at RA=', true_pos.ra.deg, ' and DEC=', true_pos.dec.deg)
        ap, type = create_aperture(None, filter, pclass, coord_bat=true_pos,
                                   wcs=wcs.WCS(hdu_image.header)) 
            
    else:
        print('Source found in the FOV!')
        print('Creating the source aperture centered at X=', props[ind_src].xcentroid.value, ' and Y=', props[ind_src].ycentroid.value)
        ap, type = create_aperture(props[ind_src], filter, pclass, extent=extent,
                                   coord_bat=true_pos, wcs=wcs.WCS(hdu_image.header))
    
    # Create new mask based on 2-sigma threshold and remove the bat source if detected
    print('Creating a new mask to not include the source...')
    thresh2 = im_med + 2.0*im_std
    segm_img2 = detect_sources(im, thresh2, npixels=5)
    props2 = source_properties(im-im_med, segm_img2, wcs=wcs.WCS(hdu_image.header))
    nanmask = np.isnan(im)
    nanerr = np.isnan(hdu_err.data) | np.isinf(hdu_err.data)
    
    if len(props2) != 0:
        ind_src2 = find_bat_source(true_pos, props2, 2*FWHM[filter])
    else:
        ind_src2 = None

    if ind_src2 is None:
        mask = np.logical_not(segm_img2.data_masked.mask) | nanmask | nanerr
    else:
        segm_img2.remove_labels(ind_src2 + 1)
        mask = np.logical_not(segm_img2.data_masked.mask) | nanmask | nanerr
    
    # Run the aperture photometry
    print('Calculating the photometry!')
    if (pclass != 'C'):    
        results, apertures = herschel_aperture_photometry(ap, hdu_image, hdu_err.data,
                                                          type, filter, pclass=pclass,
                                                          bkg=im_med, mask=mask)
    else:
        results, apertures = herschel_aperture_photometry(ap, hdu_image, hdu_err.data,
                                                          type, filter, pclass=pclass,
                                                          bkg=im_med, mask=None)
    type = results['type']
    print(results)
    # Plot the apertures on top of the data
    if plot:
        print('Plotting the apertures used for photometry.')
        cbat = [true_pos.ra.deg, true_pos.dec.deg]
        
        if (type == 'fixed'):
            pmax = None
        else:
            pmax = props[ind_src].max_value + im_med
        
        if (pmax is not None) and (pmax < im_med):
            pmax = None

        fig = plot_aperture_photometry(hdu_image, apertures, filter, global_bkg=im_med,
                                       pixel_max=pmax, title=name+' ['+filter+']', plot_bat_loc=cbat)
        
        print('All done!')
        return results, apertures, fig
    
    else:
        print('All done!')
        return results, apertures
Example #31
def get_sources(detection_frame,
                mask=False,
                sigma=5.0,
                mode='DAO',
                fwhm=2.5,
                threshold=None,
                npix=4,
                return_segm_image=False):
    """
    Main method used to identify sources in a detection frame and estimate their position.
    Different modes are available, accessible through the ``mode`` keyword:

    * DAO : uses the :class:`photutils:photutils.DAOStarFinder` method, adapted from DAOPHOT.
    * IRAF : uses the :class:`photutils:photutils.IRAFStarFinder` method, adapted from IRAF.
    * PEAK : uses the :func:`photutils:photutils.find_peaks` method, looking for local peaks above a given threshold.
    * ORB : uses the :func:`ORB:orb.utils.astrometry.detect_stars` method, fitting stars in the frame
    * SEGM : uses the :func:`photutils:photutils.detect_sources` method, segmenting the image.

    The most reliable is SEGM.

    Parameters
    ----------
    detection_frame : 2D :class:`~numpy:numpy.ndarray`
        Map on which the sources should be visible.
    mask : 2D :class:`~numpy:numpy.ndarray` or bool,  Default = False
        (Optional) If passed, only sources inside the mask are detected.
    sigma : float
        (Optional) Signal to Noise of the detections we want to keep. Only used if threshold is None. In this case, the signal and the noise are computed with sigma-clipping on the detection frame. Default = 5
    threshold : float or 2D :class:`~numpy:numpy.ndarray` of floats
        (Optional) Threshold above which we consider having a detection. Default is None
    mode : str
        (Optional) One of the detection modes listed above. Default = 'DAO'
    fwhm : float
        (Optional) Expected FWHM of the sources. Default : 2.5
    npix : int
        (Optional) Only used by the 'SEGM' method : minimum number of connected pixels with flux above the threshold to make a credible source. Default = 4
    return_segm_image : bool, Default = False
        (Optional) Only used in the 'SEGM' mode. If True, returns the obtained segmentation image.

    Returns
    -------
    sources : :class:`~pandas:pandas.DataFrame`
        A DataFrame where each row represents a detection, with at least the positions named as ``xcentroid``, ``ycentroid`` (WARNING : using astropy convention). The other columns depend on the mode used.

    """
    if mask is False:
        mask = np.ones_like(detection_frame)
    if threshold is None:
        mean, median, std = sigma_clipped_stats(
            detection_frame, sigma=3.0, iters=5,
            mask=~mask.astype(bool))  # mask out the region outside the annulus
        threshold = median + sigma * std
    # detect over the whole frame, but keep only what actually falls inside the annulus
    if mode == 'DAO':
        daofind = DAOStarFinder(fwhm=fwhm, threshold=threshold)
        sources = daofind(detection_frame)
    elif mode == 'IRAF':
        irafind = IRAFStarFinder(threshold=threshold, fwhm=fwhm)
        sources = irafind(detection_frame)
    elif mode == 'PEAK':
        sources = find_peaks(detection_frame, threshold=threshold)
        sources.rename_column('x_peak', 'xcentroid')
        sources.rename_column('y_peak', 'ycentroid')
    elif mode == 'ORB':
        astro = Astrometry(detection_frame, instrument='sitelle')
        path, fwhm_arc = astro.detect_stars(min_star_number=5000,
                                            r_max_coeff=1.,
                                            filter_image=False)
        star_list = astro.load_star_list(path)
        sources = Table([star_list[:, 0], star_list[:, 1]],
                        names=('ycentroid', 'xcentroid'))
    elif mode == 'SEGM':
        logging.info('Detecting')
        segm = detect_sources(detection_frame, threshold, npixels=npix)
        deblend = True
        labels = segm.labels
        if deblend:
            # while labels.shape != (0,):
            #     try:
            #         #logging.info('Deblending')
            #         # fwhm = 3.
            #         # s = fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))
            #         # kernel = Gaussian2DKernel(s, x_size = 3, y_size = 3)
            #         # kernel = Box2DKernel(3, mode='integrate')
            #         deblended = deblend_sources(detection_frame, segm, npixels=npix, labels=labels)#, filter_kernel=kernel)
            #         success = True
            #     except ValueError as e:
            #         #warnings.warn('Deblend was not possible.\n %s'%e)
            #         source_id = int(e.args[0].split('"')[1])
            #         id = np.argwhere(labels == source_id)[0,0]
            #         labels = np.concatenate((labels[:id], labels[id+1:]))
            #         success = False
            #     if success is True:
            #         break
            try:
                logging.info('Deblending')
                # fwhm = 3.
                # s = fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))
                # kernel = Gaussian2DKernel(s, x_size = 3, y_size = 3)
                # kernel = Box2DKernel(3, mode='integrate')
                deblended = deblend_sources(
                    detection_frame, segm,
                    npixels=npix)  #, filter_kernel=kernel)
            except ValueError as e:
                warnings.warn('Deblend was not possible.\n %s' % e)
                deblended = segm
            logging.info('Retrieving properties')
            sources = source_properties(detection_frame, deblended).to_table()
        else:
            deblended = segm
            logging.info('Retrieving properties')
            sources = source_properties(detection_frame, deblended).to_table()
        logging.info('Filtering Quantity columns')
        for col in sources.colnames:
            if type(sources[col]) is Quantity:
                sources[col] = sources[col].value
    sources = mask_sources(sources, mask)  # keep only sources inside the mask
    df = sources.to_pandas()
    if return_segm_image:
        return deblended.array, df
    else:
        return df
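A hedged usage sketch of the 'SEGM' mode: the file name is a placeholder, and the module-level helpers such as mask_sources and the legacy photutils calls above are assumed to be available.

from astropy.io import fits

detection_frame = fits.getdata('deep_frame.fits')  # placeholder detection map
sources = get_sources(detection_frame, mode='SEGM', sigma=5.0, npix=4)
print(sources[['xcentroid', 'ycentroid']].head())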
Example #32
def make_source_catalog(model, kernel_fwhm, kernel_xsize, kernel_ysize,
                        snr_threshold, npixels, deblend_nlevels=32,
                        deblend_contrast=0.001, deblend_mode='exponential',
                        connectivity=8, deblend=False):
    """
    Create a final catalog of source photometry and morphologies.

    Parameters
    ----------
    model : `DrizProductModel`
        The input `DrizProductModel` of a single drizzled image.  The
        input image is assumed to be background subtracted.

    kernel_fwhm : float
        The full-width at half-maximum (FWHM) of the 2D Gaussian kernel
        used to filter the image before thresholding.  Filtering the
        image will smooth the noise and maximize detectability of
        objects with a shape similar to the kernel.

    kernel_xsize : odd int
        The size in the x dimension (columns) of the kernel array.

    kernel_ysize : odd int
        The size in the y dimension (row) of the kernel array.

    snr_threshold : float
        The signal-to-noise ratio per pixel above the ``background`` for
        which to consider a pixel as possibly being part of a source.

    npixels : int
        The number of connected pixels, each greater than the threshold
        that an object must have to be detected.  ``npixels`` must be a
        positive integer.

    deblend_nlevels : int, optional
        The number of multi-thresholding levels to use for deblending
        sources.  Each source will be re-thresholded at
        ``deblend_nlevels``, spaced exponentially or linearly (see the
        ``deblend_mode`` keyword), between its minimum and maximum
        values within the source segment.

    deblend_contrast : float, optional
        The fraction of the total (blended) source flux that a local
        peak must have to be considered as a separate object.
        ``deblend_contrast`` must be between 0 and 1, inclusive.  If
        ``deblend_contrast = 0`` then every local peak will be made a
        separate object (maximum deblending).  If ``deblend_contrast =
        1`` then no deblending will occur.  The default is 0.001, which
        will deblend sources with a magnitude differences of about 7.5.

    deblend_mode : {'exponential', 'linear'}, optional
        The mode used in defining the spacing between the
        multi-thresholding levels (see the ``deblend_nlevels`` keyword)
        when deblending sources.

    connectivity : {4, 8}, optional
        The type of pixel connectivity used in determining how pixels
        are grouped into a detected source.  The options are 4 or 8
        (default).  4-connected pixels touch along their edges.
        8-connected pixels touch along their edges or corners.  For
        reference, SExtractor uses 8-connected pixels.

    deblend : bool, optional
        Whether to deblend overlapping sources.  Source deblending
        requires scikit-image.

    Returns
    -------
    catalog : `~astropy.Table`
        An astropy Table containing the source photometry and
        morphologies.
    """

    if not isinstance(model, DrizProductModel):
        raise ValueError('The input model must be a DrizProductModel.')

    # Use this when model.wht contains an IVM map
    # Calculate "background-only" error assuming the weight image is an
    # inverse-variance map (IVM).  The weight image is clipped because it
    # may contain zeros.
    # bkg_error = np.sqrt(1.0 / np.clip(model.wht, 1.0e-20, 1.0e20))
    # threshold = snr_threshold * bkg_error

    # Estimate the 1-sigma noise in the image empirically because model.wht
    # does not yet contain an IVM map
    mask = (model.wht == 0)
    data_mean, data_median, data_std = sigma_clipped_stats(
        model.data, mask=mask, sigma=3.0, maxiters=10)
    threshold = data_median + (data_std * snr_threshold)

    sigma = kernel_fwhm * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=kernel_xsize, y_size=kernel_ysize)
    kernel.normalize()

    segm = photutils.detect_sources(model.data, threshold, npixels=npixels,
                                    filter_kernel=kernel,
                                    connectivity=connectivity)

    # source deblending requires scikit-image
    if deblend:
        segm = photutils.deblend_sources(model.data, segm, npixels=npixels,
                                         filter_kernel=kernel,
                                         nlevels=deblend_nlevels,
                                         contrast=deblend_contrast,
                                         mode=deblend_mode,
                                         connectivity=connectivity,
                                         relabel=True)

    # Calculate total error, including source Poisson noise.
    # This calculation assumes that the data and bkg_error images are in
    # units of electron/s.  Poisson noise is not included for pixels
    # where data < 0.
    exptime = model.meta.resample.product_exposure_time    # total exptime
    # total_error = np.sqrt(bkg_error**2 +
    #                       np.maximum(model.data / exptime, 0))
    total_error = np.sqrt(data_std**2 + np.maximum(model.data / exptime, 0))

    wcs = model.get_fits_wcs()
    source_props = photutils.source_properties(
        model.data, segm, error=total_error, filter_kernel=kernel, wcs=wcs)

    if len(source_props) == 0:
        return QTable()    # empty table

    columns = ['id', 'xcentroid', 'ycentroid', 'sky_centroid', 'area',
               'source_sum', 'source_sum_err', 'semimajor_axis_sigma',
               'semiminor_axis_sigma', 'orientation',
               'sky_bbox_ll', 'sky_bbox_ul', 'sky_bbox_lr', 'sky_bbox_ur']
    catalog = source_props.to_table(columns=columns)

    # convert orientation to degrees
    orient_deg = catalog['orientation'].to(u.deg)
    catalog.replace_column('orientation', orient_deg)

    # define orientation position angle
    rot = _get_rotation(wcs)
    catalog['orientation_sky'] = ((270. - rot +
                                   catalog['orientation'].value) * u.deg)

    # define flux in microJanskys
    nsources = len(catalog)
    pixelarea = model.meta.photometry.pixelarea_arcsecsq
    if pixelarea is None:
        micro_Jy = np.full(nsources, np.nan)
    else:
        micro_Jy = (catalog['source_sum'] *
                    model.meta.photometry.conversion_microjanskys *
                    model.meta.photometry.pixelarea_arcsecsq)

    # define AB mag
    abmag = np.full(nsources, np.nan)
    mask = np.isfinite(micro_Jy)
    abmag[mask] = -2.5 * np.log10(micro_Jy[mask]) + 23.9
    catalog['abmag'] = abmag

    # define AB mag error
    # assuming SNR >> 1 (otherwise abmag_error is asymmetric)
    abmag_error = (2.5 * np.log10(np.e) * catalog['source_sum_err'] /
                   catalog['source_sum'])
    abmag_error[~mask] = np.nan
    catalog['abmag_error'] = abmag_error

    return catalog
Example #33
def extract_sources(img, **pars):
    """Use photutils to find sources in image based on segmentation.

    Parameters
    ==========
    dqmask : array
        Bitmask which identifies whether a pixel should be used (1) in source
        identification or not (0). If provided, this mask will be applied to the
        input array prior to source identification.

    fwhm : float
        Full-width half-maximum (fwhm) of the PSF in pixels.
        Default: 3.0

    threshold : float or None
        Value from the image which serves as the limit for determining sources.
        If None, compute a default value of (background+5*rms(background)).
        If threshold < 0.0, use absolute value as scaling factor for default value.
        Default: None

    source_box : int
        Size of box (in pixels) which defines the minimum size of a valid source

    classify : boolean
        Specify whether or not to apply classification based on invariant moments
        of each source to determine whether or not a source is likely to be a
        cosmic-ray, and not include those sources in the final catalog.
        Default: True

    centering_mode : {'segmentation', 'starfind'}
        Algorithm to use when computing the positions of the detected sources.
        Centering will only take place after `threshold` has been determined, and
        sources are identified using segmentation.  Centering using `segmentation`
        will rely on `photutils.segmentation.source_properties` to generate the
        properties for the source catalog.  Centering using `starfind` will use
        `photutils.IRAFStarFinder` to characterize each source in the catalog.
        Default: 'starfind'

    nlargest : int, None
        Number of largest (brightest) sources in each chip/array to measure
        when using 'starfind' mode.  Default: None (all)

    output : str
        If specified, write out the catalog of sources to the file with this name

    plot : boolean
        Specify whether or not to create a plot of the sources on a view of the image
        Default: False

    vmax : float
        If plotting the sources, scale the image to this maximum value.

    """
    fwhm = pars.get('fwhm', 3.0)
    threshold = pars.get('threshold', None)
    source_box = pars.get('source_box', 7)
    classify = pars.get('classify', True)
    output = pars.get('output', None)
    plot = pars.get('plot', False)
    vmax = pars.get('vmax', None)
    centering_mode = pars.get('centering_mode', 'starfind')
    deblend = pars.get('deblend', False)
    dqmask = pars.get('dqmask', None)
    nlargest = pars.get('nlargest', None)
    # apply any provided dqmask for segmentation only
    if dqmask is not None:
        imgarr = img.copy()
        imgarr[dqmask] = 0
    else:
        imgarr = img

    bkg_estimator = MedianBackground()
    bkg = None

    exclude_percentiles = [10, 25, 50, 75]
    for percentile in exclude_percentiles:
        try:
            bkg = Background2D(imgarr, (50, 50), filter_size=(3, 3),
                               bkg_estimator=bkg_estimator,
                               exclude_percentile=percentile)
            # If it succeeds, stop and use that value
            bkg_rms = (5. * bkg.background_rms)
            bkg_rms_mean = bkg.background.mean() + 5. * bkg_rms.std()
            default_threshold = bkg.background + bkg_rms
            if threshold is None or threshold < 0.0:
                if threshold is not None and threshold < 0.0:
                    threshold = -1*threshold*default_threshold
                    log.info("{} based on {}".format(threshold.max(), default_threshold.max()))
                    bkg_rms_mean = threshold.max()
                else:
                    threshold = default_threshold
            else:
                bkg_rms_mean = 3. * threshold
            if bkg_rms_mean < 0:
                bkg_rms_mean = 0.
            break
        except Exception:
            bkg = None

    # If Background2D does not work at all, define default scalar values for
    # the background to be used in source identification
    if bkg is None:
        bkg_rms_mean = max(0.01, imgarr.min())
        bkg_rms = bkg_rms_mean * 5
        if threshold is None:
            # fall back to a scalar threshold so detect_sources() below still works
            threshold = bkg_rms

    sigma = fwhm * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=source_box, y_size=source_box)
    kernel.normalize()
    segm = detect_sources(imgarr, threshold, npixels=source_box,
                          filter_kernel=kernel)
    if deblend:
        segm = deblend_sources(imgarr, segm, npixels=5,
                               filter_kernel=kernel, nlevels=16,
                               contrast=0.01)
    # If classify is turned on, it should modify the segmentation map
    if classify:
        cat = source_properties(imgarr, segm)
        if len(cat) > 0:
            # Remove likely cosmic-rays based on central_moments classification
            bad_srcs = np.where(classify_sources(cat) == 0)[0] + 1
            segm.remove_labels(bad_srcs)  # CAUTION: May be time-consuming!!!


    # Use the segmentation map to define per-segment cutouts for DAOStarFinder centering
    if centering_mode == 'starfind':
        src_table = None
        #daofind = IRAFStarFinder(fwhm=fwhm, threshold=5.*bkg.background_rms_median)
        log.info("Setting up DAOStarFinder with: \n    fwhm={}  threshold={}".format(fwhm, bkg_rms_mean))
        daofind = DAOStarFinder(fwhm=fwhm, threshold=bkg_rms_mean)
        # Identify nbrightest/largest sources
        if nlargest is not None:
            if nlargest > len(segm.labels):
                nlargest = len(segm.labels)
            large_labels = np.flip(np.argsort(segm.areas)+1)[:nlargest]
        log.info("Looking for sources in {} segments".format(len(segm.labels)))

        for label in segm.labels:
            if nlargest is not None and label not in large_labels:
                continue # Move on to the next segment
            # Get slice definition for the segment with this label
            seg_slice = segm.segments[label-1].slices
            seg_yoffset = seg_slice[0].start
            seg_xoffset = seg_slice[1].start

            # Define raw data from this slice (copy so the input image is not modified)
            detection_img = img[seg_slice].copy()
            # zero out any pixels which do not have this segment's label
            detection_img[segm.data[seg_slice] != label] = 0

            # Detect sources in this specific segment
            seg_table = daofind(detection_img)
            if seg_table is None or len(seg_table) == 0:
                continue  # DAOStarFinder found nothing in this segment
            if src_table is None:
                # Initialize the final master source list catalog
                src_table = Table(names=seg_table.colnames,
                                  dtype=[dt[1] for dt in seg_table.dtype.descr])
            # Pick out the brightest source only
            max_row = np.where(seg_table['peak'] == seg_table['peak'].max())[0][0]
            # Apply the slice offsets to convert positions into full-frame coordinates
            seg_table['xcentroid'] += seg_xoffset
            seg_table['ycentroid'] += seg_yoffset
            # Add the row for the brightest detected source to the master catalog
            src_table.add_row(seg_table[max_row])

    else:
        cat = source_properties(img, segm)
        src_table = cat.to_table()
        # Make column names consistent with IRAFStarFinder column names
        src_table.rename_column('source_sum', 'flux')
        src_table.rename_column('source_sum_err', 'flux_err')

    if src_table is not None:
        log.info("Total Number of detected sources: {}".format(len(src_table)))
    else:
        log.info("No detected sources!")
        return None, None

    # Move 'id' column from first to last position
    # Makes it consistent for remainder of code
    cnames = src_table.colnames
    cnames.append(cnames[0])
    del cnames[0]
    tbl = src_table[cnames]

    if output:
        tbl['xcentroid'].info.format = '.10f'  # optional format
        tbl['ycentroid'].info.format = '.10f'
        tbl['flux'].info.format = '.10f'
        if not output.endswith('.cat'):
            output += '.cat'
        tbl.write(output, format='ascii.commented_header')
        log.info("Wrote source catalog: {}".format(output))

    if plot and plt is not None:
        norm = None
        if vmax is None:
            norm = ImageNormalize(stretch=SqrtStretch())
        fig, ax = plt.subplots(2, 2, figsize=(8, 8))
        ax[0][0].imshow(imgarr, origin='lower', cmap='Greys_r', norm=norm, vmax=vmax)
        ax[0][1].imshow(segm, origin='lower', cmap=segm.cmap(random_state=12345))
        ax[0][1].set_title('Segmentation Map')
        ax[1][0].imshow(bkg.background, origin='lower')
        if not isinstance(threshold, float):
            ax[1][1].imshow(threshold, origin='lower')
    return tbl, segm
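
A minimal usage sketch of the function above (the file name, `img`, and `dqmask` below are placeholders, not part of the original example):

# Hypothetical usage of the extract_sources() defined above.
import numpy as np
from astropy.io import fits

img = fits.getdata("example_image.fits")      # placeholder science image
dqmask = np.zeros(img.shape, dtype=bool)      # placeholder bad-pixel mask

# threshold=None -> default background + 5*rms threshold;
# classify=False -> skip the classify_sources() helper (not shown in this example);
# centering_mode='starfind' -> per-segment DAOStarFinder centroids.
src_tbl, segmap = extract_sources(img, dqmask=dqmask, fwhm=3.0,
                                  threshold=None, source_box=7,
                                  classify=False, nlargest=100,
                                  centering_mode='starfind')
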
Exemple #34
0
def saturation_mask(image_file,
                    mask_file=None,
                    sat_ADU=40000,
                    sat_area_min=500,
                    ra_safe=None,
                    dec_safe=None,
                    rad_safe=None,
                    dilation_its=5,
                    blur_sigma=2.0,
                    plot=True,
                    write=True,
                    output=None):
    """Produce a mask of all saturated sources in an image, optionally combined
    with an existing mask.
    
    Arguments
    ---------
    image_file : str
        Image (fits) filename
    mask_file : str, optional
        Existing mask image (fits) filename to combine existing mask with new
        saturation mask (default None)
    sat_ADU : float, optional
        ADU value above which a pixel is considered saturated (default 40000, 
        which is a bit below the limit for MegaCam)
    sat_area_min : float, optional
        *Minimum* pixel area for a source to be considered a saturated source
        (default 500)
    ra_safe, dec_safe : float, optional
        Right Ascension and Declination (in decimal degrees) at the centre of a 
        "safe zone" in which sources will not be masked (default None), see
        `rad_safe`
    rad_safe : float, optional
        Radius (in arcsec) of the "safe zone", a circle centered on 
        (`ra_safe`, `dec_safe`) (default None)
    dilation_its : int, optional
        Number of iterations of binary dilation to apply to the mask (see 
        notes for details; default 5; can be set to 0 to turn this off)
    blur_sigma : float, optional
        Sigma of the Gaussian filter to apply to the mask to blur it (default 
        2.0; can be set to 0 to turn this off)
    plot : bool, optional
        Whether to plot the new saturation mask (default True)
    write : bool, optional
        Whether to write the new mask to a fits file (default True)
    output : str, optional
        Name for output fits file (default 
        `image_file.replace(".fits", "_satmask.fits")`)

    Returns
    -------
    tbl : astropy.table.table.Table
        A table containing properties of the sources which were flagged as 
        saturated
    mask_hdu : astropy.io.fits.PrimaryHDU
        New HDU (image + header) for the mask (**not** the masked data)
        
    Notes
    -----
    Uses image segmentation to find all sources in the image. Then, looks for 
    sources which have a maximum flux above the saturation ADU and above the 
    minimum saturation area and creates a mask of these sources. 

    Binary dilation is applied to "dilate" (i.e., enlarge) features in the
    mask, and Gaussian blurring is applied to smooth out the mask and ensure
    that saturated sources are completely masked. Binary dilation is very 
    helpful for catching diffraction spikes which are initially not completely
    masked. **Note** however that both binary dilation and Gaussian blurring
    can be disabled by setting `dilation_its = 0` and `blur_sigma = 0`, 
    respectively.
    
    If a "safe zone" is supplied, any sources within the safe zone will be 
    labelled as NON-saturated. This is useful if you know the coordinates of 
    some galaxy/nebulosity in your image which should not be masked, as it is 
    sometimes difficult to distinguish between a saturated source and a galaxy.
    
    If an existing mask is supplied, the output mask will be a combination of 
    the previous mask and saturation mask.
    
    """

    data = fits.getdata(image_file)
    hdr = fits.getheader(image_file)

    ## set the threshold for image segmentation
    try:
        bkg_rms = hdr["BKGSTD"]  # header written by bkgsub function
    except KeyError:
        # use crude image segmentation to find sources above SNR=3, build a
        # source mask, and estimate the background RMS
        if mask_file:  # load a bad pixel mask if one is present
            bp_mask = fits.getdata(mask_file).astype(bool)
            source_mask = make_source_mask(data,
                                           snr=3,
                                           npixels=5,
                                           dilate_size=15,
                                           mask=bp_mask)
            # combine the bad pixel mask and source mask
            rough_mask = np.logical_or(bp_mask, source_mask)
        else:
            source_mask = make_source_mask(data,
                                           snr=3,
                                           npixels=5,
                                           dilate_size=15)
            rough_mask = source_mask

        # estimate the background standard deviation
        try:
            sigma_clip = SigmaClip(sigma=3, maxiters=5)  # sigma clipping
        except TypeError:  # in old astropy, "maxiters" was "iters"
            sigma_clip = SigmaClip(sigma=3, iters=5)

        bkg = Background2D(data, (10, 10),
                           filter_size=(5, 5),
                           sigma_clip=sigma_clip,
                           bkg_estimator=MedianBackground(),
                           mask=rough_mask)
        bkg_rms = bkg.background_rms

    threshold = 3.0 * bkg_rms  # threshold for proper image segmentation

    ## get the segmented image and source properties
    ## only detect sources composed of at least sat_area_min pixels
    segm = detect_sources(data, threshold, npixels=sat_area_min)
    labels = segm.labels
    cat = source_properties(data, segm)

    ## if any sources are found
    if len(cat) != 0:
        # catalogue of sources as a table
        tbl = cat.to_table()
        mask = tbl["max_value"] >= sat_ADU  # must be above this ADU
        sat_labels = labels[mask]
        tbl = tbl[mask]

        # eliminate sources within the "safe zone", if given
        if (ra_safe and dec_safe and rad_safe):
            # get coordinates
            w = wcs.WCS(hdr)
            tbl["ra"], tbl["dec"] = w.all_pix2world(tbl["xcentroid"],
                                                    tbl["ycentroid"], 1)
            safe_coord = SkyCoord(ra_safe * u.deg,
                                  dec_safe * u.deg,
                                  frame="icrs")
            source_coords = SkyCoord(tbl["ra"] * u.deg,
                                     tbl["dec"] * u.deg,
                                     frame="icrs")
            sep = safe_coord.separation(source_coords).arcsecond  # separations
            tbl["sep"] = sep  # add a column for sep from safe zone centre
            mask = tbl["sep"] > rad_safe  # only select sources outside this rad
            sat_labels = sat_labels[mask]
            tbl = tbl[mask]

        # keep only the remaining saturated sources
        segm.keep_labels(sat_labels)

        # build the mask, where masked=1 and unmasked=0
        newmask = segm.data_ma

        # combine with existing mask, if given
        if mask_file:
            mask = fits.getdata(mask_file)
            newmask = np.logical_or(mask, newmask)
        newmask[newmask >= 1] = 1  # masked pixels are labeled with 1
        newmask = newmask.filled(0)  # unmasked labeled with 0

        # mask any remaining pixels equal to 0, nan, or above the saturation
        # ADU in the data
        newmask[data == 0] = 1
        newmask[np.isnan(data)] = 1
        newmask[data >= sat_ADU] = 1

        # use binary dilation to fill holes, esp. near diffraction spikes
        if dilation_its < 0:
            raise ValueError("dilation_its must be 0 or a positive number, " +
                             f"but the provided argument was {dilation_its}")
        elif dilation_its == 0:
            print("Skipping binary dilation...")
        else:
            newmask = (binary_dilation(newmask,
                                       iterations=dilation_its)).astype(float)

        # use Gaussian blurring to smooth out the mask
        if blur_sigma < 0:
            raise ValueError("blur_sigma must be 0 or a positive number, " +
                             f"but the provided argument was {blur_sigma}")
        elif blur_sigma == 0:
            print("Skipping Gaussian blurring...")
        else:
            newmask = gaussian_filter(newmask,
                                      sigma=blur_sigma,
                                      mode="constant",
                                      cval=0.0)

        # anything which is above zero should be masked
        newmask[newmask > 0] = 1

    ## if no sources are found
    else:
        # empty table
        tbl = Table()

        # use existing mask, if given
        newmask = np.zeros(shape=data.shape)
        if mask_file:
            mask = fits.getdata(mask_file)
            newmask[mask] = 1

        # mask pixels equal to 0, nan, or above the saturation ADU in the data
        newmask[data == 0] = 1
        newmask[np.isnan(data)] = 1
        newmask[data >= sat_ADU] = 1

    ## construct the mask PrimaryHDU object
    hdr = fits.getheader(image_file)
    mask_hdu = fits.PrimaryHDU(data=newmask.astype(int), header=hdr)

    ## plot, if desired
    if plot:  # plot, if desired
        satmask_plot = image_file.replace(".fits", "_satmask.png")
        title = "saturation mask"
        __plot_mask(hdr=hdr, newmask=newmask, title=title, output=satmask_plot)

    ## write, if desired
    if write:
        if not (output):
            output = image_file.replace(".fits", "_satmask.fits")
        mask_hdu.writeto(output, overwrite=True, output_verify="ignore")

    return tbl, mask_hdu
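
The dilation-plus-blur step described in the Notes above can be illustrated on its own with a toy mask (a sketch assuming the `scipy.ndimage` versions of `binary_dilation` and `gaussian_filter`; the original example's imports are not shown):

# Stand-alone sketch of the mask "grow and smooth" step described in the Notes:
# binary dilation enlarges masked features, Gaussian blurring softens the edges,
# and any non-zero pixel is then re-flagged as masked.
import numpy as np
from scipy.ndimage import binary_dilation, gaussian_filter

toy_mask = np.zeros((50, 50))
toy_mask[24:26, 10:40] = 1                      # a thin "diffraction spike"

grown = binary_dilation(toy_mask, iterations=5).astype(float)
smoothed = gaussian_filter(grown, sigma=2.0, mode="constant", cval=0.0)
final_mask = (smoothed > 0).astype(int)         # masked=1, unmasked=0
print(toy_mask.sum(), final_mask.sum())         # the masked area grows
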
Exemple #35
0
def extract_sources(img, **pars):
    """Use photutils to find sources in image based on segmentation.

    Parameters
    ==========
    fwhm : float
        Full-width half-maximum (fwhm) of the PSF in pixels.
        Default: 3.0

    threshold : float or None
        Value from the image which serves as the limit for determining sources.
        If None, compute a default value of (background+5*rms(background)).
        If threshold < 0.0, use absolute value as scaling factor for default value.
        Default: None

    source_box : int
        Size of box (in pixels) which defines the minimum size of a valid source

    classify : boolean
        Specify whether or not to apply classification based on invariant moments
        of each source to determine whether it is likely to be a cosmic-ray, and
        to exclude such sources from the final catalog.
        Default: True

    output : str
        If specified, write out the catalog of sources to the file with this name

    plot : boolean
        Specify whether or not to create a plot of the sources on a view of the image
        Default: False

    vmax : float
        If plotting the sources, scale the image to this maximum value.

    """
    fwhm = pars.get('fwhm', 3.0)
    threshold = pars.get('threshold', None)
    source_box = pars.get('source_box', 7)
    classify = pars.get('classify', True)
    output = pars.get('output', None)
    plot = pars.get('plot', False)
    vmax = pars.get('vmax', None)

    if threshold is None or threshold < 0.0:
        bkg_estimator = MedianBackground()
        bkg = Background2D(img, (50, 50),
                           filter_size=(3, 3),
                           bkg_estimator=bkg_estimator)
        default_threshold = bkg.background + (5. * bkg.background_rms)
        if threshold is not None and threshold < 0.0:
            threshold = -1 * threshold * default_threshold
            print("{} based on {}".format(threshold.max(),
                                          default_threshold.max()))
        else:
            threshold = default_threshold
    sigma = fwhm * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=source_box, y_size=source_box)
    kernel.normalize()
    segm = detect_sources(img,
                          threshold,
                          npixels=source_box,
                          filter_kernel=kernel)
    cat = source_properties(img, segm)
    print("Total Number of detected sources: {}".format(len(cat)))
    if classify:
        # Remove likely cosmic-rays based on central_moments classification
        goodsrcs = np.where(classify_sources(cat) == 1)[0].tolist()
        # Build an empty catalog and append only the good sources.
        # (This relies on the private ``_data`` list of the older photutils
        # ``SourceCatalog`` API.)
        newcat = photutils.segmentation.properties.SourceCatalog([])
        for src in goodsrcs:
            newcat._data.append(cat[src])
    else:
        newcat = cat

    tbl = newcat.to_table()
    print("Final Number of selected sources: {}".format(len(newcat)))
    if output:
        tbl['xcentroid'].info.format = '.10f'  # optional format
        tbl['ycentroid'].info.format = '.10f'
        tbl['source_sum'].info.format = '.10f'
        tbl['cxy'].info.format = '.10f'
        tbl['cyy'].info.format = '.10f'
        if not output.endswith('.cat'):
            output += '.cat'
        tbl.write(output, format='ascii.commented_header')
        print("Wrote source catalog: {}".format(output))

    if plot:
        norm = None
        if vmax is None:
            norm = ImageNormalize(stretch=SqrtStretch())
        fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
        ax1.imshow(img, origin='lower', cmap='Greys_r', norm=norm, vmax=vmax)
        ax2.imshow(segm, origin='lower', cmap=segm.cmap(random_state=12345))

    return tbl, segm
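
The threshold convention documented above (None means use the default background + 5*rms; a negative value scales that default; a positive value is used as-is) in a short, hypothetical call (`img` is a placeholder 2D science array, not part of the original example):

# Hypothetical calls illustrating the documented threshold convention:
#   threshold=None  -> use background + 5*rms(background) directly
#   threshold=-2.0  -> use 2 x (background + 5*rms(background))
#   threshold=12.5  -> use a flat threshold of 12.5 counts
tbl_default, segm_default = extract_sources(img, fwhm=3.0, threshold=None)
tbl_scaled, segm_scaled = extract_sources(img, fwhm=3.0, threshold=-2.0)
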
Exemple #36
0
def run_bat_sources(source, filter, save_results=False, outdir=None, plot=False, results_file=None,
                    ap_file=None, save_fig=False):
    
    # File that contains the coordinates for each source
    bat_info = pd.read_csv(bat_dir+'bat_info.csv', index_col=0)
    
    # File that contains the classification for each source
    phot_class = pd.read_csv('bat_spire_phot_class.csv', index_col=0)
    
    if np.all(source != 'all'):
        if np.isscalar(source):
            srcs = [source]
        else:
            srcs = source
    else:
        srcs = bat_info.index.values
    
    if np.isscalar(filter):
        filter = [filter]
    
    # Setup the DataFrame to store the photometry results
    index = pd.MultiIndex.from_product([srcs, filter], names=['Name', 'Filter'])
    src_df = pd.DataFrame(columns=['raw_flux', 'bkg_flux', 'bkgsub_flux', 'total_err', 'aperture_err', 'bkg_rms', 'calib_err', 'type'], index=index)
    
    # Setup a dictionary to hold the apertures used
    src_aps = {}
    
    for s in srcs:
        print('Running...', s)
        for f in filter:
            print('\tFilter ', f)
            # Classification of source (either P, E, U, or C for point source,
            # extended, undetected, or cirrus)
            pclass = phot_class.loc[s, f]
            #pclass = 'P'
            
            # Load the data
            hdu_image = pyf.open(image_dir+s+'_scanamorphos_spire'+str(WAVES[f])+'_signal.fits')[0]
            hdu_err = pyf.open(image_dir+s+'_scanamorphos_spire'+str(WAVES[f])+'_error.fits')[0]
            
            # Convert to Jy/pixel
            hdu_image = prep_image(hdu_image, f)
            hdu_err = prep_image(hdu_err, f)
            
            # Calculate the global background
            im = hdu_image.data
            im_med, im_std = estimate_bkg(im)
            
            # Detect the BAT source using a 2-sigma threshold above the global
            # background. (The original per-class logic, which used a lower
            # threshold for extended sources and a 3-sigma threshold plus a
            # Gaussian smoothing kernel matched to the filter FWHM for point
            # sources, is kept commented out below.)
#            if pclass == 'E':
            thresh = im_med + 2.0*im_std
            segm_img = detect_sources(im, thresh, npixels=5)
            props = source_properties(im-im_med, segm_img, wcs=wcs.WCS(hdu_image.header))
#             elif pclass == 'P':
#                 thresh = im_med + 3.0*im_std
#                 sigma = FWHM[f]/PIX_SIZES[f] * gaussian_fwhm_to_sigma
#                 kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
#                 segm_img = detect_sources(im, thresh, npixels=5, filter_kernel=kernel)
#                 props = source_properties(im-im_med, segm_img, wcs=wcs.WCS(hdu_image.header))
            
            # Find the BAT source in the properties list
            ra_bat = bat_info.loc[s, 'RA_(J2000)']
            dec_bat = bat_info.loc[s, 'DEC_(J2000)']
            coord_bat = coord.SkyCoord(ra=ra_bat, dec=dec_bat, frame='fk5')
            
            # No need to find the BAT source if it is already classified as undetected
            # or dominated by cirrus emission
            if ((pclass != 'U') & (pclass != 'C')) & (len(props) > 0):
                ind_bat = find_bat_source(coord_bat, props, 2*FWHM[f])
            else:
                ind_bat = None

            # Create the source aperture            
            if ind_bat is None:
                
                ap, type = create_aperture(None, f, pclass, coord_bat=coord_bat, wcs=wcs.WCS(hdu_image.header)) 
            
            else:
            
                ap, type = create_aperture(props[ind_bat], f, pclass, extent=3.0, coord_bat=coord_bat, wcs=wcs.WCS(hdu_image.header))
            
            # Create a new mask based on the same 2-sigma threshold and remove the BAT source if detected
            thresh2 = im_med + 2.0*im_std
            segm_img2 = detect_sources(im, thresh2, npixels=5)
            props2 = source_properties(im-im_med, segm_img2, wcs=wcs.WCS(hdu_image.header))
            nanmask = np.isnan(im)
            nanerr = np.isnan(hdu_err.data) | np.isinf(hdu_err.data)
            
            if len(props2) != 0:
                ind_bat2 = find_bat_source(coord_bat, props2, 2*FWHM[f])
            else:
                ind_bat2 = None
    
            if ind_bat2 is None:
                mask = np.logical_not(segm_img2.data_masked.mask) | nanmask | nanerr
            else:
                # remove_labels() modifies the segmentation image in place (it returns None)
                segm_img2.remove_labels(ind_bat2 + 1)
                mask = np.logical_not(segm_img2.data_masked.mask) | nanmask | nanerr
                #mask = nanmask | nanerr
            
            if (pclass != 'C'):    
                results, apertures = herschel_aperture_photometry(ap, hdu_image, hdu_err.data, type, f, pclass=pclass, bkg=im_med, mask=mask)
            else:
                results, apertures = herschel_aperture_photometry(ap, hdu_image, hdu_err.data, type, f, pclass=pclass, bkg=im_med, mask=None)
                
            type = results['type']
            src_df.loc[s, f] = pd.Series(results)
            src_aps[s+'_'+f] = apertures  
            
            if plot:

                cbat = [coord_bat.ra.deg, coord_bat.dec.deg]
                
                if (type == 'fixed'):
                    pmax = None
                else:
                    pmax = props[ind_bat].max_value + im_med

                # guard against comparing None with a float
                if (pmax is not None) and (pmax < im_med):
                    pmax = None

                fig = plot_aperture_photometry(hdu_image, apertures, f, type, global_bkg=im_med,
                                                   pixel_max=pmax, title=s+' ['+f+']', plot_bat_loc=cbat)
           
                if save_fig:
                    if outdir is None:
                        fig.save(s+'_'+f+'.png')
                    else:
                        fig.save(outdir+s+'_'+f+'.png')
                    fig.close()
            
    if save_results:
        if (outdir is None) and (results_file is None):
            src_df.to_csv('photometry_results_'+str(dt.datetime.today().date().isoformat())+'.csv')
            f_ap = open('apertures_'+str(dt.datetime.today().date().isoformat())+'.pkl', 'wb')
        elif (outdir is not None) and (results_file is None):
            src_df.to_csv(outdir+'photometry_results_'+str(dt.datetime.today().date().isoformat())+'.csv')
            f_ap = open(outdir+'apertures_'+str(dt.datetime.today().date().isoformat())+'.pkl', 'wb')
        else:
            src_df.to_csv(outdir+results_file)
            f_ap = open(outdir+ap_file, 'wb')
        
        pickle.dump(src_aps, f_ap)
        f_ap.close()
    
    if plot and not save_fig:
        return src_df, src_aps, fig  
    else:
        return src_df, src_aps             
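
The `estimate_bkg` helper called above is not included in this example; a sigma-clipped global estimate is one common way to obtain the median and RMS it is expected to return (a sketch under that assumption, not the original implementation):

# Sketch of a global background estimate of the kind run_bat_sources() expects
# from estimate_bkg(): a sigma-clipped median and standard deviation of the image.
import numpy as np
from astropy.stats import sigma_clipped_stats

def estimate_bkg_sketch(im):
    # ignore NaN/inf pixels, then sigma-clip at 3 sigma
    mean, median, std = sigma_clipped_stats(im[np.isfinite(im)], sigma=3.0)
    return median, std

# im_med, im_std = estimate_bkg_sketch(im)
# thresh = im_med + 2.0 * im_std   # detection threshold as used above
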
Exemple #37
0
def build_ePSF_imsegm(image_file,
                      mask_file=None,
                      nstars=40,
                      thresh_sigma=5.0,
                      pixelmin=20,
                      etamax=1.4,
                      areamax=500,
                      cutout=35,
                      write=True,
                      output=None,
                      plot=False,
                      output_plot=None,
                      verbose=False):
    """Build the effective Point-Spread Function using a sample of stars from
    some image acquired via image segmentation.

    Arguments
    ---------
    image_file : str
        Filename for a **background-subtracted** image
    mask_file : str, optional
        Filename for a mask file (default None)
    nstars : int, optional
        *Maximum* number of stars to use in building the ePSF (default 40;
        set to None to impose no limit)
    thresh_sigma : float, optional
        Sigma threshold for source detection with image segmentation (default
        5.0)
    pixelmin : float, optional
        *Minimum* pixel area of an isophote to be considered a good source for 
        building the ePSF (default 20)
    etamax : float, optional
        *Maximum* allowed elongation for an isophote to be considered a good 
        source for building the ePSF (default 1.4)
    areamax : float, optional
        *Maximum* allowed area (in square pixels) for an isophote to be 
        considered a good source for building the ePSF (default 500)
    cutout : int, optional
        Cutout size around each star in pixels (default 35; must be **odd**; 
        rounded **down** if even)
    write : bool, optional
        Whether to write the ePSF to a new fits file (default True)
    output : str, optional
        Name for the output ePSF data fits file (default
        `image_file.replace(".fits", "_ePSF.fits")`)
    plot : bool, optional
        Whether to plot the newly-built ePSF (default False)
    output_plot : str, optional
        Name for the output figure (default 
        `image_file.replace(".fits", "_ePSF.png")`)
    verbose : bool, optional
        Whether to be verbose (default False)

    Returns
    -------
    np.ndarray
        The ePSF data in a 2D array
    
    Notes
    -----
    Uses image segmentation via `photutils` to obtain a list of sources in the 
    image with their x, y coordinates, flux, and background at their 
    location. Then uses `EPSFBuilder` to empirically obtain the ePSF of these 
    stars. Optionally writes and/or plots the obtained ePSF.
    
    **The ePSF obtained here should not be used in convolutions.** Instead, it 
    can serve as a tool for estimating the seeing of an image. 
    """

    # ignore annoying warnings from photutils
    from astropy.utils.exceptions import AstropyWarning
    warnings.simplefilter('ignore', category=AstropyWarning)

    # imports
    from astropy.nddata import NDData
    from photutils.psf import extract_stars
    from photutils import EPSFBuilder

    # load in data
    image_data = fits.getdata(image_file)
    image_header = fits.getheader(image_file)
    try:
        instrument = image_header["INSTRUME"]
    except KeyError:
        instrument = "Unknown"

    ## source detection
    # add mask to image_data
    image_data = np.ma.masked_where(image_data == 0.0, image_data)

    # build an actual mask
    mask = (image_data == 0)
    if mask_file:
        mask = np.logical_or(mask, fits.getdata(mask_file))

    # set detection standard deviation
    try:
        std = image_header["BKGSTD"]  # header written by bkgsub function
    except KeyError:
        # make crude source mask, get standard deviation of background
        source_mask = make_source_mask(image_data,
                                       snr=3,
                                       npixels=5,
                                       dilate_size=15,
                                       mask=mask)
        final_mask = np.logical_or(mask, source_mask)
        std = np.std(np.ma.masked_where(final_mask, image_data))

    # use the segmentation image to get the source properties
    segm = detect_sources(image_data,
                          thresh_sigma * std,
                          npixels=pixelmin,
                          mask=mask)
    cat = source_properties(image_data, segm, mask=mask)

    ## get the catalog and coordinate/fluxes for sources, do some filtering
    try:
        tbl = cat.to_table()
    except ValueError:
        print("SourceCatalog contains no sources. Exiting.")
        return

    # restrict elongation and area to obtain only unsaturated stars
    tbl = tbl[(tbl["elongation"] <= etamax)]
    tbl = tbl[(tbl["area"].value <= areamax)]
    # build a table of star positions and fluxes for EPSFBuilder
    sources = Table()
    sources['x'] = tbl['xcentroid']
    sources['y'] = tbl['ycentroid']
    sources['flux'] = tbl['source_sum'].data / tbl["area"].data
    sources.sort("flux")
    sources.reverse()
    # restrict number of stars (if requested)
    if nstars:
        sources = sources[:min(nstars, len(sources))]

    ## get WCS coords for all sources
    w = wcs.WCS(image_header)
    sources["ra"], sources["dec"] = w.all_pix2world(sources["x"], sources["y"],
                                                    1)
    ## mask out edge sources:
    # a bounding circle for WIRCam, rectangle for MegaPrime
    xsize = image_data.shape[1]
    ysize = image_data.shape[0]
    if "WIRCam" in instrument:  # bounding circle
        rad_limit = xsize / 2.0
        dist_to_center = np.sqrt((sources['x'] - xsize / 2.0)**2 +
                                 (sources['y'] - ysize / 2.0)**2)
        mask = dist_to_center <= rad_limit
        sources = sources[mask]
    else:  # rectangle
        x_lims = [int(0.05 * xsize), int(0.95 * xsize)]
        y_lims = [int(0.05 * ysize), int(0.95 * ysize)]
        mask = (sources['x'] > x_lims[0]) & (sources['x'] < x_lims[1]) & (
            sources['y'] > y_lims[0]) & (sources['y'] < y_lims[1])
        sources = sources[mask]

    ## empirically obtain the effective Point Spread Function (ePSF)
    nddata = NDData(image_data)  # NDData object
    if mask_file:  # supply a mask if needed
        nddata.mask = fits.getdata(mask_file)
    if cutout % 2 == 0:  # if cutout even, subtract 1
        cutout -= 1
    stars = extract_stars(nddata, sources, size=cutout)  # extract stars

    ## build the ePSF
    nstars_epsf = len(stars.all_stars)  # no. of stars used in ePSF building

    if nstars_epsf == 0:
        print(
            "\nNo valid sources were found to build the ePSF with the given" +
            " conditions. Exiting.")
        return
    if verbose:
        print(f"{nstars_epsf} stars used in building the ePSF")

    epsf_builder = EPSFBuilder(
        oversampling=1,
        maxiters=7,  # build it
        progress_bar=False)
    epsf, fitted_stars = epsf_builder(stars)
    epsf_data = epsf.data

    if write:  # write, if desired
        epsf_hdu = fits.PrimaryHDU(data=epsf_data)
        if not (output):
            output = image_file.replace(".fits", "_ePSF.fits")

        epsf_hdu.writeto(output, overwrite=True, output_verify="ignore")

    if plot:  # plot, if desired
        if not (output_plot):  # set output name if not given
            output_plot = image_file.replace(".fits", "_ePSF.png")
        __plot_ePSF(epsf_data=epsf_data, output=output_plot)

    return epsf_data
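
As the Notes point out, the returned ePSF is mainly useful for estimating the seeing; one way to extract a FWHM from it is to fit a 2D Gaussian (a sketch, not part of the original code; `epsf_fwhm_sketch` is a hypothetical helper):

# Sketch: estimate the image seeing from the ePSF returned above by fitting
# a 2D Gaussian to it and converting the fitted sigma to a FWHM in pixels.
import numpy as np
from astropy.modeling import models, fitting
from astropy.stats import gaussian_sigma_to_fwhm

def epsf_fwhm_sketch(epsf_data):
    yy, xx = np.mgrid[:epsf_data.shape[0], :epsf_data.shape[1]]
    gauss_init = models.Gaussian2D(amplitude=epsf_data.max(),
                                   x_mean=epsf_data.shape[1] / 2,
                                   y_mean=epsf_data.shape[0] / 2,
                                   x_stddev=2.0, y_stddev=2.0)
    fit = fitting.LevMarLSQFitter()(gauss_init, xx, yy, epsf_data)
    # average the two axes and convert sigma -> FWHM (pixels)
    return 0.5 * (fit.x_stddev.value + fit.y_stddev.value) * gaussian_sigma_to_fwhm

# fwhm_pix = epsf_fwhm_sketch(epsf_data)
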
Exemple #38
0
# --- now run segmentation on the image to detect sources.

from photutils import detect_sources

threshold = 2.5  # require each pixel have a significance of >2.5 (since we're using the significance image)
npixels = 5  # require at least 5 connected pixels

segm = detect_sources(sig, threshold,
                      npixels=npixels)  # make segmentation image

# --- get various properties of the sources, crucially including their centres

from photutils import source_properties, CircularAperture

cat = source_properties(sci, segm)

# --- get a list of positions (x,y) of the sources

positions = []
for obj in cat:
    positions.append(np.transpose((obj.xcentroid.value, obj.ycentroid.value)))

# --- make a CircularAperture object. This can be plotted but is mostly used for aperture photometry.

r = 5.  # radius of aperture in pixels
apertures = CircularAperture(positions, r)
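
# --- (sketch, not in the original snippet) the CircularAperture object above is
# --- typically passed to aperture_photometry to sum the flux inside each aperture

from photutils import aperture_photometry

phot_table = aperture_photometry(sci, apertures)  # one row of aperture sums per source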

# --- let's make a plot of the sources and the apertures

import matplotlib.pyplot as plt