def search_trails(self):
     """Search star trails in image"""
     threshold = detect_threshold(self.data, snr=1)
     sigma = 2.0 * gaussian_fwhm_to_sigma
     kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
     self.segments = detect_sources(self.data, threshold, npixels=1000, filter_kernel=kernel)
     self.segments_properties = segment_properties(self.data, self.segments)
Example #2
 def source_detection_individual(self, psfFWHM, nsigma=3.0, sc_key=''):
     '''
     Parameters
     ----------
     psfFWHM : float
         FWHM of the imaging point spread function
     nsigma : float
         Source detection threshold, in units of the background sigma
     sc_key : str
         Suffix used in the index column name of the output sky-coordinate table
     '''
     data = np.array(self.data.copy())
     psfFWHMpix = psfFWHM / self.pixel_scales[0].value
     thresholder = detect_threshold(data, nsigma=nsigma)
     sigma = psfFWHMpix * gaussian_fwhm_to_sigma
     kernel = Gaussian2DKernel(sigma, x_size=5, y_size=5)
     kernel.normalize()
     segm = detect_sources(data,
                           thresholder,
                           npixels=5,
                           filter_kernel=kernel)
     props = source_properties(data, segm)
     tab = Table(props.to_table())
     self.sources_catalog = tab
     srcPstradec = self.data.wcs.all_pix2world(tab['xcentroid'],
                                               tab['ycentroid'], 1)
     sc = SkyCoord(srcPstradec[0], srcPstradec[1], unit='deg')
     sctab = Table([sc, np.arange(len(sc))],
                   names=['sc', 'sloop_{0}'.format(sc_key)])
     self.sources_skycord = sctab
Example #3
def getsky(data, psfFWHMpix, snr=3.0, dtype=bool):
    threshold = detect_threshold(data, snr=snr)
    sigma = psfFWHMpix * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
    kernel.normalize()
    segm = detect_sources(data, threshold, npixels=5, filter_kernel=kernel)

    # Flag non-finite pixels and every pixel belonging to a detected source
    mask = np.zeros_like(data, dtype=dtype)
    flag = True if dtype == bool else 1
    mask[~np.isfinite(data)] = flag
    mask[segm.data > 0] = flag

    result = {
        'mask': mask,
        'segmentation': segm
    }
    return result
Example #4
def ImageSlice(imgname):

    hdu = fits.open(imgname)
    img = hdu[0].data

    threshold = detect_threshold(img, snr=4.)

    # Detect every source to find the position of the galaxy.
    sigma = 2. * gaussian_fwhm_to_sigma  # FWHM = 2.
    kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
    kernel.normalize()
    segm = detect_sources(img, threshold, npixels=5, filter_kernel=kernel)

    segm_areas = np.delete(segm.areas, 0)  # deleting background area.
    label_gal = segm_areas.argmax()  # find the galaxy as the source with the largest area.
    rslice = segm.slices[label_gal][0]  # row slice
    cslice = segm.slices[label_gal][1]  # column slice

    ri = rslice.start
    rf = rslice.stop
    ci = cslice.start
    cf = cslice.stop

    b = 0  # expanding constant

    s_img = hdu[0].data[ri - b:rf + b, ci - b:cf + b]

    return s_img
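
A minimal standalone sketch of the same bounding-box trick. Note the example above deletes an index-0 background entry from segm.areas, which older photutils releases included; the sketch below assumes a recent photutils where areas and slices are both ordered by label with no background entry.

import numpy as np
from photutils.datasets import make_100gaussians_image
from photutils.segmentation import detect_threshold, detect_sources

data = make_100gaussians_image()
threshold = detect_threshold(data, nsigma=3.0)
segm = detect_sources(data, threshold, npixels=10)

# pick the segment with the largest area and cut out its bounding box
idx = int(np.argmax(segm.areas))      # index into segm.slices / segm.labels
rslice, cslice = segm.slices[idx]     # (row slice, column slice)
cutout = data[rslice, cslice]
print(cutout.shape)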
def get_segmentation_map(
    image: torch.Tensor,
    npixels=5,  # minimum number of connected pixels
    get_mask=False,
) -> torch.Tensor:
    """ compute smoothed segmentation map marking the part of the image that contains the main source

    optional: when get_mask=True, also provide a mask map of all additional saurces
    """
    # get segmap from red colorband
    img = image[0]
    # create segmentation map
    threshold = detect_threshold(img, 1.5)
    segm = detect_sources(img, threshold, npixels)
    # Keep only the largest segment
    # ## !!! change this to take the central segment
    main_label = np.argmax(segm.areas) + 1
    segmap = segm.data == main_label
    # regularize shape
    segmap_float = ndi.uniform_filter(np.float64(segmap), size=10)
    segmap = segmap_float > 0.5

    if not get_mask:
        return segmap
    else:
        # mask additional objects
        mask = np.zeros_like(img).astype("bool")
        for obj_label in range(1, len(segm.areas) + 1):
            if obj_label == main_label:
                continue
            mask[segm.data == obj_label] = True
        return segmap, mask
Example #6
def _segmap_base(data,
                 numpix,
                 mask=None,
                 nsigma=2,
                 contrast=0.4,
                 nlevels=5,
                 kernel=None):
    """Returns a generic segmentation map of the input image
    INPUTS:
      data:     image data
      numpix:   minimum region size in pixels (default: 10)
      mask:     mask for the image (default: None)
      snr:      signal-to-noise threshold for detecting objects (default: 2)
      contrast: contrast ratio used in deblending (default: 0.4)
      nlevels:  number of lebels to split image to when deblending (default: 5)
      kernel:   kernel to use for image smoothing (default: None)
    """

    # Convert mask to boolean
    if mask is not None and (mask.dtype != "bool"):
        mask = np.array(mask, dtype="bool")

    threshold = phot.detect_threshold(data, nsigma=nsigma, mask=mask)
    segmap = phot.detect_sources(data,
                                 threshold,
                                 numpix,
                                 filter_kernel=kernel,
                                 mask=mask)
    segmap = phot.deblend_sources(data,
                                  segmap,
                                  npixels=numpix,
                                  filter_kernel=kernel,
                                  nlevels=nlevels,
                                  contrast=contrast)
    return segmap
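
For reference, a minimal standalone sketch of the same detect-then-deblend flow, written against a recent photutils release that expects you to convolve the data yourself instead of passing filter_kernel (an assumption about the installed version, not part of the original example):

from astropy.convolution import Gaussian2DKernel, convolve
from astropy.stats import gaussian_fwhm_to_sigma
from photutils.datasets import make_100gaussians_image
from photutils.segmentation import detect_threshold, detect_sources, deblend_sources

data = make_100gaussians_image()
kernel = Gaussian2DKernel(3.0 * gaussian_fwhm_to_sigma, x_size=5, y_size=5)
kernel.normalize()
smoothed = convolve(data, kernel)                 # smooth before thresholding

threshold = detect_threshold(data, nsigma=2.0)    # per-pixel threshold image
segm = detect_sources(smoothed, threshold, npixels=10)
segm = deblend_sources(smoothed, segm, npixels=10, nlevels=5, contrast=0.4)
print(segm.nlabels, 'deblended sources')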
Example #7
def get_wcs(pattern):
    for filename in pattern:

        which_hdu = choose_hdu(filename)
        header = fits.getheader(filename, which_hdu)
        print(header)
        w = WCS(header)
        print(w)

        data = fits.getdata(filename, ext=0)
        threshold = detect_threshold(data, nsigma=2.)
        sigma = 3.0 * gaussian_fwhm_to_sigma  # FWHM = 3.
        kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
        kernel.normalize()
        mean, median, std = sigma_clipped_stats(data, sigma=3)
        daofind = DAOStarFinder(fwhm=3.0, threshold=5. * std)
        sources = daofind(data - median)
        for col in sources.colnames:
            sources[col].info.format = '%.8g'  # for consistent table output

        x_unscaled = np.array(sources['xcentroid'])
        y_unscaled = np.array(sources['ycentroid'])
        x_min = min(x_unscaled)
        y_min = min(y_unscaled)
        x_max = max(x_unscaled)
        y_max = max(y_unscaled)
        x1 = ((x_unscaled - x_min) / x_max) * 90
        y1 = ((y_unscaled - y_min) / y_max) * 90
        fig, ax = plt.subplots()
        ax.plot(x1, y1, 'ok', ms=5)

        plt.show()
        c1 = SkyCoord(ra=x1 * u.degree, dec=y1 * u.degree)
        print(c1)
        coo = SkyCoord.from_name('GJ3470')
Example #8
def segmentation_map(image, extname, get_kernel=False):
    # in this context image means a 2D numpy array rather than a GFA_image
    # object

    par = common.gfa_misc_params()

    fwhm_pix = par['nominal_fwhm_asec'] / \
        util.nominal_pixel_sidelen_arith()

    threshold = detect_threshold(image, snr=2.0)

    sigma = fwhm_pix * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma,
                              x_size=int(np.round(fwhm_pix)),
                              y_size=int(np.round(fwhm_pix)))
    kernel.normalize()

    segm = detect_sources(image, threshold, npixels=5, filter_kernel=kernel)

    # add my own dilation of segm.array ?
    # incorporate masking based on master flat/bias in this analysis ?

    if not get_kernel:
        return segm
    else:
        return segm, kernel
Example #9
def find_all_sources(
    image: CCDData,
    snr: float = 3.,  # Threshold SNR for segmentation
    fwhm: float = 5.,  # Kernel FWHM for segmentation
    ksize: int = 5,  # Kernel size
    npixels: int = 10,  # Number of connected pixels required to be considered a source
):
    """
    Find extended sources in image with default parameters tuned for expected donut size.
    """
    binning = image.header['BINNING']
    fwhm = int(fwhm / binning)
    ksize = int(ksize / binning)
    npixels = int(npixels / binning)
    threshold = photutils.detect_threshold(image, nsigma=snr)
    sigma = fwhm * stats.gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=ksize, y_size=ksize)
    kernel.normalize()
    segm = photutils.detect_sources(image.data,
                                    threshold,
                                    npixels=npixels,
                                    filter_kernel=kernel)
    cat = photutils.source_properties(image.data, segm, wcs=image.wcs)
    return segm, cat
Example #10
def snr(hduls, name="SCI"):

	for hdul in hduls:
		data = hdul[name].data

		# identify background rms
		boxsize=(data.shape)
		bkg = Background2D(data, boxsize)
		bkg_mean_rms = np.mean(bkg.background_rms)

		# subtract bkg from image
		new_data = data - bkg.background

		# set threshold and detect sources, threshold 5*std above background
		threshold = detect_threshold(data=new_data, nsigma=5.0, background=0.0)
		SegmentationImage = detect_sources(data=new_data, threshold=threshold, npixels=10)

		SourceCatalog = source_properties(new_data, SegmentationImage)
		columns = ['id', 'xcentroid', 'ycentroid', 'source_sum']

		source_max_values = SourceCatalog.max_value
		avg_source_max_values = np.mean(source_max_values)

		# calculate signal to noise ratio
		signal = avg_source_max_values
		noise = bkg_mean_rms
		SNR = (signal)/(noise)
		hdul["CAT"].header.append(('SNR',SNR,"signal to noise ratio" ))

	return (hdul for hdul in hduls)
def make_segmap(filename):
    """
    Generates a source segmentation map of the input file.
    
    Parameters
    ----------
    filename : str
        The file to make segmentation map for.
    
    Outputs
    -------
    {filename}_seg.fits
        The segmentation map.
    """

    # Output filename for the segmentation map
    outfile = filename.replace('.fits', '_seg.fits')

    # Get the data
    data = fits.getdata(filename, 'SCI')

    # Detect sources; make segmap
    threshold = detect_threshold(data, 3.0)
    sigma = 3.0 * gaussian_fwhm_to_sigma  # FWHM = 3.
    kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
    kernel.normalize()
    segm = detect_sources(data, threshold, npixels=3, filter_kernel=kernel)
    fits.writeto(outfile, segm.data, overwrite=True)

    return outfile
Example #12
def get_wcs(pattern):
    for filename in pattern:

        which_hdu = choose_hdu(filename)
        header = fits.getheader(filename, which_hdu)

        data = fits.getdata(filename, ext=0)
        threshold = detect_threshold(data, nsigma=2.)
        sigma = 3.0 * gaussian_fwhm_to_sigma  # FWHM = 3.
        kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
        kernel.normalize()
        mean, median, std = sigma_clipped_stats(data, sigma=3)
        daofind = DAOStarFinder(fwhm=3.0, threshold=5. * std)
        sources = daofind(data - median)
        for col in sources.colnames:
            sources[col].info.format = '%.8g'  # for consistent table output

        x = np.array(sources['xcentroid'])
        y = np.array(sources['ycentroid'])
        OB = header['OBJECT']
        cobj = SkyCoord.from_name(OB, parse=True)
        cobjx = cobj.ra.degree
        cobjy = cobj.dec.degree
        refx = x[1]
        refy = y[1]

        Bin = float(header['CCDSUM'][0])
        Res = 0.25  #arcsec/px
        f_arcsec = Bin * Res  #arcsec/px
        f = f_arcsec / 3600

        # Scale pixel offsets from the reference source into degrees
        dx = x - refx
        xscal = cobjx + (f * dx)

        dy = y - refy
        yscal = cobjy + (f * dy)

        fig1, ax = plt.subplots()
        c1 = SkyCoord(ra=xscal * u.degree,
                      dec=yscal * u.degree)  # ra and dec of each source
        ax.plot(xscal, yscal, 'ok', ms=5)

        result = Vizier.query_object(OB)
        interesting_table = result['I/305/out']
        x1 = np.array(interesting_table['RAJ2000'])
        y1 = np.array(interesting_table['DEJ2000'])
        c2 = SkyCoord(ra=x1 * u.degree, dec=y1 * u.degree)
        fig2, ax = plt.subplots()
        ax.plot(x1, y1, 'or', mfc='none', ms=10)

        idx, d2d, d3d = c1.match_to_catalog_sky(c2)
        fig3, ax = plt.subplots()
        ax.plot(xscal, yscal, 'ok', ms=5)
        ax.plot(x1, y1, 'or', mfc='none', ms=10)
        plt.show()

        dx, d2d, d3d = c1.match_to_catalog_sky(c2)
Example #13
def make_segmentation_image(data,
                            fwhm=2.0,
                            snr=5.0,
                            x_size=5,
                            y_size=5,
                            npixels=7,
                            nlevels=32,
                            contrast=0.001,
                            deblend=True):
    """
    Use photutils to create a segmentation image containing detected sources.

    Parameters
    ----------
    data : 2D `~numpy.ndarray`
        Image to segment into sources.

    fwhm : float (default: 2.0)
        FWHM of the kernel used to filter the image.

    snr : float (default: 5.0)
        Source S/N used to set detection threshold.

    x_size : int (default: 5)
        X size of the 2D `~astropy.convolution.Gaussian2DKernel` filter.

    y_size : int (default: 5)
        Y size of the 2D `~astropy.convolution.Gaussian2DKernel` filter.

    npixels : int (default: 7)
        Number of connected pixels required to be considered a source.

    nlevels : int (default: 32)
        Number of multi-thresholding levels to use when deblending sources.

    contrast : float (default: 0.001)
        Fraction of the total blended flux that a local peak must have to be considered a separate object.

    deblend : bool (default: True)
        If true, deblend sources after creating segmentation image.
    """
    sigma = fwhm * stats.gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=x_size, y_size=y_size)
    kernel.normalize()
    threshold = photutils.detect_threshold(data, nsigma=snr)

    segm = photutils.detect_sources(data,
                                    threshold,
                                    npixels=npixels,
                                    filter_kernel=kernel)
    if deblend:
        segm = photutils.deblend_sources(data,
                                         segm,
                                         npixels=npixels,
                                         filter_kernel=kernel,
                                         nlevels=nlevels,
                                         contrast=contrast)

    return segm
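
A hypothetical one-off driver for the function above (not part of the original module); it assumes make_segmentation_image is in scope along with the imports the snippet relies on, including a photutils release that still accepts the filter_kernel argument:

from photutils.datasets import make_100gaussians_image

data = make_100gaussians_image()
segm = make_segmentation_image(data, fwhm=3.0, snr=3.0, npixels=10)
print(segm.nlabels, 'sources after deblending')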
Example #14
def make_tweakreg_catalog(model, kernel_fwhm, snr_threshold, sharplo=0.2,
                          sharphi=1.0, roundlo=-1.0, roundhi=1.0):
    """
    Create a catalog of point-line sources to be used for image
    alignment in tweakreg.

    Parameters
    ----------
    model : `ImageModel`
        The input `ImageModel` of a single image.  The input image is
        assumed to be background subtracted.

    kernel_fwhm : float
        The full-width at half-maximum (FWHM) of the 2D Gaussian kernel
        used to filter the image before thresholding.  Filtering the
        image will smooth the noise and maximize detectability of
        objects with a shape similar to the kernel.

    snr_threshold : float
        The signal-to-noise ratio per pixel above the ``background`` for
        which to consider a pixel as possibly being part of a source.

    sharplo : float, optional
        The lower bound on sharpness for object detection.

    sharphi : float, optional
        The upper bound on sharpness for object detection.

    roundlo : float, optional
        The lower bound on roundness for object detection.

    roundhi : float, optional
        The upper bound on roundness for object detection.

    Returns
    -------
    catalog : `~astropy.table.Table`
        An astropy Table containing the source catalog.
    """

    if not isinstance(model, ImageModel):
        raise ValueError('The input model must be an ImageModel.')

    # threshold = snr_threshold * model.err   # can't input img to daofind
    threshold_img = photutils.detect_threshold(model.data, snr=snr_threshold)
    threshold = threshold_img[0, 0]     # constant image

    sources = photutils.daofind(model.data, fwhm=kernel_fwhm,
                                threshold=threshold, sharplo=sharplo,
                                sharphi=sharphi, roundlo=roundlo,
                                roundhi=roundhi)

    columns = ['id', 'xcentroid', 'ycentroid', 'flux']
    catalog = sources[columns]

    return catalog
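
Examples #14 and #32 feed a constant detect_threshold image into a DAOFIND-style point-source search. A minimal standalone sketch of that workflow, assuming a recent photutils where the finder lives in photutils.detection and the threshold comes from sigma-clipped image statistics (this is a generic sketch, not the pipeline's own catalog step):

from astropy.stats import sigma_clipped_stats
from photutils.datasets import make_100gaussians_image
from photutils.detection import DAOStarFinder

data = make_100gaussians_image()
mean, median, std = sigma_clipped_stats(data, sigma=3.0)

daofind = DAOStarFinder(fwhm=3.0, threshold=5.0 * std,
                        sharplo=0.2, sharphi=1.0, roundlo=-1.0, roundhi=1.0)
sources = daofind(data - median)   # run on background-subtracted data
print(sources['id', 'xcentroid', 'ycentroid', 'flux'])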
Example #15
def maskstarsSEG(image: np.ndarray) -> np.ndarray:
    """ 'Cleans' image of external sources.
        For example will remove all stars that do not interfere with the object of interest.

    Parameters
    ----------

    image : np.ndarray
        Image to be cleaned.

    Returns
    -------

    imageClean : np.ndarray
        Image cleaned of external sources.

    """

    cenpix = np.array([int(image.shape[0]/2) + 1, int(image.shape[1]/2) + 1])
    mean, median, std = sigma_clipped_stats(image, sigma=3.)

    # create segmentation map
    # TODO give user option to specify kernel, size of kernel and threshold?
    imageClean = np.copy(image)
    threshold = detect_threshold(image, 1.5)
    sigma = 3.0 * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
    kernel.normalize()
    segm = detect_sources(image, threshold, npixels=8, filter_kernel=kernel)

    # if no sources return
    if segm is None:
        return imageClean

    # Save portions of the segmentation map outside the object of interest
    stars = []
    for i, segment in enumerate(segm.segments):

        if not _inBbox(segment.bbox.extent, cenpix):
            stars.append(i)

    # clean image of external sources
    for i in stars:
        masked = segm.segments[i].data_ma
        masked = np.where(masked > 0., 1., 0.) * np.random.normal(mean, std, size=segm.segments[i].data.shape)
        imageClean[segm.segments[i].bbox.slices] = masked

        imageClean = np.where(imageClean == 0, image, imageClean)

    return imageClean
Example #16
    def find_center_segment(self, sigma_level, kernel=None, min_pixels=5):
        """
        This function ...
        :param sigma_level:
        :param kernel:
        :param min_pixels:
        :return:
        """

        # If a subtracted box is present, use it to find the center segment
        box = self.subtracted if self.has_background else self.cutout

        if not np.all(self.mask):

            mean, median, stddev = statistics.sigma_clipped_statistics(
                box, mask=self.mask)
            threshold = mean + stddev * sigma_level

        else:
            threshold = detect_threshold(box, snr=2.0)  #snr=2.0

        # Perform the segmentation
        segments = detect_sources(box,
                                  threshold,
                                  npixels=min_pixels,
                                  filter_kernel=kernel).data

        # To plot the multiple segments that are detected
        #if segments.max() > 1: plotting.plot_box(np.ma.masked_array(box, mask=segments.astype(bool)))

        # Get the label of the center segment
        rel_center = self.cutout.rel_position(self.center)
        try:
            label = segments[int(round(rel_center.y)),
                             int(round(rel_center.x))]
        except IndexError:
            try:
                label = segments[int(rel_center.y), int(rel_center.x)]
            except IndexError:
                # The center falls outside the box: return an empty mask
                return Mask(np.zeros_like(self.cutout, dtype=bool))

        # If the center pixel is identified as being part of the background, create an empty mask (the center does not
        # correspond to a segment)
        if label == 0:
            return Mask(np.zeros_like(self.cutout, dtype=bool))

        # Create a mask of the center segment
        else:
            return Mask((segments == label))
Example #17
    def create_seg_map(self):
        '''
        Creates segmentation map, from original FLT file, that is used in 
        background subtraction and to fix cosmic rays.

        Parameters
        ----------
        self : object
            DashData object created from an individual IMA file.

        Output
        ------
        Segmentation Image : fits file
            Segmentation map
        Source List : .dat file
            List of sources and their properties
        '''

        flt = fits.open(self.flt_file_name)
        data = flt[1].data

        threshold = detect_threshold(data, nsigma=3.)

        sigma = 3.0 * gaussian_fwhm_to_sigma  # FWHM = 3.
        kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
        kernel.normalize()
        segm = detect_sources(data,
                              threshold,
                              npixels=10,
                              filter_kernel=kernel)

        hdu = fits.PrimaryHDU(segm.data)
        if not os.path.exists('segmentation_maps'):
            os.mkdir('segmentation_maps')
        hdu.writeto(('segmentation_maps/{}_seg.fits').format(self.root),
                    overwrite=True)

        # Create source list
        cat = source_properties(data, segm)

        tbl = cat.to_table()
        tbl['xcentroid'].info.format = '.2f'
        tbl['ycentroid'].info.format = '.2f'
        tbl['cxx'].info.format = '.2f'
        tbl['cxy'].info.format = '.2f'
        tbl['cyy'].info.format = '.2f'

        ascii.write(tbl,
                    'segmentation_maps/{}_source_list.dat'.format(self.root))
def detect_sources(pattern):  # extracts the light sources from the image (based on sigma, FWHM, thresholds...)
    rot = rotate_img(pattern)
    threshold = detect_threshold(rot, nsigma=2.)
    sigma = 3.0 * gaussian_fwhm_to_sigma  # FWHM = 3.
    kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
    kernel.normalize()
    mean, median, std = sigma_clipped_stats(rot, sigma=3)
    daofind = DAOStarFinder(fwhm=3.0, threshold=5.*std)
    sources = daofind(rot - median)
    for col in sources.colnames:
        sources[col].info.format = '%.8g'  # for consistent table output
   # Pixel coordinates of the sources
    x1 = np.array(sources['xcentroid'])
    y1 = np.array(sources['ycentroid'])
    return(x1,y1)
Example #19
def make_segments(image, npixels=None, nsigma=3., fwhm=8., kernel_size=4):
    """
    Segment an image.
    Parameters
    ----------
    image : array like
        Input image
    npixels : int
        The number of connected pixels, each greater than ``threshold``,
        that an object must have to be detected.  ``npixels`` must be a
        positive integer.
    nsigma : float or image array
        The number of standard deviations per pixel above the
        ``background`` for which to consider a pixel as possibly being
        part of a source.
    fwhm : float
        FWHM of smoothing gaussian kernel
    kernel_size : int
        Size of smoothing kernel

    Returns
    -------
    segment_image : `~photutils.segmentation.SegmentationImage` or `None`
        A 2D segmentation image, with the same shape as ``data``, where
        sources are marked by different positive integer values.  A
        value of zero is reserved for the background.  If no sources
        are found then `None` is returned.
    """

    if npixels is None:
        npixels = int(fwhm**2)

    kernel = make_kernel(fwhm, kernel_size) if kernel_size else None

    # Make detection threshold (a scalar nsigma is converted;
    # otherwise it is assumed to already be a threshold image)
    if isinstance(nsigma, (int, float)):
        threshold = detect_threshold(image, nsigma=nsigma)
    else:
        threshold = nsigma

    return detect_sources(
        image,
        threshold,
        npixels=npixels,
        filter_kernel=kernel,
    )
Example #20
def getsegmentation(image):
    ''' 
    Pass in an image filename.
    Returns the segmentation map data and a table of source properties.
    '''
    # read in image
    data = fits.getdata(image)
    threshold = detect_threshold(data, nsigma=2.)
    # create segmentation map
    segm = detect_sources(data, threshold, npixels=25)

    # skipping deblending for now
    
    # create catalog
    cat = source_properties(data, segm)
    tbl = cat.to_table()    
    return segm.data, tbl
Example #21
def detect_obj(img, snr=2.8, exp_sz= 1.2, plt_show = True):
    threshold = detect_threshold(img, snr=snr)
    center_img = len(img)/2
    sigma = 3.0 * gaussian_fwhm_to_sigma  # FWHM = 3.
    kernel = Gaussian2DKernel(sigma, x_size=5, y_size=5)
    kernel.normalize()
    segm = detect_sources(img, threshold, npixels=10, filter_kernel=kernel)
    npixels = 20
    segm_deblend = deblend_sources(img, segm, npixels=npixels,
                                    filter_kernel=kernel, nlevels=25,
                                    contrast=0.001)
    #Number of objects segm_deblend.data.max()
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12.5, 10))
    import copy, matplotlib
    my_cmap = copy.copy(matplotlib.cm.get_cmap('gist_heat')) # copy the default cmap
    my_cmap.set_bad('black')
    vmin = 1.e-3
    vmax = 2.1 
    ax1.imshow(img, origin='lower', cmap=my_cmap, norm=LogNorm(), vmin=vmin, vmax=vmax)
    ax1.set_title('Data')
    ax2.imshow(segm_deblend, origin='lower', cmap=segm_deblend.cmap(random_state=12345))
    ax2.set_title('Segmentation Image')
    plt.show()
    
    columns = ['id', 'xcentroid', 'ycentroid', 'source_sum', 'area']
    cat = source_properties(img, segm_deblend)
    tbl = cat.to_table(columns=columns)
    tbl['xcentroid'].info.format = '.2f'  # optional format
    tbl['ycentroid'].info.format = '.2f'
    print(tbl)
    objs = []
    for obj in cat:
        position = (obj.xcentroid.value-center_img, obj.ycentroid.value-center_img)
        a_o = obj.semimajor_axis_sigma.value
        b_o = obj.semiminor_axis_sigma.value
        Re = np.pi * a_o * b_o /2.
        q = 1 - obj.ellipticity.to_value()
        objs.append((position,Re,q))
    dis_sq = [np.sqrt((objs[i][0][0])**2+(objs[i][0][1])**2) for i in range(len(objs))]
    dis_sq = np.array(dis_sq)
    c_index= np.where(dis_sq == dis_sq.min())[0][0]
    return objs, c_index
Example #22
def get_segmentation_map(image: np.array, npixels: int = 5):
    """ obtain segmentation map of biggest object in image

    Parameter
    ---------
    image : numpy.array
        contains the b/w image in 2D
    npixers : int
        minimum number of connected pixels
    """
    threshold = photutils.detect_threshold(image, 1.5)
    segm = photutils.detect_sources(image, threshold, npixels)
    # Keep only the largest segment
    label = np.argmax(segm.areas) + 1
    segmap = segm.data == label
    # regularize
    segmap_float = ndi.uniform_filter(np.float64(segmap), size=10)
    segmap = segmap_float > 0.5
    return segmap
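
A hypothetical driver for the function above (not in the original source); it assumes the snippet's own imports are available, namely a photutils build exposing detect_threshold/detect_sources at the top level and scipy.ndimage imported as ndi:

import numpy as np
from photutils.datasets import make_100gaussians_image

image = make_100gaussians_image()
segmap = get_segmentation_map(image, npixels=5)
print('pixels in main segment:', int(np.sum(segmap)))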
Example #23
def make_segmap(f, overwrite=True):
    """
    Makes a segmentation map for each extension of the input files.
    
    Parameters
    ----------
    f : str
        The filename to make segmentation maps for.

    overwrite : bool
        Option to overwrite existing segmaps if they exist.
    
    Outputs
    -------
    {f}_seg_ext_1.fits
        The segmentation map for SCI extension 1.

    {f}_seg_ext_4.fits
        The segmentation map for SCI extension 4.
    """

    # Make segmaps for each SCI extension
    for i in [1, 4]:
        # See if segmap already exists
        outfile = f.replace('.fits', '_seg_ext_{}.fits'.format(i))
        if os.path.exists(outfile) and not overwrite:
            pass

        else:
            # Get the data
            data = fits.getdata(f, i)

            # Detect sources; make segmap
            threshold = detect_threshold(data, snr=1.0)
            sigma = 3.0 * gaussian_fwhm_to_sigma  # FWHM = 3.
            kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
            kernel.normalize()
            segm = detect_sources(data,
                                  threshold,
                                  npixels=3,
                                  filter_kernel=kernel)
            fits.writeto(outfile, segm.data, overwrite=overwrite)
Example #24
    def find_center_segment(self, sigma_level, kernel=None, min_pixels=5):

        """
        This function ...
        :param sigma_level:
        :param kernel:
        :param min_pixels:
        :return:
        """

        # If a subtracted box is present, use it to find the center segment
        box = self.subtracted if self.has_background else self.cutout

        if not np.all(self.mask):

            mean, median, stddev = statistics.sigma_clipped_statistics(box, mask=self.mask)
            threshold = mean + stddev * sigma_level

        else: threshold = detect_threshold(box, snr=2.0) #snr=2.0

        # Perform the segmentation
        segments = detect_sources(box, threshold, npixels=min_pixels, filter_kernel=kernel).data

        # To plot the multiple segments that are detected
        #if segments.max() > 1: plotting.plot_box(np.ma.masked_array(box, mask=segments.astype(bool)))

        # Get the label of the center segment
        rel_center = self.cutout.rel_position(self.center)
        try:
            label = segments[int(round(rel_center.y)), int(round(rel_center.x))]
        except IndexError:
            try:
                label = segments[int(rel_center.y), int(rel_center.x)]
            except IndexError:
                # The center falls outside the box: return an empty mask
                return Mask(np.zeros_like(self.cutout, dtype=bool))

        # If the center pixel is identified as being part of the background, create an empty mask (the center does not
        # correspond to a segment)
        if label == 0: return Mask(np.zeros_like(self.cutout, dtype=bool))

        # Create a mask of the center segment
        else: return Mask((segments == label))
def get_wcs(pattern):
    for filename in pattern:
        with fits.open(filename, 'update') as hdul:
            hdr = hdul[0].header
            key = hdr['OBJECT']
            if key == 'wasp16':
                w = WCS(filename)
                print(filename)
                print(key)
                print(w)

                image_data = fits.getdata(filename, ext=0)
                from photutils import detect_threshold
                threshold = detect_threshold(image_data, 2)

                from astropy.convolution import Gaussian2DKernel
                from astropy.stats import gaussian_fwhm_to_sigma
                from photutils import detect_sources
                sigma = 3.0 * gaussian_fwhm_to_sigma  # FWHM = 3.
                kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
                kernel.normalize()
                segm = detect_sources(image_data,
                                      threshold,
                                      npixels=5,
                                      filter_kernel=kernel)

                import numpy as np
                import matplotlib.pyplot as plt
                from astropy.visualization import SqrtStretch
                from astropy.visualization.mpl_normalize import ImageNormalize
                norm = ImageNormalize(stretch=SqrtStretch())
                fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 12.5))
                ax1.imshow(image_data,
                           origin='lower',
                           cmap='Greys_r',
                           norm=norm)
                ax1.set_title('Data')
                cmap = segm.cmap(random_state=12345)
                ax2.imshow(segm, origin='lower', cmap=cmap)
                ax2.set_title('Segmentation Image')
                plt.show()
Example #26
def optimize_m(t_ini, f_ini, alpha_ini, sig_curr):
    #keeping in mind that minimize requires flattened arrays
    grad_fun = lambda tg: -1 * grad_lnpost(tg, f_ini, alpha_ini, sig_curr)
    res = scipy.optimize.minimize(
        lambda tt: -1 * lnpost(tt, f_ini, alpha_ini, sig_curr),
        t_ini,  # theta initial
        jac=grad_fun,
        method='L-BFGS-B',
        bounds=[(1e-5, 10)] * len(t_ini))

    tt_prime = res['x']
    print(res['nit'])
    w_final = tt_prime.reshape((n_grid, n_grid))
    print(w_final)
    #pick out the peaks using photutils
    thresh = detect_threshold(w_final, 3)
    tbl = find_peaks(w_final, thresh)
    rows, cols = np.array(tbl['y_peak']), np.array(tbl['x_peak'])
    w_peaks = np.zeros((n_grid, n_grid))
    w_peaks[rows, cols] = w_final[rows, cols]  # keep only the peak pixels
    return w_peaks
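
A minimal standalone sketch of the detect_threshold + find_peaks step used above, run on a synthetic image (assumes a recent photutils; the keep-only-peak-pixels bookkeeping mirrors the intent of w_peaks):

import numpy as np
from photutils.datasets import make_100gaussians_image
from photutils.detection import find_peaks
from photutils.segmentation import detect_threshold

img = make_100gaussians_image()
thresh = detect_threshold(img, nsigma=3.0)
tbl = find_peaks(img, thresh, box_size=11)   # local maxima above the threshold

peaks = np.zeros_like(img)
peaks[tbl['y_peak'], tbl['x_peak']] = img[tbl['y_peak'], tbl['x_peak']]
print(len(tbl), 'peaks kept')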
Example #27
def get_sources_coords(target, xobj, yobj, pattern):
    for filename in pattern:
        data = fits.getdata(filename, ext=0)
        threshold = detect_threshold(data, nsigma=2.)
        sigma = 3.0 * gaussian_fwhm_to_sigma  # FWHM = 3.
        kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
        kernel.normalize()
        mean, median, std = sigma_clipped_stats(data, sigma=3)
        daofind = DAOStarFinder(fwhm=3.0, threshold=5. * std)
        sources = daofind(data - median)
        for col in sources.colnames:
            sources[col].info.format = '%.8g'  # for consistent table output
    # Pixel coordinates of the sources
        w = wcs(target, xobj, yobj, pattern)
        c = dict()
        x1 = np.array(sources['xcentroid'])
        y1 = np.array(sources['ycentroid'])
        world = w.wcs_pix2world(x1, y1, 0)
        c['ra_img'] = world[0]
        c['dec_img'] = world[1]
        return (c)
def get_data(pattern): # Gets file data, rotates the image and extracts sources
    for filename in pattern:
        which_hdu = choose_hdu(filename)
        header = fits.getheader(filename, which_hdu)

        # Gets file data and rotates the image
        data = fits.getdata(filename, ext=0)
        rot = rotate(data,-90,reshape=False)

        # extracts the light sources from the image (based on sigma, FWHM, thresholds...) 
        threshold = detect_threshold(rot, nsigma=2.)
        sigma = 3.0 * gaussian_fwhm_to_sigma  # FWHM = 3.
        kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
        kernel.normalize()
        mean, median, std = sigma_clipped_stats(rot, sigma=3)
        daofind = DAOStarFinder(fwhm=3.0, threshold=5.*std)
        sources = daofind(rot - median)
        for col in sources.colnames:
            sources[col].info.format = '%.8g'  # for consistent table output

       # Pixel coordinates of the sources
        x1 = np.array(sources['xcentroid'])
        y1 = np.array(sources['ycentroid'])
Example #29
    def make_thr(self, nsigma=None, do_mask=None):
        """
        make a threshold array for source detection
        """
        from photutils import detect_threshold
        if nsigma:
            self.detect_thr.params.nsigma = nsigma
        if not self.detect_thr.params.nsigma:
            self.detect_thr.params.nsigma = 2.
        if do_mask:
            self.detect_thr.params.do_mask = do_mask
        if not self.detect_thr.params.do_mask:
            self.detect_thr.params.do_mask = False

        data = self.data
        nsigma = self.detect_thr.params.nsigma
        mask = None
        if self.detect_thr.params.do_mask:
            mask = ~self.mask.mask

        self.detect_thr.value = detect_threshold(data,
                                                 nsigma=nsigma,
                                                 mask=mask)
Example #30
def estimate_background(cutout, nsigma=1, gauss_width=1, npixels=3):
    """ Simple background detecting using super-pixel method"""
    sigma = gauss_width * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma)
    kernel.normalize()

    # Find threshold for cutout, and make segmentation map
    threshold = detect_threshold(cutout, snr=nsigma)
    segments = detect_sources(cutout,
                              threshold,
                              npixels=npixels,
                              filter_kernel=kernel)

    segment_array = segments.data

    x_step = int(cutout.shape[0] / 10)
    y_step = int(cutout.shape[1] / 10)

    super_pixel_medians, super_pixel_rms_vals = [], []

    for x in range(0, cutout.shape[0] - x_step, x_step):
        for y in range(0, cutout.shape[1] - y_step, y_step):
            super_pixel = cutout[y:y + y_step, x:x + x_step]

            super_pixel_contents = []
            # keep only background (unsegmented) pixels from this super-pixel
            for m in range(0, super_pixel.shape[0]):
                for n in range(0, super_pixel.shape[1]):
                    if segment_array[y + m][x + n] == 0:
                        super_pixel_contents.append(super_pixel[m][n])

            super_pixel_medians.append(median(super_pixel_contents))
            super_pixel_rms_vals.append(
                sqrt((mean(super_pixel_contents) -
                      median(super_pixel_contents))**2))

    return median(super_pixel_medians), median(super_pixel_rms_vals)
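
The super-pixel loop above hand-rolls a coarse background grid; for comparison, a minimal sketch of the library route using photutils' Background2D, under the assumption that a recent photutils is installed (an alternative approach, not the original author's method):

from astropy.stats import SigmaClip
from photutils.background import Background2D, MedianBackground
from photutils.datasets import make_100gaussians_image

data = make_100gaussians_image()
bkg = Background2D(data, box_size=(30, 30), filter_size=(3, 3),
                   sigma_clip=SigmaClip(sigma=3.0),
                   bkg_estimator=MedianBackground())
print(bkg.background_median, bkg.background_rms_median)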
Example #31
def mask_cutout(cutout, nsigma=1., gauss_width=2.0, npixels=5):
    """ Masks a cutout using segmentation and deblending using watershed"""
    mask_data = {}

    # Generate a copy of the cutout just to prevent any weirdness with numpy pointers
    cutout_copy = copy(cutout)

    sigma = gauss_width * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma)
    kernel.normalize()

    # Find threshold for cutout, and make segmentation map
    threshold = detect_threshold(cutout, snr=nsigma)
    segments = detect_sources(cutout,
                              threshold,
                              npixels=npixels,
                              filter_kernel=kernel)

    # Attempt to deblend. Return original segments upon failure.
    try:
        deb_segments = deblend_sources(cutout,
                                       segments,
                                       npixels=5,
                                       filter_kernel=kernel)
    except ImportError:
        print("Skimage not working!")
        deb_segments = segments
    except Exception:
        # Fall back to the undeblended segments if deblending fails
        deb_segments = segments

    segment_array = deb_segments.data

    # Center pixel values. (Assume that the central segment is the image, which it should be)
    c_x, c_y = floor(segment_array.shape[0] / 2), floor(
        segment_array.shape[1] / 2)
    central = segment_array[int(c_x)][int(c_y)]

    # Estimate background with severe cutout
    bg_method = 1
    bg_est, bg_rms = estimate_background(cutout_copy)

    mask_data["BG_EST"] = bg_est
    mask_data["BG_RMS"] = bg_rms
    mask_data["N_OBJS"] = segments.nlabels
    mask_data["BGMETHOD"] = bg_method

    # Use an alternative method to estimate the background if the first method fails
    if isnan(bg_est) or isnan(bg_rms):
        bg_pixel_array = []
        for x in range(0, segment_array.shape[0]):
            for y in range(0, segment_array.shape[1]):
                if segment_array[x][y] == 0:
                    bg_pixel_array.append(cutout_copy[x][y])

        bg_est = median(bg_pixel_array)
        bg_rms = sqrt((mean(bg_pixel_array) - bg_est)**2)

        bg_method = 2
        # Record the fallback estimates
        mask_data["BG_EST"] = bg_est
        mask_data["BG_RMS"] = bg_rms
        mask_data["BGMETHOD"] = bg_method

    # Return input image if no need to mask
    if segments.nlabels == 1:
        mask_data["N_MASKED"] = 0
        return cutout_copy, mask_data

    num_masked = 0
    # Mask pixels
    for x in range(0, segment_array.shape[0]):
        for y in range(0, segment_array.shape[1]):
            if segment_array[x][y] not in (0, central):
                cutout_copy[x][y] = bg_est
                num_masked += 1

    mask_data["N_MASKED"] = num_masked

    return cutout_copy, mask_data
Example #32
def make_tweakreg_catalog(model, kernel_fwhm, snr_threshold, sharplo=0.2,
                          sharphi=1.0, roundlo=-1.0, roundhi=1.0,
                          brightest=None, peakmax=None):
    """
    Create a catalog of point-like sources to be used for image
    alignment in tweakreg.

    Parameters
    ----------
    model : `ImageModel`
        The input `ImageModel` of a single image.  The input image is
        assumed to be background subtracted.

    kernel_fwhm : float
        The full-width at half-maximum (FWHM) of the 2D Gaussian kernel
        used to filter the image before thresholding.  Filtering the
        image will smooth the noise and maximize detectability of
        objects with a shape similar to the kernel.

    snr_threshold : float
        The signal-to-noise ratio per pixel above the ``background`` for
        which to consider a pixel as possibly being part of a source.

    sharplo : float, optional
        The lower bound on sharpness for object detection.

    sharphi : float, optional
        The upper bound on sharpness for object detection.

    roundlo : float, optional
        The lower bound on roundness for object detection.

    roundhi : float, optional
        The upper bound on roundness for object detection.

    brightest : int, None, optional
        Number of brightest objects to keep after sorting the full object list.
        If ``brightest`` is set to `None`, all objects will be selected.

    peakmax : float, None, optional
        Maximum peak pixel value in an object. Only objects whose peak pixel
        values are *strictly smaller* than ``peakmax`` will be selected.
        This may be used to exclude saturated sources. By default, when
        ``peakmax`` is set to `None`, all objects will be selected.

        .. warning::
            `DAOStarFinder` automatically excludes objects whose peak
            pixel values are negative. Therefore, setting ``peakmax`` to a
            non-positive value would result in exclusion of all objects.

    Returns
    -------
    catalog : `~astropy.table.Table`
        An astropy Table containing the source catalog.
    """

    if not isinstance(model, ImageModel):
        raise ValueError('The input model must be an ImageModel.')

    threshold_img = detect_threshold(model.data, snr=snr_threshold)
    # TODO:  use threshold image based on error array
    threshold = threshold_img[0, 0]     # constant image

    daofind = DAOStarFinder(fwhm=kernel_fwhm, threshold=threshold,
                            sharplo=sharplo, sharphi=sharphi, roundlo=roundlo,
                            roundhi=roundhi, brightest=brightest,
                            peakmax=peakmax)
    sources = daofind(model.data)

    columns = ['id', 'xcentroid', 'ycentroid', 'flux']
    if sources:
        catalog = sources[columns]
    else:
        catalog = Table(names=columns, dtype=(np.int_, np.float_, np.float_,
                                              np.float_))

    return catalog
Example #33
thresh = sky_th * sky_s
print('sky_s x sky_th = threshold')
print('{0:8.6f} x {1:4d}   =   {2:8.6f}\n'.format(sky_s, sky_th, thresh))

# What if we do "sigma-clip" than MAD?
sky_a, sky_m, sky_s_sc = sigma_clipped_stats(
    img)  # default is 3-sigma, 5 iters
thresh_sc = sky_th * sky_s_sc
thresh = sky_th * sky_s_sc
print('3 sigma 5 iters clipped case:')
print('{0:8.6f} x {1:4d}   =   {2:8.6f}\n'.format(sky_s_sc, sky_th, thresh_sc))

#%%
from photutils import detect_threshold

thresh_snr = detect_threshold(data=img.data, snr=3)
thresh_snr = thresh_snr[0][0]
# This will give you 3*bkg_std.
print('detect_threshold', thresh_snr)

#%%
import matplotlib.pyplot as plt
from photutils import DAOStarFinder
from photutils import CircularAperture as CircAp

DAOfind = DAOStarFinder(
    fwhm=FWHM,
    threshold=thresh_snr,
    sharplo=0.2,
    sharphi=3.0,  # default values: sharplo=0.2, sharphi=1.0,
    roundlo=-1.0,
Example #34
hdu = datasets.load_star_image()
data = hdu.data[0:400, 0:400]
image = hdu.data.astype(float)
image -= np.median(image)

from photutils import daofind
from astropy.stats import mad_std
from astropy.stats import sigma_clipped_stats
bkg_sigma = mad_std(image)
mean, median, std = sigma_clipped_stats(data, sigma=3.0, iters=5)
print(mean, median, std)
sources = daofind(image, fwhm=4.0, threshold=3.0*bkg_sigma)
print(sources)

from photutils import CircularAperture
from astropy.visualization import SqrtStretch
from astropy.visualization.mpl_normalize import ImageNormalize
import matplotlib.pylab as plt
positions = (sources['xcentroid'], sources['ycentroid'])
apertures = CircularAperture(positions, r=4.)
norm = ImageNormalize(stretch=SqrtStretch())
plt.imshow(data, cmap='Greys', origin='lower', norm=norm)
apertures.plot(color='blue', lw=1.5, alpha=0.5)

#
from photutils.datasets import make_100gaussians_image
data = make_100gaussians_image()
from photutils import detect_threshold
threshold = detect_threshold(data, snr=3.)

Example #35
  def extension(self, extension_idx, threshold='', FWHM=3.0, sigma=3.0, snr=50., plot=False):
    '''
    A method to run aperture photometry routines on an individual extension and save the results to the exposure class
    
    Parameters
    ----------
    extension_idx: int
      Index of the extension
    threshold: float (optional)
      The absolute image value above which to select sources
    FWHM: float
      The full width at half maximum
    sigma: float
      Number of standard deviations to use for background estimation
    snr: float
      The signal-to-noise ratio to use in the threshold detection
    plot: bool
      Plot the field with identified sources circled      

    Returns
    -------
    source_list: table
      A source list for the image

    '''

    # Define the data array
    data = self.hdulist[extension_idx].data.astype(float)
    
    # Extract the header and create a WCS object
    hdr = self.hdulist[extension_idx].header
    wcs = WCS(hdr)

    # Estimate the background and background noise
    mean, median, std = sigma_clipped_stats(data, sigma=sigma, iters=5)

    # Calculate the detection threshold and FWHM if not provided
    if not threshold: threshold = np.mean(detect_threshold(data, snr=snr))
    
    # Print the parameters being used
    for p, v in zip(['mean', 'median', 'std', 'threshold', 'FWHM'],
                    [mean, median, std, threshold, FWHM]):
        print('{!s:10}: {:.3f}'.format(p, v))

    # Subtract background and generate sources list of all detections
    sources = daofind(data-median, threshold, FWHM)
    
    # Map RA and Dec to pixels
    positions = (sources['xcentroid'], sources['ycentroid'])
    skycoords = pixel_to_skycoord(*positions, wcs=wcs)
    
    # Calculate magnitudes at given source positions
    apertures = CircularAperture(positions, r=2.)
    photometry_table = aperture_photometry(data, apertures)
    
    # 'skycoords' IRCS object is problematic for stacking tables so for now we'll just add the ra and dec
    # photometry_table['sky_center'] = skycoords
    photometry_table['ra'], photometry_table['dec'] = skycoords.ra, skycoords.dec
    
    # Update data in the exposure object
    self.source_table = vstack([self.source_table,photometry_table], join_type='inner')  
    
    # Plot the sources
    if plot:
      norm = ImageNormalize(stretch=SqrtStretch())
      plt.imshow(data, cmap='Greys', origin='lower', norm=norm)
      apertures.plot(color='blue', lw=1.5, alpha=0.5)
    
    print('{!s:10}: {}'.format('sources', len(sources)))
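
A minimal standalone sketch of the aperture-photometry step in this method, assuming a recent photutils where the aperture classes live in photutils.aperture; the positions are hypothetical (x, y) pixel pairs, not values from the example above:

from photutils.aperture import CircularAperture, aperture_photometry
from photutils.datasets import make_100gaussians_image

data = make_100gaussians_image()
positions = [(50.0, 50.0), (150.5, 100.2)]   # hypothetical (x, y) source positions
apertures = CircularAperture(positions, r=4.0)
phot_table = aperture_photometry(data, apertures)
print(phot_table['xcenter', 'ycenter', 'aperture_sum'])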