Example #1
def _segmap_base(data,
                 numpix,
                 mask=None,
                 nsigma=2,
                 contrast=0.4,
                 nlevels=5,
                 kernel=None):
    """Returns a generic segmentation map of the input image
    INPUTS:
      data:     image data
      numpix:   minimum region size in pixels (default: 10)
      mask:     mask for the image (default: None)
      snr:      signal-to-noise threshold for detecting objects (default: 2)
      contrast: contrast ratio used in deblending (default: 0.4)
      nlevels:  number of lebels to split image to when deblending (default: 5)
      kernel:   kernel to use for image smoothing (default: None)
    """

    # Convert mask to boolean
    if mask is not None and (mask.dtype != "bool"):
        mask = np.array(mask, dtype="bool")

    threshold = phot.detect_threshold(data, nsigma=nsigma, mask=mask)
    segmap = phot.detect_sources(data,
                                 threshold,
                                 numpix,
                                 filter_kernel=kernel,
                                 mask=mask)
    segmap = phot.deblend_sources(data,
                                  segmap,
                                  npixels=numpix,
                                  filter_kernel=kernel,
                                  nlevels=nlevels,
                                  contrast=contrast)
    return segmap
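
A minimal usage sketch for _segmap_base (not part of the original listing). It assumes the module imports numpy as np and photutils.segmentation as phot, and a photutils release in which detect_sources/deblend_sources still accept the filter_kernel keyword.

# Hypothetical example: one synthetic Gaussian source on a noisy background.
import numpy as np

rng = np.random.default_rng(42)
yy, xx = np.mgrid[:101, :101]
data = rng.normal(0.0, 1.0, (101, 101))
data += 50.0 * np.exp(-((xx - 50) ** 2 + (yy - 50) ** 2) / (2 * 3.0 ** 2))

segmap = _segmap_base(data, numpix=10)   # nsigma=2, contrast=0.4, nlevels=5 defaults
print(segmap.nlabels, "source(s) detected")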
Example #2
def snr(hduls, name="SCI"):

	for hdul in hduls:
		data = hdul[name].data

		# identify background rms
		boxsize=(data.shape)
		bkg = Background2D(data, boxsize)
		bkg_mean_rms = np.mean(bkg.background_rms)

		# subtract bkg from image
		new_data = data - bkg.background

		# set threshold and detect sources, threshold 5*std above background
		threshold = detect_threshold(data=new_data, nsigma=5.0, background=0.0)
		SegmentationImage = detect_sources(data=new_data, threshold=threshold, npixels=10)

		SourceCatalog = source_properties(new_data, SegmentationImage)
		columns = ['id', 'xcentroid', 'ycentroid', 'source_sum']

		source_max_values = SourceCatalog.max_value
		avg_source_max_values = np.mean(source_max_values)

		# calculate signal to noise ratio
		signal = avg_source_max_values
		noise = bkg_mean_rms
		SNR = (signal)/(noise)
		hdul["CAT"].header.append(('SNR',SNR,"signal to noise ratio" ))

	return (hdul for hdul in hduls)
Example #3
    def detect(self):

        # Source detection using segmentation
        kernel = astro.convolution.Gaussian2DKernel(self.sigma,
                                                    x_size=3,
                                                    y_size=3)
        kernel.normalize()
        segm = phot.detect_sources(self.data,
                                   self.threshold,
                                   npixels=5,
                                   filter_kernel=kernel)

        # Deblending sources
        segm_deblend = phot.deblend_sources(self.data,
                                            segm,
                                            npixels=5,
                                            filter_kernel=kernel,
                                            nlevels=32,
                                            contrast=0.001)

        cat = phot.source_properties(self.data, segm_deblend, wcs=self.wcs)
        sources = cat.to_table()
        sources['xcentroid'].info.format = '.2f'  # optional format
        sources['ycentroid'].info.format = '.2f'
        sources['cxx'].info.format = '.2f'
        sources['cxy'].info.format = '.2f'
        sources['cyy'].info.format = '.2f'
        sources['gini'].info.format = '.2f'

        return sources.to_pandas().sort_values('max_value', ascending=True)
Example #4
 def source_detection_individual(self, psfFWHM, nsigma=3.0, sc_key=''):
     '''
     Parameters
     ----------
     psfFWHM : float
         FWHM of the imaging point spread function
     nsigma : float
         source detection threshold
     '''
     data = np.array(self.data.copy())
     psfFWHMpix = psfFWHM / self.pixel_scales[0].value
     thresholder = detect_threshold(data, nsigma=nsigma)
     sigma = psfFWHMpix * gaussian_fwhm_to_sigma
     kernel = Gaussian2DKernel(sigma, x_size=5, y_size=5)
     kernel.normalize()
     segm = detect_sources(data,
                           thresholder,
                           npixels=5,
                           filter_kernel=kernel)
     props = source_properties(data, segm)
     tab = Table(props.to_table())
     self.sources_catalog = tab
     srcPstradec = self.data.wcs.all_pix2world(tab['xcentroid'],
                                               tab['ycentroid'], 1)
     sc = SkyCoord(srcPstradec[0], srcPstradec[1], unit='deg')
     sctab = Table([sc, np.arange(len(sc))],
                   names=['sc', 'sloop_{0}'.format(sc_key)])
     self.sources_skycord = sctab
Example #5
def segmentation_map(image, extname, get_kernel=False):
    # in this context image means a 2D numpy array rather than a GFA_image
    # object

    par = common.gfa_misc_params()

    fwhm_pix = par['nominal_fwhm_asec'] / \
        util.nominal_pixel_sidelen_arith()

    threshold = detect_threshold(image, snr=2.0)

    sigma = fwhm_pix * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma,
                              x_size=int(np.round(fwhm_pix)),
                              y_size=int(np.round(fwhm_pix)))
    kernel.normalize()

    segm = detect_sources(image, threshold, npixels=5, filter_kernel=kernel)

    # add my own dilation of segm.array ?
    # incorporate masking based on master flat/bias in this analysis ?

    if not get_kernel:
        return segm
    else:
        return segm, kernel
Example #6
    def central(self, npixels=1, connectivity=8):

        """
        This function ...
        :param npixels: number of connected pixels
        :param connectivity:
        :return:
        """

        # Perform the segmentation
        segments = detect_sources(self.data, min_alpha, npixels=npixels, connectivity=connectivity).data

        # To plot the multiple segments that are detected
        # if segments.max() > 1: plotting.plot_box(np.ma.masked_array(box, mask=segments.astype(bool)))

        # Get the label of the center segment
        #rel_center = self.cutout.rel_position(self.center)
        label = segments[int(round(self.y_center)), int(round(self.x_center))]

        # If the center pixel is identified as being part of the background, create an empty mask (the center does not
        # correspond to a segment)
        if label == 0: return self.__class__(np.zeros_like(self.data))

        # Return copy with only largest
        else:
            new = self.copy()
            new._data[segments != label] = 0
            return new
Example #7
def find_all_sources(
    image: CCDData,
    snr: float = 3.,  # Threshold SNR for segmentation
    fwhm: float = 5.,  # Kernel FWHM for segmentation
    ksize: int = 5,  # Kernel size
    npixels: int = 10  # Number of connected pixels required to be considered a source
):
    """
    Find extended sources in image with default parameters tuned for expected donut size.
    """
    binning = image.header['BINNING']
    fwhm = int(fwhm / binning)
    ksize = int(ksize / binning)
    npixels = int(npixels / binning)
    threshold = photutils.detect_threshold(image, nsigma=snr)
    sigma = fwhm * stats.gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=ksize, y_size=ksize)
    kernel.normalize()
    segm = photutils.detect_sources(image.data,
                                    threshold,
                                    npixels=npixels,
                                    filter_kernel=kernel)
    cat = photutils.source_properties(image.data, segm, wcs=image.wcs)
    return segm, cat
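
A hedged usage sketch for find_all_sources (not in the original listing): it builds a CCDData object carrying the BINNING header keyword the function reads, and assumes an astropy/photutils combination compatible with the snippet above (which passes the CCDData straight to photutils.detect_threshold and uses the older filter_kernel / source_properties API).

# Hypothetical example data; the BINNING value and image content are illustrative.
import numpy as np
from astropy.io import fits
from astropy.nddata import CCDData

rng = np.random.default_rng(0)
data = rng.normal(100.0, 5.0, (256, 256))
data[100:140, 100:140] += 200.0                 # fake extended "donut"

header = fits.Header()
header['BINNING'] = 1                           # the function divides fwhm, ksize, npixels by this
image = CCDData(data, unit='adu', meta=header)  # CCDData.header is an alias for meta

segm, cat = find_all_sources(image, snr=3.0, fwhm=5.0)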
Example #8
    def fill_holes(self, npixels=1, connectivity=8):
        """
        This function ...
        :param npixels:
        :param connectivity:
        :return:
        """

        # Perform the segmentation
        segments = detect_sources(self.inverse().data,
                                  1,
                                  npixels=npixels,
                                  connectivity=connectivity).data

        # Find the label of the largest segment (=the background)
        label_counts = np.bincount(segments.flatten())
        if len(label_counts) > 1:

            background_label = np.argmax(label_counts[1:]) + 1
            # If the source mask is larger than the background (in number of pixels), the above will provide the correct label
            # therefore we do the '[1:]'

            # Create a mask for the holes identified as background
            holes = self.inverse().data > 1
            holes[segments == background_label] = False

            # Remove holes from the mask
            self._data[holes] = 255
Example #9
    def central(self, npixels=1, connectivity=8):
        """
        This function ...
        :param npixels: number of connected pixels
        :param connectivity:
        :return:
        """

        # Perform the segmentation
        segments = detect_sources(self.data.astype(float),
                                  0.5,
                                  npixels=npixels,
                                  connectivity=connectivity).data

        # To plot the multiple segments that are detected
        # if segments.max() > 1: plotting.plot_box(np.ma.masked_array(box, mask=segments.astype(bool)))

        # Center
        center_x = 0.5 * (self.xsize + 1) - 1
        center_y = 0.5 * (self.ysize + 1) - 1

        # Get the label of the center segment
        #rel_center = self.cutout.rel_position(self.center)
        label = segments[int(round(center_y)), int(round(center_x))]

        # If the center pixel is identified as being part of the background, create an empty mask (the center does not
        # correspond to a segment)
        if label == 0:
            return self.__class__(np.zeros_like(self.data, dtype=bool))

            # Create a mask of the center segment
        else:
            return self.__class__(segments == label)
Example #10
    def remove_appendages(self, super=False):
        """
        This function ...
        :return:
        """

        from skimage import morphology  ###### TODO: this can cause a very weird error: (on Nancy (Ubuntu 14.04.4 LTS) with NUMPY VERSION 1.9.0)
        # *** libmkl_mc3.so *** failed with error : /raid6/home/sjversto/Enthought/Canopy_64bit/User/bin/../lib/libmkl_mc3.so: undefined symbol: i_free
        # *** libmkl_def.so *** failed with error : /raid6/home/sjversto/Enthought/Canopy_64bit/User/bin/../lib/libmkl_def.so: undefined symbol: i_free
        # MKL FATAL ERROR: Cannot load neither libmkl_mc3.so nor libmkl_def.so
        # POTENTIAL FIX HERE: http://stackoverflow.com/questions/14495334/python-matplotlib-mkl-fatal-error-on-ubuntu-12-04
        # IT WORKS WITH NUMPY VERSION 1.8.1 !!!

        if super: structure = morphology.disk(5, dtype=bool)
        else:
            structure = np.array([[False, True, True, True, False],
                                  [True, True, True, True, True],
                                  [True, True, True, True, True],
                                  [True, True, True, True, True],
                                  [False, True, True, True, False]])

        mask = self.opening(structure)

        segments = detect_sources(mask, 0.5, 1).data

        # Get the label of the center segment
        label = segments[int(0.5 * segments.shape[0]),
                         int(0.5 * segments.shape[1])]

        # Return the new mask with the appendages removed
        #data, name=None, description=None
        return Mask((segments == label),
                    name=self.name,
                    description=self.description)
Example #11
def pix_to_deg(pattern):
    coords = detect_sources(pattern)
    x1 = coords[0]
    y1 = coords[1]
    for filename in pattern:
        which_hdu = choose_hdu(filename)
        header = fits.getheader(filename, which_hdu)
        
    coords_obj = get_target_coord(pattern)
    xobj = coords_obj[0]
    yobj = coords_obj[1]
    
    Bin = float(header['CCDSUM'][0])
    Res = 0.25 #arcsec/px
    f_arcsec = Bin*Res #arcsec/px
    f = f_arcsec/3600

    # Offsets (in pixels) relative to the reference source at index 30, scaled to degrees
    dx = x1 - x1[30]
    x_deg = xobj + (f*dx)

    dy = y1 - y1[30]
    y_deg = yobj + (f*dy)
    return (x_deg, y_deg)
Example #12
    def remove_appendages(self, super=False):
        """
        This function ...
        :return:
        """

        if super: structure = morphology.disk(5, dtype=bool)
        else:
            structure = np.array([[False, True, True, True, False],
                                  [True, True, True, True, True],
                                  [True, True, True, True, True],
                                  [True, True, True, True, True],
                                  [False, True, True, True, False]])

        mask = self.opening(structure)

        segments = detect_sources(mask, 0.5, 1).data

        # Get the label of the center segment
        label = segments[int(0.5 * segments.shape[0]),
                         int(0.5 * segments.shape[1])]

        # Return the new mask with the appendages removed
        #data, name=None, description=None
        return Mask((segments == label),
                    name=self.name,
                    description=self.description)
Example #13
 def search_trails(self):
     """Search star trails in image"""
     threshold = detect_threshold(self.data, snr=1)
     sigma = 2.0 * gaussian_fwhm_to_sigma
     kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
     self.segments = detect_sources(self.data, threshold, npixels=1000, filter_kernel=kernel)
     self.segments_properties = segment_properties(self.data, self.segments)
Example #14
    def _find_star_properties(self, data, median, mask, star_coo):
        self.info('Finding star properties started')
        sigma = self.config_section.get('fwhm') * gaussian_fwhm_to_sigma
        kernel = Gaussian2DKernel(sigma,
                                  x_size=self.config_section.get('kernel_x'),
                                  y_size=self.config_section.get('kernel_y'))
        kernel.normalize()
        data[mask] = 0
        segm = detect_sources(data,
                              median *
                              self.config_section.get('detect_threshold'),
                              npixels=self.config_section.get('npixels'),
                              filter_kernel=kernel)
        properties = properties_table(
            source_properties(data - np.uint64(median), segm),
            columns=['id', 'xcentroid', 'ycentroid', 'source_sum',
                     'semimajor_axis_sigma', 'semiminor_axis_sigma',
                     'orientation'])

        self.info('Found star properties')
        self.info(properties)

        if len(properties) > 1:
            self.warning('More than one object has been found')
            properties = self._find_nearest_object(
                star_coo, properties, data.shape)
            return properties
        else:
            self.info('Finding star properties finished')
            return properties[0]
Example #15
def find_objects(image, threshold, FWHM, npixels):
    """Find sources in image by a segmentation process.

    This function detects sources a given sigma above a threshold,
    only if they have more than npixels that are interconnected.

        Args:
            image(array, required):      This is the image data
            threshold(array, required):  This is the threshold above which
                                         detection occurs
            FWHM(int, required):         Full Width Half Maximum of 2D circular
                                         gaussian kernel used to filter the
                                         image prior to thresholding. Input is
                                         in terms of pixels.
            npixels(int, required):      The minimum number of pixels to define
                                         a source

        Returns:
            segm:                        The segmentation image
    """
    sigma = FWHM * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
    kernel.normalize()
    segm = detect_sources(image,
                          threshold,
                          npixels=npixels,
                          filter_kernel=kernel)
    return segm
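
A short usage sketch for find_objects (not in the original listing), assuming the module-level imports the function relies on (detect_sources, Gaussian2DKernel, gaussian_fwhm_to_sigma) and a photutils release that still accepts filter_kernel.

# Hypothetical example: constant 5-sigma threshold image over unit-variance noise.
import numpy as np

rng = np.random.default_rng(1)
image = rng.normal(0.0, 1.0, (200, 200))
image[90:110, 90:110] += 25.0                  # synthetic source

threshold = np.full(image.shape, 5.0)          # per-pixel threshold array
segm = find_objects(image, threshold, FWHM=3, npixels=10)
print(0 if segm is None else segm.nlabels, "source(s) found")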
Example #16
def get_segmentation_map(
    image: torch.Tensor,
    npixels=5,  ## minimum number of connected pixels
    get_mask=False,
) -> torch.Tensor:
    """ compute smoothed segmentation map marking the part of the image that contains the main source

    optional: when get_mask=True, also provide a mask map of all additional saurces
    """
    # get segmap from red colorband
    img = image[0]
    # create segmentation map
    threshold = detect_threshold(img, 1.5)
    segm = detect_sources(img, threshold, npixels)
    # Keep only the largest segment
    # ## !!! change this to take the central segment
    main_label = np.argmax(segm.areas) + 1
    segmap = segm.data == main_label
    # regularize shape
    segmap_float = ndi.uniform_filter(np.float64(segmap), size=10)
    segmap = segmap_float > 0.5

    if not get_mask:
        return segmap
    else:
        # mask additional objects
        mask = np.zeros_like(img).astype("bool")
        for obj_label in range(1, len(segm.areas) + 1):
            if obj_label == main_label:
                continue
            mask[segm.data == obj_label] = True
        return segmap, mask
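
A hedged usage sketch for get_segmentation_map (not in the original listing); it assumes torch, numpy, scipy.ndimage (as ndi) and the photutils functions used above are importable, and that photutils accepts the CPU tensor transparently via NumPy conversion.

# Hypothetical 3-band image with a bright blob in the first (red) band.
import numpy as np
import torch

rng = np.random.default_rng(2)
arr = rng.normal(0.0, 1.0, (3, 128, 128)).astype(np.float32)
yy, xx = np.mgrid[:128, :128]
arr[0] += 30.0 * np.exp(-((xx - 64) ** 2 + (yy - 64) ** 2) / (2 * 5.0 ** 2))

image = torch.from_numpy(arr)
segmap, mask = get_segmentation_map(image, npixels=5, get_mask=True)
print(segmap.sum(), "pixels in the main segment")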
Example #17
def getsky(data, psfFWHMpix, snr=3.0, dtype=bool):
    threshold = detect_threshold(data, snr=snr)
    sigma = psfFWHMpix * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
    kernel.normalize()
    segm = detect_sources(data, threshold, npixels=5, filter_kernel=kernel)
    ny, nx = data.shape
    if(dtype == bool):
        mask = np.zeros_like(data, dtype=bool)
        mask[np.isnan(data)] = True
        mask[np.isinf(data)] = True
    if (dtype == int) :
        mask = np.zeros_like(data, dtype=int)
        mask[np.isnan(data)] = 1
        mask[np.isinf(data)] = 1
    for loopx in range(nx) :
        for loopy in range(ny):
            if segm.data[loopy][loopx] > 0:
                if(dtype == bool):
                    mask[loopy][loopx]=True
                if (dtype == int) :
                    mask[loopy][loopx]=1
    result = {
        'mask':mask,
        'segmentation': segm
    }
    return result
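
A usage sketch for getsky (not in the original listing). Note it assumes an older photutils where detect_threshold still takes the snr keyword (later renamed nsigma) and detect_sources accepts filter_kernel.

# Hypothetical example: mask one bright source and measure sky statistics.
import numpy as np

rng = np.random.default_rng(3)
data = rng.normal(0.0, 1.0, (150, 150))
data[70:80, 70:80] += 40.0                     # bright source flagged as "not sky"

result = getsky(data, psfFWHMpix=3.0, snr=3.0, dtype=bool)
sky = data[~result['mask']]                    # pixels not flagged as source/NaN/inf
print(sky.mean(), sky.std())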
Example #18
def make_segmap(filename):
    """
    Generates a source segmentation map of the input file.
    
    Parameters
    ----------
    filename : str
        The file to make a segmentation map for.
    
    Outputs
    -------
    {filename}_seg.fits
        The segmentation map.
    """

    # Name of the output segmap file
    outfile = filename.replace('.fits', '_seg.fits')

    # Get the data
    data = fits.getdata(filename, 'SCI')

    # Detect sources; make segmap
    threshold = detect_threshold(data, 3.0)
    sigma = 3.0 * gaussian_fwhm_to_sigma  # FWHM = 3.
    kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
    kernel.normalize()
    segm = detect_sources(data, threshold, npixels=3, filter_kernel=kernel)
    fits.writeto(outfile, segm.data, overwrite=True)

    return outfile
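
A usage sketch for make_segmap (not in the original listing): it writes a small FITS file with a SCI extension, runs the function, and reads the resulting segmentation map back. The file name is illustrative.

# Hypothetical input file; the function writes example_image_seg.fits next to it.
import numpy as np
from astropy.io import fits

rng = np.random.default_rng(4)
sci = rng.normal(0.0, 1.0, (64, 64)).astype(np.float32)
sci[30:34, 30:34] += 20.0

fits.HDUList([fits.PrimaryHDU(),
              fits.ImageHDU(data=sci, name='SCI')]).writeto('example_image.fits',
                                                            overwrite=True)

segfile = make_segmap('example_image.fits')
print(segfile, int(fits.getdata(segfile).max()), "label(s)")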
Example #19
def ImageSlice(imgname):

    hdu = fits.open(imgname)
    img = hdu[0].data

    threshold = detect_threshold(img, snr=4.)

    #Detection for every sources to find the position of the galaxy.
    sigma = 2. * gaussian_fwhm_to_sigma  # FWHM = 2.
    kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
    kernel.normalize()
    segm = detect_sources(img, threshold, npixels=5, filter_kernel=kernel)

    segm_areas = segm.areas
    segm_areas = np.delete(segm.areas, 0)  # deleting background area.
    label_gal = segm_areas.argmax()  # finding the galaxy by searching
    # a source which has largest area.
    rslice = segm.slices[label_gal][0]  # row slice
    cslice = segm.slices[label_gal][1]  # column slice

    ri = rslice.start
    rf = rslice.stop
    ci = cslice.start
    cf = cslice.stop

    b = 0  # expanding constant

    s_img = hdu[0].data[ri - b:rf + b, ci - b:cf + b]

    return s_img
Example #20
    def fill_holes(self):
        """
        This function ...
        :return:
        """

        # Create a copy of this mask
        #new_mask = self.copy()

        # Perform the segmentation
        segments = detect_sources(self.inverse().data.astype(float), 0.5,
                                  1).data

        # Find the label of the largest segment (=the background)
        label_counts = np.bincount(segments.flatten())
        if len(label_counts) > 1:

            background_label = np.argmax(label_counts[1:]) + 1
            # If the source mask is larger than the background (in number of pixels), the above will provide the correct label
            # therefore we do the '[1:]'

            # Create a mask for the holes identified as background
            holes = self.inverse().data
            holes[segments == background_label] = False

            # Remove holes from the mask
            self._data[holes] = True
Example #21
    def cutoff_low_snr(self):

        """
        This function ...
        :return:
        """

        # Check whether the reference image is present
        if os.path.isfile(self.config.cutoff.reference_path):

            # Open the reference image
            reference = Image.from_file(self.config.cutoff.reference_path)

            # Check whether the errors frame is present
            assert self.config.errors in reference.frames, "An error map could not be found for the reference image"

            # Create a mask for the pixels with a signal-to-noise ratio below the configured cut-off level
            data = reference.frames[self.config.primary] < self.config.cutoff.level*reference.frames[self.config.errors]
            self.mask = Mask(data)

            # If requested, remove holes from the cut-off mask
            if self.config.cutoff.remove_holes:

                # Save the mask as a FITS file
                Frame(self.mask.astype(float)).save(self.config.saving.cutoff_mask_with_holes_path)

                # Perform the segmentation
                segments = detect_sources(self.mask.astype(float), 0.5, 1).data

                # Save segments
                Frame(segments.astype(float)).save(self.config.saving.cutoff_mask_segments_path)

                # Find the label of the largest segment (=the background)
                label_counts = np.bincount(segments.flatten())
                background_label = np.argmax(label_counts)

                # Create a mask for the holes identified as background
                holes = copy.deepcopy(self.mask)
                holes[segments == background_label] = False

                # Save holes mask
                Frame(holes.astype(float)).save(self.config.saving.cutoff_mask_holes_path)

                # Remove holes from the mask
                self.mask[holes] = False

            # Save the mask as a FITS file
            Frame(self.mask.astype(float)).save(self.config.saving.cutoff_mask_path)

        # If not, raise an error
        else: raise IOError("The prepared reference image could not be found")

        # Cut-off the input images at the same contour level
        for name in self.images: self.images[name].apply_mask(self.mask, 0.0)

        # Cut-off the bulge and disk map at the same contour level
        self.disk[self.mask] = 0.0
        self.bulge[self.mask] = 0.0
Example #22
def make_segmentation_image(data,
                            fwhm=2.0,
                            snr=5.0,
                            x_size=5,
                            y_size=5,
                            npixels=7,
                            nlevels=32,
                            contrast=0.001,
                            deblend=True):
    """
    Use photutils to create a segmentation image containing detected sources.

    data : 2D `~numpy.ndarray`
        Image to segment into sources.

    fwhm : float (default: 2.0)
        FWHM of the kernel used to filter the image.

    snr : float (default: 5.0)
        Source S/N used to set detection threshold.

    x_size : int (default: 5)
        X size of the 2D `~astropy.convolution.Gaussian2DKernel` filter.

    y_size : int (default: 5)
        Y size of the 2D `~astropy.convolution.Gaussian2DKernel` filter.

    npixels : int (default: 7)
        Number of connected pixels required to be considered a source.

    nlevels : int (default: 32)
        Number of multi-thresholding levels to use when deblending sources.

    contrast : float (default: 0.001)
        Fraction of the total blended flux that a local peak must have to be considered a separate object.

    deblend : bool (default: True)
        If true, deblend sources after creating segmentation image.
    """
    sigma = fwhm * stats.gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=x_size, y_size=y_size)
    kernel.normalize()
    threshold = photutils.detect_threshold(data, nsigma=snr)

    segm = photutils.detect_sources(data,
                                    threshold,
                                    npixels=npixels,
                                    filter_kernel=kernel)
    if deblend:
        segm = photutils.deblend_sources(data,
                                         segm,
                                         npixels=npixels,
                                         filter_kernel=kernel,
                                         nlevels=nlevels,
                                         contrast=contrast)

    return segm
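
A usage sketch for make_segmentation_image (not in the original listing), again assuming a photutils version that still accepts filter_kernel: two overlapping Gaussian sources are detected and then deblended.

# Hypothetical blended pair separated by ~15 pixels.
import numpy as np

rng = np.random.default_rng(5)
yy, xx = np.mgrid[:200, :200]
data = rng.normal(0.0, 1.0, (200, 200))
data += 40.0 * np.exp(-((xx - 95) ** 2 + (yy - 100) ** 2) / (2 * 4.0 ** 2))
data += 40.0 * np.exp(-((xx - 110) ** 2 + (yy - 100) ** 2) / (2 * 4.0 ** 2))

segm = make_segmentation_image(data, fwhm=2.0, snr=5.0, npixels=7, deblend=True)
print(segm.nlabels, "deblended source(s)")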
Example #23
def find_thresh(mn, mx, npix, heatmap):
    nlabels = 0.
    segm_labels_prev = 0
    mr_prev2 = -99
    mr_prev = -99
    kern = Gaussian2DKernel(0.2, x_size=4 * 10, y_size=4 * 10)
    kern.normalize()
    a = zeros(kern.array.shape)
    centre = kern.array.shape[1] // 2  # integer pixel index of the kernel centre
    a[centre, centre] = 1
    kern_2 = Gaussian1DKernel(8)
    a[:, centre] = convolve_fft(a[:, centre], kern_2)
    a /= sum(a)
    b = convolve_fft(a, kern)
    b /= sum(b)
    temp_heatmap = convolve_fft(heatmap.data, b)
    temp_heatmap[temp_heatmap <= 0] = nan

    for tt, t in enumerate(linspace(mn, mx, 1000)):

        threshold = t
        segm = detect_sources(log10(temp_heatmap),
                              threshold=threshold,
                              npixels=npix)
        masses = array([
            sum(temp_heatmap[segm.array == lbl])
            for lbl in arange(1, segm.nlabels + 1)
        ])
        srt_masses = masses[argsort(masses)[::-1]]
        if len(masses) > 1:
            mass_ratio = srt_masses[0] / srt_masses[1]
            if mr_prev == -99:
                mr_prev = mass_ratio
                thresh = threshold
            if (log10(srt_masses[0]) > 7.5) & (log10(srt_masses[1]) > 7.5) & \
                (mr_prev/mass_ratio > 10) & (mass_ratio < 100) & (nansum(srt_masses) > 0.50*nansum(temp_heatmap)):
                thresh = threshold

            mr_prev = mass_ratio

            if len(masses) > 2:
                mass_ratio2 = srt_masses[0] / srt_masses[2]
                if mr_prev2 == -99:
                    mr_prev2 = mass_ratio2
                    thresh = threshold

                if (log10(srt_masses[0]) > 7.5
                    ) & (log10(srt_masses[1]) > 7.5) & (
                        mr_prev2 / mass_ratio2 > 10) & (mass_ratio2 < 300) & (
                            nansum(srt_masses) > 0.50 * nansum(temp_heatmap)):
                    thresh = threshold

                mr_prev2 = mass_ratio2
        segm_labels_prev = segm.nlabels
    return thresh, temp_heatmap
Example #24
def source_find(img, ota, inst, nbg_std=10.0):
    """
    This function will find sources on an OTA using the detect_sources module
    from photutils. This will return of csv file of the sources found with the
    x,y,Ra,Dec,source_sum,max_value, and elongation of the source. The
    elongation parameter is semimajor_axis / semiminor_axis.
    This output is needed for the source_xy function. This function is set
    to work on the reprojected otas.

    Parameters
    ----------
    img : str
        Name of image
    ota : str
        Name of OTA
    inst : str
        Version of ODI used, ``podi`` or ``5odi``
    nbg_std : float
        Multiplier to the standard deviation of the background. It has a default
        value of ``10`` to only detect bright sources

    Note
    ----
    This function produces a ``csv`` file in ``odi.sourcepath`` with the
    following naming convention ``'source_'+ota+'.'+img.base()+'.csv'``.

    """
    image = odi.reprojpath + 'reproj_' + ota + '.' + img.stem()
    QR_raw = odi.fits.open(image)
    # hdu_ota = QR_raw[0]

    hdu_ota = odi.tan_header_fix(QR_raw[0])

    w = odi.WCS(hdu_ota.header)
    # needed to remind astropy that the header says RADESYS=ICRS
    # your mileage may vary (logic probably needed here to handle cases)
    w.wcs.radesys = 'ICRS'
    # if inst == '5odi':
    #     w.wcs.ctype = ["RA---TPV", "DEC--TPV"]
    bg_mean, bg_median, bg_std = odi.mask_ota(img, ota, reproj=True)
    threshold = bg_median + (bg_std * nbg_std)
    print(bg_mean, bg_median, bg_std)
    segm_img = detect_sources(hdu_ota.data, threshold, npixels=20)
    source_props = source_properties(hdu_ota.data, segm_img, wcs=w)

    columns = [
        'id', 'xcentroid', 'ycentroid', 'ra_icrs_centroid',
        'dec_icrs_centroid', 'source_sum', 'max_value', 'elongation'
    ]
    source_tbl = properties_table(source_props, columns=columns)
    source_tbl_df = source_tbl.to_pandas()

    outputfile = odi.sourcepath + 'source_' + ota + '.' + img.base() + '.csv'

    source_tbl_df.to_csv(outputfile, index=False)
    QR_raw.close()
Example #25
    def get_mask(self,
                 flt_name,
                 kernel_fwhm=1.25,
                 background_box=20,
                 thr=0.05,
                 npixels=100):
        """
        Function to create a mask (set to 0 for no detection and 1 for detection) appropriate to mask WFC3 slitless data.
        Parameters
        ----------
        flt_name String The name of the FLT file to create a mask for
        kernel_fwhm Float The size of the detection kernel (default = 1.25 pixel)
        background_box Int The size of the background box when estimating the background (default = 20 pixels)
        thr Float Threshold above noise to detect signal (default = 0.05)
        npixels Int Number of pixels for a spectrum to be detected (default = 100)

        Output
        ------
        A numpy array containing the mask
        """
        h = fits.open(flt_name)[0].header
        filt = h["FILTER"]

        fin = fits.open(flt_name)
        image = fin["SCI"].data
        err = fin["ERR"].data

        dq = fin["DQ"].data
        dq = np.bitwise_and(dq,
                            np.zeros(np.shape(dq), np.int16) + self.bit_mask)

        g = Gaussian1D(mean=0., stddev=kernel_fwhm / 2.35)
        x = np.arange(16.) - 8
        a = g(x)
        kernel = np.tile(a, (16 * int(kernel_fwhm + 1), 1)).T
        kernel = kernel / np.sum(kernel)

        b = Background2D(image, background_box)

        image = image - b.background
        threshold = thr * err

        image[dq > 0] = 0.  #np.nan

        mask = detect_sources(image,
                              threshold,
                              npixels=npixels,
                              filter_kernel=kernel).data

        ok = (mask == 0.) & (dq == 0)
        mask[~ok] = 1.

        return mask
Example #26
def estimate_bkg(image, clip=3.0, snr=2.0, npix=5, tol=1e-6, max_iter=10):

    # Create the NaN mask
    nanmask = np.isnan(image)
    
    # First estimate of the global background from sigma-clipping
    im_mean0, im_med0, im_std0 = sigma_clipped_stats(image, sigma=clip, mask=nanmask)
    
    # Create segmentation image using threshold = im_med + snr*im_std
    # Greater than npix pixels have to be connected to be a source
    thresh = im_med0 + snr*im_std0
    segm_img = detect_sources(image, thresh, npixels=npix)
    
    # Create new mask that masks all the detected sources
    mask = np.logical_not(segm_img.data_masked.mask) | nanmask
    plt.imshow(mask, origin='lower')
    plt.colorbar()
    # Re-calculate background and rms
    im_mean, im_med, im_std = sigma_clipped_stats(image, sigma=clip, mask=mask)
    
    # Calculate percent change in the background estimate
    perc_change = np.abs(im_med0 - im_med)/ im_med
    
    if perc_change > tol:
        im_med0 = im_med
        for i in range(max_iter):
 
            thresh = im_med + snr*im_std
            segm_img = detect_sources(image, thresh, npixels=npix)
            mask = np.logical_not(segm_img.data_masked.mask) | nanmask
            im_mean, im_med, im_std = sigma_clipped_stats(image, sigma=clip, mask=mask)
            perc_change = np.abs(im_med0 - im_med)/ im_med

            if perc_change < tol:
                break
            else:
                im_med0 = im_med
                
    return im_med, im_std
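
A usage sketch for estimate_bkg (not in the original listing). The function displays the intermediate source mask with matplotlib as a side effect, so a non-interactive backend may be preferable when scripting; numpy, matplotlib, astropy and photutils are assumed importable as in the snippet.

# Hypothetical image: flat background near 10 ADU plus one contaminating source.
import numpy as np

rng = np.random.default_rng(6)
image = rng.normal(10.0, 2.0, (256, 256))
image[120:140, 120:140] += 100.0

bkg_med, bkg_std = estimate_bkg(image, clip=3.0, snr=2.0, npix=5)
print("background:", bkg_med, "+/-", bkg_std)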
Example #27
def central_segmentation_map(data, std_level=3, min_size=0.05):
    
    mean, median, std = measure_background(data, 2, np.zeros_like(data))
    threshold = median + (std_level*std)

    zp = int(np.round(data.shape[0]/2))

    npixels = np.max([int((data.shape[0] * min_size) **2), 5])
    seg_map = detect_sources(data, threshold, npixels=npixels).data
    central_source_mask = np.zeros_like(seg_map)
    central_source_mask[np.where(seg_map == seg_map[zp, zp])] = 1
    central_source_mask = binary_dilation(central_source_mask, generate_circular_kernel(zp/5))

    return central_source_mask
Example #28
    def detect_sources(self, threshold=False, npixels=10):

        if not threshold:

            # threshold = np.min(self.img[self.img>0])

            threshold = np.sum(self.img.img) / 1000.

        self.segm = detect_sources(self.img.img, threshold, npixels=npixels)

        self.cat = source_properties(self.img.img, self.segm)

        for i, o in enumerate(self.cat):
            print(i, o.centroid, o.source_sum / np.sum(self.img.img))
Example #29
def maskstarsSEG(image: np.ndarray) -> np.ndarray:
    """ 'Cleans' image of external sources.
        For example will remove all stars that do not interfere with the object of interest.

    Parameters
    ----------

    image : np.ndarray
        Image to be cleaned.

    Returns
    -------

    imageClean : np.ndarray
        Image cleaned of external sources.

    """

    cenpix = np.array([int(image.shape[0]/2) + 1, int(image.shape[1]/2) + 1])
    mean, median, std = sigma_clipped_stats(image, sigma=3.)

    # create segmentation map
    # TODO give user option to specify kernel, size of kernel and threshold?
    imageClean = np.copy(image)
    threshold = detect_threshold(image, 1.5)
    sigma = 3.0 * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
    kernel.normalize()
    segm = detect_sources(image, threshold, npixels=8, filter_kernel=kernel)

    # if no sources return
    if segm is None:
        return imageClean

    # Save portions of the segmentation map outside the object of interest
    stars = []
    for i, segment in enumerate(segm.segments):

        if not _inBbox(segment.bbox.extent, cenpix):
            stars.append(i)

    # clean image of external sources
    for i in stars:
        masked = segm.segments[i].data_ma
        masked = np.where(masked > 0., 1., 0.) * np.random.normal(mean, std, size=segm.segments[i].data.shape)
        imageClean[segm.segments[i].bbox.slices] = masked

        imageClean = np.where(imageClean == 0, image, imageClean)

    return imageClean
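
A usage sketch for maskstarsSEG (not in the original listing): a central object of interest plus an off-centre star that the routine should replace with background noise. It assumes the helper _inBbox and the photutils/astropy imports used above are available in the module.

# Hypothetical frame; positions and amplitudes are illustrative.
import numpy as np

rng = np.random.default_rng(7)
yy, xx = np.mgrid[:128, :128]
image = rng.normal(0.0, 1.0, (128, 128))
image += 25.0 * np.exp(-((xx - 64) ** 2 + (yy - 64) ** 2) / (2 * 6.0 ** 2))   # central object
image += 50.0 * np.exp(-((xx - 20) ** 2 + (yy - 100) ** 2) / (2 * 2.0 ** 2))  # external star

cleaned = maskstarsSEG(image)
print(cleaned.shape)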
Example #30
    def create_seg_map(self):
        '''
        Creates segmentation map, from original FLT file, that is used in 
        background subtraction and to fix cosmic rays.

        Parameters
        ----------
        self : object
            DashData object created from an individual IMA file.

        Output
        ------
        Segmentation Image : fits file
            Segmentation map
        Source List : .dat file
            List of sources and their properties
        '''

        flt = fits.open(self.flt_file_name)
        data = flt[1].data

        threshold = detect_threshold(data, nsigma=3.)

        sigma = 3.0 * gaussian_fwhm_to_sigma  # FWHM = 3.
        kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
        kernel.normalize()
        segm = detect_sources(data,
                              threshold,
                              npixels=10,
                              filter_kernel=kernel)

        hdu = fits.PrimaryHDU(segm.data)
        if not os.path.exists('segmentation_maps'):
            os.mkdir('segmentation_maps')
        hdu.writeto(('segmentation_maps/{}_seg.fits').format(self.root),
                    overwrite=True)

        # Create source list
        cat = source_properties(data, segm)

        tbl = cat.to_table()
        tbl['xcentroid'].info.format = '.2f'
        tbl['ycentroid'].info.format = '.2f'
        tbl['cxx'].info.format = '.2f'
        tbl['cxy'].info.format = '.2f'
        tbl['cyy'].info.format = '.2f'

        ascii.write(tbl,
                    'segmentation_maps/{}_source_list.dat'.format(self.root))
Example #31
    def find_center_segment(self, sigma_level, kernel=None, min_pixels=5):
        """
        This function ...
        :param sigma_level:
        :param kernel:
        :param min_pixels:
        :return:
        """

        # If a subtracted box is present, use it to find the center segment
        box = self.subtracted if self.has_background else self.cutout

        if not np.all(self.mask):

            mean, median, stddev = statistics.sigma_clipped_statistics(
                box, mask=self.mask)
            threshold = mean + stddev * sigma_level

        else:
            threshold = detect_threshold(box, snr=2.0)  #snr=2.0

        # Perform the segmentation
        segments = detect_sources(box,
                                  threshold,
                                  npixels=min_pixels,
                                  filter_kernel=kernel).data

        # To plot the multiple segments that are detected
        #if segments.max() > 1: plotting.plot_box(np.ma.masked_array(box, mask=segments.astype(bool)))

        # Get the label of the center segment
        rel_center = self.cutout.rel_position(self.center)
        try:
            label = segments[int(round(rel_center.y)),
                             int(round(rel_center.x))]
        except:
            try:
                label = segments[int(rel_center.y), int(rel_center.x)]
            except:
                return Mask((segments == label))

        # If the center pixel is identified as being part of the background, create an empty mask (the center does not
        # correspond to a segment)
        if label == 0:
            return Mask(np.zeros_like(self.cutout, dtype=bool))

            # Create a mask of the center segment
        else:
            return Mask((segments == label))
Example #32
def measureBackground(data, iterations, mask):
    if mask.sum() > 0:
        mean, median, std = sigma_clipped_stats(data, sigma=3.0, mask=mask)
    else:
        mean, median, std = sigma_clipped_stats(data, sigma=3.0)

    if iterations == 0:
        return mean, median, std
    else:
        threshold = median + (std * 2)
        segm_img = detect_sources(data, threshold, npixels=5)
        mask = segm_img.astype(np.bool)  # turn segm_img into a mask
        circMask = generateCircularMask(5)
        finalMask = binary_dilation(mask, circMask)
        return measureBackground(data, iterations - 1, finalMask)
Example #33
def measureBackground(data, iterations, mask):
    if (mask.sum() > 0):
        mean, median, std = sigma_clipped_stats(data, sigma=3.0, mask=mask)
    else:
        mean, median, std = sigma_clipped_stats(data, sigma=3.0)

    if (iterations == 0):
        return mean, median, std
    else:
        threshold = median + (std * 2)
        segm_img = detect_sources(data, threshold, npixels=5)
        mask = segm_img.astype(np.bool)  # turn segm_img into a mask
        circMask = generateCircularMask(5)
        finalMask = binary_dilation(mask, circMask)
        return measureBackground(data, iterations - 1, finalMask)
Example #34
def detect_sources_segmap(data,
                          threshold,
                          npixels,
                          kernel_fwhm=1.8,
                          show=False):
    """
    Runs image segmentation to detect sources in `data`.

    Parameters
    ----------
    data : array
        Image array.
    threshold : float or array
        Detection threshold value, or pixel-wise threshold image (must be same
        shape as `data`.)
    npixels : int
        Positive integer number of connected pixels, each greater than
        `threshold`, that an object must have to be detected.
    kernel_fwhm : float
        FWHM of gaussian kernel used to smooth image before segmentation.
    show : bool
        Show a plot of detected source(s).

    Returns
    -------
    coo_tab : `astropy.table.Table` or int
        Table with detected source(s). Returns '0' if no sources are detected.

    """

    sigma = kernel_fwhm * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
    kernel.normalize()
    segm = detect_sources(data,
                          threshold=threshold,
                          npixels=npixels,
                          filter_kernel=kernel)
    if segm:
        coo_tab = source_properties(data, segm).to_table()
        if show:
            show_source_detection_plot(data, coo_tab)
        return coo_tab

    if not segm:
        if show:
            show_source_detection_plot(data, None)
        return 0
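
A usage sketch for detect_sources_segmap (not in the original listing), using a scalar threshold derived from sigma-clipped image statistics; it assumes the module imports used above (Gaussian2DKernel, detect_sources, source_properties, gaussian_fwhm_to_sigma) and an older photutils that still provides source_properties and filter_kernel.

# Hypothetical image with one obvious source.
import numpy as np
from astropy.stats import sigma_clipped_stats

rng = np.random.default_rng(8)
data = rng.normal(0.0, 1.0, (100, 100))
data[45:55, 45:55] += 30.0

mean, median, std = sigma_clipped_stats(data, sigma=3.0)
coo_tab = detect_sources_segmap(data, threshold=median + 5.0 * std, npixels=5)
print(coo_tab)   # astropy Table of source properties, or 0 if nothing was detected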
Example #35
    def find_center_segment(self, sigma_level, kernel=None, min_pixels=5):

        """
        This function ...
        :param sigma_level:
        :param kernel:
        :param min_pixels:
        :return:
        """

        # If a subtracted box is present, use it to find the center segment
        box = self.subtracted if self.has_background else self.cutout

        if not np.all(self.mask):

            mean, median, stddev = statistics.sigma_clipped_statistics(box, mask=self.mask)
            threshold = mean + stddev * sigma_level

        else: threshold = detect_threshold(box, snr=2.0) #snr=2.0

        # Perform the segmentation
        segments = detect_sources(box, threshold, npixels=min_pixels, filter_kernel=kernel).data

        # To plot the multiple segments that are detected
        #if segments.max() > 1: plotting.plot_box(np.ma.masked_array(box, mask=segments.astype(bool)))

        # Get the label of the center segment
        rel_center = self.cutout.rel_position(self.center)
        try:
            label = segments[int(round(rel_center.y)), int(round(rel_center.x))]
        except:
            try:
                label = segments[int(rel_center.y), int(rel_center.x)]
            except:
                return Mask((segments == label))

        # If the center pixel is identified as being part of the background, create an empty mask (the center does not
        # correspond to a segment)
        if label == 0: return Mask(np.zeros_like(self.cutout, dtype=bool))

        # Create a mask of the center segment
        else: return Mask((segments == label))
Example #36
    def get_holes(self, npixels=1, connectivity=8):

        """
        This function ...
        :param npixels:
        :param connectivity:
        :return:
        """

        # Perform the segmentation
        segments = detect_sources(self.inverse().data, self.max-1, npixels=npixels, connectivity=connectivity).data

        #from ..tools import plotting
        #plotting.plot_mask(self.inverse())
        #plotting.plot_box(segments)

        # Find the label of the largest segment (=the background)
        label_counts = np.bincount(segments.flatten())
        if len(label_counts) <= 1: return

        # Determine background label
        background_label = np.argmax(label_counts[1:]) + 1
        # If the source mask is larger than the background (in number of pixels), the above will provide the correct label
        # therefore we do the '[1:]'
        #print(background_label)

        # Create a mask for the holes identified as background
        holes = self.inverse().data > 1
        holes[segments == background_label] = False

        # Get 'edge', unmark as holes
        #core = self.opaque_filled
        core = self.above_half_filled
        edge = core.inverse()
        holes[edge] = False

        #plotting.plot_mask(holes, title="holes")
        #plotting.plot_mask(self.opaque_filled, title="opaque filled")

        # Return
        return holes
Example #37
def ImageStarProperties(image,gstarcoo=None):
    """ Returns fwhm, ratio, pa, bkg, and bkg_std of input image """
    data = fits.getdata(image)
    imgWCS = WCS(image)

    bkg = photu.Background(data, (50, 50), filter_shape=(3, 3), method='median')
    threshold = bkg.background + (3. * bkg.background_rms)
    kernel = Gaussian2DKernel(2.0 * gaussian_fwhm_to_sigma, x_size=3, y_size=3) # 2 pixel FWHM smoothing kernel
    segm = photu.detect_sources(data, threshold, npixels=5, filter_kernel=kernel)
    props = segment_properties(data, segm,wcs=imgWCS)
    tbl = properties_table(props)
    catimg = SkyCoord(tbl['ra_icrs_centroid'],tbl['dec_icrs_centroid'],frame='icrs')
    catGstars = SkyCoord(gstarcoo['ra'],gstarcoo['dec'],frame='icrs') 
    # Cross match the catalogs to find the same stars
    index,dist2d,dist3d = catGstars.match_to_catalog_sky(catimg)

    fwhm = np.sqrt(np.median(tbl[index]['covar_sigx2']))/gaussian_fwhm_to_sigma #WRONG FIND another way!
    ratio = np.median(tbl[index]['elongation'])
    pa = np.median(tbl[index]['orientation'])
    
    return (fwhm, ratio, pa, bkg.background, bkg.background_rms)
Example #38
def CheckImagesAreAligned(NewImg,PrevImg,Pcoeffcut=0.3):
    """ Returns True if stars in images are aligned """
    if PrevImg is None:
        return False

    # First we have to remove the uneven background from both images before looking at star correlations
    PImgData = fits.getdata(PrevImg)
    PImgbkg = photu.Background(PImgData, (50, 50), filter_shape=(3, 3), method='median')
    PImgData -= PImgbkg.background

    NImgData = fits.getdata(NewImg)
    NImgbkg = photu.Background(NImgData, (50, 50), filter_shape=(3, 3), method='median')
    NImgData -= NImgbkg.background

    PImgthreshold =  3. * PImgbkg.background_rms

    kernel = Gaussian2DKernel(2.0 * gaussian_fwhm_to_sigma, x_size=3, y_size=3) # 2 pixel FWHM smoothing kernel
    segm = photu.detect_sources(PImgData, PImgthreshold, npixels=5, filter_kernel=kernel)
    mask = segm.astype(np.bool)
    # Remove any false positives due to bad pixels from the image edges
    mask[:50,:] = False
    mask[-50:,:] = False
    mask[:,:50] = False
    mask[:,-50:] = False
    # Also remove any negative counts area
    mask[PImgData<0] = False

    # Counts array in PrevImag at the detected sources
    PImgStarCounts = PImgData[mask]
    NImgStarCounts = NImgData[mask]

    pearsonCoeff = stats.pearsonr(PImgStarCounts,NImgStarCounts)[0]

    if pearsonCoeff > Pcoeffcut:
        Aligned = True
        logger.info('Pearson={0}: {1} is aligned with {2}.'.format(pearsonCoeff,NewImg,PrevImg))
    else:
        Aligned = False
        logger.info('Pearson={0}: {1} is not aligned with {2}.'.format(pearsonCoeff,NewImg,PrevImg))
    return Aligned
Example #39
def grid_smooth(i_ra_f, i_dec_f, fwhm, width, height):
    # bin the filtered stars into a grid with pixel size XXX
    # print "Binning for m-M =",dm
    # bins = 165
    # width = 30
    bins_h = int(height * 60. / 8.)
    bins_w = int(width * 60. / 8.)

    grid, xedges, yedges = np.histogram2d(i_dec_f, i_ra_f, bins=[bins_h,bins_w], range=[[0,height],[0,width]])
    hist_points = zip(xedges,yedges)

    sig = ((bins_w/width)*fwhm)/2.355
    pltsig = fwhm/2.0

    # convolve the grid with a gaussian
    grid_gaus = ndimage.filters.gaussian_filter(grid, sig, mode='constant', cval=0)
    S = np.array(grid_gaus*0)
    S_th = 3.0

    grid_mean = np.mean(grid_gaus)
    grid_sigma = np.std(grid_gaus)
    S = (grid_gaus-grid_mean)/grid_sigma

    above_th = [(int(i),int(j)) for i in range(len(S)) for j in range(len(S[i])) if (S[i][j] >= S_th)]

    segm = detect_sources(S, 2.0, npixels=5)
    props = source_properties(S, segm)
    columns = ['id', 'maxval_xpos', 'maxval_ypos', 'max_value', 'area']
    tbl = properties_table(props, columns=columns)
    # print tbl
    # rand_cmap = random_cmap(segm.max + 1, random_state=12345)

    # find the maximum point in the grid and center the circle there
    x_cent, y_cent = np.unravel_index(grid_gaus.argmax(),grid_gaus.shape)
    x_cent_S, y_cent_S = np.unravel_index(S.argmax(),S.shape)
    # print 'Max of S located at:','('+'{0:6.3f}'.format(y_cent_S)+','+'{0:6.3f}'.format(x_cent_S)+')'
    # print 'Value of S at above:','{0:6.3f}'.format(S[x_cent_S][y_cent_S])
    # print 'Number of bins above S_th: {0:4d}'.format(len(above_th))
    return xedges, x_cent, yedges, y_cent, S, x_cent_S, y_cent_S, pltsig, tbl, segm
Example #40
    def largest(self, npixels=1, connectivity=8):

        """
        This function ...
        :param npixels:
        :param connectivity:
        :return:
        """

        # Perform the segmentation
        segments = detect_sources(self.data, min_alpha, npixels=npixels, connectivity=connectivity).data

        # Get counts for each label
        unique, counts = np.unique(segments, return_counts=True)

        # Get indices of the unique values (sorted on counts)
        sorted_indices = np.argsort(counts)

        # Check last index
        last_index = sorted_indices[-1]
        value = unique[last_index]

        # Get the label
        if value == 0:

            # No other labels: no patches
            if len(sorted_indices) == 1: return self.__class__(np.zeros_like(self.data))

            second_last_index = sorted_indices[-2]
            label = unique[second_last_index]

        else: label = value

        # Return copy with only largest
        new = self.copy()
        new._data[segments != label] = 0
        return new
Example #41
def split_overlap(base_mask, test_mask, return_segments=False):

    """
    This function takes all blobs in the base_mask and checks whether they overlap with the test_mask.
    The function returns two new masks, one mask with all the blobs that overlapped, and another with the blobs
    that did not overlap.
    :param base_mask:
    :param test_mask:
    :return:
    """

    overlapping = np.zeros_like(base_mask, dtype=bool)
    not_overlapping = np.copy(base_mask)

    from photutils import detect_sources
    segments = detect_sources(base_mask.astype('float'), 0.5, 1).data
    overlap = intersection(segments, test_mask)

    # Check which indices are present in the overlap map
    possible = np.array(range(1, np.max(overlap) + 1))
    present = np.in1d(possible, overlap)
    indices = possible[present]

    overlapping_segments = np.zeros_like(base_mask, dtype=int)
    not_overlapping_segments = np.copy(segments)

    # Remove the galaxies from the segmentation map
    for index in indices:
        blob = segments == index
        overlapping[blob] = True
        not_overlapping[blob] = False

        overlapping_segments[blob] = index
        not_overlapping_segments[blob] = 0

    if return_segments: return overlapping, not_overlapping, overlapping_segments, not_overlapping_segments
    else: return overlapping, not_overlapping
Example #42
def run_bat_sources(source, filter, save_results=False, outdir=None, plot=False, results_file=None,
                    ap_file=None, save_fig=False):
    
    # File that contains the coordinates for each source
    bat_info = pd.read_csv(bat_dir+'bat_info.csv', index_col=0)
    
    # File that contains the classification for each source
    phot_class = pd.read_csv('bat_spire_phot_class.csv', index_col=0)
    
    if np.all(source != 'all'):
        if np.isscalar(source):
            srcs = [source]
        else:
            srcs = source
    else:
        srcs = bat_info.index.values
    
    if np.isscalar(filter):
        filter = [filter]
    
    # Setup the DataFrame to store the photometry results
    index = pd.MultiIndex.from_product([srcs, filter], names=['Name', 'Filter'])
    src_df = pd.DataFrame(columns=['raw_flux', 'bkg_flux', 'bkgsub_flux', 'total_err', 'aperture_err', 'bkg_rms', 'calib_err', 'type'], index=index)
    
    # Setup a dictionary to hold the apertures used
    src_aps = {}
    
    for s in srcs:
        print('Running...', s)
        for f in filter:
            print('\tFilter ', f)
            # Classification of source (either P, E, U, or C for point source,
            # extended, undetected, or cirrus)
            pclass = phot_class.loc[s, f]
            #pclass = 'P'
            
            # Load the data
            hdu_image = pyf.open(image_dir+s+'_scanamorphos_spire'+str(WAVES[f])+'_signal.fits')[0]
            hdu_err = pyf.open(image_dir+s+'_scanamorphos_spire'+str(WAVES[f])+'_error.fits')[0]
            
            # Convert to Jy/pixel
            hdu_image = prep_image(hdu_image, f)
            hdu_err = prep_image(hdu_err, f)
            
            # Calculate the global background
            im = hdu_image.data
            im_med, im_std = estimate_bkg(im)
            
            # Use a 2-sigma threshold to detect the BAT source if it's extended.
            # If it's a point source use a 3-sigma threshold and convolve the image with a
            # Gaussian that has a FWHM equal to the nominal FWHM of that filter
#            if pclass == 'E':
            thresh = im_med + 2.0*im_std
            segm_img = detect_sources(im, thresh, npixels=5)
            props = source_properties(im-im_med, segm_img, wcs=wcs.WCS(hdu_image.header))
#             elif pclass == 'P':
#                 thresh = im_med + 3.0*im_std
#                 sigma = FWHM[f]/PIX_SIZES[f] * gaussian_fwhm_to_sigma
#                 kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
#                 segm_img = detect_sources(im, thresh, npixels=5, filter_kernel=kernel)
#                 props = source_properties(im-im_med, segm_img, wcs=wcs.WCS(hdu_image.header))
            
            # Find the BAT source in the properties list
            ra_bat = bat_info.loc[s, 'RA_(J2000)']
            dec_bat = bat_info.loc[s, 'DEC_(J2000)']
            coord_bat = coord.SkyCoord(ra=ra_bat, dec=dec_bat, frame='fk5')
            
            # No need to find the BAT source if it is already classified as undetected
            # or dominated by cirrus emission
            if ((pclass != 'U') & (pclass != 'C')) & (len(props) > 0):
                ind_bat = find_bat_source(coord_bat, props, 2*FWHM[f])
            else:
                ind_bat = None

            # Create the source aperture            
            if ind_bat is None:
                
                ap, type = create_aperture(None, f, pclass, coord_bat=coord_bat, wcs=wcs.WCS(hdu_image.header)) 
            
            else:
            
                ap, type = create_aperture(props[ind_bat], f, pclass, extent=3.0, coord_bat=coord_bat, wcs=wcs.WCS(hdu_image.header))
            
            # Create a new mask based on a 2-sigma threshold and remove the bat source if detected
            thresh2 = im_med + 2.0*im_std
            segm_img2 = detect_sources(im, thresh2, npixels=5)
            props2 = source_properties(im-im_med, segm_img2, wcs=wcs.WCS(hdu_image.header))
            nanmask = np.isnan(im)
            nanerr = np.isnan(hdu_err.data) | np.isinf(hdu_err.data)
            
            if len(props2) != 0:
                ind_bat2 = find_bat_source(coord_bat, props2, 2*FWHM[f])
            else:
                ind_bat2 = None
    
            if ind_bat2 is None:
                mask = np.logical_not(segm_img2.data_masked.mask) | nanmask | nanerr
            else:
                si = segm_img2.remove_labels(ind_bat2 + 1)
                mask = np.logical_not(si.data_masked.mask) | nanmask | nanerr
                #mask = nanmask | nanerr
            
            if (pclass != 'C'):    
                results, apertures = herschel_aperture_photometry(ap, hdu_image, hdu_err.data, type, f, pclass=pclass, bkg=im_med, mask=mask)
            else:
                results, apertures = herschel_aperture_photometry(ap, hdu_image, hdu_err.data, type, f, pclass=pclass, bkg=im_med, mask=None)
                
            type = results['type']
            src_df.loc[s, f] = pd.Series(results)
            src_aps[s+'_'+f] = apertures  
            
            if plot:

                cbat = [coord_bat.ra.deg, coord_bat.dec.deg]
                
                if (type == 'fixed'):
                    pmax = None
                else:
                    pmax = props[ind_bat].max_value + im_med
                
                if (pmax is not None) and (pmax < im_med):
                    pmax = None

                fig = plot_aperture_photometry(hdu_image, apertures, f, type, global_bkg=im_med,
                                                   pixel_max=pmax, title=s+' ['+f+']', plot_bat_loc=cbat)
           
                if save_fig:
                    if outdir is None:
                        fig.save(s+'_'+f+'.png')
                    else:
                        fig.save(outdir+s+'_'+f+'.png')
                    fig.close()
            
    if save_results:
        if (outdir is None) and (results_file is None):
            src_df.to_csv('photometry_results_'+str(dt.datetime.today().date().isoformat())+'.csv')
            f_ap = open('apertures_'+str(dt.datetime.today().date().isoformat())+'.pkl', 'wb')
        elif (outdir is not None) and (results_file is None):
            src_df.to_csv(outdir+'photometry_results_'+str(dt.datetime.today().date().isoformat())+'.csv')
            f_ap = open(outdir+'apertures_'+str(dt.datetime.today().date().isoformat())+'.pkl', 'wb')
        else:
            src_df.to_csv(outdir+results_file)
            f_ap = open(outdir+ap_file, 'wb')
        
        pickle.dump(src_aps, f_ap)
        f_ap.close()
    
    if plot and not save_fig:
        return src_df, src_aps, fig  
    else:
        return src_df, src_aps             
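# --- Hedged usage sketch (not part of the pipeline above) ---------------------
# A minimal, self-contained illustration of the masking step used above: detect
# sources at a 2-sigma threshold, locate the segment closest to a target
# coordinate (a simplified stand-in for find_bat_source), and drop it from the
# contamination mask.  The file name and target position are hypothetical, and
# the older photutils API (source_properties) that these snippets import is
# assumed.
import numpy as np
import astropy.io.fits as pyf
import astropy.wcs as wcs
import astropy.coordinates as coord
import astropy.units as u
from photutils import detect_sources, source_properties

hdu = pyf.open('example_image.fits')[0]              # hypothetical file
im = hdu.data
im_med, im_std = np.nanmedian(im), np.nanstd(im)     # crude background estimate
segm = detect_sources(im, im_med + 2.0*im_std, npixels=5)
props = source_properties(im - im_med, segm, wcs=wcs.WCS(hdu.header))

target = coord.SkyCoord(ra=150.0*u.deg, dec=2.0*u.deg, frame='fk5')  # made up
seps = [target.separation(props[i].sky_centroid).arcsec for i in range(len(props))]
ind = int(np.argmin(seps))

segm.remove_labels(ind + 1)                          # labels are 1-based and sequential
contam_mask = (segm.data > 0) | np.isnan(im)         # everything else stays masked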
Exemple #43
0
def make_source_catalog(model, kernel_fwhm, kernel_xsize, kernel_ysize,
                        snr_threshold, npixels, deblend_nlevels=32,
                        deblend_contrast=0.001, deblend_mode='exponential',
                        connectivity=8, deblend=False):
    """
    Create a final catalog of source photometry and morphologies.

    Parameters
    ----------
    model : `DrizProductModel`
        The input `DrizProductModel` of a single drizzled image.  The
        input image is assumed to be background subtracted.

    kernel_fwhm : float
        The full-width at half-maximum (FWHM) of the 2D Gaussian kernel
        used to filter the image before thresholding.  Filtering the
        image will smooth the noise and maximize detectability of
        objects with a shape similar to the kernel.

    kernel_xsize : odd int
        The size in the x dimension (columns) of the kernel array.

    kernel_ysize : odd int
        The size in the y dimension (rows) of the kernel array.

    snr_threshold : float
        The signal-to-noise ratio per pixel above the ``background`` for
        which to consider a pixel as possibly being part of a source.

    npixels : int
        The number of connected pixels, each greater than the threshold,
        that an object must have to be detected.  ``npixels`` must be a
        positive integer.

    deblend_nlevels : int, optional
        The number of multi-thresholding levels to use for deblending
        sources.  Each source will be re-thresholded at
        ``deblend_nlevels``, spaced exponentially or linearly (see the
        ``deblend_mode`` keyword), between its minimum and maximum
        values within the source segment.

    deblend_contrast : float, optional
        The fraction of the total (blended) source flux that a local
        peak must have to be considered as a separate object.
        ``deblend_contrast`` must be between 0 and 1, inclusive.  If
        ``deblend_contrast = 0`` then every local peak will be made a
        separate object (maximum deblending).  If ``deblend_contrast =
        1`` then no deblending will occur.  The default is 0.001, which
        will deblend sources with a magnitude difference of about 7.5.

    deblend_mode : {'exponential', 'linear'}, optional
        The mode used in defining the spacing between the
        multi-thresholding levels (see the ``deblend_nlevels`` keyword)
        when deblending sources.

    connectivity : {4, 8}, optional
        The type of pixel connectivity used in determining how pixels
        are grouped into a detected source.  The options are 4 or 8
        (default).  4-connected pixels touch along their edges.
        8-connected pixels touch along their edges or corners.  For
        reference, SExtractor uses 8-connected pixels.

    deblend : bool, optional
        Whether to deblend overlapping sources.  Source deblending
        requires scikit-image.

    Returns
    -------
    catalog : `~astropy.table.QTable`
        An astropy Table containing the source photometry and
        morphologies.
    """

    if not isinstance(model, DrizProductModel):
        raise ValueError('The input model must be a DrizProductModel.')

    # Use this when model.wht contains an IVM map
    # Calculate "background-only" error assuming the weight image is an
    # inverse-variance map (IVM).  The weight image is clipped because it
    # may contain zeros.
    # bkg_error = np.sqrt(1.0 / np.clip(model.wht, 1.0e-20, 1.0e20))
    # threshold = snr_threshold * bkg_error

    # Estimate the 1-sigma noise in the image empirically because model.wht
    # does not yet contain an IVM map
    mask = (model.wht == 0)
    data_mean, data_median, data_std = sigma_clipped_stats(
        model.data, mask=mask, sigma=3.0, maxiters=10)
    threshold = data_median + (data_std * snr_threshold)

    sigma = kernel_fwhm * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=kernel_xsize, y_size=kernel_ysize)
    kernel.normalize()

    segm = photutils.detect_sources(model.data, threshold, npixels=npixels,
                                    filter_kernel=kernel,
                                    connectivity=connectivity)

    # source deblending requires scikit-image
    if deblend:
        segm = photutils.deblend_sources(model.data, segm, npixels=npixels,
                                         filter_kernel=kernel,
                                         nlevels=deblend_nlevels,
                                         contrast=deblend_contrast,
                                         mode=deblend_mode,
                                         connectivity=connectivity,
                                         relabel=True)

    # Calculate total error, including source Poisson noise.
    # This calculation assumes that the data and bkg_error images are in
    # units of electron/s.  Poisson noise is not included for pixels
    # where data < 0.
    exptime = model.meta.resample.product_exposure_time    # total exptime
    # total_error = np.sqrt(bkg_error**2 +
    #                       np.maximum(model.data / exptime, 0))
    total_error = np.sqrt(data_std**2 + np.maximum(model.data / exptime, 0))

    wcs = model.get_fits_wcs()
    source_props = photutils.source_properties(
        model.data, segm, error=total_error, filter_kernel=kernel, wcs=wcs)

    if len(source_props) == 0:
        return QTable()    # empty table

    columns = ['id', 'xcentroid', 'ycentroid', 'sky_centroid', 'area',
               'source_sum', 'source_sum_err', 'semimajor_axis_sigma',
               'semiminor_axis_sigma', 'orientation',
               'sky_bbox_ll', 'sky_bbox_ul', 'sky_bbox_lr', 'sky_bbox_ur']
    catalog = source_props.to_table(columns=columns)

    # convert orientation to degrees
    orient_deg = catalog['orientation'].to(u.deg)
    catalog.replace_column('orientation', orient_deg)

    # define orientation position angle
    rot = _get_rotation(wcs)
    catalog['orientation_sky'] = ((270. - rot +
                                   catalog['orientation'].value) * u.deg)

    # define flux in microJanskys
    nsources = len(catalog)
    pixelarea = model.meta.photometry.pixelarea_arcsecsq
    if pixelarea is None:
        micro_Jy = np.full(nsources, np.nan)
    else:
        micro_Jy = (catalog['source_sum'] *
                    model.meta.photometry.conversion_microjanskys *
                    model.meta.photometry.pixelarea_arcsecsq)

    # define AB mag
    abmag = np.full(nsources, np.nan)
    mask = np.isfinite(micro_Jy)
    abmag[mask] = -2.5 * np.log10(micro_Jy[mask]) + 23.9
    catalog['abmag'] = abmag

    # define AB mag error
    # assuming SNR >> 1 (otherwise abmag_error is asymmetric)
    abmag_error = (2.5 * np.log10(np.e) * catalog['source_sum_err'] /
                   catalog['source_sum'])
    abmag_error[~mask] = np.nan
    catalog['abmag_error'] = abmag_error

    return catalog
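# --- Hedged worked example ----------------------------------------------------
# Small numeric check of the conversions used above (the function itself needs
# a jwst DrizProductModel, so only the arithmetic is exercised here).  A source
# of 10 microJansky comes out at AB ~ 21.4 with the 23.9 zero point, and a
# deblend_contrast of 0.001 corresponds to -2.5*log10(0.001) = 7.5 mag.
import numpy as np

micro_Jy = 10.0                                   # hypothetical source flux
abmag = -2.5 * np.log10(micro_Jy) + 23.9          # same zero point as above
delta_mag = -2.5 * np.log10(0.001)                # contrast -> magnitude spacing
print(abmag, delta_mag)                           # ~21.4, 7.5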
Exemple #44
0
    return mask


if len(sys.argv) < 2:
    print "\n\n"
    print sys.argv[0], "galaxyFilePath\n"
    exit()
else:
    galaxyFilePath = sys.argv[1]


oriImg = pf.getdata(galaxyFilePath)  # loads fits file data
mean, median, std = measureBackground(oriImg, 2, np.zeros_like(oriImg))  # measures background distribution
threshold = median + (3 * std)  # threshold to detect_sources
segMap = detect_sources(
    oriImg, threshold, npixels=100
)  # generate a segmentation map for any source above threshold with number of pixels above npixels
galMask = np.zeros_like(segMap)
zp = int(np.round(oriImg.shape[0] / 2))  # Only works for image with galaxy in the center
galMask[segMap == segMap[zp, zp]] = 1  # selects the central segmentation to remove from initial segmentation map
finalMask = binary_dilation(
    galMask, generateCircularMask(zp / 10)
)  # binary convolution with circular mask to get galaxy exterior region zp/10 ~ 5% of the galaxy image size


segMap[segMap == segMap[zp, zp]] = 0  # remove galaxy segmentation from segmentation map
segMap[segMap > 0] = 1  # transform segmentation map on binary mask
segMap = segMap - finalMask

# force binary mask after subtraction
segMap[segMap < 0] = 0
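# --- Hedged sketch ------------------------------------------------------------
# generateCircularMask() is not shown above, so this builds an equivalent
# circular structuring element with plain numpy and grows a toy galaxy mask
# with scipy's binary_dilation, as done for finalMask above.
import numpy as np
from scipy.ndimage import binary_dilation

def circular_structure(radius):
    """Boolean disk of the given pixel radius, usable as a dilation structure."""
    r = int(np.ceil(radius))
    yy, xx = np.mgrid[-r:r + 1, -r:r + 1]
    return (xx**2 + yy**2) <= radius**2

galMaskDemo = np.zeros((200, 200), dtype=bool)    # toy mask with one blob
galMaskDemo[95:105, 95:105] = True
grown = binary_dilation(galMaskDemo, structure=circular_structure(10))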
#names = bat_herschel[bat_herschel['PACS160'] == 0].index
#names = ['2MASXJ20183871+4041003', '4U1344-60', 'AXJ1737.4-2907', '2MASXJ09023729-4813339', 'WKK4374']
names = ['WKK4374']
f = 'PACS160'
check_cirrus = pd.DataFrame(index=names, columns=['5-sig_ul_new', '5-sig_ul_old'])
for n in names:

    hdu_img = fits.open('/Users/ttshimiz/Dropbox/Herschel_Images/PACS/'+n+'_scanamorphos_pacs160_signal.fits')[0]
    hdu_err = fits.open('/Users/ttshimiz/Dropbox/Herschel_Images/PACS/'+n+'_scanamorphos_pacs160_error.fits')[0]
    #hdu_image = rap.prep_image(src_img, f)
    #hdu_err = rap.prep_image(err_img, f)
    im = hdu_img.data
    im_med, im_std = rap.estimate_bkg(im)
    thresh = im_med + 2.0*im_std
    segm_img = pu.detect_sources(im, thresh, npixels=5)
    props = pu.segment_properties(im-im_med, segm_img, wcs=wcs.WCS(hdu_img.header))

    # Define the BAT source position before searching for it in the catalog
    ra_bat = bat_info.loc[n, 'RA_(J2000)']
    dec_bat = bat_info.loc[n, 'DEC_(J2000)']
    coord_bat = coord.SkyCoord(ra=ra_bat, dec=dec_bat, frame='fk5')
    ind_bat = rap.find_bat_source(coord_bat, props, 12.)
    
     
    #ap = pu.CircularAperture([88.368554, 70.88513], 7.71929)
    if ind_bat is None:
        ap, type = rap.create_spire_aperture(None, f, 'P', coord_bat=coord_bat, wcs=wcs.WCS(hdu_img.header))
    else:
        ap, type = rap.create_spire_aperture(props[ind_bat], f, 'P', extent=3.0, coord_bat=coord_bat, wcs=wcs.WCS(hdu_img.header))
    
    result, apertures = rap.spire_aperture_photometry(ap, hdu_img, hdu_err.data, 'point', f, pclass='P')
        sciImgList  = []
        for pairNum, ABimg in enumerate(zip(Aimgs, Bimgs)):
            Aimg = ABimg[0]
            Bimg = ABimg[1]
            print('\t\tProcessing on-off pair {0}'.format(pairNum+1))

            # Estimate the background for this pair
            B1bkg     = Background(Bimg.arr, (100, 100), filter_shape=(3, 3),
                                   method='median')
            threshold = B1bkg.background + 3.0*B1bkg.background_rms

            # Build a mask for any sources above the 3-sigma threshold
            sigma  = 2.0 * gaussian_fwhm_to_sigma    # FWHM = 2.
            kernel = Gaussian2DKernel(sigma, x_size=6, y_size=6)
            kernel.normalize()
            segm   = detect_sources(Bimg.arr, threshold,
                                  npixels=5, filter_kernel=kernel)
            # Build the actual mask and include a step to capture negative
            # saturation values
            mask   = np.logical_or((segm.data > 0),
                     (np.abs((Bimg.arr -
                      B1bkg.background)/B1bkg.background_rms) > 7.0))

            # Estimate a 2D background image masking possible sources
            B1bkg = Background(Bimg.arr, (100, 100), filter_shape=(3, 3),
                                   method='median', mask=mask)

            # Catch all the NaN values from masking the optical ghosts
            ghostMask    = np.logical_not(np.isfinite(Aimg.arr))
            ghostMaskImg = Aimg.copy()

            # Wipe out the sigma attribute if it's there (we won't need it)
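# --- Hedged sketch of the two-pass background used above -----------------------
# Recent photutils versions call the estimator Background2D rather than
# Background; the idea is the same: rough background, mask pixels detected as
# sources above 3 sigma, then re-estimate with that mask.  The random array
# below is a stand-in for Bimg.arr.
import numpy as np
from photutils import Background2D, detect_sources

img = np.random.normal(0.0, 1.0, (400, 400))       # toy off-frame image
bkg1 = Background2D(img, (100, 100), filter_size=(3, 3))
thresh = bkg1.background + 3.0 * bkg1.background_rms
segm = detect_sources(img, thresh, npixels=5)
src_mask = (segm.data > 0) if segm is not None else np.zeros(img.shape, bool)
bkg2 = Background2D(img, (100, 100), filter_size=(3, 3), mask=src_mask)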
			mean1, median1, std1 = sigma_clipped_stats(shared_im[x_region[0]:x_region[1], y_region[0]:y_region[1]], mask=im_mask_nan[x_region[0]:x_region[1], y_region[0]:y_region[1]], sigma=3.0)	
		
			x_region = np.round(im_size[0]*2./3 + np.array([-500, 500])).astype(int)
			y_region = np.round(im_size[1]*2./3 + np.array([-500, 500])).astype(int)
			print 'NaN region2 ', np.where( np.isnan(shared_im[x_region[0]:x_region[1], y_region[0]:y_region[1]]) )
			mean2, median2, std2 = sigma_clipped_stats(shared_im[x_region[0]:x_region[1], y_region[0]:y_region[1]], mask=im_mask_nan[x_region[0]:x_region[1], y_region[0]:y_region[1]] , sigma=3.0)	
			
			print "Statistics region 1 ", median1, std1
			print "Statistics region 2 ", median2, std2
		
			im_median=np.mean([median1,median2])
			im_stddev=np.sqrt((std1**2+std2**2)/2)
			im_thresh=im_median + 2*im_stddev
			print "Image median, stddev, threshold ", im_median, im_stddev, im_thresh
		
			seg_data = detect_sources(shared_nim, im_thresh, npixels=5)
			seg_npix = np.bincount(np.ravel(seg_data))[1:]
			seg_max = np.nanmax(seg_npix)

			seg_radius= np.sqrt(seg_max/np.pi)
			back_size = np.full( 2, np.round(seg_radius/10.)*10 )
			print "Back_size that will be used to remove large galaxies ", back_size

			if method=='none':			
				print "Writing file ", im_convol_file
				if os.path.isfile(im_convol_file): os.remove(im_convol_file)
				pyfits.writeto(im_convol_file, shared_nim, header=im_h)
		
				if os.path.isfile(im_convol_seg_file): os.remove(im_convol_seg_file)
				pyfits.writeto(im_convol_seg_file, seg_data, header=im_h)
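# --- Hedged sketch ------------------------------------------------------------
# The block above combines sigma-clipped statistics from two sub-regions into a
# single detection threshold; the same arithmetic on a toy image, written as a
# small illustrative function (combined_threshold is not part of the pipeline).
import numpy as np
from astropy.stats import sigma_clipped_stats

def combined_threshold(image, region1, region2, nsigma=2.0, clip_sigma=3.0):
    """Average the medians and rms of two (yslice, xslice) regions into a threshold."""
    _, med1, std1 = sigma_clipped_stats(image[region1], sigma=clip_sigma)
    _, med2, std2 = sigma_clipped_stats(image[region2], sigma=clip_sigma)
    med = np.mean([med1, med2])
    std = np.sqrt((std1**2 + std2**2) / 2.0)
    return med + nsigma * std

toy = np.random.normal(10.0, 2.0, (3000, 3000))
thr = combined_threshold(toy, (slice(500, 1500), slice(500, 1500)),
                         (slice(1500, 2500), slice(1500, 2500)))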
    def find_sources_segmentation(self):

        """
        Find sources in the frame based on image segmentation.
        :return:
        """

        mask = Mask(self.galaxy_finder.segments._data) + Mask(self.star_finder.segments._data)
        data = self.frame.copy()
        data[mask] = 0.0

        #mask = Mask(self.galaxy_finder.segments)
        #star_mask = Mask(self.star_finder.segments)

        #data = interpolation.in_paint(self.image.frames.primary, star_mask) # Interpolate over stars
        #data[mask] = 0.0 # set galaxies to zero

        # Create the sigma-clipped mask
        if self.bad_mask is not None: mask += self.bad_mask

        clipped_mask = statistics.sigma_clip_mask(data, 3.0, mask)

        # Calculate the median sky value and the standard deviation
        median = np.median(np.ma.masked_array(data, mask=clipped_mask).compressed())
        stddev = np.ma.masked_array(data, mask=clipped_mask).std()

        # Calculate the detection threshold
        threshold = median + (3.0 * stddev)

        #kernel = self.star_finder.kernel # doesn't work when there was no star extraction on the image, self.star_finder does not have attribute image thus cannot give image.fwhm
        if self.star_finder.config.use_frame_fwhm and self.frame.fwhm is not None:

            fwhm = self.frame.fwhm.to("arcsec").value / self.frame.average_pixelscale.to("arcsec/pix").value
            sigma = fwhm * statistics.fwhm_to_sigma
            kernel = Gaussian2DKernel(sigma)

        else: kernel = self.star_finder.kernel

        try:
            # Create a segmentation map from the frame
            self.segments = Frame(detect_sources(data, threshold, npixels=5, filter_kernel=kernel).data)
        except RuntimeError:

            log.debug("Runtime error during detect_sources ...")
            #log.debug("kernel = " + str(kernel))

            #conv_mode = 'constant'
            #conv_val = 0.0
            #image = ndimage.convolve(data, kernel.array, mode=conv_mode, cval=conv_val)

            #log.debug("median = " + str(median))
            #log.debug("stddev = " + str(stddev))

            #print("image=", image)
            #log.debug("image.ndim = " + str(image.ndim))
            #log.debug("type image = " + type(image))
            #log.debug("image.shape = "+ str(image.shape))
            #log.debug("threshold = " + str(threshold))
            #image = image > threshold
            #log.debug("image.ndim = " + str(image.ndim))
            #log.debug("type image = " + str(type(image)))
            #log.debug("image.shape = " + str(image.shape))

        # Eliminate the principal galaxy and companion galaxies from the segments
        if self.galaxy_finder is not None:

            # Determine the mask that covers the principal and companion galaxies
            eliminate_mask = self.galaxy_finder.principal_mask + self.galaxy_finder.companion_mask

            # NEW: PLUS: Eliminate the segments covered by the 'ignore mask'
            if self.ignore_mask is not None: eliminate_mask += self.ignore_mask

            # Check where the galaxy mask overlaps with the segmentation map
            overlap = masks.intersection(self.segments, eliminate_mask)
            if np.any(overlap):

                # Check which indices are present in the overlap map
                possible = np.array(range(1, np.max(overlap) + 1))
                present = np.in1d(possible, overlap)
                indices = possible[present]

                # Remove the galaxies from the segmentation map
                for index in indices: self.segments[self.segments == index] = 0
Exemple #49
0
    def find_sources_segmentation(self):

        """
        Find sources in the frame based on image segmentation.
        :return:
        """

        # Inform the user
        log.info("Finding sources based on image segmentation ...")

        # Create mask
        if self.star_segments is not None: mask = Mask(self.galaxy_segments._data) + Mask(self.star_segments._data)
        else: mask = Mask(self.galaxy_segments._data)

        # Mask the data?
        data = self.frame.copy()
        data[mask] = 0.0

        #mask = Mask(self.galaxy_finder.segments)
        #star_mask = Mask(self.star_finder.segments)

        #data = interpolation.in_paint(self.image.frames.primary, star_mask) # Interpolate over stars
        #data[mask] = 0.0 # set galaxies to zero

        # Create the sigma-clipped mask
        if self.bad_mask is not None: mask += self.bad_mask

        clipped_mask = statistics.sigma_clip_mask(data, self.config.detection.segmentation.clipping_sigma_level, mask)

        # Calculate the median sky value and the standard deviation
        median = np.median(np.ma.masked_array(data, mask=clipped_mask).compressed())
        stddev = np.ma.masked_array(data, mask=clipped_mask).std()

        # Calculate the detection threshold
        threshold = median + (self.config.detection.segmentation.sigma_level * stddev)

        #try:
        # Create a segmentation map from the frame
        self.segments = Frame(detect_sources(data, threshold, npixels=5, filter_kernel=self.kernel).data)
        #except RuntimeError as e:

            #log.error("Runtime error during detect_sources ...")

            #print(e)
            #traceback.print_exc()

            #log.debug("kernel = " + str(kernel))

            #conv_mode = 'constant'
            #conv_val = 0.0
            #image = ndimage.convolve(data, kernel.array, mode=conv_mode, cval=conv_val)

            #log.debug("median = " + str(median))
            #log.debug("stddev = " + str(stddev))

            #print("image=", image)
            #log.debug("image.ndim = " + str(image.ndim))
            #log.debug("type image = " + type(image))
            #log.debug("image.shape = "+ str(image.shape))
            #log.debug("threshold = " + str(threshold))
            #image = image > threshold
            #log.debug("image.ndim = " + str(image.ndim))
            #log.debug("type image = " + str(type(image)))
            #log.debug("image.shape = " + str(image.shape))

        # Create eliminate mask
        eliminate_mask = Mask.empty_like(self.frame)

        # Eliminate the principal galaxy and companion galaxies from the segments
        if self.galaxies is not None:

            #principal_mask = self.galaxies.get_principal_mask(self.frame)
            #companion_mask = self.galaxies.get_companion_mask(self.frame)

            # Add mask of principal galaxy
            if self.principal_mask is None: log.warning("Principal mask is not defined")
            else: eliminate_mask += self.principal_mask

            # Add mask of companion galaxy
            if self.companion_mask is None: log.warning("Companion mask is not defined")
            else: eliminate_mask += self.companion_mask

            # Determine the mask that covers the principal and companion galaxies
            #eliminate_mask += principal_mask + companion_mask

        # NEW: PLUS: Eliminate the segments covered by the 'ignore mask'
        if self.ignore_mask is not None: eliminate_mask += self.ignore_mask

        # NEW: Eliminate the segments covered by the 'bad mask'
        if self.bad_mask is not None: eliminate_mask += self.bad_mask

        # Check where the galaxy mask overlaps with the segmentation map
        overlap = masks.intersection(self.segments, eliminate_mask)
        if np.any(overlap):

            # Check which indices are present in the overlap map
            possible = np.array(range(1, np.max(overlap) + 1))
            present = np.in1d(possible, overlap)
            indices = possible[present]

            # Remove the galaxies from the segmentation map
            for index in indices: self.segments[self.segments == index] = 0
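# --- Hedged alternative sketch -------------------------------------------------
# photutils' SegmentationImage provides remove_masked_labels(), which zeroes
# every segment that overlaps a boolean mask in one call -- the same effect as
# the index loop above.  Shown on a plain ndarray rather than the Frame class
# used in this pipeline (recent photutils exposes the class under
# photutils.segmentation).
import numpy as np
from photutils import SegmentationImage

seg = np.zeros((50, 50), dtype=int)
seg[5:10, 5:10] = 1                                # segment 1
seg[30:40, 30:40] = 2                              # segment 2
eliminate = np.zeros((50, 50), dtype=bool)
eliminate[32:35, 32:35] = True                     # overlaps segment 2 only

segimg = SegmentationImage(seg)
segimg.remove_masked_labels(eliminate, partial_overlap=True)
cleaned = segimg.data                              # segment 2 removed, segment 1 kept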
Exemple #50
0
def extract_sources(img, **pars):
    """Use photutils to find sources in image based on segmentation.

    Parameters
    ----------
    img : ndarray
        Input image array in which sources will be identified.

    dqmask : array
        Bitmask which identifies whether a pixel should be used (1) in source
        identification or not(0). If provided, this mask will be applied to the
        input array prior to source identification.

    fwhm : float
        Full-width half-maximum (fwhm) of the PSF in pixels.
        Default: 3.0

    threshold : float or None
        Value from the image which serves as the limit for determining sources.
        If None, compute a default value of (background+5*rms(background)).
        If threshold < 0.0, use absolute value as scaling factor for default value.
        Default: None

    source_box : int
        Size of box (in pixels) which defines the minimum size of a valid source

    classify : boolean
        Specify whether or not to apply classification based on invariant moments
        of each source to determine whether or not a source is likely to be a
        cosmic-ray, and not include those sources in the final catalog.
        Default: True

    centering_mode : {'segmentation', 'starfind'}
        Algorithm to use when computing the positions of the detected sources.
        Centering will only take place after `threshold` has been determined, and
        sources are identified using segmentation.  Centering using `segmentation`
        will rely on `photutils.segmentation.source_properties` to generate the
        properties for the source catalog.  Centering using `starfind` will use
        `photutils.IRAFStarFinder` to characterize each source in the catalog.
        Default: 'starfind'

    nlargest : int, None
        Number of largest (brightest) sources in each chip/array to measure
        when using 'starfind' mode.  Default: None (all)

    output : str
        If specified, write out the catalog of sources to the file with this name

    plot : boolean
        Specify whether or not to create a plot of the sources on a view of the image
        Default: False

    vmax : float
        If plotting the sources, scale the image to this maximum value.

    """
    fwhm = pars.get('fwhm', 3.0)
    threshold = pars.get('threshold', None)
    source_box = pars.get('source_box', 7)
    classify = pars.get('classify', True)
    output = pars.get('output', None)
    plot = pars.get('plot', False)
    vmax = pars.get('vmax', None)
    centering_mode = pars.get('centering_mode', 'starfind')
    deblend = pars.get('deblend', False)
    dqmask = pars.get('dqmask',None)
    nlargest = pars.get('nlargest', None)
    # apply any provided dqmask for segmentation only
    if dqmask is not None:
        imgarr = img.copy()
        imgarr[dqmask] = 0
    else:
        imgarr = img

    bkg_estimator = MedianBackground()
    bkg = None

    exclude_percentiles = [10,25,50,75]
    for percentile in exclude_percentiles:
        try:
            bkg = Background2D(imgarr, (50, 50), filter_size=(3, 3),
                           bkg_estimator=bkg_estimator,
                           exclude_percentile=percentile)
            # If it succeeds, stop and use that value
            bkg_rms = (5. * bkg.background_rms)
            bkg_rms_mean = bkg.background.mean() + 5. * bkg_rms.std()
            default_threshold = bkg.background + bkg_rms
            if threshold is None or threshold < 0.0:
                if threshold is not None and threshold < 0.0:
                    threshold = -1*threshold*default_threshold
                    log.info("{} based on {}".format(threshold.max(), default_threshold.max()))
                    bkg_rms_mean = threshold.max()
                else:
                    threshold = default_threshold
            else:
                bkg_rms_mean = 3. * threshold
            if bkg_rms_mean < 0:
                bkg_rms_mean = 0.
            break
        except Exception:
            bkg = None

    # If Background2D does not work at all, define default scalar values for
    # the background to be used in source identification
    if bkg is None:
        bkg_rms_mean = max(0.01, imgarr.min())
        bkg_rms = bkg_rms_mean * 5

    sigma = fwhm * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=source_box, y_size=source_box)
    kernel.normalize()
    segm = detect_sources(imgarr, threshold, npixels=source_box,
                          filter_kernel=kernel)
    if deblend:
        segm = deblend_sources(imgarr, segm, npixels=5,
                           filter_kernel=kernel, nlevels=16,
                           contrast=0.01)
    # If classify is turned on, it should modify the segmentation map
    if classify:
        cat = source_properties(imgarr, segm)
        if len(cat) > 0:
            # Remove likely cosmic-rays based on central_moments classification
            bad_srcs = np.where(classify_sources(cat) == 0)[0]+1
            segm.remove_labels(bad_srcs) # CAUTION: May be time-consuming!!!


    # convert segm to mask for daofind
    if centering_mode == 'starfind':
        src_table = None
        #daofind = IRAFStarFinder(fwhm=fwhm, threshold=5.*bkg.background_rms_median)
        log.info("Setting up DAOStarFinder with: \n    fwhm={}  threshold={}".format(fwhm, bkg_rms_mean))
        daofind = DAOStarFinder(fwhm=fwhm, threshold=bkg_rms_mean)
        # Identify nbrightest/largest sources
        if nlargest is not None:
            if nlargest > len(segm.labels):
                nlargest = len(segm.labels)
            large_labels = np.flip(np.argsort(segm.areas)+1)[:nlargest]
        log.info("Looking for sources in {} segments".format(len(segm.labels)))

        for label in segm.labels:
            if nlargest is not None and label not in large_labels:
                continue # Move on to the next segment
            # Get slice definition for the segment with this label
            seg_slice = segm.segments[label-1].slices
            seg_yoffset = seg_slice[0].start
            seg_xoffset = seg_slice[1].start

            #Define raw data from this slice
            detection_img = img[seg_slice]
            # zero out any pixels which do not have this segments label
            detection_img[np.where(segm.data[seg_slice]==0)] = 0

            # Detect sources in this specific segment
            seg_table = daofind(detection_img)
            # Pick out brightest source only
            if src_table is None and len(seg_table) > 0:
                # Initialize final master source list catalog
                src_table = Table(names=seg_table.colnames,
                                  dtype=[dt[1] for dt in seg_table.dtype.descr])
            if len(seg_table) > 0:
                max_row = np.where(seg_table['peak'] == seg_table['peak'].max())[0][0]
                # Add row for detected source to master catalog
                # apply offset to slice to convert positions into full-frame coordinates
                seg_table['xcentroid'] += seg_xoffset
                seg_table['ycentroid'] += seg_yoffset
                src_table.add_row(seg_table[max_row])

    else:
        cat = source_properties(img, segm)
        src_table = cat.to_table()
        # Make column names consistent with IRAFStarFinder column names
        src_table.rename_column('source_sum', 'flux')
        src_table.rename_column('source_sum_err', 'flux_err')

    if src_table is not None:
        log.info("Total Number of detected sources: {}".format(len(src_table)))
    else:
        log.info("No detected sources!")
        return None, None

    # Move 'id' column from first to last position
    # Makes it consistent for remainder of code
    cnames = src_table.colnames
    cnames.append(cnames[0])
    del cnames[0]
    tbl = src_table[cnames]

    if output:
        tbl['xcentroid'].info.format = '.10f'  # optional format
        tbl['ycentroid'].info.format = '.10f'
        tbl['flux'].info.format = '.10f'
        if not output.endswith('.cat'):
            output += '.cat'
        tbl.write(output, format='ascii.commented_header')
        log.info("Wrote source catalog: {}".format(output))

    if plot and plt is not None:
        norm = None
        if vmax is None:
            norm = ImageNormalize(stretch=SqrtStretch())
        fig, ax = plt.subplots(2, 2, figsize=(8, 8))
        ax[0][0].imshow(imgarr, origin='lower', cmap='Greys_r', norm=norm, vmax=vmax)
        ax[0][1].imshow(segm, origin='lower', cmap=segm.cmap(random_state=12345))
        ax[0][1].set_title('Segmentation Map')
        ax[1][0].imshow(bkg.background, origin='lower')
        if not isinstance(threshold, float):
            ax[1][1].imshow(threshold, origin='lower')
    return tbl, segm
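# --- Hedged sketch of the threshold fallback used above -------------------------
# extract_sources() retries Background2D with progressively larger
# exclude_percentile values; this is the same loop in isolation on a toy image,
# with a scalar fallback when every attempt fails.
import numpy as np
from photutils import Background2D, MedianBackground

img = np.random.normal(100.0, 5.0, (300, 300))
bkg = None
for percentile in (10, 25, 50, 75):
    try:
        bkg = Background2D(img, (50, 50), filter_size=(3, 3),
                           bkg_estimator=MedianBackground(),
                           exclude_percentile=percentile)
        break
    except Exception:
        bkg = None

if bkg is not None:
    threshold = bkg.background + 5.0 * bkg.background_rms
else:
    threshold = np.full(img.shape, img.mean() + 5.0 * img.std())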
    def set_mask(self):

        """
        Create a mask covering the bad (NaN) pixels in the primary frame.
        :return:
        """

        # Inform the user
        log.info("Creating a mask to cover bad pixels ...")

        # Create a mask for the nans in the primary
        nan_mask = Mask.is_nan(self.image.frames.primary)

        # Sometimes, saturated stars have a few pixels that are nan. In this case, we certainly don't want to ignore these pixels
        # because we want to remove the star and its diffraction spikes. So, we want to remove these little blobs of nan from the nan_mask,
        # and interpolate the image there ... So here we separate the nan_mask into a new mask without the little blobs, and a mask consisting
        # of only these blobs.

        from ..tools import plotting

        # plotting.plot_box(nan_mask[1200:1300, 700:790], "nan mask")

        # eroded_nan_mask = nan_mask.eroded()
        # plotting.plot_box(eroded_nan_mask[1200:1300, 700:790], "eroded nan mask")

        # blob_mask = nan_mask - eroded_nan_mask
        # plotting.plot_box(blob_mask[1200:1300, 700:790], "blob mask")
        # plotting.plot_box(blob_mask, "blob mask")

        from photutils import detect_sources

        segments = detect_sources(nan_mask.astype(float), 0.1, 1).data

        # Check where the nan_mask hits the boundary
        hits_boundary, where = nan_mask.hits_boundary(where=True)

        blob_mask = nan_mask.copy()
        if hits_boundary:

            indices = set()

            for pixel in where:
                index = segments[pixel.y, pixel.x]
                indices.add(index)

            # print("indices=", indices)

            for index in indices:

                # plotting.plot_box(segments == index, "segments == index")

                blob_mask[segments == index] = False
                segments[segments == index] = False

        # plotting.plot_box(segments, "segmentation map")
        # plotting.plot_box(blob_mask[1200:1300, 700:790], "blob mask")

        # Interpolate the frame over the blobs

        # Get a list of contours for the blobs (the oversaturated pixels)
        from ..analysis import sources

        contours = sources.find_contours(segments, segments, 10.0)

        # print(contours)

        # Create a file
        # f = open(os.path.join(self.directory_path, "oversaturated.reg"), 'w')
        # Initialize the region string
        # print("# Region file format: DS9 version 4.1", file=f)
        # for contour in contours:
        #    print("image;ellipse({},{},{},{})".format(contour.center.x+1, contour.center.y+1, contour.radius.x, contour.radius.y), file=f)
        # f.close()

        for contour in contours:

            # Create source object
            # source = Source.from_ellipse(self.image.frames.primary, contour, 1.5)
            # source.plot()

            cutout = Box.from_ellipse(self.image.frames.primary, contour)

            # cutout.plot()

            cutout_segment = segments[cutout.y_slice, cutout.x_slice]

            # plotting.plot_box(cutout_segment)

            import numpy as np

            cutout[np.isnan(cutout)] = 0.0

            where_is_cutout_segment = cutout_segment.astype(bool)

            interpolated_box = cutout.interpolated(where_is_cutout_segment, "local_mean")

            # plotting.plot_box(interpolated_box)

            # Replace frame pixels
            self.image.frames.primary[cutout.y_slice, cutout.x_slice][where_is_cutout_segment] = interpolated_box[
                where_is_cutout_segment
            ]
            nan_mask[cutout.y_slice, cutout.x_slice][where_is_cutout_segment] = False

        # Add the mask
        self.image.add_mask(nan_mask, "bad")

        # Add the bad mask
        if self.bad_region is not None:

            bad_mask = Mask.from_region(self.bad_region, self.image.xsize, self.image.ysize)
            self.image.masks.bad += bad_mask
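# --- Hedged sketch ------------------------------------------------------------
# set_mask() above labels NaN blobs with detect_sources and interpolates small
# interior ones; this is a stripped-down version of the same idea using a plain
# local-median fill instead of the Box/contour machinery of this pipeline.
import numpy as np
from scipy.ndimage import median_filter
from photutils import detect_sources

img = np.random.normal(1.0, 0.1, (100, 100))
img[40:43, 50:53] = np.nan                         # small interior NaN blob

nan_blobs = detect_sources(np.isnan(img).astype(float), 0.1, npixels=1)
if nan_blobs is not None:
    filled = median_filter(np.nan_to_num(img, nan=np.nanmedian(img)), size=5)
    blob = nan_blobs.data > 0
    img[blob] = filled[blob]                       # fill the NaN blobs in place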
Exemple #52
0
def bkg_boxes(frame,nboxes,length,sources=False):
    """
    Function to calculate the sigma clipped statistics
    of a number of randomly generated boxes
    Variables:
    frame: fits image 
    nboxes: number of boxes to generate
    length: length of side of box in pixels
    sources: if sources = True, the sources in each box will be detected and masked
           if sources = False, no masking is done 
    """
    side = float(length)/2.0
    
    if not os.path.isfile('bgvals_{:s}.txt'.format(frame)):
        hdu = pyfits.open(frame,memmap=True)
        image = hdu[0].data
        
        #Get length of image in each axis
        naxis1 = float(hdu[0].header['NAXIS1'])
        naxis2 = float(hdu[0].header['NAXIS2'])
        
        #generate the centers of nboxes random boxes.
        #np.random.seed(1234)
        box_centers = np.random.random_integers(0,np.min([naxis1,naxis2]),size=(nboxes,2))
        logfile = open('bgvals_{:s}.txt'.format(frame), 'w+')
        
        bg_stats = []
        centers = []
        for center in range(len(box_centers)):
            x1 = box_centers[center][0]-side
            x2 = box_centers[center][0]+side
            y1 = box_centers[center][1]-side
            y2 = box_centers[center][1]+side
        
            #Check to ensure that box is within image; skip boxes that fall outside
            if not ((x1 > 0.0 and x2 < naxis1) and (y1 > 0.0 and y2 < naxis2)):
                continue
            centers.append(box_centers[center])
            """
            The centers that are within the image bounds are returned
            in case you need to examine the regions used.
            """
            box = image[int(x1):int(x2),int(y1):int(y2)]

            if (box >= 0).all():
                """
                Only boxes with non-negative values are kept.
                This should help deal with cell gaps
                The sigma and iter values might need some tuning.
                """
                mean, median, std = sigma_clipped_stats(box, sigma=3.0, iters=10)
                if sources == False:
                    bg_stats.append((mean, median, std))
                    print >> logfile, "{:10.3f} {:10.3f} {:10.3f} {:10.3f} {:10.3f} {:10.3f} {:10.3f}".format(x1,y1,x2,y2,mean,median,std)
                if sources == True:
                    threshold = median + (std * 2.)
                    segm_img = detect_sources(box, threshold, npixels=50)
                    mask = segm_img.data.astype(np.bool)# turn segm_img into a mask
                    selem = np.ones((15, 15))    # dilate using a 15x15 box
                    mask2 = binary_dilation(mask, selem)
                    mask_mean = np.mean(mask2)
                    mean_mask, median_mask, std_mask = sigma_clipped_stats(box, sigma=3.0, mask=mask2)
                    bg_stats.append((mean_mask, median_mask, std_mask))
                    print >> logfile, "{:10.3f} {:10.3f} {:10.3f} {:10.3f} {:10.3f} {:10.3f} {:10.3f}".format(x1,y1,x2,y2,mean_mask,median_mask,std_mask)
                
        bg_stats = np.reshape(np.array(bg_stats),(len(bg_stats),3))
        centers = np.reshape(np.array(centers),(len(centers),2))
        
        #Background level: median of the sigma-clipped box medians
        med = np.median(bg_stats[:,1])
        #Background scatter: median of the sigma-clipped box standard deviations
        std = np.median(bg_stats[:,2])
        
        #Locate the box that had the largest std
        #Array will be returned for plotting if wanted
        max_std = np.argmax(bg_stats[:,2])
        max_center = centers[max_std]
        max_box = image[int(max_center[0]-side):int(max_center[0]+side),int(max_center[1]-side):int(max_center[1]+side)]
        #plt.imshow(max_box,origin='lower', cmap='Greys_r')
        #plt.show()
    else:
        x1s,y1s,x2s,y2s,means,medians,stds = np.loadtxt('bgvals_{:s}.txt'.format(frame), usecols=(0,1,2,3,4,5,6), unpack=True)
        med = np.median(medians)
        std = np.median(stds)
        centers = np.array((x1s+side, y1s+side))
        
    return med,std,centers
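# --- Hedged sketch of the random-box background estimate ------------------------
# The same idea as bkg_boxes() above on an in-memory toy image (no FITS I/O and
# no source masking): the background level is the median of the sigma-clipped
# box medians, and its scatter is the median of the box rms values.
import numpy as np
from astropy.stats import sigma_clipped_stats

img = np.random.normal(50.0, 3.0, (1000, 1000))
nboxes, side = 200, 50                              # 100-pixel boxes
rng = np.random.default_rng(1234)
box_centers = rng.integers(side, 1000 - side, size=(nboxes, 2))

bg_stats = []
for cx, cy in box_centers:
    box = img[cx - side:cx + side, cy - side:cy + side]
    bg_stats.append(sigma_clipped_stats(box, sigma=3.0))
bg_stats = np.array(bg_stats)
bkg_median = np.median(bg_stats[:, 1])
bkg_rms = np.median(bg_stats[:, 2])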
Exemple #53
0
def compute_binary_map(frame, thresholds, injections, fwhm, npix=1,
                       overlap_threshold=0.7, max_blob_fact=2, plot=False,
                       debug=False):
    """
    Take a list of ``thresholds``, create binary maps and count detections/false positives (fps).
    A blob which is "too big" is split into apertures, and every aperture adds
    one 'false positive'.

    Parameters
    ----------
    frame : numpy ndarray
        Detection map.
    thresholds : list or numpy ndarray
        List of thresholds (detection criteria).
    injections : tuple, list of tuples
        Coordinates (x,y) of the injected companions. Also accepts 1d/2d
        ndarrays.
    fwhm : float
        FWHM, used for obtaining the size of the circular aperture centered at
        the injection position (and measuring the overlapping with found blobs).
        The circular aperture has 2 * FWHM in diameter.
    npix : int, optional
        The number of connected pixels, each greater than the given threshold,
        that an object must have to be detected. ``npix`` must be a positive
        integer. Passed to ``detect_sources`` function from ``photutils``.
    overlap_threshold : float
        Percentage of overlap a blob has to have with the aperture around an
        injection.
    max_blob_fact : float
        Maximum size of a blob (in multiples of the resolution element) before
        it is considered as "too big" (= non-detection).
    plot : bool, optional
        If True, a final resulting plot summarizing the results will be shown.
    debug : bool, optional
        For showing optional information.

    Returns
    -------
    list_detections : list of int
        List of detection count for each threshold.
    list_fps : list of int
        List of false positives count for each threshold.
    list_binmaps : list of 2d ndarray
        List of binary maps: detection maps thresholded for each threshold
        value.

    """
    def _overlap_injection_blob(injection, fwhm, blob_mask):
        """
        Parameters
        ----------
        injection: tuple (y,x)
        fwhm : float
        blob_mask : 2d bool ndarray

        Returns
        -------
        overlap_fact : float between 0 and 1
            Percentage of the area overlap. If the blob is smaller than the
            resolution element, this is ``intersection_area / blob_area``,
            otherwise ``intersection_area / resolution_element``.

        """
        injection_mask = get_circle(np.ones_like(blob_mask), radius=fwhm,
                                    cy=injection[1], cx=injection[0],
                                    mode="mask")
        intersection = injection_mask & blob_mask
        smallest_area = min(blob_mask.sum(), injection_mask.sum())
        return intersection.sum() / smallest_area
    # --------------------------------------------------------------------------
    list_detections = []
    list_fps = []
    list_binmaps = []
    sizey, sizex = frame.shape
    cy, cx = frame_center(frame)
    reselem_mask = get_circle(frame, radius=fwhm, cy=cy, cx=cx, mode="val")
    npix_circ_aperture = reselem_mask.shape[0]

    # normalize injections: accepts combinations of 1d/2d and tuple/list/array.
    injections = np.asarray(injections)
    if injections.ndim == 1:
        injections = np.array([injections])

    for ithr, threshold in enumerate(thresholds):
        if debug:
            print("\nprocessing threshold #{}: {}".format(ithr + 1, threshold))

        segments = detect_sources(frame, threshold, npix, connectivity=4)
        binmap = (segments.data != 0)

        if debug:
            plot_frames((segments.data, binmap), cmap=('tab20b', 'binary'),
                        circle=tuple(tuple(xy) for xy in injections),
                        circle_radius=fwhm, circle_alpha=0.6,
                        label=("segmentation map", "binary map"))

        detections = 0
        fps = 0

        for segment in segments.segments:
            label = segment.label
            blob_mask = (segments.data == label)
            blob_area = segment.area

            if debug:
                lab = "blob #{}, area={}px**2".format(label, blob_area)
                plot_frames(blob_mask, circle_radius=fwhm, circle_alpha=0.6,
                            circle=tuple(tuple(xy) for xy in injections),
                            cmap='binary', label_size=8, label=lab,
                            size_factor=3)

            for iinj, injection in enumerate(injections):
                if injection[0] > sizex or injection[1] > sizey:
                    raise ValueError("Wrong coordinates in `injections`")

                if debug:
                    print("\ttesting injection #{} at {}".format(iinj + 1,
                                                                 injection))

                if blob_area > max_blob_fact * npix_circ_aperture:
                    number_of_apertures_in_blob = blob_area / npix_circ_aperture
                    fps += number_of_apertures_in_blob  # float, rounded at end
                    if debug:
                        print("\tblob is too big (+{:.0f} fps)"
                              "".format(number_of_apertures_in_blob))
                        print("\tskipping all other injections")
                    # continue with next blob, do not check other injections
                    break

                overlap = _overlap_injection_blob(injection, fwhm, blob_mask)
                if overlap > overlap_threshold:
                    if debug:
                        print("\toverlap of {}! (+1 detection)"
                              "".format(overlap))

                    detections += 1
                    # continue with next blob, do not check other injections
                    break

                if debug:
                    print("\toverlap of {} -> do nothing".format(overlap))

            else:
                if debug:
                    print("\tdid not find a matching injection for this "
                          "blob (+1 fps)")
                fps += 1

        if debug:
            print("done with threshold #{}".format(ithr))
            print("result: {} detections, {} fps".format(detections, fps))

        fps = np.round(fps).astype(int).item()  # -> python `int`

        list_detections.append(detections)
        list_binmaps.append(binmap)
        list_fps.append(fps)

    if plot:
        labs = tuple(str(det) + ' detections' + '\n' + str(fps) +
                     ' false positives' for det, fps in zip(list_detections,
                                                            list_fps))
        plot_frames(tuple(list_binmaps), title='Final binary maps', label=labs,
                    label_size=8, cmap='binary', circle_alpha=0.8,
                    circle=tuple(tuple(xy) for xy in injections),
                    circle_radius=fwhm, circle_color='deepskyblue', axis=False)

    return list_detections, list_fps, list_binmaps
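# --- Hedged sketch ------------------------------------------------------------
# The core of compute_binary_map(): threshold a detection map at several levels
# with detect_sources and keep the resulting binary maps.  A toy detection map
# with one injected Gaussian blob stands in for a real VIP frame.
import numpy as np
from photutils import detect_sources

yy, xx = np.mgrid[0:100, 0:100]
frame_demo = np.random.normal(0.0, 1.0, (100, 100))
frame_demo += 8.0 * np.exp(-((xx - 60)**2 + (yy - 40)**2) / (2 * 2.0**2))

binmaps = []
for threshold in (2.0, 4.0, 6.0):
    segments = detect_sources(frame_demo, threshold, npixels=1, connectivity=4)
    binmaps.append(segments.data != 0 if segments is not None
                   else np.zeros(frame_demo.shape, bool))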
def run_single_source(imfile, errfile, filter, true_pos, find_source=True, pclass='E',
                      extent=3.5, plot=True, name='Source'):
                      
    print 'Aperture photometry for ',name,' in Herschel filter ',filter
    
    # Load the data
    print 'Loading the data...'
    if imfile != errfile:
        hdu_image = pyf.open(imfile)[0]
        hdu_err = pyf.open(errfile)[0]
    else:
        data = pyf.getdata(imfile)
        hdr = pyf.getheader(imfile)
        hdr.remove('NAXIS3')
        hdr['NAXIS'] = 2
        hdu_image = pyf.PrimaryHDU(data=data[0], header=hdr)
        hdu_err = pyf.PrimaryHDU(data=data[1], header=hdr)
    
    # Convert to Jy/pixel
    if ((filter == 'PSW') | (filter == 'PMW') | (filter == 'PLW')):
        print 'SPIRE image units being converted to Jy/pixel...'
        hdu_image = prep_image(hdu_image, filter)
        hdu_err = prep_image(hdu_err, filter) 

    # Calculate the global background
    print 'Calculating the global background...'
    im = hdu_image.data
    im_med, im_std = estimate_bkg(im)
    print 'Background median = ', im_med
    print 'Background rms = ', im_std
    
    # Use a 1.5 sigma threshold to detect the BAT source
    thresh = im_med + 1.5*im_std
    segm_img = detect_sources(im, thresh, npixels=5)
    props = source_properties(im-im_med, segm_img, wcs=wcs.WCS(hdu_image.header))
    
    # Find the source in the image
    if (find_source) & ((pclass != 'U') & (pclass != 'C')) & (len(props) > 0):
        print 'Finding the source in the FOV...'
        ind_src = find_bat_source(true_pos, props, 2*FWHM[filter])
    else:
        ind_src = None
    
    # Create the source aperture            
    if ind_src is None:
        print 'Source not found or user fixed position of source aperture.'
        print 'Creating the source aperture centered at RA=', true_pos.ra.deg, ' and DEC=', true_pos.dec.deg        
        ap, type = create_aperture(None, filter, pclass, coord_bat=true_pos,
                                   wcs=wcs.WCS(hdu_image.header)) 
            
    else:
        print 'Source found in the FOV!'
        print 'Creating the source aperture centered at X=', props[ind_src].xcentroid.value, ' and Y=', props[ind_src].ycentroid.value
        ap, type = create_aperture(props[ind_src], filter, pclass, extent=extent,
                                   coord_bat=true_pos, wcs=wcs.WCS(hdu_image.header))
    
    # Create new mask based on 2-sigma threshold and remove the bat source if detected
    print 'Creating a new mask to not include the source...'
    thresh2 = im_med + 2.0*im_std
    segm_img2 = detect_sources(im, thresh2, npixels=5)
    props2 = source_properties(im-im_med, segm_img2, wcs=wcs.WCS(hdu_image.header))
    nanmask = np.isnan(im)
    nanerr = np.isnan(hdu_err.data) | np.isinf(hdu_err.data)
    
    if len(props2) != 0:
        ind_src2 = find_bat_source(true_pos, props2, 2*FWHM[filter])
    else:
        ind_src2 = None

    if ind_src2 is None:
        mask = np.logical_not(segm_img2.data_masked.mask) | nanmask | nanerr
    else:
        segm_img2.remove_labels(ind_src2 + 1)
        mask = np.logical_not(segm_img2.data_masked.mask) | nanmask | nanerr
    
    # Run the aperture photometry
    print 'Calculating the photometry!'
    if (pclass != 'C'):    
        results, apertures = herschel_aperture_photometry(ap, hdu_image, hdu_err.data,
                                                          type, filter, pclass=pclass,
                                                          bkg=im_med, mask=mask)
    else:
        results, apertures = herschel_aperture_photometry(ap, hdu_image, hdu_err.data,
                                                          type, filter, pclass=pclass,
                                                          bkg=im_med, mask=None)
    type = results['type']
    print results
    # Plot the apertures on top of the data
    if plot:
        print 'Plotting the apertures used for photometry.'
        cbat = [true_pos.ra.deg, true_pos.dec.deg]
        
        if (type == 'fixed'):
            pmax = None
        else:
            pmax = props[ind_src].max_value + im_med
        
        if (pmax is not None) and (pmax < im_med):
            pmax = None

        fig = plot_aperture_photometry(hdu_image, apertures, filter, global_bkg=im_med,
                                       pixel_max=pmax, title=name+' ['+filter+']', plot_bat_loc=cbat)
        
        print 'All done!'
        return results, apertures, fig
    
    else:
        print 'All done!'
        return results, apertures
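# --- Hedged usage sketch ------------------------------------------------------
# Typical call for run_single_source(); the FITS file names are hypothetical,
# so the call itself is left commented.  'PSW' is the SPIRE 250-micron band key
# used elsewhere in these snippets.
import astropy.coordinates as coord
import astropy.units as u

true_pos = coord.SkyCoord(ra=210.0*u.deg, dec=-30.0*u.deg, frame='fk5')
# results, apertures, fig = run_single_source('example_psw_signal.fits',
#                                             'example_psw_error.fits',
#                                             'PSW', true_pos, pclass='E',
#                                             extent=3.5, plot=True,
#                                             name='Example Source')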