Example #1
def evaluate_sky_dragonfly(img, b=15, f=3, sigma=1.5, radius=1.0, threshold=0.05, show_fig=True, show_hist=True):
    '''Evaluate the mean sky value.

    Parameters
    ----------
    img: 2-D numpy array, the input image.
    b: int, background box size in pixels.
    f: int, background filter width in boxes.
    show_fig: bool. If True, show the masked sky image.
    show_hist: bool. If True, show a histogram of the sky pixel values.

    Returns
    -------
    bkg_global: `sep.Background` object.
    '''
    # b: background box size (pixels); f: background filter width (boxes)
    bkg = sep.Background(img, maskthresh=0, bw=b, bh=b, fw=f, fh=f)

    obj_lthre, seg_lthre = extract_obj(img, b=b, f=f, sigma=sigma, pixel_scale=Dragonfly_pixel_scale,
                                            deblend_nthresh=128, deblend_cont=0.0001, show_fig=show_fig)

    # make mask
    seg_mask = make_binary_mask(img, None, seg_lthre, radius=2.0, show_fig=show_fig, threshold=0.005, gaia=False)

    if show_hist:
        from scipy import stats
        samp = img[~seg_mask]
        x = np.linspace(-0.5, 0.5, 100)
        gkde = stats.gaussian_kde(dataset=samp)
        fig, ax = plt.subplots(figsize=(8,6))

        ax.plot(x, gkde.evaluate(x), linestyle='dashed', c='black', lw=2,
                label='KDE')
        ax.hist(samp, bins=x, density=True)
        ax.legend(loc='best', frameon=False, fontsize=20)


        ax.set_title('Histogram of pixels', fontsize=20)
        ax.set_xlabel('Pixel Value', fontsize=20)
        ax.set_ylabel('Normed Number', fontsize=20)
        ax.tick_params(labelsize=20)
        ax.set_ylim(0,20)
        offset = x[np.argmax(gkde.evaluate(x))]
        ax.text(-0.045, 10, r'$\mathrm{offset}='+str(round(offset, 6))+'$', fontsize=20)
        plt.vlines(np.median(samp), 0, 20, linestyle='--')
        print('mean', np.mean(samp))

    bkg_global = sep.Background(img, 
                                mask=seg_mask, maskthresh=0,
                                bw=20, bh=20, 
                                fw=5, fh=5)
    print("# Mean Sky / RMS Sky = %10.5f / %10.5f" % (bkg_global.globalback, bkg_global.globalrms))
    return bkg_global
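For context, a minimal standalone sketch of the masked global-background step at the end of this function, on synthetic data, with a simple threshold cut standing in for the segmentation-based seg_mask built by the helper functions above (all values are illustrative):

import numpy as np
import sep

rng = np.random.default_rng(0)
sky_img = rng.normal(0.0, 0.05, size=(300, 300))
yy, xx = np.mgrid[0:300, 0:300]
sky_img += 10.0 * np.exp(-((xx - 150)**2 + (yy - 150)**2) / (2 * 4.0**2))

# Crude stand-in for the segmentation-based object mask.
obj_mask = sky_img > 5 * 0.05

bkg_global = sep.Background(sky_img, mask=obj_mask, maskthresh=0,
                            bw=20, bh=20, fw=5, fh=5)
print("# Mean Sky / RMS Sky = %10.5f / %10.5f"
      % (bkg_global.globalback, bkg_global.globalrms))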
Example #2
 def _bkg(self, maskthresh):
     if self.mask.any():
         if maskthresh is not None:
             back = sep.Background(self.pixeldata.data, mask=self.mask,
                                   maskthresh=maskthresh)
             self.__bkg = back
         else:
             back = sep.Background(self.pixeldata.data, mask=self.mask)
             self.__bkg = back
     else:
         back = sep.Background(self.pixeldata.data)
         self.__bkg = back
Example #3
def make_sep_catalog(data,
                     header,
                     options,
                     mask=None,
                     min_sep=10.,
                     do_bgsub=False):

    try:
        bkg = sep.Background(data, mask, bw=32, bh=32, fw=3, fh=3)
    except ValueError:
        data = data.byteswap().newbyteorder()
        bkg = sep.Background(data, mask, bw=32, bh=32, fw=3, fh=3)

    if do_bgsub:
        error = np.sqrt(data)
        data_bgsub = data - bkg
    else:
        error = bkg.globalrms
        data_bgsub = data
    sources = sep.extract(data_bgsub, err=error, mask=mask, **options['sep'])

    dists = ((sources['x'] - sources['x'][:, np.newaxis])**2 +
             (sources['y'] - sources['y'][:, np.newaxis])**2)**0.5
    closest = np.partition(dists, 1)[:, 1]
    sources = sources[closest > min_sep]

    t = table.Table(sources)
    kronrad, krflag = sep.kron_radius(data_bgsub, sources['x'], sources['y'],
                                      sources['a'], sources['b'],
                                      sources['theta'], 6.0)

    flux, fluxerr, flag = sep.sum_ellipse(data_bgsub,
                                          sources['x'],
                                          sources['y'],
                                          sources['a'],
                                          sources['b'],
                                          np.pi / 2.0,
                                          2.5 * kronrad,
                                          subpix=1,
                                          err=error)

    t['mag'] = -2.5 * np.log10(flux)
    t['magerr'] = np.log(10) / 2.5 * fluxerr / flux
    t['ra'], t['dec'] = WCS(header).all_pix2world(t['x'], t['y'], 0)

    t = t['x', 'y', 'mag', 'magerr', 'ra', 'dec']

    return t
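make_sep_catalog expects options['sep'] to supply the keyword arguments for sep.extract, including the detection threshold thresh, since none is passed explicitly. A hypothetical call might look like the following (the file name and keyword values are placeholders, and the module-level imports used by the function are assumed):

from astropy.io import fits

options = {'sep': {'thresh': 1.5, 'minarea': 5, 'deblend_cont': 0.005}}

with fits.open('image.fits') as hdul:   # placeholder file name
    catalog = make_sep_catalog(hdul[0].data.astype('float32'),
                               hdul[0].header, options)
print(catalog)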
Example #4
    def debug_pyplotter(self):
        data_sub = self.image_data_formatted - \
                   sep.Background(self.image_data_formatted)
        fig, ax = plt.subplots()
        m, s = np.mean(data_sub), np.std(data_sub)
        im = ax.imshow(data_sub,
                       interpolation='nearest',
                       cmap='gray',
                       vmin=m - s,
                       vmax=m + s,
                       origin='lower')
        # plot an ellipse for each object
        objects = self.star_objects

        for i in range(len(self.star_objects)):
            e = Ellipse(xy=(objects['x'][i], objects['y'][i]),
                        width=6 * objects['a'][i],
                        height=6 * objects['b'][i],
                        angle=objects['theta'][i] * 180. / np.pi)
            e.set_facecolor('none')
            e.set_edgecolor('red')
            ax.add_artist(e)

        plt.colorbar(im)
        plt.show()
Example #5
def extract(residual_s, thresh=None):
    """
    Uses sep to find sources on a residual image (or list of images).
    Arguments:
        residual_s -- image of residuals from hotpants, or a list of such images
        thresh -- detection threshold; if None, 3 * the global background RMS is used
    Returns:
        A list of Source objects representing the location and various metrics
            of detected variable sources
    """
    if isinstance(residual_s, list):
        residuals = residual_s
    else:
        residuals = [residual_s]
    sources = []
    for r in residuals:
        r_np = to_np(r)
        bkg = sep.Background(r_np)
        if thresh is None:
            # from astroalign's settings
            r_thresh = bkg.globalrms * 3.0
        else:
            r_thresh = thresh
        sources.append(
            sep.extract(r_np - bkg.back(),
                        r_thresh,
                        segmentation_map=True))
    return sources if isinstance(residual_s, list) else sources[0]
Example #6
def extractSourcesFromRCD2(filename, biasimage):
    hnumpix = 2048
    vnumpix = 2048
    gain = 'low'
    try:
        fid = open(filename, 'rb')
        # fid.seek(0,0)
        # magicnum = readxbytes(fid,4) # 4 bytes ('Meta')
        # Check the magic number. If it doesn't match, exit function

        fid.seek(152, 0)
        timestamp = readxbytes(fid, 29)
        print(timestamp)
        # Load data portion of file
        fid.seek(246, 0)
        # fid.seek(384,0)

        table = np.fromfile(fid, dtype=np.uint8, count=12582912)
        testimages = nb_read_data(table)
        image = split_images(testimages, hnumpix, vnumpix, gain)
        image = image.astype('int32')
        image = image.copy(order='C')
        fid.close()

        image = subtractBias(image, biasimage)

        # m, s = np.mean(image), np.std(image)
        bkg = sep.Background(image)
        data_sub = image - bkg
        objects = sep.extract(data_sub, 2.5, err=bkg.globalrms)
        print(objects)
    except Exception as err:
        print(filename)
        print('Error reading file:', err)
Example #7
def sepsky(image, box_size=(64, 64), filter_size=(3, 3)):
    """ Estimate sky background using sep."""

    # Check if the data is "sep ready"

    data = image.sepready(image.data)
    if image.mask is not None:
        mask = image.sepready(image.mask)
    else:
        mask = None

    #image.native()  # make sure the arrays use native byte-order

    ## Background subtraction with SEP
    ##  measure a spatially varying background on the image
    #if image.data.flags['C_CONTIGUOUS']==False:
    #    data = image.data.copy(order='C')
    #    mask = image.mask.copy(order='C')
    #else:
    #    data = image.data
    #    mask = image.mask
    bkg = sep.Background(data,
                         mask=mask,
                         bw=box_size[0],
                         bh=box_size[1],
                         fw=filter_size[0],
                         fh=filter_size[1])
    return bkg.back()
Example #8
def background(data, box_size=64, filter_size=3, mask=None, global_bkg=True):
    """Estimate the image background using SExtractor algorithm.

    Parameters
    ----------
    data: array_like
        2D array containing the image to extract the background.
    box_size: `int` (optional)
        Size of background boxes in pixels.
        Default: 64
    filter_size: `int` (optional)
        Filter size in boxes unit.
        Default: 3
    mask: array_like (optional)
        Boolean mask where 1 pixels are masked out in the background
        calculation.
        Default: `None`
    global_bkg: `bool`
        If True, the algorithm returns a single value for background
        and rms, else, a 2D image with local values will be returned.
    """
    d = _sep_fix_byte_order(data)
    bkg = sep.Background(d,
                         bw=box_size,
                         bh=box_size,
                         fw=filter_size,
                         fh=filter_size,
                         mask=mask)

    if global_bkg:
        return bkg.globalback, bkg.globalrms
    return bkg.back(), bkg.rms()
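For reference, a standalone sketch of the same global-versus-local choice, calling sep directly on synthetic data (array size and noise level are arbitrary):

import numpy as np
import sep

rng = np.random.default_rng(1)
data = rng.normal(100.0, 5.0, size=(256, 256))

bkg = sep.Background(data, bw=64, bh=64, fw=3, fh=3)

# Scalars, analogous to global_bkg=True above.
print(bkg.globalback, bkg.globalrms)

# Full-resolution maps, analogous to global_bkg=False above.
bkg_map, rms_map = bkg.back(), bkg.rms()
print(bkg_map.shape, rms_map.shape)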
Example #9
class SourceDetection:

    @staticmethod
    def background_reduction(imgPath):
        '''
        Perform a background reduction on a FITS image.
        Arguments:
            imgPath: Path to the FITS file that requires background reduction
        '''

        print('Opening fits file and subtracting background')
        start = time.time()

        # open the primary FITS file
        hdu1 = fits.open(imgPath)

        # obtain a signal-to-noise ratio of the image; can be used instead of
        # calculating a background for detection
        data = hdu1[1].data.astype(float)   # cast to native-byte-order float for sep
        hdr = hdu1[1].header
        var = hdu1[3].data
        sn_map = data / np.sqrt(var)

        # remove background from image - code from the sep tutorial:
        # https://sep.readthedocs.io/en/v1.0.x/tutorial.html
        bkg = sep.Background(data)
        bkg_image = bkg.back()
        bkg_rms = bkg.rms()

        print(f"Performed background reduction in {time.time() - start}")

        # Return the background-subtracted image
        subtracted_image = data - bkg
        return subtracted_image
Example #10
    def phot(self, image_path, x_coor, y_coor, aper_radius=3.0, gain=0.57):
        """
        Photometry of given coordinates.
        @param image_path: Path of FITS file.
        @type image_path: path
        @param x_coor: X coordinate of object
        @type x_coor: float
        @param y_coor: Y coordinate of object
        @type y_coor: float
        @param aper_radius: Aperture radius
        @type aper_radius: float
        @return: dict with "flag", "flux" and "fluxerr" keys
        """

        if image_path:
            hdu = fits.open(image_path)[0]
        else:
            print("FITS image has not been provided by the user!")
            raise SystemExit

        data = hdu.data.astype(float)

        bkg = sep.Background(data)
        # bkg_image = bkg.back()
        # bkg_rms = bkg.rms()
        data_sub = data - bkg

        flux, fluxerr, flag = sep.sum_circle(data_sub,
                                             x_coor,
                                             y_coor,
                                             aper_radius,
                                             err=bkg.globalrms,
                                             gain=gain)

        return {"flag": flag, "flux": flux, "fluxerr": fluxerr}
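For context, a standalone sketch of the same sum_circle aperture-photometry pattern on synthetic data rather than a FITS file (positions, radius and gain are arbitrary):

import numpy as np
import sep

rng = np.random.default_rng(2)
data = rng.normal(0.0, 1.0, size=(200, 200))
yy, xx = np.mgrid[0:200, 0:200]
data += 500.0 * np.exp(-((xx - 100)**2 + (yy - 100)**2) / (2 * 2.0**2))

bkg = sep.Background(data)
data_sub = data - bkg

flux, fluxerr, flag = sep.sum_circle(data_sub, [100.0], [100.0], 5.0,
                                     err=bkg.globalrms, gain=0.57)
print(flux[0], fluxerr[0], flag[0])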
Example #11
def test_background_boxsize():
    """Test that background works when boxsize is same as image"""

    ny, nx = 100, 100
    data = np.ones((ny, nx), dtype=np.float64)
    bkg = sep.Background(data, bh=ny, bw=nx, fh=1, fw=1)
    bkg.back()
Example #12
def find_sources_with_sep(img):
    """Return sources (x, y) sorted by brightness. Use SEP package.
    """
    import sep
    if isinstance(img, np.ma.MaskedArray):
        image = img.filled(fill_value=np.median(img)).astype('float32')
    else:
        image = img.astype('float32')

    bkg = sep.Background(image)
    thresh = 3. * bkg.globalrms
    try:
        sources = sep.extract(image - bkg.back(), thresh)
    except Exception as e:
        buff_message = 'internal pixel buffer full'
        if str(e)[0:26] == buff_message:
            sep.set_extract_pixstack(600000)
        try:
            sources = sep.extract(image - bkg.back(), thresh)
        except Exception as e:
            if str(e)[0:26] == buff_message:
                sep.set_extract_pixstack(900000)
                sources = sep.extract(image - bkg.back(), thresh)

    sources.sort(order='flux')
    return np.array([[asrc['x'], asrc['y']] for asrc in sources[::-1]])
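A quick usage sketch on synthetic data, with two fake stars injected so there is something to detect (values are arbitrary; numpy is assumed to be imported as np at module level, as the function requires):

import numpy as np

rng = np.random.default_rng(3)
img = rng.normal(0.0, 1.0, size=(256, 256)).astype('float32')
yy, xx = np.mgrid[0:256, 0:256]
for cx, cy, amp in [(60, 80, 40.0), (180, 120, 25.0)]:
    img += amp * np.exp(-((xx - cx)**2 + (yy - cy)**2) / (2 * 2.0**2))

xy = find_sources_with_sep(img)
print(xy[:2])   # brightest sources first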
Example #13
    def run_sep_extractor(self):
        # Not entirely sure what this is for but the likeliness of a memory
        # error occurring in call to sep.extract is inversely proportional to
        # the pixstack...
        sep.set_extract_pixstack(10000000)

        data = self.image_data_formatted
        # generate a background map of data
        bkg = sep.Background(data)
        # subtract the background map from data
        data_sub = data - bkg
        self.data_sub_bkg = data_sub
        threshold = 100  #2 * np.std(data_sub) + np.min(data_sub)
        star_objects = sep.extract(data_sub,
                                   threshold,
                                   minarea=9,
                                   mask=self.image_mask,
                                   gain=3,
                                   deblend_nthresh=32,
                                   deblend_cont=0.0005)

        pix_thresh = 100
        good_objects = star_objects[star_objects['flag'] < 8]
        good_objects = good_objects[good_objects['npix'] < pix_thresh]

        return good_objects
Example #14
def test_extract_with_noise_array():

    # Get some background-subtracted test data:
    data = np.copy(image_data)
    bkg = sep.Background(data, bw=64, bh=64, fw=3, fh=3)
    bkg.subfrom(data)

    # Ensure that extraction with constant noise array gives the expected
    # result. We have to use filter_kernel=None here because the results are *not*
    # the same when convolution is on! This is because the noise map is
    # convolved. Near edges, the convolution doesn't adjust for pixels
    # off edge boundaries. As a result, the convolved noise map is not
    # all ones.
    objects = sep.extract(data, 1.5*bkg.globalrms, filter_kernel=None)
    objects2 = sep.extract(data, 1.5*bkg.globalrms, err=np.ones_like(data),
                           filter_kernel=None)
    names_to_remove = ['errx2', 'erry2', 'errxy']
    names_to_keep = [i for i in objects.dtype.names if i not in names_to_remove]
    objects = objects[names_to_keep]
    objects2 = objects2[names_to_keep]

    assert_equal(objects, objects2)

    # Less trivial test where thresh is realistic. Still a flat noise map.
    noise = bkg.globalrms * np.ones_like(data)
    objects2 = sep.extract(data, 1.5, err=noise, filter_kernel=None)

    names_to_remove = ['errx2', 'erry2', 'errxy']
    names_to_keep = [i for i in objects.dtype.names if i not in names_to_remove]
    objects = objects[names_to_keep]
    objects2 = objects2[names_to_keep]

    assert_equal(objects, objects2)
Example #15
def globalscalenoise(cube, outcube, memmap=False):
    """
    Read a cube and compute a global scaling factor to the variance
    such that the variance in the data is consistent with the 
    variance extension. The scaling factor is calculated in regions
    of the spectrum free from skylines.
    
    """

    #Read cube
    hdu = fits.open(cube, memmap=memmap)

    data = hdu[1].data
    std = np.sqrt(hdu[2].data)

    nz, ny, nx = np.shape(data)

    wave = hdu[1].header['CRVAL3'] + np.arange(nz) * hdu[1].header['CD3_3']

    #compress into image
    image = np.nanmedian(data, axis=0)
    nx, ny = image.shape

    #mask edges
    edges = np.isfinite(image)
    badmask = np.zeros((nx, ny)) + 1
    badmask[edges] = 0.0
    badmask = ndimage.gaussian_filter(badmask, 1.5)
    badmask[np.where(badmask > 0)] = 1.0

    #mask sources
    bkg = sep.Background(image, mask=badmask)
    thresh = 1.5 * bkg.globalrms
    segmap = np.zeros((nx, ny))
    objects, segmap = sep.extract(image,
                                  thresh,
                                  segmentation_map=True,
                                  minarea=10,
                                  clean=True,
                                  mask=badmask)
    badmask[np.where(segmap > 0)] = 1.0

    tonan = (badmask > 0)
    badmask[tonan] = np.nan
    badmask[np.logical_not(tonan)] = 1

    mask3d = np.broadcast_to(badmask, (nz, ) + badmask.shape)
    fsig = data / std * mask3d

    fsig_1d = np.nanstd(fsig, axis=(1, 2))

    okwave = ((wave > 4700) & (wave < 5800)) | ((wave > 6600) & (wave < 6800))

    #Average does not like nans, mask them out
    global_offset = np.nanmedian(fsig_1d[okwave])

    hdu[2].data *= global_offset**2
    hdu[2].header['VARSCALE'] = global_offset**2

    hdu.writeto(outcube, overwrite=True)
Example #16
 def do_stage(self, image):
     # TODO: Try LOWESS for hole filling
     # TODO: fix bug that background is subtracting
     indices_to_interpolate = np.logical_or(image.traces > 0, image.mask)
     image.background = sep.Background(image.data,
                                       mask=indices_to_interpolate).back()
     return image
Example #17
 def sky(self,method='sep',rin=None,rout=None):
     """ (Re)calculate the sky."""
     # Remove the current best-fit model
     resid = self.image.data-self.modelim  # remove model
     # SEP smoothly varying background
     if method=='sep':
         bw = np.maximum(int(self.nx/10),64)
         bh = np.maximum(int(self.ny/10),64)
         bkg = sep.Background(resid, mask=None, bw=bw, bh=bh, fw=3, fh=3)
         self.skyim = bkg.back()
         # Calculate sky value for each star
         #  use center position
         self.starsky[:] = self.skyim[np.round(self.starycen).astype(int),np.round(self.starxcen).astype(int)]
     # Annulus aperture
     elif method=='annulus':
         if rin is None:
             rin = self.psf.fwhm()*1.5
         if rout is None:
             rout = self.psf.fwhm()*2.5
         positions = list(zip(self.starxcen,self.starycen))
         annulus = CircularAnnulus(positions,r_in=rin,r_out=rout)
         for i in range(self.nstars):
             annulus_mask = annulus[i].to_mask(method='center')
             annulus_data = annulus_mask.multiply(resid,fill_value=np.nan)
             data = annulus_data[(annulus_mask.data>0) & np.isfinite(annulus_data)]
             mean_sigclip, median_sigclip, _ = sigma_clipped_stats(data,stdfunc=dln.mad)
             self.starsky[i] = mean_sigclip
         if hasattr(self,'skyim') is False:
             self.skyim = np.zeros(self.image.shape,float)
         if self.skyim is None:
             self.skyim = np.zeros(self.image.shape,float)
         self.skyim += np.median(self.starsky)
     else:
         raise ValueError("Sky method "+method+" not supported")
Example #18
def backgroundSubtraction(image,method="edge",width=10,verbose=True):
    """
    backgroundSubtraction(image,method="edge",width=10,verbose=True)
    
    image: input image (numpy.array)
    method : in ["edge", "sep"]
        edge : background = median of edge "width" pixels around the image
        sep  : sourceExtractor Background method
    width : see 'method'
    verbose  : boolean, triggers verbose prints
    
    """
    if method.lower().find('edge') >= 0:
        edge = arrayEdge1d(image,width=width)
        result = image.copy() - np.median(edge)
        if verbose:
            print (f"Median Background level: {np.median(edge)}")
        return result, edge
    elif method.lower().find('sep') >= 0 or method.lower().find('sex') >= 0:
        sep_background = sep.Background(image)        # Spatially varying background        
        sep_backgroundImage = sep_background.back()   # Evaluate background as 2D array (same dimensions as image)
        result = image - sep_backgroundImage          # Subtract the background
        if verbose:
            print (f"Median Background level: {np.median(sep_backgroundImage)}")
        return result, sep_backgroundImage
    else:
        print (f"WARNING: method must be in ['edge','sep']")
    return
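A quick usage sketch for the 'sep' branch on synthetic data (the 'edge' branch additionally needs the arrayEdge1d helper, which is not shown here; numpy and sep are assumed to be imported at module level):

import numpy as np

rng = np.random.default_rng(4)
img = rng.normal(200.0, 10.0, size=(300, 300))

sub, bkg_img = backgroundSubtraction(img, method="sep", verbose=True)
print(np.median(sub), bkg_img.shape)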
Example #19
    def _mask(self, hdu, sci_ext):
        im = hdu[sci_ext].data + 0
        try:
            mask = hdu['mask'].data.astype(bool)
            mask[im < DATA_FLOOR] = True
        except KeyError:
            opts = dict(bw=64, bh=64, fw=3, fh=3)
            mask = np.zeros(im.shape, bool)
            for i in range(2):
                mask[im < DATA_FLOOR] = True
                bkg = sep.Background(im, mask=mask, **opts)
                objects, mask = sep.extract(im - bkg, 2, err=bkg.globalrms,
                                            segmentation_map=True)
                mask = mask != 0

        sources = mask.copy()
        mask[im < DATA_FLOOR] = True

        # unmask objects near target
        lbl, n = nd.label(mask)
        try:
            cen = hdu['sci'].header['tgty'], hdu['sci'].header['tgtx']
            for m in np.unique(lbl[cen[0]-2:cen[0]+3, cen[1]-2:cen[1]+3]):
                mask[lbl == m] = False
        except KeyError:
            pass

        # add nans
        mask |= ~np.isfinite(im)
        return sources, mask
Example #20
def run_source_extractor(N, draw_figures=False):
    gal_params_file = os.path.join('params', 'gal_sim_params.txt')
    real_params = np.loadtxt(gal_params_file)
    position_data = np.zeros((POS_NUM_GAL * N, 2))
    num_found_data = np.zeros(N)

    for i in range(N):

        fits_file_name = os.path.join('blends', 'single_blend%d.fits' % i)
        data = fits.getdata(fits_file_name)
        data = data.byteswap(inplace=True).newbyteorder()

        bkg = sep.Background(data)
        # subtract background noise
        data_sub = data - bkg

        objects = sep.extract(data_sub, 1.5, err=bkg.globalrms)

        if (draw_figures):
            draw_figure(data_sub, objects)

        for j in range(len(objects)):
            position_data[2 * i + j][0] = objects['x'][j]
            position_data[2 * i + j][1] = objects['y'][j]

        num_found_data[i] = len(objects)

    if (not draw_figures):
        pos_file_name = os.path.join('params', 'sep_positions.txt')
        num_file_name = os.path.join('params', 'sep_num_found.txt')
        np.savetxt(pos_file_name, np.asarray(position_data))
        np.savetxt(num_file_name, num_found_data)
        print('updated training data')
Example #21
def genSegmap(cutoutName):
    '''Create segmentation image using the sep SExtractor module.'''
    cutoutData = fits.getdata(cutoutName).astype(float)
    # filter kernel
    filter_kernel = np.loadtxt(
        f'{realsim_dir}/Sources/utils/sdss-cfg/gauss_3.0_7x7.conv', skiprows=2)
    # use std of full image as detection threshold
    guess_rms = np.std(cutoutData)
    # mask all sources above std for background statistics
    mask = ((cutoutData - np.median(cutoutData)) > guess_rms)
    # https://github.com/kbarbary/sep/issues/33: convert to float
    # bkg object which includes back() and rms() methods
    bkg = sep.Background(cutoutData, mask=mask, bw=32, bh=32, fw=3, fh=3)
    # run sep.extract() on image
    objCat, segmap = sep.extract(cutoutData - bkg.back(),
                                 thresh=1.0,
                                 err=bkg.rms(),
                                 mask=None,
                                 minarea=5,
                                 filter_kernel=filter_kernel,
                                 filter_type='conv',
                                 deblend_nthresh=32,
                                 deblend_cont=0.001,
                                 clean=True,
                                 clean_param=1.0,
                                 segmentation_map=True)
    return segmap
Example #22
def sep_extract(data, threshold=3):
    ''' Extract sources from an image using SEP.

    Parameters
    ==========
    data : 2d ndarray
        Image containing the sources
    threshold : float
        The threshold value for detection, in number of sigma.

    Returns
    =======
    sources : np.recarray
        A list of sources, as returned by sep.extract, and ordered by flux.
        See documentation of sep.extract for a description of the fields.
    '''
    if isinstance(data, np.ma.MaskedArray):
        image = data.filled(fill_value=np.median(data)).astype(np.float32)
    else:
        image = data.astype(np.float32)
    bkg = sep.Background(image)
    thresh = threshold * bkg.globalrms
    sources = sep.extract(image - bkg.back(), thresh)
    sources.sort(order='flux')
    # sources = sources.view(np.recarray)
    return sources
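A rough usage sketch on synthetic data with one bright Gaussian blob injected; threshold=3 is the function's default (numpy and sep are assumed to be imported at module level):

import numpy as np

rng = np.random.default_rng(5)
img = rng.normal(0.0, 1.0, size=(128, 128))
yy, xx = np.mgrid[0:128, 0:128]
img += 50.0 * np.exp(-((xx - 64)**2 + (yy - 64)**2) / (2 * 3.0**2))

sources = sep_extract(img, threshold=3)
print(len(sources), sources['x'][-1], sources['y'][-1])   # brightest last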
Example #23
def meas_back(img, backsize, backffrac=0.5, mask=None, sub_from_img=True):
    """
    Measure the sky background of image.

    Parameters
    ----------
    img : ndarray
        2D numpy array of image.
    backsize : int
        Size of background boxes in pixels.
    backffrac : float, optional
        The fraction of background box size for the
        filter size for smoothing the background.
    mask : ndarray, optional
        Mask array for pixels to exclude from background
        estimation.
    sub_from_img : bool, optional
        If True, also return background subtracted image.

    Returns
    -------
    bkg : sep.Background object
       See SEP documentation for methods & attributes.
    img_bsub : ndarray, if sub_from_img is True
    """
    img = _byteswap(img)
    mask = mask if mask is None else mask.astype(bool)
    bw = bh = backsize
    fw = fh = int(backffrac * backsize)
    bkg = sep.Background(img, mask=mask, bw=bw, bh=bh, fw=fw, fh=fh)
    if sub_from_img:
        bkg.subfrom(img)
        return bkg, img
    else:
        return bkg
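For reference, a standalone sketch of the in-place bkg.subfrom pattern used above, on synthetic native-byte-order data (box and filter sizes are arbitrary):

import numpy as np
import sep

rng = np.random.default_rng(6)
img = rng.normal(50.0, 4.0, size=(256, 256))

bkg = sep.Background(img, bw=64, bh=64, fw=3, fh=3)
bkg.subfrom(img)                       # subtract the background map in place
print(bkg.globalback, img.mean())      # the mean should now be close to zero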
Example #24
        def compare_image(the_image):
            """Return the fraction of sources found in the reference image"""
            # pixel comparison is not good, doesn't work. Compare catalogs.
            if isinstance(the_image, np.ma.MaskedArray):
                full_algn = the_image.filled(fill_value=np.median(the_image))\
                    .astype('float32')
            else:
                full_algn = the_image.astype('float32')
            # full_algn[the_image == 0] = np.median(the_image)
            import sep
            bkg = sep.Background(full_algn)
            thresh = 3.0 * bkg.globalrms
            allobjs = sep.extract(full_algn - bkg.back(), thresh)
            allxy = np.array([[obj['x'], obj['y']] for obj in allobjs])

            from scipy.spatial import KDTree
            ref_coordtree = KDTree(self.star_ref_pos)

            # Compare here srcs list with self.star_ref_pos
            num_sources = 0
            for asrc in allxy:
                found_source = ref_coordtree.query_ball_point(asrc, 3)
                if found_source:
                    num_sources += 1
            fraction_found = float(num_sources) / float(len(allxy))
            return fraction_found
Example #25
def genSegmap(cutoutName):
    '''Create segmentation image using the sep SExtractor module.'''
    cutoutData = fits.getdata(cutoutName)
    # filter kernel
    filter_kernel = np.loadtxt(
        '{}Sources/utils/CFIS-cfg/gauss_3.0_7x7.conv'.format(RSDIR),
        skiprows=2)
    # use std of full image as detection threshold
    guess_rms = np.std(cutoutData)
    # mask all sources above std for background statistics
    mask = (cutoutData > guess_rms)
    # https://github.com/kbarbary/sep/issues/23
    cutoutData_sw = cutoutData.byteswap(True).newbyteorder()
    # bkg object which includes back() and rms() methods
    bkg = sep.Background(cutoutData_sw, mask=mask, bw=32, bh=32, fw=3, fh=3)
    # run sep.extract() on image
    objCat, segmap = sep.extract(cutoutData_sw,
                                 thresh=1.0,
                                 err=bkg.rms(),
                                 mask=None,
                                 minarea=5,
                                 filter_kernel=filter_kernel,
                                 filter_type='conv',
                                 deblend_nthresh=32,
                                 deblend_cont=0.001,
                                 clean=True,
                                 clean_param=1.0,
                                 segmentation_map=True)
    return segmap
Example #26
def background_subtract(data, plot_bkg=False, return_bkg=False):
    """Background subtract the driftscan FFI

    Performs these steps:
    - Computes a mask based on the top 95th percentile of values
    - Estimate background with sep

    Args:
        data (numpy.ndarray): 2D numpy of one FFI channel
        plot_bkg (bool): Flag for whether to plot the background
                         default = False
        return_bkg (bool): Flag for whether to return the background
                         default = False

    Returns:
        background-subtracted FFI or optionally
        tuple of background-subtracted FFI, and background estimate
    """
    data = data.copy(order='C')
    mask = data > np.percentile(data, 95)
    bkg = sep.Background(data, mask=mask)

    if plot_bkg:
        bkg_image = bkg.back()
        # show the background
        plt.imshow(bkg_image,
                   interpolation='nearest',
                   cmap='gray',
                   origin='lower')
        plt.colorbar()

    if return_bkg:
        return (data - bkg, bkg)
    else:
        return data - bkg
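A quick usage sketch on a synthetic frame standing in for an FFI channel (size and noise level are arbitrary; numpy and sep are assumed to be imported at module level):

import numpy as np

rng = np.random.default_rng(7)
ffi = rng.normal(1000.0, 30.0, size=(512, 512))

ffi_sub, bkg = background_subtract(ffi, return_bkg=True)
print(np.median(ffi_sub), bkg.globalback)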
Example #27
def fit_noise(data, n_stamps=1, mode='iqr', fname=''):
    """Find the standard deviation of the image background; returns standard deviation, median"""

    median_small = np.zeros([n_stamps, n_stamps])
    std_small = np.zeros([n_stamps, n_stamps])
    if mode == 'sep':
        background = sep.Background(np.ascontiguousarray(data.data).byteswap().newbyteorder())
        median = background.back()
        std = background.rms()
    else:
        for y_stamp in range(n_stamps):
            for x_stamp in range(n_stamps):
                y_index = [y_stamp * data.shape[0] // n_stamps, (y_stamp + 1) * data.shape[0] // n_stamps]
                x_index = [x_stamp * data.shape[1] // n_stamps, (x_stamp + 1) * data.shape[1] // n_stamps]
                stamp_data = data[y_index[0]: y_index[1], x_index[0]: x_index[1]].compressed()
                if mode == 'iqr':
                    quartile25, median, quartile75 = np.nanpercentile(stamp_data, (25, 50, 75))
                    median_small[y_stamp, x_stamp] = median
                    # 0.741301109 is a parameter that scales iqr to std
                    std_small[y_stamp, x_stamp] = 0.741301109 * (quartile75 - quartile25)
                elif mode == 'mad':
                    median = np.median(stamp_data)
                    absdev = np.abs(stamp_data - median)
                    mad = np.median(absdev)
                    median_small[y_stamp, x_stamp] = median
                    std_small[y_stamp, x_stamp] = 1.4826 * mad

        median = scipy.ndimage.zoom(median_small, [data.shape[0] / float(n_stamps), data.shape[1] / float(n_stamps)])
        std = scipy.ndimage.zoom(std_small, [data.shape[0] / float(n_stamps), data.shape[1] / float(n_stamps)])

    logging.info('{0}Global median is {1}'.format(fname + ':', np.mean(median)))
    logging.info('{0}Global standard deviation is {1}'.format(fname + ':', np.mean(std)))
    return std, median
Example #28
def identify_objects(image_data, nsigma, min_area, deb_n_thresh, deb_cont,
                     param_dict):
    '''
    This function performs source identification and generates a segmentation map,
    which is then used for masking the sources.
    :param image_data: provide the image data, which is a mxn numpy nd array. e.g., fits.getdata('image_file_name')
    :param nsigma: source detection significance.
    :param min_area: minimum area to be considered as a source
    :param deb_n_thresh: number of threshold values for deblending routine. e.g., 32, 64 etc.
    :param deb_cont: deblend minimum contrast ratio (see source extraction or SEP python page).
    :param param_dict: a dictionary containing the
    'sep_filter_kwarg' = filter keyword argument, which can be a 'tophat', 'gauss', or 'boxcar'
    'sep_filter_size' = the 'size' of the filter. In case of gaussian, it is the FWHM of the gaussian. For tophat, it
    is the radius of the tophat filter. For boxcar, it is the side length of the 2D Box.
    :return: objects: a numpy array of the objects, ordered as per their segmentation values in the segmap.
    segmap: a segmentation map, where each source is marked with unique source identification number.
    '''

    # Note, this whole routine uses a Python-based source identification module named SEP (Barbary et al., 2016)

    # Unpack the filter keyword and its size from the parameter dictionary.
    filter_kwarg = param_dict['sep_filter_kwarg']
    filter_size = float(param_dict['sep_filter_size'])

    # Look at the SEP webpage, this is suggested for working of SEP.
    byte_swaped_data = image_data.byteswap().newbyteorder()

    # SEP estimates a global background.
    global_bkg = sep.Background(byte_swaped_data)

    # background subtracted data = original data - estimated global background.
    bkg_subtracted = byte_swaped_data - global_bkg

    # In the following block, we check for the user's choice of filter and its size.
    # We define a kernel based on their choice.
    if filter_kwarg.lower() not in ['tophat', 'gauss', 'boxcar']:
        warnings.warn(
            'The filter %s is not supported as of yet, defaulting to tophat of radius 5'
            % filter_kwarg)
        source_kernel = Tophat2DKernel(5)
    elif filter_kwarg.lower() == 'tophat':
        source_kernel = Tophat2DKernel(filter_size)
    elif filter_kwarg.lower() == 'gauss':
        _gauss_sigma = filter_size * gaussian_fwhm_to_sigma  # gaussian_fwhm_to_sigma is a constant
        source_kernel = Gaussian2DKernel(_gauss_sigma)
    elif filter_kwarg.lower() == 'boxcar':
        source_kernel = Box2DKernel(filter_size)

    # Object detection and segmentation map generation.
    objects, segmap = sep.extract(bkg_subtracted,
                                  nsigma,
                                  err=global_bkg.globalrms,
                                  minarea=min_area,
                                  deblend_nthresh=deb_n_thresh,
                                  deblend_cont=deb_cont,
                                  segmentation_map=True,
                                  filter_kernel=source_kernel.array)

    return objects, segmap
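As a usage sketch, the param_dict described in the docstring might be built as below; identify_objects byteswaps its input (it expects FITS-style big-endian data), so the synthetic image is cast to big-endian first. All values are illustrative, and the module-level imports used by the function (sep, warnings, the astropy kernels) are assumed:

import numpy as np

rng = np.random.default_rng(8)
img = rng.normal(0.0, 1.0, size=(256, 256))
yy, xx = np.mgrid[0:256, 0:256]
img += 30.0 * np.exp(-((xx - 100)**2 + (yy - 150)**2) / (2 * 2.5**2))
img = img.astype('>f8')   # big-endian, as fits.getdata would return

params = {'sep_filter_kwarg': 'tophat', 'sep_filter_size': 4.0}
objects, segmap = identify_objects(img, nsigma=2.0, min_area=5,
                                   deb_n_thresh=32, deb_cont=0.005,
                                   param_dict=params)
print(len(objects), segmap.max())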
Example #29
def analyse_single_blend(idx, blend_image, cat, fig=False):
    # Compute the background
    bkg = sep.Background(blend_image)

    # Run detection with the 'cold' strategy
    source, segmap = run_sextractor(blend_image, bkg, SEXCONFIG["cold"])
    n_detect_cold = len(source["x"])
    n_detect_hot = 0

    if n_detect_cold < 2:
        # Rerun SExtractor with the 'hot' strategy
        source, segmap = run_sextractor(blend_image, bkg, SEXCONFIG["hot"])
        n_detect_hot = len(source["x"])

    n_detections = max(n_detect_cold, n_detect_hot)

    if n_detections == 0:
        return {}

    positions = np.hypot(source["x"] - X_CENTER, source["y"] - Y_CENTER)
    indx = positions.argsort().tolist()
    id_central = indx.pop(0)

    result = {}
    result["n_sources"] = n_detections

    if n_detections >= 2:
        # Once the central galaxy is found, order the remaining galaxies by flux
        flux_indx = np.argsort(source[indx]["flux"])
        id_companion = indx[flux_indx[-1]]  # brightest of the remaining sources

        result["flux_central"] = source[id_central]["flux"]
        result["flux_companion"] = source[id_companion]["flux"]

        if fig:
            plot_galaxy_with_markers(idx, blend_image, segmap,
                                     source[id_central], source[id_companion])
    else:
        # If a single object is detected even with the 'hot' strategy
        # assign that galaxy to the central or the companion depending
        # on the relative distance to it

        # x_companion = X_CENTER + full_cat["shift_x"][idx]
        # y_companion = Y_CENTER + full_cat["shift_y"][idx]

        # dist_to_companion = np.hypot(
        #     source["x"][0] - x_companion,
        #     source["y"][0] - y_companion
        # )
        dist_to_center = np.hypot(source["x"][0] - X_CENTER,
                                  source["y"][0] - Y_CENTER)

        # if dist_to_companion > dist_to_center:
        if dist_to_center < 0.5 * cat["g1_rad"][idx]:
            result["flux_central"] = source[id_central]["flux"]
        else:
            result["flux_companion"] = source[id_central]["flux"]

    return result
Example #30
def svm_clf(path):
    data_set = []
    data1 =fits.open(path)
    data = data1[0].data
    data = data[132:991, 132:991]

    # Detect the image background and obtain the background-subtracted image
    data = data.astype(np.float64)
    bkg = sep.Background(data, mask=None, bw=64, bh=64, fw=3, fh=3)
    data_sub = data - bkg  # background-subtracted (denoised) data
    objects = sep.extract(data_sub, 2.5, err=bkg.globalrms, deblend_nthresh=1)

    # Get the number of bright stars
    number = 0
    for i in range(len(objects)):
        # semi-major and semi-minor axes of the detected source
        a = objects['a'][i]
        b = objects['b'][i]
        a, b = max(a, b), min(a, b)
        # Control star size
        if a < 32 and b > 2.5:
            number = number + 1

    m1, s1 = np.mean(data_sub), np.std(data_sub)
    data_sub = data_sub.astype(np.uint16)

    # Obtain gray level co-occurrence matrix parameters
    gray = color.rgb2gray(data_sub)
    image = img_as_ubyte(gray)
    bins = np.array([0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 255])  # 16 gray-level bins
    inds = np.digitize(image, bins)
    max_value = inds.max() + 1
    matrix_coocurrence = greycomatrix(inds, [1], [0, np.pi / 4, np.pi / 2, 3 * np.pi / 4], levels=max_value,
                                      normed=False, symmetric=False)
    cons = np.sum(contrast_feature(matrix_coocurrence)) / 4
    diss = np.sum(dissimilarity_feature(matrix_coocurrence)) / 4
    homo = np.sum(homogeneity_feature(matrix_coocurrence)) / 4
    asmm = np.sum(asm_feature(matrix_coocurrence)) / 4
    ener = np.sum(energy_feature(matrix_coocurrence)) / 4
    corr = np.sum(correlation_feature(matrix_coocurrence)) / 4
    # Entropy calculation
    shan = shannon_entropy(image)
    data_set = [[m1, number, corr, s1, homo, shan, asmm, ener, cons]]

    # Load the saved model for prediction
    clf = joblib.load('./rf_clf.m')
    a = clf.predict(data_set)
    a = int(a[0])
    print(a)
    cnn_set = [path, m1, number, corr, s1, homo, shan, asmm, ener, cons, diss, a]
    return cnn_set