def findsources(image, cube, varima=None, check=False, output='./', spectra=False,
                helio=0, nsig=2., minarea=10., deblend_cont=0.0001, regmask=None,
                invregmask=False, fitsmask=None, clean=True, outspec='Spectra',
                marz=False, rphot=False, detphot=False, sname='MUSE'):
    """
    Take a detection image (collapse of a cube), or median of an RGB, or
    whatever you want (but aligned to the cube) and run sourceextractor.

    Use SEP utilities http://sep.readthedocs.org/en/stable/

    image        -> fits file of image to process
    cube         -> the cube used to extract spectra
    varima       -> the noise image corresponding to the science image (std), optional
    check        -> if true write a bunch of check images
    output       -> where to dump the output
    spectra      -> if True, extract spectra in VACUUM wave!!
    helio        -> pass additional heliocentric correction
    nsig         -> number of skyrms used for source id
    minarea      -> minimum area for extraction
    deblend_cont -> deblending contrast passed to sep.extract
    regmask      -> ds9 region file (image) of regions to be masked before
                    extraction [e.g. edges]
    invregmask   -> if True invert the mask (region defines good area)
    fitsmask     -> Fits file with good mask, overrides regmask
    clean        -> clean sources
    outspec      -> where to store output spectra
    marz         -> write spectra also in marz format (spectra needs to be true).
                    If set to numerical value, this is used as an r-band
                    magnitude limit.
    detphot      -> perform aperture photometry on the detection image and add
                    magnitudes to the catalogue
    rphot        -> perform r-band aperture photometry and add r-band
                    magnitudes to the catalogue
    sname        -> prefix for the source names. Default = MUSE

    Returns the sep object catalogue (numpy structured array), or -1 if the
    variance image does not match the science image dimensions.
    """

    import sep
    from astropy.io import fits
    from astropy import wcs
    from astropy import coordinates
    from astropy import units as u
    from astropy import table
    import numpy as np
    import os
    from mypython.ifu import muse_utils as utl
    from mypython.fits import pyregmask as msk
    from shutil import copyfile
    import glob

    #open image
    img = fits.open(image)
    header = img[0].header
    imgwcs = wcs.WCS(header)
    try:
        #this is ok for narrow band images
        data = img[1].data
    except Exception:
        #white cubex images
        data = img[0].data
    #sep requires native byte order; FITS data loads big-endian
    data = data.byteswap(True).newbyteorder()
    #grab effective dimension
    nex, ney = data.shape
    #close fits
    img.close()

    if (varima):
        var = fits.open(varima)
        try:
            datavar = var[1].data
        except Exception:
            datavar = var[0].data
        datavar = datavar.byteswap(True).newbyteorder()
        #grab effective dimension
        stdx, stdy = datavar.shape
        #close fits
        var.close()
        #Sanity check that the variance image matches the science image
        if (stdx != nex) or (stdy != ney):
            print("The noise image does not have the same dimensions as the science image")
            return -1

    #create bad pixel mask
    if (fitsmask):
        print("Using FITS image for badmask")
        hdumsk = fits.open(fitsmask)
        try:
            badmask = hdumsk[1].data
        except Exception:
            badmask = hdumsk[0].data
        badmask = badmask.byteswap(True).newbyteorder()
    elif (regmask):
        print("Using region file for badmask")
        Mask = msk.PyMask(ney, nex, regmask, header=img[0].header)
        for ii in range(Mask.nreg):
            Mask.fillmask(ii)
            if (ii == 0):
                badmask = Mask.mask
            else:
                badmask += Mask.mask
        badmask = 1. * badmask
    else:
        badmask = np.zeros((nex, ney))

    #invert the region mask if requested (region defines the good area)
    if (regmask) and (invregmask) and not (fitsmask):
        badmask = 1 - badmask

    if (check):
        print('Dumping badmask')
        hdumain = fits.PrimaryHDU(badmask, header=header)
        hdulist = fits.HDUList([hdumain])
        hdulist.writeto(output + "/badmask.fits", overwrite=True)

    #check background level, but do not subtract it
    print('Checking background levels')
    bkg = sep.Background(data, mask=badmask)
    print('Residual background level ', bkg.globalback)
    print('Residual background rms ', bkg.globalrms)

    if (check):
        print('Dumping sky...')
        #dump sky properties
        back = bkg.back()
        rms = bkg.rms()
        hdumain = fits.PrimaryHDU(back, header=header)
        hdubk = fits.ImageHDU(back)
        hdurms = fits.ImageHDU(rms)
        hdulist = fits.HDUList([hdumain, hdubk, hdurms])
        hdulist.writeto(output + "/skyprop.fits", overwrite=True)

    if (varima):
        #Use nsigma threshold and a pixel by pixel effective threshold
        #based on the variance map
        thresh = nsig
        objects, segmap = sep.extract(data, thresh, var=datavar,
                                      segmentation_map=True, minarea=minarea,
                                      clean=clean, mask=badmask,
                                      deblend_cont=deblend_cont,
                                      deblend_nthresh=32)
    else:
        #extracting sources at nsigma, use constant threshold
        thresh = nsig * bkg.globalrms
        objects, segmap = sep.extract(data, thresh,
                                      segmentation_map=True, minarea=minarea,
                                      clean=clean, mask=badmask,
                                      deblend_cont=deblend_cont,
                                      deblend_nthresh=32)

    print("Extracted {} objects... ".format(len(objects)))
    #1-based IDs, consistent with the id{N}.fits spectrum file names
    ids = np.arange(len(objects)) + 1

    if (spectra):
        if not os.path.exists(outspec):
            os.makedirs(outspec)

    if ((check) | (spectra)):
        #create a detection mask a'la cubex
        srcmask = np.zeros((data.shape[0], data.shape[1]))
        print('Generating spectra...')
        #loop over detections
        for nbj in ids:
            obj = objects[nbj - 1]
            #init mask
            #(np.bool was removed in NumPy 1.24; use the builtin bool)
            tmpmask = np.zeros((data.shape[0], data.shape[1]), dtype=bool)
            #fill this mask
            sep.mask_ellipse(tmpmask, obj['x'], obj['y'], obj['a'], obj['b'],
                             obj['theta'], r=2)
            #add in global mask
            srcmask = srcmask + tmpmask * nbj
            #verify conflicts, resolve using segmentation map
            if np.nanmax(srcmask) > nbj:
                blended = (srcmask > nbj)
                srcmask[blended] = segmap[blended]

        #Now loop again and extract spectra if required
        if (spectra):
            #Verify that the source mask has the same number of objects as
            #the object list
            if not len(np.unique(srcmask[srcmask > 0])) == len(objects):
                print("Mismatch between number of objects and number of spectra to extract.")
            for nbj in ids:
                savename = "{}/id{}.fits".format(outspec, nbj)
                #isolate this source in a 3D (1,ny,nx) binary mask
                tmpmask3d = np.zeros((1, data.shape[0], data.shape[1]))
                tmpmask3d[0, :, :] = srcmask[:, :]
                tmpmask3d[tmpmask3d != nbj] = 0
                tmpmask3d[tmpmask3d > 0] = 1
                tmpmask3d = np.array(tmpmask3d, dtype=bool)
                utl.cube2spec(cube, None, None, None, write=savename,
                              shape='mask', helio=helio, mask=tmpmask3d,
                              tovac=True)

    if (check):
        print('Dumping source mask...')
        hdumain = fits.PrimaryHDU(srcmask, header=header)
        hdubk = fits.ImageHDU(srcmask)
        hdulist = fits.HDUList([hdumain, hdubk])
        hdulist.writeto(output + "/source.fits", overwrite=True)
        print('Dumping segmentation map')
        hdumain = fits.PrimaryHDU(segmap, header=header)
        hdubk = fits.ImageHDU(segmap)
        hdulist = fits.HDUList([hdumain, hdubk])
        hdulist.writeto(output + "/segmap.fits", overwrite=True)

    #Generate source names using coordinates and name prefix
    ra, dec = imgwcs.wcs_pix2world(objects['x'], objects['y'], 0)
    coord = coordinates.FK5(ra * u.degree, dec * u.degree)
    rastr = coord.ra.to_string(u.hour, precision=2, sep='', pad=True)
    decstr = coord.dec.to_string(u.degree, precision=1, sep='',
                                 alwayssign=True, pad=True)
    name = [sname + 'J{0}{1}'.format(rastr[k], decstr[k])
            for k in range(len(rastr))]

    #Generate a column to be used to flag the sources to be used in the
    #analysis. True for all sources at this point
    use_source = np.ones_like(name, dtype=bool)

    #write source catalogue
    print('Writing catalogue..')
    tab = table.Table(objects)
    tab.add_column(table.Column(dec), 0, name='DEC')
    tab.add_column(table.Column(ra), 0, name='RA')
    tab.add_column(table.Column(name), 0, name='name')
    tab.add_column(table.Column(ids), 0, name='ID')
    tab.add_column(table.Column(use_source), name='use_source')
    tab.write(output + '/catalogue.fits', overwrite=True)

    if (detphot):
        #Run source photometry on the extraction image
        whiteimg, whitevar, whitewcsimg = utl.cube2img(cube, write=output + '/Image_white.fits')
        phot_det = sourcephot(output + '/catalogue.fits',
                              output + '/Image_white.fits',
                              output + '/segmap.fits', image, zpab=28.35665)
        phot_det.add_column(table.Column(name), 1, name='name')
        #append the photometry as an extra extension of the catalogue
        tbhdu = fits.open(output + '/catalogue.fits')
        tbhdu.append(fits.BinTableHDU(phot_det))
        tbhdu[-1].header['PHOTBAND'] = 'Detection'
        tbhdu.writeto(output + '/catalogue.fits', overwrite=True)

    #rband photometry
    if (rphot):
        rimg, rvar, rwcsimg = utl.cube2img(cube, filt=129,
                                           write=output + '/Image_R.fits')
        phot_r = sourcephot(output + '/catalogue.fits',
                            output + '/Image_R.fits',
                            output + '/segmap.fits', image)
        phot_r.add_column(table.Column(name), 1, name='name')
        tbhdu = fits.open(output + '/catalogue.fits')
        tbhdu.append(fits.BinTableHDU(phot_r))
        tbhdu[-1].header['PHOTBAND'] = 'SDSS_r'
        tbhdu.writeto(output + '/catalogue.fits', overwrite=True)

    if ((marz) & (spectra)):
        #if marz is True but no magnitude limit set, create marz file for
        #whole catalogue; a numeric marz (> 10) acts as an r-band mag limit
        if marz > 10 and (rphot):
            #Requires testing
            hdu = fits.open(output + '/catalogue.fits')
            hdu[1].data['use_source'][hdu[2].data['MAGAP'] > marz] = False
            hdu.writeto(output + '/catalogue.fits', overwrite=True)
            marz_file(output + '/catalogue.fits', outspec, output, r_lim=marz)
        else:
            marz_file(output + '/catalogue.fits', outspec, output)

    print('All done')
    return objects
def findsources(image, cube, check=False, output='.', spectra=False, helio=0,
                nsig=2., minarea=10., regmask=None, clean=True,
                outspec='Spectra', marz=False, rphot=False, sname='MUSE'):
    """
    Take a detection image (collapse of a cube), or median of an RGB, or
    whatever you want (but aligned to the cube) and run sourceextractor.

    Use SEP utilities http://sep.readthedocs.org/en/stable/

    NOTE(review): this is a legacy duplicate of the findsources defined
    earlier in this module; being defined later, it shadows that version at
    import time — confirm which one is intended and remove the other.

    image   -> fits file of image to process
    check   -> if true write a bunch of check images
    output  -> where to dump the output
    cube    -> the cube used to extract spectra
    spectra -> if True, extract spectra in VACUUM wave!!
    helio   -> pass additional heliocentric correction
    nsig    -> number of skyrms used for source id
    minarea -> minimum area for extraction
    regmask -> ds9 region file (image) of regions to be masked before
               extraction [e.g. edges]
    clean   -> clean sources
    outspec -> where to store output spectra
    marz    -> write spectra also in marz format (spectra needs to be true).
               If set to numerical value, this is used as an r-band
               magnitude limit.
    rphot   -> perform r-band aperture photometry and add r-band magnitudes
               to the catalogue
    sname   -> prefix for the source names. Default = MUSE
    """

    import sep
    from astropy.io import fits
    from astropy import wcs
    from astropy import coordinates
    from astropy import units as u
    from astropy import table
    import numpy as np
    import os
    try:
        from mypython.ifu import muse_utils as utl
        from mypython.fits import pyregmask as msk
    except ImportError:
        #fallback for older package layouts; the astropy re-import restores
        #the fits name clobbered by "from mypython import fits"
        from mypython import ifu
        from ifu import muse_utils as utl
        from mypython import fits
        from fits import pyregmask as msk
        from astropy.io import fits
    from shutil import copyfile
    import glob

    #open image
    img = fits.open(image)
    try:
        header = img[1].header
    except Exception:
        header = img[0].header
    imgwcs = wcs.WCS(header)
    try:
        #this is ok for narrow band images
        data = img[1].data
    except Exception:
        #white cubex images
        data = img[0].data
    #sep requires native byte order; FITS data loads big-endian
    data = data.byteswap(True).newbyteorder()
    #grab effective dimension
    nex, ney = data.shape
    #close fits
    img.close()

    #create bad pixel mask
    if (regmask):
        Mask = msk.PyMask(ney, nex, regmask, header=img[0].header)
        for ii in range(Mask.nreg):
            Mask.fillmask(ii)
            if (ii == 0):
                badmask = Mask.mask
            else:
                badmask += Mask.mask
        badmask = 1. * badmask
    else:
        badmask = np.zeros((nex, ney))

    if (check):
        print('Dumping badmask')
        hdumain = fits.PrimaryHDU(badmask, header=header)
        hdulist = fits.HDUList([hdumain])
        hdulist.writeto(output + "/badmask.fits", overwrite=True)

    #check background level, but do not subtract it
    print('Checking background levels')
    bkg = sep.Background(data, mask=badmask)
    print('Residual background level ', bkg.globalback)
    print('Residual background rms ', bkg.globalrms)

    if (check):
        print('Dumping sky...')
        #dump sky properties
        back = bkg.back()
        rms = bkg.rms()
        hdumain = fits.PrimaryHDU(back, header=header)
        hdubk = fits.ImageHDU(back)
        hdurms = fits.ImageHDU(rms)
        hdulist = fits.HDUList([hdumain, hdubk, hdurms])
        hdulist.writeto(output + "/skyprop.fits", overwrite=True)

    #extracting sources at nsigma
    thresh = nsig * bkg.globalrms
    # segmap = np.zeros((header["NAXIS1"],header["NAXIS2"]))
    objects, segmap = sep.extract(data, thresh, segmentation_map=True,
                                  minarea=minarea, clean=clean, mask=badmask,
                                  deblend_cont=0.0001)
    print("Extracted {} objects... ".format(len(objects)))

    if (spectra):
        if not os.path.exists(outspec):
            os.makedirs(outspec)

    if ((check) | (spectra)):
        #create a detection mask alla cubex
        srcmask = np.zeros((1, data.shape[0], data.shape[1]))
        nbj = 1
        print('Generating spectra...')
        #loop over detections
        for obj in objects:
            #init mask
            #(np.bool was removed in NumPy 1.24; use the builtin bool)
            tmpmask = np.zeros((data.shape[0], data.shape[1]), dtype=bool)
            tmpmask3d = np.zeros((1, data.shape[0], data.shape[1]), dtype=bool)
            #fill this mask
            sep.mask_ellipse(tmpmask, obj['x'], obj['y'], obj['a'], obj['b'],
                             obj['theta'], r=2)
            tmpmask3d[0, :, :] = tmpmask[:, :]
            srcmask = srcmask + tmpmask3d * nbj
            if (spectra):
                savename = "{}/id{}.fits".format(outspec, nbj)
                if not os.path.exists(savename):
                    utl.cube2spec(cube, obj['x'], obj['y'], None,
                                  write=savename, shape='mask', helio=helio,
                                  mask=tmpmask3d, tovac=True)
                else:
                    print("{} already exists. Skipping it...".format(savename))
            #go to next
            nbj = nbj + 1

    if (check):
        print('Dumping source mask...')
        hdumain = fits.PrimaryHDU(srcmask, header=header)
        hdubk = fits.ImageHDU(srcmask)
        hdulist = fits.HDUList([hdumain, hdubk])
        hdulist.writeto(output + "/source.fits", overwrite=True)
        print('Dumping segmentation map')
        hdumain = fits.PrimaryHDU(segmap, header=header)
        hdubk = fits.ImageHDU(segmap)
        hdulist = fits.HDUList([hdumain, hdubk])
        hdulist.writeto(output + "/segmap.fits", overwrite=True)

    #Generate source names using coordinates and name prefix
    ra, dec = imgwcs.wcs_pix2world(objects['x'], objects['y'], 0)
    coord = coordinates.FK5(ra * u.degree, dec * u.degree)
    rastr = coord.ra.to_string(u.hour, precision=2, sep='')
    decstr = coord.dec.to_string(u.degree, precision=1, sep='',
                                 alwayssign=True)
    name = [sname + 'J{0}{1}'.format(rastr[k], decstr[k])
            for k in range(len(rastr))]
    ids = np.arange(len(name))

    #write source catalogue
    print('Writing catalogue..')
    tab = table.Table(objects)
    tab.add_column(table.Column(name), 0, name='name')
    tab.add_column(table.Column(ids), 0, name='ID')
    tab.write(output + '/catalogue.fits', overwrite=True)

    #cols = fits.ColDefs(objects)
    #cols.add_col(fits.Column(name, format='A'))
    #tbhdu = fits.BinTableHDU.from_columns(cols)
    #tbhdu.writeto(output+'/catalogue.fits',clobber=True)

    #rband photometry
    if (rphot):
        if not os.path.exists(output + '/Image_R.fits'):
            rimg, rvar, rwcsimg = utl.cube2img(cube, filt=129,
                                               write=output + '/Image_R.fits')
        phot_r = sourcephot(output + '/catalogue.fits',
                            output + '/Image_R.fits',
                            output + '/segmap.fits', image)
        phot_r.add_column(table.Column(name), 1, name='name')
        tbhdu = fits.open(output + '/catalogue.fits')[1]
        tbhdu2 = fits.BinTableHDU(phot_r)
        hdulist = fits.HDUList([fits.PrimaryHDU(), tbhdu, tbhdu2])
        hdulist.writeto(output + '/catalogue.fits', overwrite=True)

    if ((marz) & (spectra)):
        #if marz is True but no magnitude limit set, create marz file for
        #whole catalogue
        if marz == True:
            marz_file(image, output + '/catalogue.fits', outspec, output)
        else:
            #create folder and catalogue with just sources brighter than
            #mag limit
            if os.path.exists(output + '/spectra_r' + str(marz)):
                files = glob.glob(output + '/spectra_r' + str(marz) + '/*')
                for f in files:
                    os.remove(f)
            else:
                os.mkdir(output + '/spectra_r' + str(marz))
            mag = phot_r['MAGSEG']
            #add in x y pixels from original catalogue
            x, y = tbhdu.data['x'], tbhdu.data['y']
            phot_r['x'], phot_r['y'] = x, y
            #add in ra,dec
            img = fits.open(image)
            mywcs = wcs.WCS(img[0].header)
            ra, dec = mywcs.all_pix2world(x, y, 0)
            phot_r['RA'] = ra
            phot_r['dec'] = dec
            #copy the spectra of sources brighter than the mag limit
            for i in range(len(mag)):
                if mag[i] < marz:
                    copyfile((output + '/spectra/id' + str(i + 1) + '.fits'),
                             (output + '/spectra_r' + str(marz) + '/id' + str(i + 1) + '.fits'))
            #Write photometry catalog with objects below magnitude limit
            #excluded
            phot_r.remove_rows(phot_r['MAGSEG'] > marz)
            catalogue_lim_name = (output + '/catalogue_r' + str(marz) + '.fits')
            if os.path.exists(catalogue_lim_name):
                os.remove(catalogue_lim_name)
            phot_r.write(catalogue_lim_name)
            outspec = output + '/spectra_r' + str(marz)
            marz_file(image, output + '/catalogue_r' + str(marz) + '.fits',
                      outspec, output, r_lim=marz)

    print('All done')
    return objects
def dataquality(cubeslist, maskslist):
    """
    Perform a bunch of QA checks to assess the final data quality of the
    reduction.

    cubeslist -> file listing the individual exposure cubes
                 (paths of the form OB/Proc/..., parsed by '/' and '_')
    maskslist -> list of masks (currently unused here — TODO confirm)

    Writes diagnostic products and QA/QAfile.pdf in a local QA folder.
    """

    import os
    import numpy as np
    from astropy.io import fits
    from mypython.fits import pyregmask as msk
    from mypython.ifu import muse_utils as mutil
    from mypython.ifu import muse_source as msrc
    from matplotlib.backends.backend_pdf import PdfPages
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    from astropy.stats import sigma_clipped_stats
    try:
        #NOTE(review): properties_table was dropped in later photutils
        #releases, which makes this import fail there — verify the pinned
        #photutils version
        from photutils import CircularAperture, aperture_photometry,\
            data_properties, properties_table, centroids
    except:
        print("To run checks need photutils package")
        return

    print("Perform QA checks...")

    #make QA folder
    if not os.path.exists('QA'):
        os.makedirs('QA')

    #cube names
    cname = "COMBINED_CUBE.fits"
    iname = "COMBINED_IMAGE.fits"

    #first identify bright sources in final white image
    catsrc = msrc.findsources(iname, cname, check=True, output='QA', nsig=5,
                              minarea=20)

    #make rsdss images
    if not (os.path.isfile('QA/sdssr.fits')):
        mutil.cube2img(cname, write='QA/sdssr.fits', wrange=None, helio=0,
                       filt=129)
    rsdssall = fits.open('QA/sdssr.fits')
    segmask = fits.open('QA/segmap.fits')
    whiteref = fits.open(iname)

    #select round and bright objects
    shapesrc = catsrc['a'] / catsrc['b']
    roundsrc = catsrc[np.where((shapesrc < 1.1) & (catsrc['cflux'] > 50))]
    imgfield = fits.open(iname)
    rms = np.std(imgfield[0].data)

    #perform aperture photometry on rband image - data already skysub
    positions = [roundsrc['x'], roundsrc['y']]
    apertures = CircularAperture(positions, r=10.)
    phot_table = aperture_photometry(rsdssall[1].data, apertures)
    phot_table_white = aperture_photometry(whiteref[0].data, apertures)
    rmag = -2.5 * np.log10(phot_table['aperture_sum']) + rsdssall[0].header['ZPAB']
    wmag = -2.5 * np.log10(phot_table_white['aperture_sum'])

    #find FWHM on rband image
    fwhm = np.zeros(len(rmag))
    for ii in range(len(rmag)):
        #sep centroids are floats: cast to int, as numpy forbids
        #non-integer slice indices
        xc = int(roundsrc['x'][ii])
        yc = int(roundsrc['y'][ii])
        subdata = rsdssall[1].data[yc - 10:yc + 10, xc - 10:xc + 10]
        tdfit = centroids.fit_2dgaussian(subdata, error=None, mask=None)
        #2.3548 converts a gaussian sigma to FWHM; PC2_2 is deg/pix
        fwhm[ii] = 2.3548 * 0.5 * (tdfit.x_stddev + tdfit.y_stddev) \
            * rsdssall[0].header['PC2_2'] * 3600.

    #find rms of cube - mask sources and add edge buffer
    maskwbuffer = np.copy(segmask[1].data)
    maskwbuffer[0:30, :] = 9999
    maskwbuffer[-31:-1, :] = 9999
    maskwbuffer[:, 0:30] = 9999
    maskwbuffer[:, -31:-1] = 9999
    cwrms, crms = mutil.cubestat(cname, mask=maskwbuffer)

    #open diagnostic output
    with PdfPages('QA/QAfile.pdf') as pdf:

        ###########################
        #display field with r mag #
        ###########################
        plt.figure(figsize=(10, 10))
        #'lower' is the valid matplotlib origin value ('low' raises)
        plt.imshow(imgfield[0].data, origin='lower',
                   clim=[-0.5 * rms, 0.5 * rms], cmap='gray_r')
        #mark round sources
        plt.scatter(roundsrc['x'], roundsrc['y'], color='red')
        for ii in range(len(rmag)):
            plt.text(roundsrc['x'][ii], roundsrc['y'][ii],
                     " " + str(rmag[ii]), color='red')
        plt.title('Round sources with SDSS r mag')
        pdf.savefig()  # saves the current figure into a pdf page
        plt.close()

        ###########################
        #display FWHM             #
        ###########################
        plt.figure(figsize=(10, 10))
        plt.scatter(rmag, fwhm, color='red')
        plt.xlabel('Source rmag')
        plt.ylabel('FWHM (arcsec)')
        plt.title('Median FWHM {}'.format(np.median(fwhm)))
        pdf.savefig()  # saves the current figure into a pdf page
        plt.close()

        ###########################
        #check centroid           #
        ###########################
        plt.figure(figsize=(10, 10))
        #loop on exposures
        for tmpc in open(cubeslist, 'r'):
            thisob = tmpc.split('/')[1]
            thisexp = tmpc.split('_')[3]
            wname = '../{}/Proc/DATACUBE_FINAL_LINEWCS_{}_white2.fits'.format(
                thisob, thisexp)
            wfits = fits.open(wname)
            #now loop on sources
            delta_x = np.zeros(len(rmag))
            delta_y = np.zeros(len(rmag))
            for ii in range(len(rmag)):
                #int cast for valid slice indices (see FWHM loop)
                xc = int(roundsrc['x'][ii])
                yc = int(roundsrc['y'][ii])
                subdata = wfits[0].data[yc - 10:yc + 10, xc - 10:xc + 10]
                x1, y1 = centroids.centroid_2dg(subdata)
                delta_x[ii] = 10.5 - x1
                delta_y[ii] = 10.5 - y1
            #plot for this subunit
            plt.scatter(delta_x * rsdssall[0].header['PC2_2'] * 3600.,
                        delta_y * rsdssall[0].header['PC2_2'] * 3600.)
        plt.xlabel('Delta x (arcsec)')
        plt.ylabel('Delta y (arcsec)')
        plt.title('Check exposure aligment')
        pdf.savefig()  # saves the current figure into a pdf page
        plt.close()

        ###########################
        #check fluxing            #
        ###########################
        #make a check on fluxing
        plt.figure(figsize=(10, 10))
        #loop on exposures
        for tmpc in open(cubeslist, 'r'):
            thisob = tmpc.split('/')[1]
            thisexp = tmpc.split('_')[3]
            wname = '../{}/Proc/DATACUBE_FINAL_LINEWCS_{}_white2.fits'.format(
                thisob, thisexp)
            wfits = fits.open(wname)
            phot_this_white = aperture_photometry(wfits[0].data, apertures)
            wmag_this = -2.5 * np.log10(phot_this_white['aperture_sum'])
            #plot for this subunit
            ii = np.argsort(rmag)
            dd = wmag - wmag_this
            plt.plot(rmag[ii], dd[ii], label=thisob + thisexp)
        plt.xlabel('SDSS R mag')
        plt.ylabel('Delta White Mag')
        plt.title('Check exposure photometry')
        plt.legend()
        pdf.savefig()  # saves the current figure into a pdf page
        plt.close()

        #display rms stats + compute stats over cubes
        plt.figure(figsize=(10, 10))
        plt.semilogy(cwrms, crms, label='Coadd')
        for tmpc in open(cubeslist, 'r'):
            cwrms_this, crms_this = mutil.cubestat(tmpc.strip(),
                                                   mask=maskwbuffer)
            plt.semilogy(cwrms_this, crms_this,
                         label=tmpc.split('/')[1] + tmpc.split('_')[3])
        plt.xlabel('Wave (A)')
        plt.ylabel('RMS (SB units)')
        plt.legend()
        plt.title('RMS in cubex')
        pdf.savefig()  # saves the current figure into a pdf page
        plt.close()