def calibrate(namelst):
    # Flux-calibrate each spectrum in `namelst` against the 'Sens'
    # sensitivity image (from iraf.sensfunc), then display the result.
    #
    # namelst : list of FITS file names to calibrate.
    # Output files are named 'mark_<input>'; pre-existing ones are removed.
    #
    # NOTE(review): relies on module-level globals `extpath` (extinction
    # table) and `stdpath` (standard-star caldir) -- confirm they are set.
    iraf.noao()
    iraf.twodspec()
    # dispaxis=2: dispersion runs along columns.
    iraf.longslit(dispaxis=2, nsum=1, observatory='ca',
                  extinction=extpath, caldir=stdpath)
    for fitname in namelst:
        outname = 'mark_' + fitname
        if os.path.isfile(outname):
            print('remove file ' + outname)
            os.remove(outname)
        iraf.calibrate(input=fitname, output=outname, extinct='yes',
                       flux='yes', extinction=extpath, ignoreaps='yes',
                       sensitivity='Sens', fnu='no')
        # Quick interactive look at the calibrated spectrum.
        iraf.splot(images=outname)
    # Flush the IRAF process cache.
    iraf.flpr()
def fixpix(fs=None):
    # Zero out bad-pixel-mask pixels, attach the cosmic-ray mask from the
    # matching 'lax' file, and run IRAF fixpix over the cosmic rays.
    #
    # fs : optional list of rectified ('nrm') science frames; defaults to
    #      nrm/sci*nrm*.fits inside the 'work' directory.
    iraf.cd('work')
    if fs is None:
        fs = glob('nrm/sci*nrm*.fits')
    if len(fs) == 0:
        print "WARNING: No rectified images to fix."
        iraf.cd('..')
        return
    if not os.path.exists('fix'):
        os.mkdir('fix')
    for f in fs:
        outname = f.replace('nrm', 'fix')
        # Copy the file to the fix directory
        shutil.copy(f, outname)
        # Set all of the BPM pixels = 0
        h = pyfits.open(outname, mode='update')
        h['SCI'].data[h['BPM'].data == 1] = 0
        # Grab the CRM extension from the lax file
        laxhdu = pyfits.open(f.replace('nrm', 'lax'))
        h.append(pyfits.ImageHDU(data=laxhdu['CRM'].data.copy(),
                                 header=laxhdu['CRM'].header.copy(),
                                 name='CRM'))
        h.flush()
        h.close()
        laxhdu.close()
        # Run iraf's fixpix on the cosmic rays, not ideal,
        # but better than nothing because apall doesn't take a bad pixel mask
        iraf.unlearn(iraf.fixpix)
        iraf.flpr()
        iraf.fixpix(outname + '[SCI]', outname + '[CRM]', mode='hl')
    iraf.cd('..')
def combine_flat(ftblst):
    # Average-combine the trimmed flats ('tb'-prefixed names from `ftblst`)
    # into 'flat' with avsigclip rejection, scaled by the mode.
    iraf.flatcombine(input='tb//@' + ftblst,  # 'tb' prepended to each list entry
                     output='flat', combine='average', reject='avsigclip',
                     ccdtype='', process=False, subsets=False,
                     delete=False, clobber=False, scale='mode', statsec='',
                     nlow=1, nhigh=1, nkeep=1, mclip=True,
                     lsigma=3.0, hsigma=3.0,
                     rdnoise='CCDRON', gain='CCDGAIN',  # from header keywords
                     snoise=0.0, pclip=-0.5, blank=1.0)
    iraf.flpr()
    print '<<<<<combine flat successfully>>>>>'
def cl_bye(verb=None):
    """Flush the IRAF process cache (twice, to be safe).

    verb : if truthy, announce the flush on stderr.
    """
    from pyraf import iraf
    if verb:
        print(" Clearing iraf --> iraf.flpr()", file=sys.stderr)
    for _ in range(2):
        iraf.flpr()
    return
def identify2d(fs=None):
    # Interactively identify arc lines on the mosaiced 2D arcs with
    # pysalt's specidentify, writing one wavelength database per arc
    # into the 'id2' directory.
    #
    # fs : optional list of mosaiced arc frames; defaults to
    #      mos/arc*mos*.fits inside 'work'.
    iraf.cd('work')
    if fs is None:
        fs = glob('mos/arc*mos*.fits')
    if len(fs) == 0:
        print "WARNING: No mosaiced (2D) specidentify."
        # Change directories to fail gracefully
        iraf.cd('..')
        return
    arcfs, arcgas = get_ims(fs, 'arc')
    if not os.path.exists('id2'):
        os.mkdir('id2')
    # Map LAMPID header values to pysalt line-list file names.
    lampfiles = {
        'Th Ar': 'ThAr.salt',
        'Xe': 'Xe.salt',
        'Ne': 'NeAr.salt',
        'Cu Ar': 'CuAr.salt',
        'Ar': 'Argon_hires.salt',
        'Hg Ar': 'HgAr.salt'
    }
    for i, f in enumerate(arcfs):
        ga = arcgas[i]
        # find lamp and corresponding linelist
        lamp = pyfits.getval(f, 'LAMPID')
        lampfn = lampfiles[lamp]
        # Low-resolution grating pairs with the low-res argon list.
        if pyfits.getval(f, 'GRATING') == 'PG0300' and lamp == 'Ar':
            lampfn = 'Argon_lores.swj'
        ccdsum = int(pyfits.getval(f, 'CCDSUM').split()[1])
        # linelistpath is a global variable defined in beginning, path to
        # where the line lists are.
        lamplines = pysaltpath + '/data/linelists/' + lampfn
        print(lamplines)
        # img num should be right before the .fits
        imgnum = f[-9:-5]
        # run pysalt specidentify
        idfile = 'id2/arc%05.2fid2%04i' % (float(ga), int(imgnum)) + '.db'
        iraf.unlearn(iraf.specidentify)
        iraf.flpr()
        iraf.specidentify(
            images=f, linelist=lamplines, outfile=idfile,
            guesstype='rss', inter=True,
            # automethod='FitXcor',
            rstep=600 / ccdsum,  # scale row step/start with binning
            rstart=200 / ccdsum, startext=1, clobber='yes',
            #startext=1, clobber='yes',
            verbose='no', mode='hl', logfile='salt.log', mdiff=2,
            function='legendre')
    iraf.cd('..')
def coroverbiastrim(lstfile):
    # Two-pass ccdproc over the frames in `lstfile`:
    #   pass 1 -- overscan-subtract + bias ('Zero') correct -> 'bo' prefix;
    #   pass 2 -- trim to the section from get_trim_sec()   -> 'tbo' prefix.
    iraf.noao()
    iraf.imred()
    iraf.ccdred()
    x1, x2, y1, y2 = get_trim_sec()
    # Pass 1: overscan + zero correction.
    iraf.ccdproc(images='@' + lstfile + '//[1]',  # operate on FITS extension 1
                 output='%bo%bo%@' + lstfile,     # prepend 'bo' to each name
                 ccdtype='', max_cache=0, noproc=False,
                 fixpix=False, overscan=True, trim=False,
                 zerocor=True, darkcor=False, flatcor=False,
                 illumcor=False, fringecor=False, readcor=False,
                 scancor=False, readaxis='line', fixfile='',
                 biassec='[5:45,%s:%s]' % (y1, y2),
                 trimsec='[%s:%s,%s:%s]' % (x1, x2, y1, y2),
                 zero='Zero', dark='', flat='', illum='', fringe='',
                 minreplace=1.0, scantype='shortscan', nscan=1,
                 interactive=False, function='chebyshev', order=1,
                 sample='*', naverage=1, niterate=1,
                 low_reject=3.0, high_reject=3.0, grow=1.0)
    # Pass 2: trim only.
    iraf.ccdproc(images='%bo%bo%@' + lstfile,
                 output='%tbo%tbo%@' + lstfile,
                 ccdtype='', max_cache=0, noproc=False,
                 fixpix=False, overscan=False, trim=True,
                 zerocor=False, darkcor=False, flatcor=False,
                 illumcor=False, fringecor=False, readcor=False,
                 scancor=False, readaxis='line', fixfile='',
                 biassec='[5:45,%s:%s]' % (y1, y2),
                 trimsec='[%s:%s,%s:%s]' % (x1, x2, y1, y2),
                 zero='Zero', dark='', flat='', illum='', fringe='',
                 minreplace=1.0, scantype='shortscan', nscan=1,
                 interactive=False, function='chebyshev', order=1,
                 sample='*', naverage=1, niterate=1,
                 low_reject=3.0, high_reject=3.0, grow=1.0)
    iraf.flpr()
def main():
    """Build the combined bias frame and copy Zero.fits to each data dir."""
    if not os.path.isdir('bias'):
        print('no bias dir in ' + os.getcwd())
        return
    biaslst = os.getcwd() + os.sep + 'bias' + os.sep + 'spec_bias.lst'
    if not os.path.isfile(biaslst):
        print('no spec_bias.lst in ' + os.getcwd())
        return
    os.chdir('bias')
    clear()
    coroverscan('spec_bias.lst')
    combinebias('spec_bias.lst')
    # Return to the parent directory before distributing the result.
    parent = os.path.split(os.getcwd())[0]
    os.chdir(parent)
    iraf.flpr()
    path = os.getcwd()
    targets = [d for d in os.listdir(path)
               if os.path.isdir(d) and 'bias' not in d and 'other' not in d]
    for dirg in targets:
        print('copy Zero to ' + path + os.sep + dirg)
        shutil.copyfile(path + os.sep + 'bias' + os.sep + 'Zero.fits',
                        path + os.sep + dirg + os.sep + 'Zero.fits')
def reidentify():
    # Non-interactively propagate the 'Lamp' line identifications across
    # the detector, refitting every 10 columns (step/nsum = 10).
    iraf.twodspec()
    iraf.longslit()
    iraf.reidentify(reference='Lamp', images='Lamp', interactive='no',
                    section='column', newaps='yes', override='yes',
                    refit='yes', trace='no', step=10, nsum=10, shift=0.0,
                    search=0.0, nlost=5, cradius=7.0, threshold=0.0,
                    addfeatures='no',
                    coordlist=cdherb_file,  # NOTE(review): module-level line-list path -- confirm
                    match=-3.0, maxfeatures=50, minsep=2.0,
                    database='database')
    iraf.flpr()
def identify():
    # Interactively identify arc lines on the middle column of Lamp.fits
    # and fit a 6th-order Chebyshev dispersion solution into 'database'.
    iraf.twodspec()
    iraf.longslit()
    iraf.identify(images='Lamp.fits', section='middle column',
                  database='database',
                  coordlist=cdherb_file,  # NOTE(review): module-level line-list path -- confirm
                  nsum=10, match=-3.0, maxfeatures=50, zwidth=100.0,
                  ftype='emission', fwidth=20.0, cradius=7.0,
                  threshold=0.0, minsep=2.0, function='chebyshev',
                  order=6, sample='*', niterate=0, low_reject=3.0,
                  high_reject=3.0, grow=0.0, autowrite='no')
    iraf.flpr()
def fluxcal(stdsfolder='./', fs=None):
    # Flux-calibrate the extracted 1D science spectra ('x1d') against the
    # sensitivity curves under <stdsfolder>/std, writing 'flx' FITS files.
    #
    # stdsfolder : folder containing a 'std' subdir of *sens*c?.dat curves.
    # fs         : optional list of x1d spectra; defaults to the glob below.
    iraf.cd('work')
    if fs is None:
        fs = glob('x1d/sci*x1d*c*.fits')
    if len(fs) == 0:
        print "WARNING: No science chip spectra to flux calibrate."
        iraf.cd('..')
        return
    if not os.path.exists('flx'):
        os.mkdir('flx')
    extfile = pysaltpath + '/data/site/suth_extinct.dat'  # site extinction table
    stdfiles = glob(stdsfolder + '/std/*sens*c?.dat')
    print(stdfiles)
    for f in fs:
        outfile = f.replace('x1d', 'flx')
        chip = outfile[-6]  # chip-number character in the file name
        hdu = pyfits.open(f)
        ga = f.split('/')[1][3:8]  # grating angle encoded in the file name
        # Get the standard sensfunc with the same grating angle
        stdfile = None
        for stdf in stdfiles:
            # Grating angles compared to 1% relative tolerance.
            if np.isclose(float(ga),
                          float(stdf.split('/')[stdf.count('/')][3:8]),
                          rtol=1e-2):
                # Get the right chip number
                if chip == stdf[-5]:
                    stdfile = stdf
                    break
        if stdfile is None:
            print('No standard star with grating-angle %s' % ga)
            continue
        # for each extracted aperture
        for i in range(hdu[0].data.shape[1]):
            # create an ascii file that pysalt can read
            asciiname = 'flx/sciflx.dat'
            outtmpname = 'flx/scical.dat'
            spectoascii(f, asciiname, i)
            # Run pysalt.speccal
            iraf.unlearn(iraf.speccal)
            iraf.flpr()
            iraf.speccal(asciiname, outtmpname, stdfile, extfile,
                         airmass=pyfits.getval(f, 'AIRMASS'),
                         exptime=pyfits.getval(f, 'EXPTIME'),
                         clobber=True, mode='h')
            # read in the flux calibrated ascii file and copy its
            # contents into a fits file
            flxcal = np.genfromtxt(outtmpname).transpose()
            hdu[0].data[0, i] = flxcal[1]
            hdu[0].data[2, i] = flxcal[2]
            # delete the ascii file
            os.remove(asciiname)
            os.remove(outtmpname)
        hdu.writeto(outfile, clobber=True)
    iraf.cd('..')
def combinelamp(lst):
    """Sum the 'ftb'-prefixed lamp frames named in `lst` into Lamp.fits."""
    for pkg in (iraf.noao, iraf.imred, iraf.ccdred):
        pkg()
    inspec = '%ftb%ftb%@' + lst
    iraf.imcombine(input=inspec, output='Lamp', combine='sum',
                   reject='none')
    print('<<<<<combine the lamp & generate the Lamp.fits>>>>>')
    iraf.flpr()
def check_bias():
    """Visually inspect the bias frames listed in bias/spec_bias.lst.

    Opens the list in gedit and runs iraf.imexamine on the frames'
    first extension, then returns to the parent directory.
    """
    biaspath = os.getcwd() + os.sep + 'bias'
    if os.path.isfile(biaspath + os.sep + 'spec_bias.lst'):
        os.chdir('bias')
        os.system('gedit spec_bias.lst &')
        iraf.imexamine(input='@spec_bias.lst[1]', frame=1)
        dirname, filename = os.path.split(os.getcwd())
        os.chdir(dirname)
        iraf.flpr()
    else:
        # BUG FIX: the old message claimed the bias *dir* was missing, but
        # this branch also runs when the dir exists and only the list file
        # is absent -- report the actual missing file instead.
        print('no ' + biaspath + os.sep + 'spec_bias.lst')
def check_bias():
    """Visually inspect the bias frames listed in bias/spec_bias.lst.

    Opens the list with the default application and runs iraf.imexamine
    on the frames' first extension, then returns to the parent directory.
    """
    biaspath = os.getcwd() + os.sep + 'bias'
    if os.path.isfile(biaspath + os.sep + 'spec_bias.lst'):
        os.chdir('bias')
        webbrowser.open('spec_bias.lst')
        iraf.imexamine(input='@spec_bias.lst[1]', frame=1)
        dirname = os.path.dirname(os.getcwd())
        os.chdir(dirname)
        iraf.flpr()
    else:
        # BUG FIX: the old message claimed the bias *dir* was missing, but
        # this branch also runs when the dir exists and only the list file
        # is absent -- report the actual missing file instead.
        print('no ' + biaspath + os.sep + 'spec_bias.lst')
def fluxcal(stdsfolder='./', fs=None):
    # Flux-calibrate the extracted 1D science spectra ('x1d') against the
    # sensitivity curves under <stdsfolder>/std, writing 'flx' FITS files.
    #
    # NOTE(review): this variant matches the grating angle by *substring*
    # (`ga in stdf`), unlike the numeric-tolerance variant elsewhere in
    # this file -- confirm which is intended.
    iraf.cd('work')
    if fs is None:
        fs = glob('x1d/sci*x1d*c*.fits')
    if len(fs) == 0:
        print "WARNING: No science chip spectra to flux calibrate."
        iraf.cd('..')
        return
    if not os.path.exists('flx'):
        os.mkdir('flx')
    extfile = pysaltpath + '/data/site/suth_extinct.dat'  # site extinction table
    stdfiles = glob(stdsfolder + '/std/*sens*c?.dat')
    print(stdfiles)
    for f in fs:
        outfile = f.replace('x1d', 'flx')
        chip = outfile[-6]  # chip-number character in the file name
        hdu = pyfits.open(f)
        ga = f.split('/')[1][3:8]  # grating angle encoded in the file name
        # Get the standard sensfunc with the same grating angle
        stdfile = None
        for stdf in stdfiles:
            if ga in stdf:
                # Get the right chip number
                if chip == stdf[-5]:
                    stdfile = stdf
                    break
        if stdfile is None:
            print('No standard star with grating-angle %s' % ga)
            continue
        # for each extracted aperture
        for i in range(hdu[0].data.shape[1]):
            # create an ascii file that pysalt can read
            asciiname = 'flx/sciflx.dat'
            outtmpname = 'flx/scical.dat'
            spectoascii(f, asciiname, i)
            # Run pysalt.speccal
            iraf.unlearn(iraf.speccal)
            iraf.flpr()
            iraf.speccal(asciiname, outtmpname, stdfile, extfile,
                         airmass=pyfits.getval(f, 'AIRMASS'),
                         exptime=pyfits.getval(f, 'EXPTIME'),
                         clobber=True, mode='h')
            # read in the flux calibrated ascii file and copy its
            # contents into a fits file
            flxcal = np.genfromtxt(outtmpname).transpose()
            hdu[0].data[0, i] = flxcal[1]
            hdu[0].data[2, i] = flxcal[2]
            # delete the ascii file
            os.remove(asciiname)
            os.remove(outtmpname)
        hdu.writeto(outfile, clobber=True)
    iraf.cd('..')
def mosaic(fs=None): iraf.cd('work') # If the file list is not given, grab the default files if fs is None: fs = glob('flts/*.fits') # Abort if there are no files if len(fs) == 0: print "WARNING: No flat-fielded images to mosaic." iraf.cd('..') return if not os.path.exists('mos'): os.mkdir('mos') # Get the images to work with ims, gas = get_scis_and_arcs(fs) for i, f in enumerate(ims): ga = gas[i] fname = f.split('/')[1] typestr = fname[:3] # by our naming convention, imnum should be the last 4 characters # before the '.fits' imnum = fname[-9:-5] outname = 'mos/' + typestr outname += '%05.2fmos%04i.fits' % (float(ga), int(imnum)) # prepare to run saltmosaic iraf.unlearn(iraf.saltmosaic) iraf.flpr() iraf.saltmosaic(images=f, outimages=outname, outpref='', geomfile=pysaltpath + '/data/rss/RSSgeom.dat', clobber=True, mode='h') # Make a bad pixel mask marking where there is no data. h = pyfits.open(outname, 'update') maskim = h[1].data.copy() maskim[:, :] = 0.0 maskim[abs(h[1].data) < 1e-5] = 1 imhdu = pyfits.ImageHDU(maskim) h.append(imhdu) h[1].header['BPMEXT'] = 2 h[2].header['EXTNAME'] = 'BPM' h[2].header['CD2_2'] = 1 h.flush() h.close() iraf.cd('..')
def standard(namelst):
    # Reduce the standard star: iraf.standard writes the 'Std' bandpass
    # file, iraf.sensfunc fits the 'Sens' sensitivity function, and the
    # result is displayed with splot.
    #
    # namelst : list of calibrated spectra; only the first entry is used.
    # NOTE(review): relies on module-level globals `extpath` and `stdpath`.
    iraf.noao()
    iraf.twodspec()
    iraf.longslit(dispaxis=2, nsum=1, observatory='ca',
                  extinction=extpath, caldir=stdpath)
    std_fitsname = namelst[0]
    # Catalogue name, magnitude and band for this standard star.
    stdname, stdmag, stdmagband = standard_star_info(std_fitsname)
    wid, sep = get_band_width_sep(stdname)
    print('<<<<<the standard star is ', stdname, '>>>>>')
    print std_fitsname
    if os.path.isfile('Std'):
        print('remove file Std')
        os.remove('Std')
    iraf.standard(
        input=std_fitsname, output='Std', samestar=True,
        beam_switch=False, apertures='', bandwidth=wid,
        bandsep=sep,  # 30.0 20.0
        fnuzero=3.6800000000000E-20, extinction=extpath, caldir=stdpath,
        observatory='ca', interact=True, graphics='stdgraph', cursor='',
        star_name=stdname, airmass='', exptime='', mag=stdmag,
        magband=stdmagband, teff='', answer='yes')
    if os.path.isfile('Sens.fits'):
        print('remove file Sens.fits')
        os.remove('Sens.fits')
    iraf.sensfunc(standards='Std', sensitivity='Sens',
                  extinction=extpath, function='spline3', order=15)
    iraf.splot('Sens')
    iraf.flpr()
def check_other(path):
    # Visually inspect the calibration frames referenced by the standard
    # list files under `path` (opens each list in gedit, runs imexamine),
    # then return to the parent directory.
    if os.path.isdir(path):
        os.chdir(path)
        lstlst = ['halogen.lst', 'lamp.lst', 'cor_lamp.lst', 'std.lst', 'cor_std.lst']
        for i in lstlst:
            if os.path.isfile(i):
                os.system('gedit %s &' % i)
                iraf.imexamine(input='@%s[1]' % i, frame=1)
        # NOTE(review): reconstructed from flattened source; assumes the
        # chdir back up happens once, after the loop -- confirm.
        dirname, filename = os.path.split(os.getcwd())
        os.chdir(dirname)
        iraf.flpr()
    else:
        print('no dir ' + path + ' in ' + os.getcwd())
def main():
    """Run the reduction pipeline stages inside every object directory."""
    root = os.getcwd()
    workdirs = [d for d in os.listdir(root)
                if os.path.isdir(d) and 'bias' not in d and 'other' not in d]
    for sub in workdirs:
        os.chdir(root + os.sep + sub)
        print('current dir : ' + os.getcwd())
        iraf.flpr()
        # Pipeline stages: correction, wavelength cal, extraction, flux cal.
        cor_ftbo.main()
        wcal2d.main()
        re_apall.main()
        re_corflux.main()
        os.chdir(root)
def check_other(path):
    # Visually inspect the calibration frames referenced by the standard
    # list files under `path` (opens each list with the default viewer,
    # runs imexamine), then return to the parent directory.
    if os.path.isdir(path):
        os.chdir(path)
        lstlst = [
            'halogen.lst', 'lamp.lst', 'cor_lamp.lst', 'std.lst',
            'cor_std.lst'
        ]
        for i in lstlst:
            if os.path.isfile(i):
                webbrowser.open(i)
                iraf.imexamine(input='@%s[1]' % i, frame=1)
        # NOTE(review): reconstructed from flattened source; assumes the
        # chdir back up happens once, after the loop -- confirm.
        dirname, filename = os.path.split(os.getcwd())
        os.chdir(dirname)
        iraf.flpr()
    else:
        print('no dir ' + path + ' in ' + os.getcwd())
def correct_overscan(filename):
    # Fit and subtract the [5:45,1:4612] overscan strip from every frame
    # listed in `filename`; outputs gain the 'o' prefix.
    iraf.ccdproc(images='@' + filename + '//[1]',  # operate on FITS extension 1
                 output='%o%o%@' + filename,       # prepend 'o' to each name
                 ccdtype='', max_cache=0, noproc=False,
                 fixpix=False, overscan=True, trim=False,
                 zerocor=False, darkcor=False, flatcor=False,
                 illumcor=False, fringecor=False, readcor=False,
                 scancor=False, readaxis='line', fixfile='',
                 biassec='[5:45,1:4612]', trimsec='', zero='',
                 dark='', flat='', illum='', fringe='',
                 minreplace=1.0, scantype='shortscan', nscan=1,
                 interactive=False, function='legendre', order=1,
                 sample='*', naverage=1, niterate=1,
                 low_reject=3.0, high_reject=3.0, grow=0.0)
    iraf.flpr()
def correct_bias(filename1, filename2):
    # Subtract the master bias `filename2` from the overscan-corrected
    # ('o'-prefixed) frames listed in `filename1`; outputs get 'bo' prefix.
    iraf.ccdproc(images='o//@' + filename1,
                 output='bo//@' + filename1,
                 ccdtype='', max_cache=0, noproc=False,
                 fixpix=False, overscan=False, trim=False,
                 zerocor=True, darkcor=False, flatcor=False,
                 illumcor=False, fringecor=False, readcor=False,
                 scancor=False, readaxis='line', fixfile='',
                 biassec='', trimsec='',
                 zero=filename2,  # the master bias frame
                 dark='', flat='', illum='', fringe='',
                 minreplace=1.0, scantype='shortscan', nscan=1,
                 interactive=False, function='chebyshev', order=1,
                 sample='*', naverage=1, niterate=1,
                 low_reject=3.0, high_reject=3.0, grow=0.0)
    iraf.flpr()
def HandleEllipseTask(cutimage, xcntr, ycntr, SizeX, SizeY, sky, out):
    """Running the ellipse task. SizeX, SizeY are the total size"""
    # Falls back to the manual 1D profile finder whenever pyraf is
    # unavailable or the ellipse task fails.
    manual_profile = 0
    try:
        raise ImportError()  #Temporarily kill this loop as the new flagging does not work with pyraf-functions, yet
        from pyraf import iraf
        from fitellifunc import run_elli
        use_pyraf = 1
    except ImportError:
        use_pyraf = 0
        print 'No pyraf installed!'
        WriteError('Cannot find pyraf installation! Trying manual 1d ' + \
                   'profile finder\n')
    if use_pyraf:
        # Output names differ for the 'out' (output-image) mode.
        if out:
            ell_mask_file = 'OEM_' + c.fstring + '.fits'
            ell_out = 'OE_' + c.fstring + '.txt'
        else:
            ell_mask_file = 'EM_' + c.fstring + '.fits'
            ell_out = 'E_' + c.fstring + '.txt'
        plfile = 'GalEllFit.fits.pl'
        CleanEllipse(ell_out, 0)
        # Best-effort: copy the mask to IRAF pixel-list format.
        try:
            iraf.imcopy(ell_mask_file, plfile, verbose='no')
            iraf.flpr()
        except:
            pass
        try:
            run_elli(cutimage, ell_out, xcntr, ycntr, c.eg, \
                     c.pos_ang, c.major_axis, sky)
            CleanEllipse(ell_out, 1)
            try:
                iraf.flpr()
            except:
                pass
            # No output file means the ellipse fit silently failed.
            if exists(ell_out):
                pass
            else:
                manual_profile = 1
        except:
            manual_profile = 1
            WriteError('Error in ellipse task. Trying manual profile finder\n')
            try:
                c.Flag = SetFlag(c.Flag, GetFlag('ELLIPSE_FAIL'))
            except badflag:
                pass
    if use_pyraf == 0 or manual_profile:
        FitEllipseManual(cutimage, xcntr, ycntr, SizeX, SizeY, sky, out)
def apall(lstfile):
    # Interactively extract 1D spectra from the wavelength-calibrated 2D
    # frames ('wftbo' prefix) listed in `lstfile`; outputs get 'awftbo'.
    # The extraction for each frame can be repeated on user request.
    iraf.noao()
    iraf.twodspec()
    iraf.apextract(dispaxis=2, database='database')
    f = open(lstfile)
    l = f.readlines()
    f.close()
    l = [tmp.split('\n')[0] for tmp in l]  # strip trailing newlines
    path = os.getcwd()
    for i in l:
        infile = 'wftbo' + i
        outfile = 'awftbo' + i
        while True:
            if os.path.isfile(outfile):
                print('remove ' + outfile)
                os.remove(outfile)
            # Also drop the stale aperture database entry for this frame.
            delfile = path + os.sep + 'database/ap' + infile[0:-5]
            if os.path.isfile(delfile):
                print('remove ' + delfile)
                os.remove(delfile)
            iraf.apall(input=infile, output=outfile, apertures=2,
                       format='multispec', references='', profiles='',
                       interactive=True, find=True, recenter=True,
                       resize=False, edit=True, trace=True,
                       fittrace=True, extract=True, extras=True,
                       review=True, line='INDEF', nsum=10, lower=-15.0,
                       upper=15.0, apidtable='', b_function='chebyshev',
                       b_order=2, b_sample='-50:-26,26:50',
                       b_naverage=-25, b_niterate=1, b_low_reject=3.0,
                       b_high_reject=3.0, b_grow=0.0, width=5.0,
                       radius=10.0, threshold=0.0, nfind=2, minsep=5.0,
                       maxsep=100000.0, order='increasing',
                       aprecenter='', npeaks='INDEF', shift=True,
                       llimit='INDEF', ulimit='INDEF', ylevel=0.1,
                       peak=True, bkg=True, r_grow=0.0, avglimits=False,
                       t_nsum=20, t_step=10, t_nlost=3,
                       t_function='legendre', t_order=7, t_sample='*',
                       t_naverage=1, t_niterate=1, t_low_reject=3.0,
                       t_high_reject=3.0, t_grow=0.0, background='fit',
                       skybox=1, weights='variance', pfit='fit1d',
                       clean=True, saturation='INDEF', readnoise=9.4,
                       gain=0.35, lsigma=4.0, usigma=4.0, nsubaps=1)
            iraf.flpr()
            # Show the extraction so the user can decide to repeat.
            sspecplot.sspecplot(outfile)
            getval = raw_input('Are you need repeat apall,may be clean should be close(r/n)')
            if getval != 'r':
                break
def correct_flat(filename, flatname):
    # Flat-field the trimmed ('tbo'-prefixed) frames listed in `filename`
    # with `flatname`; outputs gain the 'ftbo' prefix.
    # flatname = 'per' + filename.replace('_no_flat.lst', '_flat.fits')
    iraf.ccdproc(images='tbo//@' + filename,
                 output='ftbo//@' + filename,
                 ccdtype='', max_cache=0, noproc=False,
                 fixpix=False, overscan=False, trim=False,
                 zerocor=False, darkcor=False, flatcor=True,
                 illumcor=False, fringecor=False, readcor=False,
                 scancor=False, readaxis='line', fixfile='',
                 biassec='', trimsec='', zero='',
                 dark='', flat=flatname, illum='', fringe='',
                 minreplace=1.0, scantype='shortscan', nscan=1,
                 interactive=False, function='legendre', order=1,
                 sample='*', naverage=1, niterate=1,
                 low_reject=3.0, high_reject=3.0, grow=0.0)
    iraf.flpr()
def coroverscan(lstfn):
    """
    call iraf command ccdproc, overscan correct.
    lstfn : lst file name
    type : string
    output file : oYF*.fits

    Fits the [5:45,1:4612] overscan strip with a 1st-order Chebyshev and
    subtracts it from extension 1 of every frame in the list; output
    names are the inputs with an 'o' prefix.
    """
    iraf.noao()
    iraf.imred()
    iraf.ccdred()
    iraf.ccdproc(images='@' + lstfn + '//[1]',  # operate on FITS extension 1
                 output='%o%o%@' + lstfn,       # prepend 'o' to each name
                 ccdtype='', max_cache=0, noproc=False, fixpix=False,
                 overscan=True, trim=False, zerocor=False, darkcor=False,
                 flatcor=False, illumcor=False, fringecor=False,
                 readcor=False, scancor=False, readaxis='line',
                 fixfile='', biassec='[5:45,1:4612]', trimsec='',
                 zero='', dark='', flat='', illum='', fringe='',
                 minreplace=1.0, scantype='shortscan', nscan=1,
                 interactive=False, function='chebyshev', order=1,
                 sample='*', naverage=1, niterate=1, low_reject=3.0,
                 high_reject=3.0, grow=1.0)
    iraf.flpr()
def trim(tblst, trimsec):
    # Trim the bias-corrected ('b'-prefixed) frames in `tblst` to
    # `trimsec` (IRAF section string); outputs gain the 'tb' prefix.
    iraf.ccdproc(images='b//@' + tblst,
                 output='tb//@' + tblst,
                 ccdtype='', noproc=False, overscan=False, trim=True,
                 zerocor=False, flatcor=False, readaxis='line',
                 biassec='', trimsec=trimsec, interactive=False,
                 function='legendre', order=1)
    iraf.flpr()
    print '<<<<<trim section successfully>>>>>'
def identify2d(fs=None):
    # Interactively identify arc lines on the mosaiced 2D arcs with
    # pysalt's specidentify, writing one wavelength database per arc
    # into the 'id2' directory.
    #
    # fs : optional list of mosaiced arc frames; defaults to
    #      mos/arc*mos*.fits inside 'work'.
    iraf.cd('work')
    if fs is None:
        fs = glob('mos/arc*mos*.fits')
    if len(fs) == 0:
        print "WARNING: No mosaiced (2D) specidentify."
        # Change directories to fail gracefully
        iraf.cd('..')
        return
    arcfs, arcgas = get_ims(fs, 'arc')
    if not os.path.exists('id2'):
        os.mkdir('id2')
    # Map LAMPID header values to pysalt line-list file names.
    lampfiles = {'Th Ar': 'ThAr.salt', 'Xe': 'Xe.salt',
                 'Ne': 'NeAr.salt', 'Cu Ar': 'CuAr.salt',
                 'Ar': 'Argon_hires.salt', 'Hg Ar': 'HgAr.salt'}
    for i, f in enumerate(arcfs):
        ga = arcgas[i]
        # find lamp and corresponding linelist
        lamp = pyfits.getval(f, 'LAMPID')
        lampfn = lampfiles[lamp]
        # Low-resolution grating pairs with the low-res argon list.
        if pyfits.getval(f, 'GRATING') == 'PG0300' and lamp == 'Ar':
            lampfn = 'Argon_lores.swj'
        ccdsum = int(pyfits.getval(f, 'CCDSUM').split()[1])
        # linelistpath is a global variable defined in beginning, path to
        # where the line lists are.
        lamplines = pysaltpath + '/data/linelists/' + lampfn
        print(lamplines)
        # img num should be right before the .fits
        imgnum = f[-9:-5]
        # run pysalt specidentify
        idfile = 'id2/arc%05.2fid2%04i' % (float(ga), int(imgnum)) + '.db'
        iraf.unlearn(iraf.specidentify)
        iraf.flpr()
        iraf.specidentify(images=f, linelist=lamplines, outfile=idfile,
                          guesstype='rss', inter=True,
                          # automethod='FitXcor',
                          rstep=600 / ccdsum,  # scale with binning
                          rstart=200 / ccdsum, startext=1, clobber='yes',
                          #startext=1, clobber='yes',
                          verbose='no', mode='hl', logfile='salt.log',
                          mdiff=2, function='legendre')
    iraf.cd('..')
def corhalogen(lstfile):
    # Flat-field ('Resp') the trimmed frames listed in `lstfile`;
    # any leftover 'ftbo*' products from a previous run are removed first.
    namelst = glob.glob('ftbo*.fits')
    for name in namelst:
        print 'remove ', name
        os.remove(name)
    iraf.noao()
    iraf.imred()
    iraf.ccdred()
    # NOTE(review): other helpers use 'tbo//@list' for prefixing; confirm
    # 'tbo@' resolves the input list as intended.
    iraf.ccdproc(images='tbo@' + lstfile,
                 output='%ftbo%ftbo%@' + lstfile,
                 ccdtype='', max_cache=0, noproc=False, fixpix=False,
                 overscan=False, trim=False, zerocor=False,
                 darkcor=False, flatcor=True, illumcor=False,
                 fringecor=False, readcor=False, scancor=False,
                 readaxis='line', fixfile='', biassec='', trimsec='',
                 zero='Zero', dark='', flat='Resp', illum='', fringe='',
                 minreplace=1.0, scantype='shortscan', nscan=1,
                 interactive=False, function='chebyshev', order=1,
                 sample='*', naverage=1, niterate=1, low_reject=3.0,
                 high_reject=3.0, grow=1.0)
    iraf.flpr()
def corhalogen(lstfile):
    # Flat-field ('Resp') the trimmed frames listed in `lstfile`;
    # outputs gain the 'ftbo' prefix.
    iraf.noao()
    iraf.imred()
    iraf.ccdred()
    # NOTE(review): other helpers use 'tbo//@list' for prefixing; confirm
    # 'tbo@' resolves the input list as intended.
    iraf.ccdproc(images='tbo@' + lstfile,
                 output='%ftbo%ftbo%@' + lstfile,
                 ccdtype='', max_cache=0, noproc=False,
                 fixpix=False, overscan=False, trim=False,
                 zerocor=False, darkcor=False, flatcor=True,
                 illumcor=False, fringecor=False, readcor=False,
                 scancor=False, readaxis='line', fixfile='',
                 biassec='', trimsec='',
                 zero='Zero', dark='', flat='Resp', illum='', fringe='',
                 minreplace=1.0, scantype='shortscan', nscan=1,
                 interactive=False, function='chebyshev', order=1,
                 sample='*', naverage=1, niterate=1,
                 low_reject=3.0, high_reject=3.0, grow=1.0)
    iraf.flpr()
def __init__(self, shortparlists, parlists, FitsDir, logfile, verbose=1, clean_up=1, skyKey='ALIGNSKY', hdrGain=0, crlower=None, imNsci=1): self.modName = string.split(string.split(str(self))[0], '.')[0][1:] self.shortparlists = shortparlists self.parlists = parlists self.Fits = FitsDir self.verbose = verbose self.crmasks = {} # cosmic ray masks names self.removeList = [] self.clean_up = clean_up self.skyKey = skyKey self.hdrGain = hdrGain self.crlower = crlower if imNsci < 1: raise ValueError, 'Error: pyblot got imNsci = ' + imNsci self.imNsci = imNsci self.logfile = logfile print self.modName, 'version', __version__ self.logfile.write('Instantiating ' + self.modName + ' version ' + __version__) # make sure these packages are loaded iraf.stsdas() iraf.toolbox() iraf.imgtool() iraf.fourier() iraf.fitting() iraf.ttools() iraf.analysis() iraf.dither() # flush the cash! twice! iraf.flpr() iraf.flpr() iraf.reset(imtype='fits') # seems to make deriv task a bit happier iraf.set(tmp='./')
def apall(ilst, olst):
    # Interactively extract 1D spectra: ilst[i] -> olst[i], repeating
    # each extraction on user request. Stale outputs and aperture
    # database entries are removed before each attempt.
    iraf.noao()
    iraf.twodspec()
    iraf.apextract(dispaxis=1, database='database')
    for i, infile in enumerate(ilst):
        outfile = olst[i]
        # Default aperture half-widths and background sample regions.
        laper, raper, back_samp = -7, 7, '-30:-15,15:30'
        while True:
            if os.path.isfile(outfile):
                print('remove ' + outfile)
                os.remove(outfile)
            # Drop the stale aperture database entry for this frame.
            delfile = os.getcwd() + os.sep + 'database/ap' + infile[0:-5]
            if os.path.isfile(delfile):
                print('remove ' + delfile)
                os.remove(delfile)
            iraf.apall(input=infile, output=outfile, apertures=2,
                       format='multispec', references='', profiles='',
                       interactive=True, find=True, recenter=True,
                       resize=False, edit=True, trace=True,
                       fittrace=True, extract=True, extras=True,
                       review=True, line='INDEF', nsum=10, lower=laper,
                       upper=raper, apidtable='',
                       b_function='chebyshev', b_order=2,
                       b_sample=back_samp, b_naverage=-25, b_niterate=1,
                       b_low_reject=3.0, b_high_reject=3.0, b_grow=0.0,
                       width=5.0, radius=10.0, threshold=0.0, nfind=2,
                       minsep=5.0, maxsep=100000.0, order='increasing',
                       aprecenter='', npeaks='INDEF', shift=True,
                       llimit='INDEF', ulimit='INDEF', ylevel=0.1,
                       peak=True, bkg=True, r_grow=0.0, avglimits=False,
                       t_nsum=20, t_step=10, t_nlost=3,
                       t_function='legendre', t_order=12, t_sample='*',
                       t_naverage=1, t_niterate=1, t_low_reject=3.0,
                       t_high_reject=3.0, t_grow=0.0,
                       background='median', skybox=1, weights='none',
                       pfit='fit1d', clean=True, saturation='INDEF',
                       readnoise='CCDRON', gain='CCDGAIN', lsigma=4.0,
                       usigma=4.0, nsubaps=1)
            iraf.flpr()
            getval = raw_input(('Are you need repeat apall,'
                                'may be clean should be close(r/n)'))
            if getval != 'r':
                break
def correct_trim(filename, trimsec):
    # Trim the bias-corrected ('bo'-prefixed) frames in `filename` to
    # `trimsec` (IRAF section string); outputs gain the 'tbo' prefix.
    # The commented-out block below was an older interactive way to
    # choose the trim section by hand.
    # print 'Please check the fits image and determine the size to trim...'
    # os.popen('gedit ' + filename +' &')
    # iraf.imexamine(input = 'bo//@' + filename, frame = 1)
    # xy = raw_input("Please input x1, x2, y1, y2 to trim: ")
    # xy = xy.split()
    # trimsec = '[' + xy[0] + ':' + xy[1] + ',' + xy[2] + ':' + xy[3] +']'
    iraf.ccdproc(images='bo//@' + filename,
                 output='tbo//@' + filename,
                 ccdtype='', max_cache=0, noproc=False,
                 fixpix=False, overscan=False, trim=True,
                 zerocor=False, darkcor=False, flatcor=False,
                 illumcor=False, fringecor=False, readcor=False,
                 scancor=False, readaxis='line', fixfile='',
                 biassec='', trimsec=trimsec, zero='',
                 dark='', flat='', illum='', fringe='',
                 minreplace=1.0, scantype='shortscan', nscan=1,
                 interactive=False, function='legendre', order=1,
                 sample='*', naverage=1, niterate=1,
                 low_reject=3.0, high_reject=3.0, grow=0.0)
    iraf.flpr()
def cor_flat(corftblst): print 1 iraf.ccdproc(images='tb//@' + corftblst, output='ftb//@' + corftblst, ccdtype='', max_cache=0, noproc=False, fixpix=False, overscan=False, trim=False, zerocor=False, darkcor=False, flatcor=True, illumcor=False, fringecor=False, readcor=False, scancor=False, readaxis='line', fixfile='', biassec='', trimsec='', zero='', dark='', flat='flat', illum='', fringe='', minreplace=1.0, scantype='shortscan', nscan=1, interactive=False, function='chebyshev', order=1, sample='*', naverage=1, niterate=1, low_reject=3.0, high_reject=3.0, grow=1.0) iraf.flpr() print '<<<<<correct flat successfully>>>>>'
def main():
    """Create the master bias frame and distribute Zero.fits."""
    if not os.path.isdir('bias'):
        print('no bias dir in ' + os.getcwd())
        return
    lstfile = os.getcwd() + os.sep + 'bias' + os.sep + 'spec_bias.lst'
    if not os.path.isfile(lstfile):
        print('no spec_bias.lst in ' + os.getcwd())
        return
    os.chdir('bias')
    clear()
    coroverscan('spec_bias.lst')
    combinebias('spec_bias.lst')
    # Step back up to the observation root.
    os.chdir(os.path.split(os.getcwd())[0])
    iraf.flpr()
    base = os.getcwd()
    datadirs = [d for d in os.listdir(base)
                if os.path.isdir(d) and 'bias' not in d and 'other' not in d]
    for dirg in datadirs:
        print('copy Zero to ' + base + os.sep + dirg)
        shutil.copyfile(base + os.sep + 'bias' + os.sep + 'Zero.fits',
                        base + os.sep + dirg + os.sep + 'Zero.fits')
def iraf_flpr():
    """Flush the IRAF process cache.

    IRAF lore says flpr must be issued three times to reliably take
    effect, so do exactly that.
    """
    for _ in range(3):
        iraf.flpr()
def combine_bias(blst):
    # Average-combine the bias frames listed in `blst` into 'Zero' with
    # minmax rejection (drop only the single highest pixel, nhigh=1).
    iraf.zerocombine(input='@' + blst, output='Zero', combine='average',
                     reject='minmax', ccdtype='', process=False,
                     delete=False, clobber=False, scale='none',
                     statsec='', nlow=0, nhigh=1, nkeep=1, mclip=True,
                     lsigma=3.0, hsigma=3.0,
                     rdnoise='CCDRON', gain='CCDGAIN',  # header keywords
                     snoise=0.0, pclip=-0.5, blank=0.0)
    iraf.flpr()
    print '<<<<<combine bias successfully>>>>>'
def run_tweakshifts(asn_direct, verbose=False, clean=True):
    """
    run_tweakshifts(asn_direct)

    asn_direct - filename of ASN table of direct images [...]_asn.fits

    This routine only uses dither.tweakshifts to compute the relative
    shifts of the direct images
    """
    from pyraf import iraf
    from iraf import stsdas, dither
    no = iraf.no
    yes = iraf.yes
    INDEF = iraf.INDEF
    root = asn_direct.split('_asn.fits')[0]  # .lower()
    # Remove any stale reference image from a previous run (best effort).
    try:
        os.remove(root + '_tweak.fits')
    except:
        pass
    # Flush the IRAF process cache before running the task.
    iraf.flpr()
    iraf.flpr()
    iraf.flpr()
    # Translate the Python bool into the IRAF yes/no parameter value.
    if clean:
        clean = iraf.yes
    else:
        clean = iraf.no
    iraf.unlearn('tweakshifts')
    # Catalog-based matching; shifts are written to <root>_shifts.txt.
    status = iraf.tweakshifts(input=asn_direct, shiftfile='',
        reference=root + '_tweak.fits',
        output=root + '_shifts.txt', findmode='catalog',
        gencatalog='daofind', sextractpars='',
        undistort=yes, computesig=yes, idckey='idctab',
        clean=clean, verbose=no, catfile='', xcol=1, ycol=2,
        fluxcol=3, fluxmax=INDEF, fluxmin=INDEF, fluxunits='counts',
        nbright=INDEF, refcat='', refxcol=1, refycol=2, rfluxcol=3,
        rfluxmax=INDEF, rfluxmin=INDEF, rfluxunits='counts',
        refnbright=INDEF, minobj=15, nmatch=30, matching='tolerance',
        xyxin=INDEF, xyyin=INDEF, tolerance=4.0, fwhmpsf=1.5,
        sigma=0.0, datamin=INDEF, datamax=INDEF, threshold=4.0,
        nsigma=1.5, fitgeometry='shift', function='polynomial',
        maxiter=3, reject=3.0, crossref='', margin=50, tapersz=50,
        pad=no, fwhm=7.0, ellip=0.05, pa=45.0, fitbox=7,
        Stdout=1)
    if verbose:
        for line in status:
            print line
    return status
def runApphot(imfile, coofile=None, magfile=None):
    """ use iraf.digiphot.apphot to collect aperture photometry """
    # imfile  : FITS image to measure.
    # coofile : coordinate list (defaults to <imfile>.coo).
    # magfile : output photometry file (defaults to <imfile>.mag,
    #           removed first if it already exists).
    # Returns the parsed source list, or [] for an unsupported filter.
    import pyraf
    from pyraf import iraf
    import os
    import pyfits
    if not coofile:
        coofile = imfile.replace('.fits', '.coo')
    if not magfile:
        magfile = imfile.replace('.fits', '.mag')
    iraf.digiphot(_doprint=0)
    iraf.apphot(_doprint=0)
    if os.path.exists(magfile):
        os.remove(magfile)
    # Work out which filter keyword this image uses.
    # NOTE(review): if neither FILTER nor FILTER1 is present, `filt` is
    # unbound and the membership test below raises NameError -- confirm
    # all inputs carry one of these keywords.
    hdr = pyfits.getheader(imfile)
    if 'FILTER' in hdr:
        filt = hdr['FILTER']
        filtkey = 'FILTER'
    elif 'FILTER1' in hdr:
        filt = hdr['FILTER1']
        filtkey = 'FILTER1'
        # Two-wheel instruments: a CLEAR element means the real filter
        # is in the second wheel.
        if filt.startswith('CLEAR'):
            filt = hdr['FILTER2']
            filtkey = 'FILTER2'
    # Only filters with a known aperture correction are supported.
    if filt not in APCOR.keys():
        return ([])
    # iraf.digiphot.apphot.datapars :
    iraf.datapars.scale = 1.0
    iraf.datapars.fwhmpsf = 2.5
    iraf.datapars.emission = True
    iraf.datapars.sigma = 'INDEF'
    iraf.datapars.datamin = 'INDEF'
    iraf.datapars.datamax = 'INDEF'
    iraf.datapars.noise = 'constant'
    iraf.datapars.ccdread = ''
    iraf.datapars.gain = ''
    iraf.datapars.readnoise = 0.0
    iraf.datapars.epadu = 1.0
    iraf.datapars.exposure = ''
    iraf.datapars.airmass = ''
    iraf.datapars.filter = filt
    iraf.datapars.obstime = ''
    iraf.datapars.itime = 1.0
    iraf.datapars.xairmass = 'INDEF'
    iraf.datapars.ifilter = 'INDEF'
    iraf.datapars.otime = 'INDEF'
    # iraf.digiphot.apphot.centerpars :
    iraf.unlearn(iraf.centerpars)
    iraf.centerpars.calgorithm = 'centroid'
    iraf.centerpars.cbox = 3.0
    iraf.centerpars.cthreshold = 0.0
    iraf.centerpars.minsnratio = 1.0
    iraf.centerpars.cmaxiter = 10.0
    iraf.centerpars.maxshift = 1.0
    iraf.centerpars.clean = False
    iraf.centerpars.rclean = 1.0
    iraf.centerpars.rclip = 2.0
    iraf.centerpars.kclean = 3.0
    iraf.centerpars.mkcenter = False
    # iraf.digiphot.apphot.fitskypars :
    iraf.unlearn(iraf.fitskypars)
    iraf.fitskypars.salgorithm = 'median'
    iraf.fitskypars.annulus = 25.0
    iraf.fitskypars.dannulus = 40.0
    iraf.fitskypars.skyvalue = 0.0
    iraf.fitskypars.smaxiter = 10.0
    iraf.fitskypars.sloclip = 0.0
    iraf.fitskypars.shiclip = 0.0
    iraf.fitskypars.snreject = 50.0
    iraf.fitskypars.sloreject = 3.0
    iraf.fitskypars.shireject = 3.0
    iraf.fitskypars.khist = 3.0
    iraf.fitskypars.binsize = 0.1
    iraf.fitskypars.smooth = False
    iraf.fitskypars.rgrow = 0.0
    iraf.fitskypars.mksky = False
    # iraf.digiphot.apphot.photpars :
    iraf.unlearn(iraf.photpars)
    iraf.photpars.weighting = 'constant'
    iraf.photpars.apertures = '2,3,4,5'
    iraf.photpars.zmag = ZPT[filt]  # zeropoint for this filter
    iraf.photpars.mkapert = False
    iraf.unlearn(iraf.phot)
    # Flush graphics and the IRAF process cache before running phot.
    iraf.gflush()
    iraf.gflush()
    iraf.flpr()
    iraf.flpr()
    iraf.flpr()
    photparams = {
        'interac': False,
        'radplot': False,
    }
    outputstuff = iraf.phot(image=imfile, skyfile='', coords=coofile,
                            output=magfile, verify=False, verbose=True,
                            Stdout=1, **photparams)
    sourcelist = apphotOutput(magfile)
    return (sourcelist)
def plot_shifts(
    ROOT_DIRECT,
    ALIGN_IMAGE,
    clean=True,
    verbose=True,
    ALIGN_EXTENSION=0,
    toler=3,
    skip_swarp=False,
    threshold=7,
    force=False,
    drz=True,
    WEIGHT_IMAGE=None,
):
    """
    Run SExtractor on two images and match the objects to plot the shifts
    between them.

    ALIGN_IMAGE is a string that may contain wildcards, and the function
    will use `align_img_list` to find ALIGN_IMAGEs

    A figure with the offset scatter (left) and an offset vector field
    (right) is written to <ROOT_DIRECT>_align.pdf.
    """
    import glob
    from pyraf import iraf
    from iraf import stsdas, dither

    no = iraf.no
    yes = iraf.yes
    INDEF = iraf.INDEF

    # Reuse an existing SWarp product unless force=True
    # (bitwise & on the two boolean operands, which works for bools)
    if os.path.exists(ROOT_DIRECT + "_align.fits") & (not force):
        if verbose:
            print "Image %s_align.fits exists. Skipping SWarp." % (ROOT_DIRECT)
        skip_swarp = True

    if not skip_swarp:
        # Collect the alignment frames that overlap the target image
        if drz:
            align_img_list = find_align_images_that_overlap(
                ROOT_DIRECT + "_drz.fits", ALIGN_IMAGE, ALIGN_EXTENSION=ALIGN_EXTENSION
            )
        else:
            align_img_list = glob.glob(ALIGN_IMAGE)
        if not align_img_list:
            print "threedhst.shifts.align_to_reference: no alignment images overlap."
            return 0, 0
        #
        try:
            os.remove(ROOT_DIRECT + "_align.fits")
        except:
            pass
        # Resample the alignment frames onto the target pixel grid
        if drz:
            matchImagePixels(
                input=align_img_list,
                matchImage=ROOT_DIRECT + "_drz.fits",
                output=ROOT_DIRECT + "_align.fits",
                match_extension=1,
                input_extension=ALIGN_EXTENSION,
            )
            ALIGN_FITS = ROOT_DIRECT + "_align.fits"
        else:
            ALIGN_FITS = os.path.basename(ROOT_DIRECT.split(".fits")[0]) + "_align.fits"
            matchImagePixels(
                input=align_img_list,
                matchImage=ROOT_DIRECT,
                output=ALIGN_FITS,
                match_extension=0,
                input_extension=ALIGN_EXTENSION,
            )
    # NOTE(review): when skip_swarp is True, ALIGN_FITS is never assigned in
    # this function, so se.sextractImage(ALIGN_FITS) below would raise a
    # NameError -- confirm callers only skip SWarp when the expected
    # *_align.fits product already exists and this path is never exercised.

    se = threedhst.sex.SExtractor()
    se.aXeParams()
    se.copyConvFile()
    se.overwrite = True
    se.options["CHECKIMAGE_TYPE"] = "NONE"
    se.options["WEIGHT_TYPE"] = "MAP_WEIGHT"
    if drz:
        se.options["WEIGHT_IMAGE"] = ROOT_DIRECT + "_drz.fits[1]"
    else:
        if WEIGHT_IMAGE:
            se.options["WEIGHT_IMAGE"] = WEIGHT_IMAGE
        else:
            se.options["WEIGHT_TYPE"] = "NONE"
            se.options["WEIGHT_IMAGE"] = "NONE"
    se.options["FILTER"] = "Y"
    ## Detect thresholds (default = 1.5)
    se.options["DETECT_THRESH"] = "%f" % (threshold)
    se.options["ANALYSIS_THRESH"] = "%f" % (threshold)
    se.options["MAG_ZEROPOINT"] = str(threedhst.options["MAG_ZEROPOINT"])

    #### Run SExtractor on direct and alignment images
    ## direct image
    se.options["CATALOG_NAME"] = "direct.cat"
    if drz:
        status = se.sextractImage(ROOT_DIRECT + "_drz.fits[0]")
        INPUT_IMAGE = ROOT_DIRECT + "_drz.fits"
    else:
        status = se.sextractImage(ROOT_DIRECT)
        INPUT_IMAGE = ROOT_DIRECT

    ## alignment image
    se.options["CATALOG_NAME"] = "align.cat"
    se.options["WEIGHT_TYPE"] = "NONE"
    status = se.sextractImage(ALIGN_FITS)

    ## Read the catalogs
    directCat = threedhst.sex.mySexCat("direct.cat")
    alignCat = threedhst.sex.mySexCat("align.cat")

    # xshift/yshift are never updated in this routine (it only plots), so
    # the match loop below runs with zero offsets on every pass.
    xshift = 0
    yshift = 0
    rot = 0
    scale = 1.0

    xrms = 2
    yrms = 2

    # NOTE(review): this NITER loop never changes any of its inputs between
    # iterations, so the same match is recomputed 5 times -- presumably a
    # copy-paste remnant of align_to_reference(); confirm before changing.
    NITER = 5
    IT = 0
    while IT < NITER:
        IT = IT + 1

        #### Get x,y coordinates of detected objects
        ## direct image
        fp = open("direct.xy", "w")
        for i in range(len(directCat.X_IMAGE)):
            fp.write("%s %s\n" % (directCat.X_IMAGE[i], directCat.Y_IMAGE[i]))
        fp.close()

        ## alignment image (with the cumulative shift applied)
        fp = open("align.xy", "w")
        for i in range(len(alignCat.X_IMAGE)):
            fp.write("%s %s\n" % (np.float(alignCat.X_IMAGE[i]) + xshift,
                                  np.float(alignCat.Y_IMAGE[i]) + yshift))
        fp.close()

        # Flush the IRAF process cache before running xyxymatch
        iraf.flpr()
        iraf.flpr()
        iraf.flpr()

        #### iraf.xyxymatch to find matches between the two catalogs
        pow = toler * 1.0
        try:
            os.remove("align.match")
        except:
            pass
        status1 = iraf.xyxymatch(
            input="direct.xy",
            reference="align.xy",
            output="align.match",
            tolerance=2 ** pow,
            separation=0,
            verbose=yes,
            Stdout=1,
        )
        # Keep doubling the match tolerance while xyxymatch reports 0 matches
        while status1[-1].startswith("0"):
            pow += 1
            os.remove("align.match")
            status1 = iraf.xyxymatch(
                input="direct.xy",
                reference="align.xy",
                output="align.match",
                tolerance=2 ** pow,
                separation=0,
                verbose=yes,
                Stdout=1,
            )

    #### Images are aligned, plot the offsets
    dx, dy, ax, ay, di, ai = np.loadtxt("align.match", unpack=True)
    ddx, ddy = dx - ax, dy - ay
    # Sigma-clip the residuals around their biweight location
    keep = (np.abs(ddx) < 15) & (np.abs(ddy) < 15)
    for i in range(5):
        sx, sy = threedhst.utils.biweight(ddx[keep], both=True), threedhst.utils.biweight(ddy[keep], both=True)
        keep = keep & (np.abs(ddx - sx[0]) < 5 * sx[1]) & (np.abs(ddy - sy[0]) < 5 * sy[1])

    if USE_PLOT_GUI:
        fig = plt.figure(figsize=[8, 4], dpi=100)
    else:
        fig = Figure(figsize=[8, 4], dpi=100)

    fig.subplots_adjust(wspace=0.28, hspace=0.0, left=0.08, bottom=0.14, right=0.98, top=0.98)

    # Left panel: scatter of the residual offsets
    # ('ax' is rebound here from the match column to the matplotlib axes)
    ax = fig.add_subplot(121)
    ax.plot(ddx, ddy, marker="o", linestyle="None", color="black", alpha=0.4, ms=2, zorder=-1)
    ax.errorbar(sx[0], sy[0], sx[1], sy[1], marker="o", ms=0.1, color="white", linewidth=3, zorder=100)
    ax.errorbar(sx[0], sy[0], sx[1], sy[1], marker="o", ms=0.1, color="red", linewidth=2, zorder=500)
    ax.grid(alpha=0.5)
    dwin = 5 * np.max([sx[1], sy[1]])
    ax.set_xlim(sx[0] - dwin, sx[0] + dwin)
    ax.set_ylim(sy[0] - dwin, sy[0] + dwin)
    ax.set_xlabel(r"$\Delta x$ [pix]")
    ax.set_ylabel(r"$\Delta y$ [pix]")
    ax.text(0.5, 0.95, os.path.basename(INPUT_IMAGE), fontsize=9, horizontalalignment="center", transform=ax.transAxes)
    ax.text(0.5, 0.9, os.path.basename(ALIGN_IMAGE), fontsize=9, horizontalalignment="center", transform=ax.transAxes)
    ax.text(
        0.5,
        0.1,
        r"$\Delta x, \Delta y = %.2f \pm %.2f, %.2f \pm %.2f)$" % (sx[0], sx[1], sy[0], sy[1]),
        fontsize=11,
        horizontalalignment="center",
        transform=ax.transAxes,
    )

    # Right panel: vector field of the offsets
    ax = fig.add_subplot(122)
    ax.plot(dx[keep], dy[keep], marker="o", ms=1, linestyle="None", color="black", alpha=0.1)
    ax.quiver(
        dx[keep],
        dy[keep],
        ddx[keep],
        ddy[keep],
        alpha=0.5,
        angles="xy",
        headlength=1,
        headwidth=1,
        scale=30.0 / (dx.max() - dx.min()),
        units="x",
        minlength=1,
    )
    # 1-pixel reference arrow near the top of the panel
    aa = np.array([1, 1])
    ax.quiver(
        dx[keep].mean() * aa,
        dy[keep].max() * 0.95 * aa,
        1 * aa,
        0 * aa,
        alpha=0.9,
        angles="xy",
        headlength=0,
        headwidth=1,
        scale=30.0 / (dx.max() - dx.min()),
        units="x",
        color="red",
    )
    ax.set_xlabel(r"$x$ [pix]")
    ax.set_ylabel(r"$y$ [pix]")

    if USE_PLOT_GUI:
        fig.savefig(os.path.basename(ROOT_DIRECT.split(".fits")[0]) + "_align.pdf", dpi=100, transparent=False)
    else:
        canvas = FigureCanvasAgg(fig)
        canvas.print_figure(os.path.basename(ROOT_DIRECT.split(".fits")[0]) + "_align.pdf", dpi=100, transparent=False)

    # Remove the scratch files produced above
    if clean:
        rmfiles = [
            "SCI.fits",
            "WHT.fits",
            "align.cat",
            "align.map",
            "align.match",
            "align.reg",
            "align.xy",
            "direct.cat",
            "direct.reg",
            "direct.xy",
            "drz_sci.fits",
            "drz_wht.fits",
            "bg.fits",
        ]
        for file in rmfiles:
            try:
                os.remove(file)
            except:
                pass
def run_tweakshifts(asn_direct, verbose=False, clean=True): """ run_tweakshifts(asn_direct) asn_direct - filename of ASN table of direct images [...]_asn.fits This routine only uses dither.tweakshifts to compute the relative shifts of the direct images """ from pyraf import iraf from iraf import stsdas, dither no = iraf.no yes = iraf.yes INDEF = iraf.INDEF root = asn_direct.split("_asn.fits")[0] # .lower() try: os.remove(root + "_tweak.fits") except: pass iraf.flpr() iraf.flpr() iraf.flpr() if clean: clean = iraf.yes else: clean = iraf.no iraf.unlearn("tweakshifts") status = iraf.tweakshifts( input=asn_direct, shiftfile="", reference=root + "_tweak.fits", output=root + "_shifts.txt", findmode="catalog", gencatalog="daofind", sextractpars="", undistort=yes, computesig=yes, idckey="idctab", clean=clean, verbose=no, catfile="", xcol=1, ycol=2, fluxcol=3, fluxmax=INDEF, fluxmin=INDEF, fluxunits="counts", nbright=INDEF, refcat="", refxcol=1, refycol=2, rfluxcol=3, rfluxmax=INDEF, rfluxmin=INDEF, rfluxunits="counts", refnbright=INDEF, minobj=15, nmatch=30, matching="tolerance", xyxin=INDEF, xyyin=INDEF, tolerance=4.0, fwhmpsf=1.5, sigma=0.0, datamin=INDEF, datamax=INDEF, threshold=4.0, nsigma=1.5, fitgeometry="shift", function="polynomial", maxiter=3, reject=3.0, crossref="", margin=50, tapersz=50, pad=no, fwhm=7.0, ellip=0.05, pa=45.0, fitbox=7, Stdout=1, ) if verbose: for line in status: print line return status
def rectify(ids=None, fs=None): iraf.cd('work') if ids is None: ids = np.array(glob('id2/arc*id2*.db')) if fs is None: fs = glob('mos/*mos*.fits') if len(ids) == 0: print "WARNING: No wavelength solutions for rectification." iraf.cd('..') return if len(fs) == 0: print "WARNING: No images for rectification." iraf.cd('..') return # Get the grating angles of the solution files idgas = [] for i, thisid in enumerate(ids): f = open(thisid) idlines = np.array(f.readlines(), dtype=str) f.close() idgaline = idlines[np.char.startswith(idlines, '#graang')][0] idgas.append(float(idgaline.split('=')[1])) ims, gas = get_scis_and_arcs(fs) if not os.path.exists('rec'): os.mkdir('rec') for i, f in enumerate(ims): fname = f.split('/')[1] typestr = fname[:3] ga, imgnum = gas[i], fname[-9:-5] outfile = 'rec/' + typestr + '%05.2frec' % (ga) + imgnum + '.fits' iraf.unlearn(iraf.specrectify) iraf.flpr() idfile = ids[np.array(idgas) == ga][0] iraf.specrectify(images=f, outimages=outfile, solfile=idfile, outpref='', function='legendre', order=3, inttype='interp', conserve='yes', clobber='yes', verbose='yes') # Update the BPM to mask any blank regions h = pyfits.open(outfile, 'update') # Cover the chip gaps. The background task etc do better if the chip # gaps are straight # To deal with this we just throw away the min and max of each side of # the curved chip gap chipgaps = get_chipgaps(h) # Chip 1 h[2].data[:, chipgaps[0][0]:chipgaps[0][1]] = 1 # Chip 2 h[2].data[:, chipgaps[1][0]:chipgaps[1][1]] = 1 # edge of chip 3 h[2].data[:, chipgaps[2][0]:chipgaps[2][1]] = 1 # Cover the other blank regions h[2].data[[h[1].data == 0]] = 1 # Set all of the data to zero in the BPM h[1].data[h[2].data == 1] = 0.0 h.flush() h.close() iraf.cd('..')
def align_to_reference(
    ROOT_DIRECT,
    ALIGN_IMAGE,
    fitgeometry="shift",
    clean=True,
    verbose=False,
    ALIGN_EXTENSION=0,
    toler=3,
    skip_swarp=False,
    align_sdss_ds9=False,
    catalog=None,
):
    """
    xshift, yshift, rot, scale, xrms, yrms = align_to_reference()

    Iteratively measure the offset between the direct mosaic
    (ROOT_DIRECT+'_drz.fits') and an alignment reference, which is either a
    set of ALIGN_IMAGE frames, a Vizier/DS9 reference catalog
    (align_sdss_ds9=True), or an explicit `catalog`.

    Returns the cumulative (xshift, yshift, rot, scale, xrms, yrms) from
    iraf.geomap, or (0, 0) when no alignment images overlap.
    """
    import os
    import glob
    import shutil

    from pyraf import iraf
    from iraf import stsdas, dither

    import threedhst
    from threedhst import catIO

    no = iraf.no
    yes = iraf.yes
    INDEF = iraf.INDEF

    #### Clean slate: remove scratch files from any previous run.
    #### (The original list was missing a comma between "direct.cat" and
    #### "align.map"; the strings concatenated into one bogus name so
    #### neither file was ever removed.)
    rmfiles = [
        "SCI.fits",
        "WHT.fits",
        "align.cat",
        "direct.cat",
        "align.map",
        "align.match",
        "align.reg",
        "align.xy",
        "direct.reg",
        "direct.xy",
        "ds9_align.tsv",
    ]
    for file in rmfiles:
        try:
            os.remove(file)
        except:
            pass

    if catalog is not None:
        align_sdss_ds9 = True

    #### Get only images that overlap from the ALIGN_IMAGE list
    if not align_sdss_ds9:
        align_img_list = find_align_images_that_overlap(
            ROOT_DIRECT + "_drz.fits", ALIGN_IMAGE, ALIGN_EXTENSION=ALIGN_EXTENSION
        )
        if not align_img_list:
            print("threedhst.shifts.align_to_reference: no alignment images overlap.")
            return 0, 0

    #### Use swarp to combine the alignment images to the same image
    #### dimensions as the direct mosaic
    if (not skip_swarp) & (not align_sdss_ds9):
        try:
            os.remove(ROOT_DIRECT + "_align.fits")
        except:
            pass
        matchImagePixels(
            input=align_img_list,
            matchImage=ROOT_DIRECT + "_drz.fits",
            output=ROOT_DIRECT + "_align.fits",
            match_extension=1,
            input_extension=ALIGN_EXTENSION,
        )

    #### Run SExtractor on the direct image, with the WHT
    #### extension as a weight image
    se = threedhst.sex.SExtractor()
    se.aXeParams()
    se.copyConvFile()
    se.overwrite = True
    se.options["CHECKIMAGE_TYPE"] = "NONE"
    se.options["WEIGHT_TYPE"] = "MAP_WEIGHT"
    se.options["WEIGHT_IMAGE"] = "WHT.fits"
    se.options["FILTER"] = "Y"
    ## Detect thresholds (default = 1.5)
    THRESH = 10
    if align_sdss_ds9:
        if "Vizier" not in REFERENCE_CATALOG:
            THRESH = 20
    se.options["DETECT_THRESH"] = "%d" % (THRESH)
    se.options["ANALYSIS_THRESH"] = "%d" % (THRESH)
    se.options["MAG_ZEROPOINT"] = str(threedhst.options["MAG_ZEROPOINT"])

    #### Run SExtractor on direct and alignment images
    ## direct image
    se.options["CATALOG_NAME"] = "direct.cat"
    iraf.imcopy(ROOT_DIRECT + "_drz.fits[1]", "SCI.fits")
    iraf.imcopy(ROOT_DIRECT + "_drz.fits[2]", "WHT.fits")
    status = se.sextractImage("SCI.fits")

    ## Read the catalog
    directCat = threedhst.sex.mySexCat("direct.cat")

    if align_sdss_ds9:
        ### Use ds9 SDSS catalog to refine alignment
        import threedhst.dq
        import pywcs
        import threedhst.catIO as catIO

        wcs = pywcs.WCS(pyfits.getheader("SCI.fits", 0))
        # wcs = pywcs.WCS(pyfits.getheader('Q0821+3107-F140W_drz.fits', 1))

        if "Vizier" in REFERENCE_CATALOG:
            #### Use (unstable) astroquery Vizier search
            #### CFHTLS-Deep: 'Vizier.II/317'
            VIZIER_CAT = REFERENCE_CATALOG.split("Vizier.")[1]
            print("Align to Vizier catalog: http://vizier.u-strasbg.fr/viz-bin/VizieR?-source=%s" % (VIZIER_CAT))

            import astroquery
            if astroquery.__version__ < "0.0.dev1078":
                from astroquery import vizier

                query = {}
                query["-source"] = VIZIER_CAT
                # query["-out"] = ["_r", "CFHTLS", "rmag"]
                query["-out"] = ["_RAJ2000", "_DEJ2000"]  ### Just RA/Dec.

                #### Center position and query radius
                r0, d0 = wcs.wcs_pix2sky([[wcs.naxis1 / 2.0, wcs.naxis2 / 2.0]], 1)[0]
                rll, dll = wcs.wcs_pix2sky([[0, 0]], 1)[0]
                corner_radius = (
                    np.sqrt((r0 - rll) ** 2 * np.cos(d0 / 360.0 * 2 * np.pi) ** 2 + (d0 - dll) ** 2) * 60.0 * 1.5
                )
                h = query["-c"] = "%.6f %.6f" % (r0, d0)
                query["-c.rm"] = "%.3f" % (corner_radius)  ### xxx check image size

                #### Run the query
                vt = vizier.vizquery(query)
            else:
                #### Newer astroquery
                from astroquery.vizier import Vizier
                import astropy.coordinates as coord
                import astropy.units as u

                Vizier.ROW_LIMIT = -1

                r0, d0 = wcs.wcs_pix2sky([[wcs.naxis1 / 2.0, wcs.naxis2 / 2.0]], 1)[0]
                rll, dll = wcs.wcs_pix2sky([[0, 0]], 1)[0]
                corner_radius = (
                    np.sqrt((r0 - rll) ** 2 * np.cos(d0 / 360.0 * 2 * np.pi) ** 2 + (d0 - dll) ** 2) * 60.0 * 1.5
                )
                #
                c = coord.ICRSCoordinates(ra=r0, dec=d0, unit=(u.deg, u.deg))
                #### something with astropy.coordinates
                # c.icrs.ra.degree = c.icrs.ra.degrees
                # c.icrs.dec.degree = c.icrs.dec.degrees
                #
                vt = Vizier.query_region(c, width=u.Quantity(corner_radius, u.arcminute), catalog=[VIZIER_CAT])[0]

            #### Make a region file
            ra_list, dec_list = vt["RAJ2000"], vt["DEJ2000"]
            print("Vizier, found %d objects." % (len(ra_list)))

            fp = open("%s.vizier.reg" % (ROOT_DIRECT), "w")
            fp.write("# %s, r=%.1f'\nfk5\n" % (VIZIER_CAT, corner_radius))
            for ra, dec in zip(ra_list, dec_list):
                fp.write('circle(%.6f, %.6f, 0.5")\n' % (ra, dec))
            #
            fp.close()
        else:
            #### Use DS9 catalog
            ds9 = threedhst.dq.myDS9()
            ds9.set("file SCI.fits")
            # ds9.set('file Q0821+3107-F140W_drz.fits')
            ds9.set("catalog %s" % (REFERENCE_CATALOG))
            ### Can't find XPA access point for "copy to regions"
            ds9.set("catalog export tsv ds9_align.tsv")
            lines = open("ds9_align.tsv").readlines()
            ra_list, dec_list = [], []
            for line in lines[1:]:
                spl = line.split()
                ra, dec = float(spl[0]), float(spl[1])
                ra_list.append(ra)
                dec_list.append(dec)
            #
            del (ds9)

        #### Project the reference positions into image coordinates
        x_image, y_image = [], []
        for ra, dec in zip(ra_list, dec_list):
            x, y = wcs.wcs_sky2pix([[ra, dec]], 1)[0]
            x_image.append(x)
            y_image.append(y)

        alignCat = catIO.EmptyCat()
        alignCat["X_IMAGE"] = np.array(x_image)
        alignCat["Y_IMAGE"] = np.array(y_image)
    else:
        ## alignment image
        se.options["CATALOG_NAME"] = "align.cat"
        status = se.sextractImage(ROOT_DIRECT + "_align.fits")
        alignCat = threedhst.sex.mySexCat("align.cat")

    # Cumulative transformation, refined on each iteration below
    xshift = 0
    yshift = 0
    rot = 0
    scale = 1.0

    xrms = 2
    yrms = 2

    NITER = 5
    IT = 0
    while IT < NITER:
        IT = IT + 1

        #### Get x,y coordinates of detected objects
        ## direct image
        fp = open("direct.xy", "w")
        for i in range(len(directCat.X_IMAGE)):
            fp.write("%s %s\n" % (directCat.X_IMAGE[i], directCat.Y_IMAGE[i]))
        fp.close()

        ## alignment image, with the shift found so far applied
        fp = open("align.xy", "w")
        for i in range(len(alignCat.X_IMAGE)):
            fp.write("%s %s\n" % (np.float(alignCat.X_IMAGE[i]) + xshift, np.float(alignCat.Y_IMAGE[i]) + yshift))
        fp.close()

        # Flush the IRAF process cache before running the tasks
        iraf.flpr()
        iraf.flpr()
        iraf.flpr()

        #### iraf.xyxymatch to find matches between the two catalogs
        pow = toler * 1.0
        try:
            os.remove("align.match")
        except:
            pass
        status1 = iraf.xyxymatch(
            input="direct.xy",
            reference="align.xy",
            output="align.match",
            tolerance=2 ** pow,
            separation=0,
            verbose=yes,
            Stdout=1,
        )

        nmatch = 0
        # Grow the match tolerance until xyxymatch reports matches and we
        # have at least 10 of them
        while status1[-1].startswith("0") | (nmatch < 10):
            pow += 1
            os.remove("align.match")
            status1 = iraf.xyxymatch(
                input="direct.xy",
                reference="align.xy",
                output="align.match",
                tolerance=2 ** pow,
                separation=0,
                verbose=yes,
                Stdout=1,
            )
            #
            nmatch = 0
            # (plain file iteration; py2-only xreadlines() removed)
            for line in open("align.match"):
                nmatch += 1

        if verbose:
            for line in status1:
                print(line)

        #### Compute shifts with iraf.geomap
        iraf.flpr()
        iraf.flpr()
        iraf.flpr()
        try:
            os.remove("align.map")
        except:
            pass
        status2 = iraf.geomap(
            input="align.match",
            database="align.map",
            fitgeometry=fitgeometry,
            interactive=no,
            xmin=INDEF,
            xmax=INDEF,
            ymin=INDEF,
            ymax=INDEF,
            maxiter=10,
            reject=2.0,
            Stdout=1,
        )
        if verbose:
            for line in status2:
                print(line)

        # fp = open(root+'.iraf.log','a')
        # fp.writelines(status1)
        # fp.writelines(status2)
        # fp.close()

        #### Parse geomap.output: shifts accumulate, the rest are replaced
        fp = open("align.map", "r")
        for line in fp.readlines():
            spl = line.split()
            if spl[0].startswith("xshift"):
                xshift += float(spl[1])
            if spl[0].startswith("yshift"):
                yshift += float(spl[1])
            if spl[0].startswith("xrotation"):
                rot = float(spl[1])
            if spl[0].startswith("xmag"):
                scale = float(spl[1])
            if spl[0].startswith("xrms"):
                xrms = float(spl[1])
            if spl[0].startswith("yrms"):
                yrms = float(spl[1])
        fp.close()

        # os.system('wc align.match')
        print("Shift iteration #%d, xshift=%f, yshift=%f, rot=%f, scl=%f (rms: %5.2f,%5.2f)" % (
            IT,
            xshift,
            yshift,
            rot,
            scale,
            xrms,
            yrms,
        ))

    # (dropped an unused 'im = pyfits.open("SCI.fits")' that leaked a file
    # handle)
    shutil.copy("align.map", ROOT_DIRECT + "_align.map")
    shutil.copy("align.match", ROOT_DIRECT + "_align.match")

    #### Cleanup
    if clean:
        rmfiles = [
            "SCI.fits",
            "WHT.fits",
            "align.cat",
            "align.map",
            "align.match",
            "align.reg",
            "align.xy",
            "direct.cat",
            "direct.reg",
            "direct.xy",
            "drz_sci.fits",
            "drz_wht.fits",
            "bg.fits",
        ]
        for file in rmfiles:
            try:
                os.remove(file)
            except:
                pass

    return xshift, yshift, rot, scale, xrms, yrms
def drztranback(drzfile, x=0, y=0, fltlist=None, xylistfile='', verbose=True):
    """ convert a set of coordinates from a drz image into
    the coordinate space of each of the contributing _flt files
    returns a list of tuples with (fltfile,ext,x,y)

    drzfile    : drizzled image whose frame the input coordinates are in
    x, y       : scalar coordinates, or two parallel sequences
    fltlist    : contributing _flt files (default: read via getfltlist)
    xylistfile : optional two-column text file of x,y positions; takes
                 precedence over x and y
    verbose    : print progress messages

    The returned list begins with the positions in the drz frame itself
    (extension 1), followed by one entry per _flt SCI extension for every
    position that lands on the chip.
    """
    import os
    import pyfits
    from pyraf import iraf
    from iraf import stsdas
    from numpy import loadtxt, iterable

    stsdas.analysis()
    stsdas.dither()

    # get output (i.e. drizzled) image size
    nxout = pyfits.getval(drzfile, 'NAXIS1')
    nyout = pyfits.getval(drzfile, 'NAXIS2')
    scaleout = 3600 * (abs(pyfits.getval(drzfile, 'CD1_1')) +
                       abs(pyfits.getval(drzfile, 'CD2_2')))
    xscaleout = 7200 * abs(pyfits.getval(drzfile, 'CD1_1'))
    yscaleout = 7200 * abs(pyfits.getval(drzfile, 'CD2_2'))

    # if needed, get a list of contributing flt files and/or coeff files
    if not fltlist:
        fltlist = getfltlist(drzfile)

    # build the list of coordinates, starting with the original drzfile
    # x,y coords
    returnlist = []
    if xylistfile:
        xlist, ylist = loadtxt(xylistfile, unpack=True)
    elif iterable(x):
        xlist, ylist = x, y
    else:
        xlist, ylist = [float(x)], [float(y)]
    # BUGFIX: iterate the normalized xlist/ylist, not the raw x,y arguments
    # (zip(x, y) raised a TypeError for the scalar defaults and ignored
    # xylistfile input entirely)
    for xx, yy in zip(xlist, ylist):
        returnlist.append((drzfile, 1, xx, yy))

    # write out a list of drz-frame x,y positions into a text file.
    # this is used as input to wtranback for all flt files
    xylistfile = util.naming.chsuffix(os.path.basename(drzfile), '_fake.xylist')
    fout = open(xylistfile, 'w')
    for xx, yy in zip(xlist, ylist):
        # (explicit write instead of the py2-only "print >> fout" chevron)
        fout.write("%15.5f %15.5f\n" % (xx, yy))
    fout.close()

    # translate the x,y coords back to _flt coordinates
    # TODO : probably need to allow for up to 2 coeff files
    #   for every flt file, for UVIS and ACS
    # for fltfile,coefffile in zip(fltlist,coefflist) :
    for fltfile in fltlist:
        if verbose:
            print("translating %s to %s coords" % (drzfile, fltfile))
        # find all the sci extensions
        flthdulist = pyfits.open(fltfile)
        extlist = [i for i in range(len(flthdulist))
                   if flthdulist[i].name.lower().startswith('sci')]
        for ext in extlist:
            nxin = pyfits.getval(fltfile, 'NAXIS1', ext=ext)
            nyin = pyfits.getval(fltfile, 'NAXIS2', ext=ext)
            scalein = 3600 * (abs(pyfits.getval(fltfile, 'CD1_1', ext=ext)) +
                              abs(pyfits.getval(fltfile, 'CD2_2', ext=ext)))
            xscalein = 7200 * abs(pyfits.getval(fltfile, 'CD1_1', ext=ext))
            yscalein = 7200 * abs(pyfits.getval(fltfile, 'CD2_2', ext=ext))

            # slimmed down wtranback call from LS: 2011.04.28
            #
            # NOTE: we use the xylistfile as input even when
            # there is only one pair of coordinates
            iraf.wtranback.unlearn()
            iraf.flpr(); iraf.flpr()
            iraf.gflush(); iraf.gflush()

            # coeffile = os.path.join(snworkdir,fltfile[:-5] + '_coeffs1.dat')
            coeffile = fltfile[:-5] + '_coeffs1.dat'
            output = iraf.wtranback(0, 0, nxin=nxin, nxout=nxout,
                                    nyin=nyin, nyout=nyout,
                                    xylist=xylistfile, coeffs=coeffile,
                                    geomode='wcs', refim=drzfile,
                                    inimage=fltfile + '[%i]' % ext,
                                    Stdout=1)
            for line in output:
                if line.startswith(' Xin,Yin:'):
                    xin, yin = map(float, line.split()[1:3])
                    # keep only positions that land on the chip
                    if xin > 0 and xin < nxin and yin > 0 and yin < nyin:
                        returnlist.append((fltfile, ext, float(xin), float(yin)))
    return(returnlist)
def runblots(self, doblots=1, doderivs=1, doCRrej=1, domedian=1, useMedMask=1): """Blot drizzled images from mulitple assocations back to the positions of the input images from the ASN table, looping over the parlists attribute of drizzleImage object. Input parameters are: doblots (0/1): do the blotting? [def: 1(yes)] doderivs (0/1): produce deriv images? [def:1] doCRrej (0/1): do CR rejection to produce masks? [def:1] domedian (0/1): median stack separate drizzled images for CR comp [def:1] useMedMask(0/1): use context images as masks in median stack [def:1] """ # clean out the crmasks dictionary if it has junk in it while self.crmasks: a = self.crmasks.keys()[-1] del self.crmasks[a] curdir = os.getcwd() os.chdir(self.Fits) iraf.flpr('imcombine') iraf.flpr('blot') iraf.flpr('deriv') iraf.flpr('driz_cr') self.runNum = 0 i = 0 for parList in self.parlists: shortparList = self.shortparlists[i] if self.clean_up: self.removeList = [] self.runNum += 1 self._blot_asn(shortparList, parList, doblots=doblots, doderivs=doderivs, \ doCRrej=doCRrej,domedian=domedian,useMedMask=useMedMask) if self.clean_up: for file in self.removeList: try: os.remove(file) except: pass i = i + 1 iraf.flpr('imcombine') iraf.flpr('blot') iraf.flpr('deriv') iraf.flpr('driz_cr') iraf.flpr() os.chdir(curdir) return self.crmasks
os.mkdir(_obsdate) os.system('mv '+img+' '+_obsdate) ''' i = 0 refpath = '' refpath2 = '' datelist = os.listdir(imgdir) datelist.sort() for _dir in datelist: toforget = ['flatcombine','zerocombine','ccdproc','specred.apall','identify','reidentify','specred.standard','dispcor','refspectra','response','apsum','sensfunc','calibrate'] for t in toforget: iraf.unlearn(t) # iraf.set(use_new_imt='no') iraf.flpr() i = i + 1 print 'Working on data from %s' % (_dir) os.chdir(imgdir+_dir) imgsort() os.chdir('CHIP1') os.system('cp /dark/jsamuel/agn/extinction_lasilla.dat ./') os.system('cp /dark/jsamuel/agn/Lines_HgCdHeNeAr600.dat ./') objdict = {} process() extract() if i <= 1: idref = ident() # refpath = imgdir+_dir+'/CHIP1/'+idref+'.fits' refpath = imgdir+_dir+'/CHIP1/database/id'+idref print idref
overwrite=True, output_verify='fix') # if skylevel is in the header, swarp with bkg subtraction has been applyed if 'SKYLEVEL' in head_temp: pssl_temp = head_temp['SKYLEVEL'] # create mask image for template mask = np.abs(data_temp) < 1e-6 fits.writeto('tempmask.fits', mask.astype('i'), overwrite=True) if args.fixpix: try: iraf.flpr() iraf.flpr() iraf.unlearn(iraf.fixpix) cwd = os.getcwd() iraf.fixpix(os.path.join(cwd, imgtarg), os.path.join(cwd, targmask), verbose='yes') iraf.flpr() iraf.flpr() iraf.unlearn(iraf.fixpix) iraf.fixpix(os.path.join(cwd, imgtemp), os.path.join(cwd, tempmask), verbose='yes') iraf.flpr() iraf.flpr() iraf.unlearn(iraf.fixpix)
def extract(fs=None):
    """Extract 1D spectra from the fixpixed 2D images with iraf.apall.

    fs : list of input images (default: fix/*fix*.fits)

    For each image: run apall interactively on the SCI extension into
    x1d/, extract the matching arc with apsum, store the arc as the last
    plane of a 5-plane output cube, and copy a set of header keywords from
    the 2D frame into the extracted spectrum.  Runs inside 'work' and
    returns to the parent directory.
    """
    iraf.cd('work')
    if fs is None:
        fs = glob('fix/*fix*.fits')
    if len(fs) == 0:
        print "WARNING: No fixpixed images available for extraction."
        iraf.cd('..')
        return
    if not os.path.exists('x1d'):
        os.mkdir('x1d')

    print "Note: No continuum? Make nsum small (~-5) with 'line' centered on an emission line."
    for f in fs:
        # Get the output filename without the ".fits"
        outbase = f.replace('fix', 'x1d')[:-5]
        # Get the readnoise, right now assume default value of 5 but we could
        # get this from the header
        readnoise = 5

        # If interactive open the rectified, background subtracted image in ds9
        ds9display(f.replace('fix', 'bkg'))

        # set dispaxis = 1 just in case
        pyfits.setval(f, 'DISPAXIS', extname='SCI', value=1)
        iraf.unlearn(iraf.apall)
        iraf.flpr()
        iraf.apall(input=f + '[SCI]', output=outbase, interactive='yes',
                   review='no', line='INDEF', nsum=-1000, lower=-5, upper=5,
                   b_function='legendre', b_order=5,
                   b_sample='-400:-200,200:400', b_naverage=-10,
                   b_niterate=5, b_low_reject=3.0, b_high_reject=3.0,
                   nfind=1, t_nsum=15, t_step=15, t_nlost=100,
                   t_function='legendre', t_order=5, t_niterate=5,
                   t_low_reject=3.0, t_high_reject=3.0, background='fit',
                   weights='variance', pfit='fit1d', clean='no',
                   readnoise=readnoise, gain=1.0, lsigma=4.0, usigma=4.0,
                   mode='hl')
        # Copy the CCDSUM keyword into the 1d extraction
        pyfits.setval(outbase + '.fits', 'CCDSUM',
                      value=pyfits.getval(f, 'CCDSUM'))

        # Extract the corresponding arc (matched on the image-number part
        # of the filename)
        arcname = glob('nrm/arc' + f.split('/')[1][3:8] + '*.fits')[0]
        # set dispaxis = 1 just in case
        pyfits.setval(arcname, 'DISPAXIS', extname='SCI', value=1)
        iraf.unlearn(iraf.apsum)
        iraf.flpr()
        iraf.apsum(input=arcname + '[SCI]', output='auxext_arc',
                   references=f[:-5] + '[SCI]', interactive='no',
                   find='no', edit='no', trace='no', fittrace='no',
                   extras='no', review='no', background='no', mode='hl')

        # copy the arc into the 5 column of the data cube
        arcfs = glob('auxext_arc*.fits')
        for af in arcfs:
            archdu = pyfits.open(af)
            scihdu = pyfits.open(outbase + '.fits', mode='update')
            d = scihdu[0].data.copy()
            # Grow the cube to 5 planes: the first 4 hold the apall output,
            # the last holds the extracted arc
            scihdu[0].data = np.zeros((5, d.shape[1], d.shape[2]))
            scihdu[0].data[:-1, :, :] = d[:, :, :]
            scihdu[0].data[-1::, :] = archdu[0].data.copy()
            scihdu.flush()
            scihdu.close()
            archdu.close()
            # the auxiliary arc extraction is no longer needed
            os.remove(af)

        # Add the airmass, exptime, and other keywords back into the
        # extracted spectrum header
        kws = ['AIRMASS', 'EXPTIME',
               'PROPID', 'PROPOSER', 'OBSERVER', 'OBSERVAT', 'SITELAT', 'SITELONG',
               'INSTRUME', 'DETSWV', 'RA', 'PM-RA', 'DEC', 'PM-DEC', 'EQUINOX',
               'EPOCH', 'DATE-OBS', 'TIME-OBS', 'UTC-OBS', 'TIMESYS', 'LST-OBS',
               'JD', 'MOONANG', 'OBSMODE', 'DETMODE', 'SITEELEV', 'BLOCKID', 'PA',
               'TELHA', 'TELRA', 'TELDEC', 'TELPA', 'TELAZ', 'TELALT', 'DECPANGL',
               'TELTEM', 'PAYLTEM', 'MASKID', 'MASKTYP', 'GR-ANGLE', 'GRATING',
               'FILTER']
        for kw in kws:
            pyfits.setval(outbase + '.fits', kw, value=pyfits.getval(f, kw))
    iraf.cd('..')
pyfits.writeto(temp_file0+'_tempnoise3.fits', noiseimg, output_verify='fix', clobber=True) else: pssl_temp = 0 print 'variance image already there, do not create noise image' # if skylevel is in the header, swarp with bkg subtraction has been applyed if 'SKYLEVEL' in head_temp: pssl_temp = head_temp['SKYLEVEL'] # create mask image for template data_temp, head_temp mask = np.abs(data_temp) < 1e-6 pyfits.writeto(temp_file0+'_tempmask3.fits',mask.astype('i')) if _fixpix: iraf.flpr(); iraf.flpr() iraf.unlearn(iraf.fixpix) iraf.fixpix('./'+imgtarg, './'+targmask, verbose='no') iraf.flpr(); iraf.flpr() iraf.unlearn(iraf.fixpix) iraf.fixpix('./'+imgtemp, './'+tempmask, verbose='no') iraf.flpr(); iraf.flpr() iraf.unlearn(iraf.fixpix) # hotpants parameters iuthresh = str(sat_targ) # upper valid data count, image iucthresh = str(0.95*sat_targ) # upper valid data count for kernel, image tuthresh = str(sat_temp) # upper valid data count, template tucthresh = str(0.95*sat_temp) # upper valid data count for kernel, template rkernel = str(np.median([10, 2.*max_fwhm, 20])) # convolution kernel half width radius = str(np.median([15, 3.0*max_fwhm, 25])) # HW substamp to extract around each centroid sconv = '-sconv' # all regions convolved in same direction (0)
def rectify(ids=None, fs=None): iraf.cd('work') if ids is None: ids = np.array(glob('id2/arc*id2*.db')) if fs is None: fs = glob('srt/*mos*.fits') if len(ids) == 0: print "WARNING: No wavelength solutions for rectification." iraf.cd('..') return if len(fs) == 0: print "WARNING: No images for rectification." iraf.cd('..') return # Get the grating angles of the solution files idgas = [] for i, thisid in enumerate(ids): f = open(thisid) idlines = np.array(f.readlines(), dtype=str) f.close() idgaline = idlines[np.char.startswith(idlines, '#graang')][0] idgas.append(float(idgaline.split('=')[1])) ims, gas = get_scis_and_arcs(fs) if not os.path.exists('rec'): os.mkdir('rec') for i, f in enumerate(ims): fname = f.split('/')[1] typestr = fname[:3] ga, imgnum = gas[i], fname[-9:-5] outfile = 'rec/' + typestr + '%05.2frec' % (ga) + imgnum + '.fits' iraf.unlearn(iraf.specrectify) iraf.flpr() print('_____idgas_____') print (np.array(idgas)) print('_____ga_____') print (ga) idfile = ids[np.array(idgas) == ga][0] iraf.specrectify(images=f, outimages=outfile, solfile=idfile, outpref='', function='legendre', order=3, inttype='interp', conserve='yes', clobber='yes', verbose='yes') # Update the BPM to mask any blank regions h = pyfits.open(outfile, 'update') # Cover the chip gaps. The background task etc do better if the chip # gaps are straight # To deal with this we just throw away the min and max of each side of # the curved chip gap chipgaps = get_chipgaps(h) print(" -- chipgaps --") print(chipgaps) # Chip 1 h[2].data[:, chipgaps[0][0]:chipgaps[0][1]] = 1 # Chip 2 h[2].data[:, chipgaps[1][0]:chipgaps[1][1]] = 1 # edge of chip 3 h[2].data[:, chipgaps[2][0]:chipgaps[2][1]] = 1 # Cover the other blank regions h[2].data[[h[1].data == 0]] = 1 # Set all of the data to zero in the BPM h[1].data[h[2].data == 1] = 0.0 h.flush() h.close() iraf.cd('..')
def drztranback(drzfile, x=0, y=0, xylistfile="", ext="SCI", verbose=False):
    """ convert a set of coordinates from a drz image into
    the coordinate space of each of the contributing _flt files
    returns a list of tuples with (fltfile,x,y)

    drzfile    : drizzled image whose frame the input coordinates are in
    x, y       : scalar coordinates (passed straight to wtranback)
    xylistfile : optional two-column text file of x,y positions
    ext        : extension of drzfile holding the WCS keywords
    verbose    : print progress messages
    """
    import os
    import pyfits
    from pyraf import iraf
    from iraf import stsdas
    from numpy import loadtxt

    stsdas.analysis()
    stsdas.dither()

    # get output (i.e. drizzled) image size
    nxout = pyfits.getval(drzfile, "NAXIS1", ext=ext)
    nyout = pyfits.getval(drzfile, "NAXIS2", ext=ext)
    scaleout = 3600 * (abs(pyfits.getval(drzfile, "CD1_1", ext=ext)) + abs(pyfits.getval(drzfile, "CD2_2", ext=ext)))
    xscaleout = 7200 * abs(pyfits.getval(drzfile, "CD1_1", ext=ext))
    yscaleout = 7200 * abs(pyfits.getval(drzfile, "CD2_2", ext=ext))

    # get a list of contributing flt files
    fltfilelist = getfltlist(drzfile)
    # NOTE(review): 'scifile' is computed but never used below -- presumably
    # drzfile names end in '_drz.fits'; confirm before relying on it.
    scifile = drzfile[:-8] + "sci.fits"

    # translate the x,y coords back to _flt coordinates;
    # seed the list with the positions in the drz frame itself
    returnlist = []
    if xylistfile:
        xlist, ylist = loadtxt(xylistfile, unpack=True)
        for x, y in zip(xlist, ylist):
            returnlist.append((drzfile, x, y))
    else:
        returnlist.append((drzfile, x, y))

    for fltfile in fltfilelist:
        if verbose:
            print ("translating %s to %s coords" % (drzfile, fltfile))
        # import pdb; pdb.set_trace()
        nxin = pyfits.getval(fltfile, "NAXIS1", ext="SCI")
        nyin = pyfits.getval(fltfile, "NAXIS2", ext="SCI")
        scalein = 3600 * (
            abs(pyfits.getval(fltfile, "CD1_1", ext="SCI")) + abs(pyfits.getval(fltfile, "CD2_2", ext="SCI"))
        )
        xscalein = 7200 * abs(pyfits.getval(fltfile, "CD1_1", ext="SCI"))
        yscalein = 7200 * abs(pyfits.getval(fltfile, "CD2_2", ext="SCI"))
        coeffile = fltfile[:-5] + "_coeffs1.dat"

        # wtranback is better than tranback
        # (flush the IRAF process cache before running it)
        iraf.flpr()
        iraf.flpr()
        iraf.unlearn(iraf.wtranback)
        if ext == "SCI":
            extstr = "[1]"
        else:
            extstr = ""
        output = iraf.wtranback(
            x,
            y,
            nxin=nxin,
            nxout=nxout,
            nyin=nyin,
            nyout=nyout,
            xylist=xylistfile,
            coeffs=coeffile,
            geomode="wcs",
            refim=drzfile + extstr,
            inimage=fltfile + "[sci,1]",
            raref=pyfits.getval(drzfile, "CRVAL1", ext=ext),
            decref=pyfits.getval(drzfile, "CRVAL2", ext=ext),
            xrefpix=pyfits.getval(drzfile, "CRPIX1", ext=ext),
            yrefpix=pyfits.getval(drzfile, "CRPIX2", ext=ext),
            orient=pyfits.getval(drzfile, "ORIENTAT", ext=ext),
            Stdout=1,
        )
        # Parse the ' Xin,Yin:' lines printed by wtranback
        for line in output:
            if line.startswith(" Xin,Yin:"):
                xin, yin = line.split()[1:3]
                returnlist.append((fltfile, float(xin), float(yin)))
    return returnlist
def get_align_to_subaru(sci='M0416_Ks_c1_mp_avg.fits', wht='M0416_Ks_c1_mp_exp.fits', field='', clean=True, toler=3, verbose=False, fitgeometry='shift', shift_max=20, rms_max=1.1, rot_max=2, rot_only=True, THRESH=2, align_data=None):
    """
    Align HAWK-I images to the FF Subaru astrometric reference catalogs.

    Runs SExtractor on `sci`, matches the detected objects against an
    external RA/Dec reference catalog (chosen from the Frontier Fields
    catalogs by substring of `sci`, or passed in via `align_data`),
    iteratively solves for a shift/rotation with iraf.xyxymatch + geomap,
    and applies the result to the image WCS with drizzlepac.

    Returns (xshift, yshift, rot, scale, xrms, yrms).
    """
    #sci='M0416_Ks_c1_mp_avg.fits'; wht='M0416_Ks_c1_mp_exp.fits'

    ### Make object catalog
    se = threedhst.sex.SExtractor()
    se.aXeParams()
    se.copyConvFile()
    se.overwrite = True
    se.options['CHECKIMAGE_TYPE'] = 'NONE'
    if wht is None:
        se.options['WEIGHT_TYPE'] = 'NONE'
    else:
        se.options['WEIGHT_TYPE'] = 'MAP_WEIGHT'
        se.options['WEIGHT_IMAGE'] = wht

    se.options['FILTER'] = 'Y'
    se.options['DETECT_THRESH'] = '%d' %(THRESH)
    se.options['ANALYSIS_THRESH'] = '%d' %(THRESH)
    se.options['MAG_ZEROPOINT'] = '26.0'

    #### Run SExtractor on direct and alignment images
    ## direct image
    se.options['CATALOG_NAME'] = 'direct.cat'
    status = se.sextractImage(sci)
    threedhst.sex.sexcatRegions('direct.cat', 'direct.reg', format=2)
    directCat = threedhst.sex.mySexCat('direct.cat')

    #### Get the X/Y coords of the reference catalog
    #head = pyfits.getheader(sci, 0)
    #wcs = pywcs.WCS(head)
    # reference catalog chosen by cluster-field substring in the filename
    if 'M0416' in sci:
        ra_list, dec_list, mag = np.loadtxt(os.getenv('HAWKI')+'/FrontierFields/HST/hlsp_frontier_subaru_suprimecam_macs0416-astrom_R_v1_cat.txt', unpack=True)
        if ('c4' in sci):
            # chip 4 gets the deeper HST-based catalog
            ra_list, dec_list, mag = np.loadtxt(os.getenv('HAWKI')+'/FrontierFields/HST/M0416/macs0416_f814w_radec.cat', unpack=True)
    #
    if 'M0717' in sci:
        ra_list, dec_list, mag = np.loadtxt('subaru.radec', unpack=True)

    if ('M1149' in sci) | (field == 'M1149'):
        ra_list, dec_list, mag = np.loadtxt('/Users/brammer/Research/VLT/HAWKI/MACS1149/hlsp_frontier_subaru_suprimecam_macs1149-astrom_R_v1_cat.txt', unpack=True)

    if 'A2744' in sci:
        ra_list, dec_list, mag = np.loadtxt(os.getenv('HAWKI')+'/FrontierFields/HST/hlsp_frontier_subaru_suprimecam_abell2744-astrom_i_v1_cat.txt', unpack=True)
        if ('c1' in sci) | ('c4' in sci):
            ra_list, dec_list, mag = np.loadtxt(os.getenv('HAWKI')+'/FrontierFields/HST/abell2744_f814w_radec.cat', unpack=True)

    # explicit catalog overrides any filename-based choice
    if align_data is not None:
        ra_list, dec_list, mag = align_data

    im = pyfits.open(sci)
    print sci
    sh = im[0].shape
    head = im[0].header
    head['CUNIT1'] = 'deg'; head['CUNIT2'] = 'deg'
    wcs = pywcs.WCS(head)

    # NOTE(review): this bare call defeats the try/except below — if
    # wcs_sky2pix is unavailable it raises before the fallback can run.
    x_image, y_image = wcs.wcs_sky2pix(ra_list, dec_list, 1)
    try:
        x_image, y_image = wcs.wcs_sky2pix(ra_list, dec_list, 1)
    except:
        # newer WCS API renamed the method
        x_image, y_image = wcs.wcs_world2pix(ra_list, dec_list, 1)

    # keep only reference objects that land on the image
    # NOTE(review): y_image is compared against sh[1] (the x-axis size);
    # this likely should be sh[0] — confirm (harmless for square frames).
    ok = (x_image > 0) & (y_image > 0) & (x_image < sh[1]) & (y_image < sh[1])

    x_image, y_image = x_image[ok], y_image[ok]

    # region file of the reference positions for visual inspection
    fpr = open('align.reg','w')
    fpr.write('image\n')
    for i in range(ok.sum()):
        fpr.write('circle(%.6f, %.6f,0.3") # color=magenta\n' %(x_image[i], y_image[i]))
    fpr.close()

    # x_image, y_image = [], []
    #
    # for ra, dec in zip(ra_list, dec_list):
    #     x, y = wcs.wcs_sky2pix([[ra, dec]], 1)[0]
    #     if (x > 0) & (y > 0) & (x < sh[1]) & (y < sh[1]):
    #         x_image.append(x)
    #         y_image.append(y)

    alignCat = catIO.EmptyCat()
    alignCat['X_IMAGE'] = np.array(x_image)
    alignCat['Y_IMAGE'] = np.array(y_image)

    # iterative shift solution; offsets accumulate across iterations
    xshift = 0
    yshift = 0
    rot = 0
    scale = 1.

    xrms = 2
    yrms = 2

    NITER = 5
    IT = 0
    while (IT < NITER):
        IT = IT+1

        #### Get x,y coordinates of detected objects
        ## direct image
        fp = open('direct.xy','w')
        for i in range(len(directCat.X_IMAGE)):
            fp.write('%s %s\n' %(directCat.X_IMAGE[i],directCat.Y_IMAGE[i]))
        fp.close()

        ## alignment image, shifted by the current cumulative offset
        fp = open('align.xy','w')
        for i in range(len(alignCat.X_IMAGE)):
            fp.write('%s %s\n' %(np.float(alignCat.X_IMAGE[i])+xshift, np.float(alignCat.Y_IMAGE[i])+yshift))
        fp.close()

        iraf.flpr()
        iraf.flpr()
        iraf.flpr()

        #### iraf.xyxymatch to find matches between the two catalogs
        # matching tolerance starts at 2**toler pixels and is widened
        # until enough matches are found
        pow = toler*1.
        try:
            os.remove('align.match')
        except:
            pass
        status1 = iraf.xyxymatch(input="direct.xy", reference="align.xy", output="align.match", tolerance=2**pow, separation=0, verbose=iraf.yes, Stdout=1)

        # nmatch starts at 0, so the widening loop always runs at least once
        nmatch = 0
        while status1[-1].startswith('0') | (nmatch < 10) | (float(status1[-3].split()[1]) > 40):
            pow+=1
            os.remove('align.match')
            status1 = iraf.xyxymatch(input="direct.xy", reference="align.xy", output="align.match", tolerance=2**pow, separation=0, verbose=iraf.yes, Stdout=1)
            #
            nmatch = 0
            for line in open('align.match').xreadlines( ):
                nmatch += 1

        if verbose:
            for line in status1:
                print line

        #### Compute shifts with iraf.geomap
        iraf.flpr()
        iraf.flpr()
        iraf.flpr()
        try:
            os.remove("align.map")
        except:
            pass
        status2 = iraf.geomap(input="align.match", database="align.map", fitgeometry=fitgeometry, interactive=iraf.no, xmin=iraf.INDEF, xmax=iraf.INDEF, ymin=iraf.INDEF, ymax=iraf.INDEF, maxiter = 10, reject = 2.0, Stdout=1)
        if verbose:
            for line in status2:
                print line

        #fp = open(root+'.iraf.log','a')
        #fp.writelines(status1)
        #fp.writelines(status2)
        #fp.close()

        #### Parse geomap.output: shifts accumulate, rot/scale/rms replace
        fp = open("align.map","r")
        for line in fp.readlines():
            spl = line.split()
            if spl[0].startswith('xshift'):
                xshift += float(spl[1])
            if spl[0].startswith('yshift'):
                yshift += float(spl[1])
            if spl[0].startswith('xrotation'):
                rot = float(spl[1])
            if spl[0].startswith('xmag'):
                scale = float(spl[1])
            if spl[0].startswith('xrms'):
                xrms = float(spl[1])
            if spl[0].startswith('yrms'):
                yrms = float(spl[1])
        fp.close()

        #os.system('wc align.match')
        print 'Shift iteration #%d, xshift=%f, yshift=%f, rot=%f, scl=%f (rms: %5.2f,%5.2f)' %(IT, xshift, yshift, rot, scale, xrms, yrms)

    # diagnostic region files from the final match list
    os.system('cat align.match | grep -v "\#" | grep [0-9] | awk \'{print "circle(", $1, ",", $2, ",4) # color=green"}\' > d.reg')
    os.system('cat align.match | grep -v "\#" | grep [0-9] | awk \'{print "circle(", $3, ",", $4, ",4) # color=magenta"}\' > a.reg')

    shutil.copy('align.map', sci.replace('.fits', '.align.map'))
    shutil.copy('align.match', sci.replace('.fits', '.align.match'))

    #### Cleanup
    if clean:
        rmfiles = ['align.cat', 'align.map','align.match','align.reg','align.xy', 'direct.cat','direct.reg','direct.xy']
        for file in rmfiles:
            try:
                os.remove(file)
            except:
                pass

    # NOTE(review): this handle is never closed explicitly
    fp = open(sci.replace('.fits', '.align.info'), 'w')
    fp.write('# image xshift yshift rot scale xrms yrms\n')
    fp.write('%s %.3f %.3f %.4f %.4f %.3f %.3f\n' %(sci, xshift, yshift, rot, scale, xrms, yrms))

    if (np.abs(xshift) > shift_max) | (np.abs(yshift) > shift_max) | (xrms > rms_max) | (yrms > rms_max):
        print 'Shifts out of allowed range.  Run again with increased shift_max to accept.'
        #return xshift, yshift, rot, scale, xrms, yrms
        ## Add a small shift that should come out easily with another
        ## shift iteration
        xshift, yshift, rot, scale, xrms, yrms = 2,2,0,1.0,-99,-99

    # apply the solution to both the science and weight images
    # NOTE(review): if wht is None this loop passes None to updatehdr — confirm
    for file in [sci, wht]:
        if ('r' in fitgeometry) & rot_only:
            xshift, yshift = 0, 0

        #apply_offsets(file, [[xshift, yshift, rot, scale]])
        from drizzlepac import updatehdr
        updatehdr.updatewcs_with_shift(file, sci, wcsname='DRZWCS',
                        rot=rot,scale=scale,
                        xsh=xshift, ysh=yshift,
                        fit=None,
                        xrms=xrms, yrms = yrms,
                        verbose=False, force=True, sciext=0)

    # for drizzled products, propagate the tweak into each contributing flt
    if '_dr' in sci:
        im = pyfits.open(sci)
        h = im[0].header
        for i in range(h['NDRIZIM']):
            flt_str = h['D%03dDATA' %(i+1)]
            if 'sci,2' in flt_str:
                continue
            #
            flt_im = flt_str.split('[')[0]
            ext = int(flt_str.split('[')[1][:-1].split(',')[1])
            updatehdr.updatewcs_with_shift(flt_im, sci, wcsname='GTWEAK', rot=rot, scale=scale, xsh=xshift, ysh=yshift, fit=None, xrms=xrms, yrms = yrms, verbose=False, force=True, sciext='SCI')

    # im = pyfits.open(file, mode='update')
    # wcs = pywcs.WCS(im[0].header)
    # wcs.rotateCD(-rot)
    # wcs.wcs.cd /= scale
    #
    # im[0].header['CRPIX1'] += xshift
    # im[0].header['CRPIX2'] += yshift
    #
    # for i in [0,1]:
    #     for j in [0,1]:
    #         im[0].header['CD%d_%d' %(i+1, j+1)] = wcs.wcs.cd[i,j]
    #
    # im.flush()

    return xshift, yshift, rot, scale, xrms, yrms
def _medDriz(self, parList, usemask=1):
    """Median-combine the separately drizzled images of this association.

    Parameters
    ----------
    parList : list of dict
        One entry per drizzled image; each dict must provide 'outdata'
        (the drizzled science image) and 'outcontext' (the context image
        used to build a bad-pixel mask).
    usemask : int
        When true, build a .pl pixel mask from each context image
        (0 where no input contributed) and feed them to imcombine as BPMs.

    Returns
    -------
    str : name of the median-combined output image (medriz_<runNum>.fits).

    There is an iraf-dependent kludge in here: iraf cannot apparently
    handle an input string over a certain size (guessing 512 char), so the
    input list is written to a temp file, imcombine_input, and passed to
    imcombine with the "@file" idiom.
    """
    drizList = []
    maskList = []
    iraf.flpr('imcombine')
    iraf.unlearn(iraf.imcalc)
    iraf.imcalc.pixtype = 'short'
    self.logfile.write("Entered _medDriz for asn number %d." % (self.runNum))
    for ii in range(len(parList)):
        # guard against the same image being stacked against itself
        for jj in range(ii + 1, len(parList)):
            if parList[ii]['outdata'] == parList[jj]['outdata']:
                raise KeyError, "requested to median stack same images!"
        drizList.append(parList[ii]['outdata'])
        if usemask:
            # derive the .pl mask name from the context image name
            plmask = string.split(parList[ii]['outcontext'], '.')[0] + '.pl'
            try:
                os.remove(plmask)
            except:
                pass
            if self.verbose:
                print 'making', plmask, 'from', parList[ii]['outcontext'], '...'
            # mask = 0 where the context says no input pixel contributed
            iraf.imcalc(parList[ii]['outcontext'], plmask,
                        "if im1 .eq. 0 then 0 else 1")
            maskList.append(plmask)
            self.removeList.append(plmask)

    # construct input list and add masks info to the headers
    inputString = drizList[0]

    # ***> NOTE!  If list too big, imcombine crashes!
    # ***> NOTE!  define 76 as a safe maximum, or 80...
    MAX_IM = 80
    NumTot = len(drizList)
    NumIm = min(NumTot, MAX_IM)

    for ii in range(NumIm):
        if ii > 0:
            inputString = inputString + ',' + drizList[ii]
        if usemask:
            # record each mask as the BPM of its science image
            fUtil.fixHeader(drizList[ii], [('BPM', maskList[ii])])

    if self.verbose:
        print 'median stacking: ', inputString
        if usemask:
            print '  with masks: ', maskList

    #if that all checks out, go ahead and median
    iraf.unlearn(iraf.imcombine)

    # 15/Apr/2002, jpb: want to keep all the medriz's around for CR-rej debugging
    outfile = 'medriz_' + str(self.runNum) + '.fits'
    # self.removeList.append(outfile)
    try:
        os.remove(outfile)
    except:
        pass

    # temp file for iraf input because the list might be too big.
    filekludge = open("imcombine_input", "w")
    newinputList = inputString.split(',')
    if (NumIm != len(newinputList)):
        errtxt = "ERROR! Error: NumIm != len(newinputList) in _medDriz ?!"
        print errtxt
        self.logfile.write(errtxt)
    for item in newinputList:
        filekludge.write(item + "\n")
    filekludge.close()

    #iraf.imcombine.input = inputString  # this is what we used to do.
    iraf.imcombine.input = "@imcombine_input"
    iraf.imcombine.output = outfile
    # iraf.imcombine.plfile = ''
    iraf.imcombine.sigma = ''
    iraf.imcombine.combine = 'median'
    iraf.imcombine.reject = 'minmax'
    iraf.imcombine.outtype = 'real'
    iraf.imcombine.offsets = 'none'
    if usemask:
        iraf.imcombine.masktype = 'badvalue'
    else:
        iraf.imcombine.masktype = 'none'
    iraf.imcombine.maskvalue = 0.
    iraf.imcombine.scale = 'exposure'
    iraf.imcombine.expname = 'EXPTIME'
    iraf.imcombine.nkeep = 1

    # paradoxically, this is not what we want
    # NumIm = len(drizList)/self.imNsci
    # imcombine considers the total number of images being
    # stacked, not the number at any given point!

    # NOTE: nhigh must be >= NumIm/2 if cr rejection to be done everywhere
    # minmax rejection thresholds tuned per stack size (HRC/WFC dither counts)
    if NumIm == 1:  #1,2
        iraf.imcombine.nlow = 0
        iraf.imcombine.nhigh = 0
    elif NumIm == 2:  # 2
        iraf.imcombine.nlow = 0
        iraf.imcombine.nhigh = 1
    elif NumIm == 3:  # 3
        iraf.imcombine.nlow = 0
        iraf.imcombine.nhigh = 2
    elif NumIm == 4:  # 4 HRC; 2 WFC
        iraf.imcombine.nlow = 0
        iraf.imcombine.nhigh = 3
    elif NumIm == 5:  # 5 HRC
        iraf.imcombine.nlow = 1
        iraf.imcombine.nhigh = 3
    elif NumIm == 6:  # 6 HRC; 3 WFC
        iraf.imcombine.nlow = 1
        iraf.imcombine.nhigh = 4
    elif NumIm == 7:  # 7 HRC
        iraf.imcombine.nlow = 1
        iraf.imcombine.nhigh = 5
    elif NumIm < 10:  # 8,9 HRC; 4 WFC
        iraf.imcombine.nlow = 1
        iraf.imcombine.nhigh = 6
    elif NumIm < 12:  # 10,11 HRC; 5 WFC
        iraf.imcombine.nlow = 2
        iraf.imcombine.nhigh = 7
    # next added/changed
    else:
        iraf.imcombine.nlow = (NumIm + 2) / 4 - 1
        iraf.imcombine.nhigh = 3 * NumIm / 4
    iraf.imcombine.mode = 'h'

    self.logfile.write(self.modName+' calling imcombine. come in imcombine. NumIm/nlo/nhi: '+\
                       str(NumIm)+' '+str(iraf.imcombine.nlow)+' '+str(iraf.imcombine.nhigh)+\
                       (' [NumTot=%d]'%(NumTot)))
    iraf.imcombine()

    if self.verbose:
        print 'NumIm = %d   nlow,high: %d %d' % (
            NumIm, iraf.imcombine.nlow, iraf.imcombine.nhigh)
        print 'median image', outfile, 'created'
    self.logfile.write('median image ' + outfile + ' created. Removing imcombine_input temp file.')
    print "removing imcombine_input temp file."
    try:
        os.remove("imcombine_input")
    except:
        pass
    return outfile
def get_align_to_subaru(sci='M0416_Ks_c1_mp_avg.fits', wht='M0416_Ks_c1_mp_exp.fits', field='', clean=True, toler=3, verbose=False, fitgeometry='shift', shift_max=20, rms_max=1.1, rot_max=2, rot_only=True, THRESH=2, align_data=None): """ Align HAWK-I images to the FF Subaru astrometric reference catalogs """ #sci='M0416_Ks_c1_mp_avg.fits'; wht='M0416_Ks_c1_mp_exp.fits' ### Make object catalog se = threedhst.sex.SExtractor() se.aXeParams() se.copyConvFile() se.overwrite = True se.options['CHECKIMAGE_TYPE'] = 'NONE' if wht is None: se.options['WEIGHT_TYPE'] = 'NONE' else: se.options['WEIGHT_TYPE'] = 'MAP_WEIGHT' se.options['WEIGHT_IMAGE'] = wht se.options['FILTER'] = 'Y' se.options['DETECT_THRESH'] = '%d' % (THRESH) se.options['ANALYSIS_THRESH'] = '%d' % (THRESH) se.options['MAG_ZEROPOINT'] = '26.0' #### Run SExtractor on direct and alignment images ## direct image se.options['CATALOG_NAME'] = 'direct.cat' status = se.sextractImage(sci) threedhst.sex.sexcatRegions('direct.cat', 'direct.reg', format=2) directCat = threedhst.sex.mySexCat('direct.cat') #### Get the X/Y coords of the reference catalog #head = pyfits.getheader(sci, 0) #wcs = pywcs.WCS(head) if 'M0416' in sci: ra_list, dec_list, mag = np.loadtxt( os.getenv('HAWKI') + '/FrontierFields/HST/hlsp_frontier_subaru_suprimecam_macs0416-astrom_R_v1_cat.txt', unpack=True) if ('c4' in sci): ra_list, dec_list, mag = np.loadtxt( os.getenv('HAWKI') + '/FrontierFields/HST/M0416/macs0416_f814w_radec.cat', unpack=True) # if 'M0717' in sci: ra_list, dec_list, mag = np.loadtxt('subaru.radec', unpack=True) if ('M1149' in sci) | (field == 'M1149'): ra_list, dec_list, mag = np.loadtxt( '/Users/brammer/Research/VLT/HAWKI/MACS1149/hlsp_frontier_subaru_suprimecam_macs1149-astrom_R_v1_cat.txt', unpack=True) if 'A2744' in sci: ra_list, dec_list, mag = np.loadtxt( os.getenv('HAWKI') + '/FrontierFields/HST/hlsp_frontier_subaru_suprimecam_abell2744-astrom_i_v1_cat.txt', unpack=True) if ('c1' in sci) | ('c4' in sci): ra_list, dec_list, 
mag = np.loadtxt( os.getenv('HAWKI') + '/FrontierFields/HST/abell2744_f814w_radec.cat', unpack=True) if align_data is not None: ra_list, dec_list, mag = align_data im = pyfits.open(sci) print sci sh = im[0].shape head = im[0].header head['CUNIT1'] = 'deg' head['CUNIT2'] = 'deg' wcs = pywcs.WCS(head) x_image, y_image = wcs.wcs_sky2pix(ra_list, dec_list, 1) try: x_image, y_image = wcs.wcs_sky2pix(ra_list, dec_list, 1) except: x_image, y_image = wcs.wcs_world2pix(ra_list, dec_list, 1) ok = (x_image > 0) & (y_image > 0) & (x_image < sh[1]) & (y_image < sh[1]) x_image, y_image = x_image[ok], y_image[ok] fpr = open('align.reg', 'w') fpr.write('image\n') for i in range(ok.sum()): fpr.write('circle(%.6f, %.6f,0.3") # color=magenta\n' % (x_image[i], y_image[i])) fpr.close() # x_image, y_image = [], [] # # for ra, dec in zip(ra_list, dec_list): # x, y = wcs.wcs_sky2pix([[ra, dec]], 1)[0] # if (x > 0) & (y > 0) & (x < sh[1]) & (y < sh[1]): # x_image.append(x) # y_image.append(y) alignCat = catIO.EmptyCat() alignCat['X_IMAGE'] = np.array(x_image) alignCat['Y_IMAGE'] = np.array(y_image) xshift = 0 yshift = 0 rot = 0 scale = 1. xrms = 2 yrms = 2 NITER = 5 IT = 0 while (IT < NITER): IT = IT + 1 #### Get x,y coordinates of detected objects ## direct image fp = open('direct.xy', 'w') for i in range(len(directCat.X_IMAGE)): fp.write('%s %s\n' % (directCat.X_IMAGE[i], directCat.Y_IMAGE[i])) fp.close() ## alignment image fp = open('align.xy', 'w') for i in range(len(alignCat.X_IMAGE)): fp.write('%s %s\n' % (np.float(alignCat.X_IMAGE[i]) + xshift, np.float(alignCat.Y_IMAGE[i]) + yshift)) fp.close() iraf.flpr() iraf.flpr() iraf.flpr() #### iraf.xyxymatch to find matches between the two catalogs pow = toler * 1. 
try: os.remove('align.match') except: pass status1 = iraf.xyxymatch(input="direct.xy", reference="align.xy", output="align.match", tolerance=2**pow, separation=0, verbose=iraf.yes, Stdout=1) nmatch = 0 while status1[-1].startswith('0') | (nmatch < 10) | (float( status1[-3].split()[1]) > 40): pow += 1 os.remove('align.match') status1 = iraf.xyxymatch(input="direct.xy", reference="align.xy", output="align.match", tolerance=2**pow, separation=0, verbose=iraf.yes, Stdout=1) # nmatch = 0 for line in open('align.match').xreadlines(): nmatch += 1 if verbose: for line in status1: print line #### Compute shifts with iraf.geomap iraf.flpr() iraf.flpr() iraf.flpr() try: os.remove("align.map") except: pass status2 = iraf.geomap(input="align.match", database="align.map", fitgeometry=fitgeometry, interactive=iraf.no, xmin=iraf.INDEF, xmax=iraf.INDEF, ymin=iraf.INDEF, ymax=iraf.INDEF, maxiter=10, reject=2.0, Stdout=1) if verbose: for line in status2: print line #fp = open(root+'.iraf.log','a') #fp.writelines(status1) #fp.writelines(status2) #fp.close() #### Parse geomap.output fp = open("align.map", "r") for line in fp.readlines(): spl = line.split() if spl[0].startswith('xshift'): xshift += float(spl[1]) if spl[0].startswith('yshift'): yshift += float(spl[1]) if spl[0].startswith('xrotation'): rot = float(spl[1]) if spl[0].startswith('xmag'): scale = float(spl[1]) if spl[0].startswith('xrms'): xrms = float(spl[1]) if spl[0].startswith('yrms'): yrms = float(spl[1]) fp.close() #os.system('wc align.match') print 'Shift iteration #%d, xshift=%f, yshift=%f, rot=%f, scl=%f (rms: %5.2f,%5.2f)' % ( IT, xshift, yshift, rot, scale, xrms, yrms) os.system( 'cat align.match | grep -v "\#" | grep [0-9] | awk \'{print "circle(", $1, ",", $2, ",4) # color=green"}\' > d.reg' ) os.system( 'cat align.match | grep -v "\#" | grep [0-9] | awk \'{print "circle(", $3, ",", $4, ",4) # color=magenta"}\' > a.reg' ) shutil.copy('align.map', sci.replace('.fits', '.align.map')) shutil.copy('align.match', 
sci.replace('.fits', '.align.match')) #### Cleanup if clean: rmfiles = [ 'align.cat', 'align.map', 'align.match', 'align.reg', 'align.xy', 'direct.cat', 'direct.reg', 'direct.xy' ] for file in rmfiles: try: os.remove(file) except: pass fp = open(sci.replace('.fits', '.align.info'), 'w') fp.write('# image xshift yshift rot scale xrms yrms\n') fp.write('%s %.3f %.3f %.4f %.4f %.3f %.3f\n' % (sci, xshift, yshift, rot, scale, xrms, yrms)) if (np.abs(xshift) > shift_max) | (np.abs(yshift) > shift_max) | ( xrms > rms_max) | (yrms > rms_max): print 'Shifts out of allowed range. Run again with increased shift_max to accept.' #return xshift, yshift, rot, scale, xrms, yrms ## Add a small shift that should come out easily with another ## shift iteration xshift, yshift, rot, scale, xrms, yrms = 2, 2, 0, 1.0, -99, -99 for file in [sci, wht]: if ('r' in fitgeometry) & rot_only: xshift, yshift = 0, 0 #apply_offsets(file, [[xshift, yshift, rot, scale]]) from drizzlepac import updatehdr updatehdr.updatewcs_with_shift(file, sci, wcsname='DRZWCS', rot=rot, scale=scale, xsh=xshift, ysh=yshift, fit=None, xrms=xrms, yrms=yrms, verbose=False, force=True, sciext=0) if '_dr' in sci: im = pyfits.open(sci) h = im[0].header for i in range(h['NDRIZIM']): flt_str = h['D%03dDATA' % (i + 1)] if 'sci,2' in flt_str: continue # flt_im = flt_str.split('[')[0] ext = int(flt_str.split('[')[1][:-1].split(',')[1]) updatehdr.updatewcs_with_shift(flt_im, sci, wcsname='GTWEAK', rot=rot, scale=scale, xsh=xshift, ysh=yshift, fit=None, xrms=xrms, yrms=yrms, verbose=False, force=True, sciext='SCI') # im = pyfits.open(file, mode='update') # wcs = pywcs.WCS(im[0].header) # wcs.rotateCD(-rot) # wcs.wcs.cd /= scale # # # im[0].header['CRPIX1'] += xshift # im[0].header['CRPIX2'] += yshift # # # for i in [0,1]: # for j in [0,1]: # im[0].header['CD%d_%d' %(i+1, j+1)] = wcs.wcs.cd[i,j] # # # im.flush() return xshift, yshift, rot, scale, xrms, yrms
def extract(fs=None):
    """Extract 1D spectra from the fixpixed 2D images with iraf.apall.

    For each 'fix' image: run apall interactively (background fit,
    variance weighting), extract the matching arc with apsum along the
    same trace, append the arc as a 5th plane of the extraction cube, and
    copy bookkeeping header keywords into the result in x1d/.
    Expects to run from the directory above 'work'.
    """
    iraf.cd('work')
    if fs is None:
        fs = glob('fix/*fix*.fits')
    if len(fs) == 0:
        print "WARNING: No fixpixed images available for extraction."
        iraf.cd('..')
        return

    if not os.path.exists('x1d'):
        os.mkdir('x1d')

    print "Note: No continuum? Make nsum small (~-5) with 'line' centered on an emission line."
    for f in fs:
        # Get the output filename without the ".fits"
        outbase = f.replace('fix', 'x1d')[:-5]
        # Get the readnoise, right now assume default value of 5 but we could
        # get this from the header
        readnoise = 5

        # If interactive open the rectified, background subtracted image in ds9
        ds9display(f.replace('fix', 'bkg'))
        # set dispaxis = 1 just in case
        pyfits.setval(f, 'DISPAXIS', extname='SCI', value=1)
        iraf.unlearn(iraf.apall)
        iraf.flpr()
        iraf.apall(input=f + '[SCI]', output=outbase, interactive='yes',
                   review='no', line='INDEF', nsum=-1000, lower=-5, upper=5,
                   b_function='legendre', b_order=5,
                   b_sample='-200:-100,100:200', b_naverage=-10,
                   b_niterate=5, b_low_reject=3.0, b_high_reject=3.0,
                   nfind=1, t_nsum=15, t_step=15, t_nlost=200,
                   t_function='legendre', t_order=5,
                   t_niterate=5, t_low_reject=3.0, t_high_reject=3.0,
                   background='fit', weights='variance', pfit='fit2d',
                   clean='no', readnoise=readnoise, gain=1.0, lsigma=4.0,
                   usigma=4.0, mode='hl')

        # Copy the CCDSUM keyword into the 1d extraction
        pyfits.setval(outbase + '.fits', 'CCDSUM',
                      value=pyfits.getval(f, 'CCDSUM'))

        # Extract the corresponding arc (matched by the image-number part
        # of the filename, characters 3:8 of the basename)
        arcname = glob('nrm/arc' + f.split('/')[1][3:8] + '*.fits')[0]
        # set dispaxis = 1 just in case
        pyfits.setval(arcname, 'DISPAXIS', extname='SCI', value=1)
        iraf.unlearn(iraf.apsum)
        iraf.flpr()
        # reuse the science trace ("references=f") so the arc is extracted
        # along the same aperture
        iraf.apsum(input=arcname + '[SCI]', output='auxext_arc',
                   references=f[:-5] + '[SCI]', interactive='no', find='no',
                   edit='no', trace='no', fittrace='no', extras='no',
                   review='no', background='no', mode='hl')

        # copy the arc into the 5 column of the data cube
        arcfs = glob('auxext_arc*.fits')
        for af in arcfs:
            archdu = pyfits.open(af)
            scihdu = pyfits.open(outbase + '.fits', mode='update')
            d = scihdu[0].data.copy()
            # grow the cube from 4 to 5 planes; first 4 keep the apall output
            scihdu[0].data = np.zeros((5, d.shape[1], d.shape[2]))
            scihdu[0].data[:-1, :, :] = d[:, :, :]
            # NOTE(review): '[-1::, :]' selects the last plane with shape
            # (1, ny, nx) and relies on broadcasting the 2D arc into it;
            # presumably equivalent to data[-1] = arc — confirm.
            scihdu[0].data[-1::, :] = archdu[0].data.copy()
            scihdu.flush()
            scihdu.close()
            archdu.close()
            os.remove(af)

        # Add the airmass, exptime, and other keywords back into the
        # extracted spectrum header
        kws = ['AIRMASS','EXPTIME',
               'PROPID','PROPOSER','OBSERVER','OBSERVAT','SITELAT','SITELONG',
               'INSTRUME','DETSWV','RA','PM-RA','DEC','PM-DEC','EQUINOX',
               'EPOCH','DATE-OBS','TIME-OBS','UTC-OBS','TIMESYS','LST-OBS',
               'JD','MOONANG','OBSMODE','DETMODE','SITEELEV','BLOCKID','PA',
               'TELHA','TELRA','TELDEC','TELPA','TELAZ','TELALT','DECPANGL',
               'TELTEM','PAYLTEM','MASKID','MASKTYP','GR-ANGLE','GRATING',
               'FILTER']
        for kw in kws:
            pyfits.setval(outbase + '.fits', kw, value=pyfits.getval(f,kw))

    iraf.cd('..')