Example #1
def wl_cal(filename, outname, cals):
    print 'run wavelength calibration...'
    if os.path.isfile(outname):
        print 'file %s already exists' % outname
    else:
        iraf.imred()
        iraf.specred()
        print 'The calibration files are listed below:'
        for i in xrange(len(cals)):
            print i, cals[i], get_fits_objname(cals[i])
        valget = 0
        if len(cals) > 1:
            valget = raw_input('Which one do you want to use?: ')
            while not valget.isdigit():
                valget = raw_input('input is not an integer, please re-enter: ')
            valget = int(valget)
        iraf.refspectra(input = filename
                , references = cals[valget], apertures = '', refaps = ''
                , ignoreaps = True, select = 'interp', sort = ''
                , group = '', time = False, timewrap = 17.0
                , override = False, confirm = True, assign = True
                , logfiles = 'STDOUT,logfile', verbose = False, answer = 'yes')
        print 'make file ' + outname + '...'
        iraf.dispcor(input = filename
                , output = outname, linearize = True, database = 'database'
                , table = '', w1 = 'INDEF', w2 = 'INDEF', dw = 'INDEF'
                , nw = 'INDEF', log = False, flux = True, blank = 0.0
                , samedisp = False, ignoreaps = True
                , confirm = False, listonly = False, verbose = True
                , logfile = '')
    print 'splot %s' % outname
    iraf.splot(images = outname)
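A minimal call sketch for the function above (file names are hypothetical; assumes os, iraf and get_fits_objname are importable in the same module):

science = 'object_001.ms.fits'                   # hypothetical extracted science spectrum
arcs = ['arc_001.ms.fits', 'arc_002.ms.fits']    # hypothetical extracted comparison-lamp spectra
wl_cal(science, 'w' + science, arcs)             # writes the calibrated spectrum and opens splot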
Example #2
def initialize_iraf():
    iraf.noao(_doprint=0)
    iraf.imred(_doprint=0)
    iraf.ccdred(_doprint=0)
    iraf.specred(_doprint=0)
    iraf.twodspec(_doprint=0)
    iraf.longslit(_doprint=0)
    return
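The loader only needs to run once per session; a short usage sketch (the unlearn call is illustrative, just to show that specred tasks are available once the packages are loaded):

from pyraf import iraf

initialize_iraf()                 # quietly loads noao, imred, ccdred, specred, twodspec, longslit
iraf.specred.apall.unlearn()      # specred tasks can now be configured and run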
Example #3
def scalewavelenght(calspec):
    ''' Creates a wavelength solution for 'calspec' '''
    iraf.imred()
    iraf.specred()

    linelist = str(raw_input('Enter file with list of lines (linelists$thar.dat) : '))
    if linelist == '':
        linelist = 'linelists$thar.dat'
    iraf.specred.identify.coordlist = linelist
    iraf.specred.identify(images=calspec)
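A usage sketch, assuming an extracted ThAr comparison spectrum named 'thar.ms.fits' in the working directory:

scalewavelenght('thar.ms.fits')   # prompts for a line list, then runs identify interactively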
Example #4
def scalewavelenght(calspec):
    ''' Creates a wavelength solution for 'calspec' '''
    iraf.imred()
    iraf.specred()

    linelist = str(
        raw_input('Enter file with list of lines (linelists$thar.dat) : '))
    if linelist == '':
        linelist = 'linelists$thar.dat'
    iraf.specred.identify.coordlist = linelist
    iraf.specred.identify(images=calspec)
Example #5
def flatresponse(input='Flat', output='nFlat'):
    ''' normalize Flat to correct illumination patterns'''

    iraf.imred()
    iraf.ccdred()
    iraf.specred()

    iraf.ccdred.combine.unlearn()
    iraf.ccdred.ccdproc.unlearn()
    iraf.specred.response.unlearn()
    iraf.specred.response.interactive = True
    iraf.specred.response.function = 'chebyshev'
    iraf.specred.response.order = 1
    iraf.specred.response(calibration=input, normalization=input,
            response=output)
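A call sketch, assuming a combined flat named 'Flat.fits' produced by an earlier step:

flatresponse(input='Flat', output='nFlat')   # interactive order-1 chebyshev fit, writes nFlat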
Example #6
def findaperture(img, _interactive=False):
    # print "LOGX:: Entering `findaperture` method/function in %(__file__)s" %
    # globals()
    import re
    import string
    import os
    from pyraf import iraf
    import ntt
    iraf.noao(_doprint=0, Stdout=0)
    iraf.imred(_doprint=0, Stdout=0)
    iraf.specred(_doprint=0, Stdout=0)
    toforget = ['specred.apfind']
    for t in toforget:
        iraf.unlearn(t)

    iraf.specred.databas = 'database'
    iraf.specred.dispaxi = 2
    iraf.specred.apedit.thresho = 0

    dv = ntt.dvex()
    grism = ntt.util.readkey3(ntt.util.readhdr(img), 'grism')
    if _interactive:
        _interac = 'yes'
        _edit = 'yes'
    else:
        _interac = 'no'
        _edit = 'no'
    if os.path.isfile('database/ap' + re.sub('.fits', '', img)):
        ntt.util.delete('database/ap' + re.sub('.fits', '', img))
    xx = iraf.specred.apfind(img,
                             interac=_interac,
                             find='yes',
                             recenter='yes',
                             edit=_edit,
                             resize='no',
                             aperture=1,
                             Stdout=1,
                             nfind=1,
                             line=dv['line'][grism],
                             nsum=50,
                             mode='h')
    try:
        for line in open('database/ap' + re.sub('.fits', '', img)):
            if "center" in line:
                center = float(string.split(line)[1])
    except:
        center = 9999
    return center
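A call sketch (requires the ntt pipeline package; the frame name is hypothetical and the image must carry the 'grism' keyword the function reads):

center = findaperture('object_2df.fits', _interactive=False)   # aperture centre in pixels, or 9999 if none is found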
Example #7
def flatresponse(input='Flat', output='nFlat'):
    ''' normalize Flat to correct illumination patterns'''

    iraf.imred()
    iraf.ccdred()
    iraf.specred()

    iraf.ccdred.combine.unlearn()
    iraf.ccdred.ccdproc.unlearn()
    iraf.specred.response.unlearn()
    iraf.specred.response.interactive = True
    iraf.specred.response.function = 'chebyshev'
    iraf.specred.response.order = 1
    iraf.specred.response(calibration=input,
                          normalization=input,
                          response=output)
Example #8
def runapall(imagesre, gain='gain', rdnoise='rdnoise'):
    '''Extract aperture spectra for science images ...'''

    imageslist = glob.glob(imagesre)
    imagesin = ', '.join(imageslist)

    # load packages
    iraf.imred()
    iraf.ccdred()
    iraf.specred()
    # unlearn previous settings
    iraf.ccdred.combine.unlearn()
    iraf.ccdred.ccdproc.unlearn()
    iraf.specred.apall.unlearn()
    # setup and run task
    iraf.specred.apall.format = 'onedspec'
    iraf.specred.apall.readnoise = rdnoise
    iraf.specred.apall.gain = gain
    iraf.specred.apall(input=imagesin)
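A call sketch with a hypothetical file pattern; 'gain' and 'rdnoise' name the header keywords apall reads for the detector parameters:

runapall('object_*.fits', gain='gain', rdnoise='rdnoise')   # writes extracted spectra in onedspec format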
Example #9
def runapall(imagesre, gain='gain', rdnoise='rdnoise'):
    '''Extract aperture spectra for science images ...'''

    imageslist = glob.glob(imagesre)
    imagesin = ', '.join(imageslist)

    # load packages
    iraf.imred()
    iraf.ccdred()
    iraf.specred()
    # unlearn previous settings
    iraf.ccdred.combine.unlearn()
    iraf.ccdred.ccdproc.unlearn()
    iraf.specred.apall.unlearn()
    # setup and run task
    iraf.specred.apall.format = 'onedspec'
    iraf.specred.apall.readnoise = rdnoise
    iraf.specred.apall.gain = gain
    iraf.specred.apall(input=imagesin)
Example #10
def continumsub(imagefile, _order1, _order2):
    # print "LOGX:: Entering `continumsub` method/function in %(__file__)s" %
    # globals()
    from pyraf import iraf
    iraf.noao(_doprint=0)
    iraf.imred(_doprint=0)
    iraf.specred(_doprint=0)
    toforget = ['specred.continuum']
    for t in toforget:
        iraf.unlearn(t)
    delete('tsky.fits')
    iraf.specred.continuum(imagefile, output='tsky.fits', type='difference',
                           interact='no', function='legendre', niterat=300, low_rej=3, high_re=2, sample='*',
                           order=_order1, ask='YES')
    delete(imagefile)
    iraf.continuum('tsky.fits', output=imagefile, type='difference',
                   interact='no', function='spline1', overrid='yes', niterat=10, low_rej=3, high_re=1, sample='*',
                   order=_order2, ask='YES')
    delete('tsky.fits')
    return imagefile
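A call sketch (the sky frame name and fit orders are hypothetical; continumsub also relies on a delete() helper defined elsewhere in its module):

continumsub('sky_region.fits', 6, 1)   # two-pass continuum subtraction, rewrites and returns the input file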
Example #11
def findaperture(img, _interactive=False):
    # print "LOGX:: Entering `findaperture` method/function in %(__file__)s" %
    # globals()
    import re
    import string
    import os
    from pyraf import iraf
    import ntt
    iraf.noao(_doprint=0)
    iraf.imred(_doprint=0)
    iraf.specred(_doprint=0)
    toforget = ['specred.apfind']
    for t in toforget:
        iraf.unlearn(t)

    iraf.specred.databas = 'database'
    iraf.specred.dispaxi = 2
    iraf.specred.apedit.thresho = 0

    dv = ntt.dvex()
    grism = ntt.util.readkey3(ntt.util.readhdr(img), 'grism')
    if _interactive:
        _interac = 'yes'
        _edit = 'yes'
    else:
        _interac = 'no'
        _edit = 'no'
    if os.path.isfile('database/ap' + re.sub('.fits', '', img)):
        ntt.util.delete('database/ap' + re.sub('.fits', '', img))
    xx = iraf.specred.apfind(img, interac=_interac, find='yes', recenter='yes', edit=_edit, resize='no',
                             aperture=1, Stdout=1, nfind=1, line=dv['line'][grism], nsum=50, mode='h')
    try:
        for line in open('database/ap' + re.sub('.fits', '', img)):
            if "center" in line:
                center = float(string.split(line)[1])
    except:
        center = 9999
    return center
Example #12
def floydsautoredu(files,_interactive,_dobias,_doflat,_listflat,_listbias,_listarc,_cosmic,_ext_trace,_dispersionline,liststandard,listatmo,_automaticex,_classify=False,_verbose=False,smooth=1,fringing=1):
    import floyds
    import string,re,os,glob,sys,pickle
    from numpy import array, arange, mean,pi,arccos,sin,cos,argmin
    from astropy.io import fits
    from pyraf import iraf
    import datetime
    os.environ["PYRAF_BETA_STATUS"] = "1"
    iraf.set(direc=floyds.__path__[0]+'/')
    _extinctdir='direc$standard/extinction/'
    _tel=floyds.util.readkey3(floyds.util.readhdr(re.sub('\n','',files[0])),'TELID')
    if _tel=='fts':
        _extinction='ssoextinct.dat'
        _observatory='sso'
    elif _tel=='ftn':
        _extinction='maua.dat' 
        _observatory='cfht'   
    else: sys.exit('ERROR: observatory not recognised')
    dv=floyds.util.dvex()
    scal=pi/180.
    iraf.noao(_doprint=0)
    iraf.imred(_doprint=0)
    iraf.ccdred(_doprint=0)
    iraf.twodspec(_doprint=0)
    iraf.longslit(_doprint=0)
    iraf.specred(_doprint=0)
    toforget = ['ccdred.flatcombine','ccdred.zerocombine','ccdproc','specred.apall','longslit.identify','longslit.reidentify',\
                    'specred.standard','longslit.fitcoords','specred.transform','specred.response']
    for t in toforget: iraf.unlearn(t)
    iraf.longslit.dispaxi=2
    iraf.longslit.mode='h'
    iraf.identify.fwidth=7 
    iraf.identify.order=2 
    iraf.specred.dispaxi=2
    iraf.specred.mode='h'
    iraf.ccdproc.darkcor='no'
    iraf.ccdproc.fixpix='no'
    iraf.ccdproc.trim='no'
    iraf.ccdproc.flatcor='no'
    iraf.ccdproc.overscan='no'
    iraf.ccdproc.zerocor='no'
    iraf.ccdproc.biassec=''
    iraf.ccdproc.ccdtype=''
    iraf.ccdred.instrument = "/dev/null"
    if _verbose: 
        iraf.ccdred.verbose='yes'
        iraf.specred.verbose='yes'
    else: 
        iraf.specred.verbose='no'
        iraf.ccdred.verbose='no'
    now=datetime.datetime.now()
    datenow=now.strftime('20%y%m%d%H%M')
    MJDtoday=55928+(datetime.date.today()-datetime.date(2012, 01, 01)).days
    outputlist=[]
    hdra=floyds.util.readhdr(re.sub('\n','',files[0]))
    _gain=floyds.util.readkey3(hdra,'gain')
    _rdnoise=floyds.util.readkey3(hdra,'ron')
    std,rastd,decstd,magstd=floyds.util.readstandard('standard_floyds_mab.txt')
    _naxis2=hdra.get('NAXIS2')
    _naxis1=hdra.get('NAXIS1')
    if not _naxis1: _naxis1=2079
    if not _naxis2:
        if not hdra.get('HDRVER'):   _naxis2=511
        else:                        _naxis2=512
    _overscan='[2049:'+str(_naxis1)+',1:'+str(_naxis2)+']'
    _biassecblu='[380:2048,325:'+str(_naxis2)+']'    
    _biassecred='[1:1800,1:350]'    
    lista={}
    objectlist={}
    biaslist={}
    flatlist={}
    flatlistd={}
    arclist={}
    max_length=14
    for img in files:
        hdr0=floyds.util.readhdr(img)
        if  floyds.util.readkey3(hdr0,'naxis2')>=500:
            if 'blu' not in lista: lista['blu']=[]
            if 'red' not in lista: lista['red']=[]
            _object0=floyds.util.readkey3(hdr0,'object')
            _object0 = re.sub(':', '', _object0) # colon
            _object0 = re.sub('/', '', _object0) # slash
            _object0 = re.sub('\s', '', _object0) # any whitespace
            _object0 = re.sub('\(', '', _object0) # open parenthesis
            _object0 = re.sub('\[', '', _object0) # open square bracket
            _object0 = re.sub('\)', '', _object0) # close parenthesis
            _object0 = re.sub('\]', '', _object0) # close square bracket
            _object0 = _object0.replace(r'\t', '') # Any tab characters
            _object0 = _object0.replace('*', '') # Any asterisks

            if len(_object0) > max_length:
                _object0 = _object0[:max_length]
            _date0=floyds.util.readkey3(hdr0,'date-night')
            _tel=floyds.util.readkey3(hdr0,'TELID')
            _type=floyds.util.readkey3(hdr0,'OBSTYPE')
            if not _type:    _type=floyds.util.readkey3(hdr0,'imagetyp')
            _slit=floyds.util.readkey3(hdr0,'slit')
            if _type:
                _type = _type.lower()
                if _type in ['sky','spectrum','expose']:
                    nameoutb=str(_object0)+'_'+_tel+'_'+str(_date0)+'_blue_'+str(_slit)+'_'+str(MJDtoday)
                    nameoutr=str(_object0)+'_'+_tel+'_'+str(_date0)+'_red_'+str(_slit)+'_'+str(MJDtoday)
                elif _type in ['lamp','arc','l']:
                    nameoutb='arc_'+str(_object0)+'_'+_tel+'_'+str(_date0)+'_blue_'+str(_slit)+'_'+str(MJDtoday)
                    nameoutr='arc_'+str(_object0)+'_'+_tel+'_'+str(_date0)+'_red_'+str(_slit)+'_'+str(MJDtoday)
                elif _type in ['flat','f','lampflat','lamp-flat']:
                    nameoutb='flat_'+str(_object0)+'_'+_tel+'_'+str(_date0)+'_blue_'+str(_slit)+'_'+str(MJDtoday)
                    nameoutr='flat_'+str(_object0)+'_'+_tel+'_'+str(_date0)+'_red_'+str(_slit)+'_'+str(MJDtoday)
                else:
                    nameoutb=str(_type.lower())+'_'+str(_object0)+'_'+_tel+'_'+str(_date0)+'_blue_'+str(_slit)+'_'+str(MJDtoday)
                    nameoutr=str(_type.lower())+'_'+str(_object0)+'_'+_tel+'_'+str(_date0)+'_red_'+str(_slit)+'_'+str(MJDtoday)

                bimg=floyds.util.name_duplicate(img,nameoutb,'')
                rimg=floyds.util.name_duplicate(img,nameoutr,'')
####
                floyds.util.delete(bimg)
                floyds.util.delete(rimg)
                iraf.imcopy(img,bimg,verbose='no')
                iraf.imcopy(img,rimg,verbose='no')

                aaa=iraf.hedit(bimg,'CCDSEC',delete='yes',update='yes',verify='no',Stdout=1)
                aaa=iraf.hedit(bimg,'TRIMSEC',delete='yes',update='yes',verify='no',Stdout=1)
                aaa=iraf.hedit(rimg,'CCDSEC',delete='yes',update='yes',verify='no',Stdout=1)
                aaa=iraf.hedit(rimg,'TRIMSEC',delete='yes',update='yes',verify='no',Stdout=1)

                iraf.ccdproc(bimg,output='', overscan="yes", trim="yes", zerocor='no', flatcor='no', zero='', ccdtype='',\
                                 fixpix='no', trimsec=_biassecblu, biassec=_overscan, readaxi='line', Stdout=1)
                iraf.ccdproc(rimg,output='', overscan="yes", trim="yes", zerocor='no', flatcor='no', zero='', ccdtype='',\
                                 fixpix='no', trimsec=_biassecred, biassec=_overscan, readaxi='line', Stdout=1)
                floyds.util.updateheader(bimg,0,{'GRISM':['blu',' blue order']})
                floyds.util.updateheader(rimg,0,{'GRISM':['red',' red order']})
                floyds.util.updateheader(bimg,0,{'arcfile':[img,'file name in the archive']})
                floyds.util.updateheader(rimg,0,{'arcfile':[img,'file name in the archive']})
                lista['blu'].append(bimg)
                lista['red'].append(rimg)
            else: 
                print 'warning type not defined'
    for arm in lista.keys():
        for img in lista[arm]:
            print img
            hdr=floyds.util.readhdr(img)
            _type=floyds.util.readkey3(hdr,'OBSTYPE')
            if _type=='EXPOSE':
                _type=floyds.util.readkey3(hdr,'imagetyp')
                if not _type: _type='EXPOSE'

            if _type=='EXPOSE':
                print 'warning: obstype still EXPOSE, is this old data? run floydsfixheader manually'

            _slit=floyds.util.readkey3(hdr,'slit')
            _grpid=floyds.util.readkey3(hdr,'grpid')
            if _type.lower() in ['flat','f','lamp-flat','lampflat'] :
                if (arm,_slit) not in flatlist:  flatlist[arm,_slit]={}
                if _grpid not in flatlist[arm,_slit]: flatlist[arm,_slit][_grpid]=[img]
                else: flatlist[arm,_slit][_grpid].append(img)
            elif _type.lower() in ['lamp','l','arc']:
                if (arm,_slit) not in arclist:  arclist[arm,_slit]={}
                if _grpid not in arclist[arm,_slit]: arclist[arm,_slit][_grpid]=[img]
                else: arclist[arm,_slit][_grpid].append(img)
            elif _type.lower() in ['bias','b']:
                if arm not in biaslist: biaslist[arm]=[]
                biaslist[arm].append(img)
            elif _type.lower() in ['sky','s','spectrum']:
                try:
                    _ra=float(floyds.util.readkey3(hdr,'RA'))
                    _dec=float(floyds.util.readkey3(hdr,'DEC'))
                except:
                    ra00=string.split(floyds.util.readkey3(hdr,'RA'),':')
                    ra0,ra1,ra2=float(ra00[0]),float(ra00[1]),float(ra00[2])
                    _ra=((ra2/60.+ra1)/60.+ra0)*15.
                    dec00=string.split(floyds.util.readkey3(hdr,'DEC'),':')
                    dec0,dec1,dec2=float(dec00[0]),float(dec00[1]),float(dec00[2])
                    if '-' in str(dec0):       _dec=(-1)*((dec2/60.+dec1)/60.+((-1)*dec0))
                    else:                      _dec=(dec2/60.+dec1)/60.+dec0
                dd=arccos(sin(_dec*scal)*sin(decstd*scal)+cos(_dec*scal)*cos(decstd*scal)*cos((_ra-rastd)*scal))*((180/pi)*3600)
                if _verbose:
                    print _ra,_dec
                    print std[argmin(dd)],min(dd)
                if min(dd)<5200: _typeobj='std'
                else: _typeobj='obj'
                if min(dd)<5200:
                    floyds.util.updateheader(img,0,{'stdname':[std[argmin(dd)],'']})
                    floyds.util.updateheader(img,0,{'magstd':[float(magstd[argmin(dd)]),'']})
                if _typeobj not in objectlist:      objectlist[_typeobj]={}

                if (arm,_slit) not in objectlist[_typeobj]:     objectlist[_typeobj][arm,_slit]=[img]
                else: objectlist[_typeobj][arm,_slit].append(img)
    if _verbose:
        print 'object'
        print objectlist
        print 'flat'
        print flatlist
        print 'bias'
        print biaslist
        print 'arc'
        print arclist

    if liststandard and 'std' in objectlist.keys():  
        print 'external standard, raw standard not used'
        del objectlist['std']

    sens={}
    outputfile={}
    atmo={}
    for tpe in objectlist:
      if tpe not in outputfile:  outputfile[tpe]={}
      for setup in objectlist[tpe]:
        if setup not in sens:   sens[setup]=[]
        print '\n### setup= ',setup,'\n### objects= ',objectlist[tpe][setup],'\n'
        for img in objectlist[tpe][setup]:
              print '\n\n### next object= ',img,' ',floyds.util.readkey3(floyds.util.readhdr(img),'object'),'\n'
              hdr=floyds.util.readhdr(img)
              archfile=floyds.util.readkey3(hdr,'arcfile')
              _gain=floyds.util.readkey3(hdr,'gain')
              _rdnoise=floyds.util.readkey3(hdr,'ron')
              _grism=floyds.util.readkey3(hdr,'grism')
              _grpid=floyds.util.readkey3(hdr,'grpid')
              if archfile not in outputfile[tpe]: outputfile[tpe][archfile]=[]
#####################      flat   ###############
              if _listflat:   flatgood=_listflat    # flat list from reducer
              elif setup in flatlist:  
                  if _grpid in flatlist[setup]:
                      print '\n###FLAT WITH SAME GRPID'
                      flatgood= flatlist[setup][_grpid]     # flat in the  raw data
                  else:  
                      flatgood=[]
                      for _grpid0 in flatlist[setup].keys():
                          for ii in flatlist[setup][_grpid0]:
                              flatgood.append(ii)
              else: flatgood=[]
              if len(flatgood)!=0:
                  if len(flatgood)>1:
                      f=open('_oflatlist','w')
                      for fimg in flatgood:
                          print fimg
                          f.write(fimg+'\n')
                      f.close()
                      floyds.util.delete('flat'+img)
                      iraf.ccdred.flatcombine('"@_oflatlist"',output='flat'+img,combine='average',reject='none',ccdtype=' ',rdnoise=_rdnoise,gain=_gain, process='no', Stdout=1)
                      floyds.util.delete('_oflatlist')
                      flatfile='flat'+img
                  elif len(flatgood)==1:
                      os.system('cp '+flatgood[0]+' flat'+img)
                      flatfile='flat'+img
              else: flatfile=''
##########################   find arcfile            #######################
              arcfile=''
              if _listarc:       arcfile= [floyds.util.searcharc(img,_listarc)[0]][0]   # take arc from list 
              if not arcfile and setup in arclist.keys():
                    if _grpid in arclist[setup]:  
                        print '\n###ARC WITH SAME GRPID'
                        arcfile= arclist[setup][_grpid]     # arc in the raw data
                    else:  
                        arcfile=[]
                        for _grpid0 in arclist[setup].keys():
                            for ii in arclist[setup][_grpid0]:
                                arcfile.append(ii)                   
              if arcfile:
                  if len(arcfile)>1:                           # more than one arc available
                      print arcfile
#                     _arcclose=floyds.util.searcharc(imgex,arcfile)[0]   # take the closest in time 
                      _arcclose=floyds.sortbyJD(arcfile)[-1]               #  take the last arc of the sequence
                      if _interactive.upper() in ['YES','Y']:
                              for ii in floyds.floydsspecdef.sortbyJD(arcfile):
                                  print '\n### ',ii 
                              arcfile=raw_input('\n### more than one arcfile available, which one to use ['+str(_arcclose)+'] ? ')
                              if not arcfile: arcfile=_arcclose
                      else: arcfile=_arcclose
                  else: arcfile=arcfile[0]
              else:   print '\n### Warning: no arc found'

###################################################################   rectify 
              if setup[0]=='red':
                  fcfile=floyds.__path__[0]+'/standard/ident/fcrectify_'+_tel+'_red'
                  fcfile1=floyds.__path__[0]+'/standard/ident/fcrectify1_'+_tel+'_red'
                  print fcfile
              else:
                  fcfile=floyds.__path__[0]+'/standard/ident/fcrectify_'+_tel+'_blue'
                  fcfile1=floyds.__path__[0]+'/standard/ident/fcrectify1_'+_tel+'_blue'
                  print fcfile
              print img,arcfile,flatfile
              img0=img
              if img      and img not in outputfile[tpe][archfile]: outputfile[tpe][archfile].append(img)
              if arcfile  and arcfile not in outputfile[tpe][archfile]: outputfile[tpe][archfile].append(arcfile)
              if flatfile and flatfile not in outputfile[tpe][archfile]: outputfile[tpe][archfile].append(flatfile)

              img,arcfile,flatfile=floyds.floydsspecdef.rectifyspectrum(img,arcfile,flatfile,fcfile,fcfile1,'no',_cosmic)
              if img      and img not in outputfile[tpe][archfile]: outputfile[tpe][archfile].append(img)
              if arcfile  and arcfile not in outputfile[tpe][archfile]: outputfile[tpe][archfile].append(arcfile)
              if flatfile and flatfile not in outputfile[tpe][archfile]: outputfile[tpe][archfile].append(flatfile)
###################################################################         check wavecalib  
              if tpe=='std' or floyds.util.readkey3(floyds.util.readhdr(img),'exptime') < 300:
                  if setup[0]=='red':
                      print '\n### check standard wave calib'
                      data, hdr = fits.getdata(img, 0, header=True) 
                      y=data.mean(1)
                      import numpy as np
                      if np.argmax(y) < 80 and np.argmax(y) > 15:                      
                          y2=data[np.argmax(y)-3:np.argmax(y)+3].mean(0)
                          yy2=data[np.argmax(y)-9:np.argmax(y)-3].mean(0)
                          floyds.util.delete('_std.fits')
                          fits.writeto('_std.fits', np.float32(y2-yy2), hdr)
                          shift=floyds.floydsspecdef.checkwavestd('_std.fits',_interactive,2)
                          zro=hdr['CRVAL1']
                          floyds.util.updateheader(img,0,{'CRVAL1':[zro+int(shift),'']})
                          floyds.util.updateheader(img,0,{'shift':[float(shift),'']})
                          floyds.util.delete('_std.fits')
                      else:
                          print 'object not found'
                  else: 
                      print '\n### warning: wavelength check not possible for short exposures in the blue range'
              else:
                    print '\n### check object wave calib'
                    _skyfile=floyds.__path__[0]+'/standard/ident/sky_'+setup[0]+'.fits'
                    data, hdr = fits.getdata(img, 0, header=True) 
                    y=data.mean(1)
                    import numpy as np
                    if np.argmax(y) < 80 and np.argmax(y) > 15:
                        yy1=data[10:np.argmax(y)-9].mean(0)
                        yy2=data[np.argmax(y)+9:-10].mean(0)
                        floyds.util.delete('_sky.fits')
                        fits.writeto('_sky.fits', np.float32(yy1+yy2), hdr)
                        shift=floyds.floydsspecdef.checkwavelength_obj('_sky.fits',_skyfile,_interactive,usethirdlayer=False)
                        floyds.util.delete('_sky.fits')
                        zro=hdr['CRVAL1']
                        floyds.util.updateheader(img,0,{'CRVAL1':[zro+int(shift),'']})
                        floyds.util.updateheader(img,0,{'shift':[float(shift),'']})
                    else:  print 'object not found'
####################################################     flat field
              if img and flatfile and setup[0]=='red':
                      imgn='n'+img
                      hdr1 = floyds.readhdr(img)
                      hdr2 = floyds.readhdr(flatfile)
                      _grpid1=floyds.util.readkey3(hdr1,'grpid')
                      _grpid2=floyds.util.readkey3(hdr2,'grpid')
                      if _grpid1==_grpid2:
                          print flatfile,img,setup[0]
                          imgn=floyds.fringing_classicmethod2(flatfile,img,'no','*',15,setup[0])
                      else:
                          print 'Warning: flat not from the same OB'
                          imgex=floyds.floydsspecdef.extractspectrum(img,dv,_ext_trace,_dispersionline,_interactive,tpe,automaticex=_automaticex)
                          floyds.delete('flat'+imgex)
                          iraf.specred.apsum(flatfile,output='flat'+imgex,referen=img,interac='no',find='no',recente='no',resize='no',\
                                             edit='no',trace='no',fittrac='no',extract='yes',extras='no',review='no',backgro='none')
                          fringingmask=floyds.normflat('flat'+imgex)
                          print '\n### fringing correction'
                          print imgex,fringingmask
                          imgex,scale,shift=floyds.correctfringing_auto(imgex,fringingmask)  #  automatic correction
                          shift=int(.5+float(shift)/3.5)        # shift from correctfringing_auto in Angstrom
                          print '\n##### flat scaling: ',str(scale),str(shift)
########################################################
                          datax, hdrx = fits.getdata(flatfile, 0, header=True)
                          xdim=hdrx['NAXIS1']
                          ydim=hdrx['NAXIS2']
                          iraf.specred.apedit.nsum=15 
                          iraf.specred.apedit.width=100.  
                          iraf.specred.apedit.line=1024 
                          iraf.specred.apfind.minsep=20.  
                          iraf.specred.apfind.maxsep=1000.  
                          iraf.specred.apresize.bkg='no' 
                          iraf.specred.apresize.ylevel=0.5 
                          iraf.specred.aptrace.nsum=10
                          iraf.specred.aptrace.step=10
                          iraf.specred.aptrace.nlost=10
                          floyds.util.delete('n'+flatfile)
                          floyds.util.delete('norm.fits')
                          floyds.util.delete('n'+img)
                          floyds.util.delete(re.sub('.fits','c.fits',flatfile))
                          iraf.imcopy(flatfile+'[500:'+str(xdim)+',*]',re.sub('.fits','c.fits',flatfile),verbose='no')
                          iraf.imarith(flatfile,'/',flatfile,'norm.fits',verbose='no')
                          flatfile=re.sub('.fits','c.fits',flatfile)
                          floyds.util.delete('n'+flatfile)
                          iraf.unlearn(iraf.specred.apflatten)
                          floyds.floydsspecdef.aperture(flatfile)
                          iraf.specred.apflatten(flatfile,output='n'+flatfile,interac=_interactive,find='no',recenter='no', resize='no',edit='no',trace='no',\
                                                 fittrac='no',fitspec='no', flatten='yes', aperture='',\
                                                 pfit='fit2d',clean='no',function='legendre',order=15,sample = '*', mode='ql')
                          iraf.imcopy('n'+flatfile,'norm.fits[500:'+str(xdim)+',*]',verbose='no')
                          floyds.util.delete('n'+flatfile)
                          floyds.util.delete('n'+img)
                          iraf.imrename('norm.fits','n'+flatfile,verbose='no')
                          imgn=floyds.floydsspecdef.applyflat(img,'n'+flatfile,'n'+img,scale,shift)
              else:                  imgn=''

              if imgn and imgn not in outputfile[tpe][archfile]: outputfile[tpe][archfile].append(imgn)

###################################################      2D flux calib
              hdr=floyds.util.readhdr(img)
              _sens=''
              if liststandard:  _sens=floyds.util.searchsens(img,liststandard)[0]   # search in the list from reducer
              if not _sens:
                  try:      _sens=floyds.util.searchsens(img,sens[setup])[0]        # search in the reduced data
                  except:   _sens=floyds.util.searchsens(img,'')[0]              # search in the archive
              if _sens:
                  if _sens[0]=='/': 
                      os.system('cp '+_sens+' .')
                      _sens=string.split(_sens,'/')[-1]
                  imgd=fluxcalib2d(img,_sens)
                  if imgn:     imgdn=fluxcalib2d(imgn,_sens)
                  else: imgdn=''
                  if _sens not in outputfile[tpe][archfile]: outputfile[tpe][archfile].append(_sens)
                  else:        imgdn='' 
                  print '\n### do 2D calibration'
              else:
                  imgd=''
                  imgdn=''
################    extraction         ####################################
              if imgdn:
                  try:
                      imgdnex=floyds.floydsspecdef.extractspectrum(imgdn,dv,_ext_trace,_dispersionline,_interactive,tpe,automaticex=_automaticex)
                  except Exception as e:
                      print 'failed to extract', imgdn
                      print e
                      imgdnex=''
              else:       
                  imgdnex=''
              if imgd:
                  try:
                      imgdex=floyds.floydsspecdef.extractspectrum(imgd,dv,_ext_trace,_dispersionline,_interactive,tpe,automaticex=_automaticex)  
                  except Exception as e:
                      print 'failed to extract', imgd
                      print e
                      imgdex=''
              else:
                  imgdex=''
              if imgd    and imgd    not in outputfile[tpe][archfile]: outputfile[tpe][archfile].append(imgd)
              if imgdn   and imgdn   not in outputfile[tpe][archfile]: outputfile[tpe][archfile].append(imgdn)
              if imgdnex and imgdnex not in outputfile[tpe][archfile]: outputfile[tpe][archfile].append(imgdnex)
              if imgdex  and imgdex  not in outputfile[tpe][archfile]: outputfile[tpe][archfile].append(imgdex)
              if tpe=='std':
                  if imgn:
                      try:
                          imgnex=floyds.floydsspecdef.extractspectrum(imgn,dv,_ext_trace,_dispersionline,_interactive,tpe,automaticex=_automaticex)  
                      except Exception as e:
                          print 'failed to extract', imgn
                          print e
                          imgnex=''
                  elif img:
                      try:
                          imgnex=floyds.floydsspecdef.extractspectrum(img,dv,_ext_trace,_dispersionline,_interactive,tpe,automaticex=_automaticex)  
                      except Exception as e:
                          print 'failed to extract', img
                          print e
                          imgnex=''
                  if imgnex:
                    hdrs=floyds.util.readhdr(imgnex)
                    _tel=floyds.util.readkey3(hdrs,'TELID')
                    try:
                      _outputsens2='sens_'+_tel+'_'+str(floyds.util.readkey3(hdrs,'date-night'))+'_'+str(floyds.util.readkey3(hdrs,'grism'))+\
                          '_'+re.sub('.dat','',floyds.util.readkey3(hdrs,'stdname'))+'_'+str(MJDtoday)
                    except:  sys.exit('Error: missing header -stdname- in standard '+str(imgnex)+'  ')
                    print '\n### compute sensitivity function and atmofile'
                    if setup[0]=='red':
                          atmofile=floyds.floydsspecdef.telluric_atmo(imgnex)
                          if atmofile and atmofile not in outputfile[tpe][archfile]:    outputfile[tpe][archfile].append(atmofile)
                          stdusedclean=re.sub('_ex.fits','_clean.fits',imgnex)
                          floyds.util.delete(stdusedclean)
                          _function='spline3'
                          iraf.specred.sarith(input1=imgnex,op='/',input2=atmofile,output=stdusedclean, format='multispec')
                          try:
                              _outputsens2=floyds.floydsspecdef.sensfunction(stdusedclean,_outputsens2,_function,8,_interactive)
                          except:
                              print 'Warning: problem computing sensitivity function'
                              _outputsens2=''
                          if setup not in atmo: atmo[setup]=[atmofile]
                          else: atmo[setup].append(atmofile)
                    else:
                          _function='spline3'
                          try:
                              _outputsens2=floyds.floydsspecdef.sensfunction(imgnex,_outputsens2,_function,12,_interactive,'3400:4700')#,3600:4300')
                          except:
                              print 'Warning: problem computing sensitivity function'
                              _outputsens2=''
                    if _outputsens2  and _outputsens2 not in outputfile[tpe][archfile]:    outputfile[tpe][archfile].append(_outputsens2)
    ###################################################
    if 'obj' in outputfile:
      for imm in outputfile['obj']:
        lista = []
        tt_red = ''
        ntt_red = ''
        tt_blue = ''
        for f in outputfile['obj'][imm]:
            if '_ex.fits' in f and '_blue_' in f:
                tt_blue = f
            elif '_ex.fits' in f and f[:3] == 'ntt':
                ntt_red = f
            elif '_ex.fits' in f and f[:2] == 'tt':
                tt_red = f
            else:
                lista.append(f)
        merged = ntt_red.replace('_red_', '_merge_')
        if tt_blue and ntt_red:
            floyds.floydsspecdef.combspec2(tt_blue, ntt_red, merged, scale=True, num=None)
        if os.path.isfile(merged):
            lista.append(merged)
            floyds.util.delete(tt_blue)
            floyds.util.delete(tt_red)
            floyds.util.delete(ntt_red)
        else:
            if tt_blue: lista.append(tt_blue)
            if tt_red:  lista.append(tt_red)
            if ntt_red: lista.append(ntt_red)
        outputfile['obj'][imm] = lista
    readme=floyds.floydsspecauto.writereadme()
    return outputfile,readme
Example #13
def extractspectrum(img, dv, inst, _interactive, _type, automaticex=False):
    # print "LOGX:: Entering `extractspectrum` method/function in
    # %(__file__)s" % globals()
    import glob
    import os
    import string
    import sys
    import re
    import datetime
    import numpy as np

    MJDtoday = 55927 + (datetime.date.today() - datetime.date(2012, 01, 01)).days
    from pyraf import iraf

    iraf.noao(_doprint=0)
    iraf.imred(_doprint=0)
    iraf.specred(_doprint=0)
    toforget = ['specred.apall', 'specred.transform']
    for t in toforget:
        iraf.unlearn(t)

    hdr = readhdr(img)
    iraf.specred.dispaxi = inst.get('dispaxis')

    imgex = re.sub('.fits', '_ex.fits', img)
    imgfast = re.sub(string.split(img, '_')[-2] + '_', '', img)
    if not os.path.isfile(imgex) and not os.path.isfile(
            'database/ap' + re.sub('.fits', '', img)) and not os.path.isfile(
            'database/ap' + re.sub('.fits', '', imgfast)):
        _new = 'yes'
        _extract = 'yes'
    else:
        if automaticex:
            if _interactive in ['Yes', 'yes', 'YES', 'y', 'Y']:
                answ = 'x'
                while answ not in ['o', 'n', 's']:
                    answ = raw_input(
                        '\n### New extraction [n], extraction with old parameters [o], skip extraction [s] ? [o]')
                    if not answ:
                        answ = 'o'
                if answ == 'o':
                    _new, _extract = 'no', 'yes'
                elif answ == 'n':
                    _new, _extract = 'yes', 'yes'
                else:
                    _new, _extract = 'yes', 'no'
            else:
                _new, _extract = 'no', 'yes'
        else:
            if _interactive in ['Yes', 'yes', 'YES', 'y', 'Y']:
                answ = 'x'
                while answ not in ['y', 'n']:
                    answ = raw_input(
                        '\n### do you want to extract again [[y]/n] ? ')
                    if not answ:
                        answ = 'y'
                if answ == 'y':
                    _new, _extract = 'yes', 'yes'
                else:
                    _new, _extract = 'yes', 'no'
            else:
                _new, _extract = 'yes', 'yes'
    if _extract == 'yes':
        delete(imgex)
        dist = 200
        _reference = ''
        _fittrac = 'yes'
        _trace = 'yes'
        if _new == 'no':
            if not os.path.isfile('database/ap' + re.sub('.fits', '', img)):
                repstringinfile('database/ap' + re.sub('.fits', '', imgfast),
                                'database/ap' + re.sub('.fits', '', img),
                                re.sub('.fits', '', imgfast),
                                re.sub('.fits', '', img))
            _find = 'no'
            _recenter = 'no'
            _edit = 'no'
            _trace = 'no'
            _fittrac = 'no'
            _mode = 'h'
            _resize = 'no'
            _review = 'no'
            iraf.specred.mode = 'h'
            _interactive = 'no'
        else:
            iraf.specred.mode = 'q'
            _mode = 'q'
            _find = 'yes'
            _recenter = 'yes'
            _edit = 'yes'
            _review = 'yes'
            _resize = dv[_type]['_resize']

        # _interactive = False
        _recenter = 'no'
        _resize = 'no'
        _edit = 'yes'
        _trace = 'yes'
        _fittrace = 'no'
        _review = 'yes'

        iraf.specred.apall(img, output=imgex, referen=_reference, trace=_trace,
                           fittrac=_fittrac, find=_find, recenter=_recenter, edit=_edit,
                           nfind=1, backgro='fit', lsigma=4, usigma=4,
                           format='multispec', extras='yes',
                           b_function='legendre', b_sample=dv[_type]['_b_sample'],
                           clean='yes', pfit='fit1d',
                           lower=dv[_type]['_lower'], upper=dv[_type]['_upper'],
                           t_niter=dv[_type]['_t_niter'], width=dv[_type]['_width'],
                           radius=dv[_type]['_radius'], line='INDEF', nsum=dv[_type]['_nsum'],
                           t_step=dv[_type]['_t_step'], t_nsum=dv[_type]['_t_nsum'],
                           t_nlost=dv[_type]['_t_nlost'], t_sample=dv[_type]['_t_sample'],
                           resize=_resize, t_order=dv[_type]['_t_order'],
                           weights=dv[_type]['_weights'], interactive=_interactive,
                           review=_review, mode=_mode)
    else:
        print '\n### skipping new extraction'
    return imgex
Example #14
#!/usr/bin/env python
#
# A script to reduce data from the Coude Spectrograph from the 1.6m telescope
# of the Observatorio do Pico dos Dias - Brazil
#
# Load Python standard modules
import glob
# Load third-party modules
import ds9
from pyraf import iraf


print 'Loading IRAF packages ...'
iraf.imred()
iraf.ccdred()
iraf.specred()

print 'unlearning previous settings...'
iraf.ccdred.unlearn()
iraf.ccdred.ccdproc.unlearn()
iraf.ccdred.combine.unlearn()
iraf.ccdred.zerocombine.unlearn()
iraf.ccdred.flatcombine.unlearn()
iraf.specred.response.unlearn()

# regular expression of files (e.g bias_00*.fits, flat-2000jan01_?.*)
theflat = str(raw_input('Enter flat image: '))

iraf.specred.extinction = ''
iraf.specred.caldir = ''
iraf.specred.observatory = 'lna'
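A plausible continuation, shown only as a sketch: normalize the prompted flat with the response task unlearned above (assumes theflat names a single combined flat image):

iraf.specred.response.interactive = True
iraf.specred.response(calibration=theflat, normalization=theflat, response='nFlat')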
Example #15
def main():

    description = "> Performs pre-reduction steps"
    usage = "%prog    \t [option] \n Recommended syntax: %prog -i -c"

    parser = OptionParser(usage=usage, description=description, version="0.1")
    option, args = parser.parse_args()

    iraf.noao(_doprint=0)
    iraf.imred(_doprint=0)
    iraf.ccdred(_doprint=0)
    iraf.twodspec(_doprint=0)
    iraf.longslit(_doprint=0)
    iraf.onedspec(_doprint=0)
    iraf.specred(_doprint=0)

    iraf.ccdred.verbose = 'no'
    iraf.specred.verbose = 'no'
    iraf.ccdproc.darkcor = 'no'
    iraf.ccdproc.fixpix = 'no'
    iraf.ccdproc.flatcor = 'no'
    iraf.ccdproc.zerocor = 'no'
    iraf.ccdproc.ccdtype = ''

    iraf.longslit.mode = 'h'
    iraf.specred.mode = 'h'
    iraf.noao.mode = 'h'
    iraf.ccdred.instrument = "ccddb$kpno/camera.dat"

    mkarc = raw_input("Make arc? ([y]/n): ")
    mkflat = raw_input("Make flat? ([y]/n): ")

    if len(args) > 1:
        files = []
        sys.argv.append('--help')
        option, args = parser.parse_args()
        sys.exit()
    elif len(args) == 1:
        files = util.readlist(args[0])
        sys.exit()
    else:
        listfile = glob.glob('*.fits')
        files_science = []
        files_arc = []
        files_dflat = []
        #print 'checking your files ...'
        for img in listfile:
            _type = ''
            hdr0 = util.readhdr(img)
            _type = util.readkey3(hdr0, 'object')
            if 'flat' in _type.lower():
                files_dflat.append(img)
            elif 'arc' not in _type.lower() and 'arc' not in img.lower():
                files_science.append(img)
        if mkarc != 'n':
            mkarc_b = raw_input(
                "List blue arc files to combine (.fits will be added): "
            ).split()
            mkarc_r = raw_input(
                "List red arc files to combine (.fits will be added): ").split()
            for arc in mkarc_b:
                files_arc.append(arc + '.fits')
            for arc in mkarc_r:
                files_arc.append(arc + '.fits')

    if mkarc != 'n':
        list_arc_b = []
        list_arc_r = []
        for arcs in files_arc:
            if instruments.blue_or_red(arcs)[0] == 'blue':
                list_arc_b.append(arcs)
            elif instruments.blue_or_red(arcs)[0] == 'red':
                list_arc_r.append(arcs)
            else:
                sys.exit()

    if mkflat != 'n':
        list_flat_b = []
        list_flat_r = []
        for dflats in files_dflat:
            if instruments.blue_or_red(dflats)[0] == 'blue':
                list_flat_b.append(dflats)
            elif instruments.blue_or_red(dflats)[0] == 'red':
                list_flat_r.append(dflats)
            else:
                sys.exit()

    # make pre_reduced if it doesn't exist
    if not os.path.isdir('pre_reduced/'):
        os.mkdir('pre_reduced/')

    # log the existing processed files (need to verify this works if pre_reduced is empty...)
    pfiles = []
    new_files = []
    for root, dirnames, filenames in os.walk('pre_reduced'):
        for file in filenames:
            if file.startswith('to'):
                pfiles.append(file)
    print(pfiles)

    # loop over each image in pre_reduced
    for img in listfile:
        hdr = util.readhdr(img)
        targ = util.readkey3(hdr, 'object')

        # if the file is not a processed file, run the overscan+trim code
        if 'to' + img not in pfiles:

            # if the file is a science file, grab the name for later
            if 'arc' not in targ.lower() and 'flat' not in targ.lower():
                new_files.append(img)
                print('Adding data for: ' + targ)

            inst = instruments.blue_or_red(img)[1]

            iraf.specred.dispaxi = inst.get('dispaxis')
            iraf.longslit.dispaxi = inst.get('dispaxis')

            _biassec0 = inst.get('biassec')
            _trimsec0 = inst.get('trimsec')

            ######################################################################
            #
            # JB: this chunk of code needs attention
            # It seems incredibly hacky for anything but Kast...
            #
            # overscan
            if not img.startswith('o') and inst.get('observatory') == 'lick':
                if os.path.isfile('pre_reduced/o' + img):
                    os.remove('pre_reduced/o' + img)
                util.kastbias(img, 'pre_reduced/o' + img)
            elif not img.startswith('o') and inst.get('observatory') != 'lick':
                if os.path.isfile('pre_reduced/o' + img):
                    os.remove('pre_reduced/o' + img)
                os.system('cp ' + img + ' ' + 'pre_reduced/' + img)

            # trim
            if not img.startswith('t') and inst.get('observatory') == 'lick':
                if os.path.isfile('pre_reduced/to' + img):
                    os.remove('pre_reduced/to' + img)
                iraf.ccdproc('pre_reduced/o' + img,
                             output='pre_reduced/to' + img,
                             overscan='no',
                             trim='yes',
                             zerocor="no",
                             flatcor="no",
                             readaxi='line',
                             trimsec=str(_trimsec0),
                             Stdout=1)

            elif not img.startswith('t') and inst.get('observatory') != 'lick':
                if os.path.isfile('pre_reduced/to' + img):
                    os.remove('pre_reduced/to' + img)
                iraf.ccdproc('pre_reduced/' + img,
                             output='pre_reduced/to' + img,
                             overscan='yes',
                             trim='yes',
                             zerocor="no",
                             flatcor="no",
                             readaxi='line',
                             trimsec=str(_trimsec0),
                             biassec=str(_biassec0),
                             Stdout=1)

    # combine the arcs
    if mkarc != 'n':

        # blue arcs
        if len(list_arc_b) > 0:
            if len(list_arc_b) == 1:
                arc_blue = list_arc_b[0]
                os.system('cp ' + 'pre_reduced/to' + arc_blue + ' ' +
                          'pre_reduced/ARC_blue.fits')
            else:
                arc_str = ''
                for arc in list_arc_b:
                    arc_str = arc_str + 'pre_reduced/to' + arc + ','
                if os.path.isfile('pre_reduced/ARC_blue.fits'):
                    os.remove('pre_reduced/ARC_blue.fits')
                iraf.imcombine(arc_str, output='pre_reduced/ARC_blue.fits')

        # red arcs
        if len(list_arc_r) > 0:
            if len(list_arc_r) == 1:
                arc_red = list_arc_r[0]
                os.system('cp ' + 'pre_reduced/to' + arc_red + ' ' +
                          'pre_reduced/ARC_red.fits')
            else:
                arc_str = ''
                for arc in list_arc_r:
                    arc_str = arc_str + 'pre_reduced/to' + arc + ','
                if os.path.isfile('pre_reduced/ARC_red.fits'):
                    os.remove('pre_reduced/ARC_red.fits')
                iraf.imcombine(arc_str, output='pre_reduced/ARC_red.fits')

    # combine the flats
    if mkflat != 'n':
        inter = 'yes'

        # blue flats
        if len(list_flat_b) > 0:
            br, inst = instruments.blue_or_red(list_flat_b[0])
            iraf.specred.dispaxi = inst.get('dispaxis')
            if len(list_flat_b) == 1:
                # Flat_blue = 'pre_reduced/to'+ list_flat_b[0]
                Flat_blue = list_flat_b[0]
            else:
                flat_str = ''
                for flat in list_flat_b:
                    flat_str = flat_str + 'pre_reduced/to' + flat + ','
                #subsets = 'no'
                if os.path.isfile('pre_reduced/toFlat_blue'):
                    os.remove('pre_reduced/toFlat_blue')
                iraf.flatcombine(flat_str,
                                 output='pre_reduced/toFlat_blue',
                                 ccdtype='',
                                 rdnoise=3.7,
                                 subsets='no',
                                 process='no')
                Flat_blue = 'Flat_blue.fits'

            #What is the output here? Check for overwrite
            iraf.specred.response('pre_reduced/to' + Flat_blue,
                                  normaliz='pre_reduced/to' + Flat_blue,
                                  response='pre_reduced/RESP_blue',
                                  interac=inter,
                                  thresho='INDEF',
                                  sample='*',
                                  naverage=2,
                                  function='legendre',
                                  low_rej=3,
                                  high_rej=3,
                                  order=60,
                                  niterat=20,
                                  grow=0,
                                  graphic='stdgraph')

        # red flats
        if len(list_flat_r) > 0:
            br, inst = instruments.blue_or_red(list_flat_r[0])
            iraf.specred.dispaxi = inst.get('dispaxis')
            if len(list_flat_r) == 1:
                # Flat_red = 'pre_reduced/to' + list_flat_r[0]
                Flat_red = list_flat_r[0]
            else:
                flat_str = ''
                for flat in list_flat_r:
                    flat_str = flat_str + 'pre_reduced/to' + flat + ','
                if os.path.isfile('pre_reduced/toFlat_red'):
                    os.remove('pre_reduced/toFlat_red')
                iraf.flatcombine(flat_str,
                                 output='pre_reduced/toFlat_red',
                                 ccdtype='',
                                 rdnoise=3.8,
                                 subsets='yes',
                                 process='no')
                Flat_red = 'Flat_red.fits'

            #What is the output here? Check for overwrite
            iraf.specred.response('pre_reduced/to' + Flat_red,
                                  normaliz='pre_reduced/to' + Flat_red,
                                  response='pre_reduced/RESP_red',
                                  interac=inter,
                                  thresho='INDEF',
                                  sample='*',
                                  naverage=2,
                                  function='legendre',
                                  low_rej=3,
                                  high_rej=3,
                                  order=80,
                                  niterat=20,
                                  grow=0,
                                  graphic='stdgraph')

    # science files should have 't' in front now
    # this just gets the base name, to prefix assumed below
    if new_files is not None:
        files_science = new_files

    # get all the science objects for the night
    science_targets = []
    for obj in files_science:
        hdr = util.readhdr(obj)
        _type = util.readkey3(hdr, 'object')
        science_targets.append(_type)

    # make a dir for each sci object
    science_targets = set(science_targets)
    for targ in science_targets:
        if not os.path.isdir('pre_reduced/' + targ + '/'):
            os.mkdir('pre_reduced/' + targ + '/')

    # copy the files into the obj dir
    for obj in files_science:
        hdr = util.readhdr(obj)
        targ = util.readkey3(hdr, 'object')
        if not obj.startswith('to'):
            os.system('cp ' + 'pre_reduced/to' + obj + ' ' + 'pre_reduced/' +
                      targ + '/')
        else:
            os.system('cp ' + 'pre_reduced/' + obj + ' ' + 'pre_reduced/' +
                      targ + '/')

    rawfiles = glob.glob('*.fits')
    ofiles = glob.glob('pre_reduced/o' + '*.fits')
    tfiles = glob.glob('pre_reduced/to' + '*.fits')

    # delete raw files from the pre_reduced dir
    # there shouldn't be any there though?
    # maybe if the overscan isn't implemented for that detector
    for img in rawfiles:
        util.delete('pre_reduced/' + img)

    # delete the ofiles from pre_reduced dir
    for img in ofiles:
        util.delete(img)
Example #16
def reduce(imglist,files_arc, _cosmic, _interactive_extraction,_arc):

    import string
    import os
    import re
    import sys
    os.environ["PYRAF_BETA_STATUS"] = "1"
    try:      from astropy.io import fits as pyfits
    except:   import   pyfits
    import numpy as np
    import util
    import instruments
    import combine_sides as cs
    import cosmics
    from pyraf import iraf

    dv = util.dvex()
    scal = np.pi / 180.
    
    if not _interactive_extraction:
        _interactive = False
    else:
        _interactive = True

    if not _arc:
        _arc_identify = False
    else:
        _arc_identify = True

    iraf.noao(_doprint=0)
    iraf.imred(_doprint=0)
    iraf.ccdred(_doprint=0)
    iraf.twodspec(_doprint=0)
    iraf.longslit(_doprint=0)
    iraf.onedspec(_doprint=0)
    iraf.specred(_doprint=0)
    iraf.disp(inlist='1', reference='1')

    toforget = ['ccdproc', 'imcopy', 'specred.apall', 'longslit.identify', 'longslit.reidentify', 'specred.standard',
                'longslit.fitcoords', 'onedspec.wspectext']
    for t in toforget:
        iraf.unlearn(t)
    iraf.ccdred.verbose = 'no'
    iraf.specred.verbose = 'no'
    iraf.ccdproc.darkcor = 'no'
    iraf.ccdproc.fixpix = 'no'
    iraf.ccdproc.flatcor = 'no'
    iraf.ccdproc.zerocor = 'no'
    iraf.ccdproc.ccdtype = ''

    iraf.longslit.mode = 'h'
    iraf.specred.mode = 'h'
    iraf.noao.mode = 'h'
    iraf.ccdred.instrument = "ccddb$kpno/camera.dat"

    list_arc_b = []
    list_arc_r = []

    for arcs in files_arc:
        hdr = util.readhdr(arcs)
        if util.readkey3(hdr, 'VERSION') == 'kastb':
            list_arc_b.append(arcs)
        elif util.readkey3(hdr, 'VERSION') == 'kastr':
            list_arc_r.append(arcs)
        else:
            print util.readkey3(hdr, 'VERSION') + ' not in database'
            sys.exit()
    
    asci_files = []
    newlist = [[],[]]

    print '\n### images to reduce :',imglist
    #raise TypeError
    for img in imglist:
        if 'b' in img:
            newlist[0].append(img)
        elif 'r' in img:
            newlist[1].append(img)

    if len(newlist[1]) < 1:
        newlist = newlist[:-1]
    
    for imgs in newlist:
        hdr = util.readhdr(imgs[0])
        if util.readkey3(hdr, 'VERSION') == 'kastb':
            inst = instruments.kast_blue
        elif util.readkey3(hdr, 'VERSION') == 'kastr':
            inst = instruments.kast_red
        else:
            print util.readkey3(hdr, 'VERSION') + ' not in database'
            sys.exit()

        iraf.specred.dispaxi = inst.get('dispaxis')
        iraf.longslit.dispaxi = inst.get('dispaxis')

        _gain = inst.get('gain')
        _ron = inst.get('read_noise')
        iraf.specred.apall.readnoi = _ron
        iraf.specred.apall.gain = _gain

        _object0 = util.readkey3(hdr, 'OBJECT')
        _date0 = util.readkey3(hdr, 'DATE-OBS')


        _biassec0 = inst.get('biassec')
        _trimsec0 = inst.get('trimsec')

        _object0 = re.sub(' ', '', _object0)
        _object0 = re.sub('/', '_', _object0)
        nameout0 = str(_object0) + '_' + inst.get('name') + '_' + str(_date0)

        nameout0 = util.name_duplicate(imgs[0], nameout0, '')
        timg = nameout0
        print '\n### now processing :',timg,' for -> ',inst.get('name')
        if len(imgs) > 1:
            img_str = ''
            for i in imgs:
                img_str = img_str + i + ','
            iraf.imcombine(img_str, output=timg)
        else:
            img = imgs[0]
            if os.path.isfile(timg):
                os.system('rm -rf ' + timg)
            iraf.imcopy(img, output=timg)
        
        zero_file = inst.get('archive_zero_file')
        os.system('cp ' + zero_file + ' .')
        zero_file = string.split(zero_file, '/')[-1]
        
        flat_file = inst.get('archive_flat_file')
        os.system('cp ' + flat_file + ' .')
        flat_file = string.split(flat_file, '/')[-1]
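        # three ccdproc passes follow: (1) overscan subtraction and trimming,
        # (2) bias (zero) correction using the archival zero frame copied above,
        # (3) flat-field correction using the archival flat copied above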
        
        iraf.ccdproc(timg, output='', overscan='yes', trim='yes', zerocor="no", flatcor="no", readaxi='line',
                     trimsec=str(_trimsec0),biassec=str(_biassec0), Stdout=1)

        iraf.ccdproc(timg, output='', overscan='no', trim='no', zerocor="yes", flatcor="no", readaxi='line',
                     zero=zero_file,order=3, Stdout=1)
        iraf.ccdproc(timg, output='', overscan='no', trim='no', zerocor="no", flatcor="yes", readaxi='line',
                     flat=flat_file, Stdout=1)

        img = timg

        #raw_input("Press Enter to continue...")
        print '\n### starting cosmic removal'
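        # cosmics.py parameters below: sigclip is the Laplacian detection threshold
        # in sigma, sigfrac the detection threshold for pixels neighbouring a cosmic,
        # and objlim the minimum contrast between a cosmic ray and a real object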
        if _cosmic:
            array, header = cosmics.fromfits(img)
            c = cosmics.cosmicsimage(array, gain=inst.get('gain'), readnoise=inst.get('read_noise'), sigclip = 4.5, sigfrac = 0.5, objlim = 1.0)
            c.run(maxiter = 4)
            cosmics.tofits('cosmic_' + img, c.cleanarray, header)

        print '\n### cosmic removal finished'

        img='cosmic_' + img

        if inst.get('name') == 'kast_blue':
            arcfile = list_arc_b[0]
        elif inst.get('name') == 'kast_red':
            arcfile = list_arc_r[0]
        
        if not arcfile.endswith(".fits"):
            arcfile=arcfile+'.fits'

        if os.path.isfile(arcfile):
            util.delete('t' + arcfile)
            iraf.ccdproc(arcfile, output= 't' + arcfile, overscan='yes', trim='yes', zerocor="no", flatcor="no",
                         readaxi='line', trimsec=str(_trimsec0), biassec=str(_biassec0), Stdout=1)
            arcfile = 't' + arcfile
        else:
            print '\n### warning: arc file not found, exiting'
            sys.exit()

        if not os.path.isdir('database/'):
                os.mkdir('database/')
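        # two routes to a wavelength solution: extract this night's arc and identify
        # its lines interactively, or extract it and reidentify it against the
        # instrument's archival arc solution copied into ./database/ below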
        
        if _arc_identify:
            arc_ex=re.sub('.fits', '.ms.fits', arcfile)
            print '\n### arcfile : ',arcfile
            print '\n### arcfile extraction : ',arc_ex
            iraf.specred.apall(arcfile, output='', line='INDEF', nsum=10, interactive='no',
                               extract='yes', find='yes', nfind=1, format='multispec',
                               trace='no', back='no', recen='no')
            iraf.longslit.identify(images=arc_ex, section=inst.get('section'),coordli=inst.get('line_list'),function = 'spline3',order=3, mode='h')
        else:
            arcref = inst.get('archive_arc_extracted')
            arcrefid = inst.get('archive_arc_extracted_id')
            os.system('cp ' + arcref + ' .')
            arcref = string.split(arcref, '/')[-1]
            os.system('cp ' + arcrefid + ' ./database')

            arc_ex=re.sub('.fits', '.ms.fits', arcfile)

            print '\n###  arcfile : ',arcfile
            print '\n###  arcfile extraction : ',arc_ex
            print '\n###  arc reference : ',arcref
            iraf.specred.apall(arcfile, output=arc_ex, line='INDEF', nsum=10, interactive='no',
                               extract='yes', find='yes', nfind=1, format='multispec',
                               trace='no', back='no', recen='no')

            iraf.longslit.reidentify(referenc=arcref, images=arc_ex, interac='NO', section=inst.get('section'), 
                                    coordli=inst.get('line_list'), shift='INDEF', search='INDEF',
                                    mode='h', verbose='YES', step=0,nsum=5, nlost=2, cradius=10, refit='yes',overrid='yes',newaps='no')
        
        #print '\n### checking sky lines '
        #_skyfile = inst.get('sky_file')
        #shift = util.skyfrom2d(img, _skyfile,'True')
        #print '\n### I found a shift of : ',shift

        print '\n### extraction using apall'
        result = []
        hdr_image = util.readhdr(img)
        _type=util.readkey3(hdr_image, 'object')

        if _type.startswith("arc") or _type.startswith("dflat") or _type.startswith("Dflat") or _type.startswith("Dbias") or _type.startswith("Bias"):
            print '\n### warning: calibration frame passed as science image, exiting'
            sys.exit()
        else:
            imgex = util.extractspectrum(
                img, dv, inst, _interactive, 'obj')
            print '\n### applying wavelength solution'
            iraf.disp(inlist=imgex, reference=arc_ex)   
            sensfile = inst.get('archive_sens')
            os.system('cp ' + sensfile + ' .')
            sensfile = string.split(sensfile, '/')[-1]
            if sensfile:
                print '\n### sensitivity function : ',sensfile
                imgf = re.sub('.fits', '_f.fits', img)
                _extinction = inst.get('extinction_file')
                _observatory = inst.get('observatory')
                _exptime = util.readkey3(hdr, 'EXPTIME')
                _airmass = util.readkey3(hdr, 'AIRMASS')
                util.delete(imgf)
                dimgex='d'+imgex
                iraf.specred.calibrate(input=dimgex, output=imgf, sensiti=sensfile, extinct='yes',
                                        extinction=_extinction,flux='yes', ignorea='yes', airmass=_airmass, exptime=_exptime,
                                        fnu='no')
                imgout = imgf
                imgasci = re.sub('.fits', '.asci', imgout)
                errasci = re.sub('.fits', '_err.asci', imgout)
                util.delete(imgasci)
                iraf.onedspec.wspectext(imgout + '[*,1,1]', imgasci, header='no')
                iraf.onedspec.wspectext(imgout + '[*,1,4]', errasci, header='no')
                spec = np.transpose(np.genfromtxt(imgasci))
                err = np.transpose(np.genfromtxt(errasci))
                util.delete(errasci)
                final = np.transpose([spec[0], spec[1], err[1]])
                np.savetxt(imgasci, final)

                result = result + [imgout, imgasci]

        result = result + [imgex] + [timg]
       
        asci_files.append(imgasci)
        if not os.path.isdir(_object0 + '/'):
            os.mkdir(_object0 + '/')
        for img in result:
            os.system('mv ' + img + ' ' + _object0 + '/')
        
        if not _arc_identify:
            util.delete(arcref)
        util.delete(sensfile)
        util.delete(zero_file)
        util.delete(flat_file)
        util.delete(arc_ex)
        util.delete(arcfile)
        util.delete('logfile')
        util.delete(dimgex)
        util.delete('cosmic_*')
    print '\n### now I will merge ...'
    if len(asci_files) > 1:
        final = cs.combine_blue_red(asci_files[0], asci_files[1], _object0)
    print '\n### final result in folder ',_object0,' is ',_object0+'_merged.asci'
    return result
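
A minimal driver sketch for the reduce() function above; the glob patterns and the
flag values are illustrative assumptions, not part of the original pipeline:

import glob

# assumed naming: Kast blue frames contain 'b' and red frames 'r', as the
# grouping inside reduce() expects; arcs are sorted by their VERSION keyword
science_frames = sorted(glob.glob('b*.fits') + glob.glob('r*.fits'))
arc_frames = sorted(glob.glob('arc*.fits'))          # assumed arc naming
outputs = reduce(science_frames, arc_frames,
                 _cosmic=True,                       # run the cosmics.py cleaning step
                 _interactive_extraction=False,      # non-interactive apall extraction
                 _arc=False)                         # False -> reidentify against the archival arc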
Example #17
def efoscspec1Dredu(files,
                    _interactive,
                    _ext_trace,
                    _dispersionline,
                    liststandard,
                    listatmo0,
                    _automaticex,
                    _verbose=False):
    # print "LOGX:: Entering `efoscspec1Dredu` method/function in
    # %(__file__)s" % globals()
    import ntt

    try:
        import pyfits
    except:
        from astropy.io import fits as pyfits

    import re
    import string
    import sys
    import os
    import numpy as np

    os.environ["PYRAF_BETA_STATUS"] = "1"
    _extinctdir = 'direc$standard/extinction/'
    _extinction = 'lasilla2.txt'
    _observatory = 'lasilla'
    import datetime

    now = datetime.datetime.now()
    datenow = now.strftime('20%y%m%d%H%M')
    MJDtoday = 55927 + (datetime.date.today() -
                        datetime.date(2012, 1, 1)).days
    dv = ntt.dvex()
    scal = np.pi / 180.
    _gain = ntt.util.readkey3(ntt.util.readhdr(re.sub('\n', '', files[0])),
                              'gain')
    _rdnoise = ntt.util.readkey3(ntt.util.readhdr(re.sub('\n', '', files[0])),
                                 'ron')
    std, rastd, decstd, magstd = ntt.util.readstandard(
        'standard_efosc_mab.txt')
    objectlist = {}
    for img in files:
        hdr = ntt.util.readhdr(img)
        img = re.sub('\n', '', img)
        ntt.util.correctcard(img)
        _ra = ntt.util.readkey3(hdr, 'RA')
        _dec = ntt.util.readkey3(hdr, 'DEC')
        _object = ntt.util.readkey3(hdr, 'object')
        _grism = ntt.util.readkey3(hdr, 'grism')
        _filter = ntt.util.readkey3(hdr, 'filter')
        _slit = ntt.util.readkey3(hdr, 'slit')
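        # dd: angular separation (arcsec) between this frame's pointing and every
        # catalogued standard; a match within 100 arcsec flags the frame as a flux
        # standard and stores the catalogue name and magnitude in its header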
        dd = np.arccos(
            np.sin(_dec * scal) * np.sin(decstd * scal) +
            np.cos(_dec * scal) * np.cos(decstd * scal) * np.cos(
                (_ra - rastd) * scal)) * ((180 / np.pi) * 3600)
        if min(dd) < 100:
            _type = 'stdsens'
        else:
            _type = 'obj'
        if min(dd) < 100:
            ntt.util.updateheader(img, 0,
                                  {'stdname': [std[np.argmin(dd)], '']})
            ntt.util.updateheader(
                img, 0, {'magstd': [float(magstd[np.argmin(dd)]), '']})

        if _type not in objectlist:
            objectlist[_type] = {}
        if (_grism, _filter, _slit) not in objectlist[_type]:
            objectlist[_type][_grism, _filter, _slit] = [img]
        else:
            objectlist[_type][_grism, _filter, _slit].append(img)

    from pyraf import iraf
    iraf.noao(_doprint=0)
    iraf.imred(_doprint=0)
    iraf.specred(_doprint=0)
    iraf.imutil(_doprint=0)
    toforget = ['imutil.imcopy', 'specred.sarith', 'specred.standard']
    for t in toforget:
        iraf.unlearn(t)
    iraf.specred.verbose = 'no'
    iraf.specred.dispaxi = 2
    iraf.set(direc=ntt.__path__[0] + '/')
    sens = {}
    print objectlist
    outputfile = []
    if 'obj' in objectlist.keys():
        tpe = 'obj'
    elif 'stdsens' in objectlist.keys():
        tpe = 'stdsens'
    else:
        sys.exit('error: no objects and no standards in the list')

    for setup in objectlist[tpe]:
        extracted = []
        listatmo = []
        if setup not in sens:
            sens[setup] = []
        if tpe == 'obj':
            print '\n### setup= ', setup, '\n### objects= ', objectlist['obj'][
                setup], '\n'
            for img in objectlist['obj'][setup]:
                #              hdr=readhdr(img)
                print '\n\n### next object= ', img, ' ', ntt.util.readkey3(
                    ntt.util.readhdr(img), 'object'), '\n'
                if os.path.isfile(re.sub('.fits', '_ex.fits', img)):
                    if ntt.util.readkey3(
                            ntt.util.readhdr(re.sub('.fits', '_ex.fits', img)),
                            'quality') == 'Rapid':
                        ntt.util.delete(re.sub('.fits', '_ex.fits', img))
                imgex = ntt.util.extractspectrum(img,
                                                 dv,
                                                 _ext_trace,
                                                 _dispersionline,
                                                 _interactive,
                                                 'obj',
                                                 automaticex=_automaticex)
                if not os.path.isfile(imgex):
                    sys.exit('### error, extraction not computed')
                if not ntt.util.readkey3(ntt.util.readhdr(imgex), 'shift') and \
                        ntt.util.readkey3(ntt.util.readhdr(imgex), 'shift') != 0.0:
                    # if not readkey3(readhdr(imgex),'shift'):
                    ntt.efoscspec1Ddef.checkwavestd(imgex, _interactive)
                extracted.append(imgex)
                if imgex not in outputfile:
                    outputfile.append(imgex)
                ntt.util.updateheader(
                    imgex, 0, {'FILETYPE': [22107, 'extracted 1D spectrum ']})
                ntt.util.updateheader(
                    imgex, 0, {
                        'PRODCATG': [
                            'SCIENCE.' + ntt.util.readkey3(
                                ntt.util.readhdr(imgex), 'tech').upper(),
                            'Data product category'
                        ]
                    })
                ntt.util.updateheader(imgex, 0,
                                      {'TRACE1': [img, 'Originating file']})
                if os.path.isfile('database/ap' +
                                  re.sub('_ex.fits', '', imgex)):
                    if 'database/ap' + re.sub('_ex.fits', '',
                                              imgex) not in outputfile:
                        outputfile.append('database/ap' +
                                          re.sub('_ex.fits', '', imgex))
            print '\n### all objects with this setup extracted\n'
        if liststandard:
            standardlist = liststandard
            _type = 'stdfromdreducer'
        else:
            try:
                standardlist = objectlist['stdsens'][setup]
                _type = 'stdsens'
            except:
                standardlist = ''
                _type = ''
        if _type == 'stdfromdreducer' and len(extracted) >= 1:
            _outputsens2 = ntt.util.searchsens(extracted[0], standardlist)[0]
            print '\n### using standard from reducer ' + str(_outputsens2)
        elif _type not in ['stdsens', 'stdfromdreducer'
                           ] and len(extracted) >= 1:
            _outputsens2 = ntt.util.searchsens(extracted[0], '')[0]
            os.system('cp ' + _outputsens2 + ' .')
            _outputsens2 = string.split(_outputsens2, '/')[-1]
            print '\n### no standard in the list, using standard from archive'
        else:
            for simg in standardlist:
                print '\n###  standard for setup ' + \
                      str(setup) + ' = ', simg, ' ', ntt.util.readkey3(
                          ntt.util.readhdr(simg), 'object'), '\n'
                simgex = ntt.util.extractspectrum(simg,
                                                  dv,
                                                  False,
                                                  False,
                                                  _interactive,
                                                  'std',
                                                  automaticex=_automaticex)
                ntt.util.updateheader(
                    simgex, 0, {'FILETYPE': [22107, 'extracted 1D spectrum']})
                ntt.util.updateheader(
                    simgex, 0, {
                        'PRODCATG': [
                            'SCIENCE.' + ntt.util.readkey3(
                                ntt.util.readhdr(simgex), 'tech').upper(),
                            'Data product category'
                        ]
                    })
                ntt.util.updateheader(simgex, 0,
                                      {'TRACE1': [simg, 'Originating file']})
                if not ntt.util.readkey3(ntt.util.readhdr(simgex), 'shift') and \
                        ntt.util.readkey3(ntt.util.readhdr(simgex), 'shift') != 0.0:
                    #                if not readkey3(readhdr(simgex),'shift'):
                    ntt.efoscspec1Ddef.checkwavestd(simgex, _interactive)
                atmofile = ntt.efoscspec1Ddef.telluric_atmo(
                    simgex)  # atmo file2
                ntt.util.updateheader(atmofile, 0,
                                      {'TRACE1': [simgex, 'Originating file']})
                ntt.util.updateheader(
                    atmofile, 0,
                    {'FILETYPE': [21211, 'telluric correction 1D spectrum ']})
                if tpe != 'obj' and atmofile not in outputfile:
                    outputfile.append(atmofile)
                if not listatmo0:
                    listatmo.append(atmofile)
                sens[setup].append(simgex)
                if simgex not in outputfile:
                    outputfile.append(simgex)
                if setup[0] == 'Gr13' and setup[1] == 'Free':
                    if os.path.isfile(re.sub('Free', 'GG495', simg)):
                        print '\n### extract standard frame with blocking filter to correct for second order contamination\n'
                        simg2 = re.sub('Free', 'GG495', simg)
                        simgex2 = ntt.util.extractspectrum(
                            simg2,
                            dv,
                            False,
                            False,
                            _interactive,
                            'std',
                            automaticex=_automaticex)
                        ntt.util.updateheader(
                            simgex2, 0,
                            {'FILETYPE': [22107, 'extracted 1D spectrum']})
                        ntt.util.updateheader(
                            simgex2, 0, {
                                'PRODCATG': [
                                    'SCIENCE.' + ntt.util.readkey3(
                                        ntt.util.readhdr(simgex2),
                                        'tech').upper(),
                                    'Data product category'
                                ]
                            })
                        if not ntt.util.readkey3(ntt.util.readhdr(simgex2), 'shift') and \
                                ntt.util.readkey3(ntt.util.readhdr(simgex2), 'shift') != 0.0:
                            # if not readkey3(readhdr(simgex2),'shift'):
                            ntt.efoscspec1Ddef.checkwavestd(
                                simgex2, _interactive)
                        ntt.util.updateheader(
                            simgex2, 0,
                            {'TRACE1': [simg2, 'Originating file']})
            print '\n### standard available: ', sens[setup]
            if tpe == 'obj':
                if len(sens[setup]) > 1:
                    goon = 'no'
                    while goon != 'yes':
                        stdused = raw_input(
                            '\n### more than one standard for this setup, which one do you want to use ['
                            + sens[setup][0] + '] ?')
                        if not stdused:
                            stdused = sens[setup][0]
                        if os.path.isfile(stdused):
                            goon = 'yes'
                else:
                    stdused = sens[setup][0]
                stdvec = [stdused]
            else:
                stdvec = sens[setup]
            for stdused in stdvec:
                stdusedclean = re.sub('_ex', '_clean', stdused)
                ntt.util.delete(stdusedclean)
                iraf.specred.sarith(input1=stdused,
                                    op='/',
                                    input2=atmofile,
                                    output=stdusedclean,
                                    format='multispec')
                _outputsens2 = ntt.efoscspec1Ddef.sensfunction(
                    stdusedclean, 'spline3', 16, _interactive)
                ntt.util.updateheader(
                    _outputsens2, 0,
                    {'FILETYPE': [21212, 'sensitivity function']})
                ntt.util.updateheader(
                    _outputsens2, 0, {'TRACE1': [stdused, 'Originating file']})

                if setup[0] == 'Gr13' and setup[1] == 'Free':
                    if os.path.isfile(re.sub('Free', 'GG495', stdused)):
                        print '\n### compute sensitivity function of grism 13 with blocking filter ' \
                              'to correct for second order contamination \n'
                        stdused2 = re.sub('Free', 'GG495', stdused)
                        if not ntt.util.readkey3(ntt.util.readhdr(stdused2),
                                                 'STDNAME'):
                            ntt.util.updateheader(
                                stdused2, 0, {
                                    'STDNAME': [
                                        ntt.util.readkey3(
                                            ntt.util.readhdr(stdused),
                                            'STDNAME'), ''
                                    ]
                                })
                        atmofile2 = ntt.efoscspec1Ddef.telluric_atmo(
                            stdused2)  # atmo file2
                        stdusedclean2 = re.sub('_ex', '_clean', stdused2)
                        ntt.util.delete(stdusedclean2)
                        iraf.specred.sarith(input1=stdused2,
                                            op='/',
                                            input2=atmofile2,
                                            output=stdusedclean2,
                                            format='multispec')
                        _outputsens3 = ntt.efoscspec1Ddef.sensfunction(
                            stdusedclean2, 'spline3', 16, _interactive)
                        ntt.util.updateheader(
                            _outputsens3, 0,
                            {'FILETYPE': [21212, 'sensitivity function']})
                        ntt.util.updateheader(
                            _outputsens3, 0,
                            {'TRACE1': [stdused2, 'Originating file']})
                        _outputsens2 = correctsens(_outputsens2, _outputsens3)

                if _outputsens2 not in outputfile:
                    outputfile.append(_outputsens2)
        if _outputsens2 and tpe == 'obj':
            ####################################################
            for img in objectlist['obj'][setup]:  # flux calibrate 2d images
                imgd = fluxcalib2d(img, _outputsens2)
                ntt.util.updateheader(
                    imgd, 0, {
                        'FILETYPE':
                        [22209, '2D wavelength and flux calibrated spectrum ']
                    })
                ntt.util.updateheader(imgd, 0,
                                      {'TRACE1': [img, 'Originating files']})
                iraf.hedit(imgd,
                           'PRODCATG',
                           delete='yes',
                           update='yes',
                           verify='no')
                if imgd not in outputfile:
                    outputfile.append(imgd)
            ####################################################
            #    flux calib in the standard way
            if not listatmo and listatmo0:
                listatmo = listatmo0[:]
            for _imgex in extracted:
                _airmass = ntt.util.readkey3(ntt.util.readhdr(_imgex),
                                             'airmass')
                _exptime = ntt.util.readkey3(ntt.util.readhdr(_imgex),
                                             'exptime')
                _imgf = re.sub('_ex.fits', '_f.fits', _imgex)
                ntt.util.delete(_imgf)
                qqq = iraf.specred.calibrate(input=_imgex,
                                             output=_imgf,
                                             sensiti=_outputsens2,
                                             extinct='yes',
                                             flux='yes',
                                             extinction=_extinctdir +
                                             _extinction,
                                             observatory=_observatory,
                                             airmass=_airmass,
                                             ignorea='yes',
                                             exptime=_exptime,
                                             fnu='no')
                hedvec = {
                    'SENSFUN': [_outputsens2, ''],
                    'FILETYPE':
                    [22208, '1D wavelength and flux calibrated spectrum', ''],
                    #                     'SNR':[ntt.util.StoN(_imgf,50),'Average signal to noise ratio per pixel'],
                    'SNR': [
                        ntt.util.StoN2(_imgf, False),
                        'Average signal to noise ratio per pixel'
                    ],
                    'BUNIT':
                    ['erg/cm2/s/Angstrom', 'Physical unit of array values'],
                    'TRACE1': [_imgex, 'Originating file'],
                    'ASSON1': [
                        re.sub('_f.fits', '_2df.fits', _imgf),
                        'Name of associated file'
                    ],
                    'ASSOC1':
                    ['ANCILLARY.2DSPECTRUM', 'Category of associated file']
                }
                ntt.util.updateheader(_imgf, 0, hedvec)
                if _imgf not in outputfile:
                    outputfile.append(_imgf)
                if listatmo:
                    atmofile = ntt.util.searcharc(_imgex, listatmo)[0]
                    if atmofile:
                        _imge = re.sub('_f.fits', '_e.fits', _imgf)
                        ntt.util.delete(_imge)
                        iraf.specred.sarith(input1=_imgf,
                                            op='/',
                                            input2=atmofile,
                                            output=_imge,
                                            w1='INDEF',
                                            w2='INDEF',
                                            format='multispec')
                        try:
                            iraf.imutil.imcopy(input=_imgf + '[*,1,2]',
                                               output=_imge + '[*,1,2]',
                                               verbose='no')
                        except:
                            pass
                        try:
                            iraf.imutil.imcopy(input=_imgf + '[*,1,3]',
                                               output=_imge + '[*,1,3]',
                                               verbose='no')
                        except:
                            pass
                        try:
                            iraf.imutil.imcopy(input=_imgf + '[*,1,4]',
                                               output=_imge + '[*,1,4]',
                                               verbose='no')
                        except:
                            pass
                        if _imge not in outputfile:
                            outputfile.append(_imge)
                        ntt.util.updateheader(
                            _imge, 0, {
                                'FILETYPE': [
                                    22210,
                                    '1D, wave, flux calib, telluric corr.'
                                ]
                            })
                        if atmofile not in outputfile:
                            outputfile.append(atmofile)
                        ntt.util.updateheader(_imge, 0,
                                              {'ATMOFILE': [atmofile, '']})
                        ntt.util.updateheader(
                            _imge, 0, {'TRACE1': [_imgf, 'Originating file']})
                        imgin = _imge
                    else:
                        imgin = _imgf
                else:
                    imgin = _imgf
                imgasci = re.sub('.fits', '.asci', imgin)

                ntt.util.delete(imgasci)
                iraf.onedspec(_doprint=0)
                iraf.onedspec.wspectext(imgin + '[*,1,1]',
                                        imgasci,
                                        header='no')
                if imgasci not in outputfile:
                    outputfile.append(imgasci)

    print '\n### adding keywords for phase 3 ....... '
    for img in outputfile:
        if str(img)[-5:] == '.fits':
            try:
                ntt.util.phase3header(img)  # phase 3 definitions
                ntt.util.updateheader(img, 0, {'quality': ['Final', '']})
            except:
                print 'Warning: ' + img + ' is not a fits file'
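            # old pyfits releases (version string <= 3.0) need the explicit
            # 'HIERARCH ' prefix to address the ESO hierarchical keywords
            # deleted below; newer versions resolve them without it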
            try:
                if int(re.sub('\.', '', str(pyfits.__version__))[:2]) <= 30:
                    aa = 'HIERARCH '
                else:
                    aa = ''
            except:
                aa = ''

            imm = pyfits.open(img, mode='update')
            hdr = imm[0].header
            if aa + 'ESO DPR CATG' in hdr:
                hdr.pop(aa + 'ESO DPR CATG')
            if aa + 'ESO DPR TECH' in hdr:
                hdr.pop(aa + 'ESO DPR TECH')
            if aa + 'ESO DPR TYPE' in hdr:
                hdr.pop(aa + 'ESO DPR TYPE')
            imm.flush()
            imm.close()

    print outputfile
    reduceddata = ntt.rangedata(outputfile)
    f = open(
        'logfile_spec1d_' + str(reduceddata) + '_' + str(datenow) +
        '.raw.list', 'w')
    for img in outputfile:
        try:
            f.write(ntt.util.readkey3(ntt.util.readhdr(img), 'arcfile') + '\n')
        except:
            pass
    f.close()
    return outputfile, 'logfile_spec1d_' + str(reduceddata) + '_' + str(
        datenow) + '.raw.list'
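
A hypothetical call to efoscspec1Dredu above; the input list file name and the flag
values are assumptions for illustration only:

files = open('list_spec2d.txt').read().splitlines()   # assumed list of 2D EFOSC frames
outfiles, logname = efoscspec1Dredu(files,
                                    _interactive='no',   # keep the IRAF tasks non-interactive
                                    _ext_trace=False,
                                    _dispersionline=False,
                                    liststandard='',     # let the pipeline search for standards
                                    listatmo0='',
                                    _automaticex=True)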
Example #18
def sensfunction(standardfile, _function, _order, _interactive):
    # print "LOGX:: Entering `sensfunction` method/function in %(__file__)s" %
    # globals()
    import re
    import os
    import sys
    import ntt
    import datetime

    try:
        import pyfits  #  added later
    except:
        from astropy.io import fits as pyfits

    from pyraf import iraf
    import numpy as np

    MJDtoday = 55927 + (datetime.date.today() -
                        datetime.date(2012, 1, 1)).days
    iraf.noao(_doprint=0)
    iraf.imred(_doprint=0)
    iraf.specred(_doprint=0)
    toforget = ['specred.scopy', 'specred.sensfunc', 'specred.standard']
    for t in toforget:
        iraf.unlearn(t)
    iraf.specred.scopy.format = 'multispec'
    iraf.specred.verbose = 'no'
    hdrs = ntt.util.readhdr(standardfile)
    try:
        _outputsens = 'sens_' + str(ntt.util.readkey3(hdrs, 'date-night')) + '_' + \
                      str(ntt.util.readkey3(hdrs, 'grism')) + '_' + str(ntt.util.readkey3(hdrs, 'filter')) + '_' + \
                      re.sub('.dat', '', ntt.util.readkey3(
                          hdrs, 'stdname')) + '_' + str(MJDtoday)
    except:
        sys.exit('Error: missing header -stdname- in standard ' +
                 str(standardfile) + '  ')

    _outputsens = ntt.util.name_duplicate(standardfile, _outputsens, '')
    if os.path.isfile(_outputsens):
        if _interactive.lower() != 'yes':
            ntt.util.delete(_outputsens)
        else:
            answ = raw_input(
                'sensitivity function already computed, do you want to do it again [[y]/n] ? '
            )
            if not answ:
                answ = 'y'
            if answ.lower() in ['y', 'yes']:
                ntt.util.delete(_outputsens)

    if not os.path.isfile(_outputsens):
        iraf.set(direc=ntt.__path__[0] + '/')
        _caldir = 'direc$standard/MAB/'
        _extinctdir = 'direc$standard/extinction/'
        _observatory = 'lasilla'
        _extinction = 'lasilla2.txt'
        refstar = 'm' + \
            re.sub('.dat', '', pyfits.open(standardfile)
                   [0].header.get('stdname'))
        _airmass = ntt.util.readkey3(hdrs, 'airmass')
        _exptime = ntt.util.readkey3(hdrs, 'exptime')
        _outputstd = 'std_' + str(ntt.util.readkey3(hdrs, 'grism')) + '_' + \
                     str(ntt.util.readkey3(hdrs, 'filter')) + '.fits'
        ntt.util.delete(_outputstd)
        ntt.util.delete(_outputsens)
        iraf.specred.standard(input=standardfile,
                              output=_outputstd,
                              extinct=_extinctdir + _extinction,
                              caldir=_caldir,
                              observa=_observatory,
                              star_nam=refstar,
                              airmass=_airmass,
                              exptime=_exptime,
                              interac=_interactive)
        iraf.specred.sensfunc(standard=_outputstd,
                              sensitiv=_outputsens,
                              extinct=_extinctdir + _extinction,
                              ignorea='yes',
                              observa=_observatory,
                              functio=_function,
                              order=_order,
                              interac=_interactive)

        data, hdr = pyfits.getdata(standardfile, 0, header=True)  # added later
        data1, hdr1 = pyfits.getdata(_outputsens, 0,
                                     header=True)  # added later
        ntt.util.delete(_outputsens)  # added later
        pyfits.writeto(_outputsens, np.float32(data1), hdr)  # added later
    return _outputsens
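
A hypothetical call to sensfunction above; the standard-star file name is illustrative,
and _interactive is the 'yes'/'no' string that the function lowercases and compares:

sensfile = sensfunction('LTT3218_Gr13_Free_clean.fits',  # assumed extracted, telluric-cleaned standard
                        'spline3',                       # fitting function for specred.sensfunc
                        16,                              # fit order
                        'no')                            # non-interactive
print sensfile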
Example #21
def lickshane1Dredu(files,
                    _interactive,
                    _ext_trace,
                    _dispersionline,
                    _automaticex,
                    _verbose=False):
    import lickshane
    import datetime
    import os
    import re
    import string
    import sys
    import numpy as np

    liststandard = ''
    listatmo0 = ''

    #    os.environ["PYRAF_BETA_STATUS"] = "1"
    _extinctdir = 'direc$standard/extinction/'
    _extinction = 'lick.dat'
    _observatory = 'lick'

    now = datetime.datetime.now()
    datenow = now.strftime('20%y%m%d%H%M')
    MJDtoday = 55927 + (datetime.date.today() -
                        datetime.date(2012, 1, 1)).days
    scal = np.pi / 180.
    dv = lickshane.util.dvex()
    std, rastd, decstd, magstd = lickshane.util.readstandard(
        'standard_lick_mab.txt')
    objectlist = {}
    for img in files:
        hdr = lickshane.util.readhdr(img)
        img = re.sub('\n', '', img)
        lickshane.util.correctcard(img)
        _ra = lickshane.util.readkey3(hdr, 'RA')
        _dec = lickshane.util.readkey3(hdr, 'DEC')
        _object = lickshane.util.readkey3(hdr, 'object')
        _grism = lickshane.util.readkey3(hdr, 'grism')
        _slit = lickshane.util.readkey3(hdr, 'slit')
        dd = np.arccos(
            np.sin(_dec * scal) * np.sin(decstd * scal) +
            np.cos(_dec * scal) * np.cos(decstd * scal) * np.cos(
                (_ra - rastd) * scal)) * ((180 / np.pi) * 3600)
        if min(dd) < 100:
            _type = 'stdsens'
        else:
            _type = 'obj'
        print img, _type
        if min(dd) < 100:
            lickshane.util.updateheader(img, 0,
                                        {'stdname': [std[np.argmin(dd)], '']})
            lickshane.util.updateheader(
                img, 0, {'magstd': [float(magstd[np.argmin(dd)]), '']})
        if _type not in objectlist:
            objectlist[_type] = {}
        if (_grism, _slit) not in objectlist[_type]:
            objectlist[_type][_grism, _slit] = [img]
        else:
            objectlist[_type][_grism, _slit].append(img)

    from pyraf import iraf
    iraf.set(stdimage='imt2048')
    iraf.noao(_doprint=0)
    iraf.imred(_doprint=0)
    iraf.specred(_doprint=0)
    iraf.imutil(_doprint=0)
    toforget = ['imutil.imcopy', 'specred.sarith', 'specred.standard']
    for t in toforget:
        iraf.unlearn(t)
    iraf.specred.verbose = 'no'
    iraf.specred.dispaxi = 2
    iraf.set(direc=lickshane.__path__[0] + '/')
    sens = {}
    print objectlist
    outputfile = []
    if 'obj' in objectlist.keys():
        tpe = 'obj'
    elif 'stdsens' in objectlist.keys():
        tpe = 'stdsens'
    else:
        sys.exit('error: no objects and no standards in the list')

    for setup in objectlist[tpe]:
        extracted = []
        listatmo = []
        if setup not in sens:
            sens[setup] = []
        if tpe == 'obj':
            print '\n### setup= ', setup, '\n### objects= ', objectlist['obj'][
                setup], '\n'
            for img in objectlist['obj'][setup]:
                #              hdr=readhdr(img)
                print '\n\n### next object= ', img, ' ', lickshane.util.readkey3(
                    lickshane.util.readhdr(img), 'object'), '\n'

                #_automaticex = ''

                imgex = lickshane.lickshane1Ddef.extractspectrum(
                    img,
                    dv,
                    _ext_trace,
                    _dispersionline,
                    _interactive,
                    'obj',
                    automaticex=_automaticex)
                if not os.path.isfile(imgex):
                    sys.exit('### error, extraction not computed')
                if not lickshane.util.readkey3(lickshane.util.readhdr(imgex), 'shift') and \
                        lickshane.util.readkey3(lickshane.util.readhdr(imgex), 'shift') != 0.0:
                    if setup in ['300_7500']:
                        lickshane.lickshane1Ddef.checkwavestd(
                            imgex, _interactive)
                    else:
                        print 'wavelength check using telluric lines not possible'

                extracted.append(imgex)
                if imgex not in outputfile:
                    outputfile.append(imgex)
                lickshane.util.updateheader(
                    imgex, 0, {'TRACE1': [img, 'Originating file']})
                if os.path.isfile('database/ap' +
                                  re.sub('_ex.fits', '', imgex)):
                    if 'database/ap' + re.sub('_ex.fits', '',
                                              imgex) not in outputfile:
                        outputfile.append('database/ap' +
                                          re.sub('_ex.fits', '', imgex))
            print '\n### all objects with this setup extracted\n'

        if liststandard:
            standardlist = liststandard
            _type = 'stdfromdreducer'
        else:
            try:
                standardlist = objectlist['stdsens'][setup]
                _type = 'stdsens'
            except:
                standardlist = ''
                _type = ''
        if _type == 'stdfromdreducer' and len(extracted) >= 1:
            _outputsens2 = lickshane.util.searchsens(extracted[0],
                                                     standardlist)[0]
            print '\n### using standard from reducer ' + str(_outputsens2)
        elif _type not in ['stdsens', 'stdfromdreducer'
                           ] and len(extracted) >= 1:
            _outputsens2 = lickshane.util.searchsens(extracted[0], '')[0]
            os.system('cp ' + _outputsens2 + ' .')
            _outputsens2 = string.split(_outputsens2, '/')[-1]
            print '\n### no standard in the list, using standard from archive'
        else:
            for simg in standardlist:
                print '\n###  standard for setup ' + \
                      str(setup) + ' = ', simg, ' ', lickshane.util.readkey3(
                          lickshane.util.readhdr(simg), 'object'), '\n'
                simgex = lickshane.lickshane1Ddef.extractspectrum(
                    simg,
                    dv,
                    False,
                    False,
                    _interactive,
                    'std',
                    automaticex=_automaticex)
                lickshane.util.updateheader(
                    simgex, 0, {'TRACE1': [simg, 'Originating file']})
                if not lickshane.util.readkey3(
                        lickshane.util.readhdr(simgex),
                        'shift') and lickshane.util.readkey3(
                            lickshane.util.readhdr(simgex), 'shift') != 0.0:
                    lickshane.lickshane1Ddef.checkwavestd(simgex, _interactive)
                print simgex
                atmofile = lickshane.lickshane1Ddef.telluric_atmo(
                    simgex)  # atmo file2
                print atmofile
                lickshane.util.updateheader(
                    atmofile, 0, {'TRACE1': [simgex, 'Originating file']})
                if tpe != 'obj' and atmofile not in outputfile:
                    outputfile.append(atmofile)
                if not listatmo0:
                    listatmo.append(atmofile)
                sens[setup].append(simgex)
                if simgex not in outputfile:
                    outputfile.append(simgex)
            print '\n### standards available: ', sens[setup]

            if tpe == 'obj':
                if len(sens[setup]) > 1:
                    goon = 'no'
                    while goon != 'yes':
                        stdused = raw_input(
                            '\n### more than one standard for this setup, which one do you want to use ['
                            + sens[setup][0] + '] ?')
                        if not stdused:
                            stdused = sens[setup][0]
                        if os.path.isfile(stdused):
                            goon = 'yes'
                else:
                    stdused = sens[setup][0]
                stdvec = [stdused]
            else:
                stdvec = sens[setup]
            for stdused in stdvec:
                stdusedclean = re.sub('_ex', '_clean', stdused)
                lickshane.util.delete(stdusedclean)
                iraf.specred.sarith(input1=stdused,
                                    op='/',
                                    input2=atmofile,
                                    output=stdusedclean,
                                    format='multispec')
                _outputsens2 = lickshane.lickshane1Ddef.sensfunction(
                    stdusedclean, 'spline3', 16, _interactive)
                lickshane.util.updateheader(
                    _outputsens2, 0, {'TRACE1': [stdused, 'Originating file']})

                if _outputsens2 not in outputfile:
                    outputfile.append(_outputsens2)
        if _outputsens2 and tpe == 'obj':
            ####################################################
            for img in objectlist['obj'][setup]:  # flux calibrate 2d images
                imgd = fluxcalib2d(img, _outputsens2)
                lickshane.util.updateheader(
                    imgd, 0, {'TRACE1': [img, 'Originating files']})
                if imgd not in outputfile:
                    outputfile.append(imgd)
            ####################################################
            #    flux calib in the standard way
            if not listatmo and listatmo0:
                listatmo = listatmo0[:]
            for _imgex in extracted:
                _airmass = lickshane.util.readkey3(
                    lickshane.util.readhdr(_imgex), 'airmass')
                _exptime = lickshane.util.readkey3(
                    lickshane.util.readhdr(_imgex), 'exptime')
                _imgf = re.sub('_ex.fits', '_f.fits', _imgex)
                lickshane.util.delete(_imgf)
                qqq = iraf.specred.calibrate(input=_imgex,
                                             output=_imgf,
                                             sensiti=_outputsens2,
                                             extinct='yes',
                                             flux='yes',
                                             extinction=_extinctdir +
                                             _extinction,
                                             observatory=_observatory,
                                             airmass=_airmass,
                                             ignorea='yes',
                                             exptime=_exptime,
                                             fnu='no')
                hedvec = {
                    'SENSFUN': [_outputsens2, ''],
                    #                          'SNR': [lickshane.util.StoN2(_imgf, False), 'Average signal to noise ratio per pixel'],
                    'BUNIT':
                    ['erg/cm2/s/Angstrom', 'Physical unit of array values'],
                    'TRACE1': [_imgex, 'Originating file'],
                    'ASSON1': [
                        re.sub('_f.fits', '_2df.fits', _imgf),
                        'Name of associated file'
                    ],
                    'ASSOC1':
                    ['ANCILLARY.2DSPECTRUM', 'Category of associated file']
                }
                lickshane.util.updateheader(_imgf, 0, hedvec)
                if _imgf not in outputfile:
                    outputfile.append(_imgf)
                if listatmo:
                    atmofile = lickshane.util.searcharc(_imgex, listatmo)[0]
                    if atmofile:
                        _imge = re.sub('_f.fits', '_e.fits', _imgf)
                        lickshane.util.delete(_imge)
                        iraf.specred.sarith(input1=_imgf,
                                            op='/',
                                            input2=atmofile,
                                            output=_imge,
                                            w1='INDEF',
                                            w2='INDEF',
                                            format='multispec')
                        try:
                            iraf.imutil.imcopy(input=_imgf + '[*,1,2]',
                                               output=_imge + '[*,1,2]',
                                               verbose='no')
                        except:
                            pass
                        try:
                            iraf.imutil.imcopy(input=_imgf + '[*,1,3]',
                                               output=_imge + '[*,1,3]',
                                               verbose='no')
                        except:
                            pass
                        try:
                            iraf.imutil.imcopy(input=_imgf + '[*,1,4]',
                                               output=_imge + '[*,1,4]',
                                               verbose='no')
                        except:
                            pass
                        if _imge not in outputfile:
                            outputfile.append(_imge)
                        if atmofile not in outputfile:
                            outputfile.append(atmofile)
                        lickshane.util.updateheader(
                            _imge, 0, {'ATMOFILE': [atmofile, '']})
                        lickshane.util.updateheader(
                            _imge, 0, {'TRACE1': [_imgf, 'Originating file']})
                        imgin = _imge
                    else:
                        imgin = _imgf
                else:
                    imgin = _imgf
                imgasci = re.sub('.fits', '.asci', imgin)

                lickshane.util.delete(imgasci)
                iraf.onedspec(_doprint=0)
                iraf.onedspec.wspectext(imgin + '[*,1,1]',
                                        imgasci,
                                        header='no')
                if imgasci not in outputfile:
                    outputfile.append(imgasci)

    return objectlist, 'ddd'
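
# --- Added hedged usage sketch (not part of the original source) ------------
# One way lickshane1Dredu might be invoked on a night of Lick/Shane spectra;
# the file pattern is purely illustrative and the argument types are assumed
# from how they are used inside the function above.
if __name__ == '__main__':
    import glob
    files = sorted(glob.glob('tSN2016xyz_*.fits'))   # hypothetical 2D frames
    outputs, tag = lickshane1Dredu(files,
                                   _interactive=True,
                                   _ext_trace=False,
                                   _dispersionline=False,
                                   _automaticex=False,
                                   _verbose=True)
    print outputs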
Ejemplo n.º 22
0
                          version="%prog " + str(fdst.__version__))
    parser.add_option("-v", "--verbose",dest="verbose",\
                  action="store_true",default=False,
                  help='Print tasks description')
    option, args = parser.parse_args()

##############################################################
from pyraf import iraf

iraf.noao(_doprint=0)
iraf.imred(_doprint=0)
iraf.ccdred(_doprint=0)
iraf.twodspec(_doprint=0)
iraf.longslit(_doprint=0)
iraf.onedspec(_doprint=0)
iraf.specred(_doprint=0)
toforget = [
    'ccdproc', 'imcopy', 'specred.apall', 'longslit.identify',
    'longslit.reidentify', 'specred.standard', 'longslit.fitcoords',
    'onedspec.wspectext'
]
for t in toforget:
    iraf.unlearn(t)

##################### opening setup ##########################

print ''
question = raw_input('Do you have a list of spectra ? ([yes]/no) ')
if not question:
    question = 'yes'
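
# --- Added hedged sketch (not part of the original source) -----------------
# The fragment above ends right after the question; a plausible, purely
# illustrative continuation that turns the answer into a list of FITS files
# (the real script's continuation is not shown here) could look like this:
import glob
import string

if question.lower() in ['yes', 'y']:
    listfile = raw_input('Name of the file containing the list of spectra ? ')
    imglist = [string.strip(i) for i in open(listfile).readlines()
               if string.strip(i)]
else:
    imglist = sorted(glob.glob('*.fits'))
print '### %d spectra selected' % len(imglist)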
Ejemplo n.º 23
0
def efoscfastredu(imglist, _listsens, _listarc, _ext_trace, _dispersionline, _cosmic, _interactive):
    # print "LOGX:: Entering `efoscfastredu` method/function in %(__file__)s"
    # % globals()
    import string
    import os
    import re
    import sys
    os.environ["PYRAF_BETA_STATUS"] = "1"
    try:
        from astropy.io import fits as pyfits
    except:
        import pyfits
    from ntt.util import readhdr, readkey3
    import ntt
    import numpy as np
    dv = ntt.dvex()
    scal = np.pi / 180.
    if not _interactive:
        _interactive = False
        _inter = 'NO'
    else:
        _inter = 'YES'
    from pyraf import iraf

    iraf.noao(_doprint=0)
    iraf.imred(_doprint=0)
    iraf.ccdred(_doprint=0)
    iraf.twodspec(_doprint=0)
    iraf.longslit(_doprint=0)
    iraf.onedspec(_doprint=0)
    iraf.specred(_doprint=0)
    toforget = ['ccdproc', 'imcopy', 'specred.apall', 'longslit.identify', 'longslit.reidentify', 'specred.standard',
                'longslit.fitcoords', 'onedspec.wspectext']
    for t in toforget:
        iraf.unlearn(t)
    iraf.ccdred.verbose = 'no'  # not print steps
    iraf.specred.verbose = 'no'  # not print steps
    iraf.ccdproc.darkcor = 'no'
    iraf.ccdproc.fixpix = 'no'
    iraf.ccdproc.flatcor = 'no'
    iraf.ccdproc.zerocor = 'no'
    iraf.ccdproc.ccdtype = ''
    _gain = ntt.util.readkey3(ntt.util.readhdr(imglist[0]), 'gain')
    _ron = ntt.util.readkey3(ntt.util.readhdr(imglist[0]), 'ron')
    iraf.specred.apall.readnoi = _ron
    iraf.specred.apall.gain = _gain
    iraf.specred.dispaxi = 2
    iraf.longslit.dispaxi = 2
    iraf.longslit.mode = 'h'
    iraf.specred.mode = 'h'
    iraf.noao.mode = 'h'
    iraf.ccdred.instrument = "ccddb$kpno/camera.dat"
    iraf.set(direc=ntt.__path__[0] + '/')
    for img in imglist:
        hdr = ntt.util.readhdr(img)
        _tech = ntt.util.readkey3(hdr, 'tech')
        if _tech != 'SPECTRUM':
            sys.exit('error: ' + str(img) + ' is not a spectrum ')
        print '\n####  image name = ' + img + '\n'
        _grism0 = readkey3(hdr, 'grism')
        _filter0 = readkey3(hdr, 'filter')
        _slit0 = readkey3(hdr, 'slit')
        _object0 = readkey3(hdr, 'object')
        _date0 = readkey3(hdr, 'date-night')
        setup = (_grism0, _filter0, _slit0)
        _biassec0 = '[3:1010,1026:1029]'
        if _grism0 == 'Gr16':
            _trimsec0 = '[100:950,1:950]'
        elif _grism0 == 'Gr13':
            if _filter0 == 'Free':
                _trimsec0 = '[100:950,1:1015]'
            elif _filter0 == 'GG495':
                _trimsec0 = '[100:950,208:1015]'
            elif _filter0 == 'OG530':
                _trimsec0 = '[100:950,300:1015]'
        elif _grism0 == 'Gr11':
            _trimsec0 = '[100:950,5:1015]'
        else:
            _trimsec0 = '[100:950,5:1015]'
        _object0 = re.sub(' ', '', _object0)
        _object0 = re.sub('/', '_', _object0)
        nameout0 = 't' + str(_object0) + '_' + str(_date0)
        for _set in setup:
            nameout0 = nameout0 + '_' + _set
        nameout0 = ntt.util.name_duplicate(img, nameout0, '')
        timg = nameout0
        if os.path.isfile(timg):
            os.system('rm -rf ' + timg)
        iraf.imcopy(img, output=timg)
        iraf.ccdproc(timg, output='', overscan='no', trim='yes', zerocor="no", flatcor="no", readaxi='column',
                     trimsec=str(_trimsec0), biassec=_biassec0, Stdout=1)
        img = timg
        if _listarc:
            arcfile = ntt.util.searcharc(img, _listarc)[0]
        else:
            arcfile = ''
        if not arcfile:
            arcfile = ntt.util.searcharc(img, '')[0]
        else:
            iraf.ccdproc(arcfile, output='t' + arcfile, overscan='no', trim='yes', zerocor="no", flatcor="no",
                         readaxi='column', trimsec=str(_trimsec0), biassec=str(_biassec0), Stdout=1)
            arcfile = 't' + arcfile

        if _cosmic:
            # cosmic ray rejection with ntt.cosmics.lacos (L.A.Cosmic)
            ntt.cosmics.lacos(img, output='', gain=_gain, readn=_ron, xorder=9, yorder=9, sigclip=4.5, sigfrac=0.5,
                              objlim=1, verbose=True, interactive=False)
            print '\n### cosmic ray rejection ........ done '

        if not arcfile:
            print '\n### warning: no arc file found, skipping wavelength calibration for this image '
        else:
            arcref = ntt.util.searcharc(img, '')[0]
            if arcfile[0] == '/':
                os.system('cp ' + arcfile + ' ' +
                          string.split(arcfile, '/')[-1])
                arcfile = string.split(arcfile, '/')[-1]
            arcref = string.split(arcref, '/')[-1]
            if arcref:
                os.system('cp ' + arcref + ' .')
                arcref = string.split(arcref, '/')[-1]
                if not os.path.isdir('database/'):
                    os.mkdir('database/')
                if os.path.isfile(ntt.util.searcharc(img, '')[1] + '/database/id' + re.sub('.fits', '', arcref)):
                    os.system('cp ' + ntt.util.searcharc(img, '')[1] + '/database/id' + re.sub('.fits', '',
                                                                                               arcref) + ' database/')
                iraf.longslit.reidentify(referenc=arcref, images=arcfile, interac=_inter, section='column 10',
                                         coordli='direc$standard/ident/Lines_HgCdHeNeAr600.dat', overrid='yes', step=0,
                                         newaps='no', nsum=5, nlost=2, mode='h', verbose='no')
            else:
                iraf.longslit.identify(images=arcfile, section='column 10',
                                       coordli='direc$standard/ident/Lines_HgCdHeNeAr600.dat', nsum=10, fwidth=7,
                                       order=3, mode='h')
            iraf.longslit.reidentify(referenc=arcfile, images=arcfile, interac='NO', section='column 10',
                                  coordli='direc$standard/ident/Lines_HgCdHeNeAr600.dat', overrid='yes', step=10,
                                  newaps='yes', nsum=5, nlost=2, mode='h', verbose='no')
            qqq = iraf.longslit.fitcoords(images=re.sub('.fits', '', arcfile), fitname=re.sub('.fits', '', arcfile),
                                          interac='no', combine='yes', databas='database',
                                          function='legendre', yorder=4, logfile='logfile', plotfil='', mode='h')
            iraf.specred.transform(input=img, output=img, minput='', fitnames=re.sub('.fits', '', arcfile),
                                   databas='database',
                                   x1='INDEF', x2='INDEF', y1='INDEF', y2='INDEF', flux='yes', mode='h',
                                   logfile='logfile')
            # ######################  check wavelength calibration ############
            _skyfile = ntt.__path__[
                0] + '/standard/ident/sky_' + setup[0] + '_' + setup[1] + '.fits'
            shift = ntt.efoscspec2Ddef.skyfrom2d(img, _skyfile)
            print '\n###     wavelength check performed ...... spectrum shifted by ' + str(shift) + ' Angstrom \n'
            zro = pyfits.open(img)[0].header.get('CRVAL2')
            ntt.util.updateheader(img, 0, {'CRVAL2': [zro + int(shift), '']})
            std, rastd, decstd, magstd = ntt.util.readstandard(
                'standard_efosc_mab.txt')
            hdrt = readhdr(img)
            _ra = readkey3(hdrt, 'RA')
            _dec = readkey3(hdrt, 'DEC')
            _object = readkey3(hdrt, 'object')
            dd = np.arccos(np.sin(_dec * scal) * np.sin(decstd * scal) + np.cos(_dec * scal) *
                           np.cos(decstd * scal) * np.cos((_ra - rastd) * scal)) * ((180 / np.pi) * 3600)
            if min(dd) < 100:
                _type = 'stdsens'
                ntt.util.updateheader(
                    img, 0, {'stdname': [std[np.argmin(dd)], '']})
                ntt.util.updateheader(
                    img, 0, {'magstd': [float(magstd[np.argmin(dd)]), '']})
            else:
                _type = 'obj'
            print '\n###      EXTRACTION USING IRAF TASK APALL \n'
            result = []
            if _type == 'obj':
                imgex = ntt.util.extractspectrum(
                    img, dv, _ext_trace, _dispersionline, _interactive, _type)
                ntt.util.updateheader(
                    imgex, 0, {'FILETYPE': [22107, 'extracted 1D spectrum ']})
                ntt.util.updateheader(imgex, 0, {
                    'PRODCATG': ['SCIENCE.' + readkey3(readhdr(imgex), 'tech').upper(), 'Data product category']})
                ntt.util.updateheader(imgex, 0, {'TRACE1': [img, '']})
                result.append(imgex)
                if _listsens:
                    sensfile = ntt.util.searchsens(img, _listsens)[0]
                else:
                    sensfile = ''
                if not sensfile:
                    sensfile = ntt.util.searchsens(img, '')[0]
                if sensfile:
                    imgf = re.sub('.fits', '_f.fits', img)
                    _extinctdir = 'direc$standard/extinction/'
                    _extinction = 'extinction_lasilla.dat'
                    _observatory = 'lasilla'
                    _exptime = readkey3(hdrt, 'exptime')
                    _airmass = readkey3(hdrt, 'airmass')
                    ntt.util.delete(imgf)
                    iraf.specred.calibrate(input=imgex, output=imgf, sensiti=sensfile, extinct='yes',
                                           flux='yes', ignorea='yes', extinction=_extinctdir + _extinction,
                                           observatory=_observatory, airmass=_airmass, exptime=_exptime,
                                           fnu='no')
                    hedvec = {'SENSFUN': [string.split(sensfile, '/')[-1], 'sensitivity function'],
                              'FILETYPE': [22208, '1D wavelength and flux calibrated spectrum '],
                              'SNR': [ntt.util.StoN2(imgf, False), 'Average S/N ratio'],
                              'BUNIT': ['erg/cm2/s/Angstrom', 'Flux Calibration Units'], 'TRACE1': [imgex, '']}
                    ntt.util.updateheader(imgf, 0, hedvec)
                    imgout = imgf
                    imgd = ntt.efoscspec1Ddef.fluxcalib2d(img, sensfile)
                    ntt.util.updateheader(
                        imgd, 0, {'FILETYPE': [22209, '2D wavelength and flux calibrated spectrum ']})
                    ntt.util.updateheader(imgd, 0, {'TRACE1': [img, '']})
                    imgasci = re.sub('.fits', '.asci', imgout)
                    ntt.util.delete(imgasci)
                    iraf.onedspec.wspectext(
                        imgout + '[*,1,1]', imgasci, header='no')
                    result = result + [imgout, imgd, imgasci]
            else:
                imgex = ntt.util.extractspectrum(
                    img, dv, _ext_trace, _dispersionline, _interactive, 'std')
                imgout = ntt.efoscspec1Ddef.sensfunction(
                    imgex, 'spline3', 6, _inter)
                result = result + [imgout]

    for img in result:
        if img[-5:] == '.fits':
            ntt.util.phase3header(img)  # phase 3 definitions
            ntt.util.airmass(img)  # phase 3 definitions
            ntt.util.updateheader(
                img, 0, {'quality': ['Rapid', 'Final or Rapid reduction']})
    return result
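
# --- Added hedged usage sketch (not part of the original source) ------------
# A possible call to efoscfastredu for a single EFOSC2 spectrum; the file
# name is a placeholder and the sensitivity/arc lists are left empty so the
# archive defaults inside the function are used instead.
if __name__ == '__main__':
    imglist = ['EFOSC_spectrum_science.fits']   # hypothetical 2D spectrum
    result = efoscfastredu(imglist,
                           _listsens='',        # fall back to archive sensitivity
                           _listarc='',         # fall back to archive arc
                           _ext_trace=False,
                           _dispersionline=False,
                           _cosmic=True,
                           _interactive=False)
    print result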
Ejemplo n.º 24
0
def sofispecreduction(files, _interactive, _doflat, listflat, _docross, _verbose=False):
    # print "LOGX:: Entering `sofispecreduction` method/function in
    # %(__file__)s" % globals()
    import ntt
    from ntt.util import delete, readhdr, readkey3, correctcard, rangedata
    import string, re, sys, os, glob

    try:        
        from astropy.io import fits as pyfits
    except:     
        import pyfits

    from pyraf import iraf
    from numpy import argmin, array, min, isnan, arange, mean, sum
    from numpy import sqrt, pi

    iraf.noao(_doprint=0)
    iraf.imred(_doprint=0)
    iraf.ccdred(_doprint=0)
    iraf.twodspec(_doprint=0)
    iraf.longslit(_doprint=0)
    iraf.specred(_doprint=0)
    toforget = ['ccdred.flatcombine', 'ccdproc', 'specred.apall', 'longslit.identify', 'longslit.reidentify',
                'longslit.fitcoords', 'specred.transform', 'specred.response', 'imutil.hedit']
    for t in toforget:
        iraf.unlearn(t)
    iraf.longslit.dispaxi = 2
    iraf.longslit.mode = 'h'
    iraf.specred.dispaxi = 2
    iraf.specred.mode = 'h'
    iraf.ccdproc.darkcor = 'no'
    iraf.ccdproc.fixpix = 'no'
    iraf.ccdproc.flatcor = 'no'
    iraf.ccdproc.zerocor = 'no'
    iraf.ccdproc.overscan = 'no'
    iraf.ccdproc.ccdtype = ''
    iraf.ccdred.instrument = "/dev/null"

    iraf.set(direc=ntt.__path__[0] + '/')

    if _interactive:
        _interact = 'yes'
    else:
        _interact = 'no'
    if _verbose:
        iraf.ccdred.verbose = 'yes'
        iraf.specred.verbose = 'yes'
    else:
        iraf.specred.verbose = 'no'
        iraf.ccdred.verbose = 'no'
    import datetime
    import time

    now = datetime.datetime.now()
    datenow = now.strftime('20%y%m%d%H%M')
    MJDtoday = 55927 + (datetime.date.today() - datetime.date(2012, 01, 01)).days
    # if they are not sorted the fieldlist dict could crash
    files = ntt.sofiphotredudef.sortbyJD(files)
    outputlist = []
    setup = []
    fieldlist = {}
    OBID = {}
    RA = {}
    DEC = {}
    objects = {}
    flats = {}
    lamps1 = {}
    _rdnoise = readkey3(readhdr(re.sub('\n', '', files[0])), 'ron')
    _gain = readkey3(readhdr(re.sub('\n', '', files[0])), 'gain')
    for img in files:
        img = re.sub('\n', '', img)
        hdr = readhdr(img)
        _object = readkey3(hdr, 'object')
        _filter = readkey3(hdr, 'filter')
        _date = readkey3(hdr, 'date-night')
        _exptime = readkey3(hdr, 'exptime')
        _grism = readkey3(hdr, 'grism')
        _obsmode = readkey3(hdr, 'obsmode')
        _type = ''
        if _grism.lower() not in ['gr', 'gb']:
            _type = 'image'
        if not _type:
            if _object.lower() == 'flat':
                _type = 'flat'
                if _date not in flats:
                    flats[_date] = {}
                if _grism not in flats[_date]:
                    flats[_date][_grism] = [img]
                else:
                    flats[_date][_grism].append(img)
            elif _object.lower() == 'lamp':
                _lampid = (readkey3(hdr, 'esoid'), readkey3(hdr, 'grism'))
                if _lampid not in lamps1:
                    lamps1[_lampid] = [None, None]
                if readkey3(hdr, 'lamp1') == 'Xenon':
                    lamps1[_lampid][0] = img
                else:
                    lamps1[_lampid][1] = img
                _type = 'lamp'
                # if readkey3(hdr, 'lamp1') == 'Xenon':
                #     _type = 'lamp'
                #     if _grism not in lamps:
                #         lamps[_grism] = [img]
                #     else:
                #         lamps[_grism].append(img)
                # else:
                #     _type = 'notgood'
        if not _type:
            _ra = readkey3(hdr, 'RA')
            _dec = readkey3(hdr, 'DEC')
            _object_name = readkey3(hdr, 'object')
            _OBID = (readkey3(hdr, 'esoid'), _grism)
            if string.count(_object_name, '/') or string.count(_object_name, '.') or string.count(_object_name, ' '):
                nameobj = string.split(_object_name, '/')[0]
                nameobj = string.split(nameobj, ' ')[0]
                nameobj = string.split(nameobj, '.')[0]
            else:
                nameobj = _object_name
            if _grism not in fieldlist:
                fieldlist[_grism] = {}
            if _OBID not in OBID:
                count = 1
                nameobj0 = nameobj + '_' + str(count)
                answ = 'yes'
                while answ == 'yes':
                    if nameobj0 in fieldlist[_grism]:
                        count = count + 1
                        nameobj0 = nameobj + '_' + str(count)
                    else:
                        answ = 'no'
                fieldlist[_grism][nameobj0] = []
                OBID[readkey3(hdr, 'esoid'), _grism] = nameobj0
            fieldlist[_grism][nameobj0].append(img)

        if _verbose:
            print img
            print _type, _object, _filter
            print 'lamps', lamps1

    lamps = {}
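    # Added note (not in the original): for each (ESO OB id, grism) pair the
    # frame taken with the Xenon lamp on is stored in lamps1[...][0] and the
    # companion lamp-off frame in lamps1[...][1]; the loop below subtracts
    # OFF from ON (or copies the ON frame if no OFF exists) so that the arc
    # is free of the thermal background before line identification.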
    for _lampid in lamps1:
        lamp = ''
        output = 'arc_' + str(_lampid[0]) + '_' + str(_lampid[1]) + '.fits'
        if lamps1[_lampid][0] and lamps1[_lampid][1]:
            print lamps1[_lampid][0], lamps1[_lampid][1]
            # try:
            ntt.util.delete(output)
            iraf.imarith(lamps1[_lampid][0], '-', lamps1[_lampid][1],
                         result=output, verbose='yes')
            # except:
            #     print 'warning, lamp file not ON/OFF'
            #     os.system('cp ' + lamps1[_lampid][0] + ' ' + output)

            lamp = output
        elif lamps1[_lampid][0] and not lamps1[_lampid][1]:
            os.system('cp ' + lamps1[_lampid][0] + ' ' + output)
            lamp = output
        if lamp:
            if _lampid[1] not in lamps:
                lamps[_lampid[1]] = [lamp]
            else:
                lamps[_lampid[1]].append(lamp)

    if _verbose:
        print '\n### FIELDS\n', fieldlist
        print '\n### OBID\n', OBID
        print '\n### FLATS\n', flats
        print '\n### LAMPS\n', lamps

#    if not flats:
#        sys.exit('\n### error: spectroscopic flat not available, add flats in the directory and try again')
#    if not lamps:
#        sys.exit('\n### error: spectroscopic lamp not available, add lamps in the directory and try again')
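    # Added note (not in the original): the block below interactively sorts
    # the spectroscopic flats into lamp-ON and lamp-OFF frames, builds ON-OFF
    # difference images, median-combines them with ccdred.flatcombine into a
    # master flat, and normalizes the master flat along the dispersion axis
    # with specred.response (spline3 fit, order ~80) to obtain 'n<masterflat>'.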

    if not listflat:
        print '\n### list of available spectroscopic flats (ON,OFF):'
        for _date in flats:
            for _grism in flats[_date]:
                for img in flats[_date][_grism]:
                    if pyfits.open(img)[0].data.mean() >= 2000:
                        print img, _grism, _date, 'ON ? '
                    else:
                        print img, _grism, _date, 'OFF ? '
        for _date in flats:
            for _grism in flats[_date]:
                flat = {'ON': [], 'OFF': []}
                for img in flats[_date][_grism]:
                    _type = ''
                    if readkey3(readhdr(img), 'lamp3'):
                        print '\n### header lamp3 found: flat ON ', str(img)
                        _type = 'ON'
                    else:
                        if pyfits.open(img)[0].data.mean() >= 2000:
                            _type = 'ON'
                        else:
                            _type = 'OFF'
                    aa, bb, cc = ntt.util.display_image(img, 1, '', '', False)
                    print '\n### number of flats already selected (ON,OFF): \n ### please select the same number ' \
                          'of ON and OFF flats \n' + \
                        str(len(flat['ON'])) + '  ' + str(len(flat['OFF']))
                    print '\n### image ' + str(img)
                    answ = raw_input(
                        'ON[n] / OFF[f] / reject[r] / stop[s] [' + str(_type) + '] ? ')
                    if not answ:
                        answ = _type
                    if answ in ['ON', 'on', 'n']:
                        _type = 'ON'
                    if answ in ['OFF', 'off', 'f']:
                        _type = 'OFF'
                    if answ in ['s', 'S', 'STOP', 'stop', 'Stop']:
                        _type = 'stop'
                    if answ in ['r', 'R', 'reject']:
                        _type = 'r'
                    if _type in ['ON', 'OFF']:
                        flat[_type].append(img)
                    elif _type == 'stop':
                        if len(flat['ON']) == len(flat['OFF']) and len(flat['OFF']) >= 2:
                            break
                        elif len(flat['ON']) == len(flat['OFF']) and len(flat['OFF']) == 0:
                            break
                        else:
                            print '\n### Warning: you can stop only if the numbers of ON and OFF are the same'
                print '### ON/OFF flats selected: ', len(flat['ON']), len(flat['OFF'])
                if len(flat['ON']) == len(flat['OFF']) and len(flat['OFF']) >= 2:
                    ff = open('_flatlist', 'w')
                    for ii in range(0, len(flat['OFF'])):
                        delete('flat_' + str(_date) + '_' + str(_grism) +
                               '_' + str(MJDtoday) + '_' + str(ii) + '.fits')
                        iraf.imarith(flat['ON'][ii], '-', flat['OFF'][ii],
                                     result='flat_' + str(_date) + '_' + str(_grism) + '_' + str(MJDtoday) + '_' + str(
                                         ii) + '.fits', verbose='no')
                        ff.write(
                            'flat_' + str(_date) + '_' + str(_grism) + '_' + str(MJDtoday) + '_' + str(ii) + '.fits\n')
                    ff.close()
                    masterflat = 'flat_' + \
                        str(_date) + '_' + str(_grism) + \
                        '_' + str(MJDtoday) + '.fits'
                    delete(masterflat)
                    _order = '80'
                    iraf.ccdred.flatcombine(input='@_flatlist', output=masterflat, combine='median', rdnoise=_rdnoise,
                                            gain=_gain, ccdtype='')
                    hdr = readhdr(masterflat)
                    matching = [s for s in hdr.keys() if "IMCMB" in s]
                    for imcmb in matching:
                        aaa = iraf.hedit(masterflat, imcmb, delete='yes', update='yes',
                                         verify='no', Stdout=1)
                    delete('_flatlist')
                    print masterflat
                    correctcard(masterflat)
                    if masterflat not in outputlist:
                        outputlist.append(masterflat)
                    ntt.util.updateheader(masterflat, 0, {'FILETYPE': [41102, 'flat field'],
                                                          'SINGLEXP': [False, 'TRUE if resulting from single exposure'],
                                                          'M_EPOCH': [False, 'TRUE if resulting from multiple epochs']})

                    print '\n###  master flat ........... done '
                    delete('n' + masterflat)
                    iraf.specred.response(masterflat, normaliz=masterflat + '[100:900,*]',
                                          response='n' + masterflat, interac=_interact, thresho='INDEF', sample='*',
                                          naverage=2,
                                          function='spline3', low_rej=3, high_rej=3, order=_order, niterat=20, grow=0,
                                          graphic='stdgraph', mode='q')
                    listflat.append('n' + masterflat)
                    if 'n' + masterflat not in outputlist:
                        outputlist.append('n' + masterflat)
                    ntt.util.updateheader('n' + masterflat, 0, {'FILETYPE': [41203, 'normalized flat field'],
                                                                'TRACE1': [masterflat, 'Originating file']})
                    # ntt.util.updateheader('n'+masterflat,0,{'TRACE1':[masterflat,'']})

                    flattot = flat['ON'] + flat['OFF']
                    num = 0
                    for img in flattot:
                        num = num + 1
                        ntt.util.updateheader(masterflat, 0, {
                            'PROV' + str(num): [readkey3(readhdr(img), 'ARCFILE'), 'Originating file'],
                            'TRACE' + str(num): [readkey3(readhdr(img), 'ARCFILE'), 'Originating file']})
                        ntt.util.updateheader('n' + masterflat, 0, {
                            'PROV' + str(num): [readkey3(readhdr(img), 'ARCFILE'), 'Originating file']})

                    if listflat:
                        print '\n### flats available:\n### ' + str(listflat), '\n'
                elif len(flat['ON']) == len(flat['OFF']) and len(flat['OFF']) == 0:
                    print '\n### no good flats in this set ......'
                else:
                    sys.exit('\n### Error: number of ON and OFF not the same')

    for _grism in fieldlist:
        obj0 = fieldlist[_grism][fieldlist[_grism].keys()[0]][0]
        # #############              arc              #########################
        if _grism not in lamps:
            print '\n### take arc from archive '
            arcfile = ntt.util.searcharc(obj0, '')[0]
            if arcfile[0] == '/':
                os.system('cp ' + arcfile + ' ' +
                          string.split(arcfile, '/')[-1])
                arcfile = string.split(arcfile, '/')[-1]
            lamps[_grism] = [arcfile]

        if _grism in lamps:
            arclist = lamps[_grism]
            if arclist:
                arcfile = ntt.util.searcharc(obj0, arclist)[0]
            else:
                arcfile = ntt.util.searcharc(obj0, '')[0]

            print arcfile
            if arcfile:
                print arcfile
                datea = readkey3(readhdr(arcfile), 'date-night')
                if arcfile[0] == '/':
                    os.system('cp ' + arcfile + ' ' +
                              string.split(arcfile, '/')[-1])
                    arcfile = string.split(arcfile, '/')[-1]

                if _doflat:
                    if listflat:
                        flat0 = ntt.util.searchflat(arcfile, listflat)[0]
                    else:
                        flat0 = ''
                else:
                    flat0 = ''

                if flat0:
                    _flatcor = 'yes'
                else:
                    _flatcor = 'no'
                    _doflat = False

                ntt.util.delete('arc_' + datea + '_' + _grism +
                                '_' + str(MJDtoday) + '.fits')

                print arcfile, flat0, _flatcor, _doflat

                if _doflat:
                    iraf.noao.imred.ccdred.ccdproc(arcfile,
                                                   output='arc_' + datea + '_' + _grism +
                                                   '_' +
                                                   str(MJDtoday) + '.fits',
                                                   overscan='no', trim='no', zerocor='no', flatcor=_flatcor, flat=flat0)
                else:
                    os.system('cp ' + arcfile + ' ' + 'arc_' + datea +
                              '_' + _grism + '_' + str(MJDtoday) + '.fits')

                iraf.noao.imred.ccdred.ccdproc('arc_' + datea + '_' + _grism + '_' + str(MJDtoday) + '.fits', output='',
                                               overscan='no', trim='yes', zerocor='no', flatcor='no', flat='',
                                               trimsec='[30:1000,1:1024]')

                arcfile = 'arc_' + datea + '_' + \
                    _grism + '_' + str(MJDtoday) + '.fits'

                ntt.util.correctcard(arcfile)
                print arcfile

                if arcfile not in outputlist:
                    outputlist.append(arcfile)

                ntt.util.updateheader(arcfile, 0, {'FILETYPE': [41104, 'pre-reduced 2D arc'],
                                                   'SINGLEXP': [True, 'TRUE if resulting from single exposure'],
                                                   'M_EPOCH': [False, 'TRUE if resulting from multiple epochs'],
                                                   'PROV1': [readkey3(readhdr(arcfile), 'ARCFILE'), 'Originating file'],
                                                   'TRACE1': [readkey3(readhdr(arcfile), 'ARCFILE'),
                                                              'Originating file']})

                arcref = ntt.util.searcharc(obj0, '')[0]
                if not arcref:
                    identific = iraf.longslit.identify(images=arcfile, section='column 10',
                                                       coordli='direc$standard/ident/Lines_XeAr_SOFI.dat', nsum=10,
                                                       fwidth=7, order=3, mode='h', Stdout=1, verbose='yes')
                else:
                    print arcref
                    os.system('cp ' + arcref + ' .')
                    arcref = string.split(arcref, '/')[-1]
                    if not os.path.isdir('database/'):
                        os.mkdir('database/')
                    if os.path.isfile(ntt.util.searcharc(obj0, '')[1] + '/database/id' + re.sub('.fits', '', arcref)):
                        os.system('cp ' + ntt.util.searcharc(obj0, '')[1] + '/database/id' + re.sub('.fits', '',
                                                                                                    arcref) + ' database/')

                    print arcref, arcfile
                    #                        time.sleep(5)
                    #                        os.system('rm -rf database/idarc_20130417_GR_56975')
                    #                        raw_input('ddd')
                    identific = iraf.longslit.reidentify(referenc=arcref, images=arcfile, interac='NO',  # _interact,
                                                         section='column 10', shift=0.0,
                                                         coordli='direc$standard/ident/Lines_XeAr_SOFI.dat',
                                                         overrid='yes', step=0, newaps='no', nsum=5, nlost=2,
                                                         mode='h', verbose='yes', Stdout=1)
                    #                        print identific
                    #                        raw_input('ddd')
                    identific = iraf.longslit.reidentify(referenc=arcref, images=arcfile, interac=_interact,
                                                         section='column 10', shift=1.0,
                                                         coordli='direc$standard/ident/Lines_XeAr_SOFI.dat',
                                                         overrid='yes', step=0, newaps='no', nsum=5, nlost=2,
                                                         mode='h', verbose='yes', Stdout=1)
                    #                        fitsfile = ntt.efoscspec2Ddef.continumsub('new3.fits', 6, 1)
                    # reidentify needs to be run twice (reason unclear)
                    #                        print identific
                    #                        raw_input('ddd')
                    if _interactive:
                        answ = raw_input(
                            '\n### do you like the identification [[y]/n]')
                        if not answ:
                            answ = 'y'
                    else:
                        answ = 'y'
                    if answ in ['n', 'N', 'no', 'NO', 'No']:
                        yy1 = pyfits.open(arcref)[0].data[:, 10:20].mean(1)
                        xx1 = arange(len(yy1))
                        yy2 = pyfits.open(arcfile)[0].data[:, 10:20].mean(1)
                        xx2 = arange(len(yy2))

                        ntt.util.delete('_new3.fits')
                        hdu = pyfits.PrimaryHDU(yy1)
                        hdulist = pyfits.HDUList([hdu])
                        hdulist.writeto('_new3.fits')

                        fitsfile = ntt.efoscspec2Ddef.continumsub('_new3.fits', 4, 1)
                        yy1 = pyfits.open(fitsfile)[0].data

                        ntt.util.delete('_new3.fits')
                        hdu = pyfits.PrimaryHDU(yy2)
                        hdulist = pyfits.HDUList([hdu])
                        hdulist.writeto('_new3.fits')

                        fitsfile = ntt.efoscspec2Ddef.continumsub('_new3.fits', 4, 1)
                        yy2 = pyfits.open(fitsfile)[0].data

                        _shift = ntt.efoscspec2Ddef.checkwavelength_arc(
                            xx1, yy1, xx2, yy2, '', '') * (-1)

                        print arcref, arcfile, _shift
                        identific = iraf.longslit.reidentify(referenc=arcref, images=arcfile, interac='YES',
                                                             section='column 10', shift=_shift,
                                                             coordli='direc$standard/ident/Lines_XeAr_SOFI.dat',
                                                             overrid='yes', step=0, newaps='no', nsum=5, nlost=2,
                                                             mode='h', verbose='yes', Stdout=1)

                        answ = raw_input('\n### is it ok now ? [[y]/n] ')
                        if not answ:
                            answ = 'y'
                        if answ in ['n', 'N', 'no', 'NO', 'No']:
                            sys.exit(
                                '\n### Warning: line identification with some problems')
                iraf.longslit.reidentify(referenc=arcfile, images=arcfile, interac='NO', section='column 10',
                                         coordli='direc$standard/ident/Lines_XeAr_SOFI.dat', overrid='yes', step=10,
                                         newaps='yes', nsum=5, nlost=2, mode='h', verbose='no')
                iraf.longslit.fitcoords(images=re.sub('.fits', '', arcfile), fitname=re.sub('.fits', '', arcfile),
                                        interac='no', combine='yes', databas='database',
                                        function='legendre', yorder=4, logfile='', plotfil='', mode='h')
                if identific:
                    _rms = float(identific[-1].split()[-1])
                    _num = float(identific[-1].split()[2].split('/')[0])
                    hdr = ntt.util.readhdr(arcfile)
                    hedvec = {'LAMRMS': [_rms * .1, 'residual RMS [nm]'],
                              'LAMNLIN': [_num, 'Nb of arc lines used in the fit of the wavel. solution'],
                              'SPEC_ERR': [(_rms * .1) / sqrt(float(_num)), 'statistical uncertainty'],
                              'SPEC_SYE': [0.1, 'systematic error']}
                    ntt.util.updateheader(arcfile, 0, hedvec)
            else:
                sys.exit('Warning: arcfile not found')
        else:
            print '### warning: no arc lamp available for grism ' + str(_grism)
        # ########################################################################################################
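        # Added note (not in the original): for every pointing of this grism
        # the loop below optionally corrects cross-talk, applies the
        # normalized flat, trims the frames, forms A-B pair subtractions to
        # remove the IR sky, and then rectifies the sky-subtracted frames in
        # 2D with specred.transform using the arc solution derived above.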
        for field in fieldlist[_grism]:
            listaobj = fieldlist[_grism][field]
            listaobj = ntt.sofiphotredudef.sortbyJD(listaobj)
            listatemp = listaobj[:]
            # ##############             flat            ######################
            if listflat and _doflat:
                flat0 = ntt.util.searchflat(listaobj[0], listflat)[0]
            else:
                flat0 = ''
            if flat0:
                _flatcor = 'yes'
            else:
                _flatcor = 'no'

            ##########   crosstalk        ###########################

            listatemp2 = []
            _date = readkey3(readhdr(listatemp[0]), 'date-night')
            for img in listatemp:
                #                    num2=listatemp.index(listasub[j])
                imgout = field + '_' + str(_date) + '_' + str(_grism) + '_' + str(MJDtoday) + '_' + str(
                    listatemp.index(img)) + '.fits'
                print '\n### input image: ' + str(img)
                delete(imgout)
                listatemp2.append(imgout)
                if _docross:
                    print '### correct for cross talk   .....   done'
                    ntt.sofiphotredudef.crosstalk(img, imgout)
                    correctcard(imgout)
                    ntt.util.updateheader(
                        imgout, 0, {'CROSSTAL': ['True', '']})
                else:
                    os.system('cp ' + img + ' ' + imgout)
                    correctcard(imgout)
                if _flatcor == 'yes':
                    print '### correct for flat field   .....   done'
                    try:
                        iraf.noao.imred.ccdred.ccdproc(imgout, output='', overscan='no', trim='no', zerocor='no',
                                                       flatcor=_flatcor, flat=flat0)
                    except:
                        iraf.imutil.imreplace(
                            images=flat0, value=0.01, lower='INDEF', upper=0.01, radius=0)
                        iraf.noao.imred.ccdred.ccdproc(imgout, output='', overscan='no', trim='no', zerocor='no',
                                                       flatcor=_flatcor, flat=flat0)
                iraf.noao.imred.ccdred.ccdproc(imgout, output='', overscan='no', trim='yes', zerocor='no',
                                               flatcor='no', flat='', trimsec='[30:1000,1:1024]')
                ntt.util.updateheader(
                    imgout, 0, {'FLATCOR': [flat0, 'flat correction']})

                if imgout not in outputlist:
                    outputlist.append(imgout)
                ntt.util.updateheader(imgout, 0, {'FILETYPE': [42104, 'pre-reduced frame'],
                                                  'SINGLEXP': [True, 'TRUE if resulting from single exposure'],
                                                  'M_EPOCH': [False, 'TRUE if resulting from multiple epochs'],
                                                  'PROV1': [readkey3(readhdr(imgout), 'ARCFILE'), 'Originating file'],
                                                  'TRACE1': [readkey3(readhdr(imgout), 'ARCFILE'), 'Originating file']})
                print '### output image: ' + str(imgout)

            listatemp = listatemp2[:]
            #########    differences object images  #####################
            listasub = ntt.sofispec2Ddef.findsubimage(listatemp)
            reduced = []
            print '\n### Select Frames to be subtracted (eg A-B, B-A, C-D, D-C, ....) '
            print '###    frame1 \t  frame2  \t   offset1  \t   offset2  \t  JD1  \t    JD2\n'
            if len(listatemp) >= 2 and len(listasub) >= 2:
                for j in range(0, len(listatemp)):
                    print '### ', listatemp[j], listasub[j], str(readkey3(readhdr(listatemp[j]), 'xcum')), str(
                        readkey3(readhdr(listasub[j]), 'xcum')), \
                        str(readkey3(readhdr(listatemp[j]), 'JD')), str(
                            readkey3(readhdr(listasub[j]), 'JD'))
                    if _interactive:
                        answ = raw_input('\n### ok [[y]/n] ? ')
                        if not answ:
                            answ = 'y'
                    else:
                        answ = 'y'
                    num1 = j
                    image1 = listatemp[j]
                    _date = readkey3(readhdr(image1), 'date-night')
                    if answ == 'y':
                        num2 = listatemp.index(listasub[j])
                        image2 = listasub[j]
                    else:
                        image2 = raw_input(
                            'which image do you want to subtract')
                        num2 = listatemp.index(image2)
                    imgoutsub = field + '_' + str(_date) + '_' + str(_grism) + '_' + str(MJDtoday) + '_' + str(
                        num1) + '_' + str(num2) + '.fits'
                    delete(imgoutsub)
                    iraf.images.imutil.imarith(
                        operand1=image1, op='-', operand2=image2, result=imgoutsub, verbose='no')
                    ntt.util.updateheader(imgoutsub, 0, {'skysub': [image2, 'sky image subtracted'],
                                                         'FILETYPE': [42115, 'pre-reduced frame sky subtracted'],
                                                         'TRACE1': [image1, 'Originating file'],
                                                         'PROV2': [readkey3(readhdr(image2), 'ARCFILE'),
                                                                   'Originating file'],
                                                         'TRACE2': [image2, 'Originating file']})

                    reduced.append(imgoutsub)
                    if imgoutsub not in outputlist:
                        outputlist.append(imgoutsub)
            ########################     2D wavelength calibration      ########
            for img in reduced:
                if arcfile:
                    hdra = ntt.util.readhdr(arcfile)
                    delete('t' + img)
                    iraf.specred.transform(input=img, output='t' + img, minput='',
                                           fitnames=re.sub('.fits', '', arcfile), databas='database',
                                           x1='INDEF', x2='INDEF', y1='INDEF', y2='INDEF', flux='yes', mode='h',
                                           logfile='logfile')
                    ntt.util.updateheader('t' + img, 0,
                                          {'ARC': [arcfile, ''], 'FILETYPE': [42106, 'wavelength calibrated 2D frame'],
                                           'TRACE1': [img, 'Originating file']})
                    ntt.util.updateheader(
                        't' + img, 0, {'TRACE1': [img, 'Originating file']})
                    ntt.util.updateheader('t' + img, 0,
                                          {'LAMRMS': [ntt.util.readkey3(hdra, 'LAMRMS'), 'residual RMS [nm]'],
                                           'LAMNLIN': [ntt.util.readkey3(hdra, 'LAMNLIN'), 'number of arc lines'],
                                           'SPEC_ERR': [ntt.util.readkey3(hdra, 'SPEC_ERR'), 'statistical uncertainty'],
                                           'SPEC_SYE': [ntt.util.readkey3(hdra, 'SPEC_SYE'), 'systematic error']})
                    ###########################
                    delete('t' + arcfile)
                    iraf.specred.transform(input=arcfile, output='t' + arcfile, minput='',
                                           fitnames=re.sub('.fits', '', arcfile), databas='database',
                                           x1='INDEF', x2='INDEF', y1='INDEF', y2='INDEF', flux='yes', mode='h',
                                           logfile='logfile')
                    specred = ntt.util.spectraresolution2(arcfile, 50)
                    if specred:
                        ntt.util.updateheader(
                            't' + img, 0, {'SPEC_RES': [specred, 'Spectral resolving power']})
                    delete('t' + arcfile)
                    ###########################
                    iraf.hedit('t' + img, 'TRACE2', delete='yes',
                               update='yes', verify='no', Stdout=1)

                    if 't' + img not in outputlist:
                        outputlist.append('t' + img)
                    print '\n### 2D frame t' + str(img) + ' wavelength calibrated  ............ done'

                    _skyfile = ntt.__path__[
                        0] + '/standard/ident/sky_' + _grism + '.fits'  # check in wavelength   #########
                    hdr = ntt.util.readhdr(img)
                    if glob.glob(_skyfile) and readkey3(hdr, 'exptime') > 20.:
                        _original = readkey3(hdr, 'ORIGFILE')
                        _archive = readkey3(hdr, 'ARCFILE')
                        if os.path.isfile(_archive):
                            imgstart = _archive
                        elif os.path.isfile(_original):
                            imgstart = _original
                        else:
                            imgstart = ''
                        if imgstart:
                            delete('_tmp.fits')
                            print imgstart, arcfile
                            iraf.specred.transform(input=imgstart, output='_tmp.fits', minput='',
                                                   fitnames=re.sub('.fits', '', arcfile), databas='database',
                                                   x1='INDEF', x2='INDEF', y1='INDEF', y2='INDEF', flux='yes', mode='h',
                                                   logfile='logfile')

                            shift = ntt.sofispec2Ddef.skysofifrom2d('_tmp.fits', _skyfile)
                            zro = pyfits.open('_tmp.fits')[0].header.get('CRVAL2')

                            delete('_tmp.fits')
                            if _interactive:
                                answ = raw_input(
                                    'do you want to correct the wavelength calibration with this shift: ' + str(
                                        shift) + ' [[y]/n] ? ')
                                if not answ:
                                    answ = 'y'
                            else:
                                answ = 'y'
                            if answ.lower() in ['y', 'yes']:
                                ntt.util.updateheader('t' + img, 0,
                                                      {'CRVAL2': [zro + int(shift), ''], 'shift': [float(shift), '']})
                            #                                    ntt.util.updateheader('t'+img,0,{'shift':[float(shift),'']})
                            print '\n### check wavelength calibration with sky lines ..... done'
                    try:
                        hdrt = ntt.util.readhdr('t' + img)
                        wavelmin = float(readkey3(hdrt, 'CRVAL2')) + (0.5 - float(readkey3(hdrt, 'CRPIX2'))) * float(
                            readkey3(hdrt, 'CDELT2'))
                        wavelmax = float(readkey3(hdrt, 'CRVAL2')) + (
                            (float(readkey3(hdrt, 'NAXIS2')) + 0.5 - float(readkey3(hdrt, 'CRPIX2'))) * float(
                                readkey3(hdrt, 'CDELT2')))
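                        # Linear dispersion along axis 2: wavelength(pix) = CRVAL2 + (pix - CRPIX2) * CDELT2,
                        # in Angstrom; wavelmin/wavelmax are evaluated at the outer pixel edges (0.5 and
                        # NAXIS2 + 0.5) and converted to nm (factor 0.1) for the phase 3 keywords below.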
                        hedvec = {}
                        hedvec['WAVELMIN'] = [
                            wavelmin * .1, '[nm] minimum wavelength']
                        hedvec['WAVELMAX'] = [
                            wavelmax * .1, ' [nm] maximum wavelength']
                        hedvec['XMIN'] = [wavelmin, '[A] minimum wavelength']
                        hedvec['XMAX'] = [wavelmax, '[A]  maximum wavelength']
                        hedvec['SPEC_BW'] = [
                            (wavelmax * .1) - (wavelmin * .1), '[nm] Bandpass Width Wmax - Wmin']
                        hedvec['SPEC_VAL'] = [
                            ((wavelmax * .1) + (wavelmin * .1)) / 2., '[nm] Mean Wavelength']
                        hedvec['SPEC_BIN'] = [
                            ((wavelmax * .1) - (wavelmin * .1)) /
                            (float(readkey3(hdr, 'NAXIS2')) - 1),
                            'Wavelength bin size [nm/pix]']
                        hedvec['VOCLASS'] = ['SPECTRUM V1.0', 'VO Data Model']
                        hedvec['VOPUB'] = ['ESO/SAF',
                                           'VO Publishing Authority']
                        #                            hedvec['APERTURE']=[float(re.sub('slit','',readkey3(hdrt,'slit'))),'aperture width']
                        ntt.util.updateheader('t' + img, 0, hedvec)
                    except:
                        pass
                else:
                    print '\n### Warning: arc not found for the image ' + str(img) + ' with setup ' + str(_grism)

    reduceddata = rangedata(outputlist)
    print '\n### adding keywords for phase 3 ....... '
    f = open('logfile_spec2d_' + str(reduceddata) +
             '_' + str(datenow) + '.raw.list', 'w')
    for img in outputlist:
        if img[-4:] == 'fits':
            hdr = readhdr(img)
            # ###############################################
            # cancel pc matrix
            if 'PC1_1' in hdr.keys():
                aaa = iraf.hedit(img, 'PC1_1', delete='yes',
                                 update='yes', verify='no', Stdout=1)
            if 'PC2_2' in hdr.keys():
                aaa = iraf.hedit(img, 'PC2_2', delete='yes',
                                 update='yes', verify='no', Stdout=1)
            if 'PC1_2' in hdr.keys():
                aaa = iraf.hedit(img, 'PC1_2', delete='yes',
                                 update='yes', verify='no', Stdout=1)
            if 'PC2_1' in hdr.keys():
                aaa = iraf.hedit(img, 'PC2_1', delete='yes',
                                 update='yes', verify='no', Stdout=1)
            #################
            # added for DR2
            print img

            if 'NCOMBINE' in hdr:
                _ncomb = readkey3(hdr, 'NCOMBINE')
            else:
                _ncomb = 1.0

            ntt.util.updateheader(
                img, 0, {'DETRON ': [12, 'Readout noise per output (e-)']})
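            # Effective readout noise: the 12 e- per-output read noise scaled by
            # sqrt(pi/2) / sqrt(NDIT * NCOMBINE), accounting for the averaging of
            # NDIT sub-integrations over NCOMBINE combined frames.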
            ntt.util.updateheader(img, 0, {'EFFRON': [12. * (1 / sqrt(readkey3(hdr, 'ndit') * _ncomb)) * sqrt(pi / 2),
                                                      'Effective readout noise per output (e-)']})
            ntt.util.phase3header(img)  # phase 3 definitions
            ############################
            #  change for DR2
            ############################
            texp = float(readkey3(hdr, 'dit')) * float(readkey3(hdr, 'ndit'))
            mjdend = float(readkey3(hdr, 'MJD-OBS')) + (float(readkey3(hdr, 'ndit')) * (
                float(readkey3(hdr, 'dit')) + 1.8)) / (60. * 60. * 24.)
            strtexp = time.strftime('%H:%M:%S', time.gmtime(texp))
            _telapse = (mjdend - float(readkey3(hdr, 'MJD-OBS'))) * \
                60. * 60 * 24.
            # tmid=_telapse/2.
            tmid = (mjdend + float(readkey3(hdr, 'MJD-OBS'))) / 2
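            # Timing keywords: TEXPTIME = DIT * NDIT; MJD-END adds NDIT * (DIT + 1.8 s
            # overhead) in days to MJD-OBS; TELAPSE is the MJD-END - MJD-OBS interval
            # converted to seconds; TMID is the mid-exposure MJD.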
            ntt.util.updateheader(img, 0, {'quality': ['Final', 'fast or rapid reduction'],
                                           'BUNIT': ['ADU', 'Physical unit of array values'],
                                           'DIT': [readkey3(hdr, 'dit'), 'Detector Integration Time'],
                                           'NDIT': [readkey3(hdr, 'ndit'), 'Number of sub-integrations'],
                                           'TEXPTIME': [texp, 'Total integration time of all exposures (s)'],
                                           'EXPTIME': [texp, 'Total integration time. ' + strtexp],
                                           'MJD-END': [mjdend, 'End of observations (days)'],
                                           'TELAPSE': [_telapse, 'Total elapsed time [s]'],
                                           'TMID': [tmid, '[d] MJD mid exposure'],
                                           'TITLE': [readkey3(hdr, 'object'), 'Dataset title'],
                                           #'TITLE':[str(tmid)[0:9]+' '+str(readkey3(hdr,'object'))+' '+str(readkey3(hdr,'grism'))+' '+\
                                           # str(readkey3(hdr,'filter'))+'
                                           # '+str(readkey3(hdr,'slit')),'Dataset
                                           # title'],\
                                           'EXT_OBJ': [False, 'TRUE if extended'],
                                           'CONTNORM': [False, 'spectrum normalized to the continuum'],
                                           'TOT_FLUX': [False, 'TRUE if phot cond and all src flux is captured'],
                                           'SPECSYS': ['TOPOCENT', 'Reference frame for spectral coordinate'],
                                           'FLUXCAL': ['ABSOLUTE', 'type of flux calibration'],
                                           'FLUXERR': [34.7, 'Fractional uncertainty of the flux [%]'],
                                           'DISPELEM': ['Gr#' + re.sub('Gr', '', readkey3(hdr, 'grism')),
                                                        'Dispersive element name']})
            if readkey3(hdr, 'tech'):
                ntt.util.updateheader(
                    img, 0, {'PRODCATG': ['SCIENCE.IMAGE', 'Data product category']})
            aaa = str(readkey3(hdr, 'arcfiles')) + '\n'
            f.write(aaa)
            try:
                ntt.util.airmass(img)  # phase 3 definitions
            except:
                print '\n### airmass not computed for image: ', img
        else:
            print img + ' is not a fits image'
    f.close()
    return outputlist, 'logfile_spec2d_' + str(reduceddata) + '_' + str(datenow) + '.raw.list'
Ejemplo n.º 25
0
def extractspectrum(img,
                    dv,
                    inst,
                    _interactive,
                    _type,
                    automaticex=False,
                    host_ex=False,
                    match_aperture='n'):
    # print "LOGX:: Entering `extractspectrum` method/function in
    # %(__file__)s" % globals()
    import glob
    import os
    import string
    import sys
    import re
    import datetime
    import numpy as np

    MJDtoday = 55927 + (datetime.date.today() -
                        datetime.date(2012, 0o1, 0o1)).days
    from pyraf import iraf

    iraf.noao(_doprint=0)
    iraf.imred(_doprint=0)
    iraf.specred(_doprint=0)
    toforget = ['specred.apall', 'specred.transform']
    for t in toforget:
        iraf.unlearn(t)

    hdr = readhdr(img)
    iraf.specred.dispaxi = inst.get('dispaxis')

    imgex = re.sub('.fits', '_ex.fits', img)
    print(img)
    imgfast = re.sub(img.split('_')[-2] + '_', '', img)
    if not os.path.isfile(imgex) and not os.path.isfile(
            'database/ap' + re.sub('.fits', '', img)) and not os.path.isfile(
                'database/ap' + re.sub('.fits', '', imgfast)):
        _new = 'yes'
        _extract = 'yes'
    else:
        if automaticex:
            if _interactive in ['Yes', 'yes', 'YES', 'y', 'Y']:
                answ = 'x'
                while answ not in ['o', 'n', 's']:
                    answ = input(
                        '\n### New extraction [n], extraction with old parameters [o], skip extraction [s] ? [o]'
                    )
                    if not answ:
                        answ = 'o'
                if answ == 'o':
                    _new, _extract = 'no', 'yes'
                elif answ == 'n':
                    _new, _extract = 'yes', 'yes'
                else:
                    _new, _extract = 'yes', 'no'
            else:
                _new, _extract = 'no', 'yes'
        else:
            if _interactive in ['Yes', 'yes', 'YES', 'y', 'Y']:
                answ = 'x'
                while answ not in ['y', 'n']:
                    answ = input(
                        '\n### do you want to extract again [[y]/n] ? ')
                    if not answ:
                        answ = 'y'
                if answ == 'y':
                    _new, _extract = 'yes', 'yes'
                else:
                    _new, _extract = 'yes', 'no'
            else:
                _new, _extract = 'yes', 'yes'
    if _extract == 'yes':
        delete(imgex)
        dist = 200
        _reference = ''
        _fittrac = 'yes'
        _trace = 'yes'
        if _new == 'no':
            if not os.path.isfile('database/ap' + re.sub('.fits', '', img)):
                repstringinfile('database/ap' + re.sub('.fits', '', imgfast),
                                'database/ap' + re.sub('.fits', '', img),
                                re.sub('.fits', '', imgfast),
                                re.sub('.fits', '', img))
            _find = 'no'
            _recenter = 'no'
            _edit = 'no'
            _trace = 'no'
            _fittrac = 'no'
            _mode = 'h'
            _resize = 'no'
            _review = 'no'
            iraf.specred.mode = 'h'
            _interactive = 'no'
        else:
            iraf.specred.mode = 'q'
            _mode = 'q'
            _find = 'yes'
            _recenter = 'yes'
            _edit = 'yes'
            _review = 'yes'
            _resize = dv[_type]['_resize']

        # _interactive = False
        _recenter = 'no'
        _resize = 'no'
        _edit = 'yes'
        _trace = 'yes'
        _fittrac = 'no'
        _review = 'yes'

        _lower = dv[_type]['_lower']
        _upper = dv[_type]['_upper']
        _b_sample = dv[_type]['_b_sample']

        if match_aperture == 'y':

            aps = glob.glob('database/ap*')
            for ap in aps:
                print(ap.split('/')[-1])
            ap_select = input('Choose image to match apertures: ')

            #this is just to make things automatic for now
            if 'blue' in ap_select and 'lris' in inst.get('name'):
                ap_match = ap_select.replace('blue', 'red')
                ap_binning = 1.
            elif 'red' in ap_select and 'lris' in inst.get('name'):
                ap_match = ap_select.replace('red', 'blue')
                ap_binning = 2.
            elif 'blue' in ap_select and 'kast' in inst.get('name'):
                ap_match = ap_select.replace('blue', 'red')
                ap_binning = 1.
            elif 'red' in ap_select and 'kast' in inst.get('name'):
                ap_match = ap_select.replace('red', 'blue')
                ap_binning = 1.

            # img_binning = hdr.get('BINNING', None).strip()
            # if img_binning != None:
            #     img_binning = float(img_binning.split(',')[0])
            img_binning = inst.get('spatial_binning')

            # ap_binning = raw_input('Enter aperture spatial binning [1]: ') or 1
            # ap_binning = float(ap_binning)

            delete('database/' + ap_match)
            new_apfile = 'database/' + ap_match
            ismainap = False

            aps = glob.glob('../master_files/ap*')
            for ap in aps:
                print(ap.split('/')[-1])
            center_ap = input('Choose master aperture for center: ')

            #get center of main reference aperture (should be a std)
            with open('../master_files/' + center_ap) as c_ap:
                c_ap_data = c_ap.readlines()
                for i, c_line in enumerate(c_ap_data):
                    if 'center' in c_line:
                        center_ap_loc = float(c_line.split()[2])

                with open('database/' + ap_select) as old_file:
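                    # Rebuild the aperture database entry for ap_match: keep the master
                    # aperture's centre but substitute, for every aperture listed in
                    # ap_select, the binning-scaled lower/upper limits and background
                    # sample intervals returned by get_relevant_ap_data.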
                    ap_data = old_file.readlines()
                    lows, highs, b_samps = get_relevant_ap_data(
                        ap_data, ap_binning, img_binning, inst)
                    with open(new_apfile, 'w') as new_file:
                        for i in range(len(lows)):
                            low = str(lows[i])
                            high = str(highs[i])
                            for c_line in c_ap_data:
                                if 'begin' in c_line and 'aperture' in c_line:
                                    new_file.write(
                                        c_line.replace(
                                            (c_line.split()[2] + ' 1'),
                                            c_line.split()[2] + ' ' +
                                            str(i + 1)))
                                elif 'begin' not in c_line and 'aperture' in c_line:
                                    new_file.write(
                                        c_line.replace(c_line.split()[1],
                                                       str(i + 1)))
                                elif 'low' in c_line and 'reject' not in c_line:
                                    new_file.write(
                                        c_line.replace(c_line.split()[2], low))
                                elif 'high' in c_line and 'reject' not in c_line:
                                    new_file.write(
                                        c_line.replace(c_line.split()[2],
                                                       high))
                                elif 'sample' in c_line:
                                    b_sample = str(b_samps[i])
                                    new_file.write(
                                        c_line.replace(
                                            c_line.split('sample')[1].split(
                                                '\n')[0], ' ' + b_sample))
                                else:
                                    new_file.write(c_line)

            _find = 'yes'
            _recenter = 'yes'
            _reference = ap_match[2:]
            # _reference = 'BD262606_lris_red_1'

        # use_diff_aperture = raw_input('Trace different aperture? y/[n]: ')
        # if use_diff_aperture == 'y':
        #     aps = glob.glob('../master_files/ap*')
        #     for ap in aps:
        #         print (ap.split('/')[-1])
        #     ap_select = raw_input('Choose ref image from list above: ')
        #     os.system('cp ' + ' ../master_files/' + ap_select + ' database/'+ap_select)
        #     _reference = ap_select[2:]
        #     _fittrac = 'no'
        #     _trace = 'no'

        if host_ex:
            _recenter = 'n'
            _trace = 'n'
            _fittrac = 'n'
            _find = 'n'

        iraf.specred.apall(img,
                           output=imgex,
                           referen=_reference,
                           trace=_trace,
                           fittrac=_fittrac,
                           find=_find,
                           recenter=_recenter,
                           edit=_edit,
                           nfind=1,
                           backgro='fit',
                           lsigma=3,
                           usigma=3,
                           b_order=dv[_type]['_b_order'],
                           format='multispec',
                           extras='yes',
                           b_function='chebyshev',
                           b_sample=_b_sample,
                           clean='yes',
                           pfit='fit1d',
                           lower=_lower,
                           upper=_upper,
                           t_niter=dv[_type]['_t_niter'],
                           width=dv[_type]['_width'],
                           radius=dv[_type]['_radius'],
                           line=inst.get('approx_extract_column', 'INDEF'),
                           nsum=dv[_type]['_nsum'],
                           t_step=dv[_type]['_t_step'],
                           t_nsum=dv[_type]['_t_nsum'],
                           t_nlost=dv[_type]['_t_nlost'],
                           t_sample=dv[_type]['_t_sample'],
                           resize=_resize,
                           t_order=dv[_type]['_t_order'],
                           weights=dv[_type]['_weights'],
                           interactive=_interactive,
                           review=_review,
                           mode=_mode)
    else:
        print('\n### skipping new extraction')
    return imgex
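# A minimal, hypothetical usage sketch of extractspectrum(): the file name and the
# contents of the dv/inst dictionaries below are illustrative placeholders chosen to
# match the keys read above, not values shipped with any particular pipeline.
if False:  # illustration only, never executed
    dv = {'obj': {'_lower': -5, '_upper': 5, '_b_sample': '-25:-15,15:25',
                  '_b_order': 2, '_t_niter': 5, '_width': 10., '_radius': 10.,
                  '_nsum': 30, '_t_step': 10, '_t_nsum': 10, '_t_nlost': 20,
                  '_t_sample': '*', '_t_order': 3, '_weights': 'variance',
                  '_resize': 'no'}}
    inst = {'name': 'kast_blue', 'dispaxis': 1, 'spatial_binning': 1.,
            'approx_extract_column': 'INDEF'}
    imgex = extractspectrum('sn2024xyz_kast_blue_1.fits', dv, inst,
                            _interactive='yes', _type='obj')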
Ejemplo n.º 26
0
def sofispec1Dredu(files, _interactive, _ext_trace, _dispersionline, _automaticex, _verbose=False):
    # print "LOGX:: Entering `sofispec1Dredu` method/function in %(__file__)s"
    # % globals()
    import re
    import string
    import sys
    import os
    os.environ["PYRAF_BETA_STATUS"] = "1"
    import ntt
    try:
        import pyfits
    except ImportError:
        from astropy.io import fits as pyfits

    import numpy as np
    import datetime
    import pylab as pl
    from pyraf import iraf

    dv = ntt.dvex()
    now = datetime.datetime.now()
    datenow = now.strftime('20%y%m%d%H%M')
    MJDtoday = 55927 + (datetime.date.today() - datetime.date(2012, 01, 01)).days
    scal = np.pi / 180.
    hdr0 = ntt.util.readhdr(re.sub('\n', '', files[0]))
    _gain = ntt.util.readkey3(hdr0, 'gain')
    _rdnoise = ntt.util.readkey3(hdr0, 'ron')
    std_sun, rastd_sun, decstd_sun, magstd_sun = ntt.util.readstandard(
        'standard_sofi_sun.txt')
    std_vega, rastd_vega, decstd_vega, magstd_vega = ntt.util.readstandard(
        'standard_sofi_vega.txt')
    std_phot, rastd_phot, decstd_phot, magstd_phot = ntt.util.readstandard(
        'standard_sofi_phot.txt')
    outputfile = []
    objectlist, RA, DEC = {}, {}, {}
    for img in files:
        img = re.sub('\n', '', img)
        hdr = ntt.util.readhdr(img)
        _ra = ntt.util.readkey3(hdr, 'RA')
        _dec = ntt.util.readkey3(hdr, 'DEC')
        _grism = ntt.util.readkey3(hdr, 'grism')
        _filter = ntt.util.readkey3(hdr, 'filter')
        _slit = ntt.util.readkey3(hdr, 'slit')
        cc_sun = np.arccos(np.sin(_dec * scal) * np.sin(decstd_sun * scal) + np.cos(_dec * scal) *
                           np.cos(decstd_sun * scal) * np.cos((_ra - rastd_sun) * scal)) * ((180 / np.pi) * 3600)
        cc_vega = np.arccos(np.sin(_dec * scal) * np.sin(decstd_vega * scal) + np.cos(_dec * scal) *
                            np.cos(decstd_vega * scal) * np.cos((_ra - rastd_vega) * scal)) * ((180 / np.pi) * 3600)
        cc_phot = np.arccos(np.sin(_dec * scal) * np.sin(decstd_phot * scal) + np.cos(_dec * scal) *
                            np.cos(decstd_phot * scal) * np.cos((_ra - rastd_phot) * scal)) * ((180 / np.pi) * 3600)
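        # cc_sun/cc_vega/cc_phot are great-circle separations (arcsec) between the frame
        # pointing and each catalogued standard, arccos(sin d1 sin d2 + cos d1 cos d2 cos dRA)
        # converted with (180/pi)*3600; a match closer than 100 arcsec classifies the frame
        # as solar-analog, photometric or telluric-A standard below.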
        if min(cc_sun) < 100:
            _type = 'sun'
        elif min(cc_phot) < 100:
            _type = 'stdp'
        elif min(cc_vega) < 100:
            _type = 'vega'
        else:
            _type = 'obj'
        if min(cc_phot) < 100:
            if _verbose:
                print img, 'phot', str(min(cc_phot)), str(std_phot[np.argmin(cc_phot)])
            ntt.util.updateheader(img, 0, {'stdname': [std_phot[np.argmin(cc_phot)], ''],
                                           'magstd': [float(magstd_phot[np.argmin(cc_phot)]), '']})
        # ntt.util.updateheader(img,0,{'magstd':[float(magstd_phot[argmin(cc_phot)]),'']})
        elif min(cc_sun) < 100:
            if _verbose:
                print img, 'sun', str(min(cc_sun)), str(std_sun[np.argmin(cc_sun)])
            ntt.util.updateheader(img, 0, {'stdname': [std_sun[np.argmin(cc_sun)], ''],
                                           'magstd': [float(magstd_sun[np.argmin(cc_sun)]), '']})
        # ntt.util.updateheader(img,0,{'magstd':[float(magstd_sun[argmin(cc_sun)]),'']})
        elif min(cc_vega) < 100:
            if _verbose:
                print img, 'vega', str(min(cc_vega)), str(std_vega[np.argmin(cc_vega)])
            ntt.util.updateheader(img, 0, {'stdname': [std_vega[np.argmin(cc_vega)], ''],
                                           'magstd': [float(magstd_vega[np.argmin(cc_vega)]), '']})
        # ntt.util.updateheader(img,0,{'magstd':[float(magstd_vega[argmin(cc_vega)]),'']})
        else:
            if _verbose:
                print img, 'object'

        _OBID = (ntt.util.readkey3(hdr, 'esoid'))
        if _type not in objectlist:
            objectlist[_type] = {}
        if _grism not in objectlist[_type]:
            objectlist[_type][_grism] = {}
        if _OBID not in objectlist[_type][_grism]:
            objectlist[_type][_grism][_OBID] = []
        objectlist[_type][_grism][_OBID].append(img)

    if 'stdp' not in objectlist:
        print '###  warning: no photometric standard'
    else:
        print '### photometric standard in the list of objects'
    if 'sun' not in objectlist:
        print '### warning: no telluric G standard (sun type)'
    else:
        print '### telluric G standard (sun type) in the list of objects'
    if 'vega' not in objectlist:
        print '### warning: no telluric A standard (vega type)'
    else:
        print '### telluric A standard (vega type) in the list of objects'

    iraf.noao(_doprint=0)
    iraf.imred(_doprint=0)
    iraf.specred(_doprint=0)
    iraf.immatch(_doprint=0)
    iraf.imutil(_doprint=0)
    toforget = ['specred.apall', 'specred.transform']
    for t in toforget:
        iraf.unlearn(t)
    iraf.specred.apall.readnoi = _rdnoise
    iraf.specred.apall.gain = _gain
    iraf.specred.dispaxi = 2
    for _type in objectlist:
        for setup in objectlist[_type]:
            for _ID in objectlist[_type][setup]:
                listmerge = objectlist[_type][setup][_ID]
                listmerge = ntt.sortbyJD(listmerge)
                _object = ntt.util.readkey3(
                    ntt.util.readhdr(listmerge[0]), 'object')
                if string.count(_object, '/') or string.count(_object, '.') or string.count(_object, ' '):
                    nameobj = string.split(_object, '/')[0]
                    nameobj = string.split(nameobj, ' ')[0]
                    nameobj = string.split(nameobj, '.')[0]
                else:
                    nameobj = _object
                _date = ntt.util.readkey3(
                    ntt.util.readhdr(listmerge[0]), 'date-night')
                outputimage = nameobj + '_' + _date + \
                    '_' + setup + '_merge_' + str(MJDtoday)
                outputimage = ntt.util.name_duplicate(
                    listmerge[0], outputimage, '')
                print '### setup= ', setup, ' name field= ', nameobj, ' merge image= ', outputimage, '\n'
#################
#  added to avoid crashing with a single frame
#  header will not be updated with all info
#################

                if len(listmerge) == 1:
                    ntt.util.delete(outputimage)
                    iraf.imutil.imcopy(listmerge[0], output=outputimage, verbose='no')
                    answ = 'n'
                else:
                    if os.path.isfile(outputimage) and _interactive:
                        answ = raw_input(
                            'combined frame of dithered spectra already created. Do you want to make it again [[y]/n] ? ')
                        if not answ:
                            answ = 'y'
                    else:
                        answ = 'y'
#################
                if answ in ['Yes', 'y', 'Y', 'yes']:
                    if _interactive:
                        automaticmerge = raw_input(
                            '\n### Do you want to try to find the dither between frames automatically [[y]/n]')
                        if not automaticmerge:
                            automaticmerge = 'yes'
                        elif automaticmerge.lower() in ['y', 'yes']:
                            automaticmerge = 'yes'
                        else:
                            automaticmerge = 'no'
                    else:
                        automaticmerge = 'yes'
                    if automaticmerge == 'yes':
                        offset = 0
                        offsetvec = []
                        _center0 = ntt.sofispec1Ddef.findaperture(
                            listmerge[0], False)
                        _offset0 = ntt.util.readkey3(
                            ntt.util.readhdr(listmerge[0]), 'xcum')
                        print '\n### Try to merge spectra considering their offset along the x axis .......'
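                        # For each frame the measured aperture centre is converted into an
                        # expected offset relative to the first frame and compared with the
                        # cumulative dither offset from the 'xcum' keyword; a disagreement of
                        # 20 pixels or more forces the interactive identification below.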
                        f = open('_offset', 'w')
                        for img in listmerge:
                            _center = ntt.sofispec1Ddef.findaperture(
                                img, False)
                            _center2 = (
                                float(_center) + (float(_offset0) - float(_center0))) * (-1)
                            _offset = (-1) * \
                                ntt.util.readkey3(
                                    ntt.util.readhdr(img), 'xcum')
                            if abs(_center2 - _offset) >= 20:
                                automaticmerge = 'no'
                                break
                            else:
                                offset3 = _center2
                            offsetvec.append(offset3)
                            line = str(offset3) + '   0\n'
                            f.write(line)
                        f.close()
                    if automaticmerge == 'yes':
                        print '### automatic merge .......... done'
                    else:
                        print '\n### warning: identifying the spectrum positions interactively '
                        offset = 0
                        offsetvec = []
                        _z1, _z2, goon = ntt.util.display_image(
                            listmerge[0], 1, '', '', False)
                        print '\n### find aperture on first frame and use it as reference position of ' \
                              'the spectra (mark with ' + '"' + 'm' + '"' + ')'
                        _center0 = ntt.sofispec1Ddef.findaperture(
                            listmerge[0], True)
                        _offset0 = ntt.util.readkey3(
                            ntt.util.readhdr(listmerge[0]), 'xcum')
                        print '\n### find the aperture on all the spectra frames (mark with ' + '"' + 'm' + '"' + ')'
                        f = open('_offset', 'w')
                        for img in listmerge:
                            print '\n### ', img
                            _z1, _z2, goon = ntt.util.display_image(
                                img, 1, '', '', False)
                            _center = ntt.sofispec1Ddef.findaperture(img, True)
                            _center2 = (
                                float(_center) + (float(_offset0) - float(_center0))) * (-1)
                            _offset = (-1) * \
                                ntt.util.readkey3(
                                    ntt.util.readhdr(img), 'xcum')
                            print '\n### position from  dither header: ' + str(_offset)
                            print '### position identified interactively: ' + str(_center2)
                            offset3 = raw_input(
                                '\n### which is the right position [' + str(_center2) + '] ?')
                            if not offset3:
                                offset3 = _center2
                            offsetvec.append(offset3)
                            line = str(offset3) + '   0\n'
                            f.write(line)
                        f.close()
                    print offsetvec
                    start = int(max(offsetvec) - min(offsetvec))
                    print start
                    f = open('_goodlist', 'w')
                    print listmerge
                    for img in listmerge:
                        f.write(img + '\n')
                    f.close()
                    ntt.util.delete(outputimage)
                    ntt.util.delete('_output.fits')
                    yy1 = pyfits.open(listmerge[0])[0].data[:, 10]
                    iraf.immatch.imcombine('@_goodlist', '_output', combine='sum', reject='none', offset='_offset',
                                           masktyp='', rdnoise=_rdnoise, gain=_gain, zero='mode', Stdout=1)

                    _head = pyfits.open('_output.fits')[0].header
                    if _head['NAXIS1'] < 1024:
                        stop = str(_head['NAXIS1'])
                    else:
                        stop = '1024'
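                    # imcombine sums the dithered frames using the per-frame x offsets in
                    # '_offset'; only the column range [start:stop] covered by all dither
                    # positions (capped at the 1024-pixel detector width) is copied out.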

                    iraf.imutil.imcopy(
                            '_output[' + str(start) + ':'+stop+',*]', output=outputimage, verbose='no')

                    print outputimage
                    print len(listmerge)
                    hdr1 = ntt.util.readhdr(outputimage)
                    ntt.util.updateheader(outputimage, 0,
                                          {'SINGLEXP': [False, 'TRUE if resulting from single exposure'],
                                           'M_EPOCH': [False, 'TRUE if resulting from multiple epochs'],
                                           'EXPTIME': [ntt.util.readkey3(hdr1, 'EXPTIME') * len(listmerge),
                                                       'Total integration time per pixel (s)'],
                                           'TEXPTIME': [float(ntt.util.readkey3(hdr1, 'TEXPTIME')) * len(listmerge),
                                                        'Total integration time of all exposures (s)'],
                                           'APERTURE': [2.778e-4 * float(re.sub('long_slit_', '',
                                                                                ntt.util.readkey3(hdr1, 'slit'))),
                                                        '[deg] Aperture diameter'],
                                           'NOFFSETS': [2, 'Number of offset positions'],
                                           'NUSTEP': [0, 'Number of microstep positions'],
                                           'NJITTER': [int(ntt.util.readkey3(hdr1, 'NCOMBINE') / 2),
                                                       'Number of jitter positions']})
                    hdr = ntt.util.readhdr(outputimage)
                    matching = [s for s in hdr.keys() if "IMCMB" in s]
                    for imcmb in matching:
                        aaa = iraf.hedit(outputimage, imcmb, delete='yes', update='yes',
                                         verify='no', Stdout=1)
                    if 'SKYSUB' in hdr.keys():
                        aaa = iraf.hedit(outputimage, 'SKYSUB', delete='yes', update='yes',
                                         verify='no', Stdout=1)

                    mjdend = []
                    mjdstart = []
                    num = 0
                    for img in listmerge:
                        num = num + 1
                        hdrm = ntt.util.readhdr(img)
                        ntt.util.updateheader(outputimage, 0,
                                              {'PROV' + str(num):
                                               [ntt.util.readkey3(
                                                   hdrm, 'ARCFILE'), 'Originating file'],
                                               'TRACE' + str(num): [img, 'Originating file']})
                        mjdend.append(ntt.util.readkey3(hdrm, 'MJD-END'))
                        mjdstart.append(ntt.util.readkey3(hdrm, 'MJD-OBS'))
                    _dateobs = ntt.util.readkey3(ntt.util.readhdr(
                        listmerge[np.argmin(mjdstart)]), 'DATE-OBS')

                    _telapse = (max(mjdend) - min(mjdstart)) * \
                        60. * 60 * 24.  # *86400
                    _tmid = (max(mjdend) + min(mjdstart)) / 2

                    _title = str(_tmid)[0:9] + ' ' + str(ntt.util.readkey3(hdr, 'object')) + ' ' + str(
                        ntt.util.readkey3(hdr, 'grism')) + ' ' + \
                        str(ntt.util.readkey3(hdr, 'filter')) + \
                        ' ' + str(ntt.util.readkey3(hdr, 'slit'))
                    ntt.util.updateheader(outputimage, 0,
                                          {'MJD-OBS': [min(mjdstart), 'MJD start'],
                                           'MJD-END': [max(mjdend), 'MJD end'],
                                           'TELAPSE': [_telapse, 'Total elapsed time [s]'],
                                           'TMID': [_tmid, '[d] MJD mid exposure'],
                                           'TITLE': [_title, 'Dataset title'],
                                           'DATE-OBS': [_dateobs, 'Date of observation']})
                    # missing: merge airmass
                else:
                    print '\n### skipping re-creation of the combined spectrum'
                objectlist[_type][setup][_ID] = [outputimage]
                print '\n### setup= ', setup, ' name field= ', nameobj, ' merge image= ', outputimage, '\n'
                if outputimage not in outputfile:
                    outputfile.append(outputimage)
                ntt.util.updateheader(outputimage, 0, {'FILETYPE': [
                                      42116, 'combine 2D spectra frame']})

    if _verbose:
        if 'obj' in objectlist:
            print objectlist['obj']
        if 'stdp' in objectlist:
            print objectlist['stdp']
        if 'sun' in objectlist:
            print objectlist['sun']
        if 'vega' in objectlist:
            print objectlist['vega']

    if 'obj' not in objectlist.keys():
        sys.exit('\n### error: no objects in the list')

    sens = {}
    print '\n############################################\n### extract the spectra  '
    # print objectlist
    for setup in objectlist['obj']:
        reduced = []
        for _ID in objectlist['obj'][setup]:
            for img in objectlist['obj'][setup][_ID]:
                hdr = ntt.util.readhdr(img)
                print '\n### next object\n ', img, ntt.util.readkey3(hdr, 'object')
                _grism = ntt.util.readkey3(hdr, 'grism')
                _exptimeimg = ntt.util.readkey3(hdr, 'exptime')
                _JDimg = ntt.util.readkey3(hdr, 'JD')

                imgex = ntt.util.extractspectrum(img, dv, _ext_trace, _dispersionline, _interactive, 'obj',
                                                 automaticex=_automaticex)
                if imgex not in outputfile:
                    outputfile.append(imgex)
                ntt.util.updateheader(imgex, 0, {'FILETYPE': [42107, 'extracted 1D wave calib'],
                                                 'PRODCATG': ['SCIENCE.' + ntt.util.readkey3(hdr, 'tech').upper(),
                                                              'Data product category']})
                hdr = ntt.util.readhdr(imgex)
                matching = [s for s in hdr.keys() if "TRACE" in s]
                for imcmb in matching:
                    aaa = iraf.hedit(imgex, imcmb, delete='yes',
                                     update='yes', verify='no', Stdout=1)
                ntt.util.updateheader(
                    imgex, 0, {'TRACE1': [img, 'Originating file']})

                if os.path.isfile('database/ap' + re.sub('_ex.fits', '', imgex)):
                    if 'database/ap' + re.sub('_ex.fits', '', imgex) not in outputfile:
                        outputfile.append(
                            'database/ap' + re.sub('_ex.fits', '', imgex))

                ###########################   telluric standard   #############
                if 'sun' in objectlist and setup in objectlist['sun']:
                    _type = 'sun'
                elif 'vega' in objectlist and setup in objectlist['vega']:
                    _type = 'vega'
                else:
                    _type = 'none'
                if _type in ['sun', 'vega']:
                    stdref = ntt.__path__[
                        0] + '/standard/fits/' + str(_type) + '.fits'
                    stdvec, airmassvec, JDvec = [], [], []
                    for _ID in objectlist[_type][setup]:
                        for std in objectlist[_type][setup][_ID]:
                            _airmassstd = ntt.util.readkey3(
                                ntt.util.readhdr(std), 'airmass')
                            _JDstd = ntt.util.readkey3(
                                ntt.util.readhdr(std), 'JD')
                            JDvec.append(abs(_JDstd - _JDimg))
                            stdvec.append(std)
                            airmassvec.append(_airmassstd)
                    stdtelluric = stdvec[np.argmin(JDvec)]
                    _exptimestd = ntt.util.readkey3(
                        ntt.util.readhdr(stdtelluric), 'exptime')
                    _magstd = ntt.util.readkey3(
                        ntt.util.readhdr(stdtelluric), 'magstd')
                    print '\n\n ##### closest standard for telluric corrections  #### \n\n'
                    print stdtelluric, airmassvec[np.argmin(JDvec)]
                    stdtelluric_ex = ntt.util.extractspectrum(stdtelluric, dv, False, False, _interactive, 'std',
                                                              automaticex=_automaticex)
                    if stdtelluric_ex not in outputfile:
                        outputfile.append(stdtelluric_ex)
                    ntt.util.updateheader(stdtelluric_ex, 0, {'FILETYPE': [
                                          42107, 'extracted 1D wave calib ']})
                    ntt.util.updateheader(stdtelluric_ex, 0, {'PRODCATG': [
                        'SCIENCE.' + ntt.util.readkey3(
                            ntt.util.readhdr(stdtelluric_ex), 'tech').upper(), 'Data product category']})

                    hdr = ntt.util.readhdr(stdtelluric_ex)
                    matching = [s for s in hdr.keys() if "TRACE" in s]
                    for imcmb in matching:
                        aaa = iraf.hedit(
                            stdtelluric_ex, imcmb, delete='yes', update='yes', verify='no', Stdout=1)
                    ntt.util.updateheader(stdtelluric_ex, 0, {'TRACE1': [
                                          stdtelluric, 'Originating file']})
                    ###########################################################
                    #               SN tellurich calibration
                    imgf = re.sub('_ex.fits', '_f.fits', imgex)
                    imgf, senstelluric = ntt.sofispec1Ddef.calibrationsofi(imgex, stdtelluric_ex, stdref, imgf,
                                                                           _interactive)
                    if imgf not in outputfile:
                        outputfile.append(imgf)
                    if senstelluric not in outputfile:
                        outputfile.append(senstelluric)
                    ntt.util.updateheader(imgf, 0, {'FILETYPE': [42208, '1D wave calib, tell cor.'],
                                                    #                                                    'SNR': [ntt.util.StoN(imgf, 50),
                                                    'SNR': [ntt.util.StoN2(imgf, False),
                                                            'Average signal to noise ratio per pixel'],
                                                    'TRACE1': [imgex, 'Originating file'],
                                                    'ASSON1': [re.sub('_f.fits', '_2df.fits', imgf),
                                                               'Name of associated file'],
                                                    'ASSOC1': ['ANCILLARY.2DSPECTRUM', 'Category of associated file']})
                    ###########################################################
                    imgd = ntt.efoscspec1Ddef.fluxcalib2d(
                        img, senstelluric)  # flux calibration 2d images
                    ntt.util.updateheader(
                        imgd, 0, {'FILETYPE': [42209, '2D wavelength and flux calibrated spectrum']})
                    iraf.hedit(imgd, 'PRODCATG', delete='yes',
                               update='yes', verify='no')
                    hdrd = ntt.util.readhdr(imgd)
                    matching = [s for s in hdrd.keys() if "TRACE" in s]
                    for imcmb in matching:
                        aaa = iraf.hedit(
                            imgd, imcmb, delete='yes', update='yes', verify='no', Stdout=1)
                    ntt.util.updateheader(
                        imgd, 0, {'TRACE1': [img, 'Originating file']})
                    if imgd not in outputfile:
                        outputfile.append(imgd)
                ###############################################################
                if 'stdp' in objectlist and setup in objectlist['stdp']:
                    print '\n #####  photometric calibration   ######\n '
                    standardfile = []
                    for _ID in objectlist['stdp'][setup]:
                        for stdp in objectlist['stdp'][setup][_ID]:
                            stdp_ex = ntt.util.extractspectrum(stdp, dv, False, _dispersionline, _interactive, 'std',
                                                               automaticex=_automaticex)
                            standardfile.append(stdp_ex)
                            if stdp_ex not in outputfile:
                                outputfile.append(stdp_ex)
                            ntt.util.updateheader(stdp_ex, 0, {
                                'FILETYPE': [42107, 'extracted 1D wave calib'],
                                'TRACE1': [stdp_ex, 'Originating file'],
                                'PRODCATG': ['SCIENCE.' + ntt.util.readkey3(ntt.util.readhdr(stdp_ex), 'tech').upper(),
                                             'Data product category']})
                    print '\n### ', standardfile, ' \n'
                    if len(standardfile) >= 2:
                        standardfile0 = raw_input(
                            'which one do you want to use [' + str(standardfile[0]) + '] ? ')
                        if not standardfile0:
                            standardfile0 = standardfile[0]
                    else:
                        standardfile0 = standardfile[0]
                    print standardfile0
                    stdpf = re.sub('_ex.fits', '_f.fits', standardfile0)
                    stdpf, senstelluric2 = ntt.sofispec1Ddef.calibrationsofi(standardfile0, stdtelluric_ex, stdref,
                                                                             stdpf, _interactive)
                    if stdpf not in outputfile:
                        outputfile.append(stdpf)
                    ntt.util.updateheader(stdpf, 0, {'FILETYPE': [42208, '1D wave calib, tell cor'],
                                                     'TRACE1': [stdp, 'Originating file']})
                    stdname = ntt.util.readkey3(
                        ntt.util.readhdr(standardfile0), 'stdname')
                    standardfile = ntt.__path__[
                        0] + '/standard/flux/' + stdname
                    xx, yy = ntt.util.ReadAscii2(standardfile)

                    crval1 = pyfits.open(stdpf)[0].header.get('CRVAL1')
                    cd1 = pyfits.open(stdpf)[0].header.get('CD1_1')
                    datastdpf, hdrstdpf = pyfits.getdata(stdpf, 0, header=True)
                    xx1 = np.arange(len(datastdpf[0][0]))
                    aa1 = crval1 + (xx1) * cd1
                    yystd = np.interp(aa1, xx, yy)
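                    # Sensitivity estimate: the observed standard divided by its tabulated
                    # flux is cleaned of the strong telluric bands (roughly 1.10-1.17,
                    # 1.30-1.52 and 1.78-1.96 micron) and of the spectrum edges, then fitted
                    # with a straight line by linear least squares; the fit (yfit) is written
                    # out below as the sens_* calibration file.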
                    rcut = np.compress(
                        ((aa1 < 13000) | (aa1 > 15150)) & ((11700 < aa1) | (aa1 < 11000)) & (aa1 > 10000) &
                        ((aa1 < 17800) | (aa1 > 19600)) & (aa1 < 24000), datastdpf[0][0] / yystd)
                    aa11 = np.compress(
                        ((aa1 < 13000) | (aa1 > 15150)) & ((11700 < aa1) | (aa1 < 11000)) & (aa1 > 10000) &
                        ((aa1 < 17800) | (aa1 > 19600)) & (aa1 < 24000), aa1)
                    yy1clean = np.interp(aa1, aa11, rcut)
                    aa1 = np.array(aa1)
                    yy1clean = np.array(yy1clean)
                    A = np.ones((len(rcut), 2), dtype=float)
                    A[:, 0] = aa11
                    result = np.linalg.lstsq(A, rcut)  # result[0] = [slope, zeropoint]
                    p = [result[0][1], result[0][0]]
                    yfit = ntt.util.pval(aa1, p)

                    pl.clf()
                    pl.ion()
                    pl.plot(aa1, datastdpf[0][0] / yystd,
                            color='red', label='std')
                    pl.plot(aa1, yfit, color='blue', label='fit')
                    pl.legend(numpoints=1, markerscale=1.5)
                    #   sens function sofi spectra
                    outputsens = 'sens_' + stdpf
                    ntt.util.delete(outputsens)
                    datastdpf[0][0] = yfit
                    pyfits.writeto(outputsens, np.float32(datastdpf), hdrstdpf)
                    #################
                    imgsc = re.sub('_ex.fits', '_sc.fits', imgex)
                    ntt.util.delete(imgsc)
                    crval2 = pyfits.open(imgf)[0].header.get('CRVAL1')
                    cd2 = pyfits.open(imgf)[0].header.get('CD1_1')
                    dataf, hdrf = pyfits.getdata(imgf, 0, header=True)
                    xx2 = np.arange(len(dataf[0][0]))
                    aa2 = crval2 + (xx2) * cd2
                    yyscale = np.interp(aa2, aa1, yfit)
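                    # Scale every plane of the multispec extraction (optimal spectrum, raw
                    # spectrum, background, sigma) by the sensitivity interpolated onto this
                    # spectrum's wavelength grid, putting the telluric-corrected spectrum on
                    # the standard's flux scale.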

                    dataf[0][0] = dataf[0][0] / yyscale
                    dataf[1][0] = dataf[1][0] / yyscale
                    dataf[2][0] = dataf[2][0] / yyscale
                    dataf[3][0] = dataf[3][0] / yyscale

                    pyfits.writeto(imgsc, np.float32(dataf), hdrf)
                    ntt.util.updateheader(imgsc, 0, {'SENSPHOT': [outputsens, 'sens used to flux cal'],
                                                     'FILETYPE': [42208, '1D wave,flux calib, tell cor'],
                                                     'TRACE1': [imgf, 'Originating file']})
                    #                ntt.util.updateheader(imgsc,0,{'FILETYPE':[42208,'1D wave,flux calib, tell cor']})
                    #                ntt.util.updateheader(imgsc,0,{'TRACE1':[imgf,'']})
                    print '\n### flux calibrated spectrum= ', imgf, ' with the standard= ', stdpf
                    if imgsc not in outputfile:
                        outputfile.append(imgsc)
                else:
                    print '\n### photometric calibrated not performed \n'

    print '\n### adding keywords for phase 3 ....... '
    reduceddata = ntt.util.rangedata(outputfile)
    f = open('logfile_spec1d_' + str(reduceddata) +
             '_' + str(datenow) + '.raw.list', 'w')
    for img in outputfile:
        if str(img)[-5:] == '.fits':
            hdr = ntt.util.readhdr(img)
            # added for DR2
            if 'NCOMBINE' in hdr:
                _ncomb = ntt.util.readkey3(hdr, 'NCOMBINE')
            else:
                _ncomb = 1.0

            _effron = 12. * \
                (1 / np.sqrt(ntt.util.readkey3(hdr, 'ndit') * _ncomb)) * \
                np.sqrt(np.pi / 2)

            try:
                ntt.util.phase3header(img)  # phase 3 definitions
                ntt.util.updateheader(img, 0, {'quality': ['Final', ''],
                                               'EFFRON': [_effron, 'Effective readout noise per output (e-)']})
                f.write(ntt.util.readkey3(
                    ntt.util.readhdr(img), 'arcfile') + '\n')
            except:
                print 'Warning: ' + img + ' is not a fits file'
    f.close()
    return outputfile, 'logfile_spec1d_' + str(reduceddata) + '_' + str(datenow) + '.raw.list'
Ejemplo n.º 27
0
def floydsautoredu(files,
                   _interactive,
                   _dobias,
                   _doflat,
                   _listflat,
                   _listbias,
                   _listarc,
                   _cosmic,
                   _ext_trace,
                   _dispersionline,
                   liststandard,
                   listatmo,
                   _automaticex,
                   _classify=False,
                   _verbose=False,
                   smooth=1,
                   fringing=1):
    import floyds
    import string, re, os, glob, sys, pickle
    from numpy import array, arange, mean, pi, arccos, sin, cos, argmin
    import pyfits
    from pyraf import iraf
    import datetime
    os.environ["PYRAF_BETA_STATUS"] = "1"
    iraf.set(direc=floyds.__path__[0] + '/')
    _extinctdir = 'direc$standard/extinction/'
    _tel = floyds.util.readkey3(
        floyds.util.readhdr(re.sub('\n', '', files[0])), 'TELID')
    if _tel == 'fts':
        _extinction = 'ssoextinct.dat'
        _observatory = 'sso'
    elif _tel == 'ftn':
        _extinction = 'maua.dat'
        _observatory = 'cfht'
    else:
        sys.exit('ERROR: observatory not recognised')
    dv = floyds.util.dvex()
    scal = pi / 180.
    iraf.noao(_doprint=0)
    iraf.imred(_doprint=0)
    iraf.ccdred(_doprint=0)
    iraf.twodspec(_doprint=0)
    iraf.longslit(_doprint=0)
    iraf.specred(_doprint=0)
    toforget = ['ccdred.flatcombine','ccdred.zerocombine','ccdproc','specred.apall','longslit.identify','longslit.reidentify',\
                    'specred.standard','longslit.fitcoords','specred.transform','specred.response']
    for t in toforget:
        iraf.unlearn(t)
    iraf.longslit.dispaxi = 2
    iraf.longslit.mode = 'h'
    iraf.identify.fwidth = 7
    iraf.identify.order = 2
    iraf.specred.dispaxi = 2
    iraf.specred.mode = 'h'
    iraf.ccdproc.darkcor = 'no'
    iraf.ccdproc.fixpix = 'no'
    iraf.ccdproc.trim = 'no'
    iraf.ccdproc.flatcor = 'no'
    iraf.ccdproc.overscan = 'no'
    iraf.ccdproc.zerocor = 'no'
    iraf.ccdproc.biassec = ''
    iraf.ccdproc.ccdtype = ''
    iraf.ccdred.instrument = "/dev/null"
    if _verbose:
        iraf.ccdred.verbose = 'yes'
        iraf.specred.verbose = 'yes'
    else:
        iraf.specred.verbose = 'no'
        iraf.ccdred.verbose = 'no'
    now = datetime.datetime.now()
    datenow = now.strftime('20%y%m%d%H%M')
    MJDtoday = 55928 + (datetime.date.today() -
                        datetime.date(2012, 01, 01)).days
    outputlist = []
    hdra = floyds.util.readhdr(re.sub('\n', '', files[0]))
    _gain = floyds.util.readkey3(hdra, 'gain')
    _rdnoise = floyds.util.readkey3(hdra, 'ron')
    std, rastd, decstd, magstd = floyds.util.readstandard(
        'standard_floyds_mab.txt')
    _naxis2 = hdra.get('NAXIS2')
    _naxis1 = hdra.get('NAXIS1')
    if not _naxis1: _naxis1 = 2079
    if not _naxis2:
        if not hdra.get('HDRVER'): _naxis2 = 511
        else: _naxis2 = 512
    _overscan = '[2049:' + str(_naxis1) + ',1:' + str(_naxis2) + ']'
    _biassecblu = '[380:2048,325:' + str(_naxis2) + ']'
    _biassecred = '[1:1800,1:350]'
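    # Section definitions for the combined FLOYDS frame: columns from 2049 onward are
    # used as the overscan/bias region, while _biassecblu and _biassecred select the
    # blue and red orders as the trim sections passed to ccdproc below.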
    lista = {}
    objectlist = {}
    biaslist = {}
    flatlist = {}
    flatlistd = {}
    arclist = {}
    max_length = 14
    for img in files:
        hdr0 = floyds.util.readhdr(img)
        if floyds.util.readkey3(hdr0, 'naxis2') >= 500:
            if 'blu' not in lista: lista['blu'] = []
            if 'red' not in lista: lista['red'] = []
            _object0 = floyds.util.readkey3(hdr0, 'object')
            _object0 = re.sub(':', '', _object0)  # colon
            _object0 = re.sub('/', '', _object0)  # slash
            _object0 = re.sub('\s', '', _object0)  # any whitespace
            _object0 = re.sub('\(', '', _object0)  # open parenthesis
            _object0 = re.sub('\[', '', _object0)  # open square bracket
            _object0 = re.sub('\)', '', _object0)  # close parenthesis
            _object0 = re.sub('\]', '', _object0)  # close square bracket
            if len(_object0) > max_length:
                _object0 = _object0[:max_length]
            _date0 = floyds.util.readkey3(hdr0, 'date-night')
            _tel = floyds.util.readkey3(hdr0, 'TELID')
            _type = floyds.util.readkey3(hdr0, 'OBSTYPE')
            if not _type: _type = floyds.util.readkey3(hdr0, 'imagetyp')
            _slit = floyds.util.readkey3(hdr0, 'slit')
            if _type:
                _type = _type.lower()
                if _type in ['sky', 'spectrum', 'expose']:
                    nameoutb = str(_object0) + '_' + _tel + '_' + str(
                        _date0) + '_blue_' + str(_slit) + '_' + str(MJDtoday)
                    nameoutr = str(_object0) + '_' + _tel + '_' + str(
                        _date0) + '_red_' + str(_slit) + '_' + str(MJDtoday)
                elif _type in ['lamp', 'arc', 'l']:
                    nameoutb = 'arc_' + str(_object0) + '_' + _tel + '_' + str(
                        _date0) + '_blue_' + str(_slit) + '_' + str(MJDtoday)
                    nameoutr = 'arc_' + str(_object0) + '_' + _tel + '_' + str(
                        _date0) + '_red_' + str(_slit) + '_' + str(MJDtoday)
                elif _type in ['flat', 'f', 'lampflat', 'lamp-flat']:
                    nameoutb = 'flat_' + str(_object0) + '_' + _tel + '_' + str(
                        _date0) + '_blue_' + str(_slit) + '_' + str(MJDtoday)
                    nameoutr = 'flat_' + str(_object0) + '_' + _tel + '_' + str(
                        _date0) + '_red_' + str(_slit) + '_' + str(MJDtoday)
                else:
                    nameoutb = str(_type.lower(
                    )) + '_' + str(_object0) + '_' + _tel + '_' + str(
                        _date0) + '_blue_' + str(_slit) + '_' + str(MJDtoday)
                    nameoutr = str(_type.lower(
                    )) + '_' + str(_object0) + '_' + _tel + '_' + str(
                        _date0) + '_red_' + str(_slit) + '_' + str(MJDtoday)

                bimg = floyds.util.name_duplicate(img, nameoutb, '')
                rimg = floyds.util.name_duplicate(img, nameoutr, '')
                ####
                floyds.util.delete(bimg)
                floyds.util.delete(rimg)
                iraf.imcopy(img, bimg, verbose='no')
                iraf.imcopy(img, rimg, verbose='no')

                aaa = iraf.hedit(bimg,
                                 'CCDSEC',
                                 delete='yes',
                                 update='yes',
                                 verify='no',
                                 Stdout=1)
                aaa = iraf.hedit(bimg,
                                 'TRIMSEC',
                                 delete='yes',
                                 update='yes',
                                 verify='no',
                                 Stdout=1)
                aaa = iraf.hedit(rimg,
                                 'CCDSEC',
                                 delete='yes',
                                 update='yes',
                                 verify='no',
                                 Stdout=1)
                aaa = iraf.hedit(rimg,
                                 'TRIMSEC',
                                 delete='yes',
                                 update='yes',
                                 verify='no',
                                 Stdout=1)

                iraf.ccdproc(bimg,output='', overscan="yes", trim="yes", zerocor='no', flatcor='no', zero='', ccdtype='',\
                                 fixpix='no', trimsec=_biassecblu, biassec=_overscan, readaxi='line', Stdout=1)
                iraf.ccdproc(rimg,output='', overscan="yes", trim="yes", zerocor='no', flatcor='no', zero='', ccdtype='',\
                                 fixpix='no', trimsec=_biassecred, biassec=_overscan, readaxi='line', Stdout=1)
                floyds.util.updateheader(bimg, 0,
                                         {'GRISM': ['blu', ' blue order']})
                floyds.util.updateheader(rimg, 0,
                                         {'GRISM': ['red', ' red order']})
                floyds.util.updateheader(
                    bimg, 0, {'arcfile': [img, 'file name in the archive']})
                floyds.util.updateheader(
                    rimg, 0, {'arcfile': [img, 'file name in the archive']})
                lista['blu'].append(bimg)
                lista['red'].append(rimg)
            else:
                print 'warning: image type (OBSTYPE) not defined'
    for arm in lista.keys():
        for img in lista[arm]:
            print img
            hdr = floyds.util.readhdr(img)
            _type = floyds.util.readkey3(hdr, 'OBSTYPE')
            if _type == 'EXPOSE':
                _type = floyds.util.readkey3(hdr, 'imagetyp')
                if not _type: _type = 'EXPOSE'

            if _type == 'EXPOSE':
                print 'warning: obstype still EXPOSE, is this old data? run floydsfixheader manually'

            _slit = floyds.util.readkey3(hdr, 'slit')
            _grpid = floyds.util.readkey3(hdr, 'grpid')
            if _type.lower() in ['flat', 'f', 'lamp-flat', 'lampflat']:
                if (arm, _slit) not in flatlist: flatlist[arm, _slit] = {}
                if _grpid not in flatlist[arm, _slit]:
                    flatlist[arm, _slit][_grpid] = [img]
                else:
                    flatlist[arm, _slit][_grpid].append(img)
            elif _type.lower() in ['lamp', 'l', 'arc']:
                if (arm, _slit) not in arclist: arclist[arm, _slit] = {}
                if _grpid not in arclist[arm, _slit]:
                    arclist[arm, _slit][_grpid] = [img]
                else:
                    arclist[arm, _slit][_grpid].append(img)
            elif _type in ['bias', 'b']:
                if arm not in biaslist: biaslist[arm] = []
                biaslist[arm].append(img)
            elif _type.lower() in ['sky', 's', 'spectrum']:
                try:
                    _ra = float(floyds.util.readkey3(hdr, 'RA'))
                    _dec = float(floyds.util.readkey3(hdr, 'DEC'))
                except:
                    ra00 = string.split(floyds.util.readkey3(hdr, 'RA'), ':')
                    ra0, ra1, ra2 = float(ra00[0]), float(ra00[1]), float(
                        ra00[2])
                    _ra = ((ra2 / 60. + ra1) / 60. + ra0) * 15.
                    dec00 = string.split(floyds.util.readkey3(hdr, 'DEC'), ':')
                    dec0, dec1, dec2 = float(dec00[0]), float(dec00[1]), float(
                        dec00[2])
                    if '-' in str(dec0):
                        _dec = (-1) * ((dec2 / 60. + dec1) / 60. +
                                       ((-1) * dec0))
                    else:
                        _dec = (dec2 / 60. + dec1) / 60. + dec0
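                # great-circle separation (arcsec) between this target and every
                # catalogued standard star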
                dd = arccos(
                    sin(_dec * scal) * sin(decstd * scal) +
                    cos(_dec * scal) * cos(decstd * scal) * cos(
                        (_ra - rastd) * scal)) * ((180 / pi) * 3600)
                if _verbose:
                    print _ra, _dec
                    print std[argmin(dd)], min(dd)
                if min(dd) < 5200:
                    _typeobj = 'std'
                    floyds.util.updateheader(
                        img, 0, {'stdname': [std[argmin(dd)], '']})
                    floyds.util.updateheader(
                        img, 0, {'magstd': [float(magstd[argmin(dd)]), '']})
                else:
                    _typeobj = 'obj'
                if _typeobj not in objectlist: objectlist[_typeobj] = {}

                if (arm, _slit) not in objectlist[_typeobj]:
                    objectlist[_typeobj][arm, _slit] = [img]
                else:
                    objectlist[_typeobj][arm, _slit].append(img)
    if _verbose:
        print 'object'
        print objectlist
        print 'flat'
        print flatlist
        print 'bias'
        print biaslist
        print 'arc'
        print arclist

    if liststandard and 'std' in objectlist.keys():
        print 'external standard, raw standard not used'
        del objectlist['std']

    sens = {}
    outputfile = {}
    atmo = {}
    for tpe in objectlist:
        if tpe not in outputfile: outputfile[tpe] = {}
        for setup in objectlist[tpe]:
            if setup not in sens: sens[setup] = []
            print '\n### setup= ', setup, '\n### objects= ', objectlist[tpe][
                setup], '\n'
            for img in objectlist[tpe][setup]:
                print '\n\n### next object= ', img, ' ', floyds.util.readkey3(
                    floyds.util.readhdr(img), 'object'), '\n'
                hdr = floyds.util.readhdr(img)
                archfile = floyds.util.readkey3(hdr, 'arcfile')
                _gain = floyds.util.readkey3(hdr, 'gain')
                _rdnoise = floyds.util.readkey3(hdr, 'ron')
                _grism = floyds.util.readkey3(hdr, 'grism')
                _grpid = floyds.util.readkey3(hdr, 'grpid')
                if archfile not in outputfile[tpe]:
                    outputfile[tpe][archfile] = []
                #####################      flat   ###############
                if _listflat: flatgood = _listflat  # flat list from reducer
                elif setup in flatlist:
                    if _grpid in flatlist[setup]:
                        print '\n###FLAT WITH SAME GRPID'
                        flatgood = flatlist[setup][
                            _grpid]  # flat in the  raw data
                    else:
                        flatgood = []
                        for _grpid0 in flatlist[setup].keys():
                            for ii in flatlist[setup][_grpid0]:
                                flatgood.append(ii)
                else:
                    flatgood = []
                if len(flatgood) != 0:
                    if len(flatgood) > 1:
                        f = open('_oflatlist', 'w')
                        for fimg in flatgood:
                            print fimg
                            f.write(fimg + '\n')
                        f.close()
                        floyds.util.delete('flat' + img)
                        iraf.ccdred.flatcombine('"@_oflatlist"',
                                                output='flat' + img,
                                                combine='average',
                                                reject='none',
                                                ccdtype=' ',
                                                rdnoise=_rdnoise,
                                                gain=_gain,
                                                process='no',
                                                Stdout=1)
                        floyds.util.delete('_oflatlist')
                        flatfile = 'flat' + img
                    elif len(flatgood) == 1:
                        os.system('cp ' + flatgood[0] + ' flat' + img)
                        flatfile = 'flat' + img
                else:
                    flatfile = ''
                ##########################   find arcfile            #######################
                arcfile = ''
                if _listarc:
                    arcfile = [floyds.util.searcharc(img, _listarc)[0]
                               ][0]  # take arc from list
                if not arcfile and setup in arclist.keys():
                    if _grpid in arclist[setup]:
                        print '\n###ARC WITH SAME GRPID'
                        arcfile = arclist[setup][
                            _grpid]  # arc from the raw data
                    else:
                        arcfile = []
                        for _grpid0 in arclist[setup].keys():
                            for ii in arclist[setup][_grpid0]:
                                arcfile.append(ii)
                if arcfile:
                    if len(arcfile) > 1:  # more than one arc available
                        print arcfile
                        #                     _arcclose=floyds.util.searcharc(imgex,arcfile)[0]   # take the closest in time
                        _arcclose = floyds.sortbyJD(arcfile)[
                            -1]  #  take the last arc of the sequence
                        if _interactive.upper() in ['YES', 'Y']:
                            for ii in floyds.floydsspecdef.sortbyJD(arcfile):
                                print '\n### ', ii
                            arcfile = raw_input(
                                '\n### more than one arcfile available, which one to use ['
                                + str(_arcclose) + '] ? ')
                            if not arcfile: arcfile = _arcclose
                        else: arcfile = _arcclose
                    else: arcfile = arcfile[0]
                else: print '\n### Warning: no arc found'

                ###################################################################   rectify
                if setup[0] == 'red':
                    fcfile = floyds.__path__[
                        0] + '/standard/ident/fcrectify_' + _tel + '_red'
                    fcfile1 = floyds.__path__[
                        0] + '/standard/ident/fcrectify1_' + _tel + '_red'
                    print fcfile
                else:
                    fcfile = floyds.__path__[
                        0] + '/standard/ident/fcrectify_' + _tel + '_blue'
                    fcfile1 = floyds.__path__[
                        0] + '/standard/ident/fcrectify1_' + _tel + '_blue'
                    print fcfile
                print img, arcfile, flatfile
                img0 = img
                if img and not img in outputfile[tpe][archfile]:
                    outputfile[tpe][archfile].append(img)
                if arcfile and arcfile not in outputfile[tpe][archfile]:
                    outputfile[tpe][archfile].append(arcfile)
                if flatfile and flatfile not in outputfile[tpe][archfile]:
                    outputfile[tpe][archfile].append(flatfile)

                img, arcfile, flatfile = floyds.floydsspecdef.rectifyspectrum(
                    img, arcfile, flatfile, fcfile, fcfile1, 'no', _cosmic)
                print outputfile
                if img and not img in outputfile[tpe][archfile]:
                    outputfile[tpe][archfile].append(img)
                if arcfile and arcfile not in outputfile[tpe][archfile]:
                    outputfile[tpe][archfile].append(arcfile)
                if flatfile and flatfile not in outputfile[tpe][archfile]:
                    outputfile[tpe][archfile].append(flatfile)
                print outputfile
                ###################################################################         check wavecalib
                if tpe == 'std' or floyds.util.readkey3(
                        floyds.util.readhdr(img), 'exptime') < 300:
                    if setup[0] == 'red':
                        print '\n### check standard wave calib'
                        #print img
                        data, hdr = pyfits.getdata(img, 0, header=True)
                        y = data.mean(1)
                        import numpy as np
                        if np.argmax(y) < 80 and np.argmax(y) > 15:
                            y2 = data[np.argmax(y) - 3:np.argmax(y) +
                                      3].mean(0)
                            yy2 = data[np.argmax(y) - 9:np.argmax(y) -
                                       3].mean(0)
                            floyds.util.delete('_std.fits')
                            pyfits.writeto('_std.fits', np.float32(y2 - yy2),
                                           hdr)
                            #print '_std.fits',_interactive
                            shift = floyds.floydsspecdef.checkwavestd(
                                '_std.fits', _interactive, 2)
                            zro = hdr['CRVAL1']
                            floyds.util.updateheader(
                                img, 0, {'CRVAL1': [zro + int(shift), '']})
                            floyds.util.updateheader(
                                img, 0, {'shift': [float(shift), '']})
                            floyds.util.delete('_std.fits')
                        else:
                            print 'object not found'
                    else:
                        print '\n### warning: wavelength check not possible for short exposures in the blue range'
                else:
                    print '\n### check object wave calib'
                    _skyfile = floyds.__path__[
                        0] + '/standard/ident/sky_' + setup[0] + '.fits'
                    data, hdr = pyfits.getdata(img, 0, header=True)
                    y = data.mean(1)
                    import numpy as np
                    if np.argmax(y) < 80 and np.argmax(y) > 15:
                        yy1 = data[10:np.argmax(y) - 9].mean(0)
                        yy2 = data[np.argmax(y) + 9:-10].mean(0)
                        floyds.util.delete('_sky.fits')
                        pyfits.writeto('_sky.fits', np.float32(yy1 + yy2), hdr)
                        shift = floyds.floydsspecdef.checkwavelength_obj(
                            '_sky.fits', _skyfile, _interactive, 2)
                        floyds.util.delete('_sky.fits')
                        zro = hdr['CRVAL1']
                        floyds.util.updateheader(
                            img, 0, {'CRVAL1': [zro + int(shift), '']})
                        floyds.util.updateheader(img, 0,
                                                 {'shift': [float(shift), '']})
                    else:
                        print 'object not found'
####################################################     flat field
                if img and flatfile and setup[0] == 'red':
                    imgn = 'n' + img
                    hdr1 = floyds.readhdr(img)
                    hdr2 = floyds.readhdr(flatfile)
                    _grpid1 = floyds.util.readkey3(hdr1, 'grpid')
                    _grpid2 = floyds.util.readkey3(hdr2, 'grpid')
                    if _grpid1 == _grpid2:
                        print flatfile, img, setup[0]
                        imgn = floyds.fringing_classicmethod2(
                            flatfile, img, 'no', '*', 15, setup[0])
                    else:
                        print 'Warning: flat not from the same OB (grpid mismatch)'
                        imgex = floyds.floydsspecdef.extractspectrum(
                            img,
                            dv,
                            _ext_trace,
                            _dispersionline,
                            _interactive,
                            tpe,
                            automaticex=_automaticex)
                        floyds.delete('flat' + imgex)
                        iraf.specred.apsum(flatfile,output='flat'+imgex,referen=img,interac='no',find='no',recente='no',resize='no',\
                                           edit='no',trace='no',fittrac='no',extract='yes',extras='no',review='no',backgro='none')
                        fringingmask = floyds.normflat('flat' + imgex)
                        print '\n### fringing correction'
                        print imgex, fringingmask
                        imgex, scale, shift = floyds.correctfringing_auto(
                            imgex, fringingmask)  #  automatic correction
                        shift = int(
                            .5 + float(shift) /
                            3.5)  # shift from correctfringing_auto in Angstrom
                        print '\n##### flat scaling: ', str(scale), str(shift)
                        ########################################################
                        datax, hdrx = pyfits.getdata(flatfile, 0, header=True)
                        xdim = hdrx['NAXIS1']
                        ydim = hdrx['NAXIS2']
                        iraf.specred.apedit.nsum = 15
                        iraf.specred.apedit.width = 100.
                        iraf.specred.apedit.line = 1024
                        iraf.specred.apfind.minsep = 20.
                        iraf.specred.apfind.maxsep = 1000.
                        iraf.specred.apresize.bkg = 'no'
                        iraf.specred.apresize.ylevel = 0.5
                        iraf.specred.aptrace.nsum = 10
                        iraf.specred.aptrace.step = 10
                        iraf.specred.aptrace.nlost = 10
                        floyds.util.delete('n' + flatfile)
                        floyds.util.delete('norm.fits')
                        floyds.util.delete('n' + img)
                        floyds.util.delete(re.sub('.fits', 'c.fits', flatfile))
                        iraf.imcopy(flatfile + '[500:' + str(xdim) + ',*]',
                                    re.sub('.fits', 'c.fits', flatfile),
                                    verbose='no')
                        iraf.imarith(flatfile,
                                     '/',
                                     flatfile,
                                     'norm.fits',
                                     verbose='no')
                        flatfile = re.sub('.fits', 'c.fits', flatfile)
                        floyds.util.delete('n' + flatfile)
                        iraf.unlearn(iraf.specred.apflatten)
                        floyds.floydsspecdef.aperture(flatfile)
                        iraf.specred.apflatten(flatfile,output='n'+flatfile,interac=_interactive,find='no',recenter='no', resize='no',edit='no',trace='no',\
                                               fittrac='no',fitspec='no', flatten='yes', aperture='',\
                                               pfit='fit2d',clean='no',function='legendre',order=15,sample = '*', mode='ql')
                        iraf.imcopy('n' + flatfile,
                                    'norm.fits[500:' + str(xdim) + ',*]',
                                    verbose='no')
                        floyds.util.delete('n' + flatfile)
                        floyds.util.delete('n' + img)
                        iraf.imrename('norm.fits',
                                      'n' + flatfile,
                                      verbose='no')
                        imgn = floyds.floydsspecdef.applyflat(
                            img, 'n' + flatfile, 'n' + img, scale, shift)
                else:
                    imgn = ''

                if imgn and not imgn in outputfile[tpe][archfile]:
                    outputfile[tpe][archfile].append(imgn)

                ###################################################      2D flux calib
                print '####### ' + imgn
                hdr = floyds.util.readhdr(img)
                _sens = ''
                if liststandard:
                    _sens = floyds.util.searchsens(
                        img,
                        liststandard)[0]  # search in the list from reducer
                if not _sens:
                    try:
                        _sens = floyds.util.searchsens(
                            img, sens[setup])[0]  # search in the reduced data
                    except:
                        _sens = floyds.util.searchsens(
                            img, '')[0]  # search in the archive
                if _sens:
                    if _sens[0] == '/':
                        os.system('cp ' + _sens + ' .')
                        _sens = string.split(_sens, '/')[-1]
                    imgd = fluxcalib2d(img, _sens)
                    if imgn: imgdn = fluxcalib2d(imgn, _sens)
                    else: imgdn = ''
                    if _sens not in outputfile[tpe][archfile]:
                        outputfile[tpe][archfile].append(_sens)
                    else:
                        imgdn = ''
                    print '\n### do 2D calibration'
                else:
                    imgd = ''
                    imgdn = ''
################    extraction         ####################################
                if imgdn:
                    try:
                        imgdnex = floyds.floydsspecdef.extractspectrum(
                            imgdn,
                            dv,
                            _ext_trace,
                            _dispersionline,
                            _interactive,
                            tpe,
                            automaticex=_automaticex)
                    except:
                        imgdnex = ''
                else:
                    imgdnex = ''
                if imgd:
                    try:
                        imgdex = floyds.floydsspecdef.extractspectrum(
                            imgd,
                            dv,
                            _ext_trace,
                            _dispersionline,
                            _interactive,
                            tpe,
                            automaticex=_automaticex)
                    except:
                        imgdex = ''
                else:
                    imgdex = ''
                if imgd and not imgd in outputfile[tpe][archfile]:
                    outputfile[tpe][archfile].append(imgd)
                if imgdn and not imgdn in outputfile[tpe][archfile]:
                    outputfile[tpe][archfile].append(imgdn)
                if imgdnex and imgdnex not in outputfile[tpe][archfile]:
                    outputfile[tpe][archfile].append(imgdnex)
                if imgdex and imgdex not in outputfile[tpe][archfile]:
                    outputfile[tpe][archfile].append(imgdex)
                if tpe == 'std':
                    if imgn:
                        try:
                            imgnex = floyds.floydsspecdef.extractspectrum(
                                imgn,
                                dv,
                                _ext_trace,
                                _dispersionline,
                                _interactive,
                                tpe,
                                automaticex=_automaticex)
                        except:
                            imgnex = ''
                    elif img:
                        try:
                            imgnex = floyds.floydsspecdef.extractspectrum(
                                img,
                                dv,
                                _ext_trace,
                                _dispersionline,
                                _interactive,
                                tpe,
                                automaticex=_automaticex)
                        except:
                            imgnex = ''
                    if imgnex:
                        hdrs = floyds.util.readhdr(imgnex)
                        _tel = floyds.util.readkey3(hdrs, 'TELID')
                        try:
                            _outputsens2='sens_'+_tel+'_'+str(floyds.util.readkey3(hdrs,'date-night'))+'_'+str(floyds.util.readkey3(hdrs,'grism'))+\
                                '_'+re.sub('.dat','',floyds.util.readkey3(hdrs,'stdname'))+'_'+str(MJDtoday)
                        except:
                            sys.exit(
                                'Error: missing header -stdname- in standard '
                                + str(standardfile) + '  ')
                        print '\n### compute sensitivity function and atmofile'
                        if setup[0] == 'red':
                            atmofile = floyds.floydsspecdef.telluric_atmo(
                                imgnex)
                            #print atmofile
                            if atmofile and atmofile not in outputfile[tpe][
                                    archfile]:
                                outputfile[tpe][archfile].append(atmofile)
                            stdusedclean = re.sub('_ex.fits', '_clean.fits',
                                                  imgnex)
                            floyds.util.delete(stdusedclean)
                            _function = 'spline3'
                            iraf.specred.sarith(input1=imgnex,
                                                op='/',
                                                input2=atmofile,
                                                output=stdusedclean,
                                                format='multispec')
                            try:
                                _outputsens2 = floyds.floydsspecdef.sensfunction(
                                    stdusedclean, _outputsens2, _function, 8,
                                    _interactive)
                            except:
                                print 'Warning: problem computing sensitivity function'
                                _outputsens2 = ''
                            if setup not in atmo: atmo[setup] = [atmofile]
                            else: atmo[setup].append(atmofile)
                        else:
                            _function = 'spline3'
                            try:
                                _outputsens2 = floyds.floydsspecdef.sensfunction(
                                    imgnex, _outputsens2, _function, 12,
                                    _interactive, '3400:4700')  #,3600:4300')
                            except:
                                print 'Warning: problem computing sensitivity function'
                                _outputsens2 = ''
                        if _outputsens2 and _outputsens2 not in outputfile[
                                tpe][archfile]:
                            outputfile[tpe][archfile].append(_outputsens2)
    ###################################################
    print outputfile
    if 'obj' in outputfile:
        for imm in outputfile['obj']:
            lista = outputfile['obj'][imm]
            lista1 = []
            for i in lista[:]:  # iterate over a copy; matching items are removed from lista
                if '_ex.fits' in i:
                    lista1.append(i)
                    lista.pop(lista.index(i))
            done = []
            for img in lista1:
                _output = ''
                if img not in done:
                    if '_red_' in img:
                        redfile = img
                        if re.sub('_red_', '_blue_', redfile)[1:] in lista1:
                            bluefile = lista1[lista1.index(
                                re.sub('_red_', '_blue_', redfile)[1:])]
                            _output = re.sub('_red_', '_merge_', redfile)
                            try:
                                _output = floyds.floydsspecdef.combspec(
                                    bluefile,
                                    redfile,
                                    _output,
                                    scale=True,
                                    num=None)
                                lista.append(_output)
                                done.append(redfile)
                                done.append(bluefile)
                                floyds.util.delete(bluefile)
                                floyds.util.delete(redfile)
                                floyds.util.delete(redfile[1:])
                            except:
                                done.append(redfile)
                                lista.append(redfile)
                        else:
                            done.append(redfile)
                            lista.append(redfile)
                    elif '_blue_' in img:
                        bluefile = img
                        if 'n' + re.sub('_blue_', '_red_', bluefile) in lista1:
                            redfile = lista1[lista1.index(
                                'n' + re.sub('_blue_', '_red_', bluefile))]
                            _output = re.sub('_red_', '_merge_', redfile)
                            try:
                                _output = floyds.floydsspecdef.combspec(
                                    bluefile,
                                    redfile,
                                    _output,
                                    scale=True,
                                    num=None)
                                lista.append(_output)
                                done.append(redfile)
                                done.append(bluefile)
                                floyds.util.delete(bluefile)
                                floyds.util.delete(redfile)
                                floyds.util.delete(redfile[1:])
                            except:
                                done.append(bluefile)
                                lista.append(bluefile)
                        else:
                            done.append(bluefile)
                            lista.append(bluefile)
            outputfile['obj'][imm] = lista
    readme = floyds.floydsspecauto.writereadme()
    return outputfile, readme
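
The classification of each sky frame above hinges on a sexagesimal-to-degree conversion followed by a great-circle match against the standard-star catalogue. A minimal, self-contained sketch of that logic (the function names are illustrative and not part of the pipeline; rastd and decstd are assumed to be numpy arrays in degrees, as returned by readstandard):

import numpy as np

def sex2deg(ra_str, dec_str):
    # 'HH:MM:SS' -> degrees; '+/-DD:MM:SS' -> degrees
    h, m, s = [float(x) for x in ra_str.split(':')]
    ra = (h + m / 60. + s / 3600.) * 15.
    d, am, asec = dec_str.split(':')
    sign = -1. if d.strip().startswith('-') else 1.
    dec = sign * (abs(float(d)) + float(am) / 60. + float(asec) / 3600.)
    return ra, dec

def nearest_standard(ra, dec, rastd, decstd, radius=5200.):
    # great-circle separation (arcsec) to every catalogued standard;
    # return the index of the closest one if it lies inside the matching radius
    scal = np.pi / 180.
    cosd = (np.sin(dec * scal) * np.sin(decstd * scal) +
            np.cos(dec * scal) * np.cos(decstd * scal) *
            np.cos((ra - rastd) * scal))
    dd = np.arccos(np.clip(cosd, -1., 1.)) * (180. / np.pi) * 3600.
    idx = int(np.argmin(dd))
    return (idx, dd[idx]) if dd[idx] < radius else (None, dd[idx])

In the pipeline a separation below 5200 arcsec flags the frame as a standard ('std'); anything farther is treated as a science object ('obj').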
Ejemplo n.º 28
0
# Global python packages
import pyraf
from pyraf import iraf
import os, shutil, math, subprocess
import pyfits

# Local python packages
from iqutils import *

# IRAF modules
iraf.images()
iraf.noao()
iraf.imred()
iraf.ccdred()
iraf.specred()
iraf.rvsao()

# IRAF variables
yes = iraf.yes
no = iraf.no
INDEF = iraf.INDEF
hedit = iraf.hedit
imgets = iraf.imgets
imcombine = iraf.imcombine

# PyRAF setup
pyrafdir = "python/pyraf/"
pyrafdir_key = 'PYRAFPARS'

if os.environ.has_key(pyrafdir_key):
    pardir = os.environ[pyrafdir_key]
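
A minimal sketch of the PYRAFPARS lookup above with an explicit fallback when the variable is not set (the default path is hypothetical):

import os

pyrafdir = "python/pyraf/"      # default parameter directory (hypothetical)
pyrafdir_key = 'PYRAFPARS'

# use PYRAFPARS when it is set, otherwise fall back to a directory under $HOME
pardir = os.environ.get(pyrafdir_key,
                        os.path.join(os.path.expanduser('~'), pyrafdir))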
Ejemplo n.º 29
0
def efoscfastredu(imglist, _listsens, _listarc, _ext_trace, _dispersionline,
                  _cosmic, _interactive):
    # print "LOGX:: Entering `efoscfastredu` method/function in %(__file__)s"
    # % globals()
    import string
    import os
    import re
    import sys
    os.environ["PYRAF_BETA_STATUS"] = "1"
    try:
        from astropy.io import fits as pyfits
    except:
        import pyfits
    from ntt.util import readhdr, readkey3
    import ntt
    import numpy as np
    dv = ntt.dvex()
    scal = np.pi / 180.
    if not _interactive:
        _interactive = False
        _inter = 'NO'
    else:
        _inter = 'YES'
    from pyraf import iraf

    iraf.noao(_doprint=0, Stdout=0)
    iraf.imred(_doprint=0, Stdout=0)
    iraf.ccdred(_doprint=0, Stdout=0)
    iraf.twodspec(_doprint=0, Stdout=0)
    iraf.longslit(_doprint=0, Stdout=0)
    iraf.onedspec(_doprint=0, Stdout=0)
    iraf.specred(_doprint=0, Stdout=0)
    toforget = [
        'ccdproc', 'imcopy', 'specred.apall', 'longslit.identify',
        'longslit.reidentify', 'specred.standard', 'longslit.fitcoords',
        'onedspec.wspectext'
    ]
    for t in toforget:
        iraf.unlearn(t)
    iraf.ccdred.verbose = 'no'  # not print steps
    iraf.specred.verbose = 'no'  # not print steps
    iraf.ccdproc.darkcor = 'no'
    iraf.ccdproc.fixpix = 'no'
    iraf.ccdproc.flatcor = 'no'
    iraf.ccdproc.zerocor = 'no'
    iraf.ccdproc.ccdtype = ''
    _gain = ntt.util.readkey3(ntt.util.readhdr(imglist[0]), 'gain')
    _ron = ntt.util.readkey3(ntt.util.readhdr(imglist[0]), 'ron')
    iraf.specred.apall.readnoi = _ron
    iraf.specred.apall.gain = _gain
    iraf.specred.dispaxi = 2
    iraf.longslit.dispaxi = 2
    iraf.longslit.mode = 'h'
    iraf.specred.mode = 'h'
    iraf.noao.mode = 'h'
    iraf.ccdred.instrument = "ccddb$kpno/camera.dat"
    iraf.set(direc=ntt.__path__[0] + '/')
    for img in imglist:
        hdr = ntt.util.readhdr(img)
        _tech = ntt.util.readkey3(hdr, 'tech')
        if _tech != 'SPECTRUM':
            sys.exit('error: ' + str(img) + ' is not a spectrum ')
        print '\n####  image name = ' + img + '\n'
        _grism0 = readkey3(hdr, 'grism')
        _filter0 = readkey3(hdr, 'filter')
        _slit0 = readkey3(hdr, 'slit')
        _object0 = readkey3(hdr, 'object')
        _date0 = readkey3(hdr, 'date-night')
        setup = (_grism0, _filter0, _slit0)
        _biassec0 = '[3:1010,1026:1029]'
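        # trim section depends on grism and filter (values are detector pixel ranges)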
        if _grism0 == 'Gr16':
            _trimsec0 = '[100:950,1:950]'
        elif _grism0 == 'Gr13':
            if _filter0 == 'Free':
                _trimsec0 = '[100:950,1:1015]'
            elif _filter0 == 'GG495':
                _trimsec0 = '[100:950,208:1015]'
            elif _filter0 == 'OG530':
                _trimsec0 = '[100:950,300:1015]'
        elif _grism0 == 'Gr11':
            _trimsec0 = '[100:950,5:1015]'
        else:
            _trimsec0 = '[100:950,5:1015]'
        _object0 = re.sub(' ', '', _object0)
        _object0 = re.sub('/', '_', _object0)
        nameout0 = 't' + str(_object0) + '_' + str(_date0)
        for _set in setup:
            nameout0 = nameout0 + '_' + _set
        nameout0 = ntt.util.name_duplicate(img, nameout0, '')
        timg = nameout0
        if os.path.isfile(timg):
            os.system('rm -rf ' + timg)
        iraf.imcopy(img, output=timg)
        iraf.ccdproc(timg,
                     output='',
                     overscan='no',
                     trim='yes',
                     zerocor="no",
                     flatcor="no",
                     readaxi='column',
                     trimsec=str(_trimsec0),
                     biassec=_biassec0,
                     Stdout=1)
        img = timg
        if _listarc:
            arcfile = ntt.util.searcharc(img, _listarc)[0]
        else:
            arcfile = ''
        if not arcfile:
            arcfile = ntt.util.searcharc(img, '')[0]
        else:
            iraf.ccdproc(arcfile,
                         output='t' + arcfile,
                         overscan='no',
                         trim='yes',
                         zerocor="no",
                         flatcor="no",
                         readaxi='column',
                         trimsec=str(_trimsec0),
                         biassec=str(_biassec0),
                         Stdout=1)
            arcfile = 't' + arcfile

        if _cosmic:
            # cosmic ray rejection
            ntt.cosmics.lacos(img,
                              output='',
                              gain=_gain,
                              readn=_ron,
                              xorder=9,
                              yorder=9,
                              sigclip=4.5,
                              sigfrac=0.5,
                              objlim=1,
                              verbose=True,
                              interactive=False)
            print '\n### cosmic ray rejection ........ done '

        if not arcfile:
            print '\n### warning no arcfile \n exit '
        else:
            arcref = ntt.util.searcharc(img, '')[0]
            if arcfile[0] == '/':
                os.system('cp ' + arcfile + ' ' +
                          string.split(arcfile, '/')[-1])
                arcfile = string.split(arcfile, '/')[-1]
            arcref = string.split(arcref, '/')[-1]
            if arcref:
                os.system('cp ' + arcref + ' .')
                arcref = string.split(arcref, '/')[-1]
                if not os.path.isdir('database/'):
                    os.mkdir('database/')
                if os.path.isfile(
                        ntt.util.searcharc(img, '')[1] + '/database/id' +
                        re.sub('.fits', '', arcref)):
                    os.system('cp ' + ntt.util.searcharc(img, '')[1] +
                              '/database/id' + re.sub('.fits', '', arcref) +
                              ' database/')
                iraf.longslit.reidentify(
                    referenc=arcref,
                    images=arcfile,
                    interac=_inter,
                    section='column 10',
                    coordli='direc$standard/ident/Lines_HgCdHeNeAr600.dat',
                    overrid='yes',
                    step=0,
                    newaps='no',
                    nsum=5,
                    nlost=2,
                    mode='h',
                    verbose='no')
            else:
                iraf.longslit.identify(
                    images=arcfile,
                    section='column 10',
                    coordli='direc$standard/ident/Lines_HgCdHeNeAr600.dat',
                    nsum=10,
                    fwidth=7,
                    order=3,
                    mode='h')
            iraf.longslit.reident(
                referenc=arcfile,
                images=arcfile,
                interac='NO',
                section='column 10',
                coordli='direc$standard/ident/Lines_HgCdHeNeAr600.dat',
                overrid='yes',
                step=10,
                newaps='yes',
                nsum=5,
                nlost=2,
                mode='h',
                verbose='no')
            qqq = iraf.longslit.fitcoords(images=re.sub('.fits', '', arcfile),
                                          fitname=re.sub('.fits', '', arcfile),
                                          interac='no',
                                          combine='yes',
                                          databas='database',
                                          function='legendre',
                                          yorder=4,
                                          logfile='logfile',
                                          plotfil='',
                                          mode='h')
            iraf.specred.transform(input=img,
                                   output=img,
                                   minput='',
                                   fitnames=re.sub('.fits', '', arcfile),
                                   databas='database',
                                   x1='INDEF',
                                   x2='INDEF',
                                   y1='INDEF',
                                   y2='INDEF',
                                   flux='yes',
                                   mode='h',
                                   logfile='logfile')
            # ######################  check wavelength calibration ############
            _skyfile = ntt.__path__[0] + '/standard/ident/sky_' + setup[
                0] + '_' + setup[1] + '.fits'
            shift = ntt.efoscspec2Ddef.skyfrom2d(img, _skyfile)
            print '\n###     wavelength check performed ...... spectrum shifted by ' + str(
                shift) + ' Angstrom \n'
            zro = pyfits.open(img)[0].header.get('CRVAL2')
            ntt.util.updateheader(img, 0, {'CRVAL2': [zro + int(shift), '']})
            std, rastd, decstd, magstd = ntt.util.readstandard(
                'standard_efosc_mab.txt')
            hdrt = readhdr(img)
            _ra = readkey3(hdrt, 'RA')
            _dec = readkey3(hdrt, 'DEC')
            _object = readkey3(hdrt, 'object')
            dd = np.arccos(
                np.sin(_dec * scal) * np.sin(decstd * scal) +
                np.cos(_dec * scal) * np.cos(decstd * scal) * np.cos(
                    (_ra - rastd) * scal)) * ((180 / np.pi) * 3600)
            if min(dd) < 100:
                _type = 'stdsens'
                ntt.util.updateheader(img, 0,
                                      {'stdname': [std[np.argmin(dd)], '']})
                ntt.util.updateheader(
                    img, 0, {'magstd': [float(magstd[np.argmin(dd)]), '']})
            else:
                _type = 'obj'
            print '\n###      EXTRACTION USING IRAF TASK APALL \n'
            result = []
            if _type == 'obj':
                imgex = ntt.util.extractspectrum(img, dv, _ext_trace,
                                                 _dispersionline, _interactive,
                                                 _type)
                ntt.util.updateheader(
                    imgex, 0, {'FILETYPE': [22107, 'extracted 1D spectrum ']})
                ntt.util.updateheader(
                    imgex, 0, {
                        'PRODCATG': [
                            'SCIENCE.' +
                            readkey3(readhdr(imgex), 'tech').upper(),
                            'Data product category'
                        ]
                    })
                ntt.util.updateheader(imgex, 0, {'TRACE1': [img, '']})
                result.append(imgex)
                if _listsens:
                    sensfile = ntt.util.searchsens(img, _listsens)[0]
                else:
                    sensfile = ''
                if not sensfile:
                    sensfile = ntt.util.searchsens(img, '')[0]
                if sensfile:
                    imgf = re.sub('.fits', '_f.fits', img)
                    _extinctdir = 'direc$standard/extinction/'
                    _extinction = 'extinction_lasilla.dat'
                    _observatory = 'lasilla'
                    _exptime = readkey3(hdrt, 'exptime')
                    _airmass = readkey3(hdrt, 'airmass')
                    ntt.util.delete(imgf)
                    iraf.specred.calibrate(input=imgex,
                                           output=imgf,
                                           sensiti=sensfile,
                                           extinct='yes',
                                           flux='yes',
                                           ignorea='yes',
                                           extinction=_extinctdir +
                                           _extinction,
                                           observatory=_observatory,
                                           airmass=_airmass,
                                           exptime=_exptime,
                                           fnu='no')
                    hedvec = {
                        'SENSFUN': [
                            string.split(sensfile, '/')[-1],
                            'sensitivity function'
                        ],
                        'FILETYPE':
                        [22208, '1D wavelength and flux calibrated spectrum '],
                        'SNR':
                        [ntt.util.StoN2(imgf, False), 'Average S/N ratio'],
                        'BUNIT':
                        ['erg/cm2/s/Angstrom', 'Flux Calibration Units'],
                        'TRACE1': [imgex, '']
                    }
                    ntt.util.updateheader(imgf, 0, hedvec)
                    imgout = imgf
                    imgd = ntt.efoscspec1Ddef.fluxcalib2d(img, sensfile)
                    ntt.util.updateheader(
                        imgd, 0, {
                            'FILETYPE': [
                                22209,
                                '2D wavelength and flux calibrated spectrum '
                            ]
                        })
                    ntt.util.updateheader(imgd, 0, {'TRACE1': [img, '']})
                    imgasci = re.sub('.fits', '.asci', imgout)
                    ntt.util.delete(imgasci)
                    iraf.onedspec.wspectext(imgout + '[*,1,1]',
                                            imgasci,
                                            header='no')
                    result = result + [imgout, imgd, imgasci]
            else:
                imgex = ntt.util.extractspectrum(img, dv, _ext_trace,
                                                 _dispersionline, _interactive,
                                                 'std')
                imgout = ntt.efoscspec1Ddef.sensfunction(
                    imgex, 'spline3', 6, _inter)
                result = result + [imgout]

    for img in result:
        if img[-5:] == '.fits':
            ntt.util.phase3header(img)  # phase 3 definitions
            ntt.util.airmass(img)  # phase 3 definitions
            ntt.util.updateheader(
                img, 0, {'quality': ['Rapid', 'Final or Rapid reduction']})
    return result
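
Both reductions above finish their wavelength check by adding the measured shift to the wavelength zero point stored in the header (CRVAL1 in the FLOYDS case, CRVAL2 for the EFOSC frames, where the dispersion runs along axis 2). A minimal sketch of that header update, assuming astropy.io.fits is available; the filename in the usage line is hypothetical:

from astropy.io import fits

def apply_wave_shift(image, shift, key='CRVAL1'):
    # add a zero-point shift (Angstrom) to the wavelength keyword in place
    # and record the applied value for later reference
    with fits.open(image, mode='update') as hdulist:
        hdulist[0].header[key] = hdulist[0].header[key] + float(shift)
        hdulist[0].header['SHIFT'] = (float(shift),
                                      'wavelength zero-point shift (Angstrom)')

# apply_wave_shift('tobject_20120101_Gr13_Free_slit1.0.fits', 3.5, key='CRVAL2')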
Ejemplo n.º 30
0
def pre_reduction_dev(*args,**kwargs):

    # parse kwargs
    VERBOSE = kwargs.get('VERBOSE')
    CLOBBER = kwargs.get('CLOBBER')
    FAKE_BASIC_2D = kwargs.get('FAKE_BASIC_2D')
    FULL_CLEAN = kwargs.get('FULL_CLEAN')
    FAST = kwargs.get('FAST')
    CONFIG_FILE = kwargs.get('CONFIG_FILE')
    MAKE_ARCS = kwargs.get('MAKE_ARCS')
    MAKE_FLATS = kwargs.get('MAKE_FLATS')
    QUICK = kwargs.get('QUICK')
    RED_AMP_BAD = kwargs.get('RED_AMP_BAD')
    BLUE_AMP_BAD = kwargs.get('BLUE_AMP_BAD')
    HOST = kwargs.get('HOST')
    TRIM = kwargs.get('TRIM')
    FIX_AMP_OFFSET = kwargs.get('FIX_AMP_OFFSET')
    RED_DIR = kwargs.get('RED_DIR')

    # init iraf stuff
    iraf.noao(_doprint=0)
    iraf.imred(_doprint=0)
    iraf.ccdred(_doprint=0)
    iraf.twodspec(_doprint=0)
    iraf.longslit(_doprint=0)
    iraf.onedspec(_doprint=0)
    iraf.specred(_doprint=0)

    iraf.ccdred.verbose = 'no'
    iraf.specred.verbose = 'no'
    iraf.ccdproc.darkcor = 'no'
    iraf.ccdproc.fixpix = 'no'
    iraf.ccdproc.flatcor = 'no'
    iraf.ccdproc.zerocor = 'no'
    iraf.ccdproc.ccdtype = ''

    iraf.longslit.mode = 'h'
    iraf.specred.mode = 'h'
    iraf.noao.mode = 'h'
    iraf.ccdred.instrument = "ccddb$kpno/camera.dat"

    prereddir= os.path.join(RED_DIR, 'pre_reduced/')

    # set up config
    if CONFIG_FILE:
        with open(CONFIG_FILE,'r') as fin:
            configDict = json.load(fin)
    else:
        STANDARD_STAR_LIBRARY = mu.construct_standard_star_library()
        observations = sorted(glob.glob('*.fits'))

        #TODO: Better first pass at config file, std have exptime < 250s(?)
        # CDK - updated this and placed it after arm/inst_name definition to
        # better generalize configDict
        configDict = {}
        for obsfile in observations:

            arm, inst_dict = instruments.blue_or_red(obsfile)
            inst_name = inst_dict.get('instrument').upper()
            for key in ['SCI','STD','CAL_ARC','CAL_FLAT','CAL']:
                if key not in configDict.keys():
                    configDict[key]={}
                if arm.upper() not in configDict[key].keys():
                    configDict[key][arm.upper()]={}
                    if 'CAL_' in key:
                        full_key = key.replace('CAL_','CALIBRATION_')
                        configDict[key][arm.upper()]={full_key: []}

            # CDK - added this to explicitly skip files that don't need to be
            # reduced.
            if not mu.needs_to_be_reduced(obsfile): continue

            hdu = fits.open(obsfile)
            if 'lris' in inst_dict.get('name'):
                hdu[0].header['OBJECT']=hdu[0].header['TARGNAME']
                hdu.writeto(obsfile, overwrite=True)

            use_ext = inst_dict['use_ext']
            header = hdu[use_ext].header

            imageType = mu.determine_image_type(header, inst_name, STANDARD_STAR_LIBRARY)

            channel, inst_dict = instruments.blue_or_red(obsfile)
            obj = header.get('OBJECT').strip()
            if imageType == 'SCI' or imageType == 'STD':
                if obj in configDict[imageType][channel.upper()].keys():

                    configDict[imageType][channel.upper()][obj].append(obsfile)
                else:
                    configDict[imageType][channel.upper()][obj] = [obsfile]
            if imageType == 'CAL_ARC' and 'foc' not in obsfile:
                configDict[imageType][channel.upper()]['CALIBRATION_ARC'].append(obsfile)
            if imageType == 'CAL_FLAT':
                configDict[imageType][channel.upper()]['CALIBRATION_FLAT'].append(obsfile)


        with open('custom_config.json','w') as fout:
            fout.write(json.dumps(configDict,indent=4))

        outStr = '\n\nOk, no config supplied, so I wrote a first-pass custom_config.json\n'
        outStr += 'Use at your own risk! Manually edit if needed and run again with -c custom_config.json\n'
        outStr += 'You can manually add files to the appropriate lists and rerun.\n'
        outStr += 'WARNING: make sure you rename your config file, or it could get overwritten!\n\n'
        print(outStr)

        sys.exit(1)

    if not FAST:
        # do visual inspection of frames via ds9 windows
        usrResp = ''
        while usrResp != 'C':
            promptStr = '\nYou\'ve opted to display images before kicking off the reduction.\n'
            promptStr += 'At this point you may:\n'
            promptStr += '  (D)isplay the current state of the reduction config\n'
            promptStr += '  (C)ontinue with these files as is\n'
            promptStr += '  (R)emove a file from the current config\n'
            promptStr += '  (A)dd a file to the current config\n'
            promptStr += '  (Q)uit the whole thing. \n'
            promptStr += 'I recommend you (D)isplay and remove unwanted frames from your config file,\n'
            promptStr += '(Q)uit, and then rerun with the updated config file.\nCommand: '
            usrRespOrig = raw_input(promptStr)


            try:
                usrResp = usrRespOrig.strip().upper()
            except Exception as e:
                usrResp = 'nothing'

            # (D)isplay all the images in the lists
            if usrResp == 'D':

                blueArcList = configDict['CAL_ARC']['BLUE']['CALIBRATION_ARC']
                redArcList = configDict['CAL_ARC']['RED']['CALIBRATION_ARC']

                blueFlatList = configDict['CAL_FLAT']['BLUE']['CALIBRATION_FLAT']
                redFlatList = configDict['CAL_FLAT']['RED']['CALIBRATION_FLAT']

                blueStdList = []
                redStdList = []

                blueSciList = []
                redSciList = []

                for targ,imgList in configDict['STD']['BLUE'].items():
                    for file in imgList:
                        blueStdList.append(file)
                for targ,imgList in configDict['STD']['RED'].items():
                    for file in imgList:
                        redStdList.append(file)
                for targ,imgList in configDict['SCI']['BLUE'].items():
                    for file in imgList:
                        blueSciList.append(file)
                for targ,imgList in configDict['SCI']['RED'].items():
                    for file in imgList:
                        redSciList.append(file)

                blueArcDS9 = show_ds9_list(blueArcList,instanceName='BlueArcs')
                redArcDS9 = show_ds9_list(redArcList,instanceName='RedArcs')

                blueFlatDS9 = show_ds9_list(blueFlatList,instanceName='BlueFlats')
                redFlatDS9 = show_ds9_list(redFlatList,instanceName='RedFlats')

                blueStdDS9 = show_ds9_list(blueStdList,instanceName='BlueStandards')
                redStdDS9 = show_ds9_list(redStdList,instanceName='RedStandards')

                blueSciDS9 = show_ds9_list(blueSciList,instanceName='BlueScience')
                redSciDS9 = show_ds9_list(redSciList,instanceName='RedScience')

            if usrResp == 'R':
                configDict = user_adjust_config(configDict,operation='REMOVE')
            if usrResp == 'A':
                configDict = user_adjust_config(configDict,operation='ADD')
            if usrResp == 'Q':
                print('Okay, quitting pre_reduction...')
                sys.exit(1)


    # pre_reduced does not exist, needs to be made
    if not os.path.isdir(prereddir):
        os.mkdir(prereddir)

    if QUICK:
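        # QUICK mode: copy the instrument's archival arc solution and flat
        # response files into pre_reduced/ for immediate use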
        file = glob.glob('*.fits')[0]
        inst = instruments.blue_or_red(file)[1]
        if 'kast' in inst['name']:
            b_inst = instruments.kast_blue
            r_inst = instruments.kast_red
        if 'lris' in inst['name']:
            b_inst = instruments.lris_blue
            r_inst = instruments.lris_red
        if 'goodman' in inst['name']:
            b_inst = instruments.goodman_blue
            r_inst = instruments.goodman_red
        if not os.path.isdir(prereddir+'master_files/'):
            os.mkdir(prereddir+'master_files/')
        b_arcsol = b_inst.get('archive_arc_extracted_id')
        b_resp = b_inst.get('archive_flat_file')
        r_arcsol = r_inst.get('archive_arc_extracted_id')
        r_resp = r_inst.get('archive_flat_file')
        if os.path.isdir(prereddir+'master_files/'):
            os.system('cp ' + b_arcsol + ' ' + 'pre_reduced/master_files/')
            os.system('cp ' + b_resp + ' ' + 'pre_reduced/')
            os.system('cp ' + r_arcsol + ' ' + 'pre_reduced/master_files/')
            os.system('cp ' + r_resp + ' ' + 'pre_reduced/')


    # pre_reduced exists, but we want to clobber/do a clean reduction
    elif FULL_CLEAN:

        promptStr = 'Do you really want to wipe pre_reduced? [y/n]: '
        usrRespOrig = raw_input(promptStr)
        if usrRespOrig and usrRespOrig[0].strip().upper() == 'Y':

            # remove all pre_reduced files
            shutil.rmtree(prereddir)
            os.mkdir(prereddir)

    # pre_reduced exists, need to document what is there
    else:

        # get existing pre_reduced files
        preRedFiles = glob.glob(prereddir+'*.fits')

    # loop over raw files in configDict; if the destination exists, do nothing,
    # otherwise do the bias/reorient/trim/output/etc
    for imgType,typeDict in configDict.items():
        for chan,objDict in typeDict.items():
            for obj,fileList in objDict.items():
                for rawFile in fileList:
                    # try:
                    #     res = basic_2d_proc(rawFile,CLOBBER=CLOBBER)
                    #     if res != 0:
                    #         raise ValueError('Something bad happened in basic_2d_proc on {}'.format(rawFile))
                    # except (Exception,ValueError) as e:
                    #     print('Exception (basic_2d): {}'.format(e))

                    if not FAKE_BASIC_2D:
                        inst = instruments.blue_or_red(rawFile)[1]
                        if inst['name'] == 'lris_blue' or inst['name'] == 'lris_red':
                            fix_lris_header(rawFile)
                            # res = keck_basic_2d.main([rawFile])
                            if imgType != 'CAL_FLAT':
                                print (imgType)
                                res = keck_basic_2d.main([rawFile], TRIM=TRIM, ISDFLAT=False, RED_AMP_BAD=RED_AMP_BAD, MASK_MIDDLE_RED=False, MASK_MIDDLE_BLUE=False, FIX_AMP_OFFSET=FIX_AMP_OFFSET, BLUE_AMP_BAD=BLUE_AMP_BAD)
                            else:
                                print(imgType)
                                res = keck_basic_2d.main([rawFile], TRIM=TRIM, ISDFLAT = True, RED_AMP_BAD=RED_AMP_BAD, MASK_MIDDLE_RED=False, MASK_MIDDLE_BLUE=False, FIX_AMP_OFFSET=FIX_AMP_OFFSET, BLUE_AMP_BAD=BLUE_AMP_BAD)
                        else:
                            res = basic_2d_proc(rawFile,imgType=imgType,CLOBBER=CLOBBER)
                    else:
                        # here we're faking the basic 2D reduction because we've done
                        # specialized 2D reduction (e.g., keck_basic_2d)
                        res = 0
                    if res != 0:
                        raise ValueError('Something bad happened in basic_2d_proc on {}'.format(rawFile))

    # move the std and sci files into their appropriate directories
    try:
        res = reorg_files(configDict,CLOBBER=CLOBBER)
        if res != 0:
            raise ValueError('Something bad happened in reorg_files')
    except (Exception,ValueError) as e:
        print('Exception (reorg): {}'.format(e))


    ### some blocks of code from the original pre_reduction ###
    # combine the arcs
    if MAKE_ARCS:
        # CDK - generalized arc creation method
        for key in configDict['CAL_ARC'].keys():
            list_arc = configDict['CAL_ARC'][key]['CALIBRATION_ARC']
            if len(list_arc)>0:
                first = prereddir+'to{}'.format(list_arc[0])
                br, inst = instruments.blue_or_red(first)
                destFile = prereddir+'ARC_{0}.fits'.format(key.lower())

                util.make_arc(list_arc, destFile, inst, iraf)


    # combine the flats
    if MAKE_FLATS and 'lris' in inst['name']:
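        # LRIS flats are handled per amplifier: build and normalize a response
        # for each amp separately, then stitch the pieces back into a single
        # RESP_blue / RESP_red frame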

        list_flat_b = configDict['CAL_FLAT']['BLUE']['CALIBRATION_FLAT']
        list_flat_r = configDict['CAL_FLAT']['RED']['CALIBRATION_FLAT']
        inter = 'yes'

        b_amp1_list = []
        b_amp2_list = []
        r_amp1_list = []
        r_amp2_list = []
        predir = prereddir+'to'
        for flat in list_flat_b:
            suffix = '.'.join(flat.split('.')[1:])
            b_amp1_file = flat.split('.')[0]+'_amp1.'+suffix
            b_amp2_file = flat.split('.')[0]+'_amp2.'+suffix
            if os.path.exists(predir+b_amp1_file): b_amp1_list.append(b_amp1_file)
            if os.path.exists(predir+b_amp2_file): b_amp2_list.append(b_amp2_file)
        for flat in list_flat_r:
            suffix = '.'.join(flat.split('.')[1:])
            r_amp1_file = flat.split('.')[0]+'_amp1.'+suffix
            r_amp2_file = flat.split('.')[0]+'_amp2.'+suffix
            print(r_amp1_file)
            if os.path.exists(predir+r_amp1_file): r_amp1_list.append(r_amp1_file)
            if os.path.exists(predir+r_amp2_file): r_amp2_list.append(r_amp2_file)

        # blue flats
        if len(list_flat_b) > 0:
            # br, inst = instruments.blue_or_red(list_flat_b[0])
            br, inst = instruments.blue_or_red(prereddir+'to{}'.format(b_amp1_list[0]))
            dispaxis = inst.get('dispaxis')
            iraf.specred.dispaxi = dispaxis
            Flat_blue_amp1 = prereddir+'toFlat_blue_amp1.fits'
            Flat_blue_amp2 = prereddir+'toFlat_blue_amp2.fits'

            flat_list_amp1 = []
            for flat in b_amp1_list:
                flat_list_amp1.append(prereddir+'to'+ flat)
            if os.path.isfile(Flat_blue_amp1):
                os.remove(Flat_blue_amp1)

            # first, combine all the flat files into a master flat
            res = combine_flats(flat_list_amp1,OUTFILE=Flat_blue_amp1,MEDIAN_COMBINE=True)

            # run iraf response
            iraf.specred.response(Flat_blue_amp1,
                                   normaliz=Flat_blue_amp1,
                                   response=prereddir+'RESP_blue_amp1',
                                   interac=inter, thresho='INDEF',
                                   sample='*', naverage=2, function='legendre',
                                   low_rej=5,high_rej=5, order=60, niterat=20,
                                   grow=0, graphic='stdgraph')

            # finally, inspect the flat and mask bad regions
            res = inspect_flat([prereddir+'RESP_blue_amp1.fits'],
                OUTFILE=prereddir+'RESP_blue_amp1.fits', DISPAXIS=dispaxis)

            hdu_amp1 = fits.open(prereddir+'RESP_blue_amp1.fits')
            amp1_flatten = np.asarray(hdu_amp1[0].data).flatten()
            shape1=hdu_amp1[0].data.shape
            dim1 = shape1[0]
            dim2 = shape1[1]

            concat_amps = amp1_flatten

            if not BLUE_AMP_BAD:
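                # the second blue amp is usable: build, normalize, and inspect
                # its flat as well, then append its response to amp1's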
                flat_list_amp2 = []
                for flat in b_amp2_list:
                    flat_list_amp2.append(prereddir+'to'+ flat)
                if os.path.isfile(Flat_blue_amp2):
                    os.remove(Flat_blue_amp2)

                res = combine_flats(flat_list_amp2,OUTFILE=Flat_blue_amp2,MEDIAN_COMBINE=True)
                iraf.specred.response(Flat_blue_amp2,
                                       normaliz=Flat_blue_amp2,
                                       response=prereddir+'RESP_blue_amp2',
                                       interac=inter, thresho='INDEF',
                                       sample='*', naverage=2, function='legendre',
                                       low_rej=5,high_rej=5, order=60, niterat=20,
                                       grow=0, graphic='stdgraph')

                res = inspect_flat([prereddir+'RESP_blue_amp2.fits'],
                    OUTFILE=prereddir+'RESP_blue_amp2.fits', DISPAXIS=dispaxis)
                hdu_amp2 = fits.open(prereddir+'RESP_blue_amp2.fits')
                amp2_flatten = np.asarray(hdu_amp2[0].data).flatten()
                shape2=hdu_amp2[0].data.shape

                dim1+=shape2[0]
                concat_amps = np.concatenate([concat_amps, amp2_flatten])

            print('Output blue response dimensions:',dim1,dim2)
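            # reshape the concatenated per-amp responses back into a single
            # 2D blue response image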
            resp_blue_data = np.reshape(concat_amps, (dim1, dim2))

            header = hdu_amp1[0].header
            if os.path.exists(prereddir+'RESP_blue.fits'):
                os.remove(prereddir+'RESP_blue.fits')

            hdu = fits.PrimaryHDU(resp_blue_data,header)
            hdu.writeto(prereddir+'RESP_blue.fits',output_verify='ignore')

            resp_files = ['RESP_blue_amp1.fits','RESP_blue_amp2.fits']
            for file in resp_files:
                if os.path.exists(prereddir+file):
                    os.remove(prereddir+file)

        # red flats
        if len(list_flat_r) > 0:
            # br, inst = instruments.blue_or_red(list_flat_r[0])
            br, inst = instruments.blue_or_red(prereddir+'to{}'.format(r_amp1_list[0]))
            dispaxis = inst.get('dispaxis')
            iraf.specred.dispaxi = dispaxis
            Flat_red_amp1 = prereddir+'toFlat_red_amp1.fits'
            Flat_red_amp2 = prereddir+'toFlat_red_amp2.fits'


            flat_list_amp1 = []
            for flat in r_amp1_list:
                flat_list_amp1.append(prereddir+'to'+ flat)
            if os.path.isfile(Flat_red_amp1):
                os.remove(Flat_red_amp1)

            flat_list_amp2 = []
            for flat in r_amp2_list:
                flat_list_amp2.append(prereddir+'to'+ flat)
            if os.path.isfile(Flat_red_amp2):
                os.remove(Flat_red_amp2)

            amp2flag = len(flat_list_amp2)>0
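            # amp2flag records whether the second red amplifier produced any
            # flats (the list can be empty, e.g. when that amp is unused)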

            # first, combine all the flat files into a master flat
            if amp2flag:
                res = combine_flats(flat_list_amp1,OUTFILE=Flat_red_amp1,MEDIAN_COMBINE=True)
                res = combine_flats(flat_list_amp2,OUTFILE=Flat_red_amp2,MEDIAN_COMBINE=True)
            else:
                res = combine_flats(flat_list_amp1,OUTFILE=Flat_red_amp1,MEDIAN_COMBINE=True)

            # normalize the red amp1 flat; specred.response writes the result
            # to RESP_red_amp1 (stitched into RESP_red and removed below)
            iraf.specred.response(Flat_red_amp1,
                                  normaliz=Flat_red_amp1,
                                  response=prereddir+'RESP_red_amp1',
                                  interac=inter, thresho='INDEF',
                                  sample='*', naverage=2, function='legendre',
                                  low_rej=5,high_rej=5, order=80, niterat=20,
                                  grow=0, graphic='stdgraph')
            if amp2flag:
                iraf.specred.response(Flat_red_amp2,
                                      normaliz=Flat_red_amp2,
                                      response=prereddir+'RESP_red_amp2',
                                      interac=inter, thresho='INDEF',
                                      sample='*', naverage=2, function='legendre',
                                      low_rej=5,high_rej=5, order=80, niterat=20,
                                      grow=0, graphic='stdgraph')

            # finally, inspect the flat and mask bad regions
            if amp2flag:
                res = inspect_flat([prereddir+'RESP_red_amp1.fits'],
                    OUTFILE=prereddir+'RESP_red_amp1.fits', DISPAXIS=dispaxis)
                res = inspect_flat([prereddir+'RESP_red_amp2.fits'],
                    OUTFILE=prereddir+'RESP_red_amp2.fits', DISPAXIS=dispaxis)
            else:
                res = inspect_flat([prereddir+'RESP_red_amp1.fits'],
                    OUTFILE=prereddir+'RESP_red.fits', DISPAXIS=dispaxis)

            if amp2flag:
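                # stitch the two red-amp responses back into one frame
                # (amp2 rows first, then amp1) and remove the per-amp files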
                hdu_amp1 = fits.open(prereddir+'RESP_red_amp1.fits')
                hdu_amp2 = fits.open(prereddir+'RESP_red_amp2.fits')
                head = hdu_amp1[0].header
                shape1 = hdu_amp1[0].data.shape
                shape2 = hdu_amp2[0].data.shape
                amp1_flatten = np.asarray(hdu_amp1[0].data).flatten()
                amp2_flatten = np.asarray(hdu_amp2[0].data).flatten()
                concat_amps = np.concatenate([amp2_flatten, amp1_flatten])
                xbin, ybin = [int(ibin) for ibin in head['BINNING'].split(',')]
                resp_red_data = np.reshape(concat_amps, (shape1[0]+shape2[0],shape1[1]))

                # set rows 278:294 of the stitched response to unity (a bad
                # strip, presumably along the amp boundary) so it divides out
                resp_red_data[278:294,:] = 1.

                header = hdu_amp1[0].header
                if os.path.isfile(prereddir+'RESP_red.fits'):
                    os.remove(prereddir+'RESP_red.fits')

                hdu = fits.PrimaryHDU(resp_red_data,header)
                hdu.writeto(prereddir+'RESP_red.fits',output_verify='ignore')

                os.remove(prereddir+'RESP_red_amp1.fits')
                os.remove(prereddir+'RESP_red_amp2.fits')
            else:
                os.remove(prereddir+'RESP_red_amp1.fits')

    elif MAKE_FLATS:
        # CDK - generalized flat creation method
        for key in configDict['CAL_FLAT'].keys():
            list_flat = configDict['CAL_FLAT'][key]['CALIBRATION_FLAT']
            if len(list_flat)>0:

                first = prereddir+'to{}'.format(list_flat[0])
                br, inst = instruments.blue_or_red(first)
                dispaxis = inst.get('dispaxis')

                iraf.specred.dispaxi = dispaxis
                inter = True
                Flat_out = prereddir+'toFlat_{0}.fits'.format(br.lower())
                dum = prereddir+'dummy_{0}.fits'.format(br.lower())
                resp = prereddir+'RESP_{0}.fits'.format(br.lower())

                flat_list = []
                norm_list = []
                for flat in list_flat:
                    flat_list.append(prereddir+'to'+flat)
                    norm = flat.replace('.fits','_norm.fits')
                    norm_list.append(prereddir+'to'+norm)
                for file in [Flat_out, dum, resp]:
                    if os.path.exists(file):
                        os.remove(file)

                # first, combine all the flat files into a master flat
                res = combine_flats(flat_list,OUTFILE=Flat_out,
                    MEDIAN_COMBINE=True)
                # combine all the flat files for norm region
                res = combine_flats(norm_list,OUTFILE=dum, MEDIAN_COMBINE=True)

                iraf.specred.response(Flat_out, normaliz=dum, response=resp,
                    interac=inter, thresho='INDEF', sample='*', naverage=2,
                    function='legendre', low_rej=5,high_rej=5, order=60,
                    niterat=20, grow=0, graphic='stdgraph')

                # finally, inspect the flat and mask bad regions
                res = inspect_flat([resp], OUTFILE=resp, DISPAXIS=dispaxis)

                for flat in norm_list:
                    os.remove(flat)

    if HOST:
        host_gals.make_host_metadata(configDict)

    return 0
Ejemplo n.º 31
0
def reduce(imglist, files_arc, files_flat, _cosmic, _interactive_extraction,
           _arc):
    import string
    import os
    import re
    import sys
    import pdb
    os.environ["PYRAF_BETA_STATUS"] = "1"
    try:
        from astropy.io import fits as pyfits
    except:
        import pyfits
    import numpy as np
    import util
    import instruments
    import combine_sides as cs
    import cosmics
    from pyraf import iraf

    dv = util.dvex()
    scal = np.pi / 180.

    if not _interactive_extraction:
        _interactive = False
    else:
        _interactive = True

    if not _arc:
        _arc_identify = False
    else:
        _arc_identify = True

    iraf.noao(_doprint=0)
    iraf.imred(_doprint=0)
    iraf.ccdred(_doprint=0)
    iraf.twodspec(_doprint=0)
    iraf.longslit(_doprint=0)
    iraf.onedspec(_doprint=0)
    iraf.specred(_doprint=0)
    iraf.disp(inlist='1', reference='1')

    toforget = [
        'ccdproc', 'imcopy', 'specred.apall', 'longslit.identify',
        'longslit.reidentify', 'specred.standard', 'longslit.fitcoords',
        'onedspec.wspectext'
    ]
    for t in toforget:
        iraf.unlearn(t)
    iraf.ccdred.verbose = 'no'
    iraf.specred.verbose = 'no'
    iraf.ccdproc.darkcor = 'no'
    iraf.ccdproc.fixpix = 'no'
    iraf.ccdproc.flatcor = 'no'
    iraf.ccdproc.zerocor = 'no'
    iraf.ccdproc.ccdtype = ''

    iraf.longslit.mode = 'h'
    iraf.specred.mode = 'h'
    iraf.noao.mode = 'h'
    iraf.ccdred.instrument = "ccddb$kpno/camera.dat"

    list_arc_b = []
    list_arc_r = []

    for arcs in files_arc:
        hdr = util.readhdr(arcs)
        br, inst = instruments.blue_or_red(arcs)

        if br == 'blue':
            list_arc_b.append(arcs)
        elif br == 'red':
            list_arc_r.append(arcs)
        else:
            errStr = '{} '.format(str(util.readkey3(hdr, 'VERSION')))
            errStr += 'not in database'
            print(errStr)
            sys.exit()

    asci_files = []
    newlist = [[], []]

    print('\n### images to reduce :', imglist)
    #raise TypeError
    for img in imglist:
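        # crude channel split: filenames containing 'b' go to the blue list,
        # those containing 'r' to the red list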
        if 'b' in img:
            newlist[0].append(img)
        elif 'r' in img:
            newlist[1].append(img)

    if len(newlist[1]) < 1:
        newlist = newlist[:-1]
    elif len(newlist[0]) < 1:
        newlist = newlist[1:]
    else:
        sides = raw_input("Reduce which side? ([both]/b/r): ")
        if sides == 'b':
            newlist = newlist[:-1]
        elif sides == 'r':
            newlist = newlist[1:]

    for imgs in newlist:
        hdr = util.readhdr(imgs[0])
        br, inst = instruments.blue_or_red(imgs[0])
        if br == 'blue':
            flat_file = '../RESP_blue'
        elif br == 'red':
            flat_file = '../RESP_red'
        else:
            errStr = 'Not in instrument list'
            print(errStr)
            sys.exit()

        iraf.specred.dispaxi = inst.get('dispaxis')
        iraf.longslit.dispaxi = inst.get('dispaxis')

        _gain = inst.get('gain')
        _ron = inst.get('read_noise')
        iraf.specred.apall.readnoi = _ron
        iraf.specred.apall.gain = _gain

        _object0 = util.readkey3(hdr, 'OBJECT')
        _date0 = util.readkey3(hdr, 'DATE-OBS')

        _object0 = re.sub(' ', '', _object0)
        _object0 = re.sub('/', '_', _object0)
        nameout0 = str(_object0) + '_' + inst.get('name') + '_' + str(_date0)

        nameout0 = util.name_duplicate(imgs[0], nameout0, '')
        timg = nameout0
        print('\n### now processing :', timg, ' for -> ', inst.get('name'))
        if len(imgs) > 1:
            iraf.imcombine(','.join(imgs), output=timg)
        else:
            img = imgs[0]
            if os.path.isfile(timg):
                os.system('rm -rf ' + timg)
            iraf.imcopy(img, output=timg)

        # flat-field the combined image with ccdproc (flatcor only; all other
        # corrections are disabled)
        iraf.ccdproc(timg,
                     output='',
                     overscan='no',
                     trim='no',
                     zerocor="no",
                     flatcor="yes",
                     readaxi='line',
                     flat=flat_file,
                     Stdout=1)

        img = timg

        #raw_input("Press Enter to continue...")
        if _cosmic:
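            # clean cosmic rays on the flat-fielded frame with the cosmics
            # module (L.A.Cosmic-style sigma clipping)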
            print('\n### starting cosmic removal')

            array, header = cosmics.fromfits(img)
            c = cosmics.cosmicsimage(array,
                                     gain=inst.get('gain'),
                                     readnoise=inst.get('read_noise'),
                                     sigclip=5,
                                     sigfrac=0.5,
                                     objlim=2.0)
            c.run(maxiter=5)
            cosmics.tofits('cosmic_' + img, c.cleanarray, header)
            img = 'cosmic_' + img

            print('\n### cosmic removal finished')
        else:
            print(
                '\n### no cosmic removal; continuing with the flat-fielded image'
            )

        if inst.get('arm') == 'blue' and len(list_arc_b) > 0:
            arcfile = list_arc_b[0]
        elif inst.get('arm') == 'red' and len(list_arc_r) > 0:
            arcfile = list_arc_r[0]
        else:
            arcfile = None

        if arcfile is not None and not arcfile.endswith(".fits"):
            arcfile = arcfile + '.fits'

        if not os.path.isdir('database/'):
            os.mkdir('database/')

        if _arc_identify:
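            # copy the raw arc, the archival extracted arc, and its database
            # solution into the working directory so the wavelength solution
            # can be transferred with reidentify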
            os.system('cp ' + arcfile + ' .')
            arcfile = string.split(arcfile, '/')[-1]
            arc_ex = re.sub('.fits', '.ms.fits', arcfile)

            arcref = inst.get('archive_arc_extracted')
            arcref_img = string.split(arcref, '/')[-1]
            arcref_img = arcref_img.replace('.ms.fits', '')
            arcrefid = inst.get('archive_arc_extracted_id')
            os.system('cp ' + arcref + ' .')
            arcref = string.split(arcref, '/')[-1]
            os.system('cp ' + arcrefid + ' ./database')

            aperture = inst.get('archive_arc_aperture')
            os.system('cp ' + aperture + ' ./database')

            print('\n###  arcfile : ', arcfile)
            print('\n###  arcfile extraction : ', arc_ex)
            print('\n###  arc reference : ', arcref)

            # read the spatial binning from the header to set the aperture
            # line (row) for the arc extraction
            tmpHDU = pyfits.open(arcfile)
            header = tmpHDU[0].header
            try:
                spatialBin = int(header['binning'].split(',')[0])
            except KeyError:
                spatialBin = 1
            apLine = 700 // spatialBin
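            # extract the arc with apall along the archival reference aperture,
            # then transfer the archival wavelength solution via reidentify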

            iraf.specred.apall(arcfile,
                               output=arc_ex,
                               ref=arcref_img,
                               line=apLine,
                               nsum=10,
                               interactive='no',
                               extract='yes',
                               find='yes',
                               nfind=1,
                               format='multispec',
                               trace='no',
                               back='no',
                               recen='no')

            iraf.longslit.reidentify(referenc=arcref,
                                     images=arc_ex,
                                     interac='NO',
                                     section=inst.get('section'),
                                     coordli=inst.get('line_list'),
                                     shift='INDEF',
                                     search='INDEF',
                                     mode='h',
                                     verbose='YES',
                                     step=0,
                                     nsum=5,
                                     nlost=2,
                                     cradius=10,
                                     refit='yes',
                                     overrid='yes',
                                     newaps='no')

        print('\n### extraction using apall')
        result = []
        hdr_image = util.readhdr(img)
        _type = util.readkey3(hdr_image, 'object')

        if (_type.startswith("arc") or _type.startswith("dflat")
                or _type.startswith("Dflat") or _type.startswith("Dbias")
                or _type.startswith("Bias")):
            print('\n### warning: this looks like a calibration frame, not a science exposure; exiting')
            sys.exit()
        else:
            imgex = util.extractspectrum(img, dv, inst, _interactive, 'obj')
            print('\n### applying wavelength solution')
            print(arc_ex)
            iraf.disp(inlist=imgex, reference=arc_ex)

        result = result + [imgex] + [timg]

        # asci_files.append(imgasci)
        if not os.path.isdir(_object0 + '_ex/'):
            os.mkdir(_object0 + '_ex/')

        if not _arc_identify:
            util.delete(arcref)
        else:
            util.delete(arcfile)

        util.delete(arc_ex)
        util.delete(img)
        util.delete(imgex)
        util.delete(arcref)
        util.delete('logfile')
        #if _cosmic:
        #util.delete(img[7:])
        #util.delete("cosmic_*")

        os.system('mv ' + 'd' + imgex + ' ' + _object0 + '_ex/')

        use_sens = raw_input('Use archival flux calibration? [y]/n ')
        if use_sens.strip().lower() not in ('n', 'no'):
            sensfile = inst.get('archive_sens')
            os.system('cp ' + sensfile + ' ' + _object0 + '_ex/')
            bstarfile = inst.get('archive_bstar')
            os.system('cp ' + bstarfile + ' ' + _object0 + '_ex/')

    return result
Ejemplo n.º 32
0
def extractspectrum(img,
                    dv,
                    _ext_trace,
                    _dispersionline,
                    _interactive,
                    _type,
                    automaticex=False):
    # print "LOGX:: Entering `extractspectrum` method/function in
    # %(__file__)s" % globals()
    import glob
    import os
    import string
    import sys
    import re
    import lickshane
    import datetime

    MJDtoday = 55927 + (datetime.date.today() -
                        datetime.date(2012, 1, 1)).days
    from pyraf import iraf
    iraf.noao(_doprint=0)
    iraf.imred(_doprint=0)
    iraf.specred(_doprint=0)
    toforget = ['specred.apall', 'specred.transform']
    for t in toforget:
        iraf.unlearn(t)

    dv = lickshane.util.dvex()
    hdr = lickshane.util.readhdr(img)
    _gain = lickshane.util.readkey3(hdr, 'gain')
    _rdnoise = lickshane.util.readkey3(hdr, 'ron')
    _grism = lickshane.util.readkey3(hdr, 'grism')

    _instrument = lickshane.util.readkey3(hdr, 'version')

    imgex = re.sub('.fits', '_ex.fits', img)
    imgfast = re.sub(string.split(img, '_')[-2] + '_', '', img)
    # imgfast=re.sub(str(MJDtoday)+'_','',img)
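    # do a fresh extraction when neither the extracted spectrum nor an aperture
    # database entry exists; otherwise reuse or re-ask depending on the flags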
    if not os.path.isfile(imgex) and not os.path.isfile(
            'database/ap' + re.sub('.fits', '', img)) and not os.path.isfile(
                'database/ap' + re.sub('.fits', '', imgfast)):
        _new = 'yes'
        _extract = 'yes'
    else:
        if automaticex:
            if _interactive in ['Yes', 'yes', 'YES', 'y', 'Y']:
                answ = 'x'
                while answ not in ['o', 'n', 's']:
                    answ = raw_input(
                        '\n### New extraction [n], extraction with old parameters [o], skip extraction [s] ? [o]'
                    )
                    if not answ:
                        answ = 'o'
                if answ == 'o':
                    _new, _extract = 'no', 'yes'
                elif answ == 'n':
                    _new, _extract = 'yes', 'yes'
                else:
                    _new, _extract = 'yes', 'no'
            else:
                _new, _extract = 'no', 'yes'
        else:
            if _interactive in ['Yes', 'yes', 'YES', 'y', 'Y']:
                answ = 'x'
                while answ not in ['y', 'n']:
                    answ = raw_input(
                        '\n### do you want to extract again [[y]/n] ? ')
                    if not answ:
                        answ = 'y'
                if answ == 'y':
                    _new, _extract = 'yes', 'yes'
                else:
                    _new, _extract = 'yes', 'no'
            else:
                _new, _extract = 'yes', 'yes'
    if _extract == 'yes':
        lickshane.util.delete(imgex)
        if _dispersionline:
            question = 'yes'
            while question == 'yes':
                _z1, _z2, goon = lickshane.util.display_image(
                    img, 1, '', '', False)
                dist = raw_input(
                    '\n### At which line do you want to extract the spectrum ['
                    + str(dv['line'][_grism]) + '] ? ')
                if not dist:
                    dist = dv['line'][_grism]
                try:
                    dist = int(dist)
                    question = 'no'
                except:
                    print '\n### input not valid, try again:'
        else:
            dist = dv['line'][_grism]
        if _ext_trace in ['yes', 'Yes', 'YES', True]:
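            # reuse the trace of a previously extracted spectrum instead of
            # re-tracing this object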
            lista = glob.glob('*ex.fits')
            if lista:
                for ii in lista:
                    print ii
                _reference = raw_input(
                    '\n### which object do you want to use for the trace [' +
                    str(lista[0]) + '] ? ')
                if not _reference:
                    _reference = lista[0]
                _reference = re.sub('_ex', '', _reference)
                _fittrac = 'no'
                _trace = 'no'
            else:
                sys.exit('\n### error: no extracted spectra in the directory')
        else:
            _reference = ''
            _fittrac = 'yes'
            _trace = 'yes'
        if _new == 'no':
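            # reuse the existing aperture definition: disable finding,
            # recentering, editing, and tracing so apall re-extracts with
            # the old parameters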
            if not os.path.isfile('database/ap' + re.sub('.fits', '', img)):
                lickshane.util.repstringinfile(
                    'database/ap' + re.sub('.fits', '', imgfast),
                    'database/ap' + re.sub('.fits', '', img),
                    re.sub('.fits', '', imgfast), re.sub('.fits', '', img))
            _find = 'no'
            _recenter = 'no'
            _edit = 'no'
            _trace = 'no'
            _fittrac = 'no'
            _mode = 'h'
            _resize = 'no'
            _review = 'no'
            iraf.specred.mode = 'h'
            _interactive = 'no'
        else:
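            # fresh extraction: let apall find, recenter, edit, and optionally
            # resize the aperture using the dv parameter dictionary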
            iraf.specred.mode = 'q'
            _mode = 'q'
            _find = 'yes'
            _recenter = 'yes'
            _edit = 'yes'
            _review = 'yes'
            _resize = dv[_type]['_resize']

        if _instrument == 'kastb':
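            # per this pipeline, Kast blue disperses along axis 1 and Kast red
            # along axis 2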
            iraf.specred.dispaxi = 1
        elif _instrument == 'kastr':
            iraf.specred.dispaxi = 2

        iraf.specred.apall(img,
                           output=imgex,
                           referen=_reference,
                           trace=_trace,
                           fittrac=_fittrac,
                           find=_find,
                           recenter=_recenter,
                           edit=_edit,
                           nfind=1,
                           extract='yes',
                           backgro='fit',
                           gain=_gain,
                           readnoi=_rdnoise,
                           lsigma=4,
                           usigma=4,
                           format='multispec',
                           b_function='legendre',
                           b_sample=dv[_type]['_b_sample'],
                           clean='yes',
                           pfit='fit1d',
                           lower=dv[_type]['_lower'],
                           upper=dv[_type]['_upper'],
                           t_niter=dv[_type]['_t_niter'],
                           width=dv[_type]['_width'],
                           radius=dv[_type]['_radius'],
                           line=dist,
                           nsum=dv[_type]['_nsum'],
                           t_step=dv[_type]['_t_step'],
                           t_nsum=dv[_type]['_t_nsum'],
                           t_nlost=dv[_type]['_t_nlost'],
                           t_sample=dv[_type]['_t_sample'],
                           resize=_resize,
                           t_order=dv[_type]['_t_order'],
                           weights=dv[_type]['_weights'],
                           interactive=_interactive,
                           review=_review,
                           mode=_mode)

        lickshane.util.repstringinfile(
            'database/ap' + re.sub('.fits', '', img),
            'database/ap' + re.sub('.fits', '', imgfast),
            re.sub('.fits', '', img), re.sub('.fits', '', imgfast))
    else:
        print '\n### skipping new extraction'
    return imgex