Example #1
def calflux(lfits,
            sensitivity='sensfunc.fits',
            extinction="lapalma_extinction.dat",
            observatory='lapalma',
            extinct='yes',
            outfits=None,
            fmt_out='cf%s',
            prefix=None,
            suffix=None,
            in_files='tmp_in.list',
            out_files='tmp_out.list'):

    lfits = cklist(lfits)

    if outfits is None:
        outfits = [fmt_file(fmt_out, fits) for fits in lfits]

    delfiles(outfits)

    wfile(in_files, lfits, prefix=prefix, suffix=suffix)
    wfile(out_files, outfits)

    unlearn('calibrate')
    # output=pref+'//@'+tlist
    calibrate(input='@' + in_files,
              output='@' + out_files,
              sensiti=sensitivity,
              extinct=extinct,
              extinction=extinction,
              observatory=observatory,
              ignoreaps='yes')
    unlearn('calibrate')

    delfiles([in_files, out_files])
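A minimal usage sketch for calflux; file names are illustrative, and the helpers (cklist, wfile, delfiles, fmt_file) plus the IRAF calibrate task are assumed to be imported by the surrounding module:

# Hypothetical call: flux-calibrate two wavelength-calibrated spectra with the
# sensitivity function produced by sensf() (Example #10).
calflux(['lobj0001.fits', 'lobj0002.fits'],
        sensitivity='sensfunc.fits',
        extinction='lapalma_extinction.dat',
        observatory='lapalma')
# With fmt_out='cf%s' the outputs carry a 'cf' prefix (exact naming depends on fmt_file).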
Example #2
def combine(lfits,
            outfits,
            rej='crreject',
            combine='average',
            scale='exposure',
            weight='exposure',
            nlow=1,
            expname='EXPTIME',
            rdnoise=0.0,
            gain=1.0,
            suffix=None,
            prefix=None,
            input_files='tmp_input.list',
            delete_list=True):

    wfile(input_files, lfits, suffix=suffix, prefix=prefix)
    delfiles(outfits)

    unlearn('imcombine')
    imcombine('@' + input_files,
              output=outfits,
              combine=combine,
              reject=rej,
              scale=scale,
              weight=weight,
              expname=expname,
              rdnoise=rdnoise,
              gain=gain,
              nlow=nlow)
    unlearn('imcombine')

    if delete_list:
        delfiles(input_files)
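A usage sketch for combine, with illustrative file names and detector values; wfile, delfiles and the IRAF imcombine task are assumed to be available:

# Hypothetical call: average three cosmic-ray-cleaned exposures, scaling and
# weighting by the EXPTIME keyword and rejecting outliers with crreject (the default).
combine(['crobj0001.fits', 'crobj0002.fits', 'crobj0003.fits'],
        outfits='obj_comb.fits',
        rdnoise=5.0,   # read noise in electrons (example value)
        gain=1.5)      # gain in e-/ADU (example value)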
Example #3
def ccdp(files,
         prefix=None,
         suffix=None,
         input_files='tmp_input.list',
         output_files='tmp_output.list',
         fmt_out='c%s',
         **kwargs):

    # Dictionary with default parameters for ccdproc
    dpar = dict(ccdtype="",
                fixpix='no',
                overscan='yes',
                trim='yes',
                zerocor='yes',
                flatcor='no',
                darkcor='no',
                illumco='no',
                fringec='no',
                zero="",
                illum="",
                function='legendre',
                order=4,
                interactive='no',
                readaxi='column',
                trimsec=None,
                biassec=None,
                flat="")

    # Update the ccdproc parameter dictionary with any overrides provided via kwargs
    dpar.update(kwargs)

    # Make "files" a list, in case there is only one file
    files = cklist(files)

    # Write the list of input files to a temporary file
    wfile(input_files, files, prefix=prefix, suffix=suffix)

    # Create a list of names with the output name after ccdproc is applied
    new_files_ccdproc = [fmt_out % item for item in files]
    # Write the new files list to a file
    wfile(output_files, new_files_ccdproc)

    # Delete the output files if they already exist
    delfiles(new_files_ccdproc)

    # Apply ccdproc
    unlearn('ccdproc')
    ccdproc(images='@' + input_files, output='@' + output_files, **dpar)
    unlearn('ccdproc')

    # Delete the temporary lists
    delfiles([input_files, output_files])
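A usage sketch for ccdp; keywords passed through **kwargs override the dpar defaults. All names and sections below are illustrative:

# Hypothetical call: overscan-subtract, trim and bias-correct a pair of science frames.
ccdp(['obj0001.fits', 'obj0002.fits'],
     zero='bias.fits',                # master bias frame (assumed name)
     trimsec='[10:2000,1:2048]',      # example trim section
     biassec='[2051:2080,1:2048]')    # example overscan section
# Outputs are 'c' + input name (fmt_out='c%s'): cobj0001.fits, cobj0002.fits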
Example #4
def transf(lfits,
           fitnames,
           interp='linear',
           database='database',
           flux='yes',
           y1='INDEF',
           prefix=None,
           suffix=None,
           input_files='tmp_input.list',
           output_files='tmp_output.list',
           fmt_out='l%s',
           dispaxis=None,
           **kwargs):

    fitnames = '.'.join(fitnames.split('.')[0:-1])

    if isinstance(lfits, str) and input_files is None:
        input_files = lfits
        lfits = np.loadtxt(input_files, dtype=str, unpack=True)
    else:
        lfits = cklist(lfits)
        wfile(input_files, lfits, prefix=prefix, suffix=suffix)

    wfits = [fmt_out % fits.split('[')[0] for fits in lfits]
    wfile(output_files, wfits)
    delfiles(wfits)

    if dispaxis is not None:
        for fits in lfits:
            hedit(fits, 'DISPAXIS', dispaxis, add='yes', verify='no')

    unlearn('transform')
    transform(input='@' + input_files,
              output='@' + output_files,
              fitnames=fitnames,
              interp=interp,
              database=database,
              flux=flux,
              y1=y1,
              **kwargs)
    unlearn('transform')

    # for fits in wfits:
    #     hedit(fit, 'WAT2_001', 'wtype=linear label=Wavelength units=Angstroms', add='yes', verify='no')

    delfiles([input_files, output_files])
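A sketch of calling transf after identify/fitcoords have stored a 2D dispersion solution in the database directory; the fit and file names are only examples:

# Hypothetical call: rectify two frames with the wavelength solution 'arc0001'
# (the .fits extension is stripped inside transf before it is passed to transform).
transf(['cobj0001.fits', 'cobj0002.fits'],
       fitnames='arc0001.fits',
       dispaxis=2)    # also write DISPAXIS=2 into the headers first
# Outputs are 'l' + input name (fmt_out='l%s'): lcobj0001.fits, lcobj0002.fits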
Example #5
def back(fits, outfits=None, fmt='b%s', **kwargs):
    # sample: Lines or columns to be used in the background fits. The default "*" selects all lines or columns
    # sample = '200:400,1000:1200'
    dback = dict(axis=1,
                 low_reject=3.,
                 high_reject=3.,
                 inter='yes',
                 order=1,
                 niterat=3)
    dback.update(kwargs)

    if outfits is None:
        outfits = fmt_file(fmt, fits)
    delfiles(outfits)

    unlearn('background')
    background(fits, outfits, **dback)
    unlearn('background')
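A usage sketch for back; everything in **kwargs is forwarded to the IRAF background task, so the sample window below is passed straight through. Values are illustrative:

# Hypothetical call: fit and subtract the sky along axis 2, restricting the fit
# to two background windows and running non-interactively.
back('lcobj0001.fits',
     axis=2,
     sample='200:400,1000:1200',
     inter='no')
# With fmt='b%s' the output name gets a 'b' prefix (exact naming depends on fmt_file).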
Example #6
def setair(files,
           observatory='lapalma',
           equinox='EPOCH',
           ut='UTSTART',
           input_files='tmp_input.list',
           prefix=None,
           suffix=None):

    files = cklist(files)
    wfile(input_files, files, prefix=prefix, suffix=suffix)

    unlearn('setairmass')
    setairmass(images='@' + input_files,
               observatory=observatory,
               equinox=equinox,
               ut=ut)
    unlearn('setairmass')

    delfiles(input_files)
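A short usage sketch for setair; it assumes the frames carry the EPOCH and UTSTART header keywords named in the defaults above:

# Hypothetical call: compute and store the effective airmass of each object frame.
setair(['cobj0001.fits', 'cobj0002.fits'], observatory='lapalma')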
Example #7
def rmcosmic(lfits,
             xorder=5,
             yorder=5,
             sigfrac=2.0,
             niter=5,
             objlim=2,
             sigclip=3.0,
             fmt_in=None,
             fmt_out='cr%s',
             rdnoise=0.0,
             gain=1.0):

    # Download "lacos_spec.cl" from: http://www.astro.yale.edu/dokkum/lacosmic/lacos_spec.cl
    # Save it into a directory ("lacos_directory" for our purposes)
    # In your IRAF "login.cl" include the following line:
    # task    lacos_spec = lacos_directory/lacos_spec.cl

    from iraf import lacos_spec

    lfits = cklist(lfits)
    fmt_in = '%s' if fmt_in is None else fmt_in

    unlearn('lacos_spec')
    for fits in lfits:
        fits = fmt_in % fits
        output = fmt_out % fits
        mask = 'mask%s' % fits
        delfiles([mask, output])

        lacos_spec(fits,
                   output,
                   mask,
                   gain=gain,
                   readn=rdnoise,
                   xorder=xorder,
                   yorder=yorder,
                   sigclip=sigclip,
                   sigfrac=sigfrac,
                   objlim=objlim,
                   niter=niter)

        delfiles(mask)
    unlearn('lacos_spec')
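Once lacos_spec has been declared in login.cl as the comments describe, a call might look like this (file names and detector values are illustrative):

# Hypothetical call: remove cosmic rays from two reduced 2D spectra.
rmcosmic(['cobj0001.fits', 'cobj0002.fits'],
         rdnoise=5.0,   # read noise in electrons (example value)
         gain=1.5)      # gain in e-/ADU (example value)
# Outputs are 'cr' + input name (fmt_out='cr%s'); the temporary masks are deleted.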
Example #8
def cdispcor(fits,
             outfits,
             ref_disp=None,
             flux='yes',
             linearize='yes',
             key_ref='REFSPEC1',
             add='yes',
             verify='no',
             **kwargs):

    # To use the dispersion solution of other image, create a keyword
    # in the header "REFSPEC1" to point to the file with the dispersion
    # solution you want to use
    if ref_disp is not None:
        hedit(fits, key_ref, ref_disp, add=add, verify=verify)

    delfiles(outfits)

    unlearn('dispcor')
    dispcor(fits, outfits, linearize=linearize, flux=flux, **kwargs)
    unlearn('dispcor')
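A sketch of reusing an arc's dispersion solution for a target spectrum: REFSPEC1 is written into the header first and dispcor then applies it. Names are illustrative:

# Hypothetical call: apply the wavelength solution of 'arc0001' to an extracted spectrum.
cdispcor('apobj0001.fits', 'wapobj0001.fits', ref_disp='arc0001')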
Example #9
def unlearn_list(ltask):
    ltask = cklist(ltask)
    for task in ltask:
        unlearn(task)
Example #10
def sensf(lstd,
          sensitivity='sensfunc',
          standards='standards.dat',
          observatory='lapalma',
          graphs='srei',
          caldir='onedstds$iidscal/',
          extinction='lapalma_extinction.dat',
          istand='yes',
          isens='yes',
          order=5,
          logfile="",
          star_names=None,
          **kwargs):
    # La Palma extinction directory
    # http://www.ing.iac.es/Astronomy/observing/conditions/wlext.html
    # Directory of the standard star file
    # iraf/noao/lib/onedstds/iidscal/bd253941.dat
    # iraf/noao/lib/onedstds/iidscal/bd332642.dat
    # iraf/iraf/noao/lib/onedstds/oke1990/feige110.dat
    # caldir = "onedstds$oke1990/"
    # iraf/iraf/noao/lib/obsdb.dat -> observatory

    # The standard-star files shipped with IRAF have very poor resolution; better to use the newer ones from ESO:
    # https://www.eso.org/sci/observing/tools/standards/spectra/okestandards_rev.html
    # Download them from:
    # ftp://ftp.eso.org/pub/stecf/standards/okestan/

    # The 'f' files list wavelength ( A ), flux ( ergs/cm/cm/s/A * 10**16 ) and flux ( milli-Jy ) and bin (A)
    # The file name consists of a prefix 'f' and the star name
    # The 'm' files list wavelength ( A ), AB magnitude and bin (A). The file
    # name consists of a prefix 'm' and the star name --> TAKE 'm' FILES FOR IRAF

    # Then, in the current directory, create a file called "standards.men" with the following format:

    # Standard stars in .:
    #
    # mbd25d4655
    # mbd33d2642

    # Set the variable caldir="" and run standard (sensf)

    sensfile = '%s.fits' % sensitivity
    delfiles([sensfile, standards])

    unlearn('standard')
    unlearn('sensfunc')

    lstd = cklist(lstd)

    if star_names is None:
        star_names = [
            '.'.join(os.path.basename(std).split('.')[0:-1]) for std in lstd
        ]
    else:
        star_names = cklist(star_names)

    for std, star_name in zip(lstd, star_names):
        standard(input=std,
                 output=standards,
                 observatory=observatory,
                 caldir=caldir,
                 extinction=extinction,
                 star_name=star_name,
                 interact=istand)

    # ignoreaps='yes' --> All the observations are combined into a single sensitivity function
    sensfunc(standards=standards,
             sensitivity=sensitivity,
             extinction=extinction,
             observatory=observatory,
             logfile=logfile,
             graphs=graphs,
             interactive=isens,
             order=order,
             ignoreaps='yes',
             **kwargs)

    unlearn('standard')
    unlearn('sensfunc')
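A usage sketch for sensf following the recipe in the comments above (local ESO 'm' files plus a standards.men menu file, with caldir set to an empty string); star and file names are only examples:

# Hypothetical call: derive a sensitivity function from two standard-star spectra.
sensf(['bd25d4655.fits', 'bd33d2642.fits'],
      caldir="",
      star_names=['mbd25d4655', 'mbd33d2642'])
# Produces sensfunc.fits plus the intermediate standards.dat file.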
Example #11
def extract_aperture(fits,
                     line,
                     lower=-5,
                     upper=5,
                     b_sample='-10:-6,6:10',
                     outfits=None,
                     fmt_out='ap%s',
                     fmt_ap='%s.000?.fits',
                     database='database/',
                     rdnoise=0.0,
                     gain=1.0,
                     std=False,
                     **kwargs):

    # line, nsum: The dispersion line (line or column perpendicular to the dispersion axis) and
    # number of adjacent lines (half before and half after unless at the end of the image)
    # used in finding, recentering, resizing, and editing operations. A line of INDEF selects
    # the middle of the image along the dispersion axis. A positive nsum selects a sum of
    # lines and a negative selects a median of lines.

    # lower, upper: Default lower and upper aperture limits relative to the aperture center.
    # These limits are used for apertures found with apfind and when defining the first
    # aperture in apedit.

    # b_sample = "-10:-6,6:10": Default background sample. The sample is given by a set of colon
    # separated ranges each separated by either whitespace or commas. The string "*" refers to
    # all points. Note that the background coordinates are relative to the aperture center and not
    # image pixel coordinates so the endpoints need not be integer.

    # Default parameter dictionary
    dapall = dict(format='onedspec',
                  nsum=18,
                  t_function='spline3',
                  t_order=5,
                  recenter='no',
                  b_naverage=-50,
                  resize='no',
                  gain=gain,
                  readnoise=rdnoise,
                  interac='yes',
                  extras='no',
                  trace='no',
                  references="",
                  apertures="1",
                  b_function='legendre')

    # Default values for standard or object
    # For standard stars: background='fit',  weights='variance', clean='yes'
    # For objects:        background='none', weights='none',     clean='no'
    if std:
        dextra = dict(background='fit', weights='variance', clean='yes')
    else:
        dextra = dict(background='none', weights='none', clean='no')
    dapall.update(dextra)

    # Overwrite default values
    dapall.update(kwargs)

    # outfits name
    if outfits is None:
        outfits = fmt_file(fmt_out, fits)

    # Delete output file
    delfiles(outfits)

    # Delete aperture files created from previous extractions
    ap_file = fmt_file(fmt_ap, '.'.join(outfits.split('.')[0:-1]))
    list_ap_files = glob(ap_file)
    delfiles(list_ap_files)

    # Delete aperture database for this file
    root_name_ap = '.'.join(outfits.split('.fit')[0:-1])
    db_files = glob(os.path.join(database, root_name_ap))
    delfiles(db_files)

    # Note: with optimal (weights='variance') extraction the extracted spectrum can sometimes come out all zeros
    unlearn('apall')
    apall(input=fits,
          output=outfits,
          line=line,
          lower=lower,
          upper=upper,
          b_sample=b_sample,
          **dapall)
    unlearn('apall')
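A usage sketch for extract_aperture; the dispersion line, aperture limits and detector values are illustrative, and std=True switches on background fitting, variance weighting and cleaning:

# Hypothetical call: extract a standard-star aperture centred near dispersion line 1000.
extract_aperture('crcbd25d4655.fits',
                 line=1000,
                 lower=-8, upper=8,
                 rdnoise=5.0, gain=1.5,   # example detector values
                 std=True)
# With format='onedspec' apall writes the spectrum as <output root>.0001.fits,
# which is why old '<root>.000?.fits' files are globbed and deleted above.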
Example #12
def flatccdres(flat_files,
               flat='flat.fits',
               nflat='nflat.fits',
               cflat='cflat.fits',
               iccd='no',
               iresponse='yes',
               combine='average',
               reject='avsigclip',
               ccdtype="",
               process='no',
               fixpix='no',
               overscan='yes',
               zerocor='yes',
               trim='yes',
               darkcor='no',
               flatcor='no',
               illumco='no',
               fringec='no',
               function='legendre',
               corder=5,
               rorder=18,
               readaxi='column',
               biassec=None,
               zero=None,
               trimsec=None,
               low_reject=3,
               high_reject=3,
               scale='mode',
               rdnoise=0.0,
               gain=1.0,
               lflats=None,
               prefix=None,
               suffix=None):

    unlearn('flatcombine')
    unlearn('ccdproc')
    unlearn('response')

    delfiles(flat)
    if lflats is not None:
        wfile(flat_files, lflats, prefix=prefix, suffix=suffix)

    flatcombine('@' + flat_files,
                output=flat,
                combine=combine,
                reject=reject,
                ccdtype=ccdtype,
                scale=scale,
                process=process,
                rdnoise=rdnoise,
                gain=gain)

    delfiles(cflat)
    ccdproc(images=flat,
            output=cflat,
            ccdtype=ccdtype,
            fixpix=fixpix,
            overscan=overscan,
            trim=trim,
            zerocor=zerocor,
            darkcor=darkcor,
            flatcor=flatcor,
            illumco=illumco,
            fringec=fringec,
            zero=zero,
            function=function,
            order=corder,
            interactive=iccd,
            readaxi=readaxi,
            trimsec=trimsec,
            biassec=biassec)

    # Dispersion goes up-down
    # hedit(cfout,'DISPAXIS', 2, add='yes', verify='no')
    delfiles(nflat)
    response(cflat,
             cflat,
             nflat,
             interac=iresponse,
             order=rorder,
             low_reject=low_reject,
             high_reject=high_reject)

    unlearn('flatcombine')
    unlearn('ccdproc')
    unlearn('response')
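A usage sketch for flatccdres; either point flat_files at an existing list file, or pass lflats so the list is written for you. Names and sections are illustrative:

# Hypothetical call: combine raw flats, overscan/bias-correct the combined flat
# and build the normalized response image nflat.fits for later flat-fielding.
flatccdres('flat.list',
           lflats=['flat0001.fits', 'flat0002.fits', 'flat0003.fits'],
           zero='bias.fits',               # master bias frame (assumed name)
           trimsec='[10:2000,1:2048]',     # example trim section
           biassec='[2051:2080,1:2048]',   # example overscan section
           rdnoise=5.0, gain=1.5)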
Example #13
interact = sys.argv[8]

############################
# Unlearn all parameters
############################

print "\n Unlearning the following tasks:\n"

print "gemini\n"
unlearn("gemini")
print "gnirs\n"
unlearn("gnirs")
print "niri\n"
unlearn("niri")
print "f2\n"
unlearn("f2")
print "gemtools\n"
unlearn("gemtools")

print "geomap\n"
unlearn("geomap")
Example #14
    print "ERROR: too many objects! Include one only. Bye!\n"
else:
    print "So using", len(drk), "dark,", len(flt), "flat and", len(
        obj), "objects images, with object's rootname:", rname

getout = raw_input(
    "Is everything ok? Press 'enter' to continue, 'e' to exit.\t")
# option to exit
if getout == 'e' or getout == 'E':
    print "\nexiting then... Bye!\n"
    raise SystemExit

# preparing header fields (input of imagetyp, exptime/darktime)
print "> preparing the image headers"
#l.write("\n> preparing the image headers")
unlearn(iraf.ccdhedit)
iraf.ccdhedit("Dark*.fit", parameter="imagetyp", value="dark")
iraf.ccdhedit("Flat*.fit", parameter="imagetyp", value="flat")
iraf.ccdhedit(rname + "_*.fit", parameter="imagetyp", value="object")
for im in glob.glob('*.fit*'):
    unlearn(iraf.imgets)
    iraf.imgets(im, param="inttime")
    iraf.hedit(im,
               fields="exptime",
               value=iraf.imgets.value,
               add="no",
               addonly="yes",
               delete="no",
               verify="no",
               show="no",
               update="yes")
Example #15
def runCrosscorr(flist, reference, input_pars):
    """ Use cross-correlation to compute shifts.
        Input and reference will ONLY be singly-drizzled images.
    """
    shift_dict = {}

    iraf.set(imtype='fits')

    # Run 'crossdriz' with proper inputs
    iraf.unlearn('crossdriz')
    iraf.unlearn('shiftfind')

    iraf.crossdriz.dinp = input_pars['dinp']
    iraf.crossdriz.dref = input_pars['dref']
    iraf.crossdriz.pad = input_pars['pad']
    iraf.crossdriz.margin = input_pars['margin']
    iraf.crossdriz.tapersz = input_pars['tapersz']
    iraf.crossdriz.mintheta = 0.0
    iraf.crossdriz.maxtheta = 0.0
    iraf.crossdriz.stptheta = 0.1

    iraf.shiftfind.xshift = 0.
    iraf.shiftfind.yshift = 0.
    iraf.shiftfind.boxsize = 'INDEF'
    iraf.shiftfind.fwhm = input_pars['xc_fwhm']
    iraf.shiftfind.ellip = input_pars['xc_ellip']
    iraf.shiftfind.pa = input_pars['xc_pa']
    iraf.shiftfind.fitbox = input_pars['xc_fitbox']
    iraf.shiftfind.kellip = yes
    iraf.shiftfind.kpa = yes
    iraf.shiftfind.tempdir = "tmp$"

    print('#\n# Performing Cross-correlation with CROSSDRIZ \n#')
    print('    Reference image  : ', reference)
    xcorr_root = 'tweak_xcorr'
    xcorr_sroot = 'xcorr_shifts'

    outname = define_output_name(reference)
    shift_dict['order'] = [outname]
    shift_dict[outname] = [0., 0., 0., 1.0]

    indx = 0
    for img in flist:
        print('    Single input     : ', img)
        xcorr_indx = "%03d" % indx
        xcorr_base = xcorr_root + xcorr_indx
        xcorr_shift = xcorr_sroot + xcorr_indx + '.txt'

        # Ensure that products from previous runs are deleted
        xcorr_list = irafglob.irafglob(xcorr_base + '*')
        for file in xcorr_list:
            removeFile(file)

        iraf.crossdriz(image=img, refimage=reference, basename=xcorr_base)

        # Now, run shiftfind to compute the shifts
        removeFile(xcorr_shift)
        print('#\n# Computing shift using SHIFTFIND \n#')
        iraf.shiftfind(cclist=xcorr_base, outfile=xcorr_shift)

        # Read shifts from shiftfind file...
        xcfile = open(xcorr_shift, 'r')
        xclines = xcfile.readlines()
        xcol = 2
        ycol = 4
        for xcl in xclines:
            if xcl[0] == '#' or xcl.strip() == '': continue

            xcl = xcl.split()
            outname = define_output_name(xcl[0])
            shift_dict[outname] = (-float(xcl[xcol]), -float(xcl[ycol]), 0.0,
                                   1.0)

        xcfile.close()

        # Clean up intermediate CROSSDRIZ products
        if input_pars['clean'] == yes:
            removeFile(xcorr_shift)
            xcorr_list = irafglob.irafglob(xcorr_base + '*')
            for file in xcorr_list:
                removeFile(file)
        # Used for indexing xcorr_base rootname for all inputs
        indx += 1
        if img != reference:
            outname = define_output_name(img)
            shift_dict['order'].append(outname)

    return shift_dict
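A hedged driver sketch for runCrosscorr; the input_pars keys are exactly the ones the function reads, while the values and the file names are placeholders (see the crossdriz/shiftfind parameter help for their meaning):

# Hypothetical call, reusing the yes/no booleans imported with the IRAF tasks.
input_pars = {'dinp': no, 'dref': no, 'pad': no,
              'margin': 50, 'tapersz': 50,
              'xc_fwhm': 7.0, 'xc_ellip': 0.05, 'xc_pa': 45.0, 'xc_fitbox': 7,
              'clean': yes}
shifts = runCrosscorr(['img1_single_sci.fits', 'img2_single_sci.fits'],
                      reference='img1_single_sci.fits',
                      input_pars=input_pars)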
Example #16

iraf.gemini.gnirs.nsheaders.instrument = 'f2'
iraf.gemini.gnirs.nsheaders.logfile = "f2.logfile.log"
iraf.gemini.gnirs.nsheaders('f2', logfile="f2.logfile.log")


print "\n Unlearning the following tasks:\n"

print "hselect\n" 
unlearn("hselect")
print "gemseeing\n"  
unlearn("gemseeing")

print "phot\n" 
unlearn("phot")
print "datapars"  
unlearn("datapars")
print "centerpars\n"
unlearn("centerpars")
print "fitskypars\n"
unlearn("fitskypars")
print "photpars\n"
unlearn("photpars")