Example #1
def basic_2d_proc(rawFile,imgType=None,CLOBBER=False):

    # set up file names based on our convention
    oScanFile = 'pre_reduced/o{}'.format(rawFile)
    toScanFile = 'pre_reduced/to{}'.format(rawFile)
    toScanFlat = 'pre_reduced/to{}'.format(rawFile.replace('.fits','_norm.fits'))

    # run the basic 2D stuff on this image if necessary
    if (not os.path.isfile(oScanFile)) or CLOBBER:

        # get the instrument configuration
        inst = instruments.blue_or_red(rawFile)[1]
        iraf.specred.dispaxi = inst.get('dispaxis')
        iraf.longslit.dispaxi = inst.get('dispaxis')
        _biassec0 = inst.get('biassec')
        _trimsec0 = inst.get('trimsec')
        _flatsec0 = inst.get('flatsec')

        # remove destination files
        for file in [oScanFile,toScanFile]:
            if os.path.isfile(file) and CLOBBER:
                os.remove(file)

        # check the ultimate destination file, since intermediates get deleted
        if not os.path.isfile(toScanFile):
            if inst.get('instrument')=='kast':

                # do Lick specific bias operations
                util.kastbias(rawFile,oScanFile)
            elif inst.get('instrument')=='binospec':

                util.binospecbias(rawFile,oScanFile,inst)

            # general case: fall back to IRAF ccdproc overscan subtraction (not well tested)
            else:
                iraf.ccdproc(rawFile, output=oScanFile,
                             overscan='yes', trim='yes',
                             zerocor="no", flatcor="no",readaxi='line',
                             trimsec=str(_trimsec0), biassec=str(_biassec0),
                             Stdout=1)

            # trim (same trimming operation for all telescopes)
            iraf.ccdproc(oScanFile, output=toScanFile,
                        overscan='no', trim='yes', zerocor="no", flatcor="no",
                        readaxi='line',trimsec=str(_trimsec0), Stdout=1)

            #create trimmed flats for norm region
            if imgType == 'CAL_FLAT' and inst.get('instrument') in ('kast', 'binospec'):
                iraf.ccdproc(oScanFile, output=toScanFlat,
                        overscan='no', trim='yes', zerocor="no", flatcor="no",
                        readaxi='line', trimsec=str(_flatsec0), Stdout=1)
            os.remove(oScanFile)

    return 0
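
# Usage sketch (hypothetical raw file name): overscan-subtract and trim one frame,
# writing pre_reduced/to<rawFile> (the intermediate o-file is deleted at the end);
# CLOBBER re-creates outputs that already exist.
#   basic_2d_proc('r0042.fits', imgType='SCI', CLOBBER=True)
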
def needs_to_be_reduced(file):

    hdu = fits.open(file, mode='readonly')
    header = hdu[0].header
    arm, inst_dict = instruments.blue_or_red(file)
    inst = inst_dict['name']

    # Select against imaging mode data for LRIS
    if 'LRIS' in inst.upper():
        if 'GRISTRAN' in header and header['GRISTRAN'].lower() == 'stowed':
            return False

    return True
def reduce(imglist, files_arc, files_flat, _cosmic, _interactive_extraction,
           _arc):
    import string
    import os
    import re
    import sys
    import pdb
    os.environ["PYRAF_BETA_STATUS"] = "1"
    try:
        from astropy.io import fits as pyfits
    except ImportError:
        import pyfits
    import numpy as np
    import util
    import instruments
    import combine_sides as cs
    import cosmics
    from pyraf import iraf

    dv = util.dvex()
    scal = np.pi / 180.

    if not _interactive_extraction:
        _interactive = False
    else:
        _interactive = True

    if not _arc:
        _arc_identify = False
    else:
        _arc_identify = True

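    # load the IRAF packages used for long-slit spectroscopic reduction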
    iraf.noao(_doprint=0)
    iraf.imred(_doprint=0)
    iraf.ccdred(_doprint=0)
    iraf.twodspec(_doprint=0)
    iraf.longslit(_doprint=0)
    iraf.onedspec(_doprint=0)
    iraf.specred(_doprint=0)
    iraf.disp(inlist='1', reference='1')

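    # reset these tasks to their default (unlearned) parameters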
    toforget = [
        'ccdproc', 'imcopy', 'specred.apall', 'longslit.identify',
        'longslit.reidentify', 'specred.standard', 'longslit.fitcoords',
        'onedspec.wspectext'
    ]
    for t in toforget:
        iraf.unlearn(t)
    iraf.ccdred.verbose = 'no'
    iraf.specred.verbose = 'no'
    iraf.ccdproc.darkcor = 'no'
    iraf.ccdproc.fixpix = 'no'
    iraf.ccdproc.flatcor = 'no'
    iraf.ccdproc.zerocor = 'no'
    iraf.ccdproc.ccdtype = ''

    iraf.longslit.mode = 'h'
    iraf.specred.mode = 'h'
    iraf.noao.mode = 'h'
    iraf.ccdred.instrument = "ccddb$kpno/camera.dat"

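    # sort the arc frames into blue and red lists by spectrograph arm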
    list_arc_b = []
    list_arc_r = []

    for arcs in files_arc:
        hdr = util.readhdr(arcs)
        br, inst = instruments.blue_or_red(arcs)

        if br == 'blue':
            list_arc_b.append(arcs)
        elif br == 'red':
            list_arc_r.append(arcs)
        else:
            errStr = '{} '.format(str(util.readkey3(hdr, 'VERSION')))
            errStr += 'not in database'
            print(errStr)
            sys.exit()

    asci_files = []
    newlist = [[], []]

    print('\n### images to reduce :', imglist)
    #raise TypeError
    for img in imglist:
        if 'b' in img:
            newlist[0].append(img)
        elif 'r' in img:
            newlist[1].append(img)

    if len(newlist[1]) < 1:
        newlist = newlist[:-1]
    elif len(newlist[0]) < 1:
        newlist = newlist[1:]
    else:
        sides = raw_input("Reduce which side? ([both]/b/r): ")
        if sides == 'b':
            newlist = newlist[:-1]
        elif sides == 'r':
            newlist = newlist[1:]

    for imgs in newlist:
        hdr = util.readhdr(imgs[0])
        br, inst = instruments.blue_or_red(imgs[0])
        if br == 'blue':
            flat_file = '../RESP_blue'
        elif br == 'red':
            flat_file = '../RESP_red'
        else:
            errStr = 'Not in instrument list'
            print(errStr)
            sys.exit()

        iraf.specred.dispaxi = inst.get('dispaxis')
        iraf.longslit.dispaxi = inst.get('dispaxis')

        _gain = inst.get('gain')
        _ron = inst.get('read_noise')
        iraf.specred.apall.readnoi = _ron
        iraf.specred.apall.gain = _gain

        _object0 = util.readkey3(hdr, 'OBJECT')
        _date0 = util.readkey3(hdr, 'DATE-OBS')

        _object0 = re.sub(' ', '', _object0)
        _object0 = re.sub('/', '_', _object0)
        nameout0 = str(_object0) + '_' + inst.get('name') + '_' + str(_date0)

        nameout0 = util.name_duplicate(imgs[0], nameout0, '')
        timg = nameout0
        print('\n### now processing :', timg, ' for -> ', inst.get('name'))
        if len(imgs) > 1:
            img_str = ''
            for i in imgs:
                img_str = img_str + i + ','
            iraf.imcombine(img_str, output=timg)
        else:
            img = imgs[0]
            if os.path.isfile(timg):
                os.system('rm -rf ' + timg)
            iraf.imcopy(img, output=timg)

        # should just do this by hand
        iraf.ccdproc(timg,
                     output='',
                     overscan='no',
                     trim='no',
                     zerocor="no",
                     flatcor="yes",
                     readaxi='line',
                     flat=flat_file,
                     Stdout=1)

        img = timg

        #raw_input("Press Enter to continue...")
        if _cosmic:
            print('\n### starting cosmic removal')

            array, header = cosmics.fromfits(img)
            c = cosmics.cosmicsimage(array,
                                     gain=inst.get('gain'),
                                     readnoise=inst.get('read_noise'),
                                     sigclip=5,
                                     sigfrac=0.5,
                                     objlim=2.0)
            c.run(maxiter=5)
            cosmics.tofits('cosmic_' + img, c.cleanarray, header)
            img = 'cosmic_' + img

            print('\n### cosmic removal finished')
        else:
            print('\n### No cosmic removal; saving normalized image for inspection')

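        # use the first arc frame taken with the same arm as this image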
        if inst.get('arm') == 'blue' and len(list_arc_b) > 0:
            arcfile = list_arc_b[0]
        elif inst.get('arm') == 'red' and len(list_arc_r) > 0:
            arcfile = list_arc_r[0]
        else:
            arcfile = None

        if arcfile is not None and not arcfile.endswith(".fits"):
            arcfile = arcfile + '.fits'

        if not os.path.isdir('database/'):
            os.mkdir('database/')

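        # extract the arc with apall against an archival reference aperture,
        # then reidentify its lines to carry over the wavelength solution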
        if _arc_identify:
            os.system('cp ' + arcfile + ' .')
            arcfile = string.split(arcfile, '/')[-1]
            arc_ex = re.sub('.fits', '.ms.fits', arcfile)

            arcref = inst.get('archive_arc_extracted')
            arcref_img = string.split(arcref, '/')[-1]
            arcref_img = arcref_img.replace('.ms.fits', '')
            arcrefid = inst.get('archive_arc_extracted_id')
            os.system('cp ' + arcref + ' .')
            arcref = string.split(arcref, '/')[-1]
            os.system('cp ' + arcrefid + ' ./database')

            aperture = inst.get('archive_arc_aperture')
            os.system('cp ' + aperture + ' ./database')

            print('\n###  arcfile : ', arcfile)
            print('\n###  arcfile extraction : ', arc_ex)
            print('\n###  arc reference : ', arcref)

            # read some metadata to get the row right
            tmpHDU = pyfits.open(arcfile)
            header = tmpHDU[0].header
            try:
                spatialBin = int(header['binning'].split(',')[0])
            except KeyError:
                spatialBin = 1
            apLine = 700 // spatialBin

            iraf.specred.apall(arcfile,
                               output=arc_ex,
                               ref=arcref_img,
                               line=apLine,
                               nsum=10,
                               interactive='no',
                               extract='yes',
                               find='yes',
                               nfind=1,
                               format='multispec',
                               trace='no',
                               back='no',
                               recen='no')

            iraf.longslit.reidentify(referenc=arcref,
                                     images=arc_ex,
                                     interac='NO',
                                     section=inst.get('section'),
                                     coordli=inst.get('line_list'),
                                     shift='INDEF',
                                     search='INDEF',
                                     mode='h',
                                     verbose='YES',
                                     step=0,
                                     nsum=5,
                                     nlost=2,
                                     cradius=10,
                                     refit='yes',
                                     overrid='yes',
                                     newaps='no')

        print('\n### extraction using apall')
        result = []
        hdr_image = util.readhdr(img)
        _type = util.readkey3(hdr_image, 'object')

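        # refuse to extract calibration frames (arcs, flats, biases)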
        if (_type.startswith("arc") or _type.startswith("dflat")
                or _type.startswith("Dflat") or _type.startswith("Dbias")
                or _type.startswith("Bias")):
            print('\n### warning: this frame looks like a calibration image, exiting')
            sys.exit()
        else:
            imgex = util.extractspectrum(img, dv, inst, _interactive, 'obj')
            print('\n### applying wavelength solution')
            print(arc_ex)
            iraf.disp(inlist=imgex, reference=arc_ex)

        result = result + [imgex] + [timg]

        # asci_files.append(imgasci)
        if not os.path.isdir(_object0 + '_ex/'):
            os.mkdir(_object0 + '_ex/')

        if not _arc_identify:
            util.delete(arcref)
        else:
            util.delete(arcfile)

        util.delete(arc_ex)
        util.delete(img)
        util.delete(imgex)
        util.delete(arcref)
        util.delete('logfile')
        #if _cosmic:
        #util.delete(img[7:])
        #util.delete("cosmic_*")

        os.system('mv ' + 'd' + imgex + ' ' + _object0 + '_ex/')

        use_sens = raw_input('Use archival flux calibration? [y]/n ')
        if use_sens.strip().lower() not in ('n', 'no'):
            sensfile = inst.get('archive_sens')
            os.system('cp ' + sensfile + ' ' + _object0 + '_ex/')
            bstarfile = inst.get('archive_bstar')
            os.system('cp ' + bstarfile + ' ' + _object0 + '_ex/')

    return result
Example #4
def main():

    description = "> Performs pre-reduction steps"
    usage = "%prog    \t [option] \n Recommended syntax: %prog -i -c"

    parser = OptionParser(usage=usage, description=description, version="0.1")
    option, args = parser.parse_args()

    iraf.noao(_doprint=0)
    iraf.imred(_doprint=0)
    iraf.ccdred(_doprint=0)
    iraf.twodspec(_doprint=0)
    iraf.longslit(_doprint=0)
    iraf.onedspec(_doprint=0)
    iraf.specred(_doprint=0)

    iraf.ccdred.verbose = 'no'
    iraf.specred.verbose = 'no'
    iraf.ccdproc.darkcor = 'no'
    iraf.ccdproc.fixpix = 'no'
    iraf.ccdproc.flatcor = 'no'
    iraf.ccdproc.zerocor = 'no'
    iraf.ccdproc.ccdtype = ''

    iraf.longslit.mode = 'h'
    iraf.specred.mode = 'h'
    iraf.noao.mode = 'h'
    iraf.ccdred.instrument = "ccddb$kpno/camera.dat"

    mkarc = raw_input("Make arc? ([y]/n): ")
    mkflat = raw_input("Make flat? ([y]/n): ")

    if len(args) > 1:
        files = []
        sys.argv.append('--help')
        option, args = parser.parse_args()
        sys.exit()
    elif len(args) == 1:
        files = util.readlist(args[0])
        sys.exit()
    else:
        listfile = glob.glob('*.fits')
        files_science = []
        files_arc = []
        files_dflat = []
        #print 'checking your files ...'
        for img in listfile:
            _type = ''
            hdr0 = util.readhdr(img)
            _type = util.readkey3(hdr0, 'object')
            if 'flat' in _type.lower():
                files_dflat.append(img)
            elif 'arc' not in _type.lower() and 'arc' not in img.lower():
                files_science.append(img)
        if mkarc != 'n':
            mkarc_b = raw_input(
                "List blue arc files to combine (.fits will be added): "
            ).split()
            mkarc_r = raw_input(
                "List red arc files to combine (.fits will be added): ").split(
                )
            for arc in mkarc_b:
                files_arc.append(arc + '.fits')
            for arc in mkarc_r:
                files_arc.append(arc + '.fits')

    if mkarc != 'n':
        list_arc_b = []
        list_arc_r = []
        for arcs in files_arc:
            if instruments.blue_or_red(arcs)[0] == 'blue':
                list_arc_b.append(arcs)
            elif instruments.blue_or_red(arcs)[0] == 'red':
                list_arc_r.append(arcs)
            else:
                sys.exit()

    if mkflat != 'n':
        list_flat_b = []
        list_flat_r = []
        for dflats in files_dflat:
            if instruments.blue_or_red(dflats)[0] == 'blue':
                list_flat_b.append(dflats)
            elif instruments.blue_or_red(dflats)[0] == 'red':
                list_flat_r.append(dflats)
            else:
                sys.exit()

    # make pre_reduced if it doesn't exist
    if not os.path.isdir('pre_reduced/'):
        os.mkdir('pre_reduced/')

    # log the existing processed files (need to verify this works if pre_reduced is empty...)
    pfiles = []
    new_files = []
    for root, dirnames, filenames in os.walk('pre_reduced'):
        for file in filenames:
            if file.startswith('to'):
                pfiles.append(file)
    print(pfiles)

    # loop over each image in pre_reduced
    for img in listfile:
        hdr = util.readhdr(img)
        targ = util.readkey3(hdr, 'object')

        # if the file is not a processed file, run the overscan+trim code
        if 'to' + img not in pfiles:

            # if the file is a science file, grab the name for later
            if 'arc' not in targ.lower() and 'flat' not in targ.lower():
                new_files.append(img)
                print('Adding data for: ' + targ)

            inst = instruments.blue_or_red(img)[1]

            iraf.specred.dispaxi = inst.get('dispaxis')
            iraf.longslit.dispaxi = inst.get('dispaxis')

            _biassec0 = inst.get('biassec')
            _trimsec0 = inst.get('trimsec')

            ######################################################################
            #
            # JB: this chunk of code needs attention
            # It seems incredibly hacky for anything but Kast...
            #
            # overscan
            if not img.startswith('o') and inst.get('observatory') == 'lick':
                if os.path.isfile('pre_reduced/o' + img):
                    os.remove('pre_reduced/o' + img)
                util.kastbias(img, 'pre_reduced/o' + img)
            elif not img.startswith('o') and inst.get('observatory') != 'lick':
                if os.path.isfile('pre_reduced/o' + img):
                    os.remove('pre_reduced/o' + img)
                os.system('cp ' + img + ' ' + 'pre_reduced/' + img)

            # trim
            if not img.startswith('t') and inst.get('observatory') == 'lick':
                if os.path.isfile('pre_reduced/to' + img):
                    os.remove('pre_reduced/to' + img)
                iraf.ccdproc('pre_reduced/o' + img,
                             output='pre_reduced/to' + img,
                             overscan='no',
                             trim='yes',
                             zerocor="no",
                             flatcor="no",
                             readaxi='line',
                             trimsec=str(_trimsec0),
                             Stdout=1)

            elif not img.startswith('t') and inst.get('observatory') != 'lick':
                if os.path.isfile('pre_reduced/to' + img):
                    os.remove('pre_reduced/to' + img)
                iraf.ccdproc('pre_reduced/' + img,
                             output='pre_reduced/to' + img,
                             overscan='yes',
                             trim='yes',
                             zerocor="no",
                             flatcor="no",
                             readaxi='line',
                             trimsec=str(_trimsec0),
                             biassec=str(_biassec0),
                             Stdout=1)

    # combine the arcs
    if mkarc != 'n':

        # blue arcs
        if len(list_arc_b) > 0:
            if len(list_arc_b) == 1:
                arc_blue = list_arc_b[0]
                os.system('cp ' + 'pre_reduced/to' + arc_blue + ' ' +
                          'pre_reduced/ARC_blue.fits')
            else:
                arc_str = ''
                for arc in list_arc_b:
                    arc_str = arc_str + 'pre_reduced/to' + arc + ','
                if os.path.isfile('pre_reduced/ARC_blue.fits'):
                    os.remove('pre_reduced/ARC_blue.fits')
                iraf.imcombine(arc_str, output='pre_reduced/ARC_blue.fits')

        # red arcs
        if len(list_arc_r) > 0:
            if len(list_arc_r) == 1:
                arc_red = list_arc_r[0]
                os.system('cp ' + 'pre_reduced/to' + arc_red + ' ' +
                          'pre_reduced/ARC_red.fits')
            else:
                arc_str = ''
                for arc in list_arc_r:
                    arc_str = arc_str + 'pre_reduced/to' + arc + ','
                if os.path.isfile('pre_reduced/ARC_red.fits'):
                    os.remove('pre_reduced/ARC_red.fits')
                iraf.imcombine(arc_str, output='pre_reduced/ARC_red.fits')

    # combine the flats
    if mkflat != 'n':
        inter = 'yes'

        # blue flats
        if len(list_flat_b) > 0:
            br, inst = instruments.blue_or_red(list_flat_b[0])
            iraf.specred.dispaxi = inst.get('dispaxis')
            if len(list_flat_b) == 1:
                # Flat_blue = 'pre_reduced/to'+ list_flat_b[0]
                Flat_blue = list_flat_b[0]
            else:
                flat_str = ''
                for flat in list_flat_b:
                    flat_str = flat_str + 'pre_reduced/to' + flat + ','
                #subsets = 'no'
                if os.path.isfile('pre_reduced/toFlat_blue'):
                    os.remove('pre_reduced/toFlat_blue')
                iraf.flatcombine(flat_str,
                                 output='pre_reduced/toFlat_blue',
                                 ccdtype='',
                                 rdnoise=3.7,
                                 subsets='no',
                                 process='no')
                Flat_blue = 'Flat_blue.fits'

            #What is the output here? Check for overwrite
            iraf.specred.response('pre_reduced/to' + Flat_blue,
                                  normaliz='pre_reduced/to' + Flat_blue,
                                  response='pre_reduced/RESP_blue',
                                  interac=inter,
                                  thresho='INDEF',
                                  sample='*',
                                  naverage=2,
                                  function='legendre',
                                  low_rej=3,
                                  high_rej=3,
                                  order=60,
                                  niterat=20,
                                  grow=0,
                                  graphic='stdgraph')

        # red flats
        if len(list_flat_r) > 0:
            br, inst = instruments.blue_or_red(list_flat_r[0])
            iraf.specred.dispaxi = inst.get('dispaxis')
            if len(list_flat_r) == 1:
                # Flat_red = 'pre_reduced/to' + list_flat_r[0]
                Flat_red = list_flat_r[0]
            else:
                flat_str = ''
                for flat in list_flat_r:
                    flat_str = flat_str + 'pre_reduced/to' + flat + ','
                if os.path.isfile('pre_reduced/toFlat_red'):
                    os.remove('pre_reduced/toFlat_red')
                iraf.flatcombine(flat_str,
                                 output='pre_reduced/toFlat_red',
                                 ccdtype='',
                                 rdnoise=3.8,
                                 subsets='yes',
                                 process='no')
                Flat_red = 'Flat_red.fits'

            #What is the output here? Check for overwrite
            iraf.specred.response('pre_reduced/to' + Flat_red,
                                  normaliz='pre_reduced/to' + Flat_red,
                                  response='pre_reduced/RESP_red',
                                  interac=inter,
                                  thresho='INDEF',
                                  sample='*',
                                  naverage=2,
                                  function='legendre',
                                  low_rej=3,
                                  high_rej=3,
                                  order=80,
                                  niterat=20,
                                  grow=0,
                                  graphic='stdgraph')

    # science files should have 't' in front now
    # this just gets the base name, to prefix assumed below
    if new_files is not None:
        files_science = new_files

    # get all the science objects for the night
    science_targets = []
    for obj in files_science:
        hdr = util.readhdr(obj)
        _type = util.readkey3(hdr, 'object')
        science_targets.append(_type)

    # make a dir for each sci object
    science_targets = set(science_targets)
    for targ in science_targets:
        if not os.path.isdir('pre_reduced/' + targ + '/'):
            os.mkdir('pre_reduced/' + targ + '/')

    # copy the files into the obj dir
    for obj in files_science:
        hdr = util.readhdr(obj)
        targ = util.readkey3(hdr, 'object')
        if not obj.startswith('to'):
            os.system('cp ' + 'pre_reduced/to' + obj + ' ' + 'pre_reduced/' +
                      targ + '/')
        else:
            os.system('cp ' + 'pre_reduced/' + obj + ' ' + 'pre_reduced/' +
                      targ + '/')

    rawfiles = glob.glob('*.fits')
    ofiles = glob.glob('pre_reduced/o' + '*.fits')
    tfiles = glob.glob('pre_reduced/to' + '*.fits')

    # delete raw files from the pre_reduced dir
    # there shouldn't be any there though?
    # maybe if the overscan isn't implemented for that detector
    for img in rawfiles:
        util.delete('pre_reduced/' + img)

    # delete the ofiles from pre_reduced dir
    for img in ofiles:
        util.delete(img)
Example #5
def pre_reduction_dev(*args,**kwargs):

    # parse kwargs
    VERBOSE = kwargs.get('VERBOSE')
    CLOBBER = kwargs.get('CLOBBER')
    FAKE_BASIC_2D = kwargs.get('FAKE_BASIC_2D')
    FULL_CLEAN = kwargs.get('FULL_CLEAN')
    FAST = kwargs.get('FAST')
    CONFIG_FILE = kwargs.get('CONFIG_FILE')
    MAKE_ARCS = kwargs.get('MAKE_ARCS')
    MAKE_FLATS = kwargs.get('MAKE_FLATS')
    QUICK = kwargs.get('QUICK')
    RED_AMP_BAD = kwargs.get('RED_AMP_BAD')
    BLUE_AMP_BAD = kwargs.get('BLUE_AMP_BAD')
    HOST = kwargs.get('HOST')
    TRIM = kwargs.get('TRIM')
    FIX_AMP_OFFSET = kwargs.get('FIX_AMP_OFFSET')
    RED_DIR = kwargs.get('RED_DIR')

    # init iraf stuff
    iraf.noao(_doprint=0)
    iraf.imred(_doprint=0)
    iraf.ccdred(_doprint=0)
    iraf.twodspec(_doprint=0)
    iraf.longslit(_doprint=0)
    iraf.onedspec(_doprint=0)
    iraf.specred(_doprint=0)

    iraf.ccdred.verbose = 'no'
    iraf.specred.verbose = 'no'
    iraf.ccdproc.darkcor = 'no'
    iraf.ccdproc.fixpix = 'no'
    iraf.ccdproc.flatcor = 'no'
    iraf.ccdproc.zerocor = 'no'
    iraf.ccdproc.ccdtype = ''

    iraf.longslit.mode = 'h'
    iraf.specred.mode = 'h'
    iraf.noao.mode = 'h'
    iraf.ccdred.instrument = "ccddb$kpno/camera.dat"

    prereddir = os.path.join(RED_DIR, 'pre_reduced/')

    # set up config
    if CONFIG_FILE:
        with open(CONFIG_FILE,'r') as fin:
            configDict = json.load(fin)
    else:
        STANDARD_STAR_LIBRARY = mu.construct_standard_star_library()
        observations = sorted(glob.glob('*.fits'))

        #TODO: Better first pass at config file, std have exptime < 250s(?)
        # CDK - updated this and placed it after arm/inst_name definition to
        # better generalize configDict
        configDict = {}
        for obsfile in observations:

            arm, inst_dict = instruments.blue_or_red(obsfile)
            inst_name = inst_dict.get('instrument').upper()
            for key in ['SCI','STD','CAL_ARC','CAL_FLAT','CAL']:
                if key not in configDict.keys():
                    configDict[key]={}
                if arm.upper() not in configDict[key].keys():
                    configDict[key][arm.upper()]={}
                    if 'CAL_' in key:
                        full_key = key.replace('CAL_','CALIBRATION_')
                        configDict[key][arm.upper()]={full_key: []}

            # CDK - added this to explicitly skip files that dont need to be
            # reduced.
            if not mu.needs_to_be_reduced(obsfile): continue

            hdu = fits.open(obsfile)
            if 'lris' in inst_dict.get('name'):
                hdu[0].header['OBJECT']=hdu[0].header['TARGNAME']
                hdu.writeto(obsfile, overwrite=True)

            use_ext = inst_dict['use_ext']
            header = hdu[use_ext].header

            imageType = mu.determine_image_type(header, inst_name, STANDARD_STAR_LIBRARY)

            channel, inst_dict = instruments.blue_or_red(obsfile)
            obj = header.get('OBJECT').strip()
            if imageType == 'SCI' or imageType == 'STD':
                if obj in configDict[imageType][channel.upper()].keys():

                    configDict[imageType][channel.upper()][obj].append(obsfile)
                else:
                    configDict[imageType][channel.upper()][obj] = [obsfile]
            if imageType == 'CAL_ARC' and 'foc' not in obsfile:
                configDict[imageType][channel.upper()]['CALIBRATION_ARC'].append(obsfile)
            if imageType == 'CAL_FLAT':
                configDict[imageType][channel.upper()]['CALIBRATION_FLAT'].append(obsfile)

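        # resulting structure (sketch):
        #   {'SCI':      {'BLUE': {object: [files]}, 'RED': {...}},
        #    'STD':      {'BLUE': {...}, 'RED': {...}},
        #    'CAL_ARC':  {'BLUE': {'CALIBRATION_ARC': [...]}, 'RED': {...}},
        #    'CAL_FLAT': {'BLUE': {'CALIBRATION_FLAT': [...]}, 'RED': {...}},
        #    'CAL':      {'BLUE': {}, 'RED': {}}}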

        with open('custom_config.json','w') as fout:
            fout.write(json.dumps(configDict,indent=4))

        outStr = '\n\nOk, no config supplied, so I wrote a first-pass custom_config.json\n'
        outStr += 'Use at your own risk! Manually edit if needed and run again with -c custom_config.json\n'
        outStr += 'You can manually add files to the appropriate lists and rerun.\n'
        outStr += 'WARNING: make sure you rename your config file, or it could get overwritten!\n\n'
        print(outStr)

        sys.exit(1)

    if not FAST:
        # do visual inspection of frames via ds9 windows
        usrResp = ''
        while usrResp != 'C':
            promptStr = '\nYou\'ve opted to display images before kicking off the reduction.\n'
            promptStr += 'At this point you may:\n'
            promptStr += '  (D)isplay the current state of the reduction config\n'
            promptStr += '  (C)ontinue with these files as is\n'
            promptStr += '  (R)emove a file from the current config\n'
            promptStr += '  (A)dd a file to the current config\n'
            promptStr += '  (Q)uit the whole thing. \n'
            promptStr += 'I recommend you (D)isplay and remove unwanted frames from your config file,\n'
            promptStr += '(Q)uit, and then rerun with the updated config file.\nCommand: '
            usrRespOrig = raw_input(promptStr)


            try:
                usrResp = usrRespOrig.strip().upper()
            except Exception as e:
                usrResp = 'nothing'

            # (D)isplay all the images in the lists
            if usrResp == 'D':

                blueArcList = configDict['CAL_ARC']['BLUE']['CALIBRATION_ARC']
                redArcList = configDict['CAL_ARC']['RED']['CALIBRATION_ARC']

                blueFlatList = configDict['CAL_FLAT']['BLUE']['CALIBRATION_FLAT']
                redFlatList = configDict['CAL_FLAT']['RED']['CALIBRATION_FLAT']

                blueStdList = []
                redStdList = []

                blueSciList = []
                redSciList = []

                for targ,imgList in configDict['STD']['BLUE'].items():
                    for file in imgList:
                        blueStdList.append(file)
                for targ,imgList in configDict['STD']['RED'].items():
                    for file in imgList:
                        redStdList.append(file)
                for targ,imgList in configDict['SCI']['BLUE'].items():
                    for file in imgList:
                        blueSciList.append(file)
                for targ,imgList in configDict['SCI']['RED'].items():
                    for file in imgList:
                        redSciList.append(file)

                blueArcDS9 = show_ds9_list(blueArcList,instanceName='BlueArcs')
                redArcDS9 = show_ds9_list(redArcList,instanceName='RedArcs')

                blueFlatDS9 = show_ds9_list(blueFlatList,instanceName='BlueFlats')
                redFlatDS9 = show_ds9_list(redFlatList,instanceName='RedFlats')

                blueStdDS9 = show_ds9_list(blueStdList,instanceName='BlueStandards')
                redStdDS9 = show_ds9_list(redStdList,instanceName='RedStandards')

                blueSciDS9 = show_ds9_list(blueSciList,instanceName='BlueScience')
                redSciDS9 = show_ds9_list(redSciList,instanceName='RedScience')

            if usrResp == 'R':
                configDict = user_adjust_config(configDict,operation='REMOVE')
            if usrResp == 'A':
                configDict = user_adjust_config(configDict,operation='ADD')
            if usrResp == 'Q':
                print('Okay, quitting pre_reduction...')
                sys.exit(1)


    # pre_reduced does not exist, needs to be made
    if not os.path.isdir(prereddir):
        os.mkdir(prereddir)

    if QUICK:
        file = glob.glob('*.fits')[0]
        inst = instruments.blue_or_red(file)[1]
        if 'kast' in inst['name']:
            b_inst = instruments.kast_blue
            r_inst = instruments.kast_red
        if 'lris' in inst['name']:
            b_inst = instruments.lris_blue
            r_inst = instruments.lris_red
        if 'goodman' in inst['name']:
            b_inst = instruments.goodman_blue
            r_inst = instruments.goodman_red
        if not os.path.isdir(prereddir+'master_files/'):
            os.mkdir(prereddir+'master_files/')
        b_arcsol = b_inst.get('archive_arc_extracted_id')
        b_resp = b_inst.get('archive_flat_file')
        r_arcsol = r_inst.get('archive_arc_extracted_id')
        r_resp = r_inst.get('archive_flat_file')
        if os.path.isdir(prereddir+'master_files/'):
            os.system('cp ' + b_arcsol + ' ' + prereddir + 'master_files/')
            os.system('cp ' + b_resp + ' ' + prereddir)
            os.system('cp ' + r_arcsol + ' ' + prereddir + 'master_files/')
            os.system('cp ' + r_resp + ' ' + prereddir)


    # pre_reduced exists, but we want to clobber/do a clean reduction
    elif FULL_CLEAN:

        promptStr = 'Do you really want to wipe pre_reduced? [y/n]: '
        usrRespOrig = raw_input(promptStr)
        if usrRespOrig and usrRespOrig[0].strip().upper() == 'Y':

            # remove all pre_reduced files
            shutil.rmtree(prereddir)
            os.mkdir(prereddir)

    # pre_reduced exists, need to document what is there
    else:

        # get existing pre_reduced files
        preRedFiles = glob.glob(prereddir+'*.fits')

    # loop over raw files in configDict, if the destination exists, do nothing
    # # otherwise, do the bias/reorient/trim/output/etc
    for imgType,typeDict in configDict.items():
        for chan,objDict in typeDict.items():
            for obj,fileList in objDict.items():
                for rawFile in fileList:
                    # try:
                    #     res = basic_2d_proc(rawFile,CLOBBER=CLOBBER)
                    #     if res != 0:
                    #         raise ValueError('Something bad happened in basic_2d_proc on {}'.format(rawFile))
                    # except (Exception,ValueError) as e:
                    #     print('Exception (basic_2d): {}'.format(e))

                    if not FAKE_BASIC_2D:
                        inst = instruments.blue_or_red(rawFile)[1]
                        if inst['name'] == 'lris_blue' or inst['name'] == 'lris_red':
                            fix_lris_header(rawFile)
                            # res = keck_basic_2d.main([rawFile])
                            if imgType != 'CAL_FLAT':
                                print (imgType)
                                res = keck_basic_2d.main([rawFile], TRIM=TRIM, ISDFLAT=False, RED_AMP_BAD=RED_AMP_BAD, MASK_MIDDLE_RED=False, MASK_MIDDLE_BLUE=False, FIX_AMP_OFFSET=FIX_AMP_OFFSET, BLUE_AMP_BAD=BLUE_AMP_BAD)
                            else:
                                print (imgType)
                                res = keck_basic_2d.main([rawFile], TRIM=TRIM, ISDFLAT = True, RED_AMP_BAD=RED_AMP_BAD, MASK_MIDDLE_RED=False, MASK_MIDDLE_BLUE=False, FIX_AMP_OFFSET=FIX_AMP_OFFSET, BLUE_AMP_BAD=BLUE_AMP_BAD)
                        else:
                            res = basic_2d_proc(rawFile,imgType=imgType,CLOBBER=CLOBBER)
                    else:
                        # here we're faking the basic 2D reduction because we've done
                        # specialized 2D reduction (e.g., keck_basic_2d)
                        res = 0
                    if res != 0:
                        raise ValueError('Something bad happened in basic_2d_proc on {}'.format(rawFile))

    # move the std and sci files into their appropriate directories
    try:
        res = reorg_files(configDict,CLOBBER=CLOBBER)
        if res != 0:
            raise ValueError('Something bad happened in reorg_files')
    except (Exception,ValueError) as e:
        print('Exception (reorg): {}'.format(e))


    ### some blocks of code from the original pre_reduction ###
    # combine the arcs
    if MAKE_ARCS:
        # CDK - generalized arc creation method
        for key in configDict['CAL_ARC'].keys():
            list_arc = configDict['CAL_ARC'][key]['CALIBRATION_ARC']
            if len(list_arc)>0:
                first = prereddir+'to{}'.format(list_arc[0])
                br, inst = instruments.blue_or_red(first)
                destFile = prereddir+'ARC_{0}.fits'.format(key.lower())

                util.make_arc(list_arc, destFile, inst, iraf)


    # combine the flats
    if MAKE_FLATS and 'lris' in inst['name']:

        list_flat_b = configDict['CAL_FLAT']['BLUE']['CALIBRATION_FLAT']
        list_flat_r = configDict['CAL_FLAT']['RED']['CALIBRATION_FLAT']
        inter = 'yes'

        b_amp1_list = []
        b_amp2_list = []
        r_amp1_list = []
        r_amp2_list = []
        predir = prereddir+'to'
        for flat in list_flat_b:
            suffix = '.'.join(flat.split('.')[1:])
            b_amp1_file = flat.split('.')[0]+'_amp1.'+suffix
            b_amp2_file = flat.split('.')[0]+'_amp2.'+suffix
            if os.path.exists(predir+b_amp1_file): b_amp1_list.append(b_amp1_file)
            if os.path.exists(predir+b_amp2_file): b_amp2_list.append(b_amp2_file)
        for flat in list_flat_r:
            suffix = '.'.join(flat.split('.')[1:])
            r_amp1_file = flat.split('.')[0]+'_amp1.'+suffix
            r_amp2_file = flat.split('.')[0]+'_amp2.'+suffix
            print(r_amp1_file)
            if os.path.exists(predir+r_amp1_file): r_amp1_list.append(r_amp1_file)
            if os.path.exists(predir+r_amp2_file): r_amp2_list.append(r_amp2_file)

        # blue flats
        if len(list_flat_b) > 0:
            # br, inst = instruments.blue_or_red(list_flat_b[0])
            br, inst = instruments.blue_or_red(prereddir+'to{}'.format(b_amp1_list[0]))
            dispaxis = inst.get('dispaxis')
            iraf.specred.dispaxi = dispaxis
            Flat_blue_amp1 = prereddir+'toFlat_blue_amp1.fits'
            Flat_blue_amp2 = prereddir+'toFlat_blue_amp2.fits'

            flat_list_amp1 = []
            for flat in b_amp1_list:
                flat_list_amp1.append(prereddir+'to'+ flat)
            if os.path.isfile(Flat_blue_amp1):
                os.remove(Flat_blue_amp1)

            # first, combine all the flat files into a master flat
            res = combine_flats(flat_list_amp1,OUTFILE=Flat_blue_amp1,MEDIAN_COMBINE=True)

            # run iraf response
            iraf.specred.response(Flat_blue_amp1,
                                   normaliz=Flat_blue_amp1,
                                   response=prereddir+'RESP_blue_amp1',
                                   interac=inter, thresho='INDEF',
                                   sample='*', naverage=2, function='legendre',
                                   low_rej=5,high_rej=5, order=60, niterat=20,
                                   grow=0, graphic='stdgraph')

            # finally, inspect the flat and mask bad regions
            res = inspect_flat([prereddir+'RESP_blue_amp1.fits'],
                OUTFILE=prereddir+'RESP_blue_amp1.fits', DISPAXIS=dispaxis)

            hdu_amp1 = fits.open(prereddir+'RESP_blue_amp1.fits')
            amp1_flatten = np.asarray(hdu_amp1[0].data).flatten()
            shape1=hdu_amp1[0].data.shape
            dim1 = shape1[0]
            dim2 = shape1[1]

            concat_amps = amp1_flatten

            if not BLUE_AMP_BAD:
                flat_list_amp2 = []
                for flat in b_amp2_list:
                    flat_list_amp2.append(prereddir+'to'+ flat)
                if os.path.isfile(Flat_blue_amp2):
                    os.remove(Flat_blue_amp2)

                res = combine_flats(flat_list_amp2,OUTFILE=Flat_blue_amp2,MEDIAN_COMBINE=True)
                iraf.specred.response(Flat_blue_amp2,
                                       normaliz=Flat_blue_amp2,
                                       response=prereddir+'RESP_blue_amp2',
                                       interac=inter, thresho='INDEF',
                                       sample='*', naverage=2, function='legendre',
                                       low_rej=5,high_rej=5, order=60, niterat=20,
                                       grow=0, graphic='stdgraph')

                res = inspect_flat([prereddir+'RESP_blue_amp2.fits'],
                    OUTFILE=prereddir+'RESP_blue_amp2.fits', DISPAXIS=dispaxis)
                hdu_amp2 = fits.open(prereddir+'RESP_blue_amp2.fits')
                amp2_flatten = np.asarray(hdu_amp2[0].data).flatten()
                shape2=hdu_amp2[0].data.shape

                dim1+=shape2[0]
                concat_amps = np.concatenate([concat_amps, amp2_flatten])

            print('Output blue response dimensions:',dim1,dim2)
            resp_blue_data = np.reshape(concat_amps, (dim1, dim2))

            header = hdu_amp1[0].header
            if os.path.exists(prereddir+'RESP_blue.fits'):
                os.remove(prereddir+'RESP_blue.fits')

            hdu = fits.PrimaryHDU(resp_blue_data,header)
            hdu.writeto(prereddir+'RESP_blue.fits',output_verify='ignore')

            resp_files = ['RESP_blue_amp1.fits','RESP_blue_amp2.fits']
            for file in resp_files:
                if os.path.exists(prereddir+file):
                    os.remove(prereddir+file)

        # red flats
        if len(list_flat_r) > 0:
            # br, inst = instruments.blue_or_red(list_flat_r[0])
            br, inst = instruments.blue_or_red(prereddir+'to{}'.format(r_amp1_list[0]))
            dispaxis = inst.get('dispaxis')
            iraf.specred.dispaxi = dispaxis
            Flat_red_amp1 = prereddir+'toFlat_red_amp1.fits'
            Flat_red_amp2 = prereddir+'toFlat_red_amp2.fits'


            flat_list_amp1 = []
            for flat in r_amp1_list:
                flat_list_amp1.append(prereddir+'to'+ flat)
            if os.path.isfile(Flat_red_amp1):
                os.remove(Flat_red_amp1)

            flat_list_amp2 = []
            for flat in r_amp2_list:
                flat_list_amp2.append(prereddir+'to'+ flat)
            if os.path.isfile(Flat_red_amp2):
                os.remove(Flat_red_amp2)

            amp2flag = len(flat_list_amp2)>0

            # first, combine all the flat files into a master flat
            if amp2flag:
                res = combine_flats(flat_list_amp1,OUTFILE=Flat_red_amp1,MEDIAN_COMBINE=True)
                res = combine_flats(flat_list_amp2,OUTFILE=Flat_red_amp2,MEDIAN_COMBINE=True)
            else:
                res = combine_flats(flat_list_amp1,OUTFILE=Flat_red_amp1,MEDIAN_COMBINE=True)

            #What is the output here? Check for overwrite
            iraf.specred.response(Flat_red_amp1,
                                  normaliz=Flat_red_amp1,
                                  response=prereddir+'RESP_red_amp1',
                                  interac=inter, thresho='INDEF',
                                  sample='*', naverage=2, function='legendre',
                                  low_rej=5,high_rej=5, order=80, niterat=20,
                                  grow=0, graphic='stdgraph')
            if amp2flag:
                iraf.specred.response(Flat_red_amp2,
                                      normaliz=Flat_red_amp2,
                                      response=prereddir+'RESP_red_amp2',
                                      interac=inter, thresho='INDEF',
                                      sample='*', naverage=2, function='legendre',
                                      low_rej=5,high_rej=5, order=80, niterat=20,
                                      grow=0, graphic='stdgraph')

            # finally, inspect the flat and mask bad regions
            if amp2flag:
                res = inspect_flat([prereddir+'RESP_red_amp1.fits'],
                    OUTFILE=prereddir+'RESP_red_amp1.fits', DISPAXIS=dispaxis)
                res = inspect_flat([prereddir+'RESP_red_amp2.fits'],
                    OUTFILE=prereddir+'RESP_red_amp2.fits', DISPAXIS=dispaxis)
            else:
                res = inspect_flat([prereddir+'RESP_red_amp1.fits'],
                    OUTFILE=prereddir+'RESP_red.fits', DISPAXIS=dispaxis)

            if amp2flag:
                hdu_amp1 = fits.open(prereddir+'RESP_red_amp1.fits')
                hdu_amp2 = fits.open(prereddir+'RESP_red_amp2.fits')
                head = hdu_amp1[0].header
                shape1 = hdu_amp1[0].data.shape
                shape2 = hdu_amp2[0].data.shape
                amp1_flatten = np.asarray(hdu_amp1[0].data).flatten()
                amp2_flatten = np.asarray(hdu_amp2[0].data).flatten()
                concat_amps = np.concatenate([amp2_flatten, amp1_flatten])
                xbin, ybin = [int(ibin) for ibin in head['BINNING'].split(',')]
                resp_red_data = np.reshape(concat_amps, (shape1[0]+shape2[0],shape1[1]))

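                # set these rows to unity so no flat correction is applied there
                # (hardcoded; presumably masks the gap between the two amplifiers)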
                resp_red_data[278:294,:] = 1.

                header = hdu_amp1[0].header
                if os.path.isfile(prereddir+'RESP_red.fits'):
                    os.remove(prereddir+'RESP_red.fits')

                hdu = fits.PrimaryHDU(resp_red_data,header)
                hdu.writeto(prereddir+'RESP_red.fits',output_verify='ignore')

                os.remove(prereddir+'RESP_red_amp1.fits')
                os.remove(prereddir+'RESP_red_amp2.fits')
            else:
                os.remove(prereddir+'RESP_red_amp1.fits')

    elif MAKE_FLATS:
        # CDK - generalized arc creation method
        for key in configDict['CAL_FLAT'].keys():
            list_flat = configDict['CAL_FLAT'][key]['CALIBRATION_FLAT']
            if len(list_flat)>0:

                first = prereddir+'to{}'.format(list_flat[0])
                br, inst = instruments.blue_or_red(first)
                dispaxis = inst.get('dispaxis')

                iraf.specred.dispaxi = dispaxis
                inter = True
                Flat_out = prereddir+'toFlat_{0}.fits'.format(br.lower())
                dum = prereddir+'dummy_{0}.fits'.format(br.lower())
                resp = prereddir+'RESP_{0}.fits'.format(br.lower())

                flat_list = []
                norm_list = []
                for flat in list_flat:
                    flat_list.append(prereddir+'to'+flat)
                    norm = flat.replace('.fits','_norm.fits')
                    norm_list.append(prereddir+'to'+norm)
                for file in [Flat_out, dum, resp]:
                    if os.path.exists(file):
                        os.remove(file)

                # first, combine all the flat files into a master flat
                res = combine_flats(flat_list,OUTFILE=Flat_out,
                    MEDIAN_COMBINE=True)
                # combine all the flat files for norm region
                res = combine_flats(norm_list,OUTFILE=dum, MEDIAN_COMBINE=True)

                iraf.specred.response(Flat_out, normaliz=dum, response=resp,
                    interac=inter, thresho='INDEF', sample='*', naverage=2,
                    function='legendre', low_rej=5,high_rej=5, order=60,
                    niterat=20, grow=0, graphic='stdgraph')

                # finally, inspect the flat and mask bad regions
                res = inspect_flat([resp], OUTFILE=resp, DISPAXIS=dispaxis)

                for flat in norm_list:
                    os.remove(flat)

    if HOST:
        host_gals.make_host_metadata(configDict)

    return 0
def inspect_flat(flat_list,*args,**kwargs):
    '''
    Interactively inspect a combined flat-field image and repair bad regions

    Parameters
    ----------
    flat_list : list
        List of flat field image filenames

    OUTFILE : string, optional
        If specified, the corrected flat image is written to this location
    DISPAXIS : int, optional (Default = 1)
        Specifies the dispersion axis of the image. DISPAXIS=1
        corresponds to (spectral, spatial), DISPAXIS=2 corresponds
        to (spatial, spectral)
    REMOVE_COLOR : bool, optional (Default = False)
        If true, remove the color term from the supplied images
    READ_FROM_FILE : bool, optional (Default = False)
        If true, flat_list is the name of a text file listing the flats

    Returns
    -------
    int : 0, and writes files to disk
    '''
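
    # Usage sketch (hypothetical file names), e.g. after iraf.specred.response
    # has written a response image to pre_reduced/:
    #   inspect_flat(['pre_reduced/RESP_blue.fits'],
    #                OUTFILE='pre_reduced/RESP_blue.fits', DISPAXIS=1)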
    
    # unpack
    outFile = kwargs.get('OUTFILE',None)
    dispaxis = kwargs.get('DISPAXIS',1)
    remove_color = kwargs.get('REMOVE_COLOR',False)
    read_from_file = kwargs.get('READ_FROM_FILE',False)

    # if user specified a file containing a list, read into flat_list
    if read_from_file:
        flat_list_tmp = []
        with open(flat_list,'r') as fin:
            for line in fin:
                if len(line.split()) > 0 and line.split()[0] != '#':
                    flat_list_tmp.append(line.split()[0].strip())

        # assign to flat_list, then proceed as usual
        flat_list = flat_list_tmp

    # if user passed multiple files, combine them, otherwise, just read in the single file
    if len(flat_list) > 1:
        flat_comb_image, header, inst = combine_flats(flat_list,**kwargs)
    else:
        br, inst = instruments.blue_or_red(flat_list[0])
        hdu = fits.open(flat_list[0])
        flat_comb_image = hdu[0].data
        header = hdu[0].header

    # transpose if we're dealing with cols x rows
    if dispaxis == 2:
        flat_comb_image = flat_comb_image.T

    # this is a hack and inferior to running IRAF response
    if remove_color:
        # for each column, divide by the median
        for i in range(len(flat_comb_image[0,:])):
            flat_comb_image[:,i] /= np.median(flat_comb_image[:,i])
    
    # set up plotting window
    plt.ion()
    
    fig=plt.figure(figsize=(16,8))
    axMain = plt.subplot2grid((36,36), (0,0), rowspan=36, colspan=36)
    ax1 = plt.subplot2grid((36,36), (0,0), rowspan=11, colspan=12)
    ax2 = plt.subplot2grid((36,36), (0,12), rowspan=11, colspan=12)
    ax3 = plt.subplot2grid((36,36), (0,24), rowspan=11, colspan=12)
    ax4 = plt.subplot2grid((36,36), (12,0), rowspan=12, colspan=36)
    ax5 = plt.subplot2grid((36,36), (24,0), rowspan=12, colspan=36)
    
    # ax1 data
    bsd_col_lo = 0
    bsd_col_up = bsd_col_lo + 500
    blueSkyData = np.median(flat_comb_image[:,bsd_col_lo:bsd_col_up],axis=0)
    blueSkyX = np.arange(bsd_col_lo,bsd_col_up,1)
    
    
    # ax2 data
    # msd_col_lo = flat_comb_image.shape[1] // 2
    msd_col_lo = 1800
    msd_col_up = msd_col_lo + 300
    midSkyData = np.median(flat_comb_image[:,msd_col_lo:msd_col_up],axis=0)
    midSkyX = np.arange(msd_col_lo,msd_col_up,1)
    
    # ax3 data
    # rsd_col_lo = flat_comb_image.shape[1] - 500
    rsd_col_lo = 3200
    rsd_col_up = rsd_col_lo + 400
    redSkyData = np.median(flat_comb_image[:,rsd_col_lo:rsd_col_up],axis=0)
    redSkyX = np.arange(rsd_col_lo,rsd_col_up,1)

    # ax1.plot(blueSkyX,blueSkyData,c='k',ls='-',lw=3.)
    # ax2.plot(midSkyX, midSkyData,c='k',ls='-',lw=3.)
    # ax3.plot(redSkyX,redSkyData,c='k',ls='-',lw=3.)

    # grab the values of the 10 and 90 percentile pixels
    flat_comb_image_ravel = np.ravel(flat_comb_image)
    sortedIndexes = np.argsort(flat_comb_image_ravel)
    # vmin = 1.*flat_comb_image_ravel[sortedIndexes[int(0.2*len(sortedIndexes))]]
    # vmax = 2.*flat_comb_image_ravel[sortedIndexes[int(0.8*len(sortedIndexes))]]
    vmin = 0.9
    vmax = 1.1

    # image
    ax4.imshow(flat_comb_image,aspect=1.,origin='lower',
                norm=colors.Normalize(vmin=vmin, vmax=vmax))
    
    # residuals
    ax5.imshow(flat_comb_image,aspect=1.,origin='lower',
                norm=colors.Normalize(vmin=vmin, vmax=vmax))

    # ranges on image plots
    ax4.set_xlim([0,flat_comb_image.shape[1]])
    ax4.set_ylim([0,flat_comb_image.shape[0]])
    
    ax5.set_xlim([0,flat_comb_image.shape[1]])
    ax5.set_ylim([0,flat_comb_image.shape[0]])
    
    # sparse axis labels
    ax1.set_xlabel('column')
    ax1.set_ylabel('counts')
    ax2.set_xlabel('column')
    ax3.set_xlabel('column')
    
    ax1.set_yticklabels([])
    ax2.set_yticklabels([])
    ax3.set_yticklabels([])

    flatFitObj = fitFlatClass(flat_comb_image,fig,inst)

    while True:
        
        # this really should be a dict of key/value pairs
        # and then the prompt is dynamically generated
        validResps = ['A','R','F','S','U','H',  # standard options
                      'AHARD','RHARD','REFINE', # poweruser/hidden options
                      'W','D','Q','Q!']              # standard ends
        promptStr = 'Enter (a) to add an exclusion region.\n'
        promptStr += 'Enter (r) to remove a region.\n'
        promptStr += 'Enter (f) to fit the exclusion regions.\n'
        promptStr += 'Enter (s) to substitute model in exclusion regions.\n'
        promptStr += 'Enter (h) to substitute the median profile in a region\n'
        promptStr += 'Enter (u) to undo everything and restart.\n'
        promptStr += 'Enter (w) to write the improved flat to disk.\n'
        promptStr += 'Enter (d) to enter the debugger.\n'
        promptStr += 'Enter (q) to quit and write the current flat to disk.\n'
        promptStr += 'Enter (q!) to quit and do nothing.\n'
        promptStr += 'Answer: '
        usrResp = raw_input(promptStr).strip().upper()
        
        if usrResp in validResps:
            
            # add region by marking it
            if usrResp == 'A':
                promptStr = 'Enter the name of the sky region (e.g. c1): '
                name = raw_input(promptStr).strip().upper()
                flatFitObj.add_fit_region(name)
                
            # add hardcoded region
            if usrResp == 'AHARD':
                promptStr = 'Enter name colLo colUp (e.g. c1 113 171): '
                usrResp = raw_input(promptStr).upper().strip()
                try:
                    name = usrResp.split()[0]
                    colLo = int(usrResp.split()[1])
                    colUp = int(usrResp.split()[2])
                    flatFitObj.add_fit_region(name,colLo=colLo,colUp=colUp)
                except Exception as e:
                    print(e)
                    
                    
            # remove
            if usrResp == 'R' or usrResp == 'RHARD':
                flatFitObj.remove_fit_region()
                
            # fit sky
            if usrResp == 'F':
                flatFitObj.fit_sky_background()
              
            # substitute model in sketchy regions  
            if usrResp == 'S':
                flatFitObj.subsitute_model_flat()
                
            if usrResp == 'U':
                flatFitObj = fitFlatClass(flat_comb_image,fig,inst)
                flatFitObj.refresh_plot()
                
            if usrResp == 'H':
                flatFitObj.hard_mask()
                
            if usrResp == 'REFINE':
                # this substitutes the corrected
                # flat for the input data...
                # basically an experiment, not intended for use
                flatFitObj.refine()
            
            # write file
            if usrResp == 'W':

                # if cols x rows, transpose before writing
                if dispaxis == 2:
                    flatFitObj.flatCorrData = flatFitObj.flatCorrData.T

                if outFile is None:
                    promptStr = 'Enter name of save file (e.g. RESP_blue): '
                    outFile = raw_input(promptStr).strip()
                    promptStr = 'Write to file {} [y/n]: '.format(outFile)
                    usrResp = raw_input(promptStr).upper().strip()
                    if usrResp == 'Y':
                        flatFitObj.save_flat('pre_reduced/'+outFile,header=header)
                        break
                    else:
                        outFile = None
                        print('Ok, aborting save...')
                else:
                    if os.path.isfile(outFile):
                        os.remove(outFile)
                    flatFitObj.save_flat(outFile,header=header)
            # debug
            if usrResp == 'D':
                pdb.set_trace()
            # quit
            if usrResp == 'Q':

                #mask problematic pixels
                print (flatFitObj.flatCorrData[flatFitObj.flatCorrData <= 0.001])
                flatFitObj.flatCorrData[flatFitObj.flatCorrData <= 0.001] = 1.

                print('Saving and quitting.')
                # if cols x rows, transpose before writing
                if dispaxis == 2:
                    flatFitObj.flatCorrData = flatFitObj.flatCorrData.T
                if outFile is None:
                    promptStr = 'Enter name of save file '
                    promptStr += '(e.g. RESP_blue.fits): '
                    outFile = raw_input(promptStr).strip()
                    promptStr = 'Write to file {} [y/n]: '.format(outFile)
                    usrResp = raw_input(promptStr).upper().strip()
                    if usrResp == 'Y':
                        flatFitObj.save_flat('pre_reduced/'+outFile,header=header)
                        break
                    else:
                        outFile = None
                        print('Ok, aborting save...')
                else:
                    if os.path.isfile(outFile):
                        os.remove(outFile)
                    flatFitObj.save_flat(outFile,header=header)
                break


            # quit
            if usrResp == 'Q!':
                print('Quitting.')
                break
        else:
            errStr = 'I don\'t understand, try again...'
            print(errStr)
    
    return 0
def combine_flats(flat_list,MEDIAN_COMBINE=False,**kwargs):
    ''' Stack the images in flat_list; return (master flat, header, instrument dict) '''

    # unpack
    outFile = kwargs.get('OUTFILE')
    clobber = kwargs.get('CLOBBER')

    # read data
    flat_comb_image = np.array([])
    median_image_stack = np.array([])
    nImages = 0
    expTime = 0
    nFlatLimit = 15

    # calculate the stack batch size (keep each in-memory stack at or below nFlatLimit frames)
    if nFlatLimit < len(flat_list):
        batchSize = len(flat_list) // 2 + 1
        while batchSize > nFlatLimit:
            batchSize = batchSize // 2 + 1
    else:
        batchSize = len(flat_list)
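    # e.g. 40 input flats with nFlatLimit = 15 gives batchSize = 40//2 + 1 = 21,
    # then 21//2 + 1 = 11, so the flats are medianed in batches of 11 frames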



    # loop over the flat files
    for file in flat_list:
        hdu = fits.open(file)
        br, inst = instruments.blue_or_red(file)
        data = hdu[0].data.astype(float)  # cast so per-second scaling works on integer data
        header = hdu[0].header
        nImages += 1
        thisExpTime = header.get('EXPTIME')
        expTime += thisExpTime

        # scale this frame to counts per second using its own exposure time
        data /= thisExpTime

        if len(flat_comb_image) == 0:
            flat_comb_image = np.copy(data)
        else:

            # if median combining, stack the data
            if MEDIAN_COMBINE:

                # stack images in the z direction
                flat_comb_image = np.dstack((flat_comb_image,data))

                # if the stack has reached the batch size, or we're at the end
                # of flat_list, squash it along the z axis with a median
                if ((flat_comb_image.shape[2] == batchSize) or
                    (file == flat_list[-1])):

                    # stack the intermediate squashed frames
                    if len(median_image_stack) == 0:
                        median_image_stack = np.median(flat_comb_image,axis=2)
                    else:
                        median_image_stack = np.dstack((median_image_stack,
                                                        np.median(flat_comb_image,axis=2)))

                    # reset the stack
                    flat_comb_image = np.array([])


            # otherwise just sum them
            else:
                flat_comb_image += np.copy(data)

    # if median combining, squash the stack of median frames
    if MEDIAN_COMBINE:
        # if there are multiple median images in the stack, median them
        if len(median_image_stack.shape) > 2:
            flat_comb_image = np.median(median_image_stack,axis=2)
        # otherwise just return the single frame
        else:
            flat_comb_image = np.copy(median_image_stack)
        header.add_history('combine_flats: median combined {} files'.format(nImages))
    else:
        header.add_history('combine_flats: summed {} files'.format(nImages))

    # scale back from counts/sec to counts using the total exposure time
    flat_comb_image *= expTime

    if outFile:
        
        # clear space
        if os.path.isfile(outFile) and clobber:
            os.remove(outFile)

        # write correct flat data
        hdu = fits.PrimaryHDU(flat_comb_image,header)
        hdu.writeto(outFile,output_verify='ignore')  

    return (flat_comb_image,header,inst)
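
# A minimal usage sketch, assuming the functions in this example are in scope:
# build a median master flat from the trimmed dome flats in pre_reduced/. The
# helper name, the glob pattern, and the output file name are illustrative
# assumptions only.
def example_make_master_flat():
    import glob
    flat_files = sorted(glob.glob('pre_reduced/to*FLAT*.fits'))
    flat_comb, flat_head, inst = combine_flats(flat_files,
                                               MEDIAN_COMBINE=True,
                                               OUTFILE='pre_reduced/FLAT_master.fits',
                                               CLOBBER=True)
    return flat_comb, flat_head, inst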
Example #8
def main(rawFiles,*args,**kwargs):
    '''
    Run basic 2D CCD reduction on Keck LRIS data

    Parameters
    ----------
    CLOBBER : bool, optional (default=False)
        Overwrite the individual files in pre_reduced, but
        do not wipe subdirectories
    FULL_CLEAN : bool, optional (default=False)
        Completely wipe pre_reduced and all subdirectories
    BPM : bool, optional (default=False)
        Mask bad pixels (not currently implemented)
    PIXEL_FLOOR : bool, optional (default=False)
        Removes negative pixel values
    REORIENT : bool, optional (default=True)
        Transposes to wavelength increasing rightward. LRIS images
        are by default (spatial, spectral), and in general should
        be transposed to (spectral, spatial).
    TRIM : bool, optional (default=False)
        Trim to a hard-coded section of the detector (note that
        get_file_names_and_indices currently always enforces trimming)
    MASK_MIDDLE_BLUE : bool, optional (default=False)
        Mask the middle section of the blue images
    MASK_MIDDLE_RED : bool, optional (default=False)
        Mask the middle section of the red images (useful if
        there are wildly disparate values that make the iraf
        windowing tedious)
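    ISDFLAT : bool, optional (default=False)
        Treat the frames as dome flats and also write per-amplifier
        cutouts (suffixed _amp1/_amp2) for building flat responses
    RED_AMP_BAD : bool, optional (default=False)
        One red amplifier is unusable; keep only the good-amp regions
    BLUE_AMP_BAD : bool, optional (default=False)
        One blue amplifier is unusable; keep only the good-amp region
    FIX_AMP_OFFSET : bool, optional (default=False)
        Match the median levels across the blue amplifier boundary
        (lris_blue only)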

    Returns
    -------
    int : 0, and writes files to disk
    '''

    # unpack supported kwargs
    CLOBBER = kwargs.get('CLOBBER',False) # overwrite individual files in pre_reduced
    FULL_CLEAN = kwargs.get('FULL_CLEAN',False) # this will completely wipe pre_reduced.
    BPM = kwargs.get('BPM',False) # no bad pixel mask available (at least for our binning)
    PIXEL_FLOOR = kwargs.get('PIXEL_FLOOR',False)
    REORIENT = kwargs.get('REORIENT',True)
    TRIM = kwargs.get('TRIM',False)
    ISDFLAT = kwargs.get('ISDFLAT',False)
    RED_AMP_BAD = kwargs.get('RED_AMP_BAD',False)
    BLUE_AMP_BAD = kwargs.get('BLUE_AMP_BAD',False)
    MASK_MIDDLE_RED = kwargs.get('MASK_MIDDLE_RED',False)
    MASK_MIDDLE_BLUE = kwargs.get('MASK_MIDDLE_BLUE',False)
    FIX_AMP_OFFSET = kwargs.get('FIX_AMP_OFFSET',False)

    # pre_reduced does not exist, needs to be made
    if not os.path.isdir('pre_reduced/'):
        os.mkdir('pre_reduced/')

    # pre_reduced exists, but we want to clobber/do a clean reduction
    elif FULL_CLEAN:

        promptStr = 'Do you really want to wipe pre_reduced? [y/n]: '
        usrRespOrig = raw_input(promptStr)
        if usrRespOrig and usrRespOrig[0].strip().upper() == 'Y':

            # remove all pre_reduced files
            shutil.rmtree('pre_reduced')
            os.mkdir('pre_reduced/')

    # pre_reduced exists, need to document what is there
    else:

        # get files
        preRedFiles = glob.glob('pre_reduced/*.fits')

    # loop over raw files, if the destination exists, do nothing
    # otherwise, do the bias/reorient/trim/output
    for i in xrange(len(rawFiles)):

        rawFile = rawFiles[i]
        inst = instruments.blue_or_red(rawFile)[1]['name']

        print('Working on:',rawFile,inst)

        # Get list of output file names and their index ranges
        outfiles, indices = get_file_names_and_indices(rawFile, **kwargs)

        if any([not os.path.exists(f) for f in outfiles]) or CLOBBER:

            # read file
            img,gain_img,head,secs = read_lris(rawFile)
            dsec,osec = np.array(secs[0]),np.array(secs[1])
            xbin, ybin = [int(ibin) for ibin in head['BINNING'].split(',')]

            # fix header
            head['EXPTIME'] = head['ELAPTIME']
            head['DATE-OBS'] = head['DATE_BEG']
            head['BASIC-2D'] = 'DONE'

            # get number of extensions for nAmps
            tmpHDU = fits.open(rawFile)
            nAmps = len(tmpHDU) - 1

            # perform oscan/bias subtraction
            noBiasImg = subtract_overscan(img,nAmps,dsec,osec,
                gain_image=gain_img,method='polynomial',params=[5,65])

            # mask bad pixels
            if BPM:
                print('Bad pixel masking not yet implemented...')

            # apply floor to pixel values
            if PIXEL_FLOOR:
                noBiasImg[noBiasImg < 0] = 0.

            # rotate/flip/transpose (wavelength increasing w/ increasing row)
            if REORIENT:
                outImg = noBiasImg.T
            else:
                outImg = noBiasImg

            if FIX_AMP_OFFSET:
                if inst=='lris_blue':
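                    # ampsplit is taken as the boundary between the two blue
                    # amplifiers (unbinned row 2260); measure the median level in
                    # the 10 rows on either side of it and shift the upper amp to
                    # match the lower one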
                    ampsplit = 2260//xbin
                    amp1 = np.median(outImg[ampsplit-10:ampsplit])
                    amp2 = np.median(outImg[ampsplit:ampsplit+10])
                    outImg[ampsplit:] = outImg[ampsplit:] + (amp1-amp2)

            for outfile,index in zip(outfiles, indices):
                outdata = outImg[index]

                if 'amp' not in outfile:
                    # This method is more robust for getting "background" level
                    # than np.median
                    data = outdata.flatten()
                    n, bins = np.histogram(data, 5000)
                    back_level = bins[np.argmax(n)]
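                    # (back_level is the mode of a 5000-bin histogram, i.e. the most
                    # common pixel value, which traces the background even when many
                    # pixels are illuminated and would pull a simple median upward)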
                    if MASK_MIDDLE_RED and inst=='lris_red':
                        if not RED_AMP_BAD:
                            #outdata[480//xbin:520//xbin,:] = back_level
                            # Instead of changing pixel values, remove pixels
                            outdata=np.delete(outdata, np.s_[468//xbin:520//xbin], 0)
                        else:
                            outdata[190//xbin:250//xbin,:] = back_level
                    if MASK_MIDDLE_BLUE and inst=='lris_blue':
                        outdata[380//xbin:480//xbin,:] = back_level

                write_out_file(outdata, head, outfile, clobber=CLOBBER)

        else:
            outStr = 'Files exist: {}'.format(outfiles)
            print(outStr)

    return 0
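
# A minimal usage sketch, assuming the raw LRIS frames sit in the current
# working directory; the helper name and the glob pattern are illustrative
# assumptions only.
def example_reduce_lris():
    import glob
    raw_files = sorted(glob.glob('*.fits'))
    return main(raw_files, CLOBBER=False, PIXEL_FLOOR=False, FIX_AMP_OFFSET=False)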
Example #9
def get_file_names_and_indices(rawFile, **kwargs):
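    '''
    Build the list of output file names in pre_reduced/ and the matching
    (row, col) slices to cut from the reoriented 2D image, based on the
    instrument side (lris_blue / lris_red), the binning, the number of
    amplifiers, the bad-amplifier flags, and whether the frame is a dome
    flat (ISDFLAT). Returns the pair (outfiles, indices), two lists of
    equal length.
    '''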

    CLOBBER = kwargs.get('CLOBBER',False) #
    FULL_CLEAN = kwargs.get('FULL_CLEAN',False) # this will completely wipe pre_reduced.
    BPM = kwargs.get('BPM',False) # no bad pixel mask available (at least for our binning)
    PIXEL_FLOOR = kwargs.get('PIXEL_FLOOR',False)
    REORIENT = kwargs.get('REORIENT',True)
    TRIM = kwargs.get('TRIM',True)
    ISDFLAT = kwargs.get('ISDFLAT',False)
    RED_AMP_BAD = kwargs.get('RED_AMP_BAD',False)
    BLUE_AMP_BAD = kwargs.get('BLUE_AMP_BAD',False)
    MASK_MIDDLE_BLUE = kwargs.get('MASK_MIDDLE_BLUE',False)
    MASK_MIDDLE_RED = kwargs.get('MASK_MIDDLE_RED',False)

    TRIM = True  # trimming is currently always enforced here

    outfiles = []
    indices = []

    hdu = fits.open(rawFile, mode='readonly')
    head = hdu[0].header
    xbin, ybin = [int(ibin) for ibin in head['BINNING'].split(',')]
    inst = instruments.blue_or_red(rawFile)[1]['name']
    nAmps = len(hdu)-1

    oScanFile = 'pre_reduced/to{}'.format(rawFile)
    suffix = '.'.join(oScanFile.split('.')[1:])
    oScanFile_amp1 = oScanFile.split('.')[0]+'_amp1.'+suffix
    oScanFile_amp2 = oScanFile.split('.')[0]+'_amp2.'+suffix

    allidx = slice(None, None, None)  # equivalent to ':' (take everything along that axis)

    if not TRIM:
        outfiles.append(oScanFile)
        indices.append((allidx, allidx))
        if ISDFLAT and inst == 'lris_blue' and not BLUE_AMP_BAD:
            # amp1
            outfiles.append(oScanFile_amp1)
            indices.append((slice(2260//xbin, 2800//xbin, None), allidx))

            # amp2
            outfiles.append(oScanFile_amp2)
            indices.append((slice(1800//xbin, 2260//xbin, None), allidx))
        elif ISDFLAT and inst == 'lris_blue':
            # amp1
            outfiles.append(oScanFile_amp1)
            indices.append((slice(2260//xbin, 2800//xbin, None), allidx))
        elif ISDFLAT and inst=='lris_red':
            outfiles.append(oScanFile_amp1)
            indices.append((slice(None, 290, None), allidx))

            outfiles.append(oScanFile_amp2)
            indices.append((slice(290, None, None), allidx))
        return(outfiles, indices)

    if ISDFLAT:
        if inst == 'lris_blue' and not BLUE_AMP_BAD:
            # amp1
            outfiles.append(oScanFile_amp1)
            indices.append((slice(2260//xbin, 2800//xbin, None), allidx))

            # amp2
            outfiles.append(oScanFile_amp2)
            indices.append((slice(1800//xbin, 2260//xbin, None), allidx))
        elif inst=='lris_blue':
            # amp1
            outfiles.append(oScanFile_amp1)
            indices.append((slice(2260//xbin, 2800//xbin, None), allidx))
        elif nAmps==2 and not RED_AMP_BAD:
            outfiles.append(oScanFile_amp1)
            indices.append((slice(26, 290, None), slice(None, -55, None)))

            outfiles.append(oScanFile_amp2)
            indices.append((slice(290, 500, None), slice(None, -55, None)))
        elif nAmps==2 and RED_AMP_BAD:
            outfiles.append(oScanFile_amp1)
            indices.append((slice(290, 500, None), slice(None, -55, None)))
        else:
            outfiles.append(oScanFile_amp1)
            indices.append((slice(1600//xbin, 2600//xbin, None), allidx))

    outfiles.append(oScanFile)
    if inst=='lris_blue' and not BLUE_AMP_BAD:
        indices.append((slice(1800//xbin, 2800//xbin, None), allidx))
    elif inst=='lris_blue':
        indices.append((slice(2260//xbin, 2800//xbin, None), allidx))
    elif nAmps==2 and RED_AMP_BAD:
        indices.append((slice(290, 500, None), slice(None, -55, None)))
    elif nAmps==2:
        indices.append((slice(26, 500, None), slice(None, -55, None)))
    else:
        indices.append((slice(1600//xbin, 2600//xbin, None), allidx))

    return(outfiles, indices)
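
# A minimal usage sketch, assuming rawFile points to a real raw LRIS frame:
# each returned index is a (row, col) slice pair meant to be applied to the
# reoriented image, as in the main() example above. The helper name and the
# dummy array are illustrative assumptions only.
def example_apply_indices(rawFile):
    import numpy as np
    outfiles, indices = get_file_names_and_indices(rawFile)
    dummy_img = np.zeros((4096, 4096))  # stand-in for a reoriented frame
    for outfile, index in zip(outfiles, indices):
        cutout = dummy_img[index]
        print(outfile, cutout.shape)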