Example #1
def check(filename, head=False):
  '''Plot every spectrum matching *filename* (flux plus an uncertainty band); if *head* is True, return the first matching FITS header.'''
  # Relies on aliases defined elsewhere in the module: u (helper module providing
  # find and unc), a (spectrum reader, likely astrotools), pf (pyfits),
  # plt (matplotlib.pyplot), and a global *path* pointing to the data root.
  paths = u.find(filename, path + 'Spectra')
  try:
    for f in paths:
      print f
      wav, flx, err = u.unc(a.read_spec(f, errors=True, atomicron=True, negtonan=True, verbose=False)[0])
      plt.plot(wav,flx), plt.fill_between(wav, flx-err, flx+err), plt.title(filename)
      if head: return pf.getheader(f)
  except KeyError:
    print filename    
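# Hypothetical usage sketch (the filename is illustrative, not a real file):
#   check('spex_prism_0253+1625.fits')                    # plot every matching spectrum
#   hdr = check('spex_prism_0253+1625.fits', head=True)   # also return the first FITS header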
Example #2
def main(spInput, grav=''):
    # 1. LOAD RELEVANT MODULES ---------------------------------------------------------
    import astrotools as at
    import asciidata
    import pyfits
    import matplotlib.pyplot as plt
    import numpy
    import sys
    import pdb
    
    
    # 2. SET UP VARIABLES --------------------------------------------------------------
    FOLDER_ROOT = '/Users/alejo/KCData/'  # Location of NIR and OPT folders
    FOLDER_OUT  = 'Output/NOCN/'
    OPTNIR_KEYS  = ['OPT', 'NIR']
    BAND_NAME  = ['NIR']
    data       = ''
    dataRaw    = ''
    specFiles  = ''
    spectraRaw = ''
    spectra    = ''
    
    # For TXT objects file (updatable here directly)
    FILE_IN     = 'nir_spex_prism_with_optical_12aug15.txt' # ASCII file w/ data
    HDR_FILE_IN = ('Ref','Designation','J','H','K','SpType','SpType_T','NIRFobs',\
                   'NIRFtel','NIRfile','OPTobs','OPTtel','OPTinst','OPTfile',\
                   'Young?','Dusty?','Blue?','Multiple?','Pec?')
    
    colNameRef   = HDR_FILE_IN[0]
    colNameDesig = HDR_FILE_IN[1]
    colNameJ     = HDR_FILE_IN[2]
    colNameK     = HDR_FILE_IN[4]
    colNameJK    = 'J-K'
    colNameType  = HDR_FILE_IN[6]
    colNameYng   = HDR_FILE_IN[14]
    colNameDust  = HDR_FILE_IN[15]
    colNameBlue  = HDR_FILE_IN[16]
    colNamePec   = HDR_FILE_IN[18]
    
    # For TXT exclude-objects file
    EXCL_FILE = 'Exclude_Objs.txt'   # ASCII file w/ U#s of objects to exclude
    
    
    # 3. READ DATA FROM INPUT FILES ----------------------------------------------------
    NULL_CHAR = ''   # Null character
    DELL_CHAR = '\t' # Delimiter character
    COMM_CHAR = '#'  # Comment character
    
    # File with objects (query in Access)
    dataRaw = asciidata.open(FOLDER_ROOT + FILE_IN, NULL_CHAR, DELL_CHAR, COMM_CHAR)
    
    # Store data in a dictionary-type object
    data = {}.fromkeys(HDR_FILE_IN)
    for colIdx,colData in enumerate(dataRaw):
        data[HDR_FILE_IN[colIdx]] = colData.tonumpy()
    
    
    # 4. FORMAT SOME ASCII COLUMNS -----------------------------------------------------
    # 4.1 Convert into unicode the Spectral Type-Text column
    uniSpType = [None] * len(data[colNameType])
    for sIdx,sType in enumerate(data[colNameType]):
        uniSpType[sIdx] = sType.decode('utf-8')
    
    data[colNameType] = numpy.array(uniSpType)
    
    # 4.2 Calculate J-K Color And Add J-K Column
    data[colNameJK] = data[colNameJ] - data[colNameK]
    
    # 4.3 Format Designation Number from Designation Column
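    #     e.g., an entry like "00 34 02.3 +05 23 11.8" (illustrative) becomes "0034+0523"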
    for desigIdx,desig in enumerate(data[colNameDesig]):
        desig = ''.join(desig.split())
        signType = '+'
        signPos = desig.find(signType)
        if signPos == -1:
            signType = '-'
            signPos  = desig.find(signType)
        
        desigProper = desig[:4] + signType + desig[signPos+1:signPos+5]
        data[colNameDesig][desigIdx] = desigProper
    
    
    # 5. FILTER DATA BY USER INPUT IN spInput ------------------------------------------
    # Find all spectra of same spectral type
    specIdx = []
    for spIdx,spType in enumerate(data[colNameType]):
        if spType.upper().startswith(spInput.upper()):
            specIdx.append(spIdx)
    
    if not specIdx:
        print 'No target found for given input.'
        return
    spTypeInput = spInput.upper()
    
    # Sort relevant objects by JKmag value
    specIdx     = numpy.array(specIdx)
    specSortIdx = data[colNameJK][specIdx].argsort()
    
    
    # 6. READ SPECTRAL DATA FROM SPECTRAL FILES ----------------------------------------
    spectraRaw    = {}.fromkeys(OPTNIR_KEYS) # Used to store the raw data from fits files
    specFilesDict = {}.fromkeys(OPTNIR_KEYS) # Used for reference purposes
    
    for key in OPTNIR_KEYS:
        specFiles = [None] * len(specSortIdx)
        
        for sortIdx,specSort in enumerate(specSortIdx):
            tmpFullName = FOLDER_ROOT + key + '/' + data[key + 'file'][specIdx[specSort]]
            specFiles[sortIdx] = tmpFullName
            specFilesDict[key] = specFiles
        
        spectraRaw[key] = at.read_spec(specFiles, atomicron=True, negtonan=True, \
                                       errors=True, verbose=False)
    
    # Clear out spectral data for objects missing either OPT or NIR data
    allNone = True
    for spIdx in range(0,len(spectraRaw['OPT'])):
        if spectraRaw['OPT'][spIdx] is None:
            spectraRaw['NIR'][spIdx] = None
        elif spectraRaw['NIR'][spIdx] is None:
            spectraRaw['OPT'][spIdx] = None
        else:
            allNone = False
    
    if allNone:
        print 'No spectral data found for objects of the given spectral type.'
        return
    
    # Convert spectraRaw contents into lists if only one spectral data
    for key in spectraRaw.keys():
        if spectraRaw[key][0] is not None:
            if len(spectraRaw[key][0]) > 3:
                spectraRaw[key] = [spectraRaw[key],]
    
    
    # 7. GATHER OBJECTS' NAMES----------------------------------------------------------
    # Filtered objects
    refs = [None] * len(specSortIdx)
    for idx,spIdx in enumerate(specSortIdx):
        tmpRef    = data[colNameRef][specIdx[spIdx]]
        refs[idx] = str(int(tmpRef))
    
    
    #8. SMOOTH SPECTRA -----------------------------------------------------------------
    # Smooth the flux data to a reasonable resolution
    spectraS = at.smooth_spec(spectraRaw['NIR'], specFile=specFilesDict['NIR'], \
                              winWidth=0)
    
    
    # 9. SET LIMITS FOR BAND AND NORMALIZING SECTION------------------------------------
    # Initialize dictionary to store limits
    BAND_LIMS = {}.fromkeys(BAND_NAME)
    for bandKey in BAND_NAME:
        BAND_LIMS[bandKey] = dict(lim = [None] * 2, limN = [None] * 2)
    
    # Set wl limits for band
    # Limits are in microns
    BAND_LIMS['NIR']['lim'][0] = 0.8
    BAND_LIMS['NIR']['lim'][1] = 2.4
    
    # Set wl limits for normalizing sections; this is the peak of the J band
    # Limits are in microns
    BAND_LIMS['NIR']['limN'][0] = 1.28
    BAND_LIMS['NIR']['limN'][1] = 1.32
    
    
    # 10. SELECT SPECTRAL DATA FOR NIR BAND---------------------------------------------
    # Initialize variables
    spectraN = {}.fromkeys(BAND_NAME)
    
    # Gather reference numbers of objects
    objRef = data[colNameRef][specIdx[specSortIdx]]
    
    # Select band
    spectra = at.sel_band(spectraS, BAND_LIMS['NIR']['lim'], objRef)
    
    # Normalize band
    spectraN['NIR'] = at.norm_spec(spectra, BAND_LIMS['NIR']['limN'])
    
    
    # 11. CHARACTERIZE TARGETS (i.e. identify young, blue, to exclude...)---------------
    # Determine which targets to exclude using the "Exclude_Objs" file
    toExclude = [False] * len(refs)
    dataExcl = asciidata.open(FOLDER_ROOT + EXCL_FILE, NULL_CHAR, DELL_CHAR, COMM_CHAR)
    if len(dataExcl[0]) > 0:
        # Extract data from "Exclude_Objs" file
        excludeObjs = [None] * len(dataExcl[0])
        for rowIdx, rowData in enumerate(dataExcl[0]):
            excludeObjs[rowIdx] = str(rowData)
        
        # Find intersection of exclude-obj list and filtered targets list
        setExclude = set(excludeObjs).intersection(set(refs))
        
        # Create list with intersection targets
        if len(setExclude) != 0:
            for exclIdx in setExclude:
                tmpExclIdx = numpy.where(numpy.array(refs) == exclIdx)
                toExclude[tmpExclIdx[0]] = True
    
    # Determine which targets are blue
    blueObjs = [False] * len(refs)
    for idx,spIdx in enumerate(specIdx[specSortIdx]):
        if data[colNameBlue][spIdx].upper() == 'YES':
            blueObjs[idx] = True
    
    # Determine which targets are dusty
    dustyObjs = [False] * len(refs)
    for idx,spIdx in enumerate(specIdx[specSortIdx]):
        if data[colNameDust][spIdx].upper() == 'YES':
            dustyObjs[idx] = True
    
    # Determine which targets are peculiar
    pecObjs = [False] * len(refs)
    for idx,spIdx in enumerate(specIdx[specSortIdx]):
        if data[colNamePec][spIdx].upper() == 'YES':
            pecObjs[idx] = True
    
    # Determine which plots are young objects
    youngObjs = [False] * len(refs)
    for idx,spIdx in enumerate(specSortIdx):
        if data[colNameYng][specIdx[spIdx]].upper() == 'YES':
            youngObjs[idx] = True
    
    # Determine which targets are GAMMA
    gammaObjs = [False] * len(refs)
    for idx,spIdx in enumerate(specIdx[specSortIdx]):
        tmpType = data[colNameType][spIdx].encode('utf-8')
        tmpLen  = len(tmpType)
        utcA = tmpType[tmpLen - 2]
        utcB = tmpType[tmpLen - 1]
        # GAMMA in utf-8 code is "\xce\xb3"
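        # (i.e., the decoded spectral-type string ends in the character u'\u03b3')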
        if utcA == '\xce' and utcB == '\xb3':
            gammaObjs[idx] = True
    
    # Determine which targets are BETA
    betaObjs = [False] * len(refs)
    for idx,spIdx in enumerate(specIdx[specSortIdx]):
        tmpType = data[colNameType][spIdx].encode('utf-8')
        tmpLen  = len(tmpType)
        utcA = tmpType[tmpLen - 2]
        utcB = tmpType[tmpLen - 1]
        # BETA in utf-8 code is "\xce\xb2"
        if utcA == '\xce' and utcB == '\xb2':
            betaObjs[idx] = True
    
    # Determine which targets to include in plots (based on user input)
    # Consolidate plotting instructions
    grav = grav.upper()
    plotInstructions = ['exclude'] * len(refs)
    if grav == 'Y': # If plot request is Young, include gamma, beta & young targets
        for plotIdx in range(len(refs)):
            if toExclude[plotIdx]:
                continue
            if gammaObjs[plotIdx] or betaObjs[plotIdx] or youngObjs[plotIdx]:
                if blueObjs[plotIdx] or dustyObjs[plotIdx] or pecObjs[plotIdx]:
                    continue
                plotInstructions[plotIdx] = 'young'
    
    elif grav == 'G': # If plot request is Gamma, include only gamma targets
        for plotIdx in range(len(plotInstructions)):
            if toExclude[plotIdx]:
                continue
            if gammaObjs[plotIdx]:
                if blueObjs[plotIdx] or dustyObjs[plotIdx] or pecObjs[plotIdx]:
                    continue
                plotInstructions[plotIdx] = 'young'
    
    elif grav == 'B': # If plot request is Beta, include only beta targets
        for plotIdx in range(len(plotInstructions)):
            if toExclude[plotIdx]:
                continue
            if betaObjs[plotIdx]:
                if blueObjs[plotIdx] or dustyObjs[plotIdx] or pecObjs[plotIdx]:
                    continue
                plotInstructions[plotIdx] = 'young'
    
    elif grav == 'F': # If plot request is Field, include Field & Standard targets
        for plotIdx in range(len(plotInstructions)):
            if toExclude[plotIdx]:
                continue
            if betaObjs[plotIdx] or gammaObjs[plotIdx] or youngObjs[plotIdx]:
                continue
            if blueObjs[plotIdx] or dustyObjs[plotIdx] or pecObjs[plotIdx]:
                continue
            plotInstructions[plotIdx] = 'field'
    
    else:   # Otherwise, print Field, gamma, beta, young & Standard targets
        for plotIdx in range(len(plotInstructions)):
            if toExclude[plotIdx]:
                continue
            if blueObjs[plotIdx] or dustyObjs[plotIdx] or pecObjs[plotIdx]:
                continue
            if youngObjs[plotIdx]:
                plotInstructions[plotIdx] = 'young'
            else:
                plotInstructions[plotIdx] = 'field'
    
    # If all plot instructions are "exclude", then stop procedure
    allExcl = True
    for instr in plotInstructions:
        if instr != 'exclude':
            allExcl = False
    if allExcl:
        print 'No spectral data to plot based on your request.'
        return
    
    
    # 12. PLOT DATA --------------------------------------------------------------------
    # Gather info on each object (for legend purposes)
    objInfo = [None] * len(refs)
    for posIdx,spIdx in enumerate(specIdx[specSortIdx]):
        tmpDesig  = data[colNameDesig][spIdx]
        tmpJK     = data[colNameJK][spIdx]
        tmpSPtype = data[colNameType][spIdx]
        tmpSPtype = tmpSPtype + ' ' * (5 - len(tmpSPtype))  # For alignment purposes
    
        objInfo[posIdx] = (tmpDesig + ' ' + tmpSPtype + ' ' + '%.2f' %tmpJK)
    
    # Create Figure with Subplots
    figObj = plotspec(spectraN, BAND_NAME, BAND_LIMS, objInfo, spTypeInput, grav, \
                        plotInstructions)
    
    figObj.savefig(FOLDER_ROOT + FOLDER_OUT + spTypeInput + grav + '_fan.pdf', \
                   dpi=800)
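# Hypothetical usage sketch: spInput is a spectral-type prefix matched against the
# SpType_T column, and grav picks the gravity class to plot ('Y', 'G', 'B', 'F';
# anything else plots field and young targets together):
#   main('L2', grav='Y')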
Example #3
spTypes = []
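# Note: this fragment relies on names defined earlier in its script and not shown here
# (BANDS, SPTYPES, SPSTD, SPSTDNM, BAND_LIMS, NORM_LIMS, SPECIAL_H_NORM_LIM, and the
# astrotools alias at).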
spectra = {}.fromkeys(BANDS)
for band in BANDS:
    spectra[band] = []

for idxTp, spTp in enumerate(SPTYPES):
    if not(spTp in SPSTDNM): continue
    # Fetch and normalize special standard (L2, L5 and L7 only)
    if spTp in SPSTDNM:
        if spTp == 'L2':
            isp = 0
        elif spTp == 'L5':
            isp = 1
        else:
            isp = 2
        tmpspec = at.read_spec('../more data/NIR/' + SPSTD[isp], errors=False, \
                               atomicron=True, negtonan=True)
        for band in BANDS:
            tmpband = at.sel_band(tmpspec, BAND_LIMS[band]['lim'])
            if idxTp in [1,2,3] and band == 'H':
                norm_lims = SPECIAL_H_NORM_LIM
            else:
                norm_lims = NORM_LIMS[band]['lim']
            stdToPlot = at.norm_spec(tmpband, norm_lims)[0]
            spectra[band].append(stdToPlot)
        spTypes.append(spTp)
    
    # # Fetch standard
    # tmpStd = nocs.main(spTp, GRAV, plot=False, std=True, normalize=False)
    # # Normalize standard
    # for bdIdx, band in enumerate(BANDS):
    #     if idxTp in [1,2,3] and band == 'H':
Example #4
def main(spInput,
         grav='',
         plot=True,
         templ=False,
         std=False,
         lbl=False,
         normalize=True):

    # 1. LOAD RELEVANT MODULES ------------------------------------------------
    #import asciidata
    import astrotools as at
    import numpy as np
    import sys
    import pdb
    import matplotlib.pyplot as plt
    from astropy.io import ascii

    # 2. SET UP VARIABLES -----------------------------------------------------
    # Customizable variables <><><><><><><><><><><><><><><><><><><><><><><><><><><>
    FOLDER_ROOT = '/Users/alejo/Dropbox/Project_0/more data/'  # Location of NIR and OPT folders
    FOLDER_IN = '/Users/alejo/Dropbox/Project_0/data/'  # Location of input files
    FOLDER_OUT = '/Users/alejo/Dropbox/Project_0/plots/'  # Location to save output figures
    FILE_IN = 'nir_spex_prism_with_optical.txt'  # ASCII file w/ data
    # <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>

    # For TXT objects list file
    HDR_FILE_IN = ('Ref','Designation','J','H','K','SpType','SpType_T','NIRFobs',\
                   'NIRFtel','NIRfile','OPTobs','OPTtel','OPTinst','OPTfile',\
                   'Young?','Dusty?','Blue?','Binary?','Pec?')
    # For TXT standards file
    FILE_IN_STD = 'NIR_Standards.txt'  # ASCII file w/ standards
    HDR_FILE_IN_STD = ('Ref', 'Designation', 'NIR SpType', 'OPT SpType')
    colNameNIRS = HDR_FILE_IN_STD[2]
    colNameOPTS = HDR_FILE_IN_STD[3]

    # For TXT exclude-objects file
    EXCL_FILE = 'Exclude_Objs.txt'  # ASCII file w/ unums of objects to exclude

    OPTNIR_KEYS = ['OPT', 'NIR']
    BANDS_NAMES = ['K', 'H', 'J', 'OPT']
    data = ''
    dataRaw = ''
    specFiles = ''
    spectraRaw = ''
    spectra = ''

    colNameRef = HDR_FILE_IN[0]
    colNameDesig = HDR_FILE_IN[1]
    colNameJ = HDR_FILE_IN[2]
    colNameK = HDR_FILE_IN[4]
    colNameJK = 'J-K'
    colNameType = HDR_FILE_IN[6]
    colNameNIRfile = HDR_FILE_IN[9]
    colNameYng = HDR_FILE_IN[14]
    colNameDust = HDR_FILE_IN[15]
    colNameBlue = HDR_FILE_IN[16]
    colNameBin = HDR_FILE_IN[17]
    colNamePec = HDR_FILE_IN[18]

    # Initialize dictionary to store NIR bands limits and normalizing sections
    BAND_LIMS = {}.fromkeys(BANDS_NAMES)
    for bandKey in BANDS_NAMES:
        BAND_LIMS[bandKey] = dict(lim=[None] * 2, limN=[None] * 2)

    # Set wavelength limits for bands
    # Limits are in microns
    BAND_LIMS['OPT']['lim'][0] = 0.65
    BAND_LIMS['OPT']['lim'][1] = 0.90
    BAND_LIMS['J']['lim'][0] = 0.8
    BAND_LIMS['J']['lim'][1] = 1.4
    BAND_LIMS['H']['lim'][0] = 1.4
    BAND_LIMS['H']['lim'][1] = 1.9
    BAND_LIMS['K']['lim'][0] = 1.9
    BAND_LIMS['K']['lim'][1] = 2.4

    # Set wl limits for normalizing sections
    # Limits are in microns
    BAND_LIMS['OPT']['limN'][0] = 0.66
    BAND_LIMS['OPT']['limN'][1] = 0.89
    BAND_LIMS['J']['limN'][0] = 0.87
    BAND_LIMS['J']['limN'][1] = 1.39
    BAND_LIMS['H']['limN'][0] = 1.41
    BAND_LIMS['H']['limN'][1] = 1.89
    BAND_LIMS['K']['limN'][0] = 1.91
    BAND_LIMS['K']['limN'][1] = 2.39

    # 3. READ DATA FROM INPUT FILES -------------------------------------------
    DELL_CHAR = '\t'  # Delimiter character
    COMM_CHAR = '#'  # Comment character

    # File with objects (source: query in Access)
    dataRaw = ascii.read(FOLDER_IN + FILE_IN, format='no_header', \
                         delimiter=DELL_CHAR, comment=COMM_CHAR, data_start=1)

    # Store data in a dictionary-type object
    data = {}.fromkeys(HDR_FILE_IN)
    for colIdx, colname in enumerate(dataRaw.colnames):
        data[HDR_FILE_IN[colIdx]] = np.array(dataRaw[colname])

    # File with standards (source: manually generated)
    dataRawS = ascii.read(FOLDER_IN + FILE_IN_STD, data_start=0)

    # Store standard data in a dictionary-type object
    dataS = {}.fromkeys(HDR_FILE_IN_STD)
    for colIdx, colname in enumerate(dataRawS.colnames):
        dataS[HDR_FILE_IN_STD[colIdx]] = np.array(dataRawS[colname])

    # 4. FORMAT SOME ASCII COLUMNS --------------------------------------------
    # 4.1 Convert into unicode the Spectral Type-Text column
    uniSpType = [None] * len(data[colNameType])
    for sIdx, sType in enumerate(data[colNameType]):
        uniSpType[sIdx] = sType  #.decode('utf-8')
    data[colNameType] = np.array(uniSpType)

    # 4.2 Calculate J-K Color
    data[colNameJK] = data[colNameJ] - data[colNameK]

    # 4.3 Format Designation Number in Designation Column
    #     (From "XX XX XX.X +XX XX XX.X" to "XXXX+XXXX")
    for desigIdx, desig in enumerate(data[colNameDesig]):
        desig = ''.join(desig.split())
        signType = '+'
        signPos = desig.find(signType)
        if signPos == -1:
            signType = '-'
            signPos = desig.find(signType)

        desigProper = desig[:4] + signType + desig[signPos + 1:signPos + 5]
        data[colNameDesig][desigIdx] = desigProper

    # 5. FILTER DATA BY USER INPUT IN spInput ---------------------------------
    uniqueSpec = False
    specIdx = []
    if spInput.upper().startswith('L'):
        # If input is a spectral type, then find all spectra of same spectral type
        for spIdx, spType in enumerate(data[colNameType]):
            if spType.upper().startswith(spInput.upper()):
                specIdx.append(spIdx)
        if not specIdx:
            print('No targets found for given input.')
            if std is False:
                return
        spTypeInput = spInput.upper()
    else:
        # If input is one single spectrum, then find it
        for spIdx, spType in enumerate(data[colNameRef]):
            if str(spType) == spInput.upper():
                specIdx.append(spIdx)
        if not specIdx:
            print('Requested target not found.')
            if std is False:
                return
        else:
            spTypeInput = data[colNameType][specIdx[0]][0:2]
            uniqueSpec = True

    # Find NIR standard target that matches user's spectral type
    stdIdx = []
    for spIdx, spType in enumerate(dataS[colNameNIRS]):
        if spType.upper().startswith(spTypeInput):
            stdIdx.append(spIdx)

    # Add NIR standard target to list of filtered objects if not there already
    # (It may not be included in first filter because OPT SpT != NIR SpT)
    if not uniqueSpec:
        if dataS[colNameNIRS][stdIdx] != dataS[colNameOPTS][stdIdx]:
            for spIdx, spRef in enumerate(data[colNameRef]):
                if spRef == dataS[colNameRef][stdIdx][0]:
                    if spIdx not in specIdx:
                        specIdx.append(spIdx)

    # Sort relevant objects by JKmag value
    specIdx = np.array(specIdx)
    specSortIdx = data[colNameJK][specIdx].argsort()

    # 6. READ SPECTRAL DATA FROM SPECTRAL FILES -------------------------------
    spectraRaw = {}.fromkeys(
        OPTNIR_KEYS)  # Used to store the raw data from fits files
    specFilesDict = {}.fromkeys(OPTNIR_KEYS)  # Used for reference purposes

    for key in OPTNIR_KEYS:
        specFiles = [None] * len(specSortIdx)

        for sortIdx, specSort in enumerate(specSortIdx):
            if data[key + 'file'][specIdx[specSort]][-4:] == '.dat': continue
            if data[key + 'file'][specIdx[specSort]] == 'include': continue
            tmpFullName = FOLDER_ROOT + key + '/' + data[key \
                          + 'file'][specIdx[specSort]]
            specFiles[sortIdx] = tmpFullName
            specFilesDict[key] = specFiles

        spectraRaw[key] = at.read_spec(specFiles, atomicron=True, negtonan=True, \
                                       errors=True, verbose=False)

    # Clear out spectral data for objects missing either OPT or NIR data
    allNone = True
    for spIdx in range(0, len(spectraRaw['OPT'])):
        if spectraRaw['OPT'][spIdx] is None:
            spectraRaw['NIR'][spIdx] = None
        elif spectraRaw['NIR'][spIdx] is None:
            spectraRaw['OPT'][spIdx] = None
        else:
            allNone = False

    if allNone:
        print('No spectral data found for objects of the given spectral type.')
        if std is False:
            return

    # Convert spectraRaw contents into lists if only one spectral data
    # (This reduces the dimensions of the object holding the data)
    for key in spectraRaw.keys():
        if spectraRaw[key][0] is not None:
            if len(spectraRaw[key][0]) > 3:
                spectraRaw[key] = [
                    spectraRaw[key],
                ]

    # 7. GATHER OBJECTS' NAMES ------------------------------------------------
    # Filtered objects
    refs = [None] * len(specSortIdx)
    for idx, spIdx in enumerate(specSortIdx):
        tmpRef = data[colNameRef][specIdx[spIdx]]
        refs[idx] = str(tmpRef)

    # Standard objects
    refsStd = [None] * len(dataS[colNameRef])
    for idx, spIdx in enumerate(dataS[colNameRef]):
        tmpRef = dataS[colNameRef][idx]
        refsStd[idx] = str(tmpRef)

    # Gather reference numbers of objects
    objRef = data[colNameRef][specIdx[specSortIdx]]

    #8. SMOOTH SPECTRA --------------------------------------------------------
    # Smooth the flux data to a reasonable resolution
    spectraS = {}.fromkeys(OPTNIR_KEYS)
    tmpSpOPT = at.smooth_spec(spectraRaw['OPT'], specFile=specFilesDict['OPT'], \
                              winWidth=10)
    tmpSpNIR = at.smooth_spec(spectraRaw['NIR'], specFile=specFilesDict['NIR'], \
                              winWidth=0)

    spectraS['OPT'] = tmpSpOPT
    spectraS['NIR'] = tmpSpNIR

    # 9. SELECT SPECTRAL DATA FOR THE DIFFERENT BANDS -------------------------
    # Initialize variables
    spectra = {}.fromkeys(BANDS_NAMES)
    spectraN = {}.fromkeys(BANDS_NAMES)

    for bandKey in BANDS_NAMES:
        if bandKey == 'OPT':
            optNIR = 'OPT'
        else:
            optNIR = 'NIR'

        # Select band
        spectra[bandKey] = at.sel_band(spectraS[optNIR], BAND_LIMS[bandKey]['lim'], \
                                       objRef)
        if spectra[bandKey] is None:
            break

        # Normalize band
        spectraN[bandKey], flagN = at.norm_spec(spectra[bandKey], \
                                               BAND_LIMS[bandKey]['limN'], flag=True)
        if flagN:
            print('LIMITS for normalization changed!')
        if spectraN[bandKey] is None:
            break

    # 10. CHARACTERIZE TARGETS (i.e. identify young, blue, to exclude...) -----
    # Determine which targets to exclude
    # (source: file manually generated)
    toExclude = [False] * len(refs)  # FORCE TO INCLUDE ALL TARGETS
    # dataExcl = ascii.read(FOLDER_IN + EXCL_FILE, data_start=0, delimiter=DELL_CHAR, \
    #                       comment=COMM_CHAR, names=['ID'])
    # if len(dataExcl['ID']) > 0:
    #     # Extract data from "Exclude_Objs" file
    #     excludeObjs = np.array(dataExcl['ID'], dtype='string')
    #
    #     # Find intersection of exclude-obj list and filtered targets list
    #     setExclude = set(excludeObjs).intersection(set(refs))
    #
    #     # Create list with intersection targets
    #     if len(setExclude) != 0:
    #         for exclIdx in setExclude:
    #             tmpExclIdx = np.where(np.array(refs) == exclIdx)
    #             toExclude[tmpExclIdx[0]] = True

    # Determine which target is the NIR Standard object
    O_standard = [None] * 3  # Holds standard for output
    stdObjs = [False] * len(refs)
    for idx, spIdx in enumerate(specIdx[specSortIdx]):
        if data[colNameRef][spIdx] == dataS[colNameRef][stdIdx]:
            stdObjs[idx] = True

            if normalize:
                O_standard[0] = spectraN['J'][idx]
                O_standard[1] = spectraN['H'][idx]
                O_standard[2] = spectraN['K'][idx]
            else:
                O_standard[0] = spectra['J'][idx]
                O_standard[1] = spectra['H'][idx]
                O_standard[2] = spectra['K'][idx]

    # Determine which targets are blue
    blueObjs = [False] * len(refs)
    for idx, spIdx in enumerate(specIdx[specSortIdx]):
        if data[colNameBlue][spIdx].upper() == 'YES':
            blueObjs[idx] = True

    # Determine which targets are dusty
    dustyObjs = [False] * len(refs)
    for idx, spIdx in enumerate(specIdx[specSortIdx]):
        if data[colNameDust][spIdx].upper() == 'YES':
            dustyObjs[idx] = True

    # Determine which targets are binary
    binObjs = [False] * len(refs)
    for idx, spIdx in enumerate(specIdx[specSortIdx]):
        if data[colNameBin][spIdx].upper() == 'YES':
            binObjs[idx] = True

    # Determine which targets are peculiar
    pecObjs = [False] * len(refs)
    for idx, spIdx in enumerate(specIdx[specSortIdx]):
        if data[colNamePec][spIdx].upper() == 'YES':
            pecObjs[idx] = True

    # Determine which targets are young
    youngObjs = [False] * len(refs)
    for idx, spIdx in enumerate(specIdx[specSortIdx]):
        if data[colNameYng][spIdx].upper() == 'YES':
            youngObjs[idx] = True

    # Determine which targets are GAMMA
    gammaObjs = [False] * len(refs)
    for idx, spIdx in enumerate(specIdx[specSortIdx]):
        tmpType = data[colNameType][spIdx].encode('utf-8')
        tmpLen = len(tmpType)
        utcA = tmpType[tmpLen - 2]
        utcB = tmpType[tmpLen - 1]
        # GAMMA in utf-8 code is "\xce\xb3"
        if utcA == '\xce' and utcB == '\xb3':
            gammaObjs[idx] = True

    # Determine which targets are BETA
    betaObjs = [False] * len(refs)
    for idx, spIdx in enumerate(specIdx[specSortIdx]):
        tmpType = data[colNameType][spIdx].encode('utf-8')
        tmpLen = len(tmpType)
        utcA = tmpType[tmpLen - 2]
        utcB = tmpType[tmpLen - 1]
        # BETA in utf-8 code is "\xce\xb2"
        if utcA == '\xce' and utcB == '\xb2':
            betaObjs[idx] = True

    # Determine which targets to include in plots (based on user input)
    # Consolidate plotting & template-flux instructions
    grav = grav.upper()
    plotInstructions = ['exclude'] * len(refs)
    templInstructions = [False] * len(refs)
    if grav == 'Y':  # If plot request is Young, include gamma, beta & young targets
        for plotIdx in range(len(refs)):
            if toExclude[plotIdx]:
                continue
            if gammaObjs[plotIdx] or betaObjs[plotIdx] or youngObjs[plotIdx]:
                if blueObjs[plotIdx] or dustyObjs[plotIdx] or pecObjs[plotIdx] \
                                                           or binObjs[plotIdx]:
                    continue
                plotInstructions[plotIdx] = 'young'
                templInstructions[plotIdx] = True

    elif grav == 'G':  # If plot request is Gamma, include only gamma targets
        for plotIdx in range(len(plotInstructions)):
            if toExclude[plotIdx]:
                continue
            if gammaObjs[plotIdx]:
                if blueObjs[plotIdx] or dustyObjs[plotIdx] or pecObjs[plotIdx] \
                                                           or binObjs[plotIdx]:
                    continue
                plotInstructions[plotIdx] = 'young'
                templInstructions[plotIdx] = True

    elif grav == 'B':  # If plot request is Beta, include only beta targets
        for plotIdx in range(len(plotInstructions)):
            if toExclude[plotIdx]:
                continue
            if betaObjs[plotIdx]:
                if blueObjs[plotIdx] or dustyObjs[plotIdx] or pecObjs[plotIdx] \
                                                           or binObjs[plotIdx]:
                    continue
                plotInstructions[plotIdx] = 'young'
                templInstructions[plotIdx] = True

    elif grav == 'F':  # If plot request is Field, include Field & Standard targets
        for plotIdx in range(len(plotInstructions)):
            if toExclude[plotIdx]:
                continue
            if betaObjs[plotIdx] or gammaObjs[plotIdx] or youngObjs[plotIdx]:
                continue
            #if blueObjs[plotIdx] or dustyObjs[plotIdx] or pecObjs[plotIdx] \
            #                                           or binObjs[plotIdx]:
            #    continue
            if stdObjs[plotIdx]:
                plotInstructions[plotIdx] = 'standard'
            else:
                plotInstructions[plotIdx] = 'field'
            templInstructions[plotIdx] = True

    else:  # Otherwise, print Field, gamma, beta, young & Standard targets
        for plotIdx in range(len(plotInstructions)):
            if toExclude[plotIdx]:
                continue
            if blueObjs[plotIdx] or dustyObjs[plotIdx] or pecObjs[plotIdx] \
                                                       or binObjs[plotIdx]:
                continue
            if youngObjs[plotIdx]:
                plotInstructions[plotIdx] = 'young'
            elif stdObjs[plotIdx]:
                plotInstructions[plotIdx] = 'standard'
            else:
                plotInstructions[plotIdx] = 'field'
            templInstructions[plotIdx] = True

    # If all plot instructions are "exclude", then stop procedure (for spectral types)
    allExcl = True
    for instr in plotInstructions:
        if instr != 'exclude':
            allExcl = False
    if allExcl:
        if std:
            return O_standard
        if not uniqueSpec:
            print('No spectral data to plot based on your request.')
            return

    # 11. CALCULATE TEMPLATE SPECTRA FOR SELECTED SET OF SPECTRA -----------------------
    O_template = None  # template calculation below is currently commented out
    # Gather spectra to use to calculate template spectrum
    # if not allExcl:
    #     O_template = [None] * 3 # Holds calculated template for output
    #     templCalculated = False
    #     for bandIdx, bandKey in enumerate(BANDS_NAMES):
    #         if bandKey == 'OPT':
    #             continue
    #
    #         templSpecs = []
    #         for spIdx, spex in enumerate(spectraN[bandKey]):
    #             if templInstructions[spIdx]:
    #                 # Check that spectrum exists
    #                 if spex is None:
    #                     templInstructions[spIdx] = False
    #                     continue
    #
    #                 if bandKey == 'OPT':
    #                     templSpecs.append(spex)
    #                 else:
    #                     # Check that spectrum comes with error values (NIR bands only)
    #                     notNansBool = np.isfinite(spex[2])
    #                     notNans     = np.any(notNansBool)
    #                     if notNans:
    #                         templSpecs.append(spex)
    #                     else:
    #                         print(str(objRef[spIdx]) + ' excluded from template')
    #
    #         # Calculate template spectrum
    #         if len(templSpecs) > 1:
    #             template = at.mean_comb(templSpecs)
    #             templCalculated = True
    #
    #             # Append template to list of spectra to plot in the next step
    #             spectraN[bandKey].append(template)
    #             # Append template to output object
    #             if bandIdx == 0:
    #                 tempIdx = 2
    #             elif bandIdx == 2:
    #                 tempIdx = 0
    #             else:
    #                 tempIdx = 1
    #             O_template[tempIdx] = template
    #
    #     if templCalculated:
    #         refs.append('template')
    #         plotInstructions.append('template')
    #     else:
    #         O_template = None

    # 12. PLOT DATA -----------------------------------------------------------
    if lbl or plot:
        # Gather info on each target
        objInfo = [None] * len(refs)
        for posIdx, spIdx in enumerate(specIdx[specSortIdx]):
            tmpDesig = data[colNameDesig][spIdx]
            tmpJK = data[colNameJK][spIdx]
            tmpSPtype = data[colNameType][spIdx]
            tmpSPtype = tmpSPtype + ' ' * (5 - len(tmpSPtype))  # For alignment purposes

            objInfo[posIdx] = (tmpDesig + ' ' + tmpSPtype + ' ' +
                               '%.2f' % tmpJK)

        if objInfo[-1] is None:
            objInfo[-1] = 'template'
    if plot:
        # Create Figure with Subplots and Annotations
        tmpspectraN = {key: spectraN[key] for key in ['J', 'H', 'K']}
        tmpBANDS_NAMES = BANDS_NAMES[:-1]
        tmpBAND_LIMS = {key: BAND_LIMS[key] for key in ['J', 'H', 'K']}
        figObj = plotspec(tmpspectraN, tmpBANDS_NAMES, tmpBAND_LIMS, objInfo, \
                          spTypeInput, grav, plotInstructions)

        figObj.savefig(FOLDER_OUT + spTypeInput + grav + '.pdf', dpi=600)

    # 13. DETERMINE OUTPUT ----------------------------------------------------
    if templ:
        if std:
            return O_template, O_standard
        else:
            return O_template
    elif std:
        return O_standard
    else:
        if lbl:
            return spectraN, objInfo
        else:
            return spectraN
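# Hypothetical usage sketch: fetch the normalized J/H/K spectra and legend labels for
# field L5 objects without saving a figure:
#   spectraN, objInfo = main('L5', grav='F', plot=False, lbl=True)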
Example #5
  def add_fits(self, fitsPath, source_id, unc_fitsPath='', wavelength_units='', flux_units='', publication_id='', obs_date='', wavelength_order='', regime='', instrument_id='', telescope_id='', mode_id='', airmass=0, comment='', wlog=False, SDSS=False):
    '''
    Checks the header of the fits file at **fitsPath** and inserts the data with **source_id**.
    '''
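    # Relies on module-level imports/aliases used below: os, numpy as np, pf (pyfits),
    # a (astrotools-style reader), u (printing/utility helper), plus the instance's
    # database handles self.query and self.modify.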
    filename, header = os.path.basename(fitsPath), pf.getheader(fitsPath)

    # x- and y-units
    if not wavelength_units:
      try:
        wavelength_units = header['XUNITS'] 
        if 'microns' in wavelength_units or 'Microns' in wavelength_units or 'um' in wavelength_units: wavelength_units = 'um'
      except KeyError:
        try:
           if header['BUNIT']: wavelength_units = 'um'
        except KeyError: wavelength_units = ''
    if not flux_units:
      try: flux_units = header['YUNITS'].replace(' ','')
      except KeyError:
        try: flux_units = header['BUNIT'].replace(' ','')
        except KeyError: flux_units = ''
    flux_units = 'ergs-1cm-2A-1' if 'erg' in flux_units and 'A' in flux_units else 'ergs-1cm-2um-1' if 'erg' in flux_units and 'um' in flux_units else 'Wm-2um-1' if 'W' in flux_units and 'um' in flux_units else 'Wm-2A-1' if 'W' in flux_units and 'A' in flux_units else flux_units  # normalize recognized unit strings, leave anything else as given

    # Date, object name, telescope and instrument
    if not obs_date:
      try: obs_date = header['DATE_OBS']
      except KeyError:
        try: obs_date = header['DATE-OBS']
        except KeyError:
          try: obs_date = header['DATE']
          except KeyError: obs_date = ''
    if not telescope_id:
      try:
        n = header['TELESCOP'].lower() if isinstance(header['TELESCOP'],str) else ''
        telescope_id = 5 if 'hst' in n else 6 if 'spitzer' in n else 7 if 'irtf' in n else 9 if 'keck' in n and 'ii' in n else 8 if 'keck' in n and 'i' in n else 10 if 'kp' in n and '4' in n else 11 if 'kp' in n and '2' in n else 12 if 'bok' in n else 13 if 'mmt' in n else 14 if 'ctio' in n and '1' in n else 15 if 'ctio' in n and '4' in n else 16 if 'gemini' in n and 'north' in n else 17 if 'gemini' in n and 'south' in n else 18 if 'vlt' in n else 19 if '3.5m' in n else 20 if 'subaru' in n else 21 if ('mag' in n and 'ii' in n) or ('clay' in n) else 22 if ('mag' in n and 'i' in n) or ('baade' in n) else None
      except KeyError: telescope_id = ''
    if not instrument_id:
      try: 
        i = header['INSTRUME'].lower()
        instrument_id = 1 if 'r-c spec' in i or 'test' in i or 'nod' in i else 2 if 'gmos-n' in i else 3 if 'gmos-s' in i else 4 if 'fors' in i else 5 if 'lris' in i else 6 if 'spex' in i else 7 if 'ldss3' in i else 8 if 'focas' in i else 9 if 'nirspec' in i else 0
      except KeyError: instrument_id = ''
    try: airmass = header['AIRMASS']
    except: airmass = 0
    
    try:
      if SDSS:
        data = pf.open(fitsPath, memmap=True)[1].data
        flx, wav, err = map(np.array,zip(*data)[:3])
        flx, wav, err, wavelength_units, flux_units = flx*10**-17, 10**wav, np.sqrt(1/err)*10**-17, 'A', 'ergs-1cm-2A-1'
      else:
        data = a.read_spec(fitsPath, errors=True, atomicron=True, negtonan=True, verbose=False, wlog=wlog)[0]
        wav, flx = data[:2]
        try: err = a.read_spec(unc_fitsPath, errors=True, atomicron=True, negtonan=True, verbose=False, wlog=wlog)[0][1] if unc_fitsPath else data[2]
        except: err = ''
      try: snr = flx/err if any(flx) and any(err) else None
      except (TypeError,IndexError): snr = None

      if not regime:
        if wav[0]<500 or wavelength_units=='um': regime = 'OPT' if wav[0]<0.8 and wav[-1]<1.2 else 'NIR' if wav[0]<1.2 and wav[-1]>2 else 'MIR' if wav[-1]>2.5 else None     
        else: regime = 'OPT' if wav[0]<8000 and wav[-1]<12000 else 'NIR' if wav[0]<12000 and wav[-1]>20000 else 'MIR' if wav[-1]>25000 else None     

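      # Pick the smallest positive integer not already used as an id in the spectra table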
      spec_id = sorted(list(set(range(1,self.query.execute("SELECT max(id) FROM spectra").fetchone()[0]+2))-set(zip(*self.query.execute("SELECT id FROM spectra").fetchall())[0])))[0]
      try: self.query.execute("INSERT INTO spectra VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (spec_id, source_id, wav, wavelength_units, flx, flux_units, err, snr, wavelength_order, regime, publication_id, obs_date, instrument_id, telescope_id, mode_id, airmass, filename, comment, header)), self.modify.commit()
      except: self.query.execute("INSERT INTO spectra VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (spec_id, source_id, wav, wavelength_units, flx, flux_units, err, snr, wavelength_order, regime, publication_id, obs_date, instrument_id, telescope_id, mode_id, airmass, filename, comment, None)), self.modify.commit()
      u.printer(['spec_id','source_id','wavelength_unit','flux_unit','regime','publication_id','obs_date', 'instrument_id', 'telescope_id', 'mode_id', 'airmass', 'filename', 'comment'],[[spec_id,source_id, wavelength_units, flux_units, regime, publication_id, obs_date, instrument_id, telescope_id, mode_id, airmass, filename, comment]], empties=True)
      # self.clean_up('spectra')
    except KeyError: print "Couldn't add fits file {}".format(fitsPath); print [filename, source_id, wavelength_units, flux_units, obs_date, instrument_id, telescope_id, mode_id, airmass, comment]
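  # Hypothetical usage sketch (path and source_id are illustrative):
  #   db.add_fits('Spectra/spex_prism_0253+1625.fits', source_id=42)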
Example #6
    def add_data(self, overwrite=False):
        """
        Add spectral and/or photometry data for a target. If the target does not exist in the database, a Target instance is created automatically via the *add_target* method. The data to upload is read from the *upload.txt* file located in the same folder as the database file on your computer. Read the *upload.txt* header for more information.
        
        *overwrite*
          Boolean, whether to overwrite existing data or not.
        """

        # 1. Initialize variables ---------------------------------------------
        DB_FILE = 'BDNYCData.txt'
        UP_FILE = 'upload.txt'  # ascii file with upload data
        UP_HEADERS = ('unum','name','ra', 'dec', 'sptype', 'standard', \
                      'rng', 'res', 'instr', 'date', 'ord_filt', 'fitsname', \
                      'survey', 'band_1', 'val_1', 'err_1', \
                      'band_2', 'val_2', 'err_2', \
                      'band_3', 'val_3', 'err_3')
        NULL = ''  # Null character in ascii file
        DEL = '\t'  # Delimiter character in ascii file
        COMM = '#'  # Comment character in ascii file
        NUM_BANDS = 3

        colNmUnum = UP_HEADERS[0]
        colNmName = UP_HEADERS[1]
        colNmRa = UP_HEADERS[2]
        colNmDec = UP_HEADERS[3]
        colNmSptype = UP_HEADERS[4]
        colNmStd = UP_HEADERS[5]
        colNmRng = UP_HEADERS[6]
        colNmRes = UP_HEADERS[7]
        colNmInstr = UP_HEADERS[8]
        colNmDate = UP_HEADERS[9]
        colNmOrdfilt = UP_HEADERS[10]
        colNmFits = UP_HEADERS[11]
        colNmSurvey = UP_HEADERS[12]
        colNmBand1 = UP_HEADERS[13]
        colNmVal1 = UP_HEADERS[14]
        colNmErr1 = UP_HEADERS[15]
        colNmBand2 = UP_HEADERS[16]
        colNmVal2 = UP_HEADERS[17]
        colNmErr2 = UP_HEADERS[18]
        colNmBand3 = UP_HEADERS[19]
        colNmVal3 = UP_HEADERS[20]
        colNmErr3 = UP_HEADERS[21]

        # 2. Load ascii file --------------------------------------------------
        dataRaw = ad.open(UP_FILE, null=NULL, delimiter=DEL, comment_char=COMM)
        # Store ascii data in a dictionary-type object
        data = {}.fromkeys(UP_HEADERS)
        for colIdx, colData in enumerate(dataRaw):
            data[UP_HEADERS[colIdx]] = colData.tonumpy()

        if data[colNmUnum] is None:
            print 'Upload file empty.'
            return

        # 3. Upload data to database ------------------------------------------
        somethingAdded = False
        for row in range(len(data[colNmUnum])):

            # 3.1 Check if target already exists in database
            unum = data[colNmUnum][row].upper()
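            # "unum + 0" succeeds only for a numeric value; a valid U-number is a
            # 6-character string starting with 'U', so the TypeError branch below is
            # the expected path.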
            try:
                unum + 0
                print 'U-number invalid.'
                continue
            except TypeError:
                if len(unum) != 6:
                    print 'U-number invalid.'
                    continue
                if unum[0] != 'U':
                    print 'U-number invalid.'
                    continue
            dbIdx = self.match_unum(unum)
            if dbIdx is None:
                newTgt = True
            else:
                newTgt = False

            # 3.2 Get target attributes
            if newTgt:
                name = data[colNmName][row]
                if name == '':
                    name = None
                sptype = data[colNmSptype][row].capitalize()
                if sptype == '':
                    sptype = 'XX'
                ra = data[colNmRa][row]
                if len(ra) < 8 or ra == '':
                    print unum + ' Right ascension invalid.'
                    continue
                dec = data[colNmDec][row]
                if len(dec) < 9:
                    print unum + ' Declination invalid.'
                    continue
                std = data[colNmStd][row].capitalize()
                if not (std == 'Yes' or std == 'No'):
                    print unum + ' Standard column must be Yes or No.'
                    continue

            # 3.3 Get range of data
            rng = data[colNmRng][row].lower()
            if not (rng == 'opt' or rng == 'nir' or rng == 'mir'):
                print unum + ' Range invalid.'
                continue

            # 3.4 Get spectrum data
            # 3.4.1 Open & read fits file
            specAdd = False
            fitsName = data[colNmFits][row]
            if fitsName != '':
                fitsRaw = at.read_spec(fitsName, errors=True)
                if fitsRaw[0] is not None:
                    wl = fitsRaw[0][0]
                    flux = fitsRaw[0][1]

                    # 3.4.2 Determine if 3rd dimension is uncertainty or snr
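                    # Heuristic: flux/uncertainty equals the S/N and should be of order
                    # 1e-3 to 1e3; a ratio far outside that range suggests the third
                    # column already holds the S/N rather than an uncertainty.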
                    errNm = None
                    if len(fitsRaw[0]) == 3:
                        errVals = None
                        med = np.median(flux / fitsRaw[0][2])
                        if med < 10**3 and med > 10**-3:
                            errVals = fitsRaw[0][2]
                            errNm = 'uncertainty'
                        else:
                            errVals = fitsRaw[0][2]
                            errNm = 'snr'

                    # 3.4.3 Get spectrum attributes
                    attsOK = True
                    res = data[colNmRes][row].lower()
                    if not (res == 'high' or res == 'med' or res == 'low'):
                        print unum + ' Resolution invalid.'
                        attsOK = False
                    instr = data[colNmInstr][row]
                    if instr == '':
                        print unum + ' Must provide Instrument for spectrum.'
                        attsOK = False
                    date = data[colNmDate][row].lower()
                    if len(date) != 9:
                        print unum + ' Date invalid.'
                        attsOK = False
                    if date[-1] == '\r':
                        date = date[:-1]
                    ord_filt = data[colNmOrdfilt][row]
                    if ord_filt == '':
                        print unum + ' Must provide Order/Filter for spectrum.'
                        attsOK = False

                    # 3.4.4 Create dictionary structure with spectrum data
                    if attsOK:
                        specAdd = True
                        specDict = {instr:{date:{ord_filt:{'wl':wl, \
                                                'flux':flux, errNm:errVals}}}}

            # 3.5 Get target photometry attributes
            # 3.5.1 Check if photometry data was provided
            photAdd = False
            try:
                survey = data[colNmSurvey][row]
            except TypeError:
                survey = ''
            # 3.5.2 Get data for bands
            if survey != '':
                bands = []
                vals = []
                errs = []
                photOK = True

                for bndNum in range(1, NUM_BANDS + 1):
                    bndNm = 'band_' + str(bndNum)
                    valNm = 'val_' + str(bndNum)
                    errNm = 'err_' + str(bndNum)
                    try:
                        # Get band name
                        bands.append(data[bndNm][row])
                        if bands[bndNum - 1] == '':
                            photOK = False
                        else:
                            try:
                                bands[bndNum - 1][0]
                            except IndexError:
                                photOK = False
                        # Get band photometry value
                        try:
                            vals.append(data[valNm][row])
                            try:
                                vals[bndNum - 1] + 0
                            except TypeError:
                                if vals[bndNum - 1][-1] == '\r':
                                    vals[bndNum - 1] = vals[bndNum - 1][:-1]
                                    try:
                                        vals[bndNum - 1] + 0
                                    except TypeError:
                                        photOK = False
                                else:
                                    photOK = False
                        except TypeError:
                            photOK = False
                        # Get band photometry value error
                        try:
                            errs.append(data[errNm][row])
                            if errs[bndNum - 1] == '':
                                errs[bndNum - 1] = None
                            else:
                                try:
                                    errs[bndNum - 1] + 0
                                except TypeError:
                                    if errs[bndNum - 1][0] == '\r':
                                        errs[bndNum - 1] = None
                                    else:
                                        try:
                                            errs[bndNum - 1] = float(
                                                errs[bndNum - 1])
                                        except ValueError:
                                            photOK = False
                        except TypeError:
                            errs.append(None)
                    except TypeError:
                        if bndNum == 1:
                            photOK = False

                # 3.5.3 Create dictionary structure with photometry data
                if photOK:
                    photDict = {survey: {}}
                    for bdIdx, band in enumerate(bands):
                        if band != '':
                            photAdd = True
                            photDict[survey][band] = {'val': vals[bdIdx], \
                                                      'err': errs[bdIdx]}
                else:
                    print unum + ' Photometry data invalid.'

            # 3.6 Create range-level dictionary with all data
            if photAdd and specAdd:
                rngDict = {res: specDict, 'phot': photDict}
            elif photAdd and not specAdd:
                rngDict = {'phot': photDict}
            elif not photAdd and specAdd:
                rngDict = {res: specDict}
            else:
                print 'No data to add for ' + unum
                continue

            # 3.7 Add new target to database if necessary
            if newTgt:
                # 3.7.1 Create Target instance
                if rng == 'opt':
                    target = Target(name, unum, ra, dec, sptype, \
                                         rngDict, {}, {}, std)
                elif rng == 'nir':
                    target = Target(name, unum, ra, dec, sptype, \
                                         {}, rngDict, {}, std)
                elif rng == 'mir':
                    target = Target(name, unum, ra, dec, sptype, \
                                         {}, {}, rngDict, std)

                # 3.7.2 Add to database
                self.add_target(target, verbose=False)
                somethingAdded = True
                print unum + ' new target added to database: Index # ' + \
                      str(len(self.targets) - 1)

            # 3.8 Add new data to database if target already exists
            else:
                addedPhot = False
                addedSpec = False
                if rng == 'opt':
                    currentTgt = self.targets[dbIdx].opt
                elif rng == 'nir':
                    currentTgt = self.targets[dbIdx].nir
                elif rng == 'mir':
                    currentTgt = self.targets[dbIdx].mir

                # 3.8.1 Check dictionary level where to add data
                if currentTgt != {}:
                    # For photometry data
                    if photAdd:
                        try:
                            currentTgt['phot'][survey]
                            for bnd in rngDict['phot'][survey].keys():
                                try:
                                    currentTgt['phot'][survey][bnd]
                                    # Overwrite existing data if requested
                                    if overwrite:
                                        addedPhot = True
                                        currentTgt['phot'][survey][bnd] = \
                                                   rngDict['phot'][survey][bnd]
                                    else:
                                        print unum  + ' ' + bnd + \
                                              ' photometry already exists' + \
                                              ' in database.'
                                except KeyError:
                                    addedPhot = True
                                    currentTgt['phot'][survey][bnd] = \
                                                   rngDict['phot'][survey][bnd]
                        except KeyError:
                            addedPhot = True
                            currentTgt['phot'][survey] = \
                                                    rngDict['phot'][survey]
                    # For spectrum data
                    if specAdd:
                        try:
                            currentTgt[res][instr]
                            try:
                                currentTgt[res][instr][date]
                                try:
                                    currentTgt[res][instr][date][ord_filt]
                                    # Overwrite existing data if requested
                                    if overwrite:
                                        addedSpec = True
                                        currentTgt[res][instr][date][ord_filt] \
                                           = rngDict[res][instr][date][ord_filt]
                                    else:
                                        print unum + ', ' + \
                                        data[colNmFits][row] + \
                                        ' spectrum already exists in database.'
                                except KeyError:
                                    addedSpec = True
                                    currentTgt[res][instr][date][ord_filt] = \
                                            rngDict[res][instr][date][ord_filt]
                            except KeyError:
                                addedSpec = True
                                currentTgt[res][instr][date] = \
                                                    rngDict[res][instr][date]
                        except KeyError:
                            addedSpec = True
                            currentTgt[res][instr] = rngDict[res][instr]
                else:
                    addedSpec = True
                    # The range dict is empty; update it in place so the new data
                    # actually persists on the Target instance (rebinding the local
                    # name alone would not).
                    currentTgt.update(rngDict)

                if addedSpec or addedPhot:
                    somethingAdded = True
                    print unum + ' new data added to target in database.'

        # 4. Commit additions to database file --------------------------------
        if somethingAdded:
            # Check that database txt file exists in current folder
            try:
                f = open(DB_FILE, 'rb')
            except IOError:
                print DB_FILE + ' could not be loaded. Check that it is ' + \
                      'in the current folder. Process stopped.'
                return
            f.close()
            f = open(DB_FILE, 'wb')
            print 'Updating ' + DB_FILE + '...'
            pickle.dump(self, f)
            f.close()

            print 'Remember to push updated ' + DB_FILE + ' to github.'

        return
Example #7
 def add_data(self, overwrite=False):
     """
     Add spectral and/or photometry data for a target. If the target does not exist in the database, a Target instance is created automatically via the *add_target* method. The data to upload is read from the *upload.txt* file located in the same folder as the database file on your computer. Read the *upload.txt* header for more information.
     
     *overwrite*
       Boolean, whether to overwrite existing data or not.
     """
     
     # 1. Initialize variables ---------------------------------------------
     DB_FILE = 'BDNYCData.txt'
     UP_FILE = 'upload.txt' # ascii file with upload data
     UP_HEADERS = ('unum','name','ra', 'dec', 'sptype', 'standard', \
                   'rng', 'res', 'instr', 'date', 'ord_filt', 'fitsname', \
                   'survey', 'band_1', 'val_1', 'err_1', \
                   'band_2', 'val_2', 'err_2', \
                   'band_3', 'val_3', 'err_3')
     NULL = ''   # Null character in ascii file
     DEL  = '\t' # Delimiter character in ascii file
     COMM = '#'  # Comment character in ascii file
     NUM_BANDS = 3
     
     colNmUnum = UP_HEADERS[0]
     colNmName = UP_HEADERS[1]
     colNmRa   = UP_HEADERS[2]
     colNmDec  = UP_HEADERS[3]
     colNmSptype = UP_HEADERS[4]
     colNmStd    = UP_HEADERS[5]
     colNmRng    = UP_HEADERS[6]
     colNmRes    = UP_HEADERS[7]
     colNmInstr  = UP_HEADERS[8]
     colNmDate   = UP_HEADERS[9]
     colNmOrdfilt = UP_HEADERS[10]
     colNmFits    = UP_HEADERS[11]
     colNmSurvey  = UP_HEADERS[12]
     colNmBand1 = UP_HEADERS[13]
     colNmVal1  = UP_HEADERS[14]
     colNmErr1  = UP_HEADERS[15]
     colNmBand2 = UP_HEADERS[16]
     colNmVal2  = UP_HEADERS[17]
     colNmErr2  = UP_HEADERS[18]
     colNmBand3 = UP_HEADERS[19]
     colNmVal3  = UP_HEADERS[20]
     colNmErr3  = UP_HEADERS[21]
     
     # 2. Load ascii file --------------------------------------------------
     dataRaw = ad.open(UP_FILE, null=NULL, delimiter=DEL, comment_char=COMM)
     # Store ascii data in a dictionary-type object
     data = {}.fromkeys(UP_HEADERS)
     for colIdx, colData in enumerate(dataRaw):
         data[UP_HEADERS[colIdx]] = colData.tonumpy()
     
     if data[colNmUnum] is None:
         print 'Upload file empty.'
         return
     
     # 3. Upload data to database ------------------------------------------
     somethingAdded = False
     for row in range(len(data[colNmUnum])):
         
         # 3.1 Check if target already exists in database
         unum = data[colNmUnum][row].upper()
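         # Validity check: "unum + 0" succeeds only for numbers, so reaching the
         # TypeError branch means unum is a string; it must then be 6 characters
         # long and start with 'U' (e.g. 'U12345').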
         try:
             unum + 0
             print 'U-number invalid.'
             continue
         except TypeError:
             if len(unum) != 6:
                 print 'U-number invalid.'
                 continue
             if unum[0] != 'U':
                 print 'U-number invalid.'
                 continue
         dbIdx = self.match_unum(unum)
         if dbIdx is None:
             newTgt = True
         else:
             newTgt = False
         
         # 3.2 Get target attributes
         if newTgt:
             name = data[colNmName][row]
             if name == '':
                 name = None
             sptype = data[colNmSptype][row].capitalize()
             if sptype == '':
                 sptype = 'XX'
             ra = data[colNmRa][row]
             if len(ra) < 8 or ra == '':
                 print unum + ' Right ascension invalid.'
                 continue
             dec = data[colNmDec][row]
             if len(dec) < 9:
                 print unum + ' Declination invalid.'
                 continue
             std = data[colNmStd][row].capitalize()
             if not (std == 'Yes' or std == 'No'):
                 print unum + ' Standard column must be Yes or No.'
                 continue
         
         # 3.3 Get range of data
         rng = data[colNmRng][row].lower()
         if not (rng == 'opt' or rng == 'nir' or rng == 'mir'):
             print unum + ' Range invalid.'
             continue
         
         # 3.4 Get spectrum data
         # 3.4.1 Open & read fits file
         specAdd = False
         fitsName = data[colNmFits][row]
         if fitsName != '':
             fitsRaw = at.read_spec(fitsName, errors=True)
             if fitsRaw[0] is not None:
                 wl   = fitsRaw[0][0]
                 flux = fitsRaw[0][1]
                 
                 # 3.4.2 Determine if 3rd dimension is uncertainty or snr
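                 # (Heuristic: a genuine uncertainty array has roughly the same scale
                 #  as the flux, so the median of flux/err stays within ~(1e-3, 1e3);
                 #  for spectra in physical flux units an SNR column pushes that
                 #  median far outside the window.)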
                 errNm = None
                 if len(fitsRaw[0]) == 3:
                     errVals = fitsRaw[0][2]
                     # Median flux-to-error ratio decides how to label the 3rd column
                     med = np.median(flux/errVals)
                     if med < 10**3 and med > 10**-3:
                         errNm = 'uncertainty'
                     else:
                         errNm = 'snr'
                 
                 # 3.4.3 Get spectrum attributes
                 attsOK = True
                 res = data[colNmRes][row].lower()
                 if not (res == 'high' or res == 'med' or res == 'low'):
                     print unum + ' Resolution invalid.'
                     attsOK = False
                 instr = data[colNmInstr][row]
                 if instr == '':
                     print unum + ' Must provide Instrument for spectrum.'
                     attsOK = False
                 date  = data[colNmDate][row].lower()
                 # Strip a trailing carriage return before validating the length
                 if date.endswith('\r'):
                     date = date[:-1]
                 if len(date) != 9:
                     print unum + ' Date invalid.'
                     attsOK = False
                 ord_filt = data[colNmOrdfilt][row]
                 if ord_filt == '':
                     print unum + ' Must provide Order/Filter for spectrum.'
                     attsOK = False
                 
                 # 3.4.4 Create dictionary structure with spectrum data
                 if attsOK:
                     specAdd = True
                     specDict = {instr:{date:{ord_filt:{'wl':wl, \
                                             'flux':flux, errNm:errVals}}}}
         
         # 3.5 Get target photometry attributes
         # 3.5.1 Check if photometry data was provided
         photAdd = False
         try:
             survey = data[colNmSurvey][row]
         except TypeError:
             survey = ''
         # 3.5.2 Get data for bands
         if survey != '':
             bands = []
             vals  = []
             errs  = []
             photOK = True
             
             for bndNum in range(1,NUM_BANDS + 1):
                 bndNm = 'band_' + str(bndNum)
                 valNm = 'val_' + str(bndNum)
                 errNm = 'err_' + str(bndNum)
                 try:
                     # Get band name
                     bands.append(data[bndNm][row])
                     if bands[bndNum - 1] == '':
                         photOK = False
                     else:
                         try:
                             bands[bndNum - 1][0]
                         except IndexError:
                             photOK = False
                     # Get band photometry value
                     try:
                         vals.append(data[valNm][row])
                         try:
                             vals[bndNum - 1] + 0
                         except TypeError:
                             if vals[bndNum - 1][-1] == '\r':
                                 vals[bndNum - 1] = vals[bndNum - 1][:-1]
                                 try:
                                     vals[bndNum - 1] + 0
                                 except TypeError:
                                     photOK = False
                             else:
                                 photOK = False
                     except TypeError:
                         photOK = False
                     # Get band photometry value error
                     try:
                         errs.append(data[errNm][row])
                         if errs[bndNum - 1] == '':
                             errs[bndNum - 1] = None
                         else:
                             try:
                                 errs[bndNum - 1] + 0
                             except TypeError:
                                 if errs[bndNum - 1][0] == '\r':
                                     errs[bndNum - 1] = None
                                 else:
                                     try:
                                         errs[bndNum - 1] = float(errs[bndNum - 1])
                                     except ValueError:
                                         photOK = False
                     except TypeError:
                         errs.append(None)
                 except TypeError:
                     if bndNum == 1:
                         photOK = False
             
             # 3.5.3 Create dictionary structure with photometry data
             if photOK:
                 photDict = {survey:{}}
                 for bdIdx, band in enumerate(bands):
                     if band != '':
                         photAdd = True
                         photDict[survey][band] = {'val': vals[bdIdx], \
                                                   'err': errs[bdIdx]}                    
             else:
                 print unum + ' Photometry data invalid.'
         
        # 3.6 Create range-level dictionary with all data
         if photAdd and specAdd:
             rngDict = {res:specDict, 'phot':photDict}
         elif photAdd and not specAdd:
             rngDict = {'phot':photDict}
         elif not photAdd and specAdd:
             rngDict = {res:specDict}
         else:
             print 'No data to add for ' + unum
             continue
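         # rngDict now holds everything to merge for this wavelength range.
         # Illustrative shape (hypothetical keys and values):
         #   {'low': {'SpeX': {'04apr2008': {'prism': {'wl': ..., 'flux': ...,
         #                                             'uncertainty': ...}}}},
         #    'phot': {'2MASS': {'J': {'val': 15.2, 'err': 0.05}}}}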
         
         # 3.7 Add new target to database if necessary
         if newTgt:
             # 3.7.1 Create Target instance
             if rng == 'opt':
                 target = Target(name, unum, ra, dec, sptype, \
                                      rngDict, {}, {}, std)
             elif rng == 'nir':
                 target = Target(name, unum, ra, dec, sptype, \
                                      {}, rngDict, {}, std)
             elif rng == 'mir':
                 target = Target(name, unum, ra, dec, sptype, \
                                      {}, {}, rngDict, std)
             
             # 3.7.2 Add to database
             self.add_target(target, verbose=False)
             somethingAdded = True
             print unum + ' new target added to database: Index # ' + \
                   str(len(self.targets) - 1)
         
         # 3.8 Add new data to database if target already exists
         else:
             addedPhot = False
             addedSpec = False
             if rng == 'opt':
                 currentTgt = self.targets[dbIdx].opt
             elif rng == 'nir':
                 currentTgt = self.targets[dbIdx].nir
             elif rng == 'mir':
                 currentTgt = self.targets[dbIdx].mir
             
             # 3.8.1 Check dictionary level where to add data
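             # (Each nested try below probes one level of the existing dictionary;
             #  on KeyError the new data is grafted in at the first missing level,
             #  so existing entries are only replaced when overwrite is True.)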
             if currentTgt != {}:
                 # For photometry data
                 if photAdd:
                     try:
                         currentTgt['phot'][survey]
                         for bnd in rngDict['phot'][survey].keys():
                             try:
                                 currentTgt['phot'][survey][bnd]
                                 # Overwrite existing data if requested
                                 if overwrite:
                                     addedPhot = True
                                     currentTgt['phot'][survey][bnd] = \
                                                rngDict['phot'][survey][bnd]
                                 else:
                                     print unum  + ' ' + bnd + \
                                           ' photometry already exists' + \
                                           ' in database.'
                             except KeyError:
                                 addedPhot = True
                                 currentTgt['phot'][survey][bnd] = \
                                                rngDict['phot'][survey][bnd]
                     except KeyError:
                         addedPhot = True
                         currentTgt['phot'][survey] = \
                                                 rngDict['phot'][survey]
                 # For spectrum data
                 if specAdd:
                     try:
                         currentTgt[res][instr]
                         try:
                             currentTgt[res][instr][date]
                             try:
                                 currentTgt[res][instr][date][ord_filt]
                                 # Overwrite existing data if requested
                                 if overwrite:
                                     addedSpec = True
                                     currentTgt[res][instr][date][ord_filt] \
                                        = rngDict[res][instr][date][ord_filt]
                                 else:
                                     print unum + ', ' + \
                                     data[colNmFits][row] + \
                                     ' spectrum already exists in database.'
                             except KeyError:
                                 addedSpec = True
                                 currentTgt[res][instr][date][ord_filt] = \
                                         rngDict[res][instr][date][ord_filt]
                         except KeyError:
                             addedSpec = True
                             currentTgt[res][instr][date] = \
                                                 rngDict[res][instr][date]
                     except KeyError:
                         addedSpec = True
                         currentTgt[res][instr] = rngDict[res][instr]
             else:
                 addedSpec = True
                 currentTgt = rngDict
             
             if addedSpec or addedPhot:
                 somethingAdded = True
                 print unum + ' new data added to target in database.'
     
     # 4. Commit additions to database file --------------------------------
     if somethingAdded:
         # Check that database txt file exists in current folder
         try:
             f = open(DB_FILE,'rb')
         except IOError:
             print DB_FILE + ' could not be loaded. Check that it is ' + \
                   'in the current folder. Process stopped.'
             return
         f.close()
         f = open(DB_FILE,'wb')
         print 'Updating ' + DB_FILE + '...'
         pickle.dump(self, f)
         f.close()
         
         print 'Remember to push updated ' + DB_FILE + ' to github.'
     
     return
Example #8
def main(spInput, grav='', plot=True, templ=False, std=False, excluded=False, normalize=True):
    # 1. LOAD RELEVANT MODULES ------------------------------------------------
    from astropy.io import ascii
    import astrotools as at
    import numpy as np
    import sys
    import os
    import pdb
    import matplotlib.pyplot as plt
    
    # 2. SET UP VARIABLES -----------------------------------------------------
    # Customizable variables <><><><><><><><><><><><><><><><><><><><><><><><><><><>
    FOLDER_ROOT = '/Users/alejo/Dropbox/Project_0/more data/'  # Location of NIR and OPT folders
    FOLDER_IN = '/Users/alejo/Dropbox/Project_0/data/' # Location of input files
    FOLDER_OUT = '/Users/alejo/Dropbox/Project_0/plots/' # Location to save output figures
    FILE_IN = 'nir_spex_prism_with_optical.txt' # ASCII file w/ data
    # <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
    
    # For TXT objects file (updatable here directly)
    HDR_FILE_IN = ('Ref','Designation','J','H','K','SpType','SpType_T','NIRFobs',\
                   'NIRFtel','NIRfile','OPTobs','OPTtel','OPTinst','OPTfile',\
                   'Young?','Dusty?','Blue?','Binary?','Pec?')
    
    # For TXT standards file
    FILE_IN_STD = 'NIR_Standards_K10.txt'   # ASCII file w/ standards
    HDR_FILE_IN_STD = ('Ref','Designation','NIR SpType','OPT SpType')
    colNameNIRS = HDR_FILE_IN_STD[2]
    colNameOPTS = HDR_FILE_IN_STD[3]
    
    OPTNIR_KEYS = ['OPT','NIR']
    BANDS_NAMES = ['K','H','J','OPT']
    data       = ''
    dataRaw    = ''
    specFiles  = ''
    spectraRaw = ''
    spectra    = ''
    
    colNameRef   = HDR_FILE_IN[0]
    colNameDesig = HDR_FILE_IN[1]
    colNameJ     = HDR_FILE_IN[2]
    colNameK     = HDR_FILE_IN[4]
    colNameJK    = 'J-K'
    colNameType  = HDR_FILE_IN[6]
    colNameNIRfile = HDR_FILE_IN[9]
    colNameYng   = HDR_FILE_IN[14]
    colNameDust  = HDR_FILE_IN[15]
    colNameBlue  = HDR_FILE_IN[16]
    colNameBin   = HDR_FILE_IN[17]
    colNamePec   = HDR_FILE_IN[18]
    
    # Initialize dictionary to store NIR bands limits and normalizing sections
    BAND_LIMS = {}.fromkeys(BANDS_NAMES)
    for bandKey in BANDS_NAMES:
        BAND_LIMS[bandKey] = dict(lim = [None] * 2, limN = [None] * 2)
    
    # Set wavelength limits for bands
    # Limits are in microns
    BAND_LIMS['OPT']['lim'][0] = 0.65
    BAND_LIMS['OPT']['lim'][1] = 0.90
    BAND_LIMS['J'  ]['lim'][0] = 0.8
    BAND_LIMS['J'  ]['lim'][1] = 1.4 
    BAND_LIMS['H'  ]['lim'][0] = 1.4
    BAND_LIMS['H'  ]['lim'][1] = 1.9
    BAND_LIMS['K'  ]['lim'][0] = 1.9
    BAND_LIMS['K'  ]['lim'][1] = 2.4
    
    # Set wl limits for normalizing sections
    # Limits are in microns
    BAND_LIMS['OPT']['limN'][0] = 0.66
    BAND_LIMS['OPT']['limN'][1] = 0.89
    BAND_LIMS['J'  ]['limN'][0] = 0.87
    BAND_LIMS['J'  ]['limN'][1] = 1.39
    BAND_LIMS['H'  ]['limN'][0] = 1.41
    BAND_LIMS['H'  ]['limN'][1] = 1.89
    BAND_LIMS['K'  ]['limN'][0] = 1.91
    BAND_LIMS['K'  ]['limN'][1] = 2.39
    
    
    # 3. READ DATA FROM MAIN INPUT FILE ---------------------------------------
    DELL_CHAR = '\t' # Delimiter character
    COMM_CHAR = '#'  # Comment character
    
    # File with ALL objects (source: query in Access)
    dataRaw = ascii.read(FOLDER_IN + FILE_IN, format='no_header', \
                         delimiter=DELL_CHAR, comment=COMM_CHAR, data_start=1)
    
    # Store data in a dictionary-type object
    data = {}.fromkeys(HDR_FILE_IN)
    for colIdx,colname in enumerate(dataRaw.colnames):
        data[HDR_FILE_IN[colIdx]] = np.array(dataRaw[colname])
    
    # File with standards (source: manually generated)
    dataRawS = ascii.read(FOLDER_IN + FILE_IN_STD, data_start=0)
    
    # Store standard data in a dictionary-type object
    dataS = {}.fromkeys(HDR_FILE_IN_STD)
    for colIdx,colname in enumerate(dataRawS.colnames):
        dataS[HDR_FILE_IN_STD[colIdx]] = np.array(dataRawS[colname])
    
    
    # 4. FORMAT SOME ASCII COLUMNS --------------------------------------------
    # 4.1 Convert into unicode the Spectral Type-Text column
    uniSpType = [None] * len(data[colNameType])
    for sIdx,sType in enumerate(data[colNameType]):
        uniSpType[sIdx] = sType #.decode('utf-8')
    data[colNameType] = np.array(uniSpType)
    
    # 4.2 Calculate J-K Color
    data[colNameJK] = data[colNameJ] - data[colNameK]
    
    # 4.3 Format Designation Number from Designation Column
    #     (From "XX XX XX.X +XX XX XX.X" to "XXXX+XXXX")
    for desigIdx,desig in enumerate(data[colNameDesig]):
        desig    = ''.join(desig.split())
        signType = '+'
        signPos  = desig.find(signType)
        if signPos == -1:
            signType = '-'
            signPos  = desig.find(signType)
        
        desigProper = desig[:4] + signType + desig[signPos+1:signPos+5]
        data[colNameDesig][desigIdx] = desigProper
    
    
    # 5. FILTER DATA BY USER INPUT IN spInput ---------------------------------
    specIdx = []
    # Find all spectra of same spectral type
    for spIdx,spType in enumerate(data[colNameType]):
        if spType.upper().startswith(spInput.upper()):
            specIdx.append(spIdx)
    if not specIdx:
        print('No targets found for given input.')
        if std is False:
            return
    spTypeInput = spInput.upper()
    
    # Find NIR standard target that matches user's spectral type
    stdIdx = []
    for spIdx,spType in enumerate(dataS[colNameNIRS]):
        if spType.upper().startswith(spTypeInput):
            stdIdx.append(spIdx)
    
    # Add NIR standard target to list of filtered objects if not there already
    # (It may not be included in first filter because OPT SpT != NIR SpT)
    if dataS[colNameNIRS][stdIdx] != dataS[colNameOPTS][stdIdx]:
        for spIdx,spRef in enumerate(data[colNameRef]):
            if spRef == int(dataS[colNameRef][stdIdx][0]):
                if spIdx not in specIdx:
                    specIdx.append(spIdx)
    
    # Sort relevant objects by JKmag value
    specIdx     = np.array(specIdx)
    specSortIdx = data[colNameJK][specIdx].argsort()
    
    
    # 6. READ SPECTRAL DATA FROM SPECTRAL FILES -------------------------------
    spectraRaw    = {}.fromkeys(OPTNIR_KEYS) # Used to store the raw data from fits files
    specFilesDict = {}.fromkeys(OPTNIR_KEYS) # Used for reference purposes
    
    for key in OPTNIR_KEYS:
        specFiles = [None] * len(specSortIdx)
        
        for sortIdx,specSort in enumerate(specSortIdx):
            if data[key + 'file'][specIdx[specSort]][-4:] == '.dat': continue
            if data[key + 'file'][specIdx[specSort]] == 'include': continue
            tmpFullName = FOLDER_ROOT + key + '/' + data[key + 'file'][specIdx[specSort]]
            specFiles[sortIdx] = tmpFullName
        specFilesDict[key] = specFiles
        
        spectraRaw[key] = at.read_spec(specFiles, atomicron=True, negtonan=True, \
                                       errors=True, verbose=False)
    
    # Clear out spectral data for objects missing either OPT or NIR data
    allNone = True
    for spIdx in range(0,len(spectraRaw['OPT'])):
        if spectraRaw['OPT'][spIdx] is None:
            spectraRaw['NIR'][spIdx] = None
        elif spectraRaw['NIR'][spIdx] is None:
            spectraRaw['OPT'][spIdx] = None
        else:
            allNone = False
    
    if allNone:
        print('No spectral data found for objects of the given spectral type.')
        if std is False:
            return
    
    # Convert spectraRaw contents into lists if only one spectral data
    # (This reduces the dimensions of the object holding the data)
    for key in spectraRaw.keys():
        if spectraRaw[key][0] is not None:
            if len(spectraRaw[key][0]) > 3:
                spectraRaw[key] = [spectraRaw[key],]
    
    
    # 7. GATHER OBJECTS' NAMES ------------------------------------------------
    # Filtered objects
    refs = [None] * len(specSortIdx)
    NIRfilenames = [None] * len(specSortIdx)
    for idx,spIdx in enumerate(specSortIdx):
        tmpRef    = data[colNameRef][specIdx[spIdx]]
        refs[idx] = str(int(tmpRef))
        NIRfilenames[idx] = data[colNameNIRfile][specIdx[spIdx]]
    
    # Standard objects
    refsStd = [None] * len(dataS[colNameRef])
    for idx,spIdx in enumerate(dataS[colNameRef]):
        tmpRef       = dataS[colNameRef][idx]
        refsStd[idx] = str(int(tmpRef))
    
    # Gather reference numbers of objects
    objRef = data[colNameRef][specIdx[specSortIdx]]
    
    
    # 8. SMOOTH SPECTRA -------------------------------------------------------
    # Smooth the flux data to a reasonable resolution
    spectraS = {}.fromkeys(OPTNIR_KEYS)
    tmpSpOPT = at.smooth_spec(spectraRaw['OPT'], specFile=specFilesDict['OPT'], \
                              winWidth=10)
    tmpSpNIR = at.smooth_spec(spectraRaw['NIR'], specFile=specFilesDict['NIR'], \
                              winWidth=0)
    
    spectraS['OPT'] = tmpSpOPT
    spectraS['NIR'] = tmpSpNIR
    
    # 9. SELECT SPECTRAL DATA FOR THE DIFFERENT BANDS -------------------------
    # Initialize variables
    spectra  = {}.fromkeys(BANDS_NAMES)
    spectraN = {}.fromkeys(BANDS_NAMES)
    
    for bandKey in BANDS_NAMES:
        if bandKey == 'OPT':
            optNIR = 'OPT'
        else:
            optNIR = 'NIR'
        
        # Select band
        spectra[bandKey] = at.sel_band(spectraS[optNIR], BAND_LIMS[bandKey]['lim'], \
                                       objRef)
        if spectra[bandKey] is None:
            break
        
        # Normalize band
        spectraN[bandKey], flagN = at.norm_spec(spectra[bandKey], \
                                               BAND_LIMS[bandKey]['limN'], flag=True)
        if flagN:
            print(bandKey + ' LIMITS for normalization changed!')
        if spectraN[bandKey] is None:
            break
    
    
    # 10. CHARACTERIZE TARGETS (i.e. identify young, field, and excluded) -----
    grav = grav.lower()
    toInclude = [False] * len(refs)
    # toInclude_LG = [False] * len(refs)
    toExclude = [False] * len(refs)
    dataIncl = []
    # dataIncl_LG = []
    dataExcl = []
    
    # 10.1 Extract NIR file names from "keepers" file
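    # (Assumed file-naming convention, inferred from the parsing below: names contain
    #  both 'keepers' and the spInput string, and the second underscore-separated
    #  token is the spectral type plus an optional gravity letter, e.g.
    #  'xxx_L2_keepers.txt' for field or 'xxx_L2g_keepers.txt' for gamma.)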
    fileslist = os.listdir(FOLDER_IN)
    inclFile = ''
    for fl in fileslist:
        if fl.find('keepers') == -1 or fl.find(spInput) == -1: continue
        
        tmpfl = fl.split('_')
        if len(tmpfl[1]) == 2 and grav == 'f':
            inclFile = fl
            break
        elif len(tmpfl[1]) == 3:
            if tmpfl[1][-1] == grav:
                inclFile = fl
                break
    
    if inclFile != '':
        dataIncl = ascii.read(FOLDER_IN + inclFile, format='no_header', \
                              delimiter=DELL_CHAR, comment=COMM_CHAR)
    if len(dataIncl) > 0:
        includeObjs = np.array(dataIncl['col1']).astype(object)
        includeObjs = includeObjs + np.repeat('.fits', len(dataIncl))
        # Find intersection of include-obj list and filtered targets list
        setInclude = set(includeObjs).intersection(set(NIRfilenames))
        # Create list with intersection targets
        if len(setInclude) != 0:
            for inclIdx in setInclude:
                tmpInclIdx = np.where(np.array(NIRfilenames) == inclIdx)[0]
                toInclude[tmpInclIdx[0]] = True  # index the first (and normally only) match
    
    # 10.2 Extract NIR file names from "rejects" file
    exclFile = ''
    for fl in fileslist:
        if fl.find('rejects') == -1 or fl.find(spInput) == -1: continue
        
        tmpfl = fl.split('_')
        if len(tmpfl[1]) == 2 and grav == 'f':
            exclFile = fl
            break
        elif len(tmpfl[1]) == 3:
            if tmpfl[1][-1] == grav:
                exclFile = fl
                break
    
    if exclFile != '':
        try:
            dataExcl = ascii.read(FOLDER_IN + exclFile, format='no_header', \
                                  delimiter=DELL_CHAR, comment=COMM_CHAR)
        except:
            dataExcl = []
    if len(dataExcl) == 0 and excluded:
        print('No objects found in REJECTS file. Nothing to plot.')
        return
    elif len(dataExcl) > 0:
        excludeObjs = np.array(dataExcl['col1']).astype(object)
        excludeObjs = excludeObjs + np.repeat('.fits', len(dataExcl))
        # Find intersection of exclude-obj list and filtered targets list
        setExclude = set(excludeObjs).intersection(set(NIRfilenames))
        # Create list with intersection targets
        if len(setExclude) != 0:
            for exclIdx in setExclude:
                tmpExclIdx = np.where(np.array(NIRfilenames) == exclIdx)[0]
                toExclude[tmpExclIdx[0]] = True  # index the first (and normally only) match
    
    # 10.3 Determine which target is the NIR Standard object
    O_standard = [None] * 3 # Holds standard for output
    stdObjs = [False] * len(refs)
    for idx,spIdx in enumerate(specIdx[specSortIdx]):
        if data[colNameRef][spIdx] == dataS[colNameRef][stdIdx]:
            stdObjs[idx] = True
            
            if normalize:
                O_standard[0] = spectraN['J'][idx]
                O_standard[1] = spectraN['H'][idx]
                O_standard[2] = spectraN['K'][idx]
            else:
                O_standard[0] = spectra['J'][idx]
                O_standard[1] = spectra['H'][idx]
                O_standard[2] = spectra['K'][idx]
    
    # 10.4 Determine which targets to include in plots (based on user input)
    # Consolidate plotting & template-flux instructions
    plotInstructions  = ['no'] * len(refs)
    templInstructions = [False] * len(refs)
    if grav == 'f':
        plotinstlbl = 'field'
    elif grav == 'lg':
        plotinstlbl = 'low'
    elif grav == 'g':
        plotinstlbl = 'gamma'
    elif grav == 'b':
        plotinstlbl = 'beta'
    else:
        print('Wrong gravity input.')
        return
    for plotIdx in range(len(refs)):
        if toInclude[plotIdx]:
            plotInstructions[plotIdx] = plotinstlbl
            templInstructions[plotIdx] = True
        if toExclude[plotIdx] and excluded:
            plotInstructions[plotIdx] = 'excluded'
    
    # If all plot instructions are "no", then stop procedure (for spectral types)
    allExcl = True
    for instr in plotInstructions:
        if instr != 'no':
            allExcl = False
    if allExcl:
        print('No spectral data to plot based on your request.')
        return
    
    
    # 11. CALCULATE TEMPLATE SPECTRA FOR SELECTED SET OF SPECTRA ---------------------
    # Gather spectra to use to calculate template spectrum
    if not allExcl:
        O_template = [None] * 3 # Holds calculated template for output
        templCalculated = False
        for bandIdx, bandKey in enumerate(BANDS_NAMES):
            template = None
            templSpecs = []
            for spIdx, spex in enumerate(spectraN[bandKey]):
                if templInstructions[spIdx]:
                    # Check that spectrum exists
                    if spex is None:
                        templInstructions[spIdx] = False
                        continue
                    
                    if bandKey == 'OPT':
                        # Manually skip including OPT spectrum of some specific targets
                        # which use the same NIR fits file as both OPT and NIR spectrum
                        # so OPT spectrum is very bad
                        if refs[spIdx] == '50246':
                            continue
                        if refs[spIdx] == '50061':
                            continue
                        elif refs[spIdx] == '50188':
                            continue
                        templSpecs.append(spex)
                    
                    else:
                        # Check that spectrum comes with error values (NIR bands only)
                        notNansBool = np.isfinite(spex[2])
                        notNans = np.any(notNansBool)
                        if notNans:
                            templSpecs.append(spex)
                        else:
                            print(str(objRef[spIdx]) + ' excluded from template')
                            templInstructions[spIdx] = False
            
            # Calculate template spectrum using spec uncertainties as weights
            if len(templSpecs) > 1:
                if bandKey == 'OPT':
                    template = at.mean_comb(templSpecs, extremes=True)
                else:
                    template_first, renormSpecs = at.mean_comb(templSpecs, renormalize=True)
                    # Re-calculate template using re-normalized spectra
                    template = at.mean_comb(renormSpecs, extremes=True)
                
                # To calculate a simple standard deviation, recalculate the template
                # without any weights and use the simple variance that comes out of that.
                # (The OPT band is never renormalized, so use its original band spectra
                # rather than the renormSpecs left over from the previous NIR band.)
                if bandKey == 'OPT':
                    tmptempl = at.mean_comb(templSpecs, forcesimple=True)
                else:
                    tmptempl = at.mean_comb(renormSpecs, forcesimple=True)
                template[2] = tmptempl[2].copy()
                templCalculated = True
            
            # Append template to list of spectra to plot in the next step
            if templCalculated:
                spectraN[bandKey].append(template)
                
                # Append template to output object
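                # (BANDS_NAMES is ordered ['K','H','J','OPT'] while O_template is
                #  returned as [J, H, K]; the mapping below reorders accordingly
                #  and skips the OPT band.)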
                if bandIdx == 0:
                    tempIdx = 2
                elif bandIdx == 2:
                    tempIdx = 0
                elif bandIdx == 1:
                    tempIdx = 1
                else:
                    tempIdx = None
                if tempIdx is not None:
                    O_template[tempIdx] = template
        
        if templCalculated:
            refs.append('template')
            plotInstructions.append('template')
        else:
            O_template = None
    
    
    # 12. PLOT DATA -----------------------------------------------------------
    if plot:
        # Gather info on each target
        objInfo = [None] * len(refs)
        for posIdx,spIdx in enumerate(specIdx[specSortIdx]):
            tmpDesig  = data[colNameDesig][spIdx]
            tmpJK     = data[colNameJK][spIdx]
            
            # Append description of special object to its spectral type when missing
            spDesc = ''
            try:
                loc = data[colNameType][spIdx].index(spDesc)
            except ValueError:
                loc = None
            if loc is None:
                tmpSPtype = data[colNameType][spIdx] + spDesc
            else:
                tmpSPtype = data[colNameType][spIdx]
            tmpSPtype = tmpSPtype + ' ' * (7 - len(tmpSPtype)) # For alignment purposes
            
            if tmpDesig == '1126-5003':
                objInfo[posIdx] = (tmpDesig + ' ' + tmpSPtype + '%.2f' %tmpJK)
            else:
                objInfo[posIdx] = (tmpDesig + ' ' + tmpSPtype + ' ' + '%.2f' %tmpJK)
        
        if objInfo[-1] is None:
            objInfo[-1] = 'template' 
        
        # Create Figure with Subplots and Annotations
        figObj = plotspec(spectraN, BANDS_NAMES, BAND_LIMS, objInfo, spTypeInput, \
                          grav, plotInstructions, excluded)
    
    if plot:
        if excluded:
            sptxt = '_excluded'
        else:
            sptxt = ''
        figObj.savefig(FOLDER_OUT + spTypeInput + 'strip_' + \
                       grav + sptxt + '.pdf', dpi=300)
    
    
    # 13. DETERMINE OUTPUT ----------------------------------------------------
    if templ:
        if std:
            return O_template, O_standard
        else:
            return O_template
    elif std:
        return O_standard
    else:
        return spectraN
Example #9
def main(spInput, grav=''):
    # 1. LOAD RELEVANT MODULES ---------------------------------------------------------
    import astrotools as at
    from astropy.io import ascii
    import matplotlib.pyplot as plt
    import numpy as np
    import sys
    import pdb
    
    
    # 2. SET UP VARIABLES --------------------------------------------------------------
    # Customizable variables <><><><><><><><><><><><><><><><><><><><><><><><><><><>
    FOLDER_ROOT = '/Users/alejo/Dropbox/Project_0/more data/'  # Location of NIR and OPT folders
    FOLDER_IN = '/Users/alejo/Dropbox/Project_0/data/' # Location of input files
    FOLDER_OUT = '/Users/alejo/Dropbox/Project_0/plots/' # Location to save output figures
    FILE_IN = 'nir_spex_prism_with_optical.txt' # ASCII file w/ data
    # <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
    
    # For TXT objects file (updatable here directly)
    HDR_FILE_IN = ('Ref','Designation','J','H','K','SpType','SpType_T','NIRFobs',\
                   'NIRFtel','NIRfile','OPTobs','OPTtel','OPTinst','OPTfile',\
                   'Young?','Dusty?','Blue?','Binary?','Pec?')
    
    OPTNIR_KEYS  = ['OPT', 'NIR']
    BAND_NAME  = ['NIR']
    data       = ''
    dataRaw    = ''
    specFiles  = ''
    spectraRaw = ''
    spectra    = ''
    
    colNameRef   = HDR_FILE_IN[0]
    colNameDesig = HDR_FILE_IN[1]
    colNameJ     = HDR_FILE_IN[2]
    colNameK     = HDR_FILE_IN[4]
    colNameJK    = 'J-K'
    colNameType  = HDR_FILE_IN[6]
    colNameNIRfile = HDR_FILE_IN[9]
    colNameYng   = HDR_FILE_IN[14]
    colNameDust  = HDR_FILE_IN[15]
    colNameBlue  = HDR_FILE_IN[16]
    colNameBin   = HDR_FILE_IN[17]
    colNamePec   = HDR_FILE_IN[18]
    
    # Initialize dictionary to store NIR bands limits and normalizing sections
    BAND_LIMS = {}.fromkeys(BAND_NAME)
    for bandKey in BAND_NAME:
        BAND_LIMS[bandKey] = dict(lim = [None] * 2, limN = [None] * 2)
    
    # Set wl limits for band
    # Limits are in microns
    BAND_LIMS['NIR']['lim'][0] = 0.8
    BAND_LIMS['NIR']['lim'][1] = 2.4
    
    # Set wl limits for normalizing sections; this is the peak of the J band
    # Limits are in microns
    BAND_LIMS['NIR']['limN'][0] = 1.28
    BAND_LIMS['NIR']['limN'][1] = 1.32
    
    
    # 3. READ DATA FROM INPUT FILES ----------------------------------------------------
    DELL_CHAR = '\t' # Delimiter character
    COMM_CHAR = '#'  # Comment character
    
    # File with objects (source: query in Access)
    dataRaw = ascii.read(FOLDER_IN + FILE_IN, format='no_header', \
                         delimiter=DELL_CHAR, comment=COMM_CHAR, data_start=1)
    
    # Store data in a dictionary-type object
    data = {}.fromkeys(HDR_FILE_IN)
    for colIdx,colname in enumerate(dataRaw.colnames):
        data[HDR_FILE_IN[colIdx]] = np.array(dataRaw[colname])
    
    
    # 4. FORMAT SOME ASCII COLUMNS -----------------------------------------------------
    # 4.1 Convert into unicode the Spectral Type-Text column
    uniSpType = [None] * len(data[colNameType])
    for sIdx,sType in enumerate(data[colNameType]):
        uniSpType[sIdx] = sType#.decode('utf-8')
    
    data[colNameType] = np.array(uniSpType)
    
    # 4.2 Calculate J-K Color
    data[colNameJK] = data[colNameJ] - data[colNameK]
    
    # 4.3 Format Designation Number from Designation Column
    for desigIdx,desig in enumerate(data[colNameDesig]):
        desig = ''.join(desig.split())
        signType = '+'
        signPos = desig.find(signType)
        if signPos == -1:
            signType = '-'
            signPos  = desig.find(signType)
        
        desigProper = desig[:4] + signType + desig[signPos+1:signPos+5]
        data[colNameDesig][desigIdx] = desigProper
    
    
    # 5. FILTER DATA BY USER INPUT IN spInput ------------------------------------------
    # Find all spectra of same spectral type
    specIdx = []
    for spIdx,spType in enumerate(data[colNameType]):
        if spType.upper().startswith(spInput.upper()):
            specIdx.append(spIdx)
    
    if not specIdx:
        print('No target found for given input.')
        return
    spTypeInput = spInput.upper()
    
    # Sort relevant objects by JKmag value
    specIdx     = np.array(specIdx)
    specSortIdx = data[colNameJK][specIdx].argsort()
    
    
    # 6. READ SPECTRAL DATA FROM SPECTRAL FILES ----------------------------------------
    spectraRaw    = {}.fromkeys(OPTNIR_KEYS) # Used to store the raw data from fits files
    specFilesDict = {}.fromkeys(OPTNIR_KEYS) # Used for reference purposes
    
    for key in OPTNIR_KEYS:
        specFiles = [None] * len(specSortIdx)
        
        for sortIdx,specSort in enumerate(specSortIdx):
            if data[key + 'file'][specIdx[specSort]][-4:] == '.dat': continue
            if data[key + 'file'][specIdx[specSort]] == 'include': continue
            tmpFullName = FOLDER_ROOT + key + '/' + data[key \
                          + 'file'][specIdx[specSort]]
            specFiles[sortIdx] = tmpFullName
            specFilesDict[key] = specFiles
        
        spectraRaw[key] = at.read_spec(specFiles, atomicron=True, negtonan=True, \
                                       errors=True, verbose=False)
    
    # Clear out spectral data for objects missing either OPT or NIR data
    allNone = True
    for spIdx in range(0,len(spectraRaw['OPT'])):
        if spectraRaw['OPT'][spIdx] is None:
            spectraRaw['NIR'][spIdx] = None
        elif spectraRaw['NIR'][spIdx] is None:
            spectraRaw['OPT'][spIdx] = None
        else:
            allNone = False
    
    if allNone:
        print('No spectral data found for objects of the given spectral type.')
        return
    
    # Convert spectraRaw contents into lists if only one spectral data
    for key in spectraRaw.keys():
        if spectraRaw[key][0] is not None:
            if len(spectraRaw[key][0]) > 3:
                spectraRaw[key] = [spectraRaw[key],]
    
    
    # 7. GATHER OBJECTS' NAMES ---------------------------------------------------------
    # Filtered objects
    refs = [None] * len(specSortIdx)
    for idx,spIdx in enumerate(specSortIdx):
        tmpRef    = data[colNameRef][specIdx[spIdx]]
        refs[idx] = str(int(tmpRef))
    
    
    # 8. SMOOTH SPECTRA ----------------------------------------------------------------
    # Smooth the flux data to a reasonable resolution
    spectraS = at.smooth_spec(spectraRaw['NIR'], specFile=specFilesDict['NIR'], \
                              winWidth=0)
    
    
    # 9. SELECT SPECTRAL DATA FOR NIR BAND --------------------------------------------
    # Initialize variables
    spectraN = {}.fromkeys(BAND_NAME)
    
    # Gather reference numbers of objects
    objRef = data[colNameRef][specIdx[specSortIdx]]
    
    # Select band
    spectra = at.sel_band(spectraS, BAND_LIMS['NIR']['lim'], objRef)
    
    # Normalize band
    spectraN['NIR'] = at.norm_spec(spectra, BAND_LIMS['NIR']['limN'])
    
    
    # 10. CHARACTERIZE TARGETS (i.e. identify young, blue, to exclude...) --------------
    # Force inclusion of all targets (no exclusions applied here)
    toExclude = [False] * len(refs)
    
    # Determine which targets are blue
    blueObjs = [False] * len(refs)
    for idx,spIdx in enumerate(specIdx[specSortIdx]):
        if data[colNameBlue][spIdx].upper() == 'YES':
            blueObjs[idx] = True
    
    # Determine which targets are dusty
    dustyObjs = [False] * len(refs)
    for idx,spIdx in enumerate(specIdx[specSortIdx]):
        if data[colNameDust][spIdx].upper() == 'YES':
            dustyObjs[idx] = True
    
    # Determine which targets are peculiar
    pecObjs = [False] * len(refs)
    for idx,spIdx in enumerate(specIdx[specSortIdx]):
        if data[colNamePec][spIdx].upper() == 'YES':
            pecObjs[idx] = True
    
    # Determine which plots are young objects
    youngObjs = [False] * len(refs)
    for idx,spIdx in enumerate(specSortIdx):
        if data[colNameYng][specIdx[spIdx]].upper() == 'YES':
            youngObjs[idx] = True
    
    # Determine which targets are GAMMA
    gammaObjs = [False] * len(refs)
    for idx,spIdx in enumerate(specIdx[specSortIdx]):
        tmpType = data[colNameType][spIdx].encode('utf-8')
        # GAMMA (γ) in utf-8 is the byte pair "\xce\xb3"; compare a byte slice so the
        # test also works in Python 3, where indexing a bytes object yields ints
        if tmpType[-2:] == b'\xce\xb3':
            gammaObjs[idx] = True
    
    # Determine which targets are BETA
    betaObjs = [False] * len(refs)
    for idx,spIdx in enumerate(specIdx[specSortIdx]):
        tmpType = data[colNameType][spIdx].encode('utf-8')
        # BETA (β) in utf-8 is the byte pair "\xce\xb2"
        if tmpType[-2:] == b'\xce\xb2':
            betaObjs[idx] = True
    
    # Determine which targets to include in plots (based on user input)
    # Consolidate plotting instructions
    grav = grav.upper()
    plotInstructions = ['exclude'] * len(refs)
    if grav == 'Y': # If plot request is Young, include gamma, beta & young targets
        for plotIdx in range(len(refs)):
            if toExclude[plotIdx]:
                continue
            if gammaObjs[plotIdx] or betaObjs[plotIdx] or youngObjs[plotIdx]:
                if blueObjs[plotIdx] or dustyObjs[plotIdx] or pecObjs[plotIdx]:
                    continue
                plotInstructions[plotIdx] = 'young'
    
    elif grav == 'G': # If plot request is Gamma, include only gamma targets
        for plotIdx in range(len(plotInstructions)):
            if toExclude[plotIdx]:
                continue
            if gammaObjs[plotIdx]:
                if blueObjs[plotIdx] or dustyObjs[plotIdx] or pecObjs[plotIdx]:
                    continue
                plotInstructions[plotIdx] = 'young'
    
    elif grav == 'B': # If plot request is Beta, include only beta targets
        for plotIdx in range(len(plotInstructions)):
            if toExclude[plotIdx]:
                continue
            if betaObjs[plotIdx]:
                if blueObjs[plotIdx] or dustyObjs[plotIdx] or pecObjs[plotIdx]:
                    continue
                plotInstructions[plotIdx] = 'young'
    
    elif grav == 'F': # If plot request is Field, include Field & Standard targets
        for plotIdx in range(len(plotInstructions)):
            if toExclude[plotIdx]:
                continue
            if betaObjs[plotIdx] or gammaObjs[plotIdx] or youngObjs[plotIdx]:
                continue
            plotInstructions[plotIdx] = 'field'
    
    else:   # Otherwise, print Field, gamma, beta, young & Standard targets
        for plotIdx in range(len(plotInstructions)):
            if toExclude[plotIdx]:
                continue
            if blueObjs[plotIdx] or dustyObjs[plotIdx] or pecObjs[plotIdx]:
                continue
            if youngObjs[plotIdx]:
                plotInstructions[plotIdx] = 'young'
            else:
                plotInstructions[plotIdx] = 'field'
    
    # If all plot instructions are "exclude", then stop procedure
    allExcl = True
    for instr in plotInstructions:
        if instr != 'exclude':
            allExcl = False
    if allExcl:
        print('No spectral data to plot based on your request.')
        return
    
    
    # 11. PLOT DATA --------------------------------------------------------------------
    # Gather info on each object (for legend purposes)
    objInfo = [None] * len(refs)
    for posIdx,spIdx in enumerate(specIdx[specSortIdx]):
        tmpDesig  = data[colNameDesig][spIdx]
        tmpJK     = data[colNameJK][spIdx]
        tmpSPtype = data[colNameType][spIdx]
        tmpSPtype = tmpSPtype + ' ' * (5 - len(tmpSPtype))  # For alignment purposes
        
        objInfo[posIdx] = (tmpDesig + ' ' + tmpSPtype + ' ' + '%.2f' %tmpJK)
        
    # Create Figure with Subplots
    figObj = plotspec(spectraN, BAND_NAME, BAND_LIMS, objInfo, spTypeInput, grav, \
                        plotInstructions)
    
    figObj.savefig(FOLDER_OUT + spTypeInput + grav + '_fan.pdf', \
                   dpi=800)
Example #10
tgtNames = []
fitsNames = []
for tgt in dataOrg:
    tgtNames.append(tgt[0])
    fitsNames.append(FOLDER_NIR + tgt[1])

# Break into several plots if too many objects
numData = len(dataOrg)
if numData > 6:
    numPlots = int(np.ceil(numData / 6.))
else:
    numPlots = 1
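# (e.g. 14 objects -> ceil(14/6) = 3 figures of up to 6 spectra each)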

# 3. GET SPECTRA --------------------------------------------------------------
spectra = at.read_spec(fitsNames, aToMicron=True, negToZero=True, errors=False, plot=False)

# 4. CLEAN AND NORMALIZE SPECTRA ----------------------------------------------
spectraC = at.sel_band(spectra, BAND_LIMS)
spectraN = at.norm_spec(spectraC, BAND_LIMS)

# 6. PLOT ALL SPECTRA ---------------------------------------------------------
# Parameter to space spectra more
if FILE_LBL == 'emln_galaxies':
    broad = True
else:
    broad = False
start = 0
stop = 6
for plIdx in range(numPlots):
    if stop > numData:
Example #11
def main(spInput, grav="", plot=True, templ=False, std=False, lbl=False):
    # 1. LOAD RELEVANT MODULES ---------------------------------------------------------
    import asciidata
    import astrotools as at
    import pyfits
    import numpy
    import sys
    import pdb
    import matplotlib.pyplot as plt

    # 2. SET UP VARIABLES --------------------------------------------------------------
    # General variables
    FOLDER_ROOT = "/Users/alejo/KCData/"  # Location of NIR and OPT folders
    FOLDER_OUT = "Output/NOC/"
    OPTNIR_KEYS = ["OPT", "NIR"]
    BANDS_NAMES = ["K", "H", "J", "OPT"]
    data = ""
    dataRaw = ""
    specFiles = ""
    spectraRaw = ""
    spectra = ""

    # For TXT objects file (updatable here directly)
    FILE_IN = "nir_spex_prism_with_optical_12aug15.txt"  # ASCII file w/ data
    HDR_FILE_IN = (
        "Ref",
        "Designation",
        "J",
        "H",
        "K",
        "SpType",
        "SpType_T",
        "NIRFobs",
        "NIRFtel",
        "NIRfile",
        "OPTobs",
        "OPTtel",
        "OPTinst",
        "OPTfile",
        "Young?",
        "Dusty?",
        "Blue?",
        "Multiple?",
        "Pec?",
    )

    colNameRef = HDR_FILE_IN[0]
    colNameDesig = HDR_FILE_IN[1]
    colNameJ = HDR_FILE_IN[2]
    colNameK = HDR_FILE_IN[4]
    colNameJK = "J-K"
    colNameType = HDR_FILE_IN[6]
    colNameYng = HDR_FILE_IN[14]
    colNameDust = HDR_FILE_IN[15]
    colNameBlue = HDR_FILE_IN[16]
    colNameBin = HDR_FILE_IN[17]
    colNamePec = HDR_FILE_IN[18]

    # For TXT standards file
    FILE_IN_STD = "NIR_Standards.txt"  # ASCII file w/ standards
    HDR_FILE_IN_STD = ("Ref", "Designation", "NIR SpType", "OPT SpType")
    colNameNIRS = HDR_FILE_IN_STD[2]
    colNameOPTS = HDR_FILE_IN_STD[3]

    # For TXT exclude-objects file
    EXCL_FILE = "Exclude_Objs.txt"  # ASCII file w/ unums of objects to exclude

    # 3. READ DATA FROM INPUT FILES-----------------------------------------------------
    NULL_CHAR = ""  # Null character
    DELL_CHAR = "\t"  # Delimiter character
    COMM_CHAR = "#"  # Comment character

    # File with objects (query in Access)
    dataRaw = asciidata.open(FOLDER_ROOT + FILE_IN, NULL_CHAR, DELL_CHAR, COMM_CHAR)

    # Store data in a dictionary-type object
    data = {}.fromkeys(HDR_FILE_IN)
    for colIdx, colData in enumerate(dataRaw):
        data[HDR_FILE_IN[colIdx]] = colData.tonumpy()

    # File with standards
    dataRawS = asciidata.open(FOLDER_ROOT + FILE_IN_STD, NULL_CHAR, DELL_CHAR, COMM_CHAR)

    # Store standard data in a dictionary-type object
    dataS = {}.fromkeys(HDR_FILE_IN_STD)
    for colIdx, colData in enumerate(dataRawS):
        dataS[HDR_FILE_IN_STD[colIdx]] = colData.tonumpy()

    # 4. FORMAT SOME ASCII COLUMNS -----------------------------------------------------
    # 4.1 Convert into unicode the Spectral Type-Text column
    uniSpType = [None] * len(data[colNameType])
    for sIdx, sType in enumerate(data[colNameType]):
        uniSpType[sIdx] = sType.decode("utf-8")

    data[colNameType] = numpy.array(uniSpType)

    # 4.2 Calculate J-K Color And Add J-K Column
    data[colNameJK] = data[colNameJ] - data[colNameK]

    # 4.3 Format Designation Number from Designation Column
    #     (From "XX XX XX.X +XX XX XX.X" to "XXXX+XXXX")
    for desigIdx, desig in enumerate(data[colNameDesig]):
        desig = "".join(desig.split())
        signType = "+"
        signPos = desig.find(signType)
        if signPos == -1:
            signType = "-"
            signPos = desig.find(signType)

        desigProper = desig[:4] + signType + desig[signPos + 1 : signPos + 5]
        data[colNameDesig][desigIdx] = desigProper

    # 5. FILTER DATA BY USER INPUT IN spInput -------------------------------------------
    uniqueSpec = False
    specIdx = []
    if spInput.upper().startswith("L"):
        # If input is a spectral type, then find all spectra of same spectral type
        for spIdx, spType in enumerate(data[colNameType]):
            if spType.upper().startswith(spInput.upper()):
                specIdx.append(spIdx)
        if not specIdx:
            print "No targets found for given input."
            if std is False:
                return
        spTypeInput = spInput.upper()
    else:
        # If input is one single spectrum, then find it
        for spIdx, spType in enumerate(data[colNameRef]):
            if str(spType) == spInput.upper():
                specIdx.append(spIdx)
        if not specIdx:
            print "Requested target not found."
            if std is False:
                return
        else:
            spTypeInput = data[colNameType][specIdx[0]][0:2]
            uniqueSpec = True

    # Find NIR standard target that matches user's spectral type
    stdIdx = []
    for spIdx, spType in enumerate(dataS[colNameNIRS]):
        if spType.upper().startswith(spTypeInput):
            stdIdx.append(spIdx)

    # Add NIR standard target to list of filtered objects if not there already
    # (It may not be included in first filter because OPT SpT != NIR SpT)
    if not uniqueSpec:
        if dataS[colNameNIRS][stdIdx] != dataS[colNameOPTS][stdIdx]:
            for spIdx, spRef in enumerate(data[colNameRef]):
                if spRef == int(dataS[colNameRef][stdIdx][0]):
                    if spIdx not in specIdx:
                        specIdx.append(spIdx)

    # Sort relevant objects by JKmag value
    specIdx = numpy.array(specIdx)
    specSortIdx = data[colNameJK][specIdx].argsort()

    # 6. READ SPECTRAL DATA FROM SPECTRAL FILES ----------------------------------------
    spectraRaw = {}.fromkeys(OPTNIR_KEYS)  # Used to store the raw data from fits files
    specFilesDict = {}.fromkeys(OPTNIR_KEYS)  # Used for reference purposes

    for key in OPTNIR_KEYS:
        specFiles = [None] * len(specSortIdx)

        for sortIdx, specSort in enumerate(specSortIdx):
            tmpFullName = FOLDER_ROOT + key + "/" + data[key + "file"][specIdx[specSort]]
            specFiles[sortIdx] = tmpFullName
            specFilesDict[key] = specFiles

        spectraRaw[key] = at.read_spec(specFiles, atomicron=True, negtonan=True, errors=True, verbose=False)

    # Clear out spectral data for objects missing either OPT or NIR data
    allNone = True
    for spIdx in range(0, len(spectraRaw["OPT"])):
        if spectraRaw["OPT"][spIdx] is None:
            spectraRaw["NIR"][spIdx] = None
        elif spectraRaw["NIR"][spIdx] is None:
            spectraRaw["OPT"][spIdx] = None
        else:
            allNone = False

    if allNone:
        print "No spectral data found for objects of the given spectral type."
        if std is False:
            return

    # Convert spectraRaw contents into lists if only one spectral data
    # (This reduces the dimensions of the object holding the data)
    for key in spectraRaw.keys():
        if spectraRaw[key][0] is not None:
            if len(spectraRaw[key][0]) > 3:
                spectraRaw[key] = [spectraRaw[key]]

    # 7. GATHER OBJECTS' NAMES----------------------------------------------------------
    # Filtered objects
    refs = [None] * len(specSortIdx)
    for idx, spIdx in enumerate(specSortIdx):
        tmpRef = data[colNameRef][specIdx[spIdx]]
        refs[idx] = str(int(tmpRef))

    # Standard objects
    refsStd = [None] * len(dataS[colNameRef])
    for idx, spIdx in enumerate(dataS[colNameRef]):
        tmpRef = dataS[colNameRef][idx]
        refsStd[idx] = str(int(tmpRef))

    # Gather reference numbers of objects
    objRef = data[colNameRef][specIdx[specSortIdx]]

    # 8. SMOOTH SPECTRA -----------------------------------------------------------------
    # Smooth the flux data to a reasonable resolution
    spectraS = {}.fromkeys(OPTNIR_KEYS)

    tmpSpOPT = at.smooth_spec(spectraRaw["OPT"], specFile=specFilesDict["OPT"], winWidth=10)
    tmpSpNIR = at.smooth_spec(spectraRaw["NIR"], specFile=specFilesDict["NIR"], winWidth=0)

    spectraS["OPT"] = tmpSpOPT
    spectraS["NIR"] = tmpSpNIR

    # 9. SET LIMITS FOR BANDS AND NORMALIZING SECTIONS----------------------------------
    # Initialize dictionary to store limits
    BAND_LIMS = {}.fromkeys(BANDS_NAMES)
    for bandKey in BANDS_NAMES:
        BAND_LIMS[bandKey] = dict(lim=[None] * 2, limN=[None] * 2)

    # Set wavelength limits for bands
    # Limits are in microns
    BAND_LIMS["OPT"]["lim"][0] = 0.65
    BAND_LIMS["OPT"]["lim"][1] = 0.90
    BAND_LIMS["J"]["lim"][0] = 0.8
    BAND_LIMS["J"]["lim"][1] = 1.4
    BAND_LIMS["H"]["lim"][0] = 1.4
    BAND_LIMS["H"]["lim"][1] = 1.9
    BAND_LIMS["K"]["lim"][0] = 1.9
    BAND_LIMS["K"]["lim"][1] = 2.4

    # Set wl limits for normalizing sections
    # Limits are in microns
    BAND_LIMS["OPT"]["limN"][0] = 0.66
    BAND_LIMS["OPT"]["limN"][1] = 0.89
    BAND_LIMS["J"]["limN"][0] = 0.87
    BAND_LIMS["J"]["limN"][1] = 1.39
    BAND_LIMS["H"]["limN"][0] = 1.41
    BAND_LIMS["H"]["limN"][1] = 1.89
    BAND_LIMS["K"]["limN"][0] = 1.91
    BAND_LIMS["K"]["limN"][1] = 2.39

    # 10. SELECT SPECTRAL DATA FOR OPTICAL, J-BAND, H-BAND, & K-BAND--------------------
    # Initialize variables
    spectra = {}.fromkeys(BANDS_NAMES)
    spectraN = {}.fromkeys(BANDS_NAMES)

    for bandKey in BANDS_NAMES:
        if bandKey == "OPT":
            optNIR = "OPT"
        else:
            optNIR = "NIR"

        # Select band
        spectra[bandKey] = at.sel_band(spectraS[optNIR], BAND_LIMS[bandKey]["lim"], objRef)
        if spectra[bandKey] is None:
            break

        # Normalize band
        spectraN[bandKey], flagN = at.norm_spec(spectra[bandKey], BAND_LIMS[bandKey]["limN"], flag=True)
        if flagN:
            print "LIMITS for normalization changed!"
        if spectraN[bandKey] is None:
            break
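    # (If a band selection or normalization fails, the break above exits the
    #  loop and any remaining bands stay None in spectra/spectraN.)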

    # 11. CHARACTERIZE TARGETS (i.e. identify young, blue, dusty, binary, peculiar, & excluded ones)
    # Determine which targets to exclude using the "Exclude_Objs" file
    toExclude = [False] * len(refs)
    dataExcl = asciidata.open(FOLDER_ROOT + EXCL_FILE, NULL_CHAR, DELL_CHAR, COMM_CHAR)
    if len(dataExcl[0]) > 0:
        # Extract data from "Exclude_Objs" file
        excludeObjs = [None] * len(dataExcl[0])
        for rowIdx, rowData in enumerate(dataExcl[0]):
            excludeObjs[rowIdx] = str(rowData)

        # Find intersection of exclude-obj list and filtered targets list
        setExclude = set(excludeObjs).intersection(set(refs))

        # Create list with intersection targets
        if len(setExclude) != 0:
            for exclIdx in setExclude:
                tmpExclIdx = numpy.where(numpy.array(refs) == exclIdx)
                toExclude[tmpExclIdx[0][0]] = True  # use a scalar index into the list

    # Determine which target is the NIR Standard object
    O_standard = [None] * 3  # Holds standard for output
    stdObjs = [False] * len(refs)
    for idx, spIdx in enumerate(specIdx[specSortIdx]):
        if data[colNameRef][spIdx] == dataS[colNameRef][stdIdx]:
            stdObjs[idx] = True

            O_standard[0] = spectraN["J"][idx]
            O_standard[1] = spectraN["H"][idx]
            O_standard[2] = spectraN["K"][idx]

    # Determine which targets are blue
    blueObjs = [False] * len(refs)
    for idx, spIdx in enumerate(specIdx[specSortIdx]):
        if data[colNameBlue][spIdx].upper() == "YES":
            blueObjs[idx] = True

    # Determine which targets are dusty
    dustyObjs = [False] * len(refs)
    for idx, spIdx in enumerate(specIdx[specSortIdx]):
        if data[colNameDust][spIdx].upper() == "YES":
            dustyObjs[idx] = True

    # Determine which targets are binary
    binObjs = [False] * len(refs)
    for idx, spIdx in enumerate(specIdx[specSortIdx]):
        if data[colNameBin][spIdx].upper() == "YES":
            binObjs[idx] = True

    # Determine which targets are peculiar
    pecObjs = [False] * len(refs)
    for idx, spIdx in enumerate(specIdx[specSortIdx]):
        if data[colNamePec][spIdx].upper() == "YES":
            pecObjs[idx] = True

    # Determine which targets are young
    youngObjs = [False] * len(refs)
    for idx, spIdx in enumerate(specIdx[specSortIdx]):
        if data[colNameYng][spIdx].upper() == "YES":
            youngObjs[idx] = True

    # Determine which targets are GAMMA
    gammaObjs = [False] * len(refs)
    for idx, spIdx in enumerate(specIdx[specSortIdx]):
        tmpType = data[colNameType][spIdx].encode("utf-8")
        tmpLen = len(tmpType)
        utcA = tmpType[tmpLen - 2]
        utcB = tmpType[tmpLen - 1]
        # GAMMA in utf-8 code is "\xce\xb3"
        if utcA == "\xce" and utcB == "\xb3":
            gammaObjs[idx] = True

    # Determine which targets are BETA
    betaObjs = [False] * len(refs)
    for idx, spIdx in enumerate(specIdx[specSortIdx]):
        tmpType = data[colNameType][spIdx].encode("utf-8")
        tmpLen = len(tmpType)
        utcA = tmpType[tmpLen - 2]
        utcB = tmpType[tmpLen - 1]
        # BETA in utf-8 code is "\xce\xb2"
        if utcA == "\xce" and utcB == "\xb2":
            betaObjs[idx] = True
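    # (Both loops above inspect the last two UTF-8 bytes of the spectral type;
    #  under Python 2 an equivalent, arguably clearer test would be
    #  tmpType.endswith('\xce\xb3') / tmpType.endswith('\xce\xb2'), respectively.)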

    # Determine which targets to include in plots (based on user input)
    # Consolidate plotting & template-flux instructions
    grav = grav.upper()
    plotInstructions = ["exclude"] * len(refs)
    templInstructions = [False] * len(refs)
    if grav == "Y":  # If plot request is Young, include gamma, beta & young targets
        for plotIdx in range(len(refs)):
            if toExclude[plotIdx]:
                continue
            if gammaObjs[plotIdx] or betaObjs[plotIdx] or youngObjs[plotIdx]:
                if blueObjs[plotIdx] or dustyObjs[plotIdx] or pecObjs[plotIdx] or binObjs[plotIdx]:
                    continue
                plotInstructions[plotIdx] = "young"
                templInstructions[plotIdx] = True

    elif grav == "G":  # If plot request is Gamma, include only gamma targets
        for plotIdx in range(len(plotInstructions)):
            if toExclude[plotIdx]:
                continue
            if gammaObjs[plotIdx]:
                if blueObjs[plotIdx] or dustyObjs[plotIdx] or pecObjs[plotIdx] or binObjs[plotIdx]:
                    continue
                plotInstructions[plotIdx] = "young"
                templInstructions[plotIdx] = True

    elif grav == "B":  # If plot request is Beta, include only beta targets
        for plotIdx in range(len(plotInstructions)):
            if toExclude[plotIdx]:
                continue
            if betaObjs[plotIdx]:
                if blueObjs[plotIdx] or dustyObjs[plotIdx] or pecObjs[plotIdx] or binObjs[plotIdx]:
                    continue
                plotInstructions[plotIdx] = "young"
                templInstructions[plotIdx] = True

    elif grav == "F":  # If plot request is Field, include Field & Standard targets
        for plotIdx in range(len(plotInstructions)):
            if toExclude[plotIdx]:
                continue
            if betaObjs[plotIdx] or gammaObjs[plotIdx] or youngObjs[plotIdx]:
                continue
            if blueObjs[plotIdx] or dustyObjs[plotIdx] or pecObjs[plotIdx] or binObjs[plotIdx]:
                continue
            if stdObjs[plotIdx]:
                plotInstructions[plotIdx] = "standard"
            else:
                plotInstructions[plotIdx] = "field"
            templInstructions[plotIdx] = True

    else:  # Otherwise, print Field, gamma, beta, young & Standard targets
        for plotIdx in range(len(plotInstructions)):
            if toExclude[plotIdx]:
                continue
            if blueObjs[plotIdx] or dustyObjs[plotIdx] or pecObjs[plotIdx] or binObjs[plotIdx]:
                continue
            if youngObjs[plotIdx]:
                plotInstructions[plotIdx] = "young"
            elif stdObjs[plotIdx]:
                plotInstructions[plotIdx] = "standard"
            else:
                plotInstructions[plotIdx] = "field"
            templInstructions[plotIdx] = True

    # If all plot instructions are "exclude", then stop procedure (for spectral types)
    allExcl = True
    for instr in plotInstructions:
        if instr != "exclude":
            allExcl = False
    if allExcl:
        if std:
            return O_standard
        if not uniqueSpec:
            print "No spectral data to plot based on your request."
            return

    # 12. CALCULATE TEMPLATE SPECTRA FOR SELECTED SET OF SPECTRA -----------------------
    # Gather spectra to use to calculate template spectrum
    if not allExcl:
        O_template = [None] * 3  # Holds calculated template for output
        templCalculated = False
        for bandIdx, bandKey in enumerate(BANDS_NAMES):
            if bandKey == "OPT":
                continue

            templSpecs = []
            for spIdx, spex in enumerate(spectraN[bandKey]):
                if templInstructions[spIdx]:
                    # Check that spectrum exists
                    if spex is None:
                        templInstructions[spIdx] = False
                        continue

                    if bandKey == "OPT":
                        templSpecs.append(spex)
                    else:
                        # Check that spectrum comes with error values (NIR bands only)
                        notNansBool = numpy.isfinite(spex[2])
                        notNans = numpy.any(notNansBool)
                        if notNans:
                            templSpecs.append(spex)
                        else:
                            print str(objRef[spIdx]) + " excluded from template"

            # Calculate template spectrum
            if len(templSpecs) > 1:
                template = at.mean_comb(templSpecs)
                templCalculated = True

                # Append template to list of spectra to plot in the next step
                spectraN[bandKey].append(template)
                # Append template to output object
                if bandIdx == 0:
                    tempIdx = 2
                elif bandIdx == 2:
                    tempIdx = 0
                else:
                    tempIdx = 1
                O_template[tempIdx] = template

        if templCalculated:
            refs.append("template")
            plotInstructions.append("template")
        else:
            O_template = None

    # 13. PLOT DATA --------------------------------------------------------------------
    if lbl or plot:
        # Gather info on each target
        objInfo = [None] * len(refs)
        for posIdx, spIdx in enumerate(specIdx[specSortIdx]):
            tmpDesig = data[colNameDesig][spIdx]
            tmpJK = data[colNameJK][spIdx]
            tmpSPtype = data[colNameType][spIdx]
            tmpSPtype = tmpSPtype + " " * (5 - len(tmpSPtype))  # For alignment purposes

            objInfo[posIdx] = tmpDesig + " " + tmpSPtype + " " + "%.2f" % tmpJK

        if objInfo[-1] is None:
            objInfo[-1] = "template"
    if plot:
        # Create Figure with Subplots and Annotations
        figObj = plotspec(spectraN, BANDS_NAMES, BAND_LIMS, objInfo, spTypeInput, grav, plotInstructions)

        figObj.savefig(FOLDER_ROOT + FOLDER_OUT + spTypeInput + grav + ".pdf", dpi=600)

    # 14. DETERMINE OUTPUT -------------------------------------------------------------
    if templ:
        if std:
            return O_template, O_standard
        else:
            return O_template
    elif std:
        return O_standard
    else:
        if lbl:
            return spectraN, objInfo
        else:
            return spectraN
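# Hedged usage sketch (illustration only, not part of the original example):
# assuming this main() is exposed in a module named nocs, as in the
# commented-out call in Example #12 below, and that templ/std/plot are
# optional keyword flags of main() (an assumption; only plot, std and
# normalize appear in that call), templates and standards for a spectral
# type could be requested roughly like this:
#
#     import nocs
#     O_template = nocs.main('L2', 'F', plot=False, templ=True)
#     O_standard = nocs.main('L2', 'F', plot=False, std=True)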
Example #12
0
spTypes = []
spectra = {}.fromkeys(BANDS)
for band in BANDS:
    spectra[band] = []

for idxTp, spTp in enumerate(SPTYPES):
    # Fetch and normalize special standards (L2, L5 and L7 only)
    if spTp not in SPSTDNM:
        continue
    if spTp == 'L2':
        isp = 0
    elif spTp == 'L5':
        isp = 1
    else:
        isp = 2
    tmpspec = at.read_spec(FOLDER_DATASPEC + 'NIR/' + SPSTD[isp], errors=False, \
                           atomicron=True, negtonan=True)
    for band in BANDS:
        tmpband = at.sel_band(tmpspec, BAND_LIMS[band]['lim'])
        if idxTp in [1,2,3] and band == 'H':
            norm_lims = SPECIAL_H_NORM_LIM
        else:
            norm_lims = NORM_LIMS[band]['lim']
        stdToPlot = at.norm_spec(tmpband, norm_lims)[0]
        spectra[band].append(stdToPlot)
    spTypes.append(spTp)
    
    # # Fetch standard
    # tmpStd = nocs.main(spTp, GRAV, plot=False, std=True, normalize=False)
    # # Normalize standard
    # for bdIdx, band in enumerate(BANDS):
    #     if idxTp in [1,2,3] and band == 'H':
Example #13
0
    BAND_NORMS[band] = [None] * 2
BAND_LIMS['J'][0] = 0.8
BAND_LIMS['J'][1] = 1.4 
BAND_LIMS['H'][0] = 1.4
BAND_LIMS['H'][1] = 1.9
BAND_LIMS['K'][0] = 1.9
BAND_LIMS['K'][1] = 2.4
BAND_NORMS['J'][0] = 0.87
BAND_NORMS['J'][1] = 1.39
BAND_NORMS['H'][0] = 1.41
BAND_NORMS['H'][1] = 1.89
BAND_NORMS['K'][0] = 1.91
BAND_NORMS['K'][1] = 2.39

# 3. GET SPECTRUM OF TARGET ---------------------------------------------------
specRaw = at.read_spec(interestObject)[0]
if specRaw is None:
    print 'Could not get spectrum from file.'
    sys.exit(0)
specRaw = np.array(specRaw)

# Separate spectrum by bands
specSep = separate_bands(specRaw, BANDS, BAND_LIMS)

# Normalize spectrum
specN = [None] * 3
for bIdx, band in enumerate(BANDS):
    specN[bIdx] = at.norm_spec(specSep[bIdx], BAND_NORMS[band])[0]

if specN[0] is None:
    print 'Error normalizing spectra.'
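
# --- Hedged sketch (illustration only, not part of the original snippet) ---
# separate_bands() is called above but not defined in this excerpt; assuming it
# simply slices the [wavelength, flux, (error)] arrays of specRaw by the
# wavelength limits of each band, a minimal implementation could look like:
def separate_bands(spec, bands, band_lims):
    # Split one spectrum (a numpy array of rows: wl, flux, and optionally err)
    # into per-band pieces, returned in the same order as `bands`
    pieces = []
    for band in bands:
        lowLim, upLim = band_lims[band]
        mask = (spec[0] >= lowLim) & (spec[0] <= upLim)
        pieces.append([col[mask] for col in spec])
    return pieces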