def run_update_with_ska(msid, group, msid_sub_list=None, glim=''):
    """
    extract data from the ska database and update the data for a given msid
    input:  msid            --- the msid to be updated
            group           --- the group name of the msid
            msid_sub_list   --- a list of lists of:
                                [msid, msid_1, msid_2, operand]
                                this is used to compute the first msid from the
                                following two msid values with the operand (+/-/*)
            glim            --- limit table; normally looked up inside this
                                function, but one can be supplied. default: ''
    output: <msid>_data.fits, <msid>_short_data.fits, <msid>_week_data.fits
    """
#
#--- get basic information dict/list
#
    [udict, ddict, mta_db, mta_cross] = ecf.get_basic_info_dict()
#
#--- set starting and stopping data period
#
    test_fits = data_dir + group.capitalize() + '/' + msid + '_data.fits'

    if os.path.isfile(test_fits):
        tstart    = ecf.find_the_last_entry_time(test_fits)
    
        ttemp = time.strftime("%Y:%j:00:00:00", time.gmtime())
        tstop = Chandra.Time.DateTime(ttemp).secs - 86400.0
    
        if tstop < tstart:
            exit(1)
    
        if msid_sub_list:
            [dtime, tdata] = compute_sub_msid(msid, msid_sub_list, tstart, tstop)
        else:
            out     = fetch.MSID(msid, tstart, tstop)
            ok      = ~out.bads
            dtime   = out.times[ok]
            tdata   = out.vals[ok]
#
#--- fetch occasionally adds -999.xxx to the output data of some msids; remove them (Jun 13, 2018)
#
            tind    = (tdata > -999) | (tdata <= -1000)
            dtime   = dtime[tind]
            tdata   = tdata[tind]
#
#--- get limit data table for the msid
#
        if glim == '':
            try:
                tchk  = ecf.convert_unit_indicator(udict[msid])
            except:
                tchk  = 0
    
            if msid in sp_limt_case_c:
                tchk = 1

            glim  = ecf.get_limit(msid, tchk, mta_db, mta_cross)
#
#--- update database
#
        update_database(msid, group, dtime, tdata, glim)
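A minimal usage sketch (the msid and group names below are illustrative, not taken from the code above):

    #--- plain update of a single msid
    run_update_with_ska('tephin', 'Eleak')

    #--- derived msid: compute the first msid from two others with an operand,
    #--- following the [msid, msid_1, msid_2, operand] format in the docstring
    run_update_with_ska('2detart_off', 'Hrcveto',
                        msid_sub_list=[['2detart_off', '2detart', '2shldart', '-']])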
Example #2
def extract_hrcveto_data():
    """
    extract hrc veto data
    input:  none
    output: fits file data related to hrc veto
    """
    #
    #--- set basic information
    #
    group = 'Hrcveto'
    cols = ['TLEVART', 'VLEVART', 'SHEVART']
    [udict, ddict, mta_db, mta_cross] = ecf.get_basic_info_dict()
    #
    #--- find the date to be filled
    #
    ctime = ecf.find_the_last_entry_time(testfits)
    start = Chandra.Time.DateTime(ctime).date

    today = time.strftime("%Y:%j:00:00:00", time.gmtime())
    ctime = Chandra.Time.DateTime(today).secs - 43200.0
    stop = Chandra.Time.DateTime(ctime).date

    print("Group: " + group + ': ' + str(start) + '<-->' + str(stop))

    [xxx, tbdata] = uds.extract_data_arc5gl('hrc', '0', 'hrcss', start, stop)
    #
    #--- get time data in the list form
    #
    dtime = list(tbdata.field('time'))

    for col in cols:
        #
        #---- extract data in a list form
        #
        data = list(tbdata.field(col))
        #
        #--- change col name to msid
        #
        msid = col.lower()
        #
        #--- get limit data table for the msid
        #
        try:
            tchk = ecf.convert_unit_indicator(udict[msid])
        except:
            tchk = 0

        glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
        #
        #--- update database
        #
        uds.update_database(msid, group, dtime, data, glim)
Example #3
def get_data(msid, start, stop):
    """
    create an interactive html page for a given msid
    input:  msid    --- msid
            oup   --- group name
            start   --- start time
            stop    --- stop time
    output: ttime   --- a list of time data
            tdata   --- a list of data
    """
#    start = ecf.check_time_format(start)
#    stop  = ecf.check_time_format(stop)
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = ecf.read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = ecf.read_cross_check_table()
#
#--- get limit data table for the msid
#
    try:
        uck   = udict[msid]
        if uck.lower() == 'k':
            tchk = 1
        else:
            tchk  = ecf.convert_unit_indicator(uck)
    except:
        tchk  = 0

    glim  = ecf.get_limit(msid, tchk, mta_db, mta_cross)
#
#--- extract data from archive
#
    try:
        out     = fetch.MSID(msid, start, stop)
        tdata   = out.vals
        ttime   = out.times
    except:
        tdata   = []
        ttime   = []


    return [ttime, tdata]
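A usage sketch (the msid and time format are assumed from the fetch.MSID calls above; empty lists come back if the archive query fails):

    [ttime, tdata] = get_data('tephin', '2018:001:00:00:00', '2018:032:00:00:00')
    if len(ttime) > 0:
        print("data points: " + str(len(ttime)))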
def run_update_with_archive(msid_list, group,  date_list, detector, level, filetype, tstart, tstop, sub=''):
    """
    extract data using arc5gl and update the data for the msids in the msid_list
    input:  msid_list   --- the name of the list of msids
            group       --- a group name
            date_list   --- a list of date to be processed in the form of <yyyy>:<ddd>
            detector    --- detector name
            level       --- level
            filetype    --- file name
            tstart      --- starting time
            tstop       --- stopping time
            sub         --- subdetector name; defalut "" --- no sub detector
    output: <msid>_data.fits, <msid>_short_data,fits, <msid>_week_data.fits
    """
#
#--- get basic information dict/list
#
    [udict, ddict, mta_db, mta_cross] = ecf.get_basic_info_dict()
#
#--- extract data using arc5gl
#
    [cols, tbdata] = extract_data_arc5gl(detector, level, filetype, tstart, tstop, sub=sub)
#
#--- get time data in the list form
#
    dtime = list(tbdata.field('time'))

    for col in msid_list:
#
#---- extract data in a list form
#
        data = list(tbdata.field(col))
#
#--- change col name to msid
#
        msid = col.lower()
#
#--- get limit data table for the msid
#
        try:
            tchk  = ecf.convert_unit_indicator(udict[msid])
        except:
            tchk  = 0

        glim  = ecf.get_limit(msid, tchk, mta_db, mta_cross)
#
#--- update database
#
        update_database(msid, group, dtime, data, glim)
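A usage sketch (the column names, group, and times mirror the Hrcveto example above but are illustrative only):

    run_update_with_archive(['TLEVART', 'VLEVART', 'SHEVART'], 'Hrcveto',
                            ['2018:001'], 'hrc', '0', 'hrcss',
                            '2018-01-01T00:00:00', '2018-01-02T00:00:00')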
Example #5
def update_simsuppl_data(date=''):
    """
    collect sim msids data
    input:  date    ---- the date in yyyymmdd format. if not given, yesterday's date is used
    output: fits file data related to sim
    """
    #
    #--- read group names which need special treatment
    #
    sfile = house_keeping + 'msid_list_simactu_supple'
    data = mcf.read_data_file(sfile)
    cols = []
    g_dir = {}
    for ent in data:
        atemp = re.split(r'\s+', ent)
        cols.append(atemp[0])
        g_dir[atemp[0]] = atemp[1]
#
#--- get the basic information
#
    [udict, ddict, mta_db, mta_cross] = ecf.get_basic_info_dict()
    #
    #--- find date to read the data
    #
    if date == '':
        date_list = ecf.create_date_list_to_yestaday(testfits)
    else:
        date_list = [date]

    for sday in date_list:
        sday = sday[:4] + '-' + sday[4:6] + '-' + sday[6:]
        print("Date: " + sday)

        start = sday + 'T00:00:00'
        stop = sday + 'T23:59:59'

        [xxx, tbdata] = extract_data_arc5gl('sim', '0', 'sim', start, stop)
        #
        #--- get time data in the list form
        #
        dtime = list(tbdata.field('time'))

        for k in range(0, len(cols)):
            #
            #--- select col name without ST_ (which is standard dev)
            #
            col = cols[k]
            #
            #---- extract data in a list form
            #
            data = list(tbdata.field(col))
            #
            #--- change col name to msid
            #
            msid = col.lower()
            #
            #--- get limit data table for the msid
            #
            try:
                tchk = ecf.convert_unit_indicator(udict[msid])
            except:
                tchk = 0

            glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
            #
            #--- update database
            #
            update_database(msid, g_dir[msid], dtime, data, glim)
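The msid_list_simactu_supple file parsed above is assumed to be a plain whitespace-separated table, one msid and its destination group per line; the entries below are illustrative only:

    3smotoc     Simactu
    3smotstl    Simactu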
Example #6
def update_simdiag_data(date=''):
    """
    collect sim diag msids
    input:  date    ---- the date in yyyymmdd format. if not given, yesterday's date is used
    output: fits file data related to sim diag
    """
    #
    #--- read group names which need special treatment
    #
    #sfile = house_keeping + 'msid_list_simdiag'
    sfile = './msid_list_simsupple'
    data = mcf.read_data_file(sfile)
    cols = []
    g_dir = {}
    for ent in data:
        atemp = re.split(r'\s+', ent)
        cols.append(atemp[0])
        g_dir[atemp[0]] = atemp[1]
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
    #
    #--- read mta database
    #
    mta_db = ecf.read_mta_database()
    #
    #--- read mta msid <---> sql msid conversion list
    #
    mta_cross = ecf.read_cross_check_table()

    day_list = []
    for year in range(1999, 2021):
        cyear = str(year)
        for mon in range(1, 13):
            if year == 1999:
                if mon < 8:
                    continue
            if year == 2020:
                if mon > 1:
                    break

            cmon = str(mon)
            if mon < 10:
                cmon = '0' + cmon

            if mcf.is_leapyear(year):
                lday = mday_list2[mon - 1]
            else:
                lday = mday_list[mon - 1]

            for day in range(1, lday + 1):
                cday = str(day)
                if day < 10:
                    cday = '0' + cday

                sday = cyear + '-' + cmon + '-' + cday
                day_list.append(sday)

    for sday in day_list:
        if sday == '2020-01-17':
            break
        print("Date: " + sday)

        start = sday + 'T00:00:00'
        stop = sday + 'T23:59:59'

        line = 'operation=retrieve\n'
        line = line + 'dataset = flight\n'
        line = line + 'detector = sim\n'
        line = line + 'level = 0\n'
        line = line + 'filetype = sim\n'
        line = line + 'tstart = ' + start + '\n'
        line = line + 'tstop = ' + stop + '\n'
        line = line + 'go\n'

        flist = mcf.run_arc5gl_process(line)

        if len(flist) < 1:
            print("\t\tNo data")
            continue
#
#--- combine them
#
        flen = len(flist)

        if flen == 0:
            continue

        elif flen == 1:
            cmd = 'cp ' + flist[0] + ' ./ztemp.fits'
            os.system(cmd)

        else:
            mfo.appendFitsTable(flist[0], flist[1], 'ztemp.fits')
            if flen > 2:
                for k in range(2, flen):
                    mfo.appendFitsTable('ztemp.fits', flist[k], 'out.fits')
                    cmd = 'mv out.fits ztemp.fits'
                    os.system(cmd)
#
#--- remove individual fits files
#
        for ent in flist:
            cmd = 'rm -rf ' + ent
            os.system(cmd)
#
#--- read out the data for the full day
#
        [cols_xxx, tbdata] = ecf.read_fits_file('ztemp.fits')

        cmd = 'rm -f ztemp.fits out.fits'
        os.system(cmd)
        #
        #--- get time data in the list form
        #
        dtime = list(tbdata.field('time'))

        for k in range(0, len(cols)):
            #
            #---- extract data in a list form
            #
            col = cols[k]
            data = list(tbdata.field(col))
            #
            #--- change col name to msid
            #
            msid = col.lower()
            #
            #--- get limit data table for the msid
            #
            try:
                tchk = ecf.convert_unit_indicator(udict[msid])
            except:
                tchk = 0

            glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
            #
            #--- update database
            #
            tstart = convert_time_format(start)
            tstop = convert_time_format(stop)

            update_database(msid,
                            g_dir[msid],
                            dtime,
                            data,
                            glim,
                            pstart=tstart,
                            pstop=tstop)
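The copy / appendFitsTable / rename sequence above reappears in several functions in this section; a consolidated helper is sketched below, assuming mfo.appendFitsTable(infits1, infits2, outfits) and the module-level os import used throughout:

def combine_fits_files(flist, outname='ztemp.fits'):
    """
    append a list of fits tables into a single fits file
    input:  flist   --- a list of fits file names
            outname --- the name of the combined output file
    output: outname --- the combined fits file name; False if flist is empty
    """
    if len(flist) == 0:
        return False

    if len(flist) == 1:
        os.system('cp ' + flist[0] + ' ./' + outname)
        return outname
#
#--- append the remaining tables one at a time, as in the loops above
#
    mfo.appendFitsTable(flist[0], flist[1], outname)
    for k in range(2, len(flist)):
        mfo.appendFitsTable(outname, flist[k], 'out.fits')
        os.system('mv out.fits ' + outname)

    return outname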
Example #7
def update_grad_and_comp_data(date=''):
    """
    collect grad and comp data for trending
    input:  date    ---- the data collection end date in yyyymmdd format. if not given, yesterday's date is used
    output: fits file data related to grad and comp
    """
    #
    #--- read group names which need special treatment
    #
    sfile = house_keeping + 'mp_process_list'
    glist = mcf.read_data_file(sfile)
    #
    #--- create msid <---> unit dictionary
    #
    [udict, ddict] = ecf.read_unit_list()
    #
    #--- read mta database
    #
    mta_db = ecf.read_mta_database()
    #
    #--- read mta msid <---> sql msid conversion list
    #
    mta_cross = ecf.read_cross_check_table()
    #
    #--- find date to read the data
    #
    if date == '':
        date_list = ecf.create_date_list_to_yestaday(testfits)

    else:
        date_list = [date]

    for day in date_list:
        #
        #--- find the names of the fits files of the day of the group
        #
        print("Date: " + str(day))

        for group in glist:
            print("Group: " + str(group))
            cmd = 'ls /data/mta_www/mp_reports/' + day + '/' + group + '/data/mta*fits* > ' + zspace
            os.system(cmd)

            flist = mcf.read_data_file(zspace, remove=1)
            #
            #--- combine them
            #
            flen = len(flist)

            if flen == 0:
                continue

            elif flen == 1:
                cmd = 'cp ' + flist[0] + ' ./ztemp.fits'
                os.system(cmd)

            else:
                mfo.appendFitsTable(flist[0], flist[1], 'ztemp.fits')
                if flen > 2:
                    for k in range(2, flen):
                        mfo.appendFitsTable('ztemp.fits', flist[k], 'out.fits')
                        cmd = 'mv out.fits ztemp.fits'
                        os.system(cmd)
#
#--- read out the data for the full day
#
            [cols, tbdata] = ecf.read_fits_file('ztemp.fits')

            cmd = 'rm -f ztemp.fits out.fits'
            os.system(cmd)
            #
            #--- get time data in the list form
            #
            dtime = list(tbdata.field('time'))

            for k in range(1, len(cols)):
                #
                #--- select col name without ST_ (which is standard dev)
                #
                col = cols[k]
                mc = re.search('ST_', col)
                if mc is not None:
                    continue
#
#---- extract data in a list form
#
                data = list(tbdata.field(col))
                #
                #--- change col name to msid
                #
                msid = col.lower()
                #
                #--- get limit data table for the msid
                #
                try:
                    tchk = ecf.convert_unit_indicator(udict[msid])
                except:
                    tchk = 0

                glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
                #
                #--- update database
                #
                uds.update_database(msid, group, dtime, data, glim)
Example #8
def update_eph_data_from_comm(date = ''):
    """
    collect eph data for trending
    input:  date    ---- the data collection end date in yyyymmdd format. 
                        if not given, yesterday's date is used
    output: fits file data related to eph
    """
#
#--- read group names which need special treatment
#
    #sfile = house_keeping + 'eph_list'
    #glist = mcf.read_data_file(sfile)
    glist = ['ephhk',]
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = ecf.read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = ecf.read_cross_check_table()
#
#--- find date to read the data
#
    if date == '':
        yesterday = datetime.date.today() - datetime.timedelta(1)
        yesterday = str(yesterday).replace('-', '')
        date_list = create_date_list(yesterday)

    else:
        date_list = [date]

    error_message = ''
    for day in date_list:
#
#--- find the names of the fits files of the day of the group
#
        dline = "Date: " + str(day)
        print(dline)
    
        for group in glist:
            print("Group: " + str(group))
            cmd = 'ls /data/mta_www/mp_reports/' + day + '/' + group + '/data/* > ' + zspace
            os.system(cmd)
    
            tlist = mcf.read_data_file(zspace, remove=1)
            flist = []
            for ent in tlist:
                mc = re.search('_STephhk_static_eio0.fits',  ent)
                if mc is not None:
                    flist.append(ent)
#
#--- combine them
#
            flen = len(flist)
    
            if flen == 0:
                continue
    
            elif flen == 1:
                cmd = 'cp ' + flist[0] + ' ./ztemp.fits'
                os.system(cmd)
    
            else:
                mcf.rm_files('ztemp.fits')
                mfo.appendFitsTable(flist[0], flist[1], 'ztemp.fits')
                if flen > 2:
                    for k in range(2, flen):
                        mfo.appendFitsTable('ztemp.fits', flist[k], 'out.fits')
                        cmd = 'mv out.fits ztemp.fits'
                        os.system(cmd)
#
#--- read out the data for the full day
#
            [cols, tbdata] = ecf.read_fits_file('ztemp.fits')
    
            cmd = 'rm -f ztemp.fits out.fits'
            os.system(cmd)
#
#--- get time data in the list form
#
            dtime = list(tbdata.field('time'))
    
            for k in range(1, len(cols)):
#
#--- select col name without ST_ (which is standard dev)
#
                col = cols[k]
                mc  = re.search('ST_', col)
                if mc is not None:
                    continue
                mc  = re.search('quality', col, re.IGNORECASE)
                if mc is not None:
                    continue
#
#---- extract data in a list form
#
                data = list(tbdata.field(col))
#
#--- change col name to msid
#
                msid = col.lower()
#
#--- get limit data table for the msid
#
                try:
                    tchk  = ecf.convert_unit_indicator(udict[msid])
                except:
                    tchk  = 0
    
                glim  = ecf.get_limit(msid, tchk, mta_db, mta_cross)
#
#--- update database
#
                wline = uds.update_database(msid, group.capitalize(), dtime, data, glim)

                if wline != "":
                    error_message = error_message + dline + '\n' + wline
#
#--- if there are errors, sending error message
#
    if error_message != "":
        error_message = 'MTA limit trend EPH got problems: \n' + error_message

        fo = open(zspace, 'w')
        fo.write(error_message)
        fo.close()
        cmd  = 'cat ' + zspace + ' | mailx -s "Subject: EPH data update problem " '
        cmd  = cmd    + '*****@*****.**'
        os.system(cmd)
        mcf.rm_files(zspace)
Example #9
def recover_hrcveto_data():
    """
    recover hrc veto data
    input:  none
    output: fits file data related to hrc veto
    """
#
#--- read group names which need special treatment
#
    #sfile = 'eph_list'
    #glist = mcf.read_data_file(sfile)
    glist  = ['Hrcveto']
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = ecf.read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = ecf.read_cross_check_table()


    day_list = []
    for year in range(1999, 2018):
        lyear = year
        cyear = str(year)
        for mon in range(1, 13):
            if year == 1999:
                if mon < 8:
                    continue

            if year == 2017:
                if mon > 10:
                    break

            cmon = str(mon)
            if mon < 10:
                cmon = '0' + cmon

            nmon = mon + 1
            if nmon > 12:
                nmon = 1
                lyear += 1

            cnmon = str(nmon)
            if nmon < 10:
                cnmon = '0' + cnmon


            start = str(year)  + '-' + cmon  + '-01T00:00:00'
            stop  = str(lyear) + '-' + cnmon  + '-01T00:00:00'
    
            for group in glist:
                print "Group: " + group + ' : ' + str(start) + '<-->' + str(stop)
    
                line = 'operation=retrieve\n'
                line = line + 'dataset = flight\n'
                line = line + 'detector = hrc\n'
                line = line + 'level = 0\n'
                line = line + 'filetype = hrcss\n'
                line = line + 'tstart = '   + start + '\n'
                line = line + 'tstop = '    + stop  + '\n'
                line = line + 'go\n'
    
                flist = mcf.run_arc5gl_process(line)

                if len(flist) < 1:
                    print("\t\tNo data")
                    continue
#
#--- combine them
#
                flen = len(flist)
     
                if flen == 0:
                    continue
     
                elif flen == 1:
                    cmd = 'cp ' + flist[0] + ' ./ztemp.fits'
                    os.system(cmd)
     
                else:
                    mfo.appendFitsTable(flist[0], flist[1], 'ztemp.fits')
                    if flen > 2:
                        for k in range(2, flen):
                            mfo.appendFitsTable('ztemp.fits', flist[k], 'out.fits')
                            cmd = 'mv out.fits ztemp.fits'
                            os.system(cmd)
#
#--- remove individual fits files
#
    
                for ent in flist:
                    cmd = 'rm -rf ' + ent 
                    os.system(cmd)

#
#--- read out the data for the full day
#
                [cols, tbdata] = ecf.read_fits_file('ztemp.fits')
    
                cols = ['TLEVART', 'VLEVART', 'SHEVART']
     
                cmd = 'rm -f ztemp.fits out.fits'
                os.system(cmd)
#
#--- get time data in the list form
#
                dtime = list(tbdata.field('time'))
     
                for col in cols:
#
#---- extract data in a list form
#
                    data = list(tbdata.field(col))
#
#--- change col name to msid
#
                    msid = col.lower()
#
#--- get limit data table for the msid
#
                    try:
                        tchk  = ecf.convert_unit_indicator(udict[msid])
                    except:
                        tchk  = 0
     
                    glim  = ecf.get_limit(msid, tchk, mta_db, mta_cross)
#
#--- update database
#
                    update_database(msid, group, dtime, data, glim)
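The month-boundary bookkeeping above (cmon / nmon / lyear and the leap-year day tables) can also be generated with the standard library; a sketch under that assumption, not the author's code:

import datetime

def month_periods(year1, year2):
    """
    generate (start, stop) time strings for every calendar month
    input:  year1   --- the first year
            year2   --- the last year (inclusive)
    output: a list of (start, stop) pairs in <yyyy>-<mm>-01T00:00:00 form
    """
    periods = []
    day = datetime.date(year1, 1, 1)
    while day.year <= year2:
#
#--- first day of the next month; no leap-year table is needed here
#
        nday = datetime.date(day.year + (day.month == 12), day.month % 12 + 1, 1)
        periods.append((day.strftime('%Y-%m-%dT00:00:00'),
                        nday.strftime('%Y-%m-%dT00:00:00')))
        day = nday
    return periods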
def update_eph_data(date=''):
    """
    collect eph data for trending
    input:  date    ---- the date in yyyymmdd format. if not given, yesterday's date is used
    output: fits file data related to eph
    """
    #
    #--- read group names which need special treatment
    #
    #sfile = 'eph_list'
    #glist = mcf.read_data_file(sfile)
    glist = ['ephhk']   #--- glist is otherwise undefined; 'ephhk' assumed from the filetype set below
    #
    #--- create msid <---> unit dictionary
    #
    [udict, ddict] = ecf.read_unit_list()
    #
    #--- read mta database
    #
    mta_db = ecf.read_mta_database()
    #
    #--- read mta msid <---> sql msid conversion list
    #
    mta_cross = ecf.read_cross_check_table()

    day_list = []
    for year in range(2000, 2019):  #---- CHANGE CHANGE CHANGE!!!!!
        lyear = year
        for mon in range(1, 13):
            #if year == 2018 and mon > 1:
            #    break
            #if year == 2017 and mon < 11:
            #    continue

            cmon = str(mon)
            if mon < 10:
                cmon = '0' + cmon

            nmon = mon + 1
            if nmon > 12:
                nmon = 1
                lyear += 1

            clmon = str(nmon)
            if nmon < 10:
                clmon = '0' + clmon

            start = str(year) + '-' + cmon + '-01T00:00:00'
            stop = str(lyear) + '-' + clmon + '-01T00:00:00'

            print "Period: " + str(start) + "<--->" + str(stop)

            for group in glist:
                print "Group: " + group
                #
                #---CHANGE THE DETECTOR/FILETYPE BEFORE RUNNING IF IT IS DIFFERENT FROM EPHHK
                #
                line = 'operation=retrieve\n'
                line = line + 'dataset=flight\n'
                line = line + 'detector=ephin\n'
                line = line + 'level=0\n'
                line = line + 'filetype=ephhk\n'
                line = line + 'tstart=' + start + '\n'
                line = line + 'tstop=' + stop + '\n'
                line = line + 'go\n'

                flist = mcf.run_arc5gl_process(line)

                if len(flist) < 1:
                    print "\t\tNo data"
                    continue
#
#--- combine them
#
                flen = len(flist)

                if flen == 0:
                    continue

                elif flen == 1:
                    cmd = 'cp ' + flist[0] + ' ./ztemp.fits'
                    os.system(cmd)

                else:
                    mfo.appendFitsTable(flist[0], flist[1], 'ztemp.fits')
                    if flen > 2:
                        for k in range(2, flen):
                            mfo.appendFitsTable('ztemp.fits', flist[k],
                                                'out.fits')
                            cmd = 'mv out.fits ztemp.fits'
                            os.system(cmd)
#
#--- remove individual fits files
#

                for ent in flist:
                    cmd = 'rm -rf ' + ent
                    os.system(cmd)
#
#--- read out the data
#
                [cols, tbdata] = ecf.read_fits_file('ztemp.fits')

                cmd = 'rm -f ztemp.fits out.fits'
                os.system(cmd)
                #
                #--- get time data in the list form
                #
                dtime = list(tbdata.field('time'))

                for k in range(1, len(cols)):
                    #
                    #--- select col name without ST_ (which is standard dev)
                    #
                    col = cols[k]
                    mc = re.search('ST_', col)
                    if mc is not None:
                        continue
                    mc = re.search('quality', col, re.IGNORECASE)
                    if mc is not None:
                        continue
                    mc = re.search('mjf', col, re.IGNORECASE)
                    if mc is not None:
                        continue
                    mc = re.search('gap', col, re.IGNORECASE)
                    if mc is not None:
                        continue
                    mc = re.search('dataqual', col, re.IGNORECASE)
                    if mc is not None:
                        continue
                    mc = re.search('tlm_fmt', col, re.IGNORECASE)
                    if mc is not None:
                        continue
#
#---- extract data in a list form
#
                    data = list(tbdata.field(col))
                    #
                    #--- change col name to msid
                    #
                    msid = col.lower()
                    #
                    #--- get limit data table for the msid
                    #
                    try:
                        tchk = ecf.convert_unit_indicator(udict[msid])
                    except:
                        tchk = 0

                    glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
                    #
                    #--- update database
                    #
                    update_database(msid, group, dtime, data, glim)
Example #11
def create_interactive_page(msid, group, start, stop, step):
    """
    create an interactive html page for a given msid
    input:  msid    --- msid
            group   --- group name
            start   --- start time
            stop    --- stop time
            step    --- bin size in seconds
    """
    start = ecf.check_time_format(start)
    stop = ecf.check_time_format(stop)
    #
    #--- create msid <---> unit dictionary
    #
    [udict, ddict] = ecf.read_unit_list()
    #
    #--- read mta database
    #
    mta_db = ecf.read_mta_database()
    #
    #--- read mta msid <---> sql msid conversion list
    #
    mta_cross = ecf.read_cross_check_table()
    #
    #--- get limit data table for the msid
    #
    try:
        tchk = ecf.convert_unit_indicator(udict[msid])
    except:
        tchk = 0

    glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
    #
    #--- extract data from archive
    #
    chk = 0
    try:
        out = fetch.MSID(msid, start, stop)
        tdata = out.vals
        ttime = out.times
    except:
        #
        #--- if no data in archive, try mta local database
        #
        try:
            [ttime, tdata] = uds.get_mta_fits_data(msid, start, stop)
#
#--- if it is also failed, return the empty data set
#
        except:
            chk = 1
#
#--- only short_p can change step size (by setting "step")
#
    if chk == 0:
        [week_p, short_p, long_p] = uds.process_day_data(msid,
                                                         ttime,
                                                         tdata,
                                                         glim,
                                                         step=step)
        #
        #--- try to find data from ska or mta local data base
        #
        try:
            fits_data = create_inter_fits(msid, short_p)
#
#--- for the case where the msid is one of the mta special cases
#
        except:
            fits_data = 'na'
    else:
        fits_data = 'na'
#
#--- create interactive html page
#
    create_html_page(msid, fits_data, step)
    #
    #--- remove fits file
    #
    if fits_data != 'na':
        cmd = 'rm -rf ' + fits_data
        os.system(cmd)
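A usage sketch (msid, group, and step are illustrative; step is the bin size in seconds, and only short_p uses it, per the comment above):

    create_interactive_page('tephin', 'Eleak', '2018:001:00:00:00', '2018:005:00:00:00', 300)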
Example #12
def update_msid_data(msid_list):
    """
    update all msid listed in msid_list
    input:  msid_list   --- a list of msids to be processed. default: msid_list_fetch
    output: <msid>_data.fits/<msid>_short_data.fits
    """
    start_time = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = ecf.read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = ecf.read_cross_check_table()
#
#--- read msid list
#
    mfile = house_keeping + msid_list
    data  = mcf.read_data_file(mfile)

    for ent in data:
#
#--- find msid and group name
#
        mc = re.search('#', ent)
        if mc is not None:
            continue
        try:
            [msid, group] = re.split(r'\s+', ent)
        except:
            atemp = re.split(r'\s+', ent)
            msid  = atemp[0]
            group = atemp[1]

        msid  = msid.strip()
        group = group.strip()
#
#--- get limit data table for the msid
#
        try:
            tchk  = ecf.convert_unit_indicator(udict[msid])
        except:
            tchk  = 0
        ####glim  = gsr.read_glimmon(msid, tchk)
        if msid in sp_limt_case_c:
            tchk = 1
        ###(print "I AM HERE TCHK : " + str(tchk) + "<--->" + str(udict[msid]))
        glim  = ecf.get_limit(msid, tchk, mta_db, mta_cross)
        ###(print "I AM HERE GLIM: " + str(glim))
        ###exit(1)
#
#--- update database
#
#        try:
#            out = fetch.MSID(msid, '2017:001:00:00:00', '2017:002')
#            print("MSID: " + msid)
#        except:
#            out = get_mta_fits_data(msid, '2017:001:00:00:00', '2017:002')
#            if out == False:
#                missed = house_keeping + '/missing_data'
#                fo     = open(missed, 'a')
#                fo.write(msid)
#                fo.write('\n')
#                fo.close()
#
#                continue

        update_database(msid, group,  glim)
#
#--- test entry to check how long it took
#
    end_time = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())


    line = "trending plotting: \n"
    line = line + "Started: " + start_time + '\n'
    line = line + "Ended: " + end_time + '\n'

    print(line)
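A usage sketch (msid_list_fetch is the default list named in the docstring; the file is resolved against house_keeping):

    update_msid_data('msid_list_fetch')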
Example #13
def update_ephhk_data():
    """
    update eph hk related msid data
    input:  none
    output: updated data fits files
    """
    #
    #--- create msid <---> unit dictionary
    #
    [udict, ddict] = ecf.read_unit_list()
    #
    #--- read mta database
    #
    mta_db = ecf.read_mta_database()
    #
    #--- read mta msid <---> sql msid conversion list
    #
    mta_cross = ecf.read_cross_check_table()
    #
    #--- find the data period
    #
    [tstart, tstop] = find_data_period()
    #
    #--- extract fits files from archive
    #
    ofits = extract_archive_data(tstart, tstop)
    #
    #--- if no data is extracted, stop
    #
    if ofits is False:
        exit(1)
#
#--- read out the data
#
    [cols, tbdata] = ecf.read_fits_file(ofits)

    cmd = 'rm -f out.fits ' + ofits
    os.system(cmd)
    #
    #--- get time data in the list form
    #
    dtime = list(tbdata.field('time'))
    #
    #--- find useable colnames
    #
    col_list = find_col_names(cols)

    for col in col_list:
        #
        #---- extract data in a list form
        #
        data = list(tbdata.field(col))
        #
        #--- change col name to msid
        #
        msid = col.lower()
        #
        #--- get limit data table for the msid
        #
        try:
            tchk = ecf.convert_unit_indicator(udict[msid])
        except:
            tchk = 0

        glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
        #
        #--- update database
        #
        update_database(msid, 'ephhk', dtime, data, glim)
def tephin_leak_data_update(year=''):
    """
    update tephin - ephin rate/leak current data
    input:  year    --- year of the data to be updated. if it is '', the current year is used
    output: <data_dir>/<msid>/<msid>_data_<year>.fits
    """
    #
    #--- set data extraction period
    #
    tout = set_time_period(year)
    if len(tout) == 6:
        [ltstart, ltstop, lyear, tstart, tstop, year] = tout
        chk = 1
    else:
        [tstart, tstop, year] = tout
        chk = 0
#
#--- get the basic information
#
    [udict, ddict, mta_db, mta_cross] = ecf.get_basic_info_dict()
    #
    #--- extract tephin data
    #
    tchk = ecf.convert_unit_indicator(udict['tephin'])
    glim = ecf.get_limit('tephin', tchk, mta_db, mta_cross)
    #
    #--- for the case the time span goes over the year boundary
    #
    if chk == 1:
        ltephin = update_database('tephin', 'Eleak', glim, ltstart, ltstop,
                                  lyear)

    tephin = update_database('tephin', 'Eleak', glim, tstart, tstop, year)
    #
    #--- read msid list
    #
    mfile = house_keeping + 'msid_list_eph_tephin'
    data = mcf.read_data_file(mfile)

    for ent in data:
        #
        #--- find msid and group name
        #
        mc = re.search('#', ent)
        if mc is not None:
            continue
        try:
            [msid, group] = re.split(r'\s+', ent)
        except:
            atemp = re.split(r'\s+', ent)
            msid = atemp[0]
            group = atemp[1]

        msid = msid.strip()
        group = group.strip()
        #
        #--- get limit data table for the msid
        #
        try:
            tchk = ecf.convert_unit_indicator(udict[msid])
        except:
            tchk = 0
        glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
        #
        #--- update database
        #
        try:
            out = fetch.MSID(msid, '2017:001:00:00:00', '2017:002')
            print("MSID: " + msid)
        except:
            missed = house_keeping + '/missing_data'
            with open(missed, 'a') as fo:
                fo.write(msid + '\n')

            continue
#
#--- for the case, the time span goes over the year boundary
#
        if chk == 1:
            update_database(msid,
                            group,
                            glim,
                            ltstart,
                            ltstop,
                            lyear,
                            sdata=ltephin)

        try:
            update_database(msid,
                            group,
                            glim,
                            tstart,
                            tstop,
                            year,
                            sdata=tephin)
        except:
            pass
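A usage sketch (the year argument format is assumed from set_time_period, which is not shown here):

    tephin_leak_data_update()        #--- update the current year
    tephin_leak_data_update(2017)    #--- rebuild the 2017 data files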
def gratgen_categorize_data():
    """
    separate gratgen data into different categories
    input: none but use <data_dir>/Gratgen/*.fits
    output: <data_dir>/Gratgen_<category>/*.fits
    """
    #
    #--- get the basic information
    #
    [udict, ddict, mta_db, mta_cross] = ecf.get_basic_info_dict()

    for msid in msid_list:
        cols = ['time', msid, 'med', 'std', 'min', 'max', 'ylower',\
                'yupper', 'rlower', 'rupper', 'dcount', 'ylimlower',\
                'ylimupper', 'rlimlower', 'rlimupper']

        glim = ecf.get_limit(msid, 0, mta_db, mta_cross)

        for category in cname_list:
            print("Running: " + str(msid) + '<-->' + category)

            cfile1 = data_dir + 'Gratgen/' + category.capitalize() + '/' + msid + '_data.fits'
            cfile2 = data_dir + 'Gratgen/' + category.capitalize() + '/' + msid + '_short_data.fits'
            cfile3 = data_dir + 'Gratgen/' + category.capitalize() + '/' + msid + '_week_data.fits'

            stday = time.strftime("%Y:%j:00:00:00", time.gmtime())
            tcut1 = 0.0
            tcut2 = Chandra.Time.DateTime(stday).secs - 31622400.0    #--- a year (366 days) ago
            tcut3 = Chandra.Time.DateTime(stday).secs - 864000.0      #--- 10 days ago

            if os.path.isfile(cfile1):
                tchk = ecf.find_the_last_entry_time(cfile1)
            else:
                tchk = 0

            ifile = house_keeping + category
            data = mcf.read_data_file(ifile)
            start = []
            stop = []
            for ent in data:
                atemp = re.split(r'\s+', ent)
                val1 = float(atemp[0])
                val2 = float(atemp[1])
                if val1 > tchk:
                    start.append(val1)
                    stop.append(val2)

            if len(start) == 0:
                continue

            for k in range(0, len(start)):
                diff = stop[k] - start[k]
                if diff < 300:
                    start[k] -= 100
                    stop[k] = start[k] + 300.

                data = fetch.MSID(msid, start[k], stop[k])

                if k == 0:
                    ttime = list(data.times)
                    tdata = list(data.vals)
                else:
                    ttime = ttime + list(data.times)
                    tdata = tdata + list(data.vals)

            if len(ttime) == 0:
                continue

            stat_out1 = get_stat(ttime, tdata, glim, 86400.0)
            stat_out2 = get_stat(ttime, tdata, glim, 3600.0)
            stat_out3 = get_stat(ttime, tdata, glim, 300.0)

            if tchk > 0:
                ecf.update_fits_file(cfile1, cols, stat_out1, tcut=tcut1)
                ecf.update_fits_file(cfile2, cols, stat_out2, tcut=tcut2)
                ecf.update_fits_file(cfile3, cols, stat_out3, tcut=tcut3)
            else:
                ecf.create_fits_file(cfile1, cols, stat_out1, tcut=tcut1)
                ecf.create_fits_file(cfile2, cols, stat_out2, tcut=tcut2)
                ecf.create_fits_file(cfile3, cols, stat_out3, tcut=tcut3)