def find_violation():
    """
    find msids which violate yellow and/or red upper and lower limits
    input:  none but read from sqlite database (<house_keeping>/v_table.sqlite3)
    output: v_list  --- a list of msids
            vdict   --- a dictionary of msids<--->[yl_lim, yu_lim, rl_lim, ru_lim]
    """
    #
    #--- find which msids have the sub html page created
    #
    cmd = 'ls ' + web_dir + '*_plot.html > ' + zspace
    os.system(cmd)

    data = ecf.read_file_data(zspace, 1)

    v_list = []
    vdict = {}
    for ent in data:
        atemp = re.split('\/', ent)
        btemp = re.split('_plot', atemp[-1])
        msid = btemp[0]
        #
        #--- check the violation status of the msid from the database
        #
        out = ved.read_v_estimate(msid)
        chk = 0
        for test in out:
            if (test != 0) and (test != na):
                chk = 1
                break
        if chk > 0:
            v_list.append(msid)
            vdict[msid] = out

    return [v_list, vdict]
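#
#--- usage sketch (illustrative only, not part of the original scripts):
#--- ved.read_v_estimate is assumed to return [yl_lim, yu_lim, rl_lim, ru_lim]
#--- where 0 or 'na' means "no violation expected"
#
def report_violations():
    [v_list, vdict] = find_violation()
    for msid in v_list:
        [yl_lim, yu_lim, rl_lim, ru_lim] = vdict[msid]
        print msid + ': ' + str([yl_lim, yu_lim, rl_lim, ru_lim])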
Example #2
def create_sub_html():
    """
    creates html pages for different categories of msids
    input:  none but read from <house_keeping>/sub_html_list_all
    output: <web_address>/Htmls/<category>_main.html
    """
    #
    #--- get today's date in fractional year
    #
    sec1998 = ecf.find_current_stime()
    ytime = ecf.stime_to_frac_year(sec1998)
    #
    #--- create dictionary of unit and dictionary of descriptions for msid
    #
    [udict, ddict] = ecf.read_unit_list()

    lfile = house_keeping + 'sub_html_list_all'
    data = ecf.read_file_data(lfile)
    #
    #--- create individual html pages under each category
    #
    for ent in data:
        atemp = re.split('::', ent)
        catg = atemp[0]
        msids = re.split(':', atemp[1])

        create_html(catg, msids, ytime, udict, ddict)
Example #3
def find_dy_range():

    msid_list = house_keeping + 'msid_list_sun_angle'
    data = ecf.read_file_data(msid_list)

    fo = open('msid_list_sun_angle', 'w')
    for ent in data:
        atemp = re.split('\s+', ent)
        msid = atemp[0]
        group = atemp[1]

        fits = data_dir + group.capitalize() + '/' + msid + '_data.fits'
        fout = pyfits.open(fits)
        fdata = fout[1].data
        dout = fdata[msid]
        bot = numpy.percentile(dout, 2)
        top = numpy.percentile(dout, 98)
        diff = top - bot
        ratio = diff / 120.0
        if ratio < 1:
            ratio = round(ratio, 2)
            ratio *= 2
        else:
            ratio = round(ratio, 0)
            ratio *= 3

        if ratio < 0.2:
            ratio = 0.2

        btemp = re.split('0\.011', ent)

        line = btemp[0] + '\t0.011\t' + str(ratio) + '\n'
        fo.write(line)

    fo.close()
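#
#--- a self-contained sketch of the dy-range scaling rule used above; the
#--- function name compute_dy_ratio is illustrative, not part of the scripts
#
def compute_dy_ratio(bot, top):
    """
    convert a 2%--98% data spread into a plotting range ratio
    e.g. compute_dy_ratio(0.0, 30.0) gives 0.5  (30 / 120 = 0.25, doubled)
    """
    ratio = (top - bot) / 120.0
    if ratio < 1:
        ratio = round(ratio, 2) * 2
    else:
        ratio = round(ratio, 0) * 3
    if ratio < 0.2:
        ratio = 0.2
    return ratio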
def select_example_trend():
    """
    select one plot as an example for the front page
    input:  none but read from the <web_dir>/Future directory
    output: cont    --- plot in a html format
    """
#
#--- <web_dir>/Future contains files with html formatted plot output 
#--- of those with future violation potentials
#
    cmd  = 'ls ' + web_dir + 'Future/* > ' + zspace
    os.system(cmd)
#
#--- choose one of the plots using a random number
#
    data = ecf.read_file_data(zspace, 1)
    dlen = len(data)

    if dlen > 0:
        pos  = int(dlen * random.random())
        f    = open(data[pos], 'r')
        cont = f.read()
        f.close()
    else:
        cont = ''

    return cont
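#
#--- note: the random pick above can also be written with the standard library
#--- helper random.choice; a minimal equivalent sketch (pick_random_content is
#--- an illustrative name):
#
import random

def pick_random_content(flist):
    if len(flist) == 0:
        return ''
    f = open(random.choice(flist), 'r')
    cont = f.read()
    f.close()
    return cont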
Example #5
def get_mta_fits_data(msid, start, stop):
    """
    fetch data from mta local database
    input:  msid    --- msid
            start   --- start time in seconds from 1998.1.1
            stop    --- stop time in seconds from 1998.1.1
    output: time    --- time in second from 1998.1.1 for the given period
            vals    --- vals of msid for the given period
    """
    #
    #--- find which year(s) the requested period falls
    #
    date = Chandra.Time.DateTime(start)
    byear = int(float(date.year))
    date = Chandra.Time.DateTime(stop)
    eyear = int(float(date.year))

    chk = 0
    for year in range(byear, eyear + 1):
        #
        #--- find whether the fits data file exists
        #
        cmd = 'ls ' + deposit_dir + '*/*/' + msid + '_full_data_' + str(year) + '.fits* > ' + zspace
        os.system(cmd)

        out = ecf.read_file_data(zspace, remove=1)
        fits = out[0]

        if not os.path.isfile(fits):
            continue
#
#--- extract the data for the given period
#
        f = pyfits.open(fits)
        data = f[1].data
        f.close()
        if chk == 0:
            time = data['time']
            vals = data[msid]
            ext = (time > start) & (time < stop)
            time = time[ext]
            vals = vals[ext]
            chk = 1
        else:
            tmp1 = data['time']
            tmp2 = data[msid]
            ext = (tmp1 > start) & (tmp1 < stop)
            tmp1 = tmp1[ext]
            tmp2 = tmp2[ext]
            time = numpy.append(time, tmp1)
            vals = numpy.append(vals, tmp2)

    if chk > 0:
        return [time, vals]
    else:
        #
        #--- if no data, return False
        #
        return False
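#
#--- usage sketch: start/stop are seconds from 1998.1.1, so Chandra.Time can
#--- build the arguments; the msid '1dactbt' below is only an example
#
import Chandra.Time

def example_fetch():
    start = Chandra.Time.DateTime('2017:001:00:00:00').secs
    stop  = Chandra.Time.DateTime('2017:100:00:00:00').secs
    out   = get_mta_fits_data('1dactbt', start, stop)
    if out != False:
        [time, vals] = out
        return len(time)
    return 0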
Example #6
def update_eph_l1():
    """
    update eph L1 related data
    input: none
    output: <out_dir>/<msid>_full_data_<year>.fits
    """
    t_file  = 'sce1300_full_data_*.fits*'
    out_dir = deposit_dir + 'Comp_save/Compephkey/'

    ifile = house_keeping + 'msid_list_ephkey'
    data  = ecf.read_file_data(ifile)
    msid_list = []
    for ent in data:
        atemp = re.split('\s+', ent)
        msid_list.append(atemp[0])

    [tstart, tstop, year] = ecf.find_data_collecting_period(out_dir, t_file)
#
#--- update the data
#
    get_data(tstart, tstop, year, msid_list, out_dir)
#
#--- zip the fits file from the last year at the beginning of the year
#
    ecf.check_zip_possible(out_dir)
def find_violation():
    """
    find msids which violate yellow and/or red upper and lower limits
    input:  none but read from sqlite database (<house_keeping>/v_table.sqlite3)
    output: v_list  --- a list of msids
            vdict   --- a dictionary of msids<--->[yl_lim, yu_lim, rl_lim, ru_lim]
    """
#
#--- find which msids have the sub html page created
#
    cmd = 'ls ' + web_dir + '*_plot.html > ' + zspace
    os.system(cmd)

    data  = ecf.read_file_data(zspace, 1)

    v_list = []
    vdict  = {}
    for ent in data:
        atemp = re.split('\/', ent)
        btemp = re.split('_plot', atemp[-1])
        msid  = btemp[0]
#
#--- check the violation status of the msid from the database
#
        out   = ved.read_v_estimate(msid)
        chk   = 0
        for test in out:
            if (test != 0) and (test != na):
                chk = 1
                break
        if  chk > 0:
            v_list.append(msid)
            vdict[msid] = out

    return [v_list, vdict]
Example #8
def update_acis_ctemp():
    """
    update acis temp data in C
    input: none
    output: <out_dir>/<msid>_full_data_<year>.fits
    """

    t_file  = '1cbat_full_data_*.fits*'
    out_dir = deposit_dir + '/Comp_save/Compaciscent/'

    ifile = house_keeping + 'msid_list_compaciscent'
    data  = ecf.read_file_data(ifile)
    acis_list = []
    for ent in data:
        atemp = re.split('\s+', ent)
        acis_list.append(atemp[0])

    [tstart, tstop, year] = ecf.find_data_collecting_period(out_dir, t_file)

    get_data(tstart, tstop, year, acis_list, out_dir)
#
#--- zip the fits file from the last year at the beginning of the year
#
    ecf.check_zip_possible(out_dir)
def create_sub_html():
    """
    creates html pages for different categories of msids
    input:  none but read from <house_keeping>/sub_html_list_all
    output: <web_address>/Htmls/<category>_main.html
    """
#
#--- get today's date in fractional year
#
    sec1998 = ecf.find_current_stime()
    ytime   = ecf.stime_to_frac_year(sec1998)
#
#--- create dictionary of unit and dictionary of descriptions for msid
#
    [udict, ddict] = ecf.read_unit_list()

    lfile = house_keeping + 'sub_html_list_all'
    data  = ecf.read_file_data(lfile)
#
#--- create individual html pages under each category
#
    for ent in data:
        atemp = re.split('::', ent)
        catg  = atemp[0]
        msids = re.split(':', atemp[1])

        create_html(catg, msids, ytime, udict, ddict)
def delete_old_file():
    """
    remove html files older than one day from the Interactive directory
    input:  none, but read from the directory
    output: none
    """
    #
    #--- find html files in Interactive directory
    #
    cmd = 'ls ' + web_dir + 'Interactive/*.html > ' + zspace
    os.system(cmd)

    dlist = ecf.read_file_data(zspace, remove=1)
    #
    #--- set one day ago
    #
    cdate = time.time() - 60.0 * 60.0 * 24.0
    #
    #--- remove any files created older than one day ago
    #
    for cfile in dlist:
        mc = re.search('html', cfile)
        if mc is not None:
            ftime = os.path.getmtime(cfile)
            if ftime < cdate:
                cmd = 'rm -rf  ' + cfile
                os.system(cmd)
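#
#--- the shell "rm" call above can be avoided with os.remove; a minimal sketch
#--- of the same age test using only the standard library (the function name
#--- is illustrative):
#
import os
import time

def remove_if_older_than_a_day(cfile):
    if cfile.endswith('.html'):
        if os.path.getmtime(cfile) < time.time() - 86400.0:
            os.remove(cfile)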
def select_example_trend():
    """
    select one plot as an example for the front page
    input:  none but read from the <web_dir>/Future directory
    output: cont    --- plot in a html format
    """
    #
    #--- <web_dir>/Future contains files with html formatted plot output
    #--- of those with future violation potentials
    #
    cmd = 'ls ' + web_dir + 'Future/* > ' + zspace
    os.system(cmd)
    #
    #--- choose one of the plots using a random number
    #
    data = ecf.read_file_data(zspace, 1)
    dlen = len(data)

    if dlen > 0:
        pos = int(dlen * random.random())
        f = open(data[pos], 'r')
        cont = f.read()
        f.close()
    else:
        cont = ''

    return cont
Example #12
def extract_data(catg, msid, ltype, mtype):
    """
    read fitting results
    input:  catg    --- category of the msids
            msid    --- msid
            ltype   --- data length: week, short, one, five, long
            mtype   --- min, max, med
    output: [a, b, d, avg, std, da, db, dd], fitting results and their errors
    """
    sfile = web_dir + catg + '/' + msid.capitalize() + '/Plots/' + msid + '_fit_results'
    sdata = ecf.read_file_data(sfile)
    dfile = web_dir + catg + '/' + msid.capitalize() + '/Plots/' + msid + '_dev_fit_results'
    ddata = ecf.read_file_data(dfile)

    a = '0'
    b = '0'
    d = '0'
    avg = '0'
    std = '0'
    da = '0'
    db = '0'
    dd = '0'
    for ent in sdata:
        mc1 = re.search(ltype, ent)
        mc2 = re.search(mtype, ent)
        if (mc1 is not None) and (mc2 is not None):
            atemp = re.split(':', ent)
            a = atemp[2]
            b = atemp[3]
            d = atemp[4]
            avg = atemp[5]
            std = atemp[6]
            break

    for ent in ddata:
        mc1 = re.search(ltype, ent)
        mc2 = re.search(mtype, ent)
        if (mc1 is not None) and (mc2 is not None):
            atemp = re.split(':', ent)
            da = atemp[2]
            db = atemp[3]
            dd = atemp[4]
            break

    return [a, b, d, avg, std, da, db, dd]
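#
#--- usage sketch: the returned entries are strings read from the result files,
#--- so cast before computing; here b/db are treated as the fitted slope and
#--- its error purely for illustration
#
def example_read_slope(catg, msid):
    [a, b, d, avg, std, da, db, dd] = extract_data(catg, msid, 'short', 'min')
    return [float(b), float(db)]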
def create_html_page():
    """
    create individual html pages for all msids in database
    input:  none
    output: <web_dir>/<msid>_plot.html
    """
#
#--- clean out future estimate directory
#
    cmd = 'rm -rf ' + web_dir + 'Future/* 2>/dev/null'
    os.system(cmd)
#
#---  get dictionaries of msid<-->unit and msid<-->description
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- get the list of the names of the data files
#
    cmd = 'ls ' + data_dir + '*_data > ' + zspace
    os.system(cmd)

    data = ecf.read_file_data(zspace, 1)
    for ent in data:
        atemp = re.split('\/', ent)
        btemp = re.split('_data', atemp[-1])
        msid  = btemp[0]

#    for msid in ['1dactbt']:               #--- test test test 
#    for msid in ['2detart_off']:               #--- test test test 
#    for msid in ["cpa1pwr", "pftankop"]:               #--- test test test 

        print 'Processing: ' + msid

        try: 
            unit    = udict[msid]
            descrip = ddict[msid]
        except:
            unit    = ''
            descrip = ''

        #try:
#
#--- create an interactive plot
#
        pout = plot_interactive_trend(msid, unit)
#
#--- if there is not enough data, clean out the limit violation database
#
        if pout == False:
            vtdata = [-999, -999, -999, -999]
            ved.incert_data(msid, vtdata)
  
            print "No plot for : " + msid + ' (data points < 10)'
#
#--- add the plot to the html page
#
        create_plot_html_page(msid, descrip, pout)
def create_html_page():
    """
    create individual html pages for all msids in database
    input:  none
    output: <web_dir>/<msid>_plot.html
    """
    #
    #--- clean out future estimate directory
    #
    cmd = 'rm -rf ' + web_dir + 'Future/* 2>/dev/null'
    os.system(cmd)
    #
    #---  get dictionaries of msid<-->unit and msid<-->description
    #
    [udict, ddict] = ecf.read_unit_list()
    #
    #--- get the list of the names of the data files
    #
    cmd = 'ls ' + data_dir + '*_data > ' + zspace
    os.system(cmd)

    data = ecf.read_file_data(zspace, 1)
    for ent in data:
        atemp = re.split('\/', ent)
        btemp = re.split('_data', atemp[-1])
        msid = btemp[0]

        #    for msid in ['1dactbt']:               #--- test test test
        #    for msid in ['2detart_off']:               #--- test test test
        #    for msid in ["cpa1pwr", "pftankop"]:               #--- test test test

        print 'Processing: ' + msid

        try:
            unit = udict[msid]
            descrip = ddict[msid]
        except:
            unit = ''
            descrip = ''

        #try:
#
#--- create an interactive plot
#
        pout = plot_interactive_trend(msid, unit)
        #
        #--- if there is not enough data, clean out the limit violation database
        #
        if pout == False:
            vtdata = [-999, -999, -999, -999]
            ved.incert_data(msid, vtdata)

            print "No plot for : " + msid + ' (data points < 10)'
#
#--- add the plot to the html page
#
        create_plot_html_page(msid, descrip, pout)
Example #15
def get_data(start, stop, year, msid_list):

    print str(start) + '<-->' + str(stop)

    line = 'operation=retrieve\n'
    line = line + 'dataset = flight\n'
    line = line + 'detector = ephin\n'
    line = line + 'level = 0\n'
    line = line + 'filetype =ephhk \n'
    line = line + 'tstart = ' + start + '\n'
    line = line + 'tstop = ' + stop + '\n'
    line = line + 'go\n'

    fo = open(zspace, 'w')
    fo.write(line)
    fo.close()

    try:
        cmd = ' /proj/sot/ska/bin/arc5gl  -user isobe -script ' + zspace + '> ztemp_out'
        os.system(cmd)
    except:
        cmd = ' /proj/axaf/simul/bin/arc5gl -user isobe -script ' + zspace + '> ztemp_out'
        os.system(cmd)

    mcf.rm_file(zspace)

    data_list = ecf.read_file_data('ztemp_out')
    data_list = data_list[1:]
    #
    #--- append the data to the local fits data files
    #
    for fits in data_list:

        [cols, tbdata] = ecf.read_fits_file(fits)

        time = tbdata['time']

        for col in msid_list:
            #
            #--- ignore columns with "ST_" (standard dev) and time
            #
            mdata = tbdata[col]
            cdata = [time, mdata]
            ocols = ['time', col.lower()]

            if not os.path.isdir(out_dir):
                cmd = 'mkdir ' + out_dir
                os.system(cmd)

            ofits = out_dir + col.lower() + '_full_data_' + str(year) + '.fits'
            if os.path.isfile(ofits):
                update_fits_file(ofits, ocols, cdata)
            else:
                create_fits_file(ofits, ocols, cdata)

        mcf.rm_file(fits)
Example #16
def check_file_update_date():
    """
    find the files which are not updated for a while
    input:  none, but read from <data_dir>
    output: if there are problems, mail will be sent out
    """
    #
    #--- the files listed in <house_keeping>/ignore_list are not updated
    #
    ifile = house_keeping + 'ignore_list'
    ignore = ecf.read_file_data(ifile)

    cmd = 'ls ' + data_dir + '*/*fits > ' + zspace
    os.system(cmd)

    f = open(zspace, 'r')
    data = [line.strip() for line in f.readlines()]
    f.close()

    stday = time.strftime("%Y:%j:00:00:00", time.gmtime())
    stday = Chandra.Time.DateTime(stday).secs - 86400.0 * 8.0

    save = []
    for ent in data:
        out = find_modified_time(ent, stday)
        if out < 0:
            if ent in ignore:
                continue

            save.append(ent)

    if len(save) > 0:
        line = 'The following files have not been updated for more than a week\n\n'
        for ent in save:
            line = line + ent + '\n'

        fo = open(zspace, 'w')
        fo.write(line)
        fo.close()
        cmd = 'cat ' + zspace + ' | mailx -s "Subject: MTA Trending data update problem!" [email protected]'
        os.system(cmd)

        mcf.rm_file(zspace)

    else:
        line = 'Secondary data update finished: ' + time.strftime(
            "%a, %d %b %Y %H:%M:%S", time.localtime()) + '\n'

        fo = open(zspace, 'w')
        fo.write(line)
        fo.close()
        cmd = 'cat ' + zspace + ' | mailx -s "Subject: Secondary Data Update" [email protected]'
        os.system(cmd)

        mcf.rm_file(zspace)
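#
#--- find_modified_time is not defined in this snippet; a plausible minimal
#--- sketch, assuming it returns a negative value when the file is older than
#--- stday (note: stday above is in seconds from 1998.1.1 while
#--- os.path.getmtime gives Unix time, so the real helper presumably
#--- reconciles the two epochs first)
#
import os

def find_modified_time(cfile, stday):
    return os.path.getmtime(cfile) - stday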
Example #17
def special_case_database(msid):
    """
    read special glimmon entry limit database and create list
    input:  msid        --- a msid
    output: l_list      --- a list of limits
    """

    ifile = house_keeping + 'glimmon_special_limit'
    data = ecf.read_file_data(ifile)

    bstart = 48902399.0  #---- 1999:202:00:00:00
    mstop = 3218831995.0  #---- 2100:001:00:00:00

    l_list = []
    for ent in data:
        atemp = re.split('\s+', ent)
        tmsid = atemp[0].strip()
        tmsid = tmsid.lower()

        if tmsid != msid:
            continue

        start = float(atemp[5])
        limit = [
            start, mstop,
            float(atemp[1]),
            float(atemp[2]),
            float(atemp[3]),
            float(atemp[4])
        ]
        #
        #--- check glimmon to find any limit data previous to starting date
        #
        glimit = read_glimmon_main(msid, 0)

        l_list = []
        for lent in glimit:
            stime = float(lent[0])
            ltime = float(lent[1])

            if ltime < start:
                l_list.append(lent)

            elif (stime < start) and (ltime >= start):
                lent[1] = start
                l_list.append(lent)
                break

            elif ltime > start:
                break

        l_list.append(limit)

    return l_list
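#
#--- illustrative only: the parsing above implies that one line of
#--- <house_keeping>/glimmon_special_limit holds, in order:
#---
#---    msid  y_low  y_top  r_low  r_top  start_time(sec from 1998.1.1)
#---
#--- e.g. (hypothetical values):  1dactbt  10.0  30.0  5.0  35.0  630720000.0
#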
Example #18
def update_acis_ctemp():
    """
    """
    #
    #--- read msid list
    #
    afile = house_keeping + 'msid_list_compaciscent'
    data = ecf.read_file_data(afile)
    acis_list = []
    for ent in data:
        atemp = re.split('\s+', ent)
        acis_list.append(atemp[0])

    for year in range(1999, 2019):
        nyear = year
        if tcnv.isLeapYear(year) == 1:
            mon_list = mon_list2
        else:
            mon_list = mon_list1

        for mon in range(0, 12):
            if year == 1999:
                if mon < 7:
                    continue
            if year == 2018:
                if mon > 1:
                    break

            if mon == 11:
                bday = mon_list[mon]
                eday = 1
                nyear += 1
            else:
                bday = mon_list[mon]
                eday = mon_list[mon + 1]

            cbday = str(bday)
            if bday < 10:
                cbday = '00' + cbday
            elif bday < 100:
                cbday = '0' + cbday

            ceday = str(eday)
            if eday < 10:
                ceday = '00' + ceday
            elif eday < 100:
                ceday = '0' + ceday

            start = str(year) + ':' + cbday + ':00:00:00'
            stop = str(nyear) + ':' + ceday + ':00:00:00'

            get_data(start, stop, year, acis_list)
Example #19
def update_fit_result_file(otype, mtype, rfile, result):
    """
    update fitting line result file for the given type
    input:  otype   --- length of the data type (week, short, one, five, long)
            mtype   --- data type (mid, min, max)
            rfile   --- the result file name
            result  --- the new fitted result
    output: rfile   --- updated result file
    """
#
#--- read the saved results
#
    try:
        out  = ecf.read_file_data(rfile)    
        out  = list(set(out))
    except:
        out  = []
#
#--- find the line with the same type and replace it with the new result
#
    save = []
    chk  = 0
    for ent in out:
        test = otype + ':' + mtype
#
#--- remove the following few lines after cleaning finishes (Jan 17, 2018)
#
        mc = re.search(test, ent)
        if (ent.startswith('w:')) or  (ent.startswith('e:')) or (ent.startswith('k:')):
            continue

        if mc is not None:
            save.append(result)
            chk = 1
        else:
            if ent == "" or ent == '\s+':
                continue
            save.append(ent)

    if chk == 0:
        save.append(result)
#
#--- update the file
#
    fo = open(rfile, 'w')
    for ent in save:
        if ent == '':
            continue
        fo.write(ent)
        fo.write('\n')

    fo.close()
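#
#--- usage sketch: a result line is keyed by "<otype>:<mtype>" and carries
#--- a:b:d:avg:std after it; the file name and numbers below are hypothetical
#
def example_update():
    result = 'short:min:0.12:0.003:0.0:10.5:0.2'
    update_fit_result_file('short', 'min', './1cbat_fit_results', result)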
Example #20
def find_group_name(msid):

    fname = house_keeping + 'msid_list_all'
    data = ecf.read_file_data(fname)

    group = ''
    for ent in data:
        atemp = re.split('\s+', ent)
        if atemp[0] == msid:
            group = atemp[1]
            break

    return group
Example #21
def create_sub_html(inter=''):
    """
    creates html pages for different categories of msids
    input:  inter ---   indicator of which period(s) to be processed
                        if "": 'short', 'one', 'five', 'long', otherwise: 'week'
            read from <house_keeping>/sub_html_list_all
    output: <web_dir>/Htmls/<category>_main.html
    """
    #
    #--- get today's date in fractional year
    #
    sec1998 = ecf.find_current_stime()
    ytime = ecf.stime_to_frac_year(sec1998)
    #
    #--- create dictionary of unit and dictionary of descriptions for msid
    #
    [udict, ddict] = ecf.read_unit_list()

    lfile = house_keeping + 'sub_html_list_all'
    data = ecf.read_file_data(lfile)
    #
    #--- create individual html pages under each category
    #
    for ent in data:
        atemp = re.split('::', ent)
        catg = atemp[0].lower()
        catg = catg.capitalize()

        dchk = web_dir + catg
        if not os.path.isdir(dchk):
            cmd = 'mkdir ' + dchk
            os.system(cmd)

        msids = re.split(':', atemp[1])

        if inter == '':
            l_list = ('short', 'one', 'five', 'long')
        else:
            l_list = ('week', '')

        for ltype in l_list:
            if ltype == '':
                continue

            for mtype in ('mid', 'min', 'max'):
                for ptype in ('static', ''):
                    if ptype == '':  #---- no more interactive page (11/14/17)
                        continue
                    create_html(catg, msids, ytime, udict, ddict, ltype, mtype,
                                ptype)
Example #22
def read_msids():
    """
    create a list of msids which can be retrieved from dataseeker
    input:  none but read from <house_keeping>/mta_env_msid_list
    output: msid_list   --- a list of msids
    """
    dfile = house_keeping + 'mta_env_msid_list'
    data  = ecf.read_file_data(dfile)
    
    msid_list = []
    ulist     = []
    for ent in data:
        if ent[0] == '#':
            continue
        msid_list.append(ent)

    return msid_list
Example #23
def read_msids():
    """
    create a list of msids which can be retrieved from dataseeker
    input:  none but read from <house_keeping>/mta_env_msid_list
    output: msid_list   --- a list of msids
    """
    dfile = house_keeping + 'mta_env_msid_list'
    data = ecf.read_file_data(dfile)

    msid_list = []
    ulist = []
    for ent in data:
        if ent[0] == '#':
            continue
        msid_list.append(ent)

    return msid_list
Example #24
def update_msid_data(msid_list='msid_list_fetch'):
    """
    update all msid listed in msid_list
    input:  msid_list   --- a list of msids to be processed. default: msid_list_fetch
    output: <msid>_data.fits/<msid>_short_data.fits/<msid>_week_data.fits
    """

    ifile = house_keeping + msid_list
    data = ecf.read_file_data(ifile)

    for ent in data:
        atemp = re.split('\s+', ent)
        msid = atemp[0]
        group = atemp[1]

        print "Updating: " + group + ': ' + msid

        uds.run_update_with_ska(msid, group)
Example #25
def update_grad_data():
    """
    update grad related data in full resolution
    input:  none
    output: <out_dir>/<msid>_full_data_<year>.fits
    """
    t_file  = 'hcapgrd1_full_data_*.fits*'
    out_dir = deposit_dir + '/Grad_save/'
    tdir    = out_dir + 'Gradcap/'
#
#--- read grad group name
#
    gfile     = house_keeping + 'grad_list'
    grad_list = ecf.read_file_data(gfile)

    [tstart, tstop, year] = ecf.find_data_collecting_period(tdir, t_file)

    get_data(tstart, tstop, year, grad_list, out_dir)
Example #26
def fix_range():

    infile = house_keeping + 'msid_list_sun_angle'
    data = ecf.read_file_data(infile)

    fo = open('./msid_list_sun_angle', 'w')
    for ent in data:
        atemp = re.split('\s+', ent)
        msid = atemp[0]
        group = atemp[1]
        low = atemp[2]
        top = atemp[3]
        [low, top] = adjust(low, top)

        if len(msid) < 8:
            msid = msid + '\t'
        if len(group) < 8:
            group = group + '\t'

        line = msid + '\t' + group + '\t' + low + '\t' + top + '\t0.011\n'
        fo.write(line)

    fo.close()
Example #27
def update_eph_l1():
    """
    """

    ifile = house_keeping + 'msid_list_ephkey'
    data = ecf.read_file_data(ifile)
    msid_list = []
    for ent in data:
        atemp = re.split('\s+', ent)
        msid_list.append(atemp[0])

    for year in range(1999, 2019):
        nyear = year

        for month in range(1, 13):
            if year == 1999:
                if month < 8:
                    continue
            if year == 2018:
                if month > 2:
                    break

            cmon = str(month)
            if month < 10:
                cmon = '0' + cmon
            nmon = month + 1
            if nmon > 12:
                nmon = 1
                nyear += 1
            cnmon = str(nmon)
            if nmon < 10:
                cnmon = '0' + cnmon

            start = str(year) + '-' + cmon + '-01T00:00:00'
            stop = str(nyear) + '-' + cnmon + '-01T00:00:00'

            get_data(start, stop, year, msid_list)
Example #28
def set_data_periods(msid):
    """
    find unprocessed data periods
    input:  msid    --- msid
    output: periods --- a list of list of data collection interval (in seconds from 1998.1.1)
    """

    all_periods = create_data_period_list()

    dname = data_dir + msid + '_data'

    try:
        odata = ecf.read_file_data(dname)

        if len(odata) > 0:
            lent = odata[-1]
            atemp = re.split('\s+', lent)
            lstop = float(atemp[1])

            periods = []
            for ltime in all_periods:
                if ltime[0] >= lstop:
                    periods.append(ltime)
        else:
            periods = all_periods
    except:
        #
        #--- if there is no data yet, start from beginning and also create the output file
        #
        periods = all_periods
        fo = open(dname, 'w')
        fo.close()

    if len(periods) == 0:
        periods = all_periods

    return periods
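#
#--- illustrative only: the parsing above assumes each line of
#--- <data_dir>/<msid>_data starts with "<tstart> <tstop> ..." in seconds from
#--- 1998.1.1, so atemp[1] of the last line is the stop time of the most
#--- recently processed interval
#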
Example #29
def set_data_periods(msid):
    """
    find unprocessed data periods
    input:  msid    --- msid
    output: periods --- a list of list of data collection interval (in seconds from 1998.1.1)
    """

    all_periods = create_data_period_list()

    dname = data_dir + msid + '_data'

    try:
        odata = ecf.read_file_data(dname)

        if len(odata) > 0:
            lent  = odata[-1]
            atemp = re.split('\s+', lent)
            lstop = float(atemp[1])
    
            periods = []
            for ltime in all_periods:
                if ltime[0] >= lstop:
                    periods.append(ltime)
        else:
            periods = all_periods
    except:
#
#--- if there is no data yet, start from beginning and also create the output file
#
        periods = all_periods
        fo  = open(dname, 'w')
        fo.close()

    if len(periods) == 0:
        periods = all_periods

    return periods
Example #30
#
import convertTimeFormat as tcnv  #---- contains MTA time conversion routines
import mta_common_functions as mcf  #---- contains other functions commonly used in MTA scripts
import glimmon_sql_read as gsr
import envelope_common_function as ecf
import fits_operation as mfo
#
#--- set a temporary file name
#
rtail = int(time.time())
zspace = '/tmp/zspace' + str(rtail)
#
#--- read grad group name
#
gfile = house_keeping + 'grad_list'
grad_list = ecf.read_file_data(gfile)

mon_list1 = [1, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335]
mon_list2 = [1, 32, 61, 92, 122, 153, 183, 214, 245, 275, 306, 336]

dout = './Outdir/Compsimoffset/'

#-------------------------------------------------------------------------------------------
#-- update_sim_offset:
#-------------------------------------------------------------------------------------------


def update_sim_offset():
    """
    """
    for year in range(1999, 2019):
import find_moving_average_bk   as fmab #---- moving average (backword fitting version)
import robust_linear            as rfit #---- robust fit rountine
#
#--- set a temporary file name
#
rtail  = int(time.time())
zspace = '/tmp/zspace' + str(rtail)
#
#--- other settings
#
na     = 'na'
#
#--- read category data
#
cfile         = house_keeping + 'sub_html_list_all'
category_list = ecf.read_file_data(cfile)
#
#--- set several values used in the plots
#
color_table  = ['red', 'blue', 'green', 'lime']
marker_table = ['s',   '*',    '^',     'o']
marker_size  = [50,    80,     70,      50]

css = """
    body{
        width:600px;
        height:300px;
        background-color:#FAEBD7;
        font-family:Georgia, "Times New Roman", Times, serif;
    }
    p{
Example #32
def update_simdiag_data(date=''):
    """
    collect sim diag msids
    input:  date    ---- the date in yyyymmdd format. if not given, yesterday's date is used
    output: fits file data related to grad and comp
    """
    #
    #--- read group names which need special treatment
    #
    #sfile = house_keeping + 'msid_list_simdiag'
    sfile = './msid_list_ephkey'
    data = ecf.read_file_data(sfile)
    cols = []
    g_dir = {}
    for ent in data:
        atemp = re.split('\s+', ent)
        cols.append(atemp[0])
        g_dir[atemp[0]] = atemp[1]
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
    #
    #--- read mta database
    #
    mta_db = read_mta_database()
    #
    #--- read mta msid <---> sql msid conversion list
    #
    mta_cross = read_cross_check_table()

    day_list = []
    for year in range(2017, 2019):
        cyear = str(year)
        for mon in range(1, 13):
            if year == 2017:
                if mon < 7:
                    continue
            if year == 2018:
                if mon > 1:
                    break

            cmon = str(mon)
            if mon < 10:
                cmon = '0' + cmon

            if tcnv.isLeapYear(year) == 1:
                lday = mday_list2[mon - 1]
            else:
                lday = mday_list[mon - 1]

            for day in range(1, lday + 1):
                cday = str(day)
                if day < 10:
                    cday = '0' + cday

                sday = cyear + '-' + cmon + '-' + cday
                day_list.append(sday)

    chk = 0
    for sday in day_list:
        if sday == '2018-07-17':
            chk = 1
        if chk == 0:
            continue
        if sday == '2018-01-21':
            break
        print "Date: " + sday

        start = sday + 'T00:00:00'
        stop = sday + 'T23:59:59'

        line = 'operation=retrieve\n'
        line = line + 'dataset = flight\n'
        line = line + 'detector = ephin\n'
        line = line + 'level = 0\n'
        line = line + 'filetype =ephhk\n'
        line = line + 'tstart = ' + start + '\n'
        line = line + 'tstop = ' + stop + '\n'
        line = line + 'go\n'

        fo = open(zspace, 'w')
        fo.write(line)
        fo.close()

        try:
            cmd = ' /proj/sot/ska/bin/arc5gl  -user isobe -script ' + zspace + '> ztemp_out'
            os.system(cmd)
        except:
            cmd = ' /proj/axaf/simul/bin/arc5gl -user isobe -script ' + zspace + '> ztemp_out'
            os.system(cmd)

        mcf.rm_file(zspace)
        #
        #--- find the names of the fits files of the day of the group
        #
        try:
            flist = ecf.read_file_data('ztemp_out', remove=1)
            flist = flist[1:]
        except:
            print "\t\tNo data"
            continue

        if len(flist) < 1:
            print "\t\tNo data"
            continue
#
#--- combine them
#
        flen = len(flist)

        if flen == 0:
            continue

        elif flen == 1:
            cmd = 'cp ' + flist[0] + ' ./ztemp.fits'
            os.system(cmd)

        else:
            appendFitsTable_ascds(flist[0], flist[1], 'ztemp.fits')
            if flen > 2:
                for k in range(2, flen):
                    appendFitsTable_ascds('ztemp.fits', flist[k], 'out.fits')
                    cmd = 'mv out.fits ztemp.fits'
                    os.system(cmd)
#
#--- remove individual fits files
#

        for ent in flist:
            cmd = 'rm -rf ' + ent
            os.system(cmd)

#
#--- read out the data for the full day
#
        [cols_xxx, tbdata] = ecf.read_fits_file('ztemp.fits')

        cmd = 'rm -f ztemp.fits out.fits'
        os.system(cmd)
        #
        #--- get time data in the list form
        #
        dtime = list(tbdata.field('time'))

        for k in range(0, len(cols)):
            #
            #---- extract data in a list form
            #
            col = cols[k]
            data = list(tbdata.field(col))
            #
            #--- change col name to msid
            #
            msid = col.lower()
            #
            #--- get limit data table for the msid
            #
            try:
                tchk = convert_unit_indicator(udict[msid])
            except:
                tchk = 0

            glim = get_limit(msid, tchk, mta_db, mta_cross)
            #
            #--- update database
            #
            tstart = convert_time_format(start)
            tstop = convert_time_format(stop)

            update_database(msid,
                            g_dir[msid],
                            dtime,
                            data,
                            glim,
                            pstart=tstart,
                            pstop=tstop)
Example #33
def update_grad_and_comp_data(date=''):
    """
    collect grad and  comp data for trending
    input:  date    ---- the data collection end date in yyyymmdd format. if not given, yesterday's date is used
    output: fits file data related to grad and comp
    """
    #
    #--- read group names which need special treatment
    #
    sfile = house_keeping + 'mp_process_list'
    glist = ecf.read_file_data(sfile)
    #
    #--- create msid <---> unit dictionary
    #
    [udict, ddict] = ecf.read_unit_list()
    #
    #--- read mta database
    #
    mta_db = ecf.read_mta_database()
    #
    #--- read mta msid <---> sql msid conversion list
    #
    mta_cross = ecf.read_cross_check_table()
    #
    #--- find date to read the data
    #
    if date == '':
        yesterday = datetime.date.today() - datetime.timedelta(1)
        yesterday = str(yesterday).replace('-', '')
        date_list = find_the_last_entry_time(yesterday)

    else:
        date_list = [date]

    for day in date_list:
        #
        #--- find the names of the fits files of the day of the group
        #
        print "Date: " + str(day)

        for group in glist:
            print "Group: " + str(group)
            cmd = 'ls /data/mta_www/mp_reports/' + day + '/' + group + '/data/mta*fits* > ' + zspace
            os.system(cmd)

            flist = ecf.read_file_data(zspace, remove=1)
            #
            #--- combine them
            #
            flen = len(flist)

            if flen == 0:
                continue

            elif flen == 1:
                cmd = 'cp ' + flist[0] + ' ./ztemp.fits'
                os.system(cmd)

            else:
                mfo.appendFitsTable(flist[0], flist[1], 'ztemp.fits')
                if flen > 2:
                    for k in range(2, flen):
                        mfo.appendFitsTable('ztemp.fits', flist[k], 'out.fits')
                        cmd = 'mv out.fits ztemp.fits'
                        os.system(cmd)
#
#--- read out the data for the full day
#
            [cols, tbdata] = ecf.read_fits_file('ztemp.fits')

            cmd = 'rm -f ztemp.fits out.fits'
            os.system(cmd)
            #
            #--- get time data in the list form
            #
            dtime = list(tbdata.field('time'))

            for k in range(1, len(cols)):
                #
                #--- select col name without ST_ (which is standard dev)
                #
                col = cols[k]
                mc = re.search('ST_', col)
                if mc is not None:
                    continue
#
#---- extract data in a list form
#
                data = list(tbdata.field(col))
                #
                #--- change col name to msid
                #
                msid = col.lower()
                #
                #--- get limit data table for the msid
                #
                try:
                    tchk = ecf.convert_unit_indicator(udict[msid])
                except:
                    tchk = 0

                glim = get_limit(msid, tchk, mta_db, mta_cross)
                #
                #--- update database
                #
                update_database(msid, group, dtime, data, glim)
Example #34
def update_eph_data(date=''):
    """
    collect grad and  comp data for trending
    input:  date    ---- the date in yyyymmdd format. if not given, yesterday's date is used
    output: fits file data related to grad and comp
    """
    #
    #--- read group names which need special treatment
    #
    sfile = 'eph_list'
    glist = ecf.read_file_data(sfile)
    #
    #--- create msid <---> unit dictionary
    #
    [udict, ddict] = ecf.read_unit_list()
    #
    #--- read mta database
    #
    mta_db = read_mta_database()
    #
    #--- read mta msid <---> sql msid conversion list
    #
    mta_cross = read_cross_check_table()

    day_list = []
    for year in range(1999, 2018):  #---- CHANGE CHANGE CHANGE!!!!!
        lyear = year
        for mon in range(1, 13):
            if year == 2016 and mon < 9:
                continue
            if year == 2017 and mon > 10:
                continue

            cmon = str(mon)
            if mon < 10:
                cmon = '0' + cmon

            nmon = mon + 1
            if nmon > 12:
                nmon = 1
                lyear += 1

            clmon = str(nmon)
            if nmon < 10:
                clmon = '0' + clmon

            start = str(year) + '-' + cmon + '-01T00:00:00'
            stop = str(lyear) + '-' + clmon + '-01T00:00:00'

            print "Period: " + str(start) + "<--->" + str(stop)

            for group in glist:
                print "Group: " + group
                #
                #---CHANGE THE DETECTOR/FILETYPE BEFORE RUNNING IF IT IS DIFFERENT FROM EPHHK
                #
                line = 'operation=retrieve\n'
                line = line + 'dataset=flight\n'
                line = line + 'detector=ephin\n'
                line = line + 'level=0\n'
                line = line + 'filetype=epheio\n'
                line = line + 'tstart=' + start + '\n'
                line = line + 'tstop=' + stop + '\n'
                line = line + 'go\n'

                fo = open(zspace, 'w')
                fo.write(line)
                fo.close()

                try:
                    cmd = ' /proj/sot/ska/bin/arc5gl  -user isobe -script ' + zspace + '> ztemp_out'
                    os.system(cmd)
                except:
                    cmd = ' /proj/axaf/simul/bin/arc5gl -user isobe -script ' + zspace + '> ztemp_out'
                    os.system(cmd)

                mcf.rm_file(zspace)
                #
                #--- find the names of the fits files of the day of the group
                #
                try:
                    flist = ecf.read_file_data('ztemp_out', remove=1)
                    flist = flist[1:]
                except:
                    print "\t\tNo data"
                    continue

                if len(flist) < 1:
                    print "\t\tNo data"
                    continue
#
#--- combine them
#
                flen = len(flist)

                if flen == 0:
                    continue

                elif flen == 1:
                    cmd = 'cp ' + flist[0] + ' ./ztemp.fits'
                    os.system(cmd)

                else:
                    mfo.appendFitsTable(flist[0], flist[1], 'ztemp.fits')
                    if flen > 2:
                        for k in range(2, flen):
                            mfo.appendFitsTable('ztemp.fits', flist[k],
                                                'out.fits')
                            cmd = 'mv out.fits ztemp.fits'
                            os.system(cmd)
#
#--- remove individual fits files
#

                for ent in flist:
                    cmd = 'rm -rf ' + ent
                    os.system(cmd)

#
#--- read out the data
#
                [cols, tbdata] = ecf.read_fits_file('ztemp.fits')

                cmd = 'rm -f ztemp.fits out.fits'
                os.system(cmd)
                #
                #--- get time data in the list form
                #
                dtime = list(tbdata.field('time'))

                for k in range(1, len(cols)):
                    #
                    #--- select col name without ST_ (which is standard dev)
                    #
                    col = cols[k]
                    mc = re.search('ST_', col)
                    if mc is not None:
                        continue
                    mc = re.search('quality', col, re.IGNORECASE)
                    if mc is not None:
                        continue
                    mc = re.search('mjf', col, re.IGNORECASE)
                    if mc is not None:
                        continue
                    mc = re.search('gap', col, re.IGNORECASE)
                    if mc is not None:
                        continue
                    mc = re.search('dataqual', col, re.IGNORECASE)
                    if mc is not None:
                        continue
                    mc = re.search('tlm_fmt', col, re.IGNORECASE)
                    if mc is not None:
                        continue
#
#---- extract data in a list form
#
                    data = list(tbdata.field(col))
                    #
                    #--- change col name to msid
                    #
                    msid = col.lower()
                    #
                    #--- get limit data table for the msid
                    #
                    try:
                        tchk = convert_unit_indicator(udict[msid])
                    except:
                        tchk = 0

                    glim = get_limit(msid, tchk, mta_db, mta_cross)
                    #
                    #--- update database
                    #
                    update_database(msid, group, dtime, data, glim)
def read_data(msid):
    """
    read the data of msid
    input:  msid    --- msid
    output: pdata   --- a list of lists of data
                        xtime  = pdata[0]
                        dnum   = pdata[1]
                        start  = pdata[2]
                        stop   = pdata[3]
                        avg    = pdata[4]
                        med    = pdata[5]
                        std    = pdata[6]
                        dmin   = pdata[7]
                        dmax   = pdata[8]
                        ylow   = pdata[9]
                        ytop   = pdata[10]
                        rlow   = pdata[11]
                        rtop   = pdata[12]
                        yl_lim = pdata[13]
                        yu_lim = pdata[14]
                        rl_lim = pdata[15]
                        ru_lim = pdata[16]
    """

    dfile = data_dir + msid + '_data'
    data  = ecf.read_file_data(dfile)

    if len(data) == 0:
        return na

    xtime  = []
    dnum   = []
    pcolor = []
    start  = []
    stop   = []
    avg    = []
    med    = []
    std    = []
    dmin   = []
    dmax   = []
    ylow   = []
    ytop   = []
    rlow   = []
    rtop   = []
    yl_lim = []
    yu_lim = []
    rl_lim = []
    ru_lim = []

    for ent in data:
        atemp = re.split('\s+', ent)
#
#--- if standard deviation is really large, something wrong with the data; so drop it
#
        tst = float(atemp[5])
        if abs(tst) > 1e6:
            continue

        tstart = float(atemp[0])
        tstop  = float(atemp[1])

        tmid   = 0.5 * (tstart + tstop)
        tmid   = ecf.stime_to_frac_year(tmid)
        xtime.append(tmid)

        start.append(ecf.covertfrom1998sec(tstart))
        stop.append(ecf.covertfrom1998sec(tstop))

        dnum.append(atemp[2])

        avg.append(float(atemp[3]))
        val = float(atemp[4])
        med.append(val)
        std.append(atemp[5])
        dmin.append(float(atemp[6]))
        dmax.append(float(atemp[7]))
        ylow.append(atemp[8])
        ytop.append(atemp[9])
        rlow.append(atemp[10])
        rtop.append(atemp[11])
        yl_lim.append(atemp[12])
        yu_lim.append(atemp[13])
        rl_lim.append(atemp[14])
        ru_lim.append(atemp[15])

#
#--- check for 'na' before conversion: float('na') would raise an error
#
        if atemp[12] == 'na':
            pcolor.append('blue')
        else:
            yl = float(atemp[12])
            yu = float(atemp[13])
            rl = float(atemp[14])
            ru = float(atemp[15])

            if (ru not in [998, 999])     and (val > ru):
                pcolor.append('red')
            elif (rl not in [-999, -998]) and (val < rl):
                pcolor.append('red')
            elif (yu not in [998, 999])   and (val > yu):
                pcolor.append('#FFA500')
            elif (yl not in [-999, -998]) and (val < yl):
                pcolor.append('#FFA500')
            else:
                pcolor.append('blue')
#
#--- if the avg is totally flat, the plot will break; so change the last entry by a tiny bit
#
    test = numpy.std(avg)
    if test == 0:
        alen = len(avg) - 1
        avg[alen] = avg[alen] * 1.0001
        
    return [xtime, dnum,  start, stop, avg, med, std,  \
            dmin, dmax, ylow, ytop, rlow, rtop, yl_lim, yu_lim, rl_lim, ru_lim, pcolor]
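#
#--- a condensed sketch of the coloring rule above: 998/999 (upper) and
#--- -998/-999 (lower) act as "no limit set" sentinels, so a point is colored
#--- only when a real limit exists and the median crosses it (choose_color is
#--- an illustrative name)
#
def choose_color(val, yl, yu, rl, ru):
    if (ru not in [998, 999])   and (val > ru):
        return 'red'
    if (rl not in [-999, -998]) and (val < rl):
        return 'red'
    if (yu not in [998, 999])   and (val > yu):
        return '#FFA500'
    if (yl not in [-999, -998]) and (val < yl):
        return '#FFA500'
    return 'blue'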
Example #36
def update_mta_comp_database():
    """
    update the database of mta computed msids
    input:  none but read from /data/mta4/Deriv/*fits files
    output: updated data file: <data_dir>/<msid>_data
    """
#
#--- get a list of data fits file names
#
    infile = house_keeping + 'mta_comp_fits_files'
    data   = ecf.read_file_data(infile)

    for fits in data:
#
#--- hrc has 4 different cases (all data, hrc i, hrc s, and off); the tail indicates which case this is
#--- if this is not hrc (or hrc all), tail = 2
#
        mc = re.search('hrc', fits)
        if mc is not None:
            atemp = re.split('_', fits)
            btemp = re.split('.fits', atemp[1])
            tail  =  btemp[0]
        else:
            tail  = 2

        [cols, tbdata] = ecf.read_fits_file(fits)

        time = []
        for ent in tbdata.field('time'):
            stime = float(ent)
#
#--- check whether the time is in dom 
#   
            if stime < 31536000:
                stime = ecf.dom_to_stime(float(ent))

            time.append(stime)

        for col in cols:
            col = col.lower()
#
#--- we need only *_avg columns
#
            mc = re.search('_avg', col)
            if mc is not None:

                vals = tbdata.field(col)
             
                ctime = []
                cvals = []
                for m in range(0, len(time)):
#
#--- skip the data value "nan" and dummy values (-999, -998, -99, 99, 998, 999)
#
                    if str(vals[m]) in  ['nan', 'NaN', 'NAN']:
                        continue

                    nval = float(vals[m])
                    if nval in [-999, -998, -99, 99, 998, 999]:
                        continue
                    else:
                        ctime.append(time[m])
                        cvals.append(nval)
    
                atemp = re.split('_', col)
                msid  = atemp[-2]

                if mcf.chkNumeric(tail):
                    oname = msid
                else:
                    oname = msid + '_' + tail
    
                print "MSID: " + str(oname)

                cmd = 'rm ' + data_dir + oname + '_data'
                os.system(cmd)
#
#--- read limit table for the msid
#
                l_list   = ecf.set_limit_list(msid)
                if len(l_list) == 0:
                    try:
                        l_list = mta_db[msid]
                    except:
                        l_list = []
    
                update_data(msid, l_list, dset = tail, time=ctime, vals=cvals)
Example #37
import convertTimeFormat as tcnv  #---- converTimeFormat contains MTA time conversion routines
import mta_common_functions as mcf  #---- mta common functions
import envelope_common_function as ecf  #---- collection of functions used in envelope fitting
import violation_estimate_data as ved  #---- save violation estimated times in sqlite database v_table
#
#--- set a temporary file name
#
rtail = int(time.time())
zspace = '/tmp/zspace' + str(rtail)

web_adress = 'https://' + web_address
#
#--- a list of those with sub groups
#
sub_list_file = house_keeping + 'sub_group_list'
sub_group_list = ecf.read_file_data(sub_list_file)

#----------------------------------------------------------------------------------
#-- create_sub_html: creates html pages for different categories of msids        --
#----------------------------------------------------------------------------------


def create_sub_html(inter=''):
    """
    creates html pages for different categories of msids
    input:  inter ---   indicator of which period(s) to be processed
                        if "": 'short', 'one', 'five', 'long', otherwise: 'week'
            read from <house_keeping>/sub_html_list_all
    output: <web_dir>/Htmls/<category>_main.html
    """
    #
Example #38
def update_grad_and_comp_data(date=''):
    """
    collect grad and  comp data for trending
    input:  date    ---- the date in yyyymmdd format. if not given, yesterday's date is used
    output: fits file data related to grad and comp
    """
    #
    #--- read group names which need special treatment
    #
    sfile = 'grad_special_list'
    glist = ecf.read_file_data(sfile)
    #
    #--- create msid <---> unit dictionary
    #
    [udict, ddict] = ecf.read_unit_list()
    #
    #--- read mta database
    #
    mta_db = read_mta_database()
    #
    #--- read mta msid <---> sql msid conversion list
    #
    mta_cross = read_cross_check_table()

    day_list = []
    for year in range(1999, 2019):
        cyear = str(year)

        for mon in range(1, 13):
            if year == 1999:
                if mon < 8:
                    continue
            if year == 2018:
                if mon > 2:
                    break

            cmon = str(mon)
            if mon < 10:
                cmon = '0' + cmon

            if tcnv.isLeapYear(year) == 1:
                lday = mday_list2[mon - 1]
            else:
                lday = mday_list[mon - 1]

            for day in range(1, lday + 1):
                cday = str(day)
                if day < 10:
                    cday = '0' + cday

                sday = cyear + '-' + cmon + '-' + cday
                day_list.append(sday)

    for sday in day_list:
        print "Date: " + sday

        start = sday + 'T00:00:00'
        stop = sday + 'T23:59:59'

        for group in glist:
            print "Group: " + group

            line = 'operation=retrieve\n'
            line = line + 'dataset = mta\n'
            line = line + 'detector = grad\n'
            line = line + 'level = 0.5\n'
            line = line + 'filetype = ' + group + '\n'
            line = line + 'tstart = ' + start + '\n'
            line = line + 'tstop = ' + stop + '\n'
            line = line + 'go\n'

            fo = open(zspace, 'w')
            fo.write(line)
            fo.close()

            try:
                cmd = ' /proj/sot/ska/bin/arc5gl  -user isobe -script ' + zspace + '> ztemp_out'
                os.system(cmd)
            except:
                cmd = ' /proj/axaf/simul/bin/arc5gl -user isobe -script ' + zspace + '> ztemp_out'
                os.system(cmd)

            mcf.rm_file(zspace)
            #
            #--- find the names of the fits files of the day of the group
            #
            try:
                flist = ecf.read_file_data('ztemp_out', remove=1)
                flist = flist[1:]
            except:
                print "\t\tNo data"
                continue

            if len(flist) < 1:
                print "\t\tNo data"
                continue
#
#--- combine them
#
            flen = len(flist)

            if flen == 0:
                continue

            elif flen == 1:
                cmd = 'cp ' + flist[0] + ' ./ztemp.fits'
                os.system(cmd)

            else:
                mfo.appendFitsTable(flist[0], flist[1], 'ztemp.fits')
                if flen > 2:
                    for k in range(2, flen):
                        mfo.appendFitsTable('ztemp.fits', flist[k], 'out.fits')
                        cmd = 'mv out.fits ztemp.fits'
                        os.system(cmd)
#
#--- remove individual fits files
#

            for ent in flist:
                cmd = 'rm -rf ' + ent
                os.system(cmd)

#
#--- read out the data for the full day
#
            [cols, tbdata] = ecf.read_fits_file('ztemp.fits')

            cmd = 'rm -f ztemp.fits out.fits'
            os.system(cmd)
            #
            #--- get time data in the list form
            #
            dtime = list(tbdata.field('time'))

            for k in range(1, len(cols)):
                #
                #--- select col name without ST_ (which is standard dev)
                #
                col = cols[k]
                mc = re.search('ST_', col)
                if mc is not None:
                    continue
#
#---- extract data in a list form
#
                data = list(tbdata.field(col))
                #
                #--- change col name to msid
                #
                msid = col.lower()
                #
                #--- get limit data table for the msid
                #
                try:
                    tchk = convert_unit_indicator(udict[msid])
                except:
                    tchk = 0

                glim = get_limit(msid, tchk, mta_db, mta_cross)
                #
                #--- update database
                #
                update_database(msid, group, dtime, data, glim)
Example #39
def dea_full_data_update(chk):
    """
    update deahk search database
    input:  chk --- whether to request full data update: chk == 1:yes
    output: <deposit_dir>/Deahk/<group>/<msid>_full_data_<year>.fits
    """

    tyear = int(float(time.strftime("%Y", time.gmtime())))

    cmd   = 'ls ' + data_dir + 'Deahk_*/*_week_data.fits > ' + zspace
    os.system(cmd)
    data  = ecf.read_file_data(zspace, remove=1)
    
    for ent in data:
        atemp = re.split('\/', ent)
        group = atemp[-2]
        btemp = re.split('_', atemp[-1])
        msid  = btemp[0]
        print "MSID: " + str(msid) + ' in ' + group
        
        [cols, tbdata] = ecf.read_fits_file(ent)

        time  = tbdata['time']
        tdata = tbdata[msid]
        cols  = ['time', msid]
#
#--- regular data update
#
        if chk == 0:
#
#--- normal daily data update
#
            ofits = deposit_dir + 'Deahk_save/' + group + '/' + msid + '_full_data_' + str(tyear) + '.fits'
            if os.path.isfile(ofits):
                ltime = ecf.find_the_last_entry_time(ofits)
                ctime = Chandra.Time.DateTime(str(tyear+1) + ':001:00:00:00').secs
                nchk  = 0
#
#--- if the data is over the year boundary, fill up the last year and create a new one for the new year
#
            else:
                ofits = deposit_dir + 'Deahk_save/' + group + '/' + msid + '_full_data_' + str(tyear-1) + '.fits'
                nfits = deposit_dir + 'Deahk_save/' + group + '/' + msid + '_full_data_' + str(tyear) + '.fits'
                ltime = ecf.find_the_last_entry_time(ofits)
                ctime = Chandra.Time.DateTime(str(tyear) + ':001:00:00:00').secs
                nchk  = 1

            select = (time > ltime) & (time < ctime)
            stime  = time[select]
            sdata  = tdata[select] 
            cdata  = [stime, sdata]
            ecf.update_fits_file(ofits, cols, cdata)

            if nchk > 0:
                select = time >= ctime
                stime  = time[select]
                sdata  = tdata[select] 
                cdata  = [stime, sdata]
                ecf.create_fits_file(nfits, cols, cdata)
#
#--- start from beginning (year 1999)
#
        else:
            for year in range(1999, tyear+1):
                tstart = str(year)     + ':001:00:00:00'
                tstart = Chandra.Time.DateTime(tstart).secs
                tstop  = str(year + 1) + ':001:00:00:00'
                tstop  = Chandra.Time.DateTime(tstop).secs
    
                select = (time >= tstart) & (time < tstop)
                stime  = time[select]
                sdata  = tdata[select]
                cdata  = [stime, sdata]
    
                out    = deposit_dir + 'Deahk_save/' + group + '/'
                if not os.path.isdir(out):
                    cmd = 'mkdir ' + out
                    os.system(cmd)
    
                out    = out + msid + '_full_data_' + str(year) + '.fits'
    
                ecf.create_fits_file(out, cols, cdata)
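#
#--- note: the time column is in seconds from 1998.1.1, so year boundaries must
#--- be converted with Chandra.Time before they are compared against it, as is
#--- done for tstart/tstop above; a minimal sketch (year_start_in_stime is an
#--- illustrative name):
#
import Chandra.Time

def year_start_in_stime(year):
    return Chandra.Time.DateTime(str(year) + ':001:00:00:00').secs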