def set_limit_list_old(msid):
    """
    read upper and lower yellow and red limits for each period
    input:  msid    --- msid
    output: l_list  --- a list of lists of:
                        [<start time>, <stop time>, <yellow min>, <yellow max>, <red min>, <red max>]
    """
    udict = ecf.read_unit_list()
    tchk  = 0
    try:
        unit = udict[msid.lower()]
        if unit.lower()   == 'degc':
            tchk = 1
        elif unit.lower() == 'degf':
            tchk = 2
    except:
        pass

    l_list = gsr.read_glimmon(msid, tchk)

    if len(l_list) == 0:
        try:
            l_list = mta_db[msid]
        except:
            l_list = []

    return l_list
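#
#--- usage sketch (illustrative only; the msid and ctime values are hypothetical).
#--- each entry of the returned list covers one limit period, so the limits in
#--- effect at a given time come from the row whose interval brackets that time.
#--- _example_find_limits is not part of the original pipeline.
#
def _example_find_limits(msid, ctime):
    for lims in set_limit_list_old(msid):
        [tstart, tstop, ymin, ymax, rmin, rmax] = lims
        if tstart <= ctime < tstop:
            return [ymin, ymax, rmin, rmax]

    return []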
def create_sub_html():
    """
    creates html pages for different categories of msids
    input:  none but read from <house_keeping>/sub_html_list_all
    output: <web_address>/Htmls/<category>_main.html
    """
#
#--- get today's date in fractional year
#
    sec1998 = ecf.find_current_stime()
    ytime   = ecf.stime_to_frac_year(sec1998)
#
#--- create dictionary of unit and dictionary of descriptions for msid
#
    [udict, ddict] = ecf.read_unit_list()

    lfile = house_keeping + 'sub_html_list_all'
    data  = ecf.read_file_data(lfile)
#
#--- create individual html pages under each category
#
    for ent in data:
        atemp = re.split('::', ent)
        catg  = atemp[0]
        msids = re.split(':', atemp[1])

        create_html(catg, msids, ytime, udict, ddict)
def compute_sim_flex():
    """
    compute the difference between sim flex temp and set point
    input:  none, but read data from archive
    output: <msid>_data.fits/<msid>_short_data.fits/<msid>_week_data.fits
    """
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = read_mta_database()

    group = 'Compsimoffset'
    for msid in ['flexadif', 'flexbdif', 'flexcdif']:
#
#--- get limit data table for the msid
#
        try:
            tchk = convert_unit_indicator(udict[msid])
        except:
            tchk = 0

        glim = get_limit_for_acis_power(msid, mta_db)
#
#--- update database
#
        update_database(msid, group, glim)
def compute_acis_power():
    """
    compute acis power from existing msid values and update database
    input:  none, but read data from archive
    output: <msid>_data.fits/<msid>_short_data.fits/<msid>_week_data.fits
    """
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = read_mta_database()

    group = 'Compacispwr'
    for msid in ['1dppwra', '1dppwrb']:
#
#--- get limit data table for the msid
#
        try:
            tchk = convert_unit_indicator(udict[msid])
        except:
            tchk = 0

        glim = get_limit_for_acis_power(msid, mta_db)
#
#--- update database
#
        update_database(msid, group, glim)
def compute_acis_power():
    """
    compute acis power from existing msid values and update database
    input:  none, but read data from archive
    output: <msid>_data.fits/<msid>_short_data.fits/<msid>_week_data.fits
    """
#
#--- set a couple of values/lists
#
    group     = 'Compacispwr'
    msid_list = ['1dppwra', '1dppwrb']
    msid_sub  = [['1dppwra', '1dp28avo', '1dpicacu', '*'],\
                 ['1dppwrb', '1dp28bvo', '1dpicbcu', '*']]
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = ecf.read_mta_database()

    for msid in msid_list:
#
#--- get limit data table for the msid
#
        try:
            tchk = ecf.convert_unit_indicator(udict[msid])
        except:
            tchk = 0

        glim = get_limit_for_acis_power(msid, mta_db)
#
#--- update database
#
        uds.update_week_database(msid, group, glim, msid_sub=msid_sub)
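#
#--- a minimal sketch of how an msid_sub recipe is presumably applied inside
#--- uds.update_week_database: each row is [<derived msid>, <msid 1>, <msid 2>,
#--- <operator>], so '1dppwra' is built element-wise as 1dp28avo * 1dpicacu
#--- (voltage x current = power). _apply_msid_sub is a hypothetical helper,
#--- not part of the original code.
#
def _apply_msid_sub(sub, data1, data2):
    [dmsid, msid1, msid2, op] = sub
    if op == '*':
        return [a * b for (a, b) in zip(data1, data2)]
    elif op == '-':
        return [a - b for (a, b) in zip(data1, data2)]
    else:
        return []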
def create_html_page():
    """
    create individual html pages for all msids in database
    input:  none
    output: <web_dir>/<msid>_plot.html
    """
#
#--- clean out future estimate directory
#
    cmd = 'rm -rf ' + web_dir + 'Future/* 2>/dev/null'
    os.system(cmd)
#
#--- get dictionaries of msid<-->unit and msid<-->description
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- get the list of the names of the data files
#
    cmd = 'ls ' + data_dir + '*_data > ' + zspace
    os.system(cmd)
    data = ecf.read_file_data(zspace, 1)

    for ent in data:
        atemp = re.split('\/', ent)
        btemp = re.split('_data', atemp[-1])
        msid  = btemp[0]

#    for msid in ['1dactbt']:                      #--- test test test
#    for msid in ['2detart_off']:                  #--- test test test
#    for msid in ["cpa1pwr", "pftankop"]:          #--- test test test

        print 'Processing: ' + msid

        try:
            unit    = udict[msid]
            descrip = ddict[msid]
        except:
            unit    = ''
            descrip = ''
#try:
#
#--- create an interactive plot
#
        pout = plot_interactive_trend(msid, unit)
#
#--- if there is not enough data, clean out the limit violation database
#
        if pout == False:
            vtdata = [-999, -999, -999, -999]
            ved.incert_data(msid, vtdata)

            print "No plot for : " + msid + ' (data points < 10)'
#
#--- add the plot to the html page
#
        create_plot_html_page(msid, descrip, pout)
def get_data(msid, start, stop):
    """
    extract data for a given msid for a given time period
    input:  msid    --- msid
            start   --- start time
            stop    --- stop time
    output: ttime   --- a list of time data
            tdata   --- a list of data
    """
#    start = ecf.check_time_format(start)
#    stop  = ecf.check_time_format(stop)
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = ecf.read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = ecf.read_cross_check_table()
#
#--- get limit data table for the msid
#
    try:
        uck = udict[msid]
        if uck.lower() == 'k':
            tchk = 1
        else:
            tchk = ecf.convert_unit_indicator(uck)
    except:
        tchk = 0

    glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
#
#--- extract data from archive
#
    chk = 0
    try:
        out   = fetch.MSID(msid, start, stop)
        tdata = out.vals
        ttime = out.times
    except:
        tdata = []
        ttime = []

    return [ttime, tdata]
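#
#--- usage sketch for get_data (the msid and the time values are hypothetical).
#--- the function returns plain lists, so an empty ttime signals that the
#--- archive had nothing for the period. _example_get_data is illustrative only.
#
def _example_get_data():
    [ttime, tdata] = get_data('tephin', '2018:001:00:00:00', '2018:002:00:00:00')
    if len(ttime) == 0:
        print('no archived data for the period')
    else:
        print('%d points between %f and %f' % (len(ttime), ttime[0], ttime[-1]))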
def create_sub_html():
    """
    creates html pages for different categories of msids
    input:  none, but read from <house_keeping>/msid_list_all
            read from <house_keeping>/sub_html_list_*
    output: <web_dir>/Htmls/<category>_main.html
    """
#
#--- get today's date in fractional year
#
    sec1998 = ecf.find_current_stime()
    ytime   = ecf.stime_to_frac_year(sec1998)
#
#--- create dictionary of unit and dictionary of descriptions for msid
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- create category list and a dictionary of catg <--> [msid list]
#
    lfile = house_keeping + 'msid_list_all'
    data  = mcf.read_data_file(lfile)

    catg_dict = {}
    catg_list = []
    for ent in data:
        atemp = re.split('\s+', ent)
        msid  = atemp[0].strip()
        catg  = atemp[1].strip()

        try:
            out = catg_dict[catg]
            out = out + [msid]
            catg_dict[catg] = out
        except:
            catg_dict[catg] = [msid]
            catg_list.append(catg)
#
#--- just in case there is no directory for the page, create it
#
        dchk = web_dir + catg
        if not os.path.isdir(dchk):
            cmd = 'mkdir ' + dchk
            os.system(cmd)
#
#--- create each dtype, mtype and category web page
#
    for dtype in ['week', 'short', 'year', 'five', 'long']:
        for mtype in ['mid', 'min', 'max']:
            for catg in catg_list:
                create_html(catg, catg_dict[catg], ytime, udict, ddict, dtype, mtype)
def create_sub_html(inter=''):
    """
    creates html pages for different categories of msids
    input:  inter   --- indicator of which period(s) to be processed
                        if "": 'short', 'one', 'five', 'long', otherwise: 'week'
            read from <house_keeping>/sub_html_list_all
    output: <web_dir>/Htmls/<category>_main.html
    """
#
#--- get today's date in fractional year
#
    sec1998 = ecf.find_current_stime()
    ytime   = ecf.stime_to_frac_year(sec1998)
#
#--- create dictionary of unit and dictionary of descriptions for msid
#
    [udict, ddict] = ecf.read_unit_list()

    lfile = house_keeping + 'sub_html_list_all'
    data  = ecf.read_file_data(lfile)
#
#--- create individual html pages under each category
#
    for ent in data:
        atemp = re.split('::', ent)
        catg  = atemp[0].lower()
        catg  = catg.capitalize()

        dchk  = web_dir + catg
        if not os.path.isdir(dchk):
            cmd = 'mkdir ' + dchk
            os.system(cmd)

        msids = re.split(':', atemp[1])

        if inter == '':
            l_list = ('short', 'one', 'five', 'long')
        else:
            l_list = ('week', '')

        for ltype in l_list:
            if ltype == '':
                continue

            for mtype in ('mid', 'min', 'max'):
                for ptype in ('static', ''):
                    if ptype == '':         #---- no more interactive page (11/14/17)
                        continue

                    create_html(catg, msids, ytime, udict, ddict, ltype, mtype, ptype)
def create_interactive_page(msid, group, start, stop, step):
    """
    create an interactive html page for a given msid
    input:  msid    --- msid
            group   --- group name
            start   --- start time
            stop    --- stop time
            step    --- bin size in seconds
    """
    start = check_time_format(start)
    stop  = check_time_format(stop)
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = read_cross_check_table()
#
#--- get limit data table for the msid
#
    try:
        tchk = convert_unit_indicator(udict[msid])
    except:
        tchk = 0

    glim = get_limit(msid, tchk, mta_db, mta_cross)
#
#--- extract needed data and save in fits file
#
    try:
        out = fetch.MSID(msid, '2017:001:00:00:00', '2017:002')
    except:
        missed = house_keeping + '/missing_data'
        fo     = open(missed, 'a')
        fo.write(msid)
        fo.write('\n')
        fo.close()

    fits_data = update_database(msid, group, glim, start, stop, step)
def compute_sim_flex():
    """
    compute the difference between sim flex temp and set point
    input:  none, but read data from archive
    output: <msid>_data.fits/<msid>_short_data.fits/<msid>_week_data.fits
    """
#
#--- set a couple of values/lists
#
    group     = 'Compsimoffset'
    msid_list = ['flexadif', 'flexbdif', 'flexcdif']
    msid_sub  = [['flexadif', '3faflaat', '3sflxast', '-'],\
                 ['flexbdif', '3faflbat', '3sflxbst', '-'],\
                 ['flexcdif', '3faflcat', '3sflxcst', '-']]
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = ecf.read_mta_database()

    for msid in msid_list:
#
#--- get limit data table for the msid
#
        try:
            tchk = ecf.convert_unit_indicator(udict[msid])
        except:
            tchk = 0

        glim = get_limit_for_acis_power(msid, mta_db)
#
#--- update database
#
        udfs.update_database(msid, group, glim, msid_sub=msid_sub)
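#
#--- sketch of the quantity behind each derived msid above (assumed from the
#--- '-' operator in msid_sub): e.g. flexadif = 3faflaat - 3sflxast, the flex A
#--- temperature minus its set point. _example_flexadif is illustrative only.
#
def _example_flexadif(t_3faflaat, t_3sflxast):
    return [a - s for (a, s) in zip(t_3faflaat, t_3sflxast)]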
def create_interactive_page(msid, group, start, stop, step):
    """
    create an interactive html page for a given msid
    input:  msid    --- msid
            group   --- group name
            start   --- start time
            stop    --- stop time
            step    --- bin size in seconds
    """
    start = ecf.check_time_format(start)
    stop  = ecf.check_time_format(stop)
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = ecf.read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = ecf.read_cross_check_table()
#
#--- get limit data table for the msid
#
    try:
        tchk = ecf.convert_unit_indicator(udict[msid])
    except:
        tchk = 0

    glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
#
#--- extract data from archive
#
    chk = 0
    try:
        out   = fetch.MSID(msid, start, stop)
        tdata = out.vals
        ttime = out.times
    except:
#
#--- if no data in archive, try mta local database
#
        try:
            [ttime, tdata] = uds.get_mta_fits_data(msid, start, stop)
#
#--- if that also fails, return the empty data set
#
        except:
            chk = 1
#
#--- only short_p can change step size (by setting "step")
#
    if chk == 0:
        [week_p, short_p, long_p] = uds.process_day_data(msid, ttime, tdata, glim, step=step)
#
#--- try to find data from ska or mta local database
#
        try:
            fits_data = create_inter_fits(msid, short_p)
#
#--- for the case where the data is one of the mta special cases
#
        except:
            fits_data = 'na'
    else:
        fits_data = 'na'
#
#--- create interactive html page
#
    create_html_page(msid, fits_data, step)
#
#--- remove fits file
#
    if fits_data != 'na':
        cmd = 'rm -rf ' + fits_data
        os.system(cmd)
def update_eph_data_from_comm(date = ''):
    """
    collect eph data for trending
    input:  date    ---- the data collection end date in yyyymmdd format.
                         if not given, yesterday's date is used
    output: fits file data related to grad and comp
    """
#
#--- read group names which need special treatment
#
    #sfile = house_keeping + 'eph_list'
    #glist = mcf.read_data_file(sfile)
    glist = ['ephhk',]
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = ecf.read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = ecf.read_cross_check_table()
#
#--- find date to read the data
#
    if date == '':
        yesterday = datetime.date.today() - datetime.timedelta(1)
        yesterday = str(yesterday).replace('-', '')
        date_list = create_date_list(yesterday)
    else:
        date_list = [date]

    error_message = ''
    for day in date_list:
#
#--- find the names of the fits files of the day of the group
#
        dline = "Date: " + str(day)
        print(dline)

        for group in glist:
            print("Group: " + str(group))
            cmd = 'ls /data/mta_www/mp_reports/' + day + '/' + group + '/data/* > ' + zspace
            os.system(cmd)

            tlist = mcf.read_data_file(zspace, remove=1)
            flist = []
            for ent in tlist:
                mc = re.search('_STephhk_static_eio0.fits', ent)
                if mc is not None:
                    flist.append(ent)
#
#--- combine them
#
            flen = len(flist)
            if flen == 0:
                continue
            elif flen == 1:
                cmd = 'cp ' + flist[0] + ' ./ztemp.fits'
                os.system(cmd)
            else:
                mcf.rm_files('ztemp.fits')
                mfo.appendFitsTable(flist[0], flist[1], 'ztemp.fits')
                if flen > 2:
                    for k in range(2, flen):
                        mfo.appendFitsTable('ztemp.fits', flist[k], 'out.fits')
                        cmd = 'mv out.fits ztemp.fits'
                        os.system(cmd)
#
#--- read out the data for the full day
#
            [cols, tbdata] = ecf.read_fits_file('ztemp.fits')

            cmd = 'rm -f ztemp.fits out.fits'
            os.system(cmd)
#
#--- get time data in the list form
#
            dtime = list(tbdata.field('time'))

            for k in range(1, len(cols)):
#
#--- select col name without ST_ (which is standard dev)
#
                col = cols[k]
                mc  = re.search('ST_', col)
                if mc is not None:
                    continue
                mc  = re.search('quality', col, re.IGNORECASE)
                if mc is not None:
                    continue
#
#--- extract data in a list form
#
                data = list(tbdata.field(col))
#
#--- change col name to msid
#
                msid = col.lower()
#
#--- get limit data table for the msid
#
                try:
                    tchk = ecf.convert_unit_indicator(udict[msid])
                except:
                    tchk = 0

                glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
#
#--- update database
#
                wline = uds.update_database(msid, group.capitalize(), dtime, data, glim)
                if wline != "":
                    error_message = error_message + dline + '\n' + wline
#
#--- if there are errors, send an error message
#
    if error_message != "":
        error_message = 'MTA limit trend EPH got problems: \n' + error_message

        fo = open(zspace, 'w')
        fo.write(error_message)
        fo.close()

        cmd = 'cat ' + zspace + ' | mailx -s "Subject: EPH data update problem "'
        cmd = cmd + '*****@*****.**'
        os.system(cmd)

        mcf.rm_files(zspace)
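#
#--- the append-and-rename chain above concatenates several same-structure fits
#--- tables into one; a minimal equivalent sketch with astropy (astropy is an
#--- assumption here; the original mfo.appendFitsTable module may use another
#--- library). _example_combine_fits is illustrative only.
#
def _example_combine_fits(flist, outname='ztemp.fits'):
    from astropy.table import Table, vstack

    tables   = [Table.read(fname, hdu=1) for fname in flist]
    combined = vstack(tables)
    combined.write(outname, overwrite=True)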
def update_simsuppl_data(date=''):
    """
    collect sim diag msids
    input:  date    ---- the date in yyyymmdd format. if not given, yesterday's date is used
    output: fits file data related to simdiag
    """
#
#--- read group names which need special treatment
#
    sfile = house_keeping + 'msid_list_simactu_supple'
    data  = ecf.read_file_data(sfile)

    cols  = []
    g_dir = {}
    for ent in data:
        atemp = re.split('\s+', ent)
        cols.append(atemp[0])
        g_dir[atemp[0]] = atemp[1]
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = read_cross_check_table()
#
#--- find date to read the data
#
    if date == '':
        yesterday = datetime.date.today() - datetime.timedelta(1)
        yesterday = str(yesterday).replace('-', '')
        date_list = find_the_last_entry_time(yesterday)
    else:
        date_list = [date]

    for sday in date_list:
        print "Date: " + sday

        start = sday + 'T00:00:00'
        stop  = sday + 'T23:59:59'

        line = 'operation=retrieve\n'
        line = line + 'dataset = flight\n'
        line = line + 'detector = sim\n'
        line = line + 'level = 0\n'
        line = line + 'filetype = sim\n'
        line = line + 'tstart = ' + start + '\n'
        line = line + 'tstop = '  + stop  + '\n'
        line = line + 'go\n'

        fo = open(zspace, 'w')
        fo.write(line)
        fo.close()

        try:
            cmd = ' /proj/sot/ska/bin/arc5gl -user isobe -script ' + zspace + '> ztemp_out'
            os.system(cmd)
        except:
            cmd = ' /proj/axaf/simul/bin/arc5gl -user isobe -script ' + zspace + '> ztemp_out'
            os.system(cmd)

        mcf.rm_file(zspace)
#
#--- find the names of the fits files of the day of the group
#
        try:
            flist = ecf.read_file_data('ztemp_out', remove=1)
            flist = flist[1:]
        except:
            print "\t\tNo data"
            continue

        if len(flist) < 1:
            print "\t\tNo data"
            continue
#
#--- combine them
#
        flen = len(flist)
        if flen == 0:
            continue
        elif flen == 1:
            cmd = 'cp ' + flist[0] + ' ./ztemp.fits'
            os.system(cmd)
        else:
            mfo.appendFitsTable(flist[0], flist[1], 'ztemp.fits')
            if flen > 2:
                for k in range(2, flen):
                    mfo.appendFitsTable('ztemp.fits', flist[k], 'out.fits')
                    cmd = 'mv out.fits ztemp.fits'
                    os.system(cmd)
#
#--- remove individual fits files
#
        for ent in flist:
            cmd = 'rm -rf ' + ent
            os.system(cmd)
#
#--- read out the data for the full day
#
        [cols_xxx, tbdata] = ecf.read_fits_file('ztemp.fits')

        cmd = 'rm -f ztemp.fits out.fits'
        os.system(cmd)
#
#--- get time data in the list form
#
        dtime = list(tbdata.field('time'))

        for k in range(0, len(cols)):
            col = cols[k]
#
#--- extract data in a list form
#
            data = list(tbdata.field(col))
#
#--- change col name to msid
#
            msid = col.lower()
#
#--- get limit data table for the msid
#
            try:
                tchk = convert_unit_indicator(udict[msid])
            except:
                tchk = 0

            glim = get_limit(msid, tchk, mta_db, mta_cross)
#
#--- update database
#
            update_database(msid, g_dir[msid], dtime, data, glim)
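#
#--- the arc5gl command block above recurs throughout these scripts with only
#--- the detector/filetype/time values changing; a parameterized sketch of it
#--- (the default level '0' and the field layout come from the original code;
#--- _example_arc5gl_script itself is a hypothetical helper).
#
def _example_arc5gl_script(detector, filetype, start, stop, level='0'):
    line = 'operation=retrieve\n'
    line = line + 'dataset = flight\n'
    line = line + 'detector = ' + detector + '\n'
    line = line + 'level = '    + level    + '\n'
    line = line + 'filetype = ' + filetype + '\n'
    line = line + 'tstart = '   + start    + '\n'
    line = line + 'tstop = '    + stop     + '\n'
    line = line + 'go\n'

    return line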
def update_msid_data(msid_list='msid_list_fetch'):
    """
    update all msids listed in msid_list
    input:  msid_list   --- a list of msids to be processed. default: msid_list_fetch
    output: <msid>_data.fits/<msid>_short_data.fits
    """
    start_time = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = ecf.read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = ecf.read_cross_check_table()
#
#--- read msid list
#
    mfile = house_keeping + msid_list
    data  = mcf.read_data_file(mfile)

    for ent in data:
#
#--- find msid and group name
#
        mc = re.search('#', ent)
        if mc is not None:
            continue

        try:
            [msid, group] = re.split('\s+', ent)
        except:
            atemp = re.split('\s+', ent)
            msid  = atemp[0]
            group = atemp[1]

        msid  = msid.strip()
        group = group.strip()
#
#--- get limit data table for the msid
#
        try:
            tchk = convert_unit_indicator(udict[msid])
        except:
            tchk = 0

        if msid in sp_limt_case_c:
            tchk = 1

        glim = get_limit(msid, tchk, mta_db, mta_cross)
#
#--- update database
#
#        try:
#            out = fetch.MSID(msid, '2017:001:00:00:00', '2017:002')
#            print("MSID: " + msid)
#        except:
#            out = get_mta_fits_data(msid, '2017:001:00:00:00', '2017:002')
#
#            if out == False:
#                missed = house_keeping + '/missing_data'
#                fo     = open(missed, 'a')
#                fo.write(msid)
#                fo.write('\n')
#                fo.close()
#
#                continue

        update_database(msid, group, glim)
#
#--- record how long the run took
#
    end_time = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())

    line = "trending data update: \n"
    line = line + "Started: " + start_time + '\n'
    line = line + "Ended:   " + end_time   + '\n'
    print(line)
def update_grad_and_comp_data(date=''):
    """
    collect grad and comp data for trending
    input:  date    ---- the date in yyyymmdd format. if not given, yesterday's date is used
    output: fits file data related to grad and comp
    """
#
#--- read group names which need special treatment
#
    sfile = 'grad_special_list'
    glist = ecf.read_file_data(sfile)
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = read_cross_check_table()

    day_list = []
    for year in range(1999, 2019):
        cyear = str(year)

        for mon in range(1, 13):
            if year == 1999:
                if mon < 8:
                    continue
            if year == 2018:
                if mon > 2:
                    break

            cmon = str(mon)
            if mon < 10:
                cmon = '0' + cmon

            if tcnv.isLeapYear(year) == 1:
                lday = mday_list2[mon - 1]
            else:
                lday = mday_list[mon - 1]

            for day in range(1, lday + 1):
                cday = str(day)
                if day < 10:
                    cday = '0' + cday

                sday = cyear + '-' + cmon + '-' + cday
                day_list.append(sday)

    for sday in day_list:
        print "Date: " + sday

        start = sday + 'T00:00:00'
        stop  = sday + 'T23:59:59'

        for group in glist:
            print "Group: " + group

            line = 'operation=retrieve\n'
            line = line + 'dataset = mta\n'
            line = line + 'detector = grad\n'
            line = line + 'level = 0.5\n'
            line = line + 'filetype = ' + group + '\n'
            line = line + 'tstart = '   + start + '\n'
            line = line + 'tstop = '    + stop  + '\n'
            line = line + 'go\n'

            fo = open(zspace, 'w')
            fo.write(line)
            fo.close()

            try:
                cmd = ' /proj/sot/ska/bin/arc5gl -user isobe -script ' + zspace + '> ztemp_out'
                os.system(cmd)
            except:
                cmd = ' /proj/axaf/simul/bin/arc5gl -user isobe -script ' + zspace + '> ztemp_out'
                os.system(cmd)

            mcf.rm_file(zspace)
#
#--- find the names of the fits files of the day of the group
#
            try:
                flist = ecf.read_file_data('ztemp_out', remove=1)
                flist = flist[1:]
            except:
                print "\t\tNo data"
                continue

            if len(flist) < 1:
                print "\t\tNo data"
                continue
#
#--- combine them
#
            flen = len(flist)
            if flen == 0:
                continue
            elif flen == 1:
                cmd = 'cp ' + flist[0] + ' ./ztemp.fits'
                os.system(cmd)
            else:
                mfo.appendFitsTable(flist[0], flist[1], 'ztemp.fits')
                if flen > 2:
                    for k in range(2, flen):
                        mfo.appendFitsTable('ztemp.fits', flist[k], 'out.fits')
                        cmd = 'mv out.fits ztemp.fits'
                        os.system(cmd)
#
#--- remove individual fits files
#
            for ent in flist:
                cmd = 'rm -rf ' + ent
                os.system(cmd)
#
#--- read out the data for the full day
#
            [cols, tbdata] = ecf.read_fits_file('ztemp.fits')

            cmd = 'rm -f ztemp.fits out.fits'
            os.system(cmd)
#
#--- get time data in the list form
#
            dtime = list(tbdata.field('time'))

            for k in range(1, len(cols)):
#
#--- select col name without ST_ (which is standard dev)
#
                col = cols[k]
                mc  = re.search('ST_', col)
                if mc is not None:
                    continue
#
#--- extract data in a list form
#
                data = list(tbdata.field(col))
#
#--- change col name to msid
#
                msid = col.lower()
#
#--- get limit data table for the msid
#
                try:
                    tchk = convert_unit_indicator(udict[msid])
                except:
                    tchk = 0

                glim = get_limit(msid, tchk, mta_db, mta_cross)
#
#--- update database
#
                update_database(msid, group, dtime, data, glim)
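#
#--- a compact sketch of the day_list construction above using the standard
#--- library (the bounds are the ones hard-coded in the loop: 1999-08-01
#--- through 2018-02-28). _example_day_list is illustrative only.
#
def _example_day_list():
    import datetime

    day_list = []
    day  = datetime.date(1999, 8, 1)
    stop = datetime.date(2018, 3, 1)
    while day < stop:
        day_list.append(day.strftime('%Y-%m-%d'))
        day += datetime.timedelta(days=1)

    return day_list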
def create_ephin_leak_html_page():
    """
    create ephin - eph quantity page
    input:  none, but read from <house_keeping>/msid_list_eph_tephin
    output: <web_dir>/<group name>/<msid>/<msid>_eph_tephin.html
    """
#
#--- get dictionary of msid<-->unit and msid<-->description
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read msid lists
#
    ifile = house_keeping + 'msid_list_eph_tephin'
    f     = open(ifile, 'r')
    data  = [line.strip() for line in f.readlines()]
    f.close()
#
#--- read templates
#
    tfile    = house_keeping + 'Templates/slide_template.html'
    f        = open(tfile, 'r')
    template = f.read()
    f.close()

    tfile     = house_keeping + 'Templates/html_close'
    f         = open(tfile, 'r')
    tail_part = f.read()
    f.close()

    tail_part = tail_part.replace("#JAVASCRIPT#", '')
#
#--- for each msid, create a html page
#
    msid_list  = []
    group_list = []
    g_dict     = {}
    gchk       = ''
    m_save     = []
    for ent in data:
        atemp = re.split('\s+', ent)
        msid  = atemp[0]
        group = atemp[1]
#
#--- we don't need to compare the msid with itself
#
        if msid == '5ephint':
            continue

        msid_list.append(msid)
        group_list.append(group)
#
#--- create group name <---> msid list dictionary
#
        if gchk == "":
            gchk = group
            m_save.append(msid)
        elif gchk != group:
            g_dict[gchk] = m_save
            m_save = [msid]
            gchk   = group
        else:
            m_save.append(msid)

        if len(m_save) > 0:
            g_dict[group] = m_save

        for ltype in ['mid', 'min', 'max']:
            this_year = int(float(time.strftime("%Y", time.gmtime())))
            year_list = range(1999, this_year + 1)
            tot       = len(year_list)
#
#--- dot click table
#
            line1 = '<tr>\n'
            for k in range(1, tot):
                line1 = line1 + '\t<th>\n'
                line1 = line1 + '\t<span class="dot" onclick="currentSlide(' + str(k + 1) + ')"></span>'
                line1 = line1 + str(year_list[k]) + '\n\t</th>\n'
                if (k < tot) and ((year_list[k] + 1) % 10 == 0):
                    line1 = line1 + '\t</tr>\n\t<tr>\n'
#
#--- sliding figures
#
            line2 = '<div class="slideshow-container">\n'
            for k in range(0, tot):
                line2 = line2 + '\t<div class="mySlides xfade">\n'
                line2 = line2 + '\t\t<div class="numbertext">' + str(k + 1) + '/' + str(tot) + '</div>\n'
                line2 = line2 + '\t\t<img src="' + web_address + 'Eleak/'
                line2 = line2 + msid.capitalize() + '/Plots/' + msid + '_' + ltype + '_' + str(year_list[k])
                line2 = line2 + '.png" style="width:100%">\n'
                line2 = line2 + '\t\t<!--<div class="text"> Text</div> -->\n'
                line2 = line2 + '\t</div>\n\n'

            line2 = line2 + '\t<a class="prev" onclick="plusSlides(-1)">❮</a>\n'
            line2 = line2 + '\t<a class="next" onclick="plusSlides(1)">❯</a>\n'
            line2 = line2 + '</div>\n'
#
#--- replace the variable parts
#
            hpage = template
            hpage = hpage.replace('#MSID#',    msid.upper())
            hpage = hpage.replace('#MSID_L#',  msid.lower())
            hpage = hpage.replace('#GROUP#',   group.capitalize())
            hpage = hpage.replace('#Group#',   'Eleak/')
            hpage = hpage.replace('#GROUP_L#', group.lower())
            hpage = hpage.replace('#LTYPE#',   ltype.lower())
            hpage = hpage.replace('#DOT_SELECT#',   line1)
            hpage = hpage.replace('#SLID_FIGURES#', line2)
            hpage = hpage.replace('Angle to Sun Center', 'Tephin')
            hpage = hpage.replace('mta_trending_sun_angle_main', 'mta_trending_eph_tephin_main')
            hpage = hpage.replace('_long_sun_angle', '_eph_tephin')
            hpage = hpage.replace('Sun Angle', 'Tephin')
#
#--- print out the html page
#
            oname = web_dir + 'Eleak/' + msid.capitalize() + '/'
            if not os.path.isdir(oname):
                cmd = 'mkdir ' + oname
                os.system(cmd)

            oname = oname + msid + '_' + ltype + '_eph_tephin.html'

            hpage = hpage + tail_part

            try:
                fo = open(oname, 'w')
                fo.write(hpage)
                fo.close()
            except:
                print "cannot create: " + oname
#
#--- create the mid level (group level) web pages
#
    hfile = house_keeping + 'Templates/html_head'
    f     = open(hfile, 'r')
    head_part = f.read()
    f.close()

    head_part = head_part.replace("#MSID#", 'Tephin - EPH')
    head_part = head_part.replace("#JAVASCRIPT#", '')

    for group in group_list:
        msids = g_dict[group]

        for mtype in ['mid', 'min', 'max']:
            line = ''
            if mtype == 'mid':
                mdisc = 'Mean'
            else:
                mdisc = mtype.capitalize()

            line = line + '<h2>' + group.upper() + ' --- ' + mdisc + '</h2>\n'
            line = line + '<div style="float:right;padding-right:50px;">'
            line = line + '<a href="' + web_address + 'mta_trending_eph_tephin_main.html"><b>Back to Top</b></a>\n'
            line = line + '</div>\n'
            line = line + '<table border=1 cellpadding=2 cellspacing=1><tr>\n'

            tlink = web_address + 'Eleak/' + group.lower()

            if mtype == 'mid':
                line = line + '<th>Mean</th>\n'
                line = line + '<th><a href="' + tlink + '_max_eph_tephin.html">Max</a></th>\n'
                line = line + '<th><a href="' + tlink + '_min_eph_tephin.html">Min</a></th>\n'
            elif mtype == 'max':
                line = line + '<th><a href="' + tlink + '_mid_eph_tephin.html">Mean</a></th>\n'
                line = line + '<th>Max</th>\n'
                line = line + '<th><a href="' + tlink + '_min_eph_tephin.html">Min</a></th>\n'
            elif mtype == 'min':
                line = line + '<th><a href="' + tlink + '_mid_eph_tephin.html">Mean</a></th>\n'
                line = line + '<th><a href="' + tlink + '_max_eph_tephin.html">Max</a></th>\n'
                line = line + '<th>Min</th>\n'

            line = line + '</tr></table>\n'
            line = line + '<br /><br />\n'
            line = line + '<p>Please click a msid to open the tephin - eph page.</p>\n'
            line = line + '<table border=1 cellpadding=2 cellspacing=1 style="margin-left:auto;margin-right:auto;text-align:center;">\n'
            line = line + '<tr><th>MSID</th><th>Description</th></tr>\n'

            for msid in msids:
                pfile = web_address + 'Eleak/' + msid.capitalize() + '/' + msid
                pfile = pfile + '_' + mtype + '_eph_tephin.html'

                try:
                    dtext = ddict[msid]
                except:
                    dtext = ''

                line = line + '<tr><th><a href="' + pfile + '">' + msid + '</a></th>\n'
                line = line + '<td style="text-align:center;">' + dtext + '</td></tr>'

            line = line + '</table>\n'

            oline = head_part + line + tail_part

            oname = web_dir + 'Eleak/' + group.lower() + '_' + mtype + '_eph_tephin.html'
            fo    = open(oname, 'w')
            fo.write(oline)
            fo.close()
def update_eph_data(date=''):
    """
    collect eph data for trending
    input:  date    ---- the date in yyyymmdd format. if not given, yesterday's date is used
    output: fits file data related to eph
    """
#
#--- read group names which need special treatment
#
    #sfile = 'eph_list'
    #glist = ecf.read_file_data(sfile)
    glist = ['ephhk']
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = read_cross_check_table()

    for year in range(1999, 2018):          #---- CHANGE CHANGE CHANGE!!!!!
        lyear = year

        for mon in range(1, 13):
            if year == 2016 and mon < 9:
                continue
            if year == 2017 and mon > 10:
                continue

            cmon = str(mon)
            if mon < 10:
                cmon = '0' + cmon

            nmon = mon + 1
            if nmon > 12:
                nmon   = 1
                lyear += 1

            clmon = str(nmon)
            if nmon < 10:
                clmon = '0' + clmon

            start = str(year)  + '-' + cmon  + '-01T00:00:00'
            stop  = str(lyear) + '-' + clmon + '-01T00:00:00'

            print "Period: " + str(start) + "<--->" + str(stop)

            for group in glist:
                print "Group: " + group
#
#--- CHANGE THE DETECTOR/FILETYPE BEFORE RUNNING IF IT IS DIFFERENT FROM EPHHK
#
                line = 'operation=retrieve\n'
                line = line + 'dataset=flight\n'
                line = line + 'detector=ephin\n'
                line = line + 'level=0\n'
                line = line + 'filetype=epheio\n'
                line = line + 'tstart=' + start + '\n'
                line = line + 'tstop='  + stop  + '\n'
                line = line + 'go\n'

                fo = open(zspace, 'w')
                fo.write(line)
                fo.close()

                try:
                    cmd = ' /proj/sot/ska/bin/arc5gl -user isobe -script ' + zspace + '> ztemp_out'
                    os.system(cmd)
                except:
                    cmd = ' /proj/axaf/simul/bin/arc5gl -user isobe -script ' + zspace + '> ztemp_out'
                    os.system(cmd)

                mcf.rm_file(zspace)
#
#--- find the names of the fits files of the period of the group
#
                try:
                    flist = ecf.read_file_data('ztemp_out', remove=1)
                    flist = flist[1:]
                except:
                    print "\t\tNo data"
                    continue

                if len(flist) < 1:
                    print "\t\tNo data"
                    continue
#
#--- combine them
#
                flen = len(flist)
                if flen == 0:
                    continue
                elif flen == 1:
                    cmd = 'cp ' + flist[0] + ' ./ztemp.fits'
                    os.system(cmd)
                else:
                    mfo.appendFitsTable(flist[0], flist[1], 'ztemp.fits')
                    if flen > 2:
                        for k in range(2, flen):
                            mfo.appendFitsTable('ztemp.fits', flist[k], 'out.fits')
                            cmd = 'mv out.fits ztemp.fits'
                            os.system(cmd)
#
#--- remove individual fits files
#
                for ent in flist:
                    cmd = 'rm -rf ' + ent
                    os.system(cmd)
#
#--- read out the data
#
                [cols, tbdata] = ecf.read_fits_file('ztemp.fits')

                cmd = 'rm -f ztemp.fits out.fits'
                os.system(cmd)
#
#--- get time data in the list form
#
                dtime = list(tbdata.field('time'))

                for k in range(1, len(cols)):
#
#--- select col name without ST_ (which is standard dev) and other non-msid columns
#
                    col = cols[k]
                    mc  = re.search('ST_', col)
                    if mc is not None:
                        continue
                    mc  = re.search('quality', col, re.IGNORECASE)
                    if mc is not None:
                        continue
                    mc  = re.search('mjf', col, re.IGNORECASE)
                    if mc is not None:
                        continue
                    mc  = re.search('gap', col, re.IGNORECASE)
                    if mc is not None:
                        continue
                    mc  = re.search('dataqual', col, re.IGNORECASE)
                    if mc is not None:
                        continue
                    mc  = re.search('tlm_fmt', col, re.IGNORECASE)
                    if mc is not None:
                        continue
#
#--- extract data in a list form
#
                    data = list(tbdata.field(col))
#
#--- change col name to msid
#
                    msid = col.lower()
#
#--- get limit data table for the msid
#
                    try:
                        tchk = convert_unit_indicator(udict[msid])
                    except:
                        tchk = 0

                    glim = get_limit(msid, tchk, mta_db, mta_cross)
#
#--- update database
#
                    update_database(msid, group, dtime, data, glim)
def update_ephkey_l1_data(date = ''):
    """
    update ephkey L1 data
    input:  date    ---- the date in yyyymmdd format. if not given, yesterday's date is used
    output: fits file data related to ephkey
    """
#
#--- read the msid list and the group name
#
    ifile = house_keeping + 'msid_list_ephkey'
    f     = open(ifile, 'r')
    data  = [line.strip() for line in f.readlines()]
    f.close()

    msid_list = []
    for ent in data:
        atemp = re.split('\s+', ent)
        msid_list.append(atemp[0])
        group = atemp[1]
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = ecf.read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = ecf.read_cross_check_table()
#
#--- create date list from the day after the last entry to today
#
    if date == '':
#
#--- the date of the last entry
#
        stemp = ecf.find_the_last_entry_time(test_fits)
        stemp = Chandra.Time.DateTime(stemp).date
        atemp = re.split(':', stemp)
        syear = int(float(atemp[0]))
        sday  = int(float(atemp[1]))
#
#--- if the data are missing for more than 6 hours, fill that day again
#
        shh   = int(float(atemp[2]))
        if shh < 18:
            sday -= 1
            if sday < 0:
                syear -= 1
                if tcnv.isLeapYear(syear) == 1:
                    sday = 366
                else:
                    sday = 365
#
#--- find today's date
#
        stemp = time.strftime("%Y:%j", time.gmtime())
        atemp = re.split(':', stemp)
        lyear = int(float(atemp[0]))
        lday  = int(float(atemp[1]))

        date_list = []
        if syear == lyear:
            for day in range(sday+1, lday):
                zday = ecf.add_lead_zeros(day, 2)
                date = str(syear) + ':' + zday
                date_list.append(date)
        else:
            if tcnv.isLeapYear(syear) == 1:
                base = 367
            else:
                base = 366

            for day in range(sday+1, base):
                zday = ecf.add_lead_zeros(day, 2)
                date = str(syear) + ':' + zday
                date_list.append(date)

            for day in range(1, lday):
                zday = ecf.add_lead_zeros(day, 2)
                date = str(lyear) + ':' + zday
                date_list.append(date)
    else:
        date_list.append(date)

    for date in date_list:
        tstart = date + ':00:00:00'
        tstop  = date + ':23:59:59'

        uds.run_update_with_archive(msid_list, group, date_list, 'ephin', '0', 'ephhk', tstart, tstop)
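#
#--- a sketch of the same year-boundary-safe date list using datetime; it
#--- produces the YYYY:DDD strings the loops above build by hand, from the day
#--- after (syear, sday) up to (but not including) (lyear, lday).
#--- _example_doy_range is illustrative only.
#
def _example_doy_range(syear, sday, lyear, lday):
    import datetime

    out  = []
    day  = datetime.datetime.strptime('%d:%03d' % (syear, sday), '%Y:%j') + datetime.timedelta(days=1)
    stop = datetime.datetime.strptime('%d:%03d' % (lyear, lday), '%Y:%j')
    while day < stop:
        out.append(day.strftime('%Y:%j'))
        day += datetime.timedelta(days=1)

    return out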
def tephin_leak_data_update(year=''):
    """
    update tephin - ephin rate/leak current data
    input:  year    --- year of the data to be updated. if it is '', the current year is used
    output: <data_dir>/<msid>/<msid>_data_<year>.fits
    """
#
#--- set data extraction period
#
    tout = set_time_period(year)
    if len(tout) == 6:
        [lstart, lstop, lyear, tstart, tstop, year] = tout
        chk = 1
    else:
        [tstart, tstop, year] = tout
        chk = 0
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = read_cross_check_table()
#
#--- extract tephin data
#
    tchk = convert_unit_indicator(udict['tephin'])
    glim = get_limit('tephin', tchk, mta_db, mta_cross)
#
#--- for the case the time span goes over the year boundary
#
    if chk == 1:
        ltephin = update_database('tephin', 'Eleak', glim, lstart, lstop, lyear)

    tephin = update_database('tephin', 'Eleak', glim, tstart, tstop, year)
#
#--- read msid list
#
    mfile = house_keeping + 'msid_list_eph_tephin'
    data  = ecf.read_file_data(mfile)

    for ent in data:
#
#--- find msid and group name
#
        mc = re.search('#', ent)
        if mc is not None:
            continue

        try:
            [msid, group] = re.split('\s+', ent)
        except:
            atemp = re.split('\s+', ent)
            msid  = atemp[0]
            group = atemp[1]

        msid  = msid.strip()
        group = group.strip()
#
#--- get limit data table for the msid
#
        try:
            tchk = convert_unit_indicator(udict[msid])
        except:
            tchk = 0

        glim = get_limit(msid, tchk, mta_db, mta_cross)
#
#--- update database
#
        try:
            out = fetch.MSID(msid, '2017:001:00:00:00', '2017:002')
            print "MSID: " + msid
        except:
            missed = house_keeping + '/missing_data'
            fo     = open(missed, 'a')
            fo.write(msid)
            fo.write('\n')
            fo.close()

            continue
#
#--- for the case the time span goes over the year boundary
#
        if chk == 1:
            update_database(msid, group, glim, lstart, lstop, lyear, sdata=ltephin)

        update_database(msid, group, glim, tstart, tstop, year, sdata=tephin)
def update_simdiag_data(date=''):
    """
    collect sim diag msids
    input:  date    ---- the date in yyyymmdd format. if not given, yesterday's date is used
    output: fits file data related to sim diag
    """
#
#--- read group names which need special treatment
#
    #sfile = house_keeping + 'msid_list_simdiag'
    sfile = './msid_list_simsupple'
    data  = mcf.read_data_file(sfile)

    cols  = []
    g_dir = {}
    for ent in data:
        atemp = re.split('\s+', ent)
        cols.append(atemp[0])
        g_dir[atemp[0]] = atemp[1]
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = ecf.read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = ecf.read_cross_check_table()

    day_list = []
    for year in range(1999, 2021):
        cyear = str(year)

        for mon in range(1, 13):
            if year == 1999:
                if mon < 8:
                    continue
            if year == 2020:
                if mon > 1:
                    break

            cmon = str(mon)
            if mon < 10:
                cmon = '0' + cmon

            if mcf.is_leapyear(year):
                lday = mday_list2[mon - 1]
            else:
                lday = mday_list[mon - 1]

            for day in range(1, lday + 1):
                cday = str(day)
                if day < 10:
                    cday = '0' + cday

                sday = cyear + '-' + cmon + '-' + cday
                day_list.append(sday)

    for sday in day_list:
        if sday == '2020-01-17':
            break

        print("Date: " + sday)

        start = sday + 'T00:00:00'
        stop  = sday + 'T23:59:59'

        line = 'operation=retrieve\n'
        line = line + 'dataset = flight\n'
        line = line + 'detector = sim\n'
        line = line + 'level = 0\n'
        line = line + 'filetype = sim\n'
        line = line + 'tstart = ' + start + '\n'
        line = line + 'tstop = '  + stop  + '\n'
        line = line + 'go\n'

        flist = mcf.run_arc5gl_process(line)

        if len(flist) < 1:
            print("\t\tNo data")
            continue
#
#--- combine them
#
        flen = len(flist)
        if flen == 0:
            continue
        elif flen == 1:
            cmd = 'cp ' + flist[0] + ' ./ztemp.fits'
            os.system(cmd)
        else:
            mfo.appendFitsTable(flist[0], flist[1], 'ztemp.fits')
            if flen > 2:
                for k in range(2, flen):
                    mfo.appendFitsTable('ztemp.fits', flist[k], 'out.fits')
                    cmd = 'mv out.fits ztemp.fits'
                    os.system(cmd)
#
#--- remove individual fits files
#
        for ent in flist:
            cmd = 'rm -rf ' + ent
            os.system(cmd)
#
#--- read out the data for the full day
#
        [cols_xxx, tbdata] = ecf.read_fits_file('ztemp.fits')

        cmd = 'rm -f ztemp.fits out.fits'
        os.system(cmd)
#
#--- get time data in the list form
#
        dtime = list(tbdata.field('time'))

        for k in range(0, len(cols)):
#
#--- extract data in a list form
#
            col  = cols[k]
            data = list(tbdata.field(col))
#
#--- change col name to msid
#
            msid = col.lower()
#
#--- get limit data table for the msid
#
            try:
                tchk = convert_unit_indicator(udict[msid])
            except:
                tchk = 0

            glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
#
#--- update database
#
            tstart = convert_time_format(start)
            tstop  = convert_time_format(stop)

            update_database(msid, g_dir[msid], dtime, data, glim, pstart=tstart, pstop=tstop)
def get_limit_table():
    """
    create msid <---> limit table dictionary
    input:  none but read from <limit_dir>/Limit_data/op_limits_new.db
    output: lim_dict    --- a dict of lists of limit data. Each inner list contains:
                            [<period start time>, <period end time>, cnd_msid,
                             <possible key lists>,
                             <limit dictionary: key <--> [y_low, y_top, r_low, r_top]>]
            cnd_dict    --- a dictionary of msid <---> condition msid
    """
#
#--- create msid <--> unit dict
#
    [unit_dict, disc_dict] = ecf.read_unit_list()
#
#--- read limit data table
#
    ifile = limit_dir + 'Limit_data/op_limits_new.db'
    ldata = mcf.read_data_file(ifile)
#
#--- create a list of lists in the form of
#--- [<time stamp>, <condition msid>, <switch>, <y_low>, <y_top>, <r_low>, <r_top>]
#
    lim_dict = {}
    cnd_dict = {}
    msid     = ''
    save     = []
    csave    = 'none'
    for ent in ldata:
        if ent[0] == '#':
            continue

        atemp = re.split('#', ent)
        btemp = re.split('\t+', atemp[0])
#
#--- if the msid is the same as the one before, add the data to the save list
#
        if btemp[0].strip() == msid:
            try:
                unit = unit_dict[msid].lower()
            except:
                unit = ''

            if unit == 'psia':
                alist = [int(float(btemp[7])), btemp[5], btemp[6],
                         float(btemp[1]) / kptops, float(btemp[2]) / kptops,
                         float(btemp[3]) / kptops, float(btemp[4]) / kptops]
            else:
                alist = [int(float(btemp[7])), btemp[5], btemp[6],
                         float(btemp[1]), float(btemp[2]),
                         float(btemp[3]), float(btemp[4])]

            save.append(alist)

            cnd_msid = btemp[5].strip()
            if cnd_msid != 'none':
                csave = cnd_msid
        else:
#
#--- the first msid set
#
            if msid == '':
                msid = btemp[0].strip()
                try:
                    unit = unit_dict[msid].lower()
                except:
                    unit = ''

                if unit == 'psia':
                    alist = [int(float(btemp[7])), btemp[5], btemp[6],
                             float(btemp[1]) / kptops, float(btemp[2]) / kptops,
                             float(btemp[3]) / kptops, float(btemp[4]) / kptops]
                else:
                    alist = [int(float(btemp[7])), btemp[5], btemp[6],
                             float(btemp[1]), float(btemp[2]),
                             float(btemp[3]), float(btemp[4])]

                save  = [alist]
                csave = btemp[5].strip()
            else:
#
#--- add ending time of the limit check: year 2200.01.01
#
                alist = [6374591994, 'none', 'none', -9999998.0, 9999998.0, -9999999.0, 9999999.0]
                save.append(alist)

                asave = create_limit_table(save)
                lim_dict[msid] = asave
                cnd_dict[msid] = csave
#
#--- start for the next msid
#
                msid = btemp[0].strip()
                try:
                    unit = unit_dict[msid].lower()
                except:
                    unit = ''

                if unit == 'psia':
                    alist = [int(float(btemp[7])), btemp[5], btemp[6],
                             float(btemp[1]) / kptops, float(btemp[2]) / kptops,
                             float(btemp[3]) / kptops, float(btemp[4]) / kptops]
                else:
                    alist = [int(float(btemp[7])), btemp[5], btemp[6],
                             float(btemp[1]), float(btemp[2]),
                             float(btemp[3]), float(btemp[4])]

                save  = [alist]
                csave = 'none'
#
#--- the last entry
#
    if save != []:
        alist = [6374591994, 'none', 'none', -9999998.0, 9999998.0, -9999999.0, 9999999.0]
        save.append(alist)

        asave = create_limit_table(save)
        lim_dict[msid] = asave
        cnd_dict[msid] = csave

    return [lim_dict, cnd_dict]
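#
#--- usage sketch (the msid and ctime values are hypothetical): look up the
#--- limit set in effect at a given time, following the inner-list structure
#--- described in the docstring above. _example_limits_at is illustrative only.
#
def _example_limits_at(msid, ctime):
    [lim_dict, cnd_dict] = get_limit_table()

    for ent in lim_dict[msid]:
        [tstart, tstop, cnd_msid, keys, ldict] = ent
        if tstart <= ctime < tstop:
            return [cnd_msid, ldict]

    return []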
def create_hrcveto_eph_page():
    """
    create ephin - eph quantity page
    input:  none
    output: <web_dir>/<group name>/<msid>/<msid>_hrcveto_eph.html
    """
#
#--- get dictionary of msid<-->unit and msid<-->description
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read templates
#
    tfile    = house_keeping + 'Templates/slide_template.html'
    f        = open(tfile, 'r')
    template = f.read()
    f.close()

    tfile     = house_keeping + 'Templates/html_close'
    f         = open(tfile, 'r')
    tail_part = f.read()
    f.close()
#
#--- for each msid, create a html page
#
    for msid in msid_list:
        for ltype in ['mid', 'min', 'max']:
            this_year = int(float(time.strftime("%Y", time.gmtime())))
            year_list = range(1999, this_year + 1)
            tot       = len(year_list)
#
#--- dot click table
#
            line1 = '<tr>\n'
            for k in range(1, tot):
                line1 = line1 + '\t<th>\n'
                line1 = line1 + '\t<span class="dot" onclick="currentSlide(' + str(k + 1) + ')"></span>'
                line1 = line1 + str(year_list[k]) + '\n\t</th>\n'
                if (k < tot) and ((year_list[k] + 1) % 10 == 0):
                    line1 = line1 + '\t</tr>\n\t<tr>\n'
#
#--- sliding figures
#
            line2 = '<div class="slideshow-container">\n'
            for k in range(0, tot):
                line2 = line2 + '\t<div class="mySlides xfade">\n'
                line2 = line2 + '\t\t<div class="numbertext">' + str(k + 1) + '/' + str(tot) + '</div>\n'
                line2 = line2 + '\t\t<img src="' + web_address + 'Hrcveto_eph/'
                line2 = line2 + msid.capitalize() + '/Plots/' + msid + '_' + ltype + '_' + str(year_list[k])
                line2 = line2 + '.png" style="width:100%">\n'
                line2 = line2 + '\t\t<!--<div class="text"> Text</div> -->\n'
                line2 = line2 + '\t</div>\n\n'

            line2 = line2 + '\t<a class="prev" onclick="plusSlides(-1)">❮</a>\n'
            line2 = line2 + '\t<a class="next" onclick="plusSlides(1)">❯</a>\n'
            line2 = line2 + '</div>\n'
#
#--- replace the variable parts
#
            hpage = template
            hpage = hpage.replace('#MSID#',    msid.upper())
            hpage = hpage.replace('#MSID_L#',  msid.lower())
            hpage = hpage.replace('#GROUP#',   ltype.capitalize())
            hpage = hpage.replace('#Group#',   'Hrcveto_eph/')
            hpage = hpage.replace('#GROUP_L#', group.lower())
            hpage = hpage.replace('#LTYPE#',   ltype.lower())
            hpage = hpage.replace('#DOT_SELECT#',   line1)
            hpage = hpage.replace('#SLID_FIGURES#', line2)
            hpage = hpage.replace('Angle to Sun Center', 'SHEVART')
            hpage = hpage.replace('mta_trending_sun_angle_main', 'mta_trending_hrcveto_eph_main')
            hpage = hpage.replace('_long_sun_angle', '_hrcveto_eph')
            hpage = hpage.replace('Sun Angle', 'Shevart')

            hpage = hpage + tail_part
#
#--- print out the html page
#
            oname = web_dir + 'Hrcveto_eph/' + msid.capitalize() + '/'
            if not os.path.isdir(oname):
                cmd = 'mkdir ' + oname
                os.system(cmd)

            oname = oname + msid + '_' + ltype + '_hrcveto_eph.html'

            try:
                fo = open(oname, 'w')
                fo.write(hpage)
                fo.close()
            except:
                print "cannot create: " + oname
#
#--- create the mid level (group level) web pages
#
    hfile = house_keeping + 'Templates/html_head'
    f     = open(hfile, 'r')
    head_part = f.read()
    f.close()

    head_part = head_part.replace("#MSID#", 'Shevart - Eph Rate')
    head_part = head_part.replace("#JAVASCRIPT#", '')

    for mtype in ['mid', 'min', 'max']:
        line = ''
        if mtype == 'mid':
            mdisc = 'Mean'
        else:
            mdisc = mtype.capitalize()

        line = line + '<h2>' + group.upper() + ' --- ' + mdisc + '</h2>\n'
        line = line + '<div style="float:right;padding-right:50px;">'
        line = line + '<a href="' + web_address + 'how_to_create_hrcveto_eph.html">How the plots are created</a><br />\n'
        line = line + '<a href="' + web_address + 'mta_trending_hrcveto_eph_main.html"><b>Back to Top</b></a>\n'
        line = line + '</div>\n'
        line = line + '<table border=1 cellpadding=2 cellspacing=1><tr>\n'

        tlink = web_address + 'Hrcveto_eph/' + group.lower()

        if mtype == 'mid':
            line = line + '<th>Mean</th>\n'
            line = line + '<th><a href="' + tlink + '_max_hrcveto_eph.html">Max</a></th>\n'
            line = line + '<th><a href="' + tlink + '_min_hrcveto_eph.html">Min</a></th>\n'
        elif mtype == 'max':
            line = line + '<th><a href="' + tlink + '_mid_hrcveto_eph.html">Mean</a></th>\n'
            line = line + '<th>Max</th>\n'
            line = line + '<th><a href="' + tlink + '_min_hrcveto_eph.html">Min</a></th>\n'
        elif mtype == 'min':
            line = line + '<th><a href="' + tlink + '_mid_hrcveto_eph.html">Mean</a></th>\n'
            line = line + '<th><a href="' + tlink + '_max_hrcveto_eph.html">Max</a></th>\n'
            line = line + '<th>Min</th>\n'

        line = line + '</tr></table>\n'
        line = line + '<br /><br />\n'
        line = line + '<p>This page shows the relation between '
        line = line + '<a href="https://cxc.cfa.harvard.edu/mta/MSID_Trends/Hrcveto/Shevart/shevart_mid_static_long_plot.html">shevart</a>\n'
        line = line + '(shield events) and ephin key rates.</p>\n'
        line = line + '<p>The data are divided into one-year lengths to show the possible time evolution\n'
        line = line + 'of the relation between shevart and the msid.</p>\n'
        line = line + '<table border=1 cellpadding=2 cellspacing=1 style="margin-left:auto;margin-right:auto;text-align:center;">\n'
        line = line + '<tr><th>MSID</th><th>Description</th></tr>\n'

        for msid in msid_list:
            pfile = web_address + 'Hrcveto_eph/' + msid.capitalize() + '/' + msid
            pfile = pfile + '_' + mtype + '_hrcveto_eph.html'

            try:
                dtext = ddict[msid]
            except:
                dtext = ''

            line = line + '<tr><th><a href="' + pfile + '">' + msid + '</a></th>\n'
            line = line + '<td style="text-align:center;">' + dtext + '</td></tr>'

        line = line + '</table>\n'

        oline = head_part + line + tail_part

        oname = web_dir + 'Hrcveto_eph/' + group.lower() + '_' + mtype + '_hrcveto_eph.html'
        fo    = open(oname, 'w')
        fo.write(oline)
        fo.close()
#
#--- update the top page
#
    top_template = house_keeping + 'Templates/mta_trending_hrcveto_eph_main_template'
    f            = open(top_template, 'r')
    top_page     = f.read()
    f.close()

    top_page = top_page + tail_part

    top_out = web_dir + 'mta_trending_hrcveto_eph_main.html'
    fo      = open(top_out, 'w')
    fo.write(top_page)
    fo.close()
def update_ephhk_data():
    """
    update eph hk related msid data
    input:  none
    output: updated data fits files
    """
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = ecf.read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = ecf.read_cross_check_table()
#
#--- find the data period
#
    [tstart, tstop] = find_data_period()
#
#--- extract fits files from archive
#
    ofits = extract_archive_data(tstart, tstop)
#
#--- if no data is extracted, stop
#
    if ofits == False:
        exit(1)
#
#--- read out the data
#
    [cols, tbdata] = ecf.read_fits_file(ofits)

    cmd = 'rm -f out.fits ' + ofits
    os.system(cmd)
#
#--- get time data in the list form
#
    dtime = list(tbdata.field('time'))
#
#--- find usable col names
#
    col_list = find_col_names(cols)

    for col in col_list:
#
#--- extract data in a list form
#
        data = list(tbdata.field(col))
#
#--- change col name to msid
#
        msid = col.lower()
#
#--- get limit data table for the msid
#
        try:
            tchk = convert_unit_indicator(udict[msid])
        except:
            tchk = 0

        glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
#
#--- update database
#
        update_database(msid, 'ephhk', dtime, data, glim)
def update_grad_and_comp_data(date=''):
    """
    collect grad and comp data for trending
    input:  date    ---- the data collection end date in yyyymmdd format.
                         if not given, yesterday's date is used
    output: fits file data related to grad and comp
    """
#
#--- read group names which need special treatment
#
    sfile = house_keeping + 'mp_process_list'
    glist = ecf.read_file_data(sfile)
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = ecf.read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = ecf.read_cross_check_table()
#
#--- find date to read the data
#
    if date == '':
        yesterday = datetime.date.today() - datetime.timedelta(1)
        yesterday = str(yesterday).replace('-', '')
        date_list = find_the_last_entry_time(yesterday)
    else:
        date_list = [date]

    for day in date_list:
#
#--- find the names of the fits files of the day of the group
#
        print "Date: " + str(day)

        for group in glist:
            print "Group: " + str(group)

            cmd = 'ls /data/mta_www/mp_reports/' + day + '/' + group + '/data/mta*fits* > ' + zspace
            os.system(cmd)

            flist = ecf.read_file_data(zspace, remove=1)
#
#--- combine them
#
            flen = len(flist)
            if flen == 0:
                continue
            elif flen == 1:
                cmd = 'cp ' + flist[0] + ' ./ztemp.fits'
                os.system(cmd)
            else:
                mfo.appendFitsTable(flist[0], flist[1], 'ztemp.fits')
                if flen > 2:
                    for k in range(2, flen):
                        mfo.appendFitsTable('ztemp.fits', flist[k], 'out.fits')
                        cmd = 'mv out.fits ztemp.fits'
                        os.system(cmd)
#
#--- read out the data for the full day
#
            [cols, tbdata] = ecf.read_fits_file('ztemp.fits')

            cmd = 'rm -f ztemp.fits out.fits'
            os.system(cmd)
#
#--- get time data in the list form
#
            dtime = list(tbdata.field('time'))

            for k in range(1, len(cols)):
#
#--- select col name without ST_ (which is standard dev)
#
                col = cols[k]
                mc  = re.search('ST_', col)
                if mc is not None:
                    continue
#
#--- extract data in a list form
#
                data = list(tbdata.field(col))
#
#--- change col name to msid
#
                msid = col.lower()
#
#--- get limit data table for the msid
#
                try:
                    tchk = ecf.convert_unit_indicator(udict[msid])
                except:
                    tchk = 0

                glim = get_limit(msid, tchk, mta_db, mta_cross)
#
#--- update database
#
                update_database(msid, group, dtime, data, glim)
import random

rtail  = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
#
#--- other settings
#
na = 'na'
#
#--- read category data
#
cfile         = house_keeping + 'sub_html_list_all'
category_list = mcf.read_data_file(cfile)
#
#--- get dictionaries of msid<-->unit and msid<-->description
#
[udict, ddict] = ecf.read_unit_list()
#
#--- a list of groups excluded from interactive page creation
#
efile = house_keeping + 'exclude_from_interactive'
eout  = mcf.read_data_file(efile)

exclude_from_interactive = []
for ent in eout:
    atemp = re.split('\s+', ent)
    exclude_from_interactive.append(atemp[0])
#
#--- the top web page address
#
web_address = 'https://' + web_address
#
#--- alias dictionary
def create_interactive_page(msid, group, mtype, start, stop, step):
    """
    create an interactive html page for a given msid
    input:  msid    --- msid
            group   --- group name
            mtype   --- mid, med, min, or max
            start   --- start time
            stop    --- stop time
            step    --- bin size in seconds
    """
    start = ecf.check_time_format(start)
    stop  = ecf.check_time_format(stop)
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = ecf.read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = ecf.read_cross_check_table()
#
#--- get limit data table for the msid
#
    try:
        uck = udict[msid]
        if uck.lower() == 'k':
            tchk = 1
        else:
            tchk = ecf.convert_unit_indicator(uck)
    except:
        tchk = 0

    glim = make_glim(msid)
#
#--- extract data from archive
#
    chk = 0
    try:
        [ttime, tdata] = rf.get_data(msid, start, stop)
    except:
#
#--- if no data in archive, try mta local database
#
        try:
            [ttime, tdata] = get_mta_fits_data(msid, group, start, stop)
#
#--- if that also fails, return the empty data set
#
        except:
            chk   = 1
            ttime = []
            tdata = []
#
#--- only short_p can change step size (by setting "step")
#
    if chk == 0:
        data_p = process_day_data(msid, ttime, tdata, glim, step=step)
    else:
        data_p = 'na'
#
#--- create interactive html page
#
    create_html_page(msid, group, data_p, mtype, step)
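#
#--- usage sketch (all argument values are hypothetical): rebuild the weekly
#--- interactive page of one msid with a 300-second bin size.
#
def _example_rebuild_page():
    create_interactive_page('tephin', 'Eleak', 'mid',
                            '2018:001:00:00:00', '2018:008:00:00:00', 300.0)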
def recover_hrcveto_data():
    """
    recover hrc veto data
    input:  none
    output: fits file data related to hrc veto
    """
#
#--- read group names which need special treatment
#
    #sfile = 'eph_list'
    #glist = ecf.read_file_data(sfile)
    glist = ['Hrcveto']
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = read_cross_check_table()

    for year in range(1999, 2018):
        lyear = year

        for mon in range(1, 13):
            if year == 1999:
                if mon < 8:
                    continue
            if year == 2017:
                if mon > 10:
                    break

            cmon = str(mon)
            if mon < 10:
                cmon = '0' + cmon

            nmon = mon + 1
            if nmon > 12:
                nmon   = 1
                lyear += 1

            cnmon = str(nmon)
            if nmon < 10:
                cnmon = '0' + cnmon

            start = str(year)  + '-' + cmon  + '-01T00:00:00'
            stop  = str(lyear) + '-' + cnmon + '-01T00:00:00'

            for group in glist:
                print "Group: " + group + ' : ' + str(start) + '<-->' + str(stop)

                line = 'operation=retrieve\n'
                line = line + 'dataset = flight\n'
                line = line + 'detector = hrc\n'
                line = line + 'level = 0\n'
                line = line + 'filetype = hrcss\n'
                line = line + 'tstart = ' + start + '\n'
                line = line + 'tstop = '  + stop  + '\n'
                line = line + 'go\n'

                fo = open(zspace, 'w')
                fo.write(line)
                fo.close()

                try:
                    cmd = ' /proj/sot/ska/bin/arc5gl -user isobe -script ' + zspace + '> ztemp_out'
                    os.system(cmd)
                except:
                    cmd = ' /proj/axaf/simul/bin/arc5gl -user isobe -script ' + zspace + '> ztemp_out'
                    os.system(cmd)

                mcf.rm_file(zspace)
#
#--- find the names of the fits files of the period of the group
#
                try:
                    flist = ecf.read_file_data('ztemp_out', remove=1)
                    flist = flist[1:]
                except:
                    print "\t\tNo data"
                    continue

                if len(flist) < 1:
                    print "\t\tNo data"
                    continue
#
#--- combine them
#
                flen = len(flist)
                if flen == 0:
                    continue
                elif flen == 1:
                    cmd = 'cp ' + flist[0] + ' ./ztemp.fits'
                    os.system(cmd)
                else:
                    mfo.appendFitsTable(flist[0], flist[1], 'ztemp.fits')
                    if flen > 2:
                        for k in range(2, flen):
                            mfo.appendFitsTable('ztemp.fits', flist[k], 'out.fits')
                            cmd = 'mv out.fits ztemp.fits'
                            os.system(cmd)
#
#--- remove individual fits files
#
                for ent in flist:
                    cmd = 'rm -rf ' + ent
                    os.system(cmd)
#
#--- read out the data for the full period
#
                [cols, tbdata] = ecf.read_fits_file('ztemp.fits')
                cols = ['TLEVART', 'VLEVART', 'SHEVART']

                cmd = 'rm -f ztemp.fits out.fits'
                os.system(cmd)
#
#--- get time data in the list form
#
                dtime = list(tbdata.field('time'))

                for col in cols:
#
#--- extract data in a list form
#
                    data = list(tbdata.field(col))
#
#--- change col name to msid
#
                    msid = col.lower()
#
#--- get limit data table for the msid
#
                    try:
                        tchk = convert_unit_indicator(udict[msid])
                    except:
                        tchk = 0

                    glim = get_limit(msid, tchk, mta_db, mta_cross)
#
#--- update database
#
                    update_database(msid, group, dtime, data, glim)
def update_simdiag_data(date=''):
    """
    collect ephin hk msids
    input:  date    ---- the date in yyyymmdd format. if not given, yesterday's date is used
    output: fits file data related to ephin hk
    """
#
#--- read group names which need special treatment
#
    #sfile = house_keeping + 'msid_list_simdiag'
    sfile = './msid_list_ephkey'
    data  = ecf.read_file_data(sfile)

    cols  = []
    g_dir = {}
    for ent in data:
        atemp = re.split('\s+', ent)
        cols.append(atemp[0])
        g_dir[atemp[0]] = atemp[1]
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = read_cross_check_table()

    day_list = []
    for year in range(2017, 2019):
        cyear = str(year)

        for mon in range(1, 13):
            if year == 2017:
                if mon < 7:
                    continue
            if year == 2018:
                if mon > 1:
                    break

            cmon = str(mon)
            if mon < 10:
                cmon = '0' + cmon

            if tcnv.isLeapYear(year) == 1:
                lday = mday_list2[mon - 1]
            else:
                lday = mday_list[mon - 1]

            for day in range(1, lday + 1):
                cday = str(day)
                if day < 10:
                    cday = '0' + cday

                sday = cyear + '-' + cmon + '-' + cday
                day_list.append(sday)

    chk = 0
    for sday in day_list:
        if sday == '2017-07-17':
            chk = 1
        if chk == 0:
            continue
        if sday == '2018-01-21':
            break

        print "Date: " + sday

        start = sday + 'T00:00:00'
        stop  = sday + 'T23:59:59'

        line = 'operation=retrieve\n'
        line = line + 'dataset = flight\n'
        line = line + 'detector = ephin\n'
        line = line + 'level = 0\n'
        line = line + 'filetype =ephhk\n'
        line = line + 'tstart = ' + start + '\n'
        line = line + 'tstop = '  + stop  + '\n'
        line = line + 'go\n'

        fo = open(zspace, 'w')
        fo.write(line)
        fo.close()

        try:
            cmd = ' /proj/sot/ska/bin/arc5gl -user isobe -script ' + zspace + '> ztemp_out'
            os.system(cmd)
        except:
            cmd = ' /proj/axaf/simul/bin/arc5gl -user isobe -script ' + zspace + '> ztemp_out'
            os.system(cmd)

        mcf.rm_file(zspace)
#
#--- find the names of the fits files of the day of the group
#
        try:
            flist = ecf.read_file_data('ztemp_out', remove=1)
            flist = flist[1:]
        except:
            print "\t\tNo data"
            continue

        if len(flist) < 1:
            print "\t\tNo data"
            continue
#
#--- combine them
#
        flen = len(flist)
        if flen == 0:
            continue
        elif flen == 1:
            cmd = 'cp ' + flist[0] + ' ./ztemp.fits'
            os.system(cmd)
        else:
            appendFitsTable_ascds(flist[0], flist[1], 'ztemp.fits')
            if flen > 2:
                for k in range(2, flen):
                    appendFitsTable_ascds('ztemp.fits', flist[k], 'out.fits')
                    cmd = 'mv out.fits ztemp.fits'
                    os.system(cmd)
#
#--- remove individual fits files
#
        for ent in flist:
            cmd = 'rm -rf ' + ent
            os.system(cmd)
#
#--- read out the data for the full day
#
        [cols_xxx, tbdata] = ecf.read_fits_file('ztemp.fits')

        cmd = 'rm -f ztemp.fits out.fits'
        os.system(cmd)
#
#--- get time data in the list form
#
        dtime = list(tbdata.field('time'))

        for k in range(0, len(cols)):
#
#--- extract data in a list form
#
            col  = cols[k]
            data = list(tbdata.field(col))
#
#--- change col name to msid
#
            msid = col.lower()
#
#--- get limit data table for the msid
#
            try:
                tchk = convert_unit_indicator(udict[msid])
            except:
                tchk = 0

            glim = get_limit(msid, tchk, mta_db, mta_cross)
#
#--- update database
#
            tstart = convert_time_format(start)
            tstop  = convert_time_format(stop)

            update_database(msid, g_dir[msid], dtime, data, glim, pstart=tstart, pstop=tstop)
def extract_hrcveto_data():
    """
    extract hrc veto data
    input:  none
    output: fits file data related to hrc veto
    """
#
#--- read group names which need special treatment
#
    glist = ['Hrcveto']
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = read_cross_check_table()
#
#--- find the date to be filled
#
    day_list = find_the_last_entry_time()

    for sday in day_list:
        print "Date: " + sday

        start = sday + 'T00:00:00'
        stop  = sday + 'T23:59:59'

        for group in glist:
            print "Group: " + group

            line = 'operation=retrieve\n'
            line = line + 'dataset = flight\n'
            line = line + 'detector = hrc\n'
            line = line + 'level = 0\n'
            line = line + 'filetype = hrcss\n'
            line = line + 'tstart = ' + start + '\n'
            line = line + 'tstop = '  + stop  + '\n'
            line = line + 'go\n'

            fo = open(zspace, 'w')
            fo.write(line)
            fo.close()

            try:
                cmd = ' /proj/sot/ska/bin/arc5gl -user isobe -script ' + zspace + '> ztemp_out'
                os.system(cmd)
            except:
                cmd = ' /proj/axaf/simul/bin/arc5gl -user isobe -script ' + zspace + '> ztemp_out'
                os.system(cmd)

            mcf.rm_file(zspace)
#
#--- find the names of the fits files of the day of the group
#
            try:
                flist = ecf.read_file_data('ztemp_out', remove=1)
                flist = flist[1:]
            except:
                print "\t\tNo data"
                continue

            if len(flist) < 1:
                print "\t\tNo data"
                continue
#
#--- combine them
#
            flen = len(flist)
            if flen == 0:
                continue
            elif flen == 1:
                cmd = 'cp ' + flist[0] + ' ./ztemp.fits'
                os.system(cmd)
            else:
                mfo.appendFitsTable(flist[0], flist[1], 'ztemp.fits')
                if flen > 2:
                    for k in range(2, flen):
                        mfo.appendFitsTable('ztemp.fits', flist[k], 'out.fits')
                        cmd = 'mv out.fits ztemp.fits'
                        os.system(cmd)
#
#--- remove individual fits files
#
            for ent in flist:
                cmd = 'rm -rf ' + ent
                os.system(cmd)
#
#--- read out the data for the full day
#
            [cols, tbdata] = ecf.read_fits_file('ztemp.fits')
            cols = ['TLEVART', 'VLEVART', 'SHEVART']

            cmd = 'rm -f ztemp.fits out.fits'
            os.system(cmd)
#
#--- get time data in the list form
#
            dtime = list(tbdata.field('time'))

            for col in cols:
#
#--- extract data in a list form
#
                data = list(tbdata.field(col))
#
#--- change col name to msid
#
                msid = col.lower()
#
#--- get limit data table for the msid
#
                try:
                    tchk = convert_unit_indicator(udict[msid])
                except:
                    tchk = 0

                glim = get_limit(msid, tchk, mta_db, mta_cross)
#
#--- update database
#
                update_database(msid, group, dtime, data, glim)