def run_update_with_ska(msid, group, msid_sub_list=[], glim=''):
    """
    extract data from the ska database and update the data for the given msid
    input:  msid            --- the msid to be updated
            group           --- the group of the msid
            msid_sub_list   --- a list of lists of: [msid, msid_1, msid_2, operand]
                                this is used to compute the first msid from the
                                following two msid values with the operand (+/-/*)
            glim            --- glim is usually found in this function, but you can
                                give it; default: ''
    output: <msid>_data.fits, <msid>_short_data.fits, <msid>_week_data.fits
    """
#
#--- get basic information dict/list
#
    [udict, ddict, mta_db, mta_cross] = ecf.get_basic_info_dict()
#
#--- set starting and stopping data period
#
    test_fits = data_dir + group.capitalize() + '/' + msid + '_data.fits'
    if os.path.isfile(test_fits):
        tstart = ecf.find_the_last_entry_time(test_fits)

        ttemp  = time.strftime("%Y:%j:00:00:00", time.gmtime())
        tstop  = Chandra.Time.DateTime(ttemp).secs - 86400.0

        if tstop < tstart:
            exit(1)

        if len(msid_sub_list) != 0:
            [dtime, tdata] = compute_sub_msid(msid, msid_sub_list, tstart, tstop)
        else:
            out   = fetch.MSID(msid, tstart, tstop)
            ok    = ~out.bads
            dtime = out.times[ok]
            tdata = out.vals[ok]
#
#--- fetch occasionally adds -999.xxx to the output data of some msids; remove them (Jun 13, 2018)
#
            tind  = (tdata > -999) | (tdata <= -1000)
            dtime = dtime[tind]
            tdata = tdata[tind]
#
#--- get limit data table for the msid
#
        if glim == '':
            try:
                tchk = ecf.convert_unit_indicator(udict[msid])
            except:
                tchk = 0

            if msid in sp_limt_case_c:
                tchk = 1

            glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
#
#--- update database
#
        update_database(msid, group, dtime, tdata, glim)
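#
#--- minimal standalone sketch of the -999.xxx filtering used in
#--- run_update_with_ska() above; drop_fetch_fill_values() is an illustrative
#--- helper name, not part of the original module
#
import numpy

def drop_fetch_fill_values(dtime, tdata):
    """
    remove the -999.xxx fill values which fetch occasionally inserts
    input:  dtime   --- numpy array of times
            tdata   --- numpy array of data values
    output: the two arrays with entries in (-1000, -999] removed
    """
    dtime = numpy.asarray(dtime)
    tdata = numpy.asarray(tdata)
#
#--- keep everything outside the fill-value band
#
    tind  = (tdata > -999) | (tdata <= -1000)

    return dtime[tind], tdata[tind]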
def extract_hrcveto_data():
    """
    extract hrc veto data
    input:  none
    output: fits file data related to hrc veto
    """
#
#--- set basic information
#
    group = 'Hrcveto'
    cols  = ['TLEVART', 'VLEVART', 'SHEVART']

    [udict, ddict, mta_db, mta_cross] = ecf.get_basic_info_dict()
#
#--- find the date to be filled
#--- (assumption: testfits points at an existing data file of this group,
#---  following the <data_dir>/<Group>/<msid>_data.fits convention; the
#---  original defines testfits outside of this snippet)
#
    testfits = data_dir + group + '/' + cols[0].lower() + '_data.fits'

    ctime = ecf.find_the_last_entry_time(testfits)
    start = Chandra.Time.DateTime(ctime).date

    today = time.strftime("%Y:%j:00:00:00", time.gmtime())
    ctime = Chandra.Time.DateTime(today).secs - 43200.0
    stop  = Chandra.Time.DateTime(ctime).date

    print("Group: " + group + ': ' + str(start) + '<-->' + str(stop))

    [xxx, tbdata] = uds.extract_data_arc5gl('hrc', '0', 'hrcss', start, stop)
#
#--- get time data in the list form
#
    dtime = list(tbdata.field('time'))

    for col in cols:
#
#--- extract data in a list form
#
        data = list(tbdata.field(col))
#
#--- change col name to msid
#
        msid = col.lower()
#
#--- get limit data table for the msid
#
        try:
            tchk = ecf.convert_unit_indicator(udict[msid])
        except:
            tchk = 0

        glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
#
#--- update database
#
        uds.update_database(msid, group, dtime, data, glim)
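#
#--- minimal sketch of the extraction window computed in extract_hrcveto_data()
#--- above: from the last recorded entry up to half a day (43200 s) before the
#--- start of today; arc_window() is an illustrative name, not in the original
#
import time
import Chandra.Time

def arc_window(last_entry_secs):
    """
    input:  last_entry_secs --- last entry time in seconds from 1998.1.1
    output: (start, stop) date strings in <yyyy>:<ddd>:<hh>:<mm>:<ss> form
    """
    start = Chandra.Time.DateTime(last_entry_secs).date

    today = time.strftime("%Y:%j:00:00:00", time.gmtime())
    stop  = Chandra.Time.DateTime(Chandra.Time.DateTime(today).secs - 43200.0).date

    return start, stop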
def run_update_with_archive(msid_list, group, date_list, detector, level, filetype,\
                            tstart, tstop, sub=''):
    """
    extract data using arc5gl and update the data for the msids in the msid_list
    input:  msid_list   --- a list of msids (fits column names)
            group       --- a group name
            date_list   --- a list of dates to be processed in the form of <yyyy>:<ddd>
            detector    --- detector name
            level       --- level
            filetype    --- file type
            tstart      --- starting time
            tstop       --- stopping time
            sub         --- sub-detector name; default: "" --- no sub detector
    output: <msid>_data.fits, <msid>_short_data.fits, <msid>_week_data.fits
    """
#
#--- get basic information dict/list
#
    [udict, ddict, mta_db, mta_cross] = ecf.get_basic_info_dict()
#
#--- extract data using arc5gl
#
    [cols, tbdata] = extract_data_arc5gl(detector, level, filetype, tstart, tstop, sub=sub)
#
#--- get time data in the list form
#
    dtime = list(tbdata.field('time'))

    for col in msid_list:
#
#--- extract data in a list form
#
        data = list(tbdata.field(col))
#
#--- change col name to msid
#
        msid = col.lower()
#
#--- get limit data table for the msid
#
        try:
            tchk = ecf.convert_unit_indicator(udict[msid])
        except:
            tchk = 0

        glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
#
#--- update database
#
        update_database(msid, group, dtime, data, glim)
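#
#--- minimal sketch of pulling one column out of a fits bin table, in the way
#--- tbdata.field(<col>) is used above; assumes the arc5gl extraction result
#--- is a standard fits file readable with astropy, and read_fits_column() is
#--- an illustrative name, not in the original
#
from astropy.io import fits

def read_fits_column(fits_file, col):
    """
    input:  fits_file   --- a fits file name
            col         --- a column name of the bin table
    output: the column values in a list form
    """
    with fits.open(fits_file) as hdul:
        tbdata = hdul[1].data           #--- the first extension holds the bin table
        return list(tbdata.field(col))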
def update_simsuppl_data(date=''):
    """
    collect sim msids data
    input:  date    --- the date in yyyymmdd format; if not given, yesterday's date is used
    output: fits file data related to sim
    """
#
#--- read group names which need special treatment
#
    sfile = house_keeping + 'msid_list_simactu_supple'
    data  = mcf.read_data_file(sfile)
    cols  = []
    g_dir = {}
    for ent in data:
        atemp = re.split(r'\s+', ent)
        cols.append(atemp[0])
        g_dir[atemp[0]] = atemp[1]
#
#--- get the basic information
#
    [udict, ddict, mta_db, mta_cross] = ecf.get_basic_info_dict()
#
#--- find date to read the data
#
    if date == '':
#
#--- reference fits file used to find the last entry date (assumed path,
#--- following the <data_dir>/<Group>/<msid>_data.fits convention; the
#--- original defines testfits outside of this snippet)
#
        testfits  = data_dir + g_dir[cols[0]].capitalize() + '/' + cols[0].lower() + '_data.fits'
        date_list = ecf.create_date_list_to_yestaday(testfits)
    else:
        date_list = [date]

    for sday in date_list:
        sday = sday[:4] + '-' + sday[4:6] + '-' + sday[6:]
        print("Date: " + sday)

        start = sday + 'T00:00:00'
        stop  = sday + 'T23:59:59'

        [xxx, tbdata] = extract_data_arc5gl('sim', '0', 'sim', start, stop)
#
#--- get time data in the list form
#
        dtime = list(tbdata.field('time'))

        for col in cols:
#
#--- extract data in a list form; the col list excludes the ST_ (standard
#--- deviation) column names
#
            data = list(tbdata.field(col))
#
#--- change col name to msid
#
            msid = col.lower()
#
#--- get limit data table for the msid
#
            try:
                tchk = ecf.convert_unit_indicator(udict[msid])
            except:
                tchk = 0

            glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
#
#--- update database
#
            update_database(msid, g_dir[msid], dtime, data, glim)
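#
#--- minimal standalone sketch of the two-column house_keeping table parse in
#--- update_simsuppl_data() above; assumed file format: "<msid> <group>" per
#--- line, with '#' starting a comment line; read_msid_group_table() is an
#--- illustrative name, not in the original
#
import re

def read_msid_group_table(ifile):
    """
    input:  ifile   --- a file with "<msid> <group>" per line
    output: cols    --- a list of msids
            g_dir   --- a dict of msid --> group
    """
    cols  = []
    g_dir = {}
    with open(ifile) as f:
        for line in f:
            line = line.strip()
            if line == '' or line.startswith('#'):
                continue
            atemp = re.split(r'\s+', line)
            cols.append(atemp[0])
            g_dir[atemp[0]] = atemp[1]

    return cols, g_dir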
def tephin_leak_data_update(year=''):
    """
    update tephin - ephin rate/leak current data
    input:  year    --- year of the data to be updated; if it is '', the current year is used
    output: <data_dir>/<msid>/<msid>_data_<year>.fits
    """
#
#--- set data extraction period
#
    tout = set_time_period(year)
    if len(tout) == 6:
        [ltstart, ltstop, lyear, tstart, tstop, year] = tout
        chk = 1
    else:
        [tstart, tstop, year] = tout
        chk = 0
#
#--- get the basic information
#
    [udict, ddict, mta_db, mta_cross] = ecf.get_basic_info_dict()
#
#--- extract tephin data
#
    tchk = ecf.convert_unit_indicator(udict['tephin'])
    glim = ecf.get_limit('tephin', tchk, mta_db, mta_cross)
#
#--- for the case the time span goes over the year boundary
#
    if chk == 1:
        ltephin = update_database('tephin', 'Eleak', glim, ltstart, ltstop, lyear)

    tephin = update_database('tephin', 'Eleak', glim, tstart, tstop, year)
#
#--- read msid list
#
    mfile = house_keeping + 'msid_list_eph_tephin'
    data  = mcf.read_data_file(mfile)

    for ent in data:
#
#--- find msid and group name
#
        mc = re.search('#', ent)
        if mc is not None:
            continue

        try:
            [msid, group] = re.split(r'\s+', ent)
        except:
            atemp = re.split(r'\s+', ent)
            msid  = atemp[0]
            group = atemp[1]

        msid  = msid.strip()
        group = group.strip()
#
#--- get limit data table for the msid
#
        try:
            tchk = ecf.convert_unit_indicator(udict[msid])
        except:
            tchk = 0

        glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
#
#--- check whether the msid exists in the ska database with a short test
#--- fetch; if not, record it and skip
#
        try:
            out = fetch.MSID(msid, '2017:001:00:00:00', '2017:002')
            print("MSID: " + msid)
        except:
            missed = house_keeping + '/missing_data'
            with open(missed, 'a') as fo:
                fo.write(msid + '\n')
            continue
#
#--- update database; for the case the time span goes over the year boundary,
#--- update the last-year part first
#
        if chk == 1:
            update_database(msid, group, glim, ltstart, ltstop, lyear, sdata=ltephin)

        try:
            update_database(msid, group, glim, tstart, tstop, year, sdata=tephin)
        except:
            pass
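#
#--- minimal sketch of a year-boundary split in the spirit of set_time_period()
#--- used above; assumed behavior (inferred from the len(tout) == 6 check): a
#--- span crossing Jan 1 comes back as six values (last-year part, then the
#--- current-year part), otherwise as three
#
import Chandra.Time

def split_at_year_boundary(tstart, tstop):
    """
    input:  tstart/tstop    --- period boundaries in seconds from 1998.1.1
    output: [tstart, tstop, year] or
            [ltstart, ltstop, lyear, tstart, tstop, year]
    """
    ystart = int(Chandra.Time.DateTime(tstart).date.split(':')[0])
    ystop  = int(Chandra.Time.DateTime(tstop).date.split(':')[0])

    if ystart == ystop:
        return [tstart, tstop, ystop]
#
#--- break the span at the start of the new year
#
    tbreak = Chandra.Time.DateTime(str(ystop) + ':001:00:00:00').secs

    return [tstart, tbreak, ystart, tbreak, tstop, ystop]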
def gratgen_categorize_data():
    """
    separate gratgen data into different categories
    input:  none, but use <data_dir>/Gratgen/*.fits
    output: <data_dir>/Gratgen/<Category>/*.fits
    """
#
#--- get the basic information
#
    [udict, ddict, mta_db, mta_cross] = ecf.get_basic_info_dict()

    for msid in msid_list:
        cols = ['time', msid, 'med', 'std', 'min', 'max', 'ylower',\
                'yupper', 'rlower', 'rupper', 'dcount', 'ylimlower',\
                'ylimupper', 'rlimlower', 'rlimupper']

        glim = ecf.get_limit(msid, 0, mta_db, mta_cross)

        for category in cname_list:
            print("Running: " + str(msid) + '<-->' + category)

            cfile1 = data_dir + 'Gratgen/' + category.capitalize() + '/' + msid + '_data.fits'
            cfile2 = data_dir + 'Gratgen/' + category.capitalize() + '/' + msid + '_short_data.fits'
            cfile3 = data_dir + 'Gratgen/' + category.capitalize() + '/' + msid + '_week_data.fits'

            stday = time.strftime("%Y:%j:00:00:00", time.gmtime())
            tcut1 = 0.0
            tcut2 = Chandra.Time.DateTime(stday).secs - 31622400.0      #--- a year ago
            tcut3 = Chandra.Time.DateTime(stday).secs - 864000.0        #--- 10 days ago

            if os.path.isfile(cfile1):
                tchk = ecf.find_the_last_entry_time(cfile1)
            else:
                tchk = 0
#
#--- read the interval list of this category and keep only the new periods
#
            ifile = house_keeping + category
            data  = mcf.read_data_file(ifile)
            start = []
            stop  = []
            for ent in data:
                atemp = re.split(r'\s+', ent)
                val1  = float(atemp[0])
                val2  = float(atemp[1])
                if val1 > tchk:
                    start.append(val1)
                    stop.append(val2)

            if len(start) == 0:
                continue

            for k in range(0, len(start)):
#
#--- make sure that each interval is at least 300 seconds long
#
                diff = stop[k] - start[k]
                if diff < 300:
                    start[k] -= 100
                    stop[k]   = start[k] + 300.

                data = fetch.MSID(msid, start[k], stop[k])
                if k == 0:
                    ttime = list(data.times)
                    tdata = list(data.vals)
                else:
                    ttime = ttime + list(data.times)
                    tdata = tdata + list(data.vals)

            if len(ttime) == 0:
                continue

            stat_out1 = get_stat(ttime, tdata, glim, 86400.0)
            stat_out2 = get_stat(ttime, tdata, glim, 3600.0)
            stat_out3 = get_stat(ttime, tdata, glim, 300.0)

            if tchk > 0:
                ecf.update_fits_file(cfile1, cols, stat_out1, tcut=tcut1)
                ecf.update_fits_file(cfile2, cols, stat_out2, tcut=tcut2)
                ecf.update_fits_file(cfile3, cols, stat_out3, tcut=tcut3)
            else:
                ecf.create_fits_file(cfile1, cols, stat_out1, tcut=tcut1)
                ecf.create_fits_file(cfile2, cols, stat_out2, tcut=tcut2)
                ecf.create_fits_file(cfile3, cols, stat_out3, tcut=tcut3)
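#
#--- minimal sketch of per-interval statistics in the spirit of get_stat()
#--- above (called with 86400/3600/300 s bins); the real get_stat also fills
#--- the limit-related columns from glim, which this sketch omits, and
#--- bin_stats() is an illustrative name, not in the original
#
import numpy

def bin_stats(ttime, tdata, width):
    """
    input:  ttime   --- a time-ordered list of times in seconds
            tdata   --- a list of data values
            width   --- bin width in seconds
    output: a list of [bin center, mean, med, std, min, max] per non-empty bin
    """
    ttime = numpy.asarray(ttime)
    tdata = numpy.asarray(tdata, dtype=float)

    out  = []
    tcur = ttime[0]
    while tcur < ttime[-1]:
        sel = (ttime >= tcur) & (ttime < tcur + width)
        if sel.any():
            chunk = tdata[sel]
            out.append([tcur + 0.5 * width, chunk.mean(), numpy.median(chunk),
                        chunk.std(), chunk.min(), chunk.max()])
        tcur += width

    return out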