def run_update_with_ska(msid, group, msid_sub_list=[], glim=''):
    """
    extract data from the ska database and update the data for the given msid
    input:  msid            --- msid
            group           --- the group of the msid
            msid_sub_list   --- a list of lists of:
                                [msid, msid_1, msid_2, operand]
                                used to compute the first msid from the following two
                                msid values with the operand (+/-/*)
            glim            --- limit table; normally looked up inside this
                                function, but a table can be supplied; default: ''
    output: <msid>_data.fits, <msid>_short_data.fits, <msid>_week_data.fits
    """
#
#--- get basic information dict/list
#
    [udict, ddict, mta_db, mta_cross] = ecf.get_basic_info_dict()
#
#--- set starting and stopping data period
#
    test_fits = data_dir + group.capitalize() + '/' + msid + '_data.fits'

    if os.path.isfile(test_fits):
        tstart    = ecf.find_the_last_entry_time(test_fits)
    
        ttemp = time.strftime("%Y:%j:00:00:00", time.gmtime())
        tstop = Chandra.Time.DateTime(ttemp).secs - 86400.0
    
        if tstop < tstart:
            exit(1)
    
        if len(msid_sub_list) != 0:
            [dtime, tdata] = compute_sub_msid(msid, msid_sub_list, tstart, tstop)
        else:
            out     = fetch.MSID(msid, tstart, tstop)
            ok      = ~out.bads
            dtime   = out.times[ok]
            tdata   = out.vals[ok]
#
#--- fetch occasionally adds -999.xxx to the output data of some msids; remove them (Jun 13, 2018)
#
            tind    = (tdata > -999) | (tdata <= -1000)
            dtime   = dtime[tind]
            tdata   = tdata[tind]
#
#--- get limit data table for the msid
#
        if glim == '':
            try:
                tchk  = ecf.convert_unit_indicator(udict[msid])
            except:
                tchk  = 0
    
            if msid in sp_limt_case_c:
                tchk = 1

            glim  = ecf.get_limit(msid, tchk, mta_db, mta_cross)
#
#--- update database
#
        update_database(msid, group, dtime, tdata, glim)
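
#
#--- a minimal, self-contained sketch (not part of the original script) of the
#--- bad-value filter used above: a numpy boolean mask drops the -999.xxx
#--- placeholders that fetch occasionally returns; the helper name is illustrative
#
def _filter_fetch_placeholders(dtime, tdata):
    import numpy
    dtime = numpy.asarray(dtime)
    tdata = numpy.asarray(tdata)
    mask  = (tdata > -999) | (tdata <= -1000)   #--- drop values in (-1000, -999]
    return dtime[mask], tdata[mask]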
def extract_hrcveto_data():
    """
    extract hrc veto data
    input:  none
    output: updated fits data files for TLEVART, VLEVART, and SHEVART
    """
    #
    #--- set basic information
    #
    group = 'Hrcveto'
    cols = ['TLEVART', 'VLEVART', 'SHEVART']
    [udict, ddict, mta_db, mta_cross] = ecf.get_basic_info_dict()
    #
    #--- find the date to be filled (testfits is assumed to be defined
    #--- at the module level of the original script)
    #
    ctime = ecf.find_the_last_entry_time(testfits)
    start = Chandra.Time.DateTime(ctime).date

    today = time.strftime("%Y:%j:00:00:00", time.gmtime())
    ctime = Chandra.Time.DateTime(today).secs - 43200.0
    stop = Chandra.Time.DateTime(ctime).date

    print("Group: " + group + ': ' + str(start) + '<-->' + str(stop))

    [xxx, tbdata] = uds.extract_data_arc5gl('hrc', '0', 'hrcss', start, stop)
    #
    #--- get time data in the list form
    #
    dtime = list(tbdata.field('time'))

    for col in cols:
        #
        #---- extract data in a list form
        #
        data = list(tbdata.field(col))
        #
        #--- change col name to msid
        #
        msid = col.lower()
        #
        #--- get limit data table for the msid
        #
        try:
            tchk = ecf.convert_unit_indicator(udict[msid])
        except:
            tchk = 0

        glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
        #
        #--- update database
        #
        uds.update_database(msid, group, dtime, data, glim)
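
#
#--- hedged sketch (illustrative helper, not from the original module) of the
#--- unit-indicator fallback used above: an msid without a unit entry falls
#--- back to indicator 0 before the limit table lookup
#
def _limit_indicator(msid, udict, default=0):
    try:
        return ecf.convert_unit_indicator(udict[msid])
    except KeyError:
        return default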
def find_starting_date():
    """
    set starting and stopping time from the last entry of a fits file
    input: none but read from hvpsstat_data.fits
    output: start   --- starting time <yyyy>-<mm>-<dd>T00:00:00
            stop    --- stopping time  <yyyy>-<mm>-<dd>T23:59:59
    """

    test = dpath + 'hvpsstat_data.fits'
    ltime = ecf.find_the_last_entry_time(test)
    #
    #--- convert time from sec from 1998.1.1 to year and ydate
    #
    out = Chandra.Time.DateTime(ltime).date
    atemp = re.split(':', out)
    year = int(float(atemp[0]))
    yday = int(float(atemp[1]))

    yday += 1
    #
    #--- check whether the date crosses the year end
    #
    if tcnv.isLeapYear(year) == 1:
        if yday > 366:
            yday = 1
            year += 1
    else:
        if yday > 365:
            yday = 1
            year += 1
#
#--- convert to month and mday
#
    [mon, day] = tcnv.changeYdateToMonDate(year, yday)

    lmon = str(mon)
    if mon < 10:
        lmon = '0' + lmon
    lday = str(day)
    if day < 10:
        lday = '0' + lday

    start = str(year) + '-' + lmon + '-' + lday + 'T00:00:00'
    stop = str(year) + '-' + lmon + '-' + lday + 'T23:59:59'

    return [start, stop]
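
#
#--- a small, self-contained illustration (hypothetical helper) of the
#--- year-boundary rollover above, in pure python instead of tcnv.isLeapYear
#
def _next_ydate(year, yday):
    yday += 1
    limit = 366 if (year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)) else 365
    if yday > limit:
        yday  = 1
        year += 1
    return year, yday                  #--- e.g. _next_ydate(2020, 366) -> (2021, 1)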
def find_starting_date():
    """
    create a list of times, one per day, from the last entry of a fits file to yesterday
    input: none but read from hvpsstat_data.fits
    output: t_list  --- a list of times in seconds from 1998.1.1, one entry per day
    """
    test    = dpath + 'hvpsstat_data.fits'
    ltime   = ecf.find_the_last_entry_time(test) + 86400.0
    ltime   = mcf.convert_date_format(ltime, ifmt='chandra', ofmt='%Y:%j:00:00:00')
    ltime   = Chandra.Time.DateTime(ltime).secs

    today   = time.strftime('%Y:%j:00:00:00', time.gmtime())
    stime   = Chandra.Time.DateTime(today).secs - 86400.0
    t_list  = [ltime]
    while ltime < stime:
        ltime += 86400.0
        if ltime > stime:
            break
        else:
            t_list.append(ltime)

    return t_list
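
#
#--- the day-list construction above in isolation (hypothetical helper):
#--- one entry per day from ltime up to stime, in seconds from 1998.1.1
#
def _daily_time_list(ltime, stime, step=86400.0):
    t_list = [ltime]
    while ltime + step <= stime:
        ltime += step
        t_list.append(ltime)
    return t_list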
def dea_full_data_update(chk):
    """
    update deahk search database
    input:  chk --- whether to request a full data update; chk == 1: yes
    output: <deposit_dir>/Deahk_save/<group>/<msid>_full_data_<year>.fits
    """
    tyear = int(float(time.strftime("%Y", time.gmtime())))

    cmd = 'ls ' + data_dir + 'Deahk_*/*_week_data.fits > ' + zspace
    os.system(cmd)
    data = mcf.read_data_file(zspace, remove=1)

    for ent in data:
        atemp = re.split('/', ent)
        group = atemp[-2]
        btemp = re.split('_', atemp[-1])
        msid = btemp[0]
        print("MSID: " + str(msid) + ' in ' + group)

        [cols, tbdata] = ecf.read_fits_file(ent)

        atime = tbdata['time']   #--- local name; avoid shadowing the time module
        tdata = tbdata[msid]
        cols = ['time', msid]
        #
        #--- regular data update
        #
        if chk == 0:
            #
            #--- normal daily data update
            #
            ofits = deposit_dir + 'Deahk_save/' + group + '/' + msid + '_full_data_'
            ofits = ofits + str(tyear) + '.fits'
            if os.path.isfile(ofits):
                ltime = ecf.find_the_last_entry_time(ofits)
                ctime = Chandra.Time.DateTime(str(tyear + 1) + ':001:00:00:00').secs
                nchk = 0
#
#--- if the data go over the year boundary, fill up the last year and create a new one for the new year
#
            else:
                ofits = deposit_dir + 'Deahk_save/' + group + '/' + msid
                ofits = ofits + '_full_data_' + str(tyear - 1) + '.fits'
                nfits = deposit_dir + 'Deahk_save/' + group + '/' + msid
                nfits = nfits + '_full_data_' + str(tyear) + '.fits'

                ltime = ecf.find_the_last_entry_time(ofits)
                ctime = Chandra.Time.DateTime(str(tyear) + ':001:00:00:00').secs
                nchk = 1

            select = (atime > ltime) & (atime < ctime)
            stime = atime[select]
            sdata = tdata[select]
            cdata = [stime, sdata]
            ecf.update_fits_file(ofits, cols, cdata)

            if nchk > 0:
                select = atime >= ctime
                stime = atime[select]
                sdata = tdata[select]
                cdata = [stime, sdata]
                ecf.create_fits_file(nfits, cols, cdata)
#
#--- start from beginning (year 1999)
#
        else:
            for year in range(1999, tyear + 1):
                tstart = str(year) + ':001:00:00:00'
                tstart = Chandra.Time.DateTime(tstart).secs
                tstop = str(year + 1) + ':001:00:00:00'
                tstop = Chandra.Time.DateTime(tstop).secs

                select = (atime >= tstart) & (atime < tstop)
                stime = atime[select]
                sdata = tdata[select]
                cdata = [stime, sdata]

                out = deposit_dir + 'Deahk_save/' + group + '/'
                if not os.path.isdir(out):
                    cmd = 'mkdir ' + out
                    os.system(cmd)
                out = out + msid + '_full_data_' + str(year) + '.fits'

                ecf.create_fits_file(out, cols, cdata)
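
#
#--- minimal numpy illustration (hypothetical helper) of the year-boundary
#--- split used above: rows before the boundary extend the old yearly file,
#--- the rest seed the new one
#
def _split_at_boundary(times, values, boundary):
    import numpy
    times  = numpy.asarray(times)
    values = numpy.asarray(values)
    old    = times < boundary
    new    = ~old
    return (times[old], values[old]), (times[new], values[new])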
def create_long_term_dea_data(dhead, group, drange):
    """
    convert weekly rdb data files into long term data fits files
    input:  dhead   --- data file name header
            group   --- group name
            drange  --- deahk data number list
    output: <data_dir>/deahk<#>_data.fits
    """
    #
    #--- find today date in seconds from 1998.1.1
    #
    today = time.strftime("%Y:%j:00:00:00", time.gmtime())
    atemp = re.split(':', today)
    tyear = int(atemp[0])
    today = Chandra.Time.DateTime(today).secs
    #
    #--- set names; they may not be continuous
    #
    name_list = []
    for k in drange:
        dname = 'deahk' + str(k)
        name_list.append(dname)
#
#--- how many dea entries
#
    ntot = len(drange)
    #
    #--- checking the last entry date
    #
    efits = data_dir + group + '/' + name_list[0] + '_data.fits'

    if os.path.isfile(efits):
        ltime = ecf.find_the_last_entry_time(efits)
        try:
            ltime = find_starting_of_the_day(ltime)
            out = Chandra.Time.DateTime(ltime).date
            atemp = re.split(':', out)
            syear = int(atemp[0])
            lchk = 1
        except:
            ltime = 52185599.0
            syear = 1999
            lchk = 0
    else:
        ltime = 52185599.0
        syear = 1999
        lchk = 0
#
#--- read data
#
    fchk = 0
    for pyear in range(syear, tyear + 1):
        dfile = dhead + str(pyear) + '.rdb'
        data = mcf.read_data_file(dfile)
        #
        #--- starting time/stopping time and how many columns in the data
        #
        atemp = re.split(r'\s+', data[0])
        tot = len(atemp)
        start = float(atemp[0])

        xtemp = re.split(r'\s+', data[-1])
        stop = float(xtemp[0])
        #
        #--- separate each column into a list
        #
        if fchk == 0:  #--- initialize once at the beginning of the loop
            dlist = []  #--- will keep the lists of daily averages of each column
            for k in range(0, tot):
                dlist.append([])
            fchk = 1

        dsum = []  #--- will keep the sums of each column for a given time interval (a day)
        for k in range(0, tot):
            dsum.append(0)

        chk = 0
        cnt = 0
        ntime = ltime + 86400.0

        while ntime < start:
            ltime = ntime
            ntime = ltime + 86400.0
        tlist = []

        for ent in data:
            atemp = re.split(r'\s+', ent)
            ftime = float(atemp[0])
            if ftime >= ltime:

                chk += 1
                if ftime < ntime and len(atemp) == tot:
                    tlist.append(ftime)
                    for k in range(0, tot):
                        dsum[k] += float(atemp[k])
                    cnt += 1   #--- count rows, not row x column entries
                else:
                    if cnt == 0 or len(tlist) == 0:
                        ltime = ntime
                        ntime = ltime + 86400.0
                        continue
#
#--- take mid point for the time and take averages for the other quantities
#
                    dlist[0].append(tlist[int(0.5 * len(tlist))])
                    for k in range(1, tot):

                        dlist[k].append(dsum[k] / cnt)
                        dsum[k] = 0

                    ltime = ntime
                    ntime = ltime + 86400.0
                    tlist = []
                    cnt = 0
#
#--- if no new data, stop
#
    if chk == 0:
        return 'No new data'
#
#--- each fits file has 15 entries, but half of them are dummy entries
#
    mstop = 1
    for k in range(0, ntot):
        msid = name_list[k]

        print('MSID:  ' + msid)

        fits = data_dir + group + '/' + msid + '_data.fits'
        cols  = ['time', msid, 'med', 'std', 'min', 'max', 'ylower',\
                 'yupper', 'rlower', 'rupper', 'dcount',\
                 'ylimlower', 'ylimupper', 'rlimlower', 'rlimupper', 'state']

        mstart = mstop
        mstop = mstart + 5
        #
        #--- the following quantities are not in the database; add default values
        #
        tlen = len(dlist[0])
        ylr = [0] * tlen
        yur = [0] * tlen
        rlr = [0] * tlen
        rur = [0] * tlen
        yl = [-9e9] * tlen
        yu = [9e9] * tlen
        rl = [-9e9] * tlen
        ru = [9e9] * tlen
        dc = [-999] * tlen
        state = ['none'] * tlen

        cdata = [dlist[0]]
        cdata = cdata + dlist[mstart:mstop] + [
            ylr, yur, rlr, rur, dc, yl, yu, rl, ru, state
        ]
        #
        #--- create a new fits file
        #
        if lchk == 0:
            ecf.create_fits_file(fits, cols, cdata)
#
#--- append to the existing fits file
#
        else:
            ecf.update_fits_file(fits, cols, cdata)

    return 'New data added'
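
#
#--- hedged, self-contained sketch (hypothetical helper) of the daily
#--- averaging loop above: bin rows into 86400 s windows, keep the mid-point
#--- time and the per-column means of each window
#
def _daily_average(rows, ltime, step=86400.0):
    #--- rows: list of [time, v1, v2, ...] with time in seconds from 1998.1.1
    out    = []
    bucket = []
    ntime  = ltime + step
    for row in sorted(rows):
        while row[0] >= ntime:         #--- crossed a window boundary; flush
            if bucket:
                mid  = bucket[len(bucket) // 2][0]
                avgs = [sum(c) / len(c) for c in zip(*bucket)][1:]
                out.append([mid] + avgs)
                bucket = []
            ntime += step
        if row[0] >= ltime:            #--- skip rows older than the last entry
            bucket.append(row)
    if bucket:
        mid  = bucket[len(bucket) // 2][0]
        avgs = [sum(c) / len(c) for c in zip(*bucket)][1:]
        out.append([mid] + avgs)
    return out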
def update_ephkey_l1_data(date = ''):
    """
    update ephkey L1 data
    input:  date    --- the date in <yyyy>:<jjj> format. if not given, the dates
                        from the last entry to yesterday are used
    output: updated fits data files for the ephkey group
    """
#
#--- read group names which need special treatment
#
    ifile  = house_keeping + 'msid_list_ephkey'
    with open(ifile, 'r') as f:
        data = [line.strip() for line in f.readlines()]

    msid_list = []
    for ent in data:
        atemp = re.split(r'\s+', ent)
        msid_list.append(atemp[0])
        group = atemp[1]
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = ecf.read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = ecf.read_cross_check_table()
#
#--- create date list from the next day from the next entry to today
#
    if date == '':
#
#--- the date of the last entry (test_fits is assumed to be defined at the
#--- module level of the original script)
#
        stemp = ecf.find_the_last_entry_time(test_fits)
        stemp = Chandra.Time.DateTime(stemp).date
        atemp = re.split(':', stemp)
        syear = int(float(atemp[0]))
        sday  = int(float(atemp[1]))
#
#--- if the data is missing more than 6 hours, fill that day again
#
        shh   = int(float(atemp[2]))
        if shh < 18:
            sday -= 1
            if sday < 0:
                syear -= 1
                if tcnv.isLeapYear(syear) == 1:
                    sday = 366 
                else:
                    sday = 365
    
#
#--- find today's date
#
        stemp = time.strftime("%Y:%j", time.gmtime())
        atemp = re.split(':', stemp)
        lyear = int(float(atemp[0]))
        lday  = int(float(atemp[1]))
    
        date_list = []
        if syear == lyear:
            for day in range(sday+1, lday):
                cday = ecf.add_lead_zeros(day, 2)
                date = str(syear) + ':' + cday
                date_list.append(date)
        else:
            if tcnv.isLeapYear(syear) == 1:
                base = 367
            else:
                base = 366

            for day in range(sday+1, base):
                cday = ecf.add_lead_zeros(day, 2)
                date = str(syear) + ':' + cday
                date_list.append(date)

            for day in range(1, lday):
                cday = ecf.add_lead_zeros(day, 2)
                date = str(lyear) + ':' + cday
                date_list.append(date)
    else:
        date_list.append(date)


    for date in date_list:
        tstart = date + ':00:00:00'
        tstop  = date + ':23:59:59'

        uds.run_update_with_archive(msid_list, group, date_list, 'ephin', '0', 'ephhk', tstart, tstop)
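
#
#--- self-contained sketch (hypothetical helper) of the <yyyy>:<jjj> date-list
#--- construction above, using datetime instead of the local leap-year helpers;
#--- note the day of year is zero-padded to three digits here
#
def _date_range_ydates(syear, sday, lyear, lday):
    from datetime import datetime, timedelta
    day  = datetime.strptime('%d:%03d' % (syear, sday), '%Y:%j') + timedelta(days=1)
    last = datetime.strptime('%d:%03d' % (lyear, lday), '%Y:%j')
    dates = []
    while day < last:
        dates.append(day.strftime('%Y:%j'))
        day += timedelta(days=1)
    return dates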
def set_time_period(year):
    """
    set the data extraction period
    input:  year    --- year of the data to be extracted. if it is '', the current year is used
    output: start   --- starting time in sec from 1998.1.1
            stop    --- stopping time in sec from 1998.1.1
            tyear   --- year

            when the period goes over the year boundary, the output provides
            6 items, including:

            lstart  --- starting time in the last year
            lstop   --- stopping time in the last year
            lyear   --- last year
    """
    if year == '':
        #
        #--- find this year
        #
        tyear = int(time.strftime("%Y", time.localtime()))
        #
        #--- find the latest tephin_data fits file
        #
        tephin = data_dir + 'Eleak/Tephin/tephin_data' + str(tyear) + '.fits'
        if os.path.isfile(tephin):
            start = ecf.find_the_last_entry_time(tephin)
            #
            #--- today's date
            #
            stday = time.strftime("%Y:%j:00:00:00", time.gmtime())
            stop = Chandra.Time.DateTime(stday).secs

            return [start, stop, tyear]

        else:
            #
            #--- if the time span goes over the year boundary, return two sets of periods
            #
            start = str(tyear) + ':001:00:00:00'
            start = Chandra.Time.DateTime(start).secs
            stday = time.strftime("%Y:%j:00:00:00", time.gmtime())
            stop = Chandra.Time.DateTime(stday).secs
            #
            #-- the last year update
            #
            lyear = tyear - 1
            ltephin = data_dir + 'Eleak/Tephin/tephin_data' + str(lyear) + '.fits'
            lstart = ecf.find_the_last_entry_time(ltephin)
            lstop = str(tyear) + ':001:00:00:00'
            lstop = Chandra.Time.DateTime(lstop).secs

            return [lstart, lstop, lyear, start, stop, tyear]

    else:
        #
        #--- the case creating the entire year
        #
        start = str(year) + ':001:00:00:00'
        start = Chandra.Time.DateTime(start).secs
        stop = str(year + 1) + ':001:00:00:00'
        stop = Chandra.Time.DateTime(stop).secs

        return [start, stop, year]
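
#
#--- usage sketch (hypothetical helper): normalize the return value of
#--- set_time_period, since the year-boundary case yields two periods
#
def _unpack_time_periods(out):
    if len(out) == 6:
        return [tuple(out[0:3]), tuple(out[3:6])]   #--- [(lstart, lstop, lyear), (start, stop, tyear)]
    else:
        return [tuple(out)]                         #--- [(start, stop, tyear)]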
def gratgen_categorize_data():
    """
    separate gratgen data into different categories
    input: none but use <data_dir>/Gratgen/*.fits
    output: <data_dir>/Gratgen/<Category>/*.fits
    """
    #
    #--- get the basic information
    #
    [udict, ddict, mta_db, mta_cross] = ecf.get_basic_info_dict()

    for msid in msid_list:    #--- msid_list and cname_list are assumed to be module-level lists
        cols = ['time', msid, 'med', 'std', 'min', 'max', 'ylower',\
                'yupper', 'rlower', 'rupper', 'dcount', 'ylimlower',\
                'ylimupper', 'rlimlower', 'rlimupper']

        glim = ecf.get_limit(msid, 0, mta_db, mta_cross)

        for category in cname_list:
            print("Running: " + str(msid) + '<-->' + category)

            cfile1 = data_dir + 'Gratgen/' + category.capitalize() + '/' + msid + '_data.fits'
            cfile2 = data_dir + 'Gratgen/' + category.capitalize() + '/' + msid + '_short_data.fits'
            cfile3 = data_dir + 'Gratgen/' + category.capitalize() + '/' + msid + '_week_data.fits'

            stday = time.strftime("%Y:%j:00:00:00", time.gmtime())
            tcut1 = 0.0
            tcut2 = Chandra.Time.DateTime(stday).secs - 31622400.0  #--- a year ago
            tcut3 = Chandra.Time.DateTime(stday).secs - 864000.0    #--- 10 days ago

            if os.path.isfile(cfile1):
                tchk = ecf.find_the_last_entry_time(cfile1)
            else:
                tchk = 0

            ifile = house_keeping + category
            data = mcf.read_data_file(ifile)
            start = []
            stop = []
            for ent in data:
                atemp = re.split(r'\s+', ent)
                val1 = float(atemp[0])
                val2 = float(atemp[1])
                if val1 > tchk:
                    start.append(val1)
                    stop.append(val2)

            if len(start) == 0:
                continue

            for k in range(0, len(start)):
                diff = stop[k] - start[k]
                if diff < 300:
                    start[k] -= 100
                    stop[k] = start[k] + 300.

                data = fetch.MSID(msid, start[k], stop[k])

                if k == 0:
                    ttime = list(data.times)
                    tdata = list(data.vals)
                else:
                    ttime = ttime + list(data.times)
                    tdata = tdata + list(data.vals)

            if len(ttime) == 0:
                continue

            stat_out1 = get_stat(ttime, tdata, glim, 86400.0)
            stat_out2 = get_stat(ttime, tdata, glim, 3600.0)
            stat_out3 = get_stat(ttime, tdata, glim, 300.0)

            if tchk > 0:
                ecf.update_fits_file(cfile1, cols, stat_out1, tcut=tcut1)
                ecf.update_fits_file(cfile2, cols, stat_out2, tcut=tcut2)
                ecf.update_fits_file(cfile3, cols, stat_out3, tcut=tcut3)
            else:
                ecf.create_fits_file(cfile1, cols, stat_out1, tcut=tcut1)
                ecf.create_fits_file(cfile2, cols, stat_out2, tcut=tcut2)
                ecf.create_fits_file(cfile3, cols, stat_out3, tcut=tcut3)
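
#
#--- minimal illustration (hypothetical helper) of the interval padding above:
#--- intervals shorter than 300 s are widened so fetch has enough telemetry
#
def _pad_interval(tstart, tstop, min_len=300.0, lead=100.0):
    if tstop - tstart < min_len:
        tstart -= lead
        tstop   = tstart + min_len
    return tstart, tstop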