Ejemplo n.º 1
0
def get_data(start, stop, year, out_dir):
    """
    update sim flex offset data for the given data period
    input:  start   --- start time in seconds from 1998.1.1
            stop    --- stop time in seconds from 1998.1.1
            year    --- data extracted year
            out_dir --- output_directory
    output: <out_dir>/<msid>_full_data_<year>.fits
    """
    print(str(start) + '<-->' + str(stop))

    for msid in ['flexadif', 'flexbdif', 'flexcdif']:
#
#--- each computed offset msid is the difference of two source msids
#
        if msid == 'flexadif':
            msid_t = '3faflaat'
            msid_s = '3sflxast'
        elif msid == 'flexbdif':
            msid_t = '3faflbat'
            msid_s = '3sflxbst'
        else:
            msid_t = '3faflcat'
            msid_s = '3sflxcst'

        out   = fetch.MSID(msid_t, start, stop)
        tdat1 = out.vals
        ttime = out.times
        out   = fetch.MSID(msid_s, start, stop)
        tdat2 = out.vals

        tlen1 = len(tdat1)
        tlen2 = len(tdat2)
        if tlen1 == 0 or tlen2 == 0:
            continue
#
#--- pad the shorter array with its own last value so the lengths match
#--- (bug fix: the padding previously referenced misspelled names
#---  "tadt1"/"tadt2", which raised NameError whenever lengths differed)
#
        if tlen1 > tlen2:
            tdat2 = numpy.append(tdat2, numpy.full(tlen1 - tlen2, tdat2[-1]))
        elif tlen1 < tlen2:
            tdat1 = numpy.append(tdat1, numpy.full(tlen2 - tlen1, tdat1[-1]))

        ocols = ['time', msid]
        cdata = [ttime, tdat1 - tdat2]

        ofits = out_dir + msid + '_full_data_' + str(year) + '.fits'
        if os.path.isfile(ofits):
            ecf.update_fits_file(ofits, ocols, cdata)
        else:
            ecf.create_fits_file(ofits, ocols, cdata)
Ejemplo n.º 2
0
def get_data(start, stop, year, out_dir):
    """
    update acis electric power data for a given period
    input:  start   --- starting time in seconds from 1998.1.1
            stop    --- stopping time in seconds from 1998.1.1
            year    --- year of the data extracted
            out_dir --- output directory
    output: <out_dir>/1dppwra_full_data_<year>.fits, <out_dir>/1dppwrb_full_data_<year>.fits
    """
    print(str(start) + '<-->' + str(stop))

    for msid in ['1dppwra', '1dppwrb']:
#
#--- power is computed as voltage * current
#
        if msid == '1dppwra':
            msid_v = '1dp28avo'
            msid_a = '1dpicacu'
        else:
            msid_v = '1dp28bvo'
            msid_a = '1dpicbcu'

        out   = fetch.MSID(msid_v, start, stop)
        tdat1 = out.vals
        ttime = out.times
        out   = fetch.MSID(msid_a, start, stop)
        tdat2 = out.vals

        tlen1 = len(tdat1)
        tlen2 = len(tdat2)
        if tlen1 == 0 or tlen2 == 0:
            continue
#
#--- pad the shorter array with its own last value so the lengths match
#--- (bug fix: the padding previously referenced misspelled names
#---  "tadt1"/"tadt2", which raised NameError whenever lengths differed)
#
        if tlen1 > tlen2:
            tdat2 = numpy.append(tdat2, numpy.full(tlen1 - tlen2, tdat2[-1]))
        elif tlen1 < tlen2:
            tdat1 = numpy.append(tdat1, numpy.full(tlen2 - tlen1, tdat1[-1]))

        ocols = ['time', msid]
        cdata = [ttime, tdat1 * tdat2]

        ofits = out_dir + msid + '_full_data_' + str(year) + '.fits'
        if os.path.isfile(ofits):
            ecf.update_fits_file(ofits, ocols, cdata)
        else:
            ecf.create_fits_file(ofits, ocols, cdata)
Ejemplo n.º 3
0
def run_update_with_ska(msid, group, msid_sub_list=None, glim=''):
    """
    extract data from ska database and update the data for the msids in the msid_list
    input:  msid            --- a list of msids
            group           --- the group of the msids
            msid_sub_list   --- a list of lists of:
                                [msid, msid_1, msid_2, operand]
                                this is used to compute the first msid from following two
                                msid values with operand (+/-/*)
            glim            --- glim usually found in this function, but you can give it; default: ''
    output: <msid>_data.fits, <msid>_short_data.fits, <msid>_week_data.fits
    """
#
#--- bug fix: the default was a mutable list ([]) shared across calls;
#--- use None as the sentinel and create a fresh list per call
#
    if msid_sub_list is None:
        msid_sub_list = []
#
#--- get basic information dict/list
#
    [udict, ddict, mta_db, mta_cross] = ecf.get_basic_info_dict()
#
#--- set starting and stopping data period
#
    test_fits = data_dir + group.capitalize() + '/' + msid + '_data.fits'

    if os.path.isfile(test_fits):
        tstart = ecf.find_the_last_entry_time(test_fits)

        ttemp = time.strftime("%Y:%j:00:00:00", time.gmtime())
        tstop = Chandra.Time.DateTime(ttemp).secs - 86400.0

        if tstop < tstart:
            exit(1)

        if len(msid_sub_list) != 0:
            [dtime, tdata] = compute_sub_msid(msid, msid_sub_list, tstart, tstop)
        else:
            out   = fetch.MSID(msid, tstart, tstop)
            ok    = ~out.bads
            dtime = out.times[ok]
            tdata = out.vals[ok]
#
#--- fetch occasionally adds -999.xxx to the output data of some msids; remove them (Jun 13, 2018)
#--- bug fix: use the boolean mask directly -- wrapping it in a list was
#--- deprecated fancy indexing and is an error in modern numpy
#
            tind  = (tdata > -999) | (tdata <= -1000)
            dtime = dtime[tind]
            tdata = tdata[tind]
#
#--- get limit data table for the msid
#
        if glim == '':
            try:
                tchk = ecf.convert_unit_indicator(udict[msid])
            except:
                tchk = 0

            if msid in sp_limt_case_c:
                tchk = 1

            glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
#
#--- update database
#
        update_database(msid, group, dtime, tdata, glim)
Ejemplo n.º 4
0
def get_data(start, stop, year, msid_list, out_dir):
    """
    update msid data in msid_list for the given data period
    input:  start       --- start time in seconds from 1998.1.1
            stop        --- stop time in seconds from 1998.1.1
            year        --- the year in which data is extracted
            msid_list   --- a list of msids
            out_dir     --- output_directory
    output: <out_dir>/<msid>_full_data_<year>.fits
    """
    print(str(start) + '<-->' + str(stop))

    for msid in msid_list:
        out = fetch.MSID(msid, start, stop)
#
#--- convert the fetched values from K to degC
#
        conv  = out.vals - 273.15
        tbins = out.times

        ofits = out_dir + msid + '_full_data_' + str(year) + '.fits'
#
#--- append to an existing fits file, or create a new one
#
        if os.path.isfile(ofits):
            ecf.update_fits_file(ofits, ['time', msid], [tbins, conv])
        else:
            ecf.create_fits_file(ofits, ['time', msid], [tbins, conv])
Ejemplo n.º 5
0
def compute_sub_msid(msid, msid_sub_list, tstart, tstop):
    """
    return computed msid's data
    input:  msid    --- msid
            msid_sub_list   --- a list of lists of:
                            [msid, msid_1, msid_2, operand]
                            this is used to compute the first msid from following two
                            msid values with operand (+/-/*)
            tstart  --- start time in seconds from 1998.1.1
            tstop   --- stop time in seconds from 1998.1.1
    output: ttime   --- array of time values
            tdata   --- array of the data values
    """
#
#--- look for a recipe entry matching this msid
#
    entry = None
    for row in msid_sub_list:
        if row[0] == msid:
            entry = row
            break
#
#--- recipe found: combine the two source msids with the operand
#
    if entry is not None:
        msid_t, msid_s, opr = entry[1], entry[2], entry[3]

        out     = fetch.MSID(msid_t, tstart, tstop)
        tdata_t = out.vals
        ttime   = out.times
        out     = fetch.MSID(msid_s, tstart, tstop)
        tdata_s = out.vals

        if opr == '-':
            tdata = tdata_t - tdata_s
        elif opr == '*':
            tdata = tdata_t * tdata_s
        else:
            tdata = tdata_t + tdata_s
#
#--- no recipe: fetch the msid directly; empty lists on any failure
#
    else:
        try:
            out   = fetch.MSID(msid, tstart, tstop)
            tdata = out.vals
            ttime = out.times
        except:
            tdata = []
            ttime = []

    return [ttime, tdata]
Ejemplo n.º 6
0
def get_data(start, stop, year):
    """
    update sim flex offset data for the given data period
    input:  start   --- start time in seconds from 1998.1.1
            stop    --- stop time in seconds from 1998.1.1
            year    --- data extracted year
    output: <dout>/<msid>_full_data_<year>.fits  (dout: module-level global)
    """
    print(str(start) + '<-->' + str(stop))

    for msid in ['flexadif', 'flexbdif', 'flexcdif']:
#
#--- each computed offset msid is the difference of two source msids
#
        if msid == 'flexadif':
            msid_t = '3faflaat'
            msid_s = '3sflxast'
        elif msid == 'flexbdif':
            msid_t = '3faflbat'
            msid_s = '3sflxbst'
        else:
            msid_t = '3faflcat'
            msid_s = '3sflxcst'

        out   = fetch.MSID(msid_t, start, stop)
        tdat1 = out.vals
        ttime = out.times
        out   = fetch.MSID(msid_s, start, stop)
        tdat2 = out.vals

        tlen1 = len(tdat1)
        tlen2 = len(tdat2)
        if tlen1 == 0 or tlen2 == 0:
            continue
#
#--- pad the shorter array with its own last value so the lengths match
#--- (bug fix: the padding previously referenced misspelled names
#---  "tadt1"/"tadt2", which raised NameError whenever lengths differed)
#
        if tlen1 > tlen2:
            tdat2 = numpy.append(tdat2, numpy.full(tlen1 - tlen2, tdat2[-1]))
        elif tlen1 < tlen2:
            tdat1 = numpy.append(tdat1, numpy.full(tlen2 - tlen1, tdat1[-1]))

        ocols = ['time', msid]
        cdata = [ttime, tdat1 - tdat2]

        ofits = dout + msid + '_full_data_' + str(year) + '.fits'
        if os.path.isfile(ofits):
            update_fits_file(ofits, ocols, cdata)
        else:
            create_fits_file(ofits, ocols, cdata)
Ejemplo n.º 7
0
def get_data(start, stop, year):
    """
    update acis electric power data for a given period
    input:  start   --- starting time in seconds from 1998.1.1
            stop    --- stopping time in seconds from 1998.1.1
            year    --- year of the data extracted
    output: <dout>/<msid>_full_data_<year>.fits  (dout: module-level global)
    """
    print(str(start) + '<-->' + str(stop))

    for msid in ['1dppwra', '1dppwrb']:
#
#--- power is computed as voltage * current
#
        if msid == '1dppwra':
            msid_v = '1dp28avo'
            msid_a = '1dpicacu'
        else:
            msid_v = '1dp28bvo'
            msid_a = '1dpicbcu'

        out   = fetch.MSID(msid_v, start, stop)
        tdat1 = out.vals
        ttime = out.times
        out   = fetch.MSID(msid_a, start, stop)
        tdat2 = out.vals

        tlen1 = len(tdat1)
        tlen2 = len(tdat2)
        if tlen1 == 0 or tlen2 == 0:
            continue
#
#--- pad the shorter array with its own last value so the lengths match
#--- (bug fix: the padding previously referenced misspelled names
#---  "tadt1"/"tadt2", which raised NameError whenever lengths differed)
#
        if tlen1 > tlen2:
            tdat2 = numpy.append(tdat2, numpy.full(tlen1 - tlen2, tdat2[-1]))
        elif tlen1 < tlen2:
            tdat1 = numpy.append(tdat1, numpy.full(tlen2 - tlen1, tdat1[-1]))

        ocols = ['time', msid]
        cdata = [ttime, tdat1 * tdat2]

        ofits = dout + msid + '_full_data_' + str(year) + '.fits'
        if os.path.isfile(ofits):
            update_fits_file(ofits, ocols, cdata)
        else:
            create_fits_file(ofits, ocols, cdata)
Ejemplo n.º 8
0
def fetch_eng_data(msid, start, stop):
    """
    get eng data from archive
    input:  msid            --- msid
            start           --- start time in sec from 1998.1.1
            stop            --- stop time in sec from 1998.1.1
    output: [time, data]    --- a list of time array and data array;
                                data are 5 min averages (0.0 for empty bins)
                                reported at the center of each bin
    """
#
#--- read data from database
#
    out = fetch.MSID(msid, start, stop)
#
#--- collect data in 5 min intervals and take an average
#
    stime  = []
    data   = []
    tsave  = []
    pstart = start
    pstop  = pstart + 300.0
    for k in range(0, len(out.times)):

        if out.times[k] < pstart:
            continue
#
#--- close every finished 5 min bin that lies before this sample
#--- (bug fix: the first sample past a bin edge used to be dropped
#--- entirely, and a long data gap closed only one bin per subsequent
#--- sample instead of emitting a 0.0 entry for every empty bin)
#
        while out.times[k] > pstop:
            stime.append(pstart + 150.0)
#
#--- if no data, just put 0.0
#
            if len(tsave) == 0:
                data.append(0.0)
#
#--- take an average
#
            else:
                data.append(numpy.mean(tsave))
                tsave = []
            pstart = pstop
            pstop  = pstart + 300.0

        tsave.append(out.vals[k])
#
#--- NOTE(review): a trailing partial bin is discarded, matching the
#--- original behavior -- confirm whether it should be flushed instead
#
#--- convert the list into an array form before returning
#
    atime = numpy.array(stime)
    adata = numpy.array(data)

    return [atime, adata]
Ejemplo n.º 9
0
def get_data(msid, start, stop):
    """
    extract data of a given msid from the ska archive
    input:  msid    --- msid
            start   --- start time
            stop    --- stop time
    output: ttime   --- a list of time data
            tdata   --- a list of data
    """
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = ecf.read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = ecf.read_cross_check_table()
#
#--- get limit data table for the msid
#
    try:
        uck = udict[msid]
        if uck.lower() == 'k':
            tchk = 1
        else:
#
#--- bug fix: this previously referenced an undefined name "uchk",
#--- which made every non-'k' unit fall into the except branch
#
            tchk = ecf.convert_unit_indicator(uck)
    except:
        tchk = 0

    glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
#
#--- extract data from archive; empty lists on any failure
#
    try:
        out   = fetch.MSID(msid, start, stop)
        tdata = out.vals
        ttime = out.times
    except:
        tdata = []
        ttime = []

    return [ttime, tdata]
Ejemplo n.º 10
0
def get_data_from_ska(msid, tstart, tstop):
    """
    extract data from ska database
    input:  msid    --- msid
            tstart  --- starting time in seconds from 1998.1.1
            tstop   --- stopping time in seconds from 1998.1.1
    output: a two-element list: [time list, data list]
    """
    result = fetch.MSID(msid, tstart, tstop)

    return [result.times, result.vals]
Ejemplo n.º 11
0
def get_temp_data(msid, xdata):
    """
    get temperature data of msid for the given time spots in the list
    input:  msid    --- msid
            xdata   --- a list of time data in seconds from 1998.1.1
    output: temperature --- a list of temperature corresponding to the time list
    """
#
#--- average the msid values in a +/- 60 sec window around each time spot
#
    return [numpy.mean(fetch.MSID(msid, spot - 60.0, spot + 60.0).vals)
            for spot in xdata]
Ejemplo n.º 12
0
def fetchTelemetry(msid, start, now, filter_bad=True):
    '''
    Fetch the telemetry from the Ska archive for the given MSID.

    filter_bad will call fetch.py's option to automatically filter
    bad data.

    NOTE(review): the original docstring claimed "The stat call should be
    None, as we don't want daily statistics", but the call below passes
    stat='daily' -- one of the two is wrong; confirm the intended behavior
    before changing either.

    Returns an instance of class MSID
    '''

    telemetry = fetch.MSID(msid,
                           start,
                           now,
                           filter_bad=filter_bad,
                           stat='daily')

    return telemetry
Ejemplo n.º 13
0
def read_data(tstart, tstop):
    """
    extract needed data from sot database
    input:  tstart  --- starting time in seconds from 1998.1.1
            tstop   --- stopping time in seconds from 1998.1.1
    output: data    --- a list of arrays of data
    """
#
#--- collect the value arrays of every msid in the (global) bias_list
#
    collected = []
    for msid in bias_list:
        out = fetch.MSID(msid, tstart, tstop)
        collected.append(out.vals)
#
#--- the leading time column comes from the last msid fetched
#
    return [out.times] + collected
Ejemplo n.º 14
0
def get_data(start, stop, year, acis_list):
    """
    update full-year fits files for the msids in acis_list
    input:  start       --- start time in seconds from 1998.1.1
            stop        --- stop time in seconds from 1998.1.1
            year        --- year of the extracted data
            acis_list   --- a list of msids
    output: <out_dir>/<msid>_full_data_<year>.fits (out_dir: global)
    """
    print(str(start) + '<-->' + str(stop))

    for msid in acis_list:
        fetched = fetch.MSID(msid, start, stop)
#
#--- convert the fetched values from K to degC
#
        cvals = fetched.vals - 273.15
        ctime = fetched.times

        ofits = out_dir + msid + '_full_data_' + str(year) + '.fits'
#
#--- append to an existing fits file, or create a new one
#
        if os.path.isfile(ofits):
            update_fits_file(ofits, ['time', msid], [ctime, cvals])
        else:
            create_fits_file(ofits, ['time', msid], [ctime, cvals])
Ejemplo n.º 15
0
def create_interactive_page(msid, group, start, stop, step):
    """
    create an interactive html page for a given msid
    input:  msid    --- msid
            group   --- group name
            start   --- start time
            stop    --- stop time
            step    --- bin size in seconds
    """
    start = check_time_format(start)
    stop = check_time_format(stop)
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = read_cross_check_table()
#
#--- get limit data table for the msid
#
    try:
        tchk = convert_unit_indicator(udict[msid])
    except:
        tchk = 0

    glim = get_limit(msid, tchk, mta_db, mta_cross)
#
#--- probe the archive; if no data can be fetched, record the msid in the
#--- missing-data log (the fetched sample itself is not used)
#
    try:
        fetch.MSID(msid, '2017:001:00:00:00', '2017:002')
    except:
        missed = house_keeping + '/missing_data'
#
#--- bug fix: use a context manager so the log handle is closed even if
#--- the write fails (the original left the file object open on error)
#
        with open(missed, 'a') as fo:
            fo.write(msid + '\n')

    fits_data = update_database(msid, group, glim, start, stop, step)
Ejemplo n.º 16
0
def fetch_eng_data(msid_list, start, stop):
    """
    get eng data from archive
    input:  msid_list   --- a list of msids
            start       --- start time in <yyyy>:<ddd>:<hh>:<mm>:<ss>
            stop        --- stop time in <yyyy>:<ddd>:<hh>:<mm>:<ss>
    output: results     --- a list of lists of data extracted including time at beginning
    """
    results = []
    for pos, msid in enumerate(msid_list):
        fetched = fetch.MSID(msid, start, stop)
#
#--- keep the time stamps of the first msid as the leading column
#
        if pos == 0:
            results.append(list(fetched.times))

        results.append(list(fetched.vals))

    return results
Ejemplo n.º 17
0
    sys.path.insert(0, '..')
    if not 'ENG_ARCHIVE' in os.environ:
        os.environ['ENG_ARCHIVE'] = os.path.abspath(os.getcwd() + '/eng_archive')
    outfile = opt.out + '.test'
else:
    outfile = opt.out + '.flight'

import Ska.engarchive.fetch as fetch
# NOTE(review): Python 2 script fragment; "opt" (start/stop/out options) and
# "outfile" are defined earlier in the file, outside this chunk
print 'Fetch file is', fetch.__file__
print 'ENG_ARCHIVE is', os.environ.get('ENG_ARCHIVE')

# msids to capture and the statistic attributes recorded for each of them
msids = ('1crat', 'fptemp_11', 'orbitephem0_x', 'sim_z', 'tephin', 'cvcductr', 'dp_dpa_power')
attrs = ('times', 'vals', 'quals', 'stds', 'mins', 'maxes', 'means',
         'p01s', 'p05s', 'p16s', 'p50s', 'p84s', 'p95s', 'p99s')
out = dict()
for msid in msids:
    print 'Getting', msid
    # fetch full-resolution, 5-minute-stat, and daily-stat versions of the msid;
    # keep only the attributes each version actually provides
    dat = fetch.MSID(msid, opt.start, opt.stop)
    dat5 = fetch.MSID(msid, opt.start, opt.stop, stat='5min')
    datd = fetch.MSID(msid, opt.start, opt.stop, stat='daily')
    out[msid] = dict(dat=dict((x, getattr(dat, x)) for x in attrs if hasattr(dat, x)),
                     dat5=dict((x, getattr(dat5, x)) for x in attrs if hasattr(dat5, x)),
                     datd=dict((x, getattr(datd, x)) for x in attrs if hasattr(datd, x)))

# record provenance so a later comparison knows which archive produced the data
out['ENG_ARCHIVE'] = os.environ.get('ENG_ARCHIVE')
out['file'] = fetch.__file__
# protocol=-1 selects the highest (binary) pickle protocol
# NOTE(review): dumping a binary protocol to a text-mode 'w' handle only
# works on Python 2; Python 3 would need 'wb'
pickle.dump(out, open(outfile, 'w'), protocol=-1)


                     
def run_condtion_msid(msid, start, stop, period, alimit, cnd_msid):
    """
    extract data from ska database and analyze data
    input:  msid    --- msid
            start   --- starting time in seconds from 1998.1.1
            stop    --- stopping time in seconds from 1998.1.1
            period  --- data collection interval in seconds (e.g. 300, 3600, or 86400)
            alimit  --- a list of lists of limits
            cnd_msid    ---- msid which tells which limit set to use for given time
    output: save    --- a list of list of data:
                            time, average, median, std, min, max,
                            ratio of yellow lower violation,
                            ratio of yellow upper violation,
                            ratio of red lower violation,
                            ratio of red upper violation,
                            total data in the period,
                            yellow lower limit, yellow upper limit,
                            red lower limit, red upper limit
                            state
    """
    #
    #--- extract data with ska fetch for the given time period
    #--- (bad-quality samples are removed with the ~out.bads mask)
    #
    out = fetch.MSID(msid, start, stop)
    ok = ~out.bads
    dtime = out.times[ok]
    if len(dtime) < 1:
        return []

    tdata = out.vals[ok]
    tmax = dtime[-1]
    #
    #--- for the case this is multi limit case
    #--- NOTE(review): mtime is assigned but never used afterwards
    #
    if cnd_msid != 'none':
        out = fetch.MSID(cnd_msid, start, stop)
        mtime = out.times
        mdata = out.vals
        mlen = len(mdata)
#
#--- for the case this is single limit case
#
    else:
        mdata = ['none'] * len(dtime)
#
#--- there are 16 elements to keep in the output data (see docstring list)
#
    save = []
    for k in range(0, 16):
        save.append([])
#
#--- compute how many data collection periods exist for a given data period
#
    n_period = int((stop - start) / period) + 1
    #
    #--- collect data in each time period and compute statistics
    #
    for k in range(0, n_period):
        begin = start + k * period
        end = begin + period
        ctime = begin + 0.5 * period
        #
        #--- find the state of condition msid for this period of time
        #--- NOTE(review): this maps wall-clock time to an index assuming the
        #--- condition msid is uniformly sampled from time 0 to tmax -- an
        #--- approximation; confirm it is acceptable for the msids used
        #
        if cnd_msid == 'none':
            mkey = 'none'
        else:
            pos = int(mlen * begin / tmax) - 1
            if pos < 0:
                pos = 0
            if pos >= mlen:
                pos = mlen - 1
            mkey = mdata[pos].lower()
#
#--- set limit range only once at the beginning of each data collection period
#--- (fallback: effectively unbounded limits if no table is found)
#
        try:
            limit_table = find_limits(begin, mkey, alimit)
            [y_low, y_top, r_low, r_top] = limit_table
        except:
            limit_table = [-9999998.0, 9999998.0, -9999999.0, 9999999.0]
            [y_low, y_top, r_low,
             r_top] = [-9999998.0, 9999998.0, -9999999.0, 9999999.0]
#
#--- select data between the period
#
        ind = dtime >= begin
        btemp = dtime[ind]
        sdata = tdata[ind]

        ind = btemp < end
        sdata = sdata[ind]
        dcnt = len(sdata)
        if dcnt < 1:
            continue
#
#--- get stats
#
        dmin = min(sdata)
        dmax = max(sdata)
        avg = numpy.mean(sdata)
        #
        #--- if the value is too large something is wrong: so skip it
        #
        if abs(avg) > 100000000.0:
            continue

        med = numpy.median(sdata)
        std = numpy.std(sdata)
        #
        #--- count number of violations
        #
        [y_lc, y_uc, r_lc, r_uc] = find_limit_violatons(sdata, limit_table)
        #
        #--- save the resuts
        #--- NOTE(review): if find_limit_violatons returns plain ints, the
        #--- ratios below truncate to 0 or 1 under Python 2 integer division;
        #--- confirm the counts are floats (or that true division is in effect)
        #
        save[0].append(float(int(ctime)))
        save[1].append(float("%3.2f" % avg))
        save[2].append(float("%3.2f" % med))
        save[3].append(float("%3.2f" % std))
        save[4].append(float("%3.2f" % dmin))
        save[5].append(float("%3.2f" % dmax))
        save[6].append(float("%1.3f" % (y_lc / dcnt)))
        save[7].append(float("%1.3f" % (y_uc / dcnt)))
        save[8].append(float("%1.3f" % (r_lc / dcnt)))
        save[9].append(float("%1.3f" % (r_uc / dcnt)))
        save[10].append(dcnt)
        save[11].append(float("%3.2f" % y_low))
        save[12].append(float("%3.2f" % y_top))
        save[13].append(float("%3.2f" % r_low))
        save[14].append(float("%3.2f" % r_top))
        save[15].append(mkey)

    return save
Ejemplo n.º 19
0
def get_acen_data(tstart, tstop, file_id, fid_detect):
    """
    for given fid light information, extract acen fits files and extract needed information
    input:  tstart  --- starting time
            tstop   --- stopping time
            file_id --- id of the acen fits file
            fid_detect  --- a dictionary of information [<slot id>, <id string> <id number>]
    output: updated data file, e.g., I-1, S-3, H-I-1, H-S-3 etc in <data_dir>
    """
    #
    #--- first just get a list of potential acent fits files
    #
    acent_list = call_arc5gl('browse', 'pcad', 1, tstart=tstart, tstop=tstop,\
                              filetype='acacent', sub='aca')
    #
    #--- compare the list with a file id, and if it is found, proceed farther
    #
    for fname in file_id:
        chk = 0
        for comp in acent_list:
            mc = re.search(fname, comp)
            if mc is not None:
                filename = comp
                chk = 1
                break
        if chk == 0:
            continue
#
#--- extract an acen fits file
#
        [fits] = call_arc5gl('retrieve', 'pcad', 1, tstart='', tstop='',\
                             filetype='acacent', filename=filename, sub='aca')

        ff = pyfits.open(fits)
        data = ff[1].data
        ff.close()
        mcf.rm_file(fits)
        #
        #--- extract needed information for each slot
        #
        for m in range(0, len(fid_detect[fname][0])):
            slot_id = fid_detect[fname][0][m]
            mask = data['slot'] == slot_id
            out = data[mask]
            time = out['time']
            cent_i = out['cent_i']
            cent_j = out['cent_j']
            ang_y = out['ang_y']
            ang_z = out['ang_z']
            alg = out['alg']

            if len(time) == 0:
                continue
#
#---- take 5 min average for the data
#
            try:
                begin = time[0]
            except:
                continue
            end = begin + 300.0
            k_list = []
            sline = ''
#
#--- bug fix: the slot index "m" was previously reset to 0 here, so every
#--- slot's results were labeled with -- and written to -- slot 0's entries
#
            for k in range(0, len(time)):
                if time[k] < begin:
                    continue
                elif time[k] >= begin and time[k] < end:
                    k_list.append(k)
                else:
                    try:
#
#--- NOTE(review): the slice k_list[0]:k_list[-1] excludes the last sample
#--- of each bin; kept as-is to preserve the original averaging
#
                        atime = numpy.mean(time[k_list[0]:k_list[-1]])
                        acent_i = numpy.mean(cent_i[k_list[0]:k_list[-1]])
                        acent_j = numpy.mean(cent_j[k_list[0]:k_list[-1]])
                        aang_y = numpy.mean(ang_y[k_list[0]:k_list[-1]])
                        aang_z = numpy.mean(ang_z[k_list[0]:k_list[-1]])
                        aalg = alg[k_list[-1]]
                    except:
                        continue
#
#--- find fapos and tscpos info near to the given time interval
#
                    flist = fetch.MSID('3fapos', time[k_list[0]],
                                       time[k_list[-1]])
                    tslist = fetch.MSID('3tscpos', time[k_list[0]],
                                        time[k_list[-1]])
                    fapos = numpy.mean(flist.vals)
                    tscpos = numpy.mean(tslist.vals)

                    sline = sline + str(atime) + '\t'
                    sline = sline + str(fid_detect[fname][0][m]) + '\t'
                    sline = sline + str(fid_detect[fname][2][m]) + '\t'
                    sline = sline + str(aalg) + '\t'
                    sline = sline + str(format(acent_i, '.3f')) + '\t'
                    sline = sline + str(format(acent_j, '.3f')) + '\t'
                    sline = sline + str(format(aang_y, '.6f')) + '\t'
                    sline = sline + str(format(aang_z, '.6f')) + '\t'
                    sline = sline + str(fapos) + '\t'
                    sline = sline + str(tscpos) + '\n'

                    k_list = []
                    begin = end
                    end += 300.0
#
#--- write out the results for this slot
#
            ofile = data_dir + fid_detect[fname][1][m]
            with open(ofile, 'a') as fo:
                fo.write(sline)
Ejemplo n.º 20
0
def get_data(start, stop, year, out_dir):
    """
    extract data and update the compgradkodak related data sets for the given period
    input:  start   --- start time in seconds from 1998.1.1
            stop    --- stop time in seconds from 1998.1.1
            year    --- year of the extracted data (used in the output file name)
            out_dir --- output_directory
    output: <out_dir>/<msid>_full_data_<year>.fits
    """
    empty = [0]
    #
    #--- extract  4rt*** data
    #
    rt7 = []
    for k in range(0, 12):
        if k < 10:
            msid = '4rt70' + str(k) + 't'
        else:
            msid = '4rt7' + str(k) + 't'

        try:
            out = fetch.MSID(msid, start, stop)
            data = out.vals
            ttime = out.times
            tlist = list(ttime)
            rt7.append(data)
        except:
            rt7.append(empty)
#
#--- NOTE(review): ttime/tlist keep the values of the last successful fetch
#--- above; if every 4rt7 fetch fails they are undefined and the code below
#--- raises NameError -- confirm that at least one msid is always available
#
#--- extract 4rt575t separately
#
    out = fetch.MSID('4rt575t', start, stop)
    rt575 = out.vals
    #
    #--- create empty array and initialize ohrthr and oobthr lists
    #
    tlen = len(ttime)
    empty = numpy.zeros(tlen)
    ohrthr = [empty]
    oobthr = [empty]
    #
    #--- fill them up
    #
    for k in range(1, 65):
        if k < 10:
            msid = 'ohrthr0' + str(k)
        else:
            msid = 'ohrthr' + str(k)
        try:
            out = fetch.MSID(msid, start, stop)
            data = out.vals
            otime = out.times
            #
            #--- since 4rt arrays are 36 time dense, match the ohrthr and oobthr
            #--- by filling the gaps between
            #
            adata = fill_gaps(ttime, otime, data)

            ohrthr.append(adata)
        except:
            ohrthr.append(empty)

        if k < 10:
            msid = 'oobthr0' + str(k)
        else:
            msid = 'oobthr' + str(k)
        try:
            out = fetch.MSID(msid, start, stop)
            data = out.vals
            otime = out.times

            adata = fill_gaps(ttime, otime, data)

            oobthr.append(adata)
        except:
            oobthr.append(empty)
#
#--- now compute each quantity for the given time period
#
    hrmaavg = []
    hrmacav = []
    hrmaxgrd = []
    hrmaradgrd = []
    obaavg = []
    obaconeavg = []

    fwblkhdt = []
    aftblkhdt = []
    obaaxgrd = []

    mzobacone = []
    pzobacone = []
    obadiagrad = []

    hrmarange = []
    tfterange = []
    hrmastrutrnge = []
    scstrutrnge = []
    #
    #--- save time stamp separately for each data
    #
    t_hrmaavg = []
    t_hrmacav = []
    t_hrmaxgrd = []
    t_hrmaradgrd = []
    t_obaavg = []
    t_obaconeavg = []

    t_fwblkhdt = []
    t_aftblkhdt = []
    t_obaaxgrd = []

    t_mzobacone = []
    t_pzobacone = []
    t_obadiagrad = []

    t_hrmarange = []
    t_tfterange = []
    t_hrmastrutrnge = []
    t_scstrutrnge = []

    for k in range(0, tlen):
        out = compute_hrmaavg(ohrthr, k)
        if out != 'na':
            hrmaavg.append(out)
            t_hrmaavg.append(tlist[k])
#-------------------------
        out = compute_hrmacav(ohrthr, k)
        if out != 'na':
            hrmacav.append(out)
            t_hrmacav.append(tlist[k])
#-------------------------
        out = compute_hrmaxgrd(ohrthr, k)
        if out != 'na':
            hrmaxgrd.append(out)
            t_hrmaxgrd.append(tlist[k])
#------------------------
        out = compute_hrmaradgrd(ohrthr, k)
        if out != 'na':
            hrmaradgrd.append(out)
            t_hrmaradgrd.append(tlist[k])
#------------------------
        out = compute_obaavg(oobthr, k)
        if out != 'na':
            obaavg.append(out)
            t_obaavg.append(tlist[k])
#------------------------
        out = compute_obaconeavg(oobthr, k)
        if out != 'na':
            obaconeavg.append(out)
            t_obaconeavg.append(tlist[k])
#------------------------
        out = compute_fwblkhdt(oobthr, rt7, k)
        chk1 = 0
        if out != 'na':
            fwblkhdt.append(out)
            t_fwblkhdt.append(tlist[k])
            chk1 = 1
#------------------------
        out = compute_aftblkhdt(oobthr, k)
        chk2 = 0
        if out != 'na':
            aftblkhdt.append(out)
            t_aftblkhdt.append(tlist[k])
            chk2 = 1
#------------------------
#--- obaaxgrd needs both fwblkhdt and aftblkhdt values of this time step
#------------------------
        if (chk1 == 1) and (chk2 == 1):
            out = compute_obaaxgrd(fwblkhdt[-1], aftblkhdt[-1])
            if out != 'na':
                obaaxgrd.append(out)
                t_obaaxgrd.append(tlist[k])
#------------------------
        out = compute_mzobacone(oobthr, rt575, k)
        chk1 = 0
        if out != 'na':
            mzobacone.append(out)
            t_mzobacone.append(tlist[k])
            chk1 = 1
#------------------------
        out = compute_pzobacone(oobthr, k)
        chk2 = 0
        if out != 'na':
            pzobacone.append(out)
            t_pzobacone.append(tlist[k])
            chk2 = 1
#------------------------
#--- obadiagrad needs both mzobacone and pzobacone values of this time step
#------------------------
        if (chk1 == 1) and (chk2 == 1):
            out = compute_obadiagrad(mzobacone[-1], pzobacone[-1])
            if out != 'na':
                obadiagrad.append(out)
                t_obadiagrad.append(tlist[k])
#------------------------
        out = compute_hrmarange(ohrthr, k)
        if out != 'na':
            hrmarange.append(out)
            t_hrmarange.append(tlist[k])
#------------------------
        out = compute_tfterange(oobthr, k)
        if out != 'na':
            tfterange.append(out)
            t_tfterange.append(tlist[k])
#------------------------
        out = compute_hrmastrutrnge(oobthr, k)
        if out != 'na':
            hrmastrutrnge.append(out)
            t_hrmastrutrnge.append(tlist[k])
#------------------------
        out = compute_scstrutrnge(oobthr, k)
        if out != 'na':
            scstrutrnge.append(out)
            t_scstrutrnge.append(tlist[k])
#
#--- now create/update output fits files
#--- bug fix: the original used the Python 2-only "exec" statement to look up
#--- each column's data by name; an explicit mapping is safer and portable
#
    col_data = {
        'hrmaavg':       (t_hrmaavg, hrmaavg),
        'hrmacav':       (t_hrmacav, hrmacav),
        'hrmaxgrd':      (t_hrmaxgrd, hrmaxgrd),
        'hrmaradgrd':    (t_hrmaradgrd, hrmaradgrd),
        'obaavg':        (t_obaavg, obaavg),
        'obaconeavg':    (t_obaconeavg, obaconeavg),
        'fwblkhdt':      (t_fwblkhdt, fwblkhdt),
        'aftblkhdt':     (t_aftblkhdt, aftblkhdt),
        'obaaxgrd':      (t_obaaxgrd, obaaxgrd),
        'mzobacone':     (t_mzobacone, mzobacone),
        'pzobacone':     (t_pzobacone, pzobacone),
        'obadiagrad':    (t_obadiagrad, obadiagrad),
        'hrmarange':     (t_hrmarange, hrmarange),
        'tfterange':     (t_tfterange, tfterange),
        'hrmastrutrnge': (t_hrmastrutrnge, hrmastrutrnge),
        'scstrutrnge':   (t_scstrutrnge, scstrutrnge),
    }

    for col in ['hrmaavg', 'hrmacav', 'hrmaxgrd', 'hrmaradgrd', 'obaavg', 'obaconeavg', 'fwblkhdt',\
                'aftblkhdt', 'obaaxgrd', 'mzobacone', 'pzobacone', 'obadiagrad', 'hrmarange',\
                'tfterange', 'hrmastrutrnge', 'scstrutrnge']:

        tdata, odata = col_data[col]

        tdata = numpy.array(tdata)
        odata = numpy.array(odata)

        cdata = [tdata, odata]
        cols = ['time', col]

        fits = out_dir + col + '_full_data_' + str(year) + '.fits'
        if os.path.isfile(fits):
            ecf.update_fits_file(fits, cols, cdata)
        else:
            ecf.create_fits_file(fits, cols, cdata)
Ejemplo n.º 21
0
def check_condition(msid, tstart, tstop, condition, achk=0):
    """
    find the periods of the time when the condition is met
    input:  msid        --- msid
            tstart      --- starting time in seconds from 1998.1.1
            tstop       --- stopping time in seconds from 1998.1.1
            condition   --- condition value. if the value is larger than this
                            it is "ON" period
            achk        --- indicator of whether we need data cleaning; achk=1: yes
    output: c_start     --- a list of starting time of the active period
            c_stop      --- a list of stopping time of the active period
    """
    out    = fetch.MSID(msid, tstart, tstop)
    mtimes = out.times
    mvals  = out.vals
#
#--- voltage case (achk == 1): keep only data in the acceptable range;
#--- drop values above 200 and values strictly between 5 and 80
#
    if achk == 1:
        stime = []
        svals = []
        for tval, dval in zip(mtimes, mvals):
            if dval > 200:
                continue
            if 5 < dval < 80:
                continue
            stime.append(tval)
            svals.append(dval)
#
#--- non voltage case: use the data as extracted
#
    else:
        stime = mtimes
        svals = mvals
#
#--- walk the data and record the start/stop of each period in which the
#--- value exceeds "condition"; rising above it opens a period ("ON"),
#--- dropping below it closes the period (values exactly equal to the
#--- condition change nothing, matching the original behavior)
#
    c_start = []
    c_stop  = []
    state   = 0
    for pos in range(0, len(stime)):
        if state == 0:
            if svals[pos] > condition:
                c_start.append(stime[pos])
                state = 1
        else:
            if svals[pos] < condition:
                c_stop.append(stime[pos])
                state = 0
#
#--- if the last period is still open, close it with the final entry
#
    if state == 1:
        c_stop.append(stime[-1])

    return [c_start, c_stop]
Ejemplo n.º 22
0
## The basic process of fetching data always starts with importing the module
## into the python session::
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# NOTE: this is a Python 2 interactive demo script (print statements);
# the "# <demo> --- stop ---" markers split it into steps for a demo runner.
# clf()/plot() below are presumably provided by a preloaded pylab session
# (e.g. ipython --pylab) -- confirm before running this standalone.

print "Welcome to the fetch module!"
import Ska.engarchive.fetch as fetch

# <demo> --- stop ---
## The ``as fetch`` part of the ``import`` statement just creates an short alias 
## to avoid always typing the somewhat lengthy ``Ska.engarchive.fetch.MSID(..)``.  
## Fetching and plotting full time-resolution data for a single MSID is then quite 
## easy::

tephin = fetch.MSID('tephin', '2009:001', '2009:007') # (MSID, start, stop)
clf()
plot(tephin.times, tephin.vals)
# <demo> --- stop ---

## The ``tephin`` variable returned by ``fetch.MSID()`` is an ``MSID`` object and
## we can access the various object attributes with ``<object>.<attr>``.  The
## timestamps ``tephin.times`` and the telemetry values ``tephin.vals`` are both
## numpy arrays.  As such you can inspect them and perform numpy operations and
## explore their methods::

print type(tephin)
print type(tephin.vals)
print tephin.vals.mean()
print tephin.vals.min()
print tephin.times[1:20]
print tephin.vals[1:20]
Ejemplo n.º 23
0
#!/usr/bin/env /data/mta/Script/Python3.6/envs/ska3/bin/python

#
#--- quick command-line check of whether an msid exists in the ska
#--- engineering archive.  usage: <script> <msid>
#

import os
import sys
import re
import string
import time
import numpy
import astropy.io.fits as pyfits
import Ska.engarchive.fetch as fetch
import Chandra.Time

if len(sys.argv) > 1:
    msid = sys.argv[1]

else:
#
#--- without an msid we cannot proceed; the original fell through here and
#--- crashed with NameError on the undefined "msid" below
#
    print("msid??")
    sys.exit(1)

try:
    #out = fetch.MSID(msid, '2017:001:00:00:00', '2017:002')
    out = fetch.MSID(msid, '2000:060:00:00:00', '2000:062:00:00:00')
    tdata = out.vals
    print("I AM HERE: " + str(tdata))
except Exception:
#
#--- fetch raises when the msid is unknown to the archive
#
    print("msid is not in the database")
Ejemplo n.º 24
0
def find_cold_plates(t_list):
    """
    create cold plate temperature data lists corresponding to the given time list
    input:  t_list  --- a list of time in seconds from 1998.1.1
                        (assumed time-ordered --- see search note below)
    output: crat    --- a list of temperature of plate A (in C; 999.0 if no match)
            crbt    --- a list of temperature of plate B (in C; 999.0 if no match)
    """
#
#--- set data time interval
#
    start = t_list[0]
    stop  = t_list[-1]
#
#--- cold plate A
#
    out   = fetch.MSID('1crat', start, stop)
    tlist = out.times
    alist = out.vals
#
#--- cold plate B
#
    out   = fetch.MSID('1crbt', start, stop)
    blist = out.vals
#
#--- make sure that data have the same numbers of entries
#
    alen  = len(alist)
    blen  = len(blist)
    if alen < blen:
        blist = blist[:alen]
    elif alen > blen:
#
#--- pad with the last value; fetch returns a numpy array which has no
#--- append method (the original called blist.append() and crashed with
#--- AttributeError here), so build the padded list explicitly
#
        blist = list(blist) + [blist[-1]] * (alen - blen)
#
#--- find the cold plate temperatures corresponding to the given times
#
    crat = []
    crbt = []
#
#--- m is the search start position; it is advanced after each match so the
#--- scan does not restart from the beginning (the original reset m to 0
#--- inside the loop, which made the "m = k - 10" restart logic dead code)
#
    m    = 0
    for tent in t_list:
#
#--- accept data within +/- 30 seconds of the requested time
#--- (the original comment said +/- 10, but the code uses 30)
#
        begin = tent - 30
        end   = tent + 30

        chk   = 0
        for k in range(m, alen):
            if (tlist[k] >= begin) and (tlist[k] <= end):
#
#--- telemetry is in K; convert to C
#
                crat.append(float(alist[k]) - 273.15)
                crbt.append(float(blist[k]) - 273.15)
#
#--- restart the next search a little before the current match
#
                m = k - 10
                if m < 0:
                    m = 0
                chk = 1
                break
#
#--- no data point in the window: fill with the 999.0 sentinel
#
        if chk == 0:
            crat.append(999.0)
            crbt.append(999.0)

    return [crat, crbt]
Ejemplo n.º 25
0
def create_interactive_page(msid, group, start, stop, step):
    """
    create an interactive html page for a given msid
    input:  msid    --- msid
            group   --- group name
            start   --- start time
            stop    --- stop time
            step    --- bin size in seconds
    """
    start = ecf.check_time_format(start)
    stop  = ecf.check_time_format(stop)
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database and the mta msid <---> sql msid conversion list
#
    mta_db    = ecf.read_mta_database()
    mta_cross = ecf.read_cross_check_table()
#
#--- get limit data table for the msid; fall back to the default unit
#--- indicator when the msid has no unit entry
#
    try:
        tchk = ecf.convert_unit_indicator(udict[msid])
    except:
        tchk = 0

    glim = ecf.get_limit(msid, tchk, mta_db, mta_cross)
#
#--- extract data: first from the ska archive, then from the mta local
#--- database; when both fail, mark the data set as unavailable
#
    failed = 0
    try:
        dout  = fetch.MSID(msid, start, stop)
        tdata = dout.vals
        ttime = dout.times
    except:
        try:
            [ttime, tdata] = uds.get_mta_fits_data(msid, start, stop)
        except:
            failed = 1

    fits_data = 'na'
    if failed == 0:
#
#--- only short_p can change step size (by setting "step")
#
        [week_p, short_p, long_p] = uds.process_day_data(msid,
                                                         ttime,
                                                         tdata,
                                                         glim,
                                                         step=step)
#
#--- fits creation fails for the mta special cases; leave fits_data as 'na'
#
        try:
            fits_data = create_inter_fits(msid, short_p)
        except:
            pass
#
#--- create interactive html page
#
    create_html_page(msid, fits_data, step)
#
#--- remove the temporary fits file
#
    if fits_data != 'na':
        os.system('rm -rf ' + fits_data)
Ejemplo n.º 26
0
def get_data_from_archive(msid, start, stop, glim, step=300.0):
    """
    extract data from the archive and compute the stats
    input:  msid    --- msid of the data
            start   --- start time
            stop    --- stop time
            glim    --- a list of limit tables
            step    --- interval of the data in seconds. default: 300 sec
    output: a one-element list containing week_p, which holds:
                wtime   --- a list of time in sec from 1998.1.1
                wdata   --- a list of the  mean of each interval
                wmed    --- a list of the median of each interval
                wstd    --- a list of the std of each interval
                wmin    --- a list of the min of each interval
                wmax    --- a list of the max of each interval
                wyl     --- a list of the rate of yellow lower violation
                wyu     --- a list of the rate of yellow upper violation
                wrl     --- a list of the rate of red lower violation
                wru     --- a list of the rate of red upper violation
                wcnt    --- a list of the total data counts
                wyl     --- a list of the lower yellow limits
                wyu     --- a list of the upper yellow limits
                wrl     --- a list of the lower red limits
                wru     --- a list of the upper red limits
    """
#
#--- stat accumulators for the "step" wide bins
#
    wtime = []
    wdata = []
    wmed = []
    wstd = []
    wmin = []
    wmax = []
    wyl = []
    wyu = []
    wrl = []
    wru = []
    wcnt = []
    wsave = []

    try:
        out = fetch.MSID(msid, start, stop)
        tdata = out.vals
        ttime = out.times
        data = []
        dtime = []
#
#--- drop non-numeric entries and the +/-999 fill values
#
        for k in range(0, len(tdata)):
            try:
                test = int(float(tdata[k]))
            except:
                continue

            if int(abs(tdata[k])) == 999:
                continue

            data.append(tdata[k])
            dtime.append(ttime[k])

        data = numpy.array(data)
        dtime = numpy.array(dtime)
#
#--- divide the data into "step" wide bins and compute the stats of each bin;
#--- chk == 0 means the current bin has not been closed yet
#
        spos = 0
        chk = 1
        send = dtime[spos] + step

        for k in range(0, len(dtime)):

            if dtime[k] < send:
                chk = 0
            else:
                sdata = data[spos:k]
                avg = sdata.mean()
                med = numpy.median(sdata)
                sig = sdata.std()
                amin = sdata.min()
                amax = sdata.max()
                ftime = dtime[spos + int(0.5 * (k - spos))]
                vlimits = find_violation_range(glim, ftime)
                [yl, yu, rl, ru, tot] = find_violation_rate(sdata, vlimits)

                wtime.append(ftime)
                wdata.append(avg)
                wmed.append(med)
                wstd.append(sig)
                wmin.append(amin)
                wmax.append(amax)
                wyl.append(yl)
                wyu.append(yu)
                wrl.append(rl)
                wru.append(ru)
                wcnt.append(tot)
                wsave.append(vlimits)

                spos = k
                send = dtime[k] + step
                chk = 1
#
#--- process the leftover bin.  the original sliced data[spos:k] here,
#--- which always dropped the very last sample; slice to the end instead
#
        if chk == 0:
            rdata = data[spos:]
            avg = rdata.mean()
            med = numpy.median(rdata)
            sig = rdata.std()
            amin = rdata.min()
            amax = rdata.max()
            ftime = dtime[spos + int(0.5 * (len(dtime) - spos))]
            vlimits = find_violation_range(glim, ftime)
            [yl, yu, rl, ru, tot] = find_violation_rate(rdata, vlimits)

            wtime.append(ftime)
            wdata.append(avg)
            wmed.append(med)
            wstd.append(sig)
            wmin.append(amin)
            wmax.append(amax)
            wyl.append(yl)
            wyu.append(yu)
            wrl.append(rl)
            wru.append(ru)
            wcnt.append(tot)
            wsave.append(vlimits)
#
#--- if the archive extraction fails (or no data), return the empty lists
#
    except Exception:
        pass

    week_p = [wtime, wdata, wmed, wstd, wmin, wmax, wyl, wyu, wrl, wru, wcnt]
#
#--- adding limits to the table
#
    vtemp = [[], [], [], []]
    for k in range(0, len(wsave)):
        for m in range(0, 4):
            vtemp[m].append(wsave[k][m])
    week_p = week_p + vtemp

    return [week_p]
Ejemplo n.º 27
0
def get_data_from_archive(msid, start, stop, glim, step):
    """
    extract data from the archive and compute the stats
    input:  msid    --- msid of the data
            start   --- start time
            stop    --- stop time
            glim    --- a list of limit tables
            step    --- a bin size in seconds; 0.0 keeps the full resolution
    output: week_p, a list of lists:
                wtime   --- a list of time in sec from 1998.1.1
                wdata   --- a list of the  mean of each interval
                wmed    --- a list of the median of each interval
                wstd    --- a list of the std of each interval
                wmin    --- a list of the min of each interval
                wmax    --- a list of the max of each interval
                wyl     --- a list of the rate of yellow lower violation
                wyu     --- a list of the rate of yellow upper violation
                wrl     --- a list of the rate of red lower violation
                wru     --- a list of the rate of red upper violation
                wcnt    --- a list of the total data counts
                wyl     --- a list of the lower yellow limits
                wyu     --- a list of the upper yellow limits
                wrl     --- a list of the lower red limits
                wru     --- a list of the upper red limits
    """
#
#--- stat accumulators
#
    wtime = []
    wdata = []
    wmed = []
    wstd = []
    wmin = []
    wmax = []
    wyl = []
    wyu = []
    wrl = []
    wru = []
    wcnt = []
    wsave = []
#
#--- extract data from archive; if it fails, return the empty stat lists.
#--- (the original left debug scaffolding -- "xxx = 9999" guarding the body
#--- with the try/except commented out and an unreachable else marked
#--- "REMOVE!!" -- in place of this handler; the intended form is restored)
#
    try:
        out = fetch.MSID(msid, start, stop)
        tdata = out.vals
        ttime = out.times
        data = []
        dtime = []
#
#--- if the data is not given, the database displays it as -999.999 (or similar); so drop them
#
        for k in range(0, len(tdata)):

            try:
                test = int(float(tdata[k]))
            except:
                continue

            if int(abs(tdata[k])) == 999:
                continue

            data.append(tdata[k])
            dtime.append(ttime[k])

        data = numpy.array(data)
        dtime = numpy.array(dtime)
#
#--- if a full resolution is asked...
#
        if step == 0.0:
            wtime = dtime
            wdata = data
            wmed = data
            wstd = [0] * len(data)
            wmin = data
            wmax = data
            for m in range(0, len(dtime)):
                vlimits = find_violation_range(glim, dtime[m])
                darray = numpy.array([data[m]])
                [yl, yu, rl, ru, tot] = find_violation_rate(darray, vlimits)
                wyl.append(yl)
                wyu.append(yu)
                wrl.append(rl)
                wru.append(ru)
                wcnt.append(1)
                wsave.append(vlimits)
#
#--- if asked, divide the data into a smaller period (step size);
#--- chk2 == 0 means the current bin has not been closed yet
#
        else:
            spos2 = 0
            chk2 = 2
            send2 = dtime[spos2] + step

            for k in range(0, len(dtime)):

                if dtime[k] < send2:
                    chk2 = 0
                else:
                    sdata = data[spos2:k]
#
#--- skip empty bins
#
                    if len(sdata) <= 0:
                        spos2 = k
                        send2 = dtime[k] + step
                        chk2 = 1
                        continue

                    avg = sdata.mean()
                    med = numpy.median(sdata)
                    sig = sdata.std()
                    amin = sdata.min()
                    amax = sdata.max()
                    ftime = dtime[spos2 + int(0.5 * (k - spos2))]
                    vlimits = find_violation_range(glim, ftime)
                    [yl, yu, rl, ru, tot] = find_violation_rate(sdata, vlimits)

                    wtime.append(ftime)
                    wdata.append(avg)
                    wmed.append(med)
                    wstd.append(sig)
                    wmin.append(amin)
                    wmax.append(amax)
                    wyl.append(yl)
                    wyu.append(yu)
                    wrl.append(rl)
                    wru.append(ru)
                    wcnt.append(tot)
                    wsave.append(vlimits)

                    spos2 = k
                    send2 = dtime[k] + step
                    chk2 = 1
#
#--- process the leftover bin.  the original sliced data[spos2:k] here,
#--- which always dropped the very last sample; slice to the end instead
#
            if chk2 == 0:
                rdata = data[spos2:]
                if len(rdata) > 0:
                    avg = rdata.mean()
                    med = numpy.median(rdata)
                    sig = rdata.std()
                    amin = rdata.min()
                    amax = rdata.max()
                    ftime = dtime[spos2 + int(0.5 * (len(dtime) - spos2))]
                    vlimits = find_violation_range(glim, ftime)
                    [yl, yu, rl, ru, tot] = find_violation_rate(rdata, vlimits)

                    wtime.append(ftime)
                    wdata.append(avg)
                    wmed.append(med)
                    wstd.append(sig)
                    wmin.append(amin)
                    wmax.append(amax)
                    wyl.append(yl)
                    wyu.append(yu)
                    wrl.append(rl)
                    wru.append(ru)
                    wcnt.append(tot)
                    wsave.append(vlimits)

    except Exception:
        pass

    week_p = [wtime, wdata, wmed, wstd, wmin, wmax, wyl, wyu, wrl, wru, wcnt]
#
#--- adding limits to the table
#
    vtemp = [[], [], [], []]
    for k in range(0, len(wsave)):
        for m in range(0, 4):
            vtemp[m].append(wsave[k][m])
    week_p = week_p + vtemp

    return week_p
Ejemplo n.º 28
0
def tephin_leak_data_update(year=''):
    """
    update tephin - ephin rate/leak current data
    input:  year    --- year of the data to be updated. if it is '', the current year is used
    output: <data_dir>/<msid>/<msid>_data_<year>.fits
    """
#
#--- set data extraction period; a 6 element result means the period goes
#--- over the year boundary and the first three entries cover the
#--- last-year portion
#
    tout = set_time_period(year)
    if len(tout) == 6:
        [lstart, lstop, lyear, tstart, tstop, year] = tout
        chk = 1
    else:
        [tstart, tstop, year] = tout
        chk = 0
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = read_cross_check_table()
#
#--- extract tephin data
#
    tchk = convert_unit_indicator(udict['tephin'])
    glim = get_limit('tephin', tchk, mta_db, mta_cross)
#
#--- for the case the time span goes over the year boundary.
#--- (the original referenced undefined "ltstart"/"ltstop" here and
#--- raised NameError; the unpacked names are lstart/lstop)
#
    if chk == 1:
        ltephin = update_database('tephin', 'Eleak', glim, lstart, lstop,
                                  lyear)

    tephin = update_database('tephin', 'Eleak', glim, tstart, tstop, year)

    #
    #--- read msid list
    #
    mfile = house_keeping + 'msid_list_eph_tephin'
    data = ecf.read_file_data(mfile)

    for ent in data:
#
#--- skip comment lines; find msid and group name.
#--- (the original called msid.strip()/group.strip() and discarded the
#--- results; keep the stripped values)
#
        mc = re.search('#', ent)
        if mc is not None:
            continue
        atemp = re.split(r'\s+', ent)
        msid = atemp[0].strip()
        group = atemp[1].strip()
        #
        #--- get limit data table for the msid
        #
        try:
            tchk = convert_unit_indicator(udict[msid])
        except:
            tchk = 0
        glim = get_limit(msid, tchk, mta_db, mta_cross)
        #
        #--- check the msid exists in the archive; record the missing ones
        #
        try:
            out = fetch.MSID(msid, '2017:001:00:00:00', '2017:002')
            print("MSID: " + msid)
        except:
            missed = house_keeping + '/missing_data'
            fo = open(missed, 'a')
            fo.write(msid)
            fo.write('\n')
            fo.close()

            continue
#
#--- for the case, the time span goes over the year boundary
#
        if chk == 1:
            update_database(msid,
                            group,
                            glim,
                            lstart,
                            lstop,
                            lyear,
                            sdata=ltephin)

        update_database(msid, group, glim, tstart, tstop, year, sdata=tephin)
Ejemplo n.º 29
0
def get_data_from_archive(msid, start, stop, glim, step=3600.0):
    """
    extract data from the archive and compute the stats
    input:  msid    --- msid of the data
            start   --- start time
            stop    --- stop time
            glim    --- a list of limit tables
            step    --- interval of the data. default: 3600 sec
    output: a list of three lists which contain:
            week_p:  (daily bins restricted to the last two weeks)
                wtime   --- a list of time in sec from 1998.1.1
                wdata   --- a list of the  mean of each interval
                wmed    --- a list of the median of each interval
                wstd    --- a list of the std of each interval
                wmin    --- a list of the min of each interval
                wmax    --- a list of the max of each interval
                wyl     --- a list of the rate of yellow lower violation
                wyu     --- a list of the rate of yellow upper violation
                wrl     --- a list of the rate of red lower violation
                wru     --- a list of the rate of red upper violation
                wcnt    --- a list of the total data counts
                wyl     --- a list of the lower yellow limits
                wyu     --- a list of the upper yellow limits
                wrl     --- a list of the lower red limits
                wru     --- a list of the upper red limits
            short_p: ("step" wide bins over the whole period)
                btime   --- a list of time in sec from 1998.1.1
                bdata   --- a list of the  mean of each interval
                bmed    --- a list of the median of each interval
                bstd    --- a list of the std of each interval
                bmin    --- a list of the min of each interval
                bmax    --- a list of the max of each interval
                byl     --- a list of the rate of yellow lower violation
                byu     --- a list of the rate of yellow upper violation
                brl     --- a list of the rate of red lower violation
                bru     --- a list of the rate of red upper violation
                bcnt    --- a list of the total data counts
                byl     --- a list of the lower yellow limits
                byu     --- a list of the upper yellow limits
                brl     --- a list of the lower red limits
                bru     --- a list of the upper red limits
            long_p:
                    --- all in one element list form
                ftime   --- a mid time of the entire extracted data period
                fdata   --- the mean of the entire extracted data
                fstd    --- the std of the entire extracted data
                fmin    --- the min of the entire extracted data
                fmax    --- the max of the entire extracted data
                ylow    --- the rate of yellow lower violation
                yupper  --- the rate of yellow upper violation
                rlow    --- the rate of red lower violation
                rupper  --- the rate of red upper violation
                tcnt    --- the total counts of the data
                ylow    --- the lower yellow limit
                yup     --- the upper yellow limit
                rlow    --- the lower red limit
                rup     --- the upper red limit
    """
    #
    #--- accumulators for the daily (step2 = 86400 sec) bins over the last
    #--- two weeks (despite the original "5 min step" comment)
    #
    wtime = []
    wdata = []
    wmed = []
    wstd = []
    wmin = []
    wmax = []
    wyl = []
    wyu = []
    wrl = []
    wru = []
    wcnt = []
    step2 = 86400
    wstart = stop - 86400.0 * 14.0  #---- two weeks ago

    #
    #--- accumulators for the "step" wide bins over the whole period
    #
    btime = []
    bdata = []
    bmed = []
    bstd = []
    bmin = []
    bmax = []
    byl = []
    byu = []
    brl = []
    bru = []
    bcnt = []

    wsave = []
    vsave = []
    #
    #--- extract data from archive; drop flagged-bad samples, then samples
    #--- in the [-1000, -999] fill range
    #
    try:
        out = fetch.MSID(msid, start, stop)
        ok = ~out.bads
        tdata = out.vals[ok]
        ttime = out.times[ok]

        # NOTE(review): tind is a boolean mask wrapped in a one-element
        # list; numpy still applies it as a mask here, but this index form
        # is deprecated -- confirm and unwrap the brackets
        tind = [(tdata > -999) | (tdata <= -1000)]
        tdata = tdata[tind]
        ttime = ttime[tind]

    except:
        #
        #--- archive failed: try the mta local database; if that also
        #--- fails, return placeholder stats with sentinel limits
        #
        try:
            [ttime, tdata] = get_mta_fits_data(msid, start, stop)
        except:
            rdata = [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0],
                     [-9.0e9], [-9.0e9], [9.0e9], [9.0e9]]

            return [rdata, rdata, rdata]

    data = []
    dtime = []
    #
    #--- if the data is not given, the database displays it as -999.999 (or similar); so drop them
    #
    for k in range(0, len(tdata)):

        try:
            test = int(float(tdata[k]))
        except:
            continue

        if int(abs(tdata[k])) == 999:
            continue

        data.append(tdata[k])
        dtime.append(ttime[k])

    data = numpy.array(data)
    dtime = numpy.array(dtime)
    #
    #--- get stat for the entire period
    #
    ftime = dtime.mean()
    fdata = data.mean()
    fmed = numpy.median(data)
    fstd = data.std()
    fmin = data.min()
    fmax = data.max()
    #
    #--- find the violation limits of that time
    #
    vlimits = find_violation_range(glim, ftime)
    #
    #--- get the violation rate of the entire period
    #
    [ylow, yupper, rlow, rupper, tcnt] = find_violation_rate(data, vlimits)
    #
    #--- if asked, divide the data into a smaller period (step size);
    #--- chk/chk2 == 0 means the corresponding bin has not been closed yet
    #
    if step != 0:
        spos = 0
        spos2 = 0
        chk = 1
        chk2 = 2
        send = dtime[spos] + step
        send2 = dtime[spos2] + step2

        for k in range(0, len(dtime)):

            #
            #--- daily bins: only the data after wstart (two weeks ago)
            #
            if dtime[k] > wstart:
                if dtime[k] < send2:
                    chk2 = 0
                else:
                    sdata = data[spos2:k]
                    avg = sdata.mean()
                    med = numpy.median(sdata)
                    sig = sdata.std()
                    amin = sdata.min()
                    amax = sdata.max()
                    ftime = dtime[spos2 + int(0.5 * (k - spos2))]
                    vlimits = find_violation_range(glim, ftime)
                    [yl, yu, rl, ru, tot] = find_violation_rate(sdata, vlimits)

                    wtime.append(ftime)
                    wdata.append(avg)
                    wmed.append(med)
                    wstd.append(sig)
                    wmin.append(amin)
                    wmax.append(amax)
                    wyl.append(yl)
                    wyu.append(yu)
                    wrl.append(rl)
                    wru.append(ru)
                    wcnt.append(tot)
                    wsave.append(vlimits)

                    spos2 = k
                    send2 = dtime[k] + step2
                    chk2 = 1
            else:
                send2 = dtime[spos2] + step2

            #
            #--- "step" wide bins over the whole period
            #
            if dtime[k] < send:
                chk = 0
            else:
                rdata = data[spos:k]
                avg = rdata.mean()
                med = numpy.median(rdata)
                sig = rdata.std()
                amin = rdata.min()
                amax = rdata.max()
                ftime = dtime[spos + int(0.5 * (k - spos))]
                vlimits = find_violation_range(glim, ftime)
                [yl, yu, rl, ru, tot] = find_violation_rate(rdata, vlimits)

                btime.append(ftime)
                bdata.append(avg)
                bmed.append(med)
                bstd.append(sig)
                bmin.append(amin)
                bmax.append(amax)
                byl.append(yl)
                byu.append(yu)
                brl.append(rl)
                bru.append(ru)
                bcnt.append(tot)
                vsave.append(vlimits)

                spos = k
                send = dtime[k] + step
                chk = 1
#
#--- check whether there are any left over; if so add it to the data lists.
#--- NOTE(review): data[spos2:k] / data[spos:k] below exclude index k (the
#--- last sample), so both leftover bins drop the final data point --
#--- presumably data[spos2:] / data[spos:] was intended; confirm
#
        if chk2 == 0:
            rdata = data[spos2:k]
            avg = rdata.mean()
            med = numpy.median(rdata)
            sig = rdata.std()
            amin = rdata.min()
            amax = rdata.max()
            ftime = dtime[spos2 + int(0.5 * (k - spos2))]
            vlimits = find_violation_range(glim, ftime)
            [yl, yu, rl, ru, tot] = find_violation_rate(rdata, vlimits)

            wtime.append(dtime[spos2 + int(0.5 * (k - spos2))])
            wdata.append(avg)
            wmed.append(med)
            wstd.append(sig)
            wmin.append(amin)
            wmax.append(amax)
            wyl.append(yl)
            wyu.append(yu)
            wrl.append(rl)
            wru.append(ru)
            wcnt.append(tot)
            wsave.append(vlimits)

        if chk == 0:
            rdata = data[spos:k]
            avg = rdata.mean()
            med = numpy.median(rdata)
            sig = rdata.std()
            amin = rdata.min()
            amax = rdata.max()
            ftime = dtime[spos + int(0.5 * (k - spos))]
            vlimits = find_violation_range(glim, ftime)
            [yl, yu, rl, ru, tot] = find_violation_rate(rdata, vlimits)

            btime.append(dtime[spos + int(0.5 * (k - spos))])
            bdata.append(avg)
            bmed.append(med)
            bstd.append(sig)
            bmin.append(amin)
            bmax.append(amax)
            byl.append(yl)
            byu.append(yu)
            brl.append(rl)
            bru.append(ru)
            bcnt.append(tot)
            vsave.append(vlimits)

    week_p = [wtime, wdata, wmed, wstd, wmin, wmax, wyl, wyu, wrl, wru, wcnt]
    short_p = [btime, bdata, bmed, bstd, bmin, bmax, byl, byu, brl, bru, bcnt]
    #
    #--- adding limits to the table
    #
    vtemp = [[], [], [], []]
    for k in range(0, len(wsave)):
        for m in range(0, 4):
            vtemp[m].append(wsave[k][m])
    week_p = week_p + vtemp
    #
    vtemp = [[], [], [], []]
    for k in range(0, len(vsave)):
        for m in range(0, 4):
            vtemp[m].append(vsave[k][m])
    short_p = short_p + vtemp

    long_p = [[ftime], [fdata], [fmed], [fstd], [fmin], [fmax]]
    long_p = long_p + [[ylow], [yupper], [rlow], [rupper], [tcnt]]
    long_p = long_p + [[vlimits[0]], [vlimits[1]], [vlimits[2]], [vlimits[3]]]

    return [week_p, short_p, long_p]
Ejemplo n.º 30
0
def update_msid_data(msid_list='msid_list_fetch'):
    """
    update all msid listed in msid_list
    input:  msid_list   --- a list of msids to processed. default: msid_list_fetch
    output: <msid>_data.fits/<msid>_short_data.fits
    """
    start_time = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
#
#--- create msid <---> unit dictionary
#
    [udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
    mta_db = ecf.read_mta_database()

#
#--- read mta msid <---> sql msid conversion list
#
    mta_cross = ecf.read_cross_check_table()
#
#--- read msid list
#
    mfile = house_keeping + msid_list
    data  = mcf.read_data_file(mfile)

    for ent in data:
#
#--- find msid and group name
#
        mc = re.search('#', ent)
        if mc is not None:
            continue
        try:
            [msid, group] = re.split('\s+', ent)
        except:
            atemp = re.split('\s+', ent)
            msid  = atemp[0]
            group = atemp[1]

        msid.strip()
        group.strip()
#
#--- get limit data table for the msid
#
        try:
            tchk  = convert_unit_indicator(udict[msid])
        except:
            tchk  = 0
        ####glim  = gsr.read_glimmon(msid, tchk)
        if msid in sp_limt_case_c:
            tchk = 1
        ###print "I AM HERE TCHK : " + str(tchk) + "<--->" + str(udict[msid])
        glim  = get_limit(msid, tchk, mta_db, mta_cross)
        ###print "I AM HERE GLIM: " + str(glim)
        ###exit(1)
#
#--- update database
#
        try:
            out = fetch.MSID(msid, '2017:001:00:00:00', '2017:002')
            print("MSID: " + msid)
        except:
            out = get_mta_fits_data(msid, '2017:001:00:00:00', '2017:002')

            if out == False:
                missed = house_keeping + '/missing_data'
                with open(missed, 'a') as fo
                    fo.write(msid+ '\n')
                continue