def update_week_database(msid, group, glim, pstart, pstop, step):
    """
    update/create a week-long fits data file of msid
    input:  msid    --- msid
            group   --- group name (sub directory under data_dir)
            glim    --- g limit data
            pstart  --- starting time in seconds from 1998.1.1
            pstop   --- stopping time in seconds from 1998.1.1
            step    --- time interval of the short time data
    output: <data_dir>/<group>/<msid>_week_data.fits
            fits3   --- the path of the created fits file
    """

    cols  = ['time', msid, 'med', 'std', 'min', 'max', 'ylower',\
             'yupper', 'rlower', 'rupper', 'dcount',\
             'ylimlower', 'ylimupper', 'rlimlower', 'rlimupper']

    out_dir = data_dir + group + '/'
#
#--- make sure that the sub directory exists
#--- (os.makedirs is safer than shelling out 'mkdir': no shell, handles parents)
#
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)

    week_p = get_data_from_archive(msid, pstart, pstop, glim, step)

    fits3 = out_dir + msid + '_week_data.fits'
    ecf.create_fits_file(fits3, cols, week_p)

    return fits3
# Beispiel #2   (scrape artifact: example separator and vote count, commented
# 0             out so the module remains importable)
def get_data(start, stop, year, msid_list, out_dir):
    """
    update msid data in msid_list for the given data period
    input:  start   --- start time in seconds from 1998.1.1
            stop    --- stop time in seconds from 1998.1.1
            year    --- the year in which data is extracted
            msid_list   --- a list of msids
            out_dir --- output_directory
    output: <out_dir>/<msid>_full_data_<year>.fits
    """
    print(str(start) + '<-->' + str(stop))

    for msid in msid_list:
#
#--- fetch the telemetry and shift the values by -273.15
#--- (presumably a Kelvin to Celsius conversion --- TODO confirm)
#
        fobj    = fetch.MSID(msid, start, stop)
        vals    = fobj.vals - 273.15
        tstamps = fobj.times

        ocols = ['time', msid]
        cdata = [tstamps, vals]
#
#--- append to the existing yearly fits file, or create a new one
#
        ofits = out_dir + msid + '_full_data_' + str(year) + '.fits'
        if os.path.isfile(ofits):
            ecf.update_fits_file(ofits, ocols, cdata)
        else:
            ecf.create_fits_file(ofits, ocols, cdata)
def remove_old_data(fits, cols, cut):
    """
    remove the data older than the cut time
    input:  fits    --- fits file name
            cols    --- a list of column names
            cut     --- cut time in seconds from 1998.1.1
    output: updated fits file
    """

    f = pyfits.open(fits)
    data = f[1].data
    f.close()
#
#--- find the first entry at or after the cut time.
#--- bug fix: if every entry is older than the cut, the original left
#--- pos = 0 and kept everything; default to len(dtime) so all rows drop
#
    dtime = list(data['time'])
    pos = len(dtime)
    for k in range(0, len(dtime)):
        if dtime[k] >= cut:
            pos = k
            break
#
#--- remove the data before the cut time
#
    udata = []
    for k in range(0, len(cols)):
        udata.append(list(data[cols[k]][pos:]))

    mcf.rm_files(fits)
    ecf.create_fits_file(fits, cols, udata)
# Beispiel #4   (scrape artifact, commented out)
# 0
def update_database(msid, group, glim, pstart=0, pstop=0, step=3600.0):
    """
    update/create fits data files of msid
    input:  msid    --- msid
            group   --- group name
            glim    --- g limit data
            pstart  --- starting time in seconds from 1998.1.1;
                        currently ignored (overridden below --- see NOTE)
            pstop   --- stopping time in seconds from 1998.1.1;
                        currently ignored (overridden below --- see NOTE)
            step    --- time interval of the short time data set; default 3600.0
    output: ./Outdir/<Group>/<msid>_data.fits
    """
#
#--- msids ending in 'tc' are stored under the name without the trailing 'c'
#
    test = str(msid[-2] + msid[-1]).lower()
    if test == 'tc':
        tmsid = msid[:-1]
    else:
        tmsid = msid

    cols  = ['time',tmsid, 'med', 'std', 'min', 'max', 'ylower', 'yupper', 'rlower', 'rupper', 'dcount',\
             'ylimlower', 'ylimupper', 'rlimlower', 'rlimupper']
#
#--- NOTE(review): the pstart/pstop arguments are overridden here with a fixed
#--- start and "yesterday" --- confirm this hard-coding is intentional
#
    pstart = 49852799
    stday = time.strftime("%Y:%j:00:00:00", time.gmtime())
    pstop = Chandra.Time.DateTime(stday).secs - 86400.0

    pstep  = 86400
    out_dir = './Outdir/' + group.capitalize() + '/'
#
#--- make sure that the sub directory exists
#
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)

    fits  = out_dir + tmsid + '_data.fits'
    fits2 = out_dir + tmsid + '_short_data.fits'
    fits3 = out_dir + tmsid + '_week_data.fits'

    print("FITS: " + fits)
#
#--- walk the period in 30-day (2592000 sec) chunks
#
    begin = pstart
    end   = begin + 2592000
    while end <= pstop:
        print(str(begin) + '<--->' + str(end))

        [week_p, short_p, long_p] = get_data_from_archive(msid, begin, end, glim, step=pstep)
        begin = end
        end   = begin + 2592000

        if end > pstop:
            [week_p, short_p, long_p] = get_data_from_archive(msid, begin, pstop, glim, step=pstep)
#
#--- bug fix: the original called update_fits_file unconditionally here,
#--- which fails when the fits file has not been created yet
#
            if os.path.isfile(fits):
                ecf.update_fits_file(fits, cols, short_p)
            else:
                ecf.create_fits_file(fits, cols, short_p)
            break

        if os.path.isfile(fits):
            ecf.update_fits_file(fits, cols, short_p)
        else:
            ecf.create_fits_file(fits, cols, short_p)
# Beispiel #5   (scrape artifact, commented out)
# 0
def get_data(start, stop, year, out_dir):
    """
    update sim flex offset data for the given data period
    input:  start   --- start time in seconds from 1998.1.1
            stop    --- stop time in seconds from 1998.1.1
            year    --- data extracted year
            out_dir --- output_directory
    output: <out_dir>/Comp_save/Compsimoffset/<msid>_full_data_<year>.fits
    """
#
#--- print() for python 3 (the original used the python 2 print statement)
#
    print(str(start) + '<-->' + str(stop))

    for msid in ['flexadif', 'flexbdif', 'flexcdif']:
#
#--- each offset is the difference between a temperature msid (msid_t)
#--- and a set point msid (msid_s)
#
        if msid == 'flexadif':
            msid_t = '3faflaat'
            msid_s = '3sflxast'

        elif msid == 'flexbdif':
            msid_t = '3faflbat'
            msid_s = '3sflxbst'
        else:
            msid_t = '3faflcat'
            msid_s = '3sflxcst'

        out = fetch.MSID(msid_t, start, stop)
        tdat1 = out.vals
        ttime = out.times
        out = fetch.MSID(msid_s, start, stop)
        tdat2 = out.vals

        tlen1 = len(tdat1)
        tlen2 = len(tdat2)
        if tlen1 == 0 or tlen2 == 0:
            continue
#
#--- if the lengths differ, pad the shorter array with its last value.
#--- bug fix: the original referenced the misspelled names 'tadt1'/'tadt2',
#--- raising NameError whenever padding was actually needed
#
        if tlen1 > tlen2:
            diff = tlen1 - tlen2
            for k in range(0, diff):
                tdat2 = numpy.append(tdat2, tdat2[-1])
        elif tlen1 < tlen2:
            diff = tlen2 - tlen1
            for k in range(0, diff):
                tdat1 = numpy.append(tdat1, tdat1[-1])

        ocols = ['time', msid]
        cdata = [ttime, tdat1 - tdat2]

        ofits = out_dir + msid + '_full_data_' + str(year) + '.fits'
        if os.path.isfile(ofits):
            ecf.update_fits_file(ofits, ocols, cdata)
        else:
            ecf.create_fits_file(ofits, ocols, cdata)
# Beispiel #6   (scrape artifact, commented out)
# 0
def get_data(start, stop, year, msid_list, out_dir):
    """
    update eph l1 related data for the given data period
    input:  start   --- start time in seconds from 1998.1.1
            stop    --- stop time in seconds from 1998.1.1
            year    --- data extracted year
            msid_list   --- list of msids
            out_dir --- output_directory
    output: <out_dir>/<msid>_full_data_<year>.fits
    """
    print(str(start) + '<-->' + str(stop))
#
#--- build the arc5gl command to retrieve ephin hk level 0 data
#
    line = 'operation=retrieve\n'
    line = line + 'dataset = flight\n'
    line = line + 'detector = ephin\n'
    line = line + 'level = 0\n'
    line = line + 'filetype =ephhk \n'
    line = line + 'tstart = ' + str(start) + '\n'
    line = line + 'tstop = ' + str(stop) + '\n'
    line = line + 'go\n'

    data_list = mcf.run_arc5gl_process(line)
#
#--- make sure the output directory exists (hoisted out of the loops;
#--- the original re-checked this once per msid per fits file)
#
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
#
#--- append the data to the local fits data files
#
    for fits in data_list:

        [cols, tbdata] = ecf.read_fits_file(fits)
#
#--- 'ttime' rather than 'time' so the time module is not shadowed
#
        ttime = tbdata['time']

        for col in msid_list:
            mdata = tbdata[col]
            cdata = [ttime, mdata]
            ocols = ['time', col.lower()]

            ofits = out_dir + col.lower() + '_full_data_' + str(year) + '.fits'
            if os.path.isfile(ofits):
                ecf.update_fits_file(ofits, ocols, cdata)
            else:
                ecf.create_fits_file(ofits, ocols, cdata)
#
#--- discard the downloaded fits file once its columns are absorbed
#
        mcf.rm_files(fits)
# Beispiel #7   (scrape artifact, commented out)
# 0
def get_data(start, stop, year, out_dir):
    """
    update acis electric power data for a given period
    input:  start   --- starting time in seconds from 1998.1.1
            stop    --- stopping time in seconds from 1998.1.1
            year    --- year of the data extracted
            out_dir --- output directory
    output: <out_dir>/1dppwra_full_data_<year>.fits, <out_dir>/1dppwrb_full_data_<year>.fits
    """
#
#--- print() for python 3 (the original used the python 2 print statement)
#
    print(str(start) + '<-->' + str(stop))

    for msid in ['1dppwra', '1dppwrb']:
#
#--- power is computed from a voltage msid and a current msid
#
        if msid == '1dppwra':
            msid_v = '1dp28avo'
            msid_a = '1dpicacu'
        else:
            msid_v = '1dp28bvo'
            msid_a = '1dpicbcu'

        out = fetch.MSID(msid_v, start, stop)
        tdat1 = out.vals
        ttime = out.times
        out = fetch.MSID(msid_a, start, stop)
        tdat2 = out.vals

        tlen1 = len(tdat1)
        tlen2 = len(tdat2)
        if tlen1 == 0 or tlen2 == 0:
            continue
#
#--- if the lengths differ, pad the shorter array with its last value.
#--- bug fix: the original referenced the misspelled names 'tadt1'/'tadt2',
#--- raising NameError whenever padding was actually needed
#
        if tlen1 > tlen2:
            diff = tlen1 - tlen2
            for k in range(0, diff):
                tdat2 = numpy.append(tdat2, tdat2[-1])
        elif tlen1 < tlen2:
            diff = tlen2 - tlen1
            for k in range(0, diff):
                tdat1 = numpy.append(tdat1, tdat1[-1])

        ocols = ['time', msid]
        cdata = [ttime, tdat1 * tdat2]

        ofits = out_dir + msid + '_full_data_' + str(year) + '.fits'
        if os.path.isfile(ofits):
            ecf.update_fits_file(ofits, ocols, cdata)
        else:
            ecf.create_fits_file(ofits, ocols, cdata)
def remove_old_data_from_fits(fits, cut):
    """
    remove old part of the data from fits file
    input:  fits    --- fits file name
            cut     --- cut date in seconds from 1998.1.1
    output: fits    --- updated fits file
    """
#
#--- open the fits file
#
    hbdata = pyfits.open(fits)
    data = hbdata[1].data
    cols = hbdata[1].columns
    col_list = cols.names
    hbdata.close()
#
#--- create a mask and keep only data newer than the cut time
#
    dtime = data['time']
    index = dtime > cut

    udata = []
    for col in col_list:
        udata.append(list(data[col][index]))
#
#--- save the old file aside, write the updated data, restore on failure
#
    sfits = fits + '~'
    cmd = 'mv ' + fits + ' ' + sfits
    os.system(cmd)
    try:
#
#--- bug fix: pass the column *names* (col_list); the other writers in this
#--- file hand ecf.create_fits_file a list of names, not a ColDefs object
#
        ecf.create_fits_file(fits, col_list, udata)
#
#--- NOTE(review): other blocks call mcf.rm_files (plural) --- confirm
#--- that mcf provides rm_file as well
#
        mcf.rm_file(sfits)
    except Exception:
        cmd = 'mv ' + sfits + ' ' + fits
        os.system(cmd)
# Beispiel #9   (scrape artifact, commented out)
# 0
def update_database(msid, group, glim, pstart=0, pstop=0, step=3600.0):
    """
    create a week-long fits data file of msid
    input:  msid    --- msid
            group   --- group name
            glim    --- g limit data
            pstart  --- starting time in seconds from 1998.1.1 (recomputed below)
            pstop   --- stopping time in seconds from 1998.1.1 (recomputed below)
            step    --- time interval of the short time data set; default 3600.0
    output: ./Outdir/<Group>/<msid>_week_data.fits
    """
#
#--- msids ending with 'tc' are stored under the name without the trailing 'c'
#
    test = str(msid[-2] + msid[-1]).lower()
    tmsid = msid[:-1] if test == 'tc' else msid

    cols = ['time', tmsid, 'med', 'std', 'min', 'max', 'ylower', 'yupper',
            'rlower', 'rupper', 'dcount',
            'ylimlower', 'ylimupper', 'rlimlower', 'rlimupper']
#
#--- data period: the two weeks ending yesterday, sampled at 300 sec
#
    stday  = time.strftime("%Y:%j:00:00:00", time.gmtime())
    pstop  = Chandra.Time.DateTime(stday).secs - 86400.0
    pstart = pstop - 86400.0 * 14.0

    pstep   = 300
    out_dir = './Outdir/' + group.capitalize() + '/'
#
#--- make sure that the sub directory exists
#
    if not os.path.isdir(out_dir):
        cmd = 'mkdir ' + out_dir
        os.system(cmd)

    fits3 = out_dir + tmsid + '_week_data.fits'
    print("FITS: " + str(fits3))
#
#--- the short-interval data set is the one written out here
#
    [week_p, short_p, long_p] = get_data_from_archive(msid, pstart, pstop, glim, step=pstep)

    ecf.create_fits_file(fits3, cols, short_p)
# Beispiel #10   (scrape artifact, commented out)
# 0
def create_inter_fits(msid, gdata):
    """
    update/create fits data files of msid
    input:  msid    --- msid
            gdata   --- a list of 15 lists related to the data (see cols below)
    output: <data_dir>/Interactive/<msid>_inter_data.fits
            ofits   --- the path of the created fits file
    """
    cols  = ['time', msid, 'med', 'std', 'min', 'max', 'ylower', 'yupper', 'rlower', 'rupper', 'dcount',\
             'ylimlower', 'ylimupper', 'rlimlower', 'rlimupper']

    out_dir = data_dir + 'Interactive/'
#
#--- make sure that the sub directory exists
#--- (os.makedirs replaces the original shell-out to 'mkdir')
#
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)

    ofits = out_dir + msid + '_inter_data.fits'
    ecf.create_fits_file(ofits, cols, gdata)

    return ofits


# NOTE(review): orphaned top-level statement --- 'fits', 'cols' and 'udata'
# are not defined at module scope here; this looks like residue of a truncated
# function from the scrape (cf. remove_old_data above). Confirm and remove
# or re-attach to its original function.
ecf.create_fits_file(fits, cols, udata)

#-------------------------------------------------------------------------------------------
#-- find_the_last_entry_time: find the last logged time                                   --
#-------------------------------------------------------------------------------------------

def find_the_last_entry_time(yesterday):
    """
    find the last entry date and then make a list of dates up to yesterday
    input:  yesterday   --- date of yesterday in the format of yyyymmdd
    output: otime       --- a list of date in the format of yyyymmdd
    """
#
#--- find the last entry date from the "testfits" file
#--- NOTE(review): the body appears truncated at this point in the scrape ---
#--- the opened file is never closed and nothing is returned in the visible
#--- code; recover the remainder from the original source before use
#
    f = pyfits.open(testfits)
# Beispiel #12   (scrape artifact, commented out)
# 0
def get_data(tstart, tstop, year, grad_list, out_dir):
    """
    update msid data of the groups in grad_list for the given data period
    input:  tstart  --- start time in seconds from 1998.1.1
            tstop   --- stop time in seconds from 1998.1.1
            year    --- the year in which data is extracted
            grad_list   --- a list of group names in grads
            out_dir --- output_directory
    output: <out_dir>/<Group>/<msid>_full_data_<year>.fits
    """
    print("Period: " + str(tstart) + '<-->' + str(tstop) + ' in Year: ' +
          str(year))
#
#--- extract each group data
#
    for group in grad_list:
        print(group)

        line = 'operation=retrieve\n'
        line = line + 'dataset = mta\n'
        line = line + 'detector = grad\n'
        line = line + 'level = 0.5\n'
        line = line + 'filetype = ' + group + '\n'
        line = line + 'tstart = ' + str(tstart) + '\n'
        line = line + 'tstop = ' + str(tstop) + '\n'
        line = line + 'go\n'

        data_list = mcf.run_arc5gl_process(line)
#
#--- guard against an empty extraction: the original indexed data_list[0]
#--- unconditionally and crashed when arc5gl returned nothing
#
        if len(data_list) == 0:
            continue
#
#--- read the first fits file and prep for the data list;
#--- skip the time column and the 'st_' (standard deviation) columns
#
        [cols, tbdata] = ecf.read_fits_file(data_list[0])
        col_list = []
        for ent in cols:
            if ent.lower() == 'time':
                continue
            mc = re.search('st_', ent.lower())
            if mc is not None:
                continue

            col_list.append(ent)

        mcf.rm_files(data_list[0])
        tdata = tbdata['time']
        mdata = []
        for col in col_list:
            mdata.append(tbdata[col])
#
#--- read the rest of the data
#
        clen = len(col_list)
        for k in range(1, len(data_list)):
            fits = data_list[k]
            [cols, tbdata] = ecf.read_fits_file(fits)
            tdata = numpy.append(tdata, tbdata['time'])

            for m in range(0, clen):
                cdata = tbdata[col_list[m]]
                mdata[m] = numpy.append(mdata[m], cdata)

            mcf.rm_files(fits)

        dout = out_dir + group.capitalize() + '/'

        if not os.path.isdir(dout):
            os.makedirs(dout)
#
#--- write out the data to fits file
#
        for k in range(0, clen):
            col = col_list[k]
            ocols = ['time', col.lower()]
            cdata = [tdata, mdata[k]]

            ofits = dout + col.lower() + '_full_data_' + str(year) + '.fits'

            if os.path.isfile(ofits):
                ecf.update_fits_file(ofits, ocols, cdata)
            else:
                ecf.create_fits_file(ofits, ocols, cdata)
#
#--- zip the fits file from the last year at the beginning of the year
#
        ecf.check_zip_possible(dout)
def create_msid_sun_angle_file(msid_list, inyear=''):
    """
    create sun angle - msid data fits file
    input:  msid_list   --- the name of the msid list file which lists msids and group ids
            inyear      --- the year in which you want to extract the data;
                            '' (default): this year (plus last year during the
                            first 4 days of a new year); 'all': 1999 to this year
    output: <data_dir>/<Group>/<Msid>/<msid>_sun_angle_<year>.fits
    """
#
#--- decide which years to process
#
    if inyear == '':
        tyear = int(float(time.strftime('%Y', time.gmtime())))
        tday = int(float(time.strftime('%j', time.gmtime())))
#
#--- early in a new year, also (re)process the previous year
#
        if tday < 5:
            year_list = [tyear - 1, tyear]
        else:
            year_list = [tyear]

    elif inyear == 'all':
        this_year = int(float(time.strftime("%Y", time.gmtime())))
        year_list = range(1999, this_year + 1)

    else:
        year_list = [inyear]

    [s_time, s_angle] = ecf.read_fits_col(sun_angle_file,
                                          ['time', 'sun_angle'])

    ifile = house_keeping + msid_list
    data = mcf.read_data_file(ifile)

    for ent in data:
#
#--- each line: <msid> <group>. check the comment marker first ---
#--- the original read atemp[1] before the '#' test and crashed
#--- (IndexError) on a one-token commented line
#
        atemp = re.split(r'\s+', ent)
        msid = atemp[0]
        mc = re.search('#', msid)
        if mc is not None:
            continue
        group = atemp[1]

        print("msid: " + msid)

        odir = data_dir + group.capitalize() + '/' + msid.capitalize()
        if not os.path.isdir(odir):
            os.makedirs(odir)

        mfile = data_dir + group.capitalize() + '/' + msid + '_data.fits'

        if not os.path.isfile(mfile):
            print("No data file found: " + str(mfile))
            continue

        try:
            [m_time, m_val, m_min,
             m_max] = ecf.read_fits_col(mfile, ['time', msid, 'min', 'max'])
        except Exception:
            print("Could not read: " + str(mfile))
            continue

        for year in year_list:

            print("Year: " + str(year))

            ofits = odir + '/' + msid + '_sun_angle_' + str(year) + '.fits'
            cols = ['sun_angle', msid, 'min', 'max']
            cdata = match_the_data(s_time, s_angle, m_time, m_val, m_min,
                                   m_max, year)

            ecf.create_fits_file(ofits, cols, cdata)
# Beispiel #14   (scrape artifact, commented out)
# 0
def update_database(msid,
                    group,
                    dtime,
                    data,
                    glim,
                    pstart=0,
                    pstop=0,
                    step=3600.0):
    """
    update/create fits data files of msid
    input:  msid    --- msid
            group   --- group name
            dtime   --- array of time
            data    --- array of data
            glim    --- g limit data
            pstart  --- starting time in seconds from 1998.1.1; default = 0 (find from the data)
            pstop   --- stopping time in seconds from 1998.1.1; default = 0 (find from the data)
            step    --- time interval of the short time data set; default 3600.0
                        (NOTE(review): not referenced in the visible body --- confirm)
    output: <msid>_data.fits, <msid>_short_data.fits, <msid>_week_data.fits
            wline   --- a string listing the fits files that failed to update
                        (empty string on full success)
    """

    cols  = ['time', msid, 'med', 'std', 'min', 'max', 'ylower',\
             'yupper', 'rlower', 'rupper', 'dcount', 'ylimlower',\
             'ylimupper', 'rlimlower', 'rlimupper', 'state']

    out_dir = data_dir + group + '/'
    #
    #--- make sure that the sub directory exists
    #
    if not os.path.isdir(out_dir):
        cmd = 'mkdir ' + out_dir
        os.system(cmd)

    fits = out_dir + msid + '_data.fits'
    fits2 = out_dir + msid + '_short_data.fits'
    fits3 = out_dir + msid + '_week_data.fits'
    #
    #-- if the starting time and stopping time are given, use them.
    #-- otherwise find from the data for the starting time and today's date -1 for the stopping time
    #
    if pstart == 0:
        stday = time.strftime("%Y:%j:00:00:00", time.gmtime())
        stday = Chandra.Time.DateTime(
            stday).secs - 86400.0  #--- set the ending to the day before
    else:
        stday = pstop

    mago = stday - 31536000.0  #--- a year ago
    mago2 = stday - 604800.0  #--- a week ago
    #
    #--- if the fits files already exist, append the data  --------------------
    #
    wline = ""

    if os.path.isfile(fits):
        #
        #--- extract data from archive one day at a time
        #
        if len(dtime) > 0:
            [week_p, short_p,
             long_p] = process_day_data(msid, dtime, data, glim)
            #
            #--- add to the data to the long term fits file
            #
            ecf.update_fits_file(fits, cols, long_p)
        else:
            week_p = []
            short_p = []
            long_p = []
#
#--- remove the older data from the short term fits file, then append the new data;
#--- if the trim-and-append fails, fall back to recreating the file from scratch
#
        try:
            remove_old_data(fits2, cols, mago)
            ecf.update_fits_file(fits2, cols, short_p)
        except:
            try:
                ecf.create_fits_file(fits2, cols, short_p)
            except:
                wline = wline + "Fail: short term: " + fits2 + '\n'
#
#--- remove the older data from the week long data fits file, then append the new data
#
        try:
            remove_old_data(fits3, cols, mago2)
            ecf.update_fits_file(fits3, cols, week_p)
        except:
            try:
                ecf.create_fits_file(fits3, cols, week_p)
            except:
                wline = wline + "Fail: week term: " + fits3 + '\n'
#
#--- if the fits files do not exist, create new ones ----------------------
#
    else:
        if pstart == 0:
            start = 48988799  #--- 1999:203:00:00:00
            stop = stday
        else:
            start = pstart
            stop = pstop
#
#--- one day step; a long term data
#
        if len(dtime) > 0:
            [week_p, short_p,
             long_p] = process_day_data(msid, dtime, data, glim)
            try:
                ecf.create_fits_file(fits, cols, long_p)
            except:
                wline = wline + "Fail: long term: " + fits + '\n'
#
#--- short term data: keep only the last year
#
            mago = stop - 31536000.0  #--- a year ago
            short_d = cut_the_data(short_p, mago)
            try:
                ecf.create_fits_file(fits2, cols, short_d)
            except:
                wline = wline + "Fail: short term: " + fits2 + '\n'
#
#
#--- week long data: keep only the last week
#
            mago = stop - 604800.0
            week_d = cut_the_data(week_p, mago)

            try:
                ecf.create_fits_file(fits3, cols, week_d)
            except:
                wline = wline + "Fail: week term: " + fits3 + '\n'

    return wline
def update_database(msid,
                    group,
                    dtime,
                    data,
                    glim,
                    pstart=0,
                    pstop=0,
                    step=3600.0):
    """
    update/create fits data files of msid
    input:  msid    --- msid
            group   --- group name
            dtime   --- array of time
            data    --- array of data
            glim    --- g limit data
            pstart  --- starting time in seconds from 1998.1.1; default = 0 (find from the data)
            pstop   --- stopping time in seconds from 1998.1.1; default = 0 (find from the data)
                        (NOTE(review): pstop is not referenced in the visible body)
            step    --- time interval of the short time data set; default 3600.0
                        (NOTE(review): the process_day_data calls hard-code step=3600.)
    output: <msid>_data.fits, <msid>_short_data.fits, <msid>_week_data.fits
    """

    cols  = ['time', msid, 'med', 'std', 'min', 'max', 'ylower', 'yupper',\
             'rlower', 'rupper', 'dcount', 'ylimlower', 'ylimupper', \
             'rlimlower', 'rlimupper', 'state']

    out_dir = data_dir + group + '/'
    #
    #--- make sure that the sub directory exists
    #
    if not os.path.isdir(out_dir):
        cmd = 'mkdir ' + out_dir
        os.system(cmd)

    fits = out_dir + msid + '_data.fits'
    fits2 = out_dir + msid + '_short_data.fits'
    fits3 = out_dir + msid + '_week_data.fits'
    #
    #-- the stopping time is always yesterday's date, regardless of pstart/pstop
    #
    stday = time.strftime("%Y:%j:00:00:00", time.gmtime())
    stday = Chandra.Time.DateTime(
        stday).secs - 86400.0  #--- set the ending to the day before

    mago = stday - 31536000.0  #--- a year ago
    mago2 = stday - 604800.0  #--- a week ago
    #
    #--- if the fits files already exist, append the data  --------------------
    #
    if os.path.isfile(fits):
        #
        #--- extract data from archive one day at a time
        #
        [week_p, short_p, long_p] = process_day_data(msid,
                                                     dtime,
                                                     data,
                                                     glim,
                                                     step=3600.)
        #
        #--- add to the data to the long term fits file
        #
        ecf.update_fits_file(fits, cols, long_p)

        #--- remove the older data from the short term fits file, then append the new data
        #--- NOTE(review): with the default pstart=0, 'mago <= pstart' is False
        #--- and the short term file is never touched --- confirm intended
        #
        if mago <= pstart:
            try:
                ecf.update_fits_file(fits2, cols, short_p)
            except:
                ecf.create_fits_file(fits2, cols, short_p)
#
#--- remove the older data from the week long data fits file, then append the new data
#
        try:
            ecf.update_fits_file(fits3, cols, week_p)
        except:
            ecf.create_fits_file(fits3, cols, week_p)
#
#--- if the fits files do not exist, create new ones ----------------------
#
    else:
        #
        #--- one day step; a long term data
        #
        [week_p, short_p, long_p] = process_day_data(msid,
                                                     dtime,
                                                     data,
                                                     glim,
                                                     step=3600.)
        ecf.create_fits_file(fits, cols, long_p)
        #
        #--- short term data (see NOTE above on the pstart condition)
        #
        if mago <= pstart:
            ecf.create_fits_file(fits2, cols, short_p)

#
#--- week long data
#
        ecf.create_fits_file(fits3, cols, week_p)
# Beispiel #16   (scrape artifact, commented out)
# 0
def get_data(start, stop, year, out_dir):
    """
    extract data and update the compgradkodak related data sets for the given period
    input:  start   --- start time in seconds from 1998.1.1
            stop    --- stop time in seconds from 1998.1.1
            year    --- the year in which data is extracted
            out_dir --- output_directory
    output: <out_dir>/<msid>_full_data_<year>.fits
    """
    empty = [0]
#
#--- extract  4rt*** data
#
    rt7 = []
    for k in range(0, 12):
        if k < 10:
            msid = '4rt70' + str(k) + 't'
        else:
            msid = '4rt7' + str(k) + 't'

        try:
            out = fetch.MSID(msid, start, stop)
            data = out.vals
            ttime = out.times
            tlist = list(ttime)
            rt7.append(data)
        except Exception:
            rt7.append(empty)
#
#--- extract 4rt575t separately
#--- NOTE(review): ttime/tlist are only bound when at least one 4rt fetch
#--- succeeds above; if all twelve fail, a NameError follows --- confirm
#
    out = fetch.MSID('4rt575t', start, stop)
    rt575 = out.vals
    #
    #--- create empty array and initialize ohrthr and oobthr lists
    #
    tlen = len(ttime)
    empty = numpy.zeros(tlen)
    ohrthr = [empty]
    oobthr = [empty]
    #
    #--- fill them up
    #
    for k in range(1, 65):
        if k < 10:
            msid = 'ohrthr0' + str(k)
        else:
            msid = 'ohrthr' + str(k)
        try:
            out = fetch.MSID(msid, start, stop)
            data = out.vals
            otime = out.times
            #
            #--- since 4rt arrays are 36 time dense, match the ohrthr and oobthr
            #--- by filling the gaps between
            #
            adata = fill_gaps(ttime, otime, data)

            ohrthr.append(adata)
        except Exception:
            ohrthr.append(empty)

        if k < 10:
            msid = 'oobthr0' + str(k)
        else:
            msid = 'oobthr' + str(k)
        try:
            out = fetch.MSID(msid, start, stop)
            data = out.vals
            otime = out.times

            adata = fill_gaps(ttime, otime, data)

            oobthr.append(adata)
        except Exception:
            oobthr.append(empty)
#
#--- now compute each quantity for the given time period
#
    hrmaavg = []
    hrmacav = []
    hrmaxgrd = []
    hrmaradgrd = []
    obaavg = []
    obaconeavg = []

    fwblkhdt = []
    aftblkhdt = []
    obaaxgrd = []

    mzobacone = []
    pzobacone = []
    obadiagrad = []

    hrmarange = []
    tfterange = []
    hrmastrutrnge = []
    scstrutrnge = []
    #
    #--- save time stamp separately for each data
    #
    t_hrmaavg = []
    t_hrmacav = []
    t_hrmaxgrd = []
    t_hrmaradgrd = []
    t_obaavg = []
    t_obaconeavg = []

    t_fwblkhdt = []
    t_aftblkhdt = []
    t_obaaxgrd = []

    t_mzobacone = []
    t_pzobacone = []
    t_obadiagrad = []

    t_hrmarange = []
    t_tfterange = []
    t_hrmastrutrnge = []
    t_scstrutrnge = []

    for k in range(0, tlen):
        out = compute_hrmaavg(ohrthr, k)
        if out != 'na':
            hrmaavg.append(out)
            t_hrmaavg.append(tlist[k])

#-------------------------
        out = compute_hrmacav(ohrthr, k)
        if out != 'na':
            hrmacav.append(out)
            t_hrmacav.append(tlist[k])
#-------------------------
        out = compute_hrmaxgrd(ohrthr, k)
        if out != 'na':
            hrmaxgrd.append(out)
            t_hrmaxgrd.append(tlist[k])
#------------------------
        out = compute_hrmaradgrd(ohrthr, k)
        if out != 'na':
            hrmaradgrd.append(out)
            t_hrmaradgrd.append(tlist[k])
#------------------------
        out = compute_obaavg(oobthr, k)
        if out != 'na':
            obaavg.append(out)
            t_obaavg.append(tlist[k])
#------------------------
        out = compute_obaconeavg(oobthr, k)
        if out != 'na':
            obaconeavg.append(out)
            t_obaconeavg.append(tlist[k])
#------------------------
        out = compute_fwblkhdt(oobthr, rt7, k)
        chk1 = 0
        if out != 'na':
            fwblkhdt.append(out)
            t_fwblkhdt.append(tlist[k])
            chk1 = 1
#------------------------
        out = compute_aftblkhdt(oobthr, k)
        chk2 = 0
        if out != 'na':
            aftblkhdt.append(out)
            t_aftblkhdt.append(tlist[k])
            chk2 = 1
#------------------------
#--- the axial gradient needs both fwblkhdt and aftblkhdt from this step
        if (chk1 == 1) and (chk2 == 1):
            out = compute_obaaxgrd(fwblkhdt[-1], aftblkhdt[-1])
            if out != 'na':
                obaaxgrd.append(out)
                t_obaaxgrd.append(tlist[k])
#------------------------
        out = compute_mzobacone(oobthr, rt575, k)
        chk1 = 0
        if out != 'na':
            mzobacone.append(out)
            t_mzobacone.append(tlist[k])
            chk1 = 1
#------------------------
        out = compute_pzobacone(oobthr, k)
        chk2 = 0
        if out != 'na':
            pzobacone.append(out)
            t_pzobacone.append(tlist[k])
            chk2 = 1
#------------------------
#--- the diagonal gradient needs both mzobacone and pzobacone from this step
        if (chk1 == 1) and (chk2 == 1):
            out = compute_obadiagrad(mzobacone[-1], pzobacone[-1])
            if out != 'na':
                obadiagrad.append(out)
                t_obadiagrad.append(tlist[k])
#------------------------
        out = compute_hrmarange(ohrthr, k)
        if out != 'na':
            hrmarange.append(out)
            t_hrmarange.append(tlist[k])
#------------------------
        out = compute_tfterange(oobthr, k)
        if out != 'na':
            tfterange.append(out)
            t_tfterange.append(tlist[k])
#------------------------
        out = compute_hrmastrutrnge(oobthr, k)
        if out != 'na':
            hrmastrutrnge.append(out)
            t_hrmastrutrnge.append(tlist[k])
#------------------------
        out = compute_scstrutrnge(oobthr, k)
        if out != 'na':
            scstrutrnge.append(out)
            t_scstrutrnge.append(tlist[k])
#
#--- now create/update output fits files.
#--- bug fix: an explicit name --> data table replaces the original
#--- 'exec "odata = %s" % (col)' statements, which are Python-2-only
#--- syntax (SyntaxError under Python 3)
#
    data_sets = [('hrmaavg', t_hrmaavg, hrmaavg),
                 ('hrmacav', t_hrmacav, hrmacav),
                 ('hrmaxgrd', t_hrmaxgrd, hrmaxgrd),
                 ('hrmaradgrd', t_hrmaradgrd, hrmaradgrd),
                 ('obaavg', t_obaavg, obaavg),
                 ('obaconeavg', t_obaconeavg, obaconeavg),
                 ('fwblkhdt', t_fwblkhdt, fwblkhdt),
                 ('aftblkhdt', t_aftblkhdt, aftblkhdt),
                 ('obaaxgrd', t_obaaxgrd, obaaxgrd),
                 ('mzobacone', t_mzobacone, mzobacone),
                 ('pzobacone', t_pzobacone, pzobacone),
                 ('obadiagrad', t_obadiagrad, obadiagrad),
                 ('hrmarange', t_hrmarange, hrmarange),
                 ('tfterange', t_tfterange, tfterange),
                 ('hrmastrutrnge', t_hrmastrutrnge, hrmastrutrnge),
                 ('scstrutrnge', t_scstrutrnge, scstrutrnge)]

    for col, tdata, odata in data_sets:

        tdata = numpy.array(tdata)
        odata = numpy.array(odata)

        cdata = [tdata, odata]
        cols = ['time', col]

        fits = out_dir + col + '_full_data_' + str(year) + '.fits'
        if os.path.isfile(fits):
            ecf.update_fits_file(fits, cols, cdata)
        else:
            ecf.create_fits_file(fits, cols, cdata)
# Beispiel #17   (scrape artifact, commented out)
# 0
def extract_hrcelec_data(start, stop, mago1, mago2, mlen):
    """
    extract hrc electronics data and update long/short/week term fits files
    input:  start   --- start time of the extraction period
            stop    --- stop time of the extraction period
            mago1   --- cut time for the short term data
            mago2   --- cut time for the week long data
            mlen    --- number of msids in msid_list to process
    output: <dpath>/<msid>_data.fits, <msid>_short_data.fits, <msid>_week_data.fits
    """
#
#--- extract hrchk fits files and combine them into one file for the day
#
    flist     = extract_hrchk(start, stop)
    outfits   = 'comb_data.fits'
    comb_fits = ecf.combine_fits(flist, outfits)

    def _save(ofits, cdata, cols):
        #--- append to an existing fits file, or create a new one
        if os.path.isfile(ofits):
            ecf.update_fits_file(ofits, cols, cdata)
        else:
            ecf.create_fits_file(ofits, cols, cdata)
#
#--- work on each msid
#
    for pos in range(mlen):
        msid = msid_list[pos]
        cols = ['time', msid, 'med', 'std', 'min', 'max', 'ylower',\
                'yupper', 'rlower', 'rupper', 'dcount', 'ylimlower',\
                'ylimupper', 'rlimlower', 'rlimupper']
#
#--- long term data (one day interval)
#
        _save(dpath + msid + '_data.fits', get_stat(comb_fits, msid), cols)
#
#--- short term data (one hour interval); then drop entries older than mago1
#
        ofits = dpath + msid + '_short_data.fits'
        _save(ofits, get_stat_interval(comb_fits, msid, 3600.0), cols)
        try:
            uds.remove_old_data(ofits, cols, mago1)
        except:
            pass
#
#--- week long data (five minute interval); then drop entries older than mago2
#
        ofits = dpath + msid + '_week_data.fits'
        _save(ofits, get_stat_interval(comb_fits, msid, 300.0), cols)
        try:
            uds.remove_old_data(ofits, cols, mago2)
        except:
            pass

    mcf.rm_files('comb_data.fits')
Beispiel #18
0
def get_data(start, stop, year, msid_list, out_dir):
    """
    update eph l1 related data for the given data period
    input:  start   --- start time in seconds from 1998.1.1
            stop    --- stop time in seconds from 1998.1.1
            year    --- data extracted year
            msid_list   --- list of msids
            out_dir --- output_directory
    output: <out_dir>/<msid>_full_data_<year>.fits
    """
    print(str(start) + '<-->' + str(stop))
#
#--- build an arc5gl command script to retrieve ephhk level 0 fits files
#
    line = 'operation=retrieve\n'
    line = line + 'dataset = flight\n'
    line = line + 'detector = ephin\n'
    line = line + 'level = 0\n'
    line = line + 'filetype =ephhk \n'
    line = line + 'tstart = ' + str(start) + '\n'
    line = line + 'tstop = '  + str(stop)  + '\n'
    line = line + 'go\n'

    with open(zspace, 'w') as fo:
        fo.write(line)
#
#--- run arc5gl; fall back to the simul copy if the ska copy fails
#
    try:
        cmd = ' /proj/sot/ska/bin/arc5gl  -user isobe -script ' + zspace + '> ztemp_out'
        os.system(cmd)
    except:
        cmd = ' /proj/axaf/simul/bin/arc5gl -user isobe -script ' + zspace + '> ztemp_out'
        os.system(cmd)

    mcf.rm_file(zspace)
#
#--- the first line of the arc5gl output is a header; drop it
#
    data_list = ecf.read_file_data('ztemp_out', remove=1)
    data_list = data_list[1:]
#
#--- make sure that the output directory exists (loop invariant: check once)
#
    if not os.path.isdir(out_dir):
        cmd = 'mkdir ' + out_dir
        os.system(cmd)
#
#--- append the data to the local fits data files
#
    for fits in data_list:

        [cols, tbdata] = ecf.read_fits_file(fits)

        time  = tbdata['time']

        for col in msid_list:
#
#--- msid_list is expected not to contain "ST_" (standard dev) or time columns
#
            mdata = tbdata[col]
            cdata = [time, mdata]
            ocols = ['time', col.lower()]

            ofits = out_dir + col.lower() + '_full_data_' + str(year) + '.fits'
            if os.path.isfile(ofits):
                ecf.update_fits_file(ofits, ocols, cdata)
            else:
                ecf.create_fits_file(ofits, ocols, cdata)

        mcf.rm_file(fits)
Beispiel #19
0
def create_long_term_dea_data(dhead, group, drange):
    """
    convert week time rdb data files into a long term data fits files
    input:  dhead   --- data file name header; yearly files are '<dhead><year>.rdb'
            group   --- group name
            drange  --- deahk data number list
    output: <data_dir>/deahk<#>_data.fits
            return value: 'New data added' or 'No new data'
    """
#
#--- find today's date in seconds from 1998.1.1
#
    today = time.strftime("%Y:%j:00:00:00", time.gmtime())
    atemp = re.split(':', today)
    tyear = int(atemp[0])
    today = Chandra.Time.DateTime(today).secs
#
#--- set names; the deahk numbers may not be continuous
#
    name_list = []
    for k in drange:
        dname = 'deahk' + str(k)
        name_list.append(dname)
#
#--- how many dea entries
#
    ntot = len(drange)
#
#--- check the last entry date; default to 1999 (52185599 sec) if nothing is found
#
    efits = data_dir + group + '/' + name_list[0] + '_data.fits'

    if os.path.isfile(efits):
        ltime = ecf.find_the_last_entry_time(efits)
        try:
            ltime = find_starting_of_the_day(ltime)
#
#--- use the .date string of the DateTime object; splitting the object
#--- itself always raised and incorrectly forced the "create new file" path
#
            out = Chandra.Time.DateTime(ltime).date
            atemp = re.split(':', out)
            syear = int(atemp[0])
            lchk = 1
        except:
            ltime = 52185599.0
            syear = 1999
            lchk = 0
    else:
        ltime = 52185599.0
        syear = 1999
        lchk = 0
#
#--- read data; one rdb file per year, accumulating daily averages in dlist
#
    fchk = 0
    for pyear in range(syear, tyear + 1):
        dfile = dhead + str(pyear) + '.rdb'
        data = mcf.read_data_file(dfile)
#
#--- starting time and how many columns in the data
#
        atemp = re.split(r'\s+', data[0])
        tot = len(atemp)
        start = float(atemp[0])
#
#--- initialize once at the beginning of the loop
#
        if fchk == 0:
            dlist = []            #--- lists of daily averages of each column
            for k in range(0, tot):
                dlist.append([])
            fchk = 1

        dsum = []                 #--- sums of each column for the current day bin
        for k in range(0, tot):
            dsum.append(0)

        chk = 0
        cnt = 0
        ntime = ltime + 86400.0
#
#--- skip the day bins that end before the first entry of this file
#
        while ntime < start:
            ltime = ntime
            ntime = ltime + 86400.0
        tlist = []

        for ent in data:
            atemp = re.split(r'\s+', ent)
            ftime = float(atemp[0])
            if ftime >= ltime:

                chk += 1
                if ftime < ntime and len(atemp) == tot:
                    tlist.append(ftime)
                    for k in range(0, tot):
                        dsum[k] += float(atemp[k])
#
#--- NOTE(review): cnt is incremented once per column, not once per row, so
#--- dsum[k] / cnt below is the row average divided by the column count.
#--- kept as-is for data continuity --- confirm whether this is intended.
#
                        cnt += 1
                else:
                    if cnt == 0 or len(tlist) == 0:
                        ltime = ntime
                        ntime = ltime + 86400.0
                        continue
#
#--- take mid point for the time and take averages for the other quantities
#
                    dlist[0].append(tlist[int(0.5 * len(tlist))])
                    for k in range(1, tot):

                        dlist[k].append(dsum[k] / cnt)
                        dsum[k] = 0

                    ltime = ntime
                    ntime = ltime + 86400.0
                    tlist = []
                    cnt = 0
#
#--- if no new data, stop
#
    if chk == 0:
        return 'No new data'
#
#--- each fits file has 15 entries, but a half of them are dummy entries;
#--- each msid owns 5 consecutive data columns (dlist[mstart:mstop])
#
    mstop = 1
    for k in range(0, ntot):
        msid = name_list[k]

        print('MSID:  ' + msid)

        fits = data_dir + group + '/' + msid + '_data.fits'
        cols  = ['time', msid, 'med', 'std', 'min', 'max', 'ylower',\
                 'yupper', 'rlower', 'rupper', 'dcount',\
                 'ylimlower', 'ylimupper', 'rlimlower', 'rlimupper', 'state']

        mstart = mstop
        mstop = mstart + 5
#
#--- the following quantities are not in the database; add default values
#
        tlen = len(dlist[0])
        ylr = [0] * tlen
        yur = [0] * tlen
        rlr = [0] * tlen
        rur = [0] * tlen
        yl = [-9e9] * tlen
        yu = [9e9] * tlen
        rl = [-9e9] * tlen
        ru = [9e9] * tlen
        dc = [-999] * tlen
        state = ['none'] * tlen

        cdata = [dlist[0]]
        cdata = cdata + dlist[mstart:mstop] + [
            ylr, yur, rlr, rur, dc, yl, yu, rl, ru, state
        ]
#
#--- create a new fits file when there was no previous data
#
        if lchk == 0:
            ecf.create_fits_file(fits, cols, cdata)
#
#--- otherwise append to the existing fits file
#
        else:
            ecf.update_fits_file(fits, cols, cdata)

    return 'New data added'
Beispiel #20
0
def create_dea_fits_file(dhead, group, period, drange):
    """
    convert week and short time rdb data files into fits files
    input:  dhead   --- data file name header; '<dhead>.rdb' for short data,
                        yearly '<dhead><year>.rdb' files for week data
            group   --- group name
            period  --- '_week' or '_short'
            drange  --- deahk data number list
    output: <data_dir>/deahk<#>_week_data.fits
            <data_dir>/deahk<#>_short_data.fits
            return value: a short status message string
    """
    #
    #--- find today's date in seconds from 1998.1.1
    #
    today = time.strftime("%Y:%j:00:00:00", time.gmtime())
    atemp = re.split(':', today)
    tyear = int(atemp[0])
    today = Chandra.Time.DateTime(today).secs
    #
    #--- how many dea entries
    #
    ntot = len(drange)
    #
    #--- set names; the deahk numbers may not be continuous
    #
    name_list = []
    for k in drange:
        dname = 'deahk' + str(k)
        name_list.append(dname)
#
#--- set data period: only entries newer than the cut time are kept
#
    cut = 0
    if period == '_short':
        cut = today - 31536000.0  #--- a year ago
    elif period == '_week':
        cut = today - 604800.0  #--- a week ago (604800 sec)

    lcut = Chandra.Time.DateTime(cut).date
    atemp = re.split(':', lcut)
    lyear = int(atemp[0])
    #
    #--- read data: one cumulative file for short, per-year files for week
    #
    if period == '_short':
        dfile = dhead + '.rdb'
        data = mcf.read_data_file(dfile)

    elif period == '_week':
        data = []
        for pyear in range(lyear, tyear + 1):
            dfile = dhead + str(pyear) + '.rdb'
            out = mcf.read_data_file(dfile)
            data = data + out
#
#--- how many columns in the data
#
    tot = len(re.split('\s+', data[0]))
    #
    #--- separate each column into a list; entries stay as strings
    #
    dlist = []
    for k in range(0, tot):
        dlist.append([])

    chk = 0
    for ent in data:
        atemp = re.split('\s+', ent)
        #
        #--- keep only rows newer than the cut time with a full set of columns
        #
        if float(atemp[0]) > cut and len(atemp) == tot:
            chk += 1
            for k in range(0, tot):
                dlist[k].append(atemp[k])
#
#--- if no new data, stop
#
    if chk == 0:
        if period == '_short':
            nline = 'No new data for Short term'
        else:
            nline = 'No new data for Week data'
        return nline
#
#--- each fits file has 15 entries, but a half of them are dummy entries;
#--- each msid owns 5 consecutive data columns (dlist[mstart:mstop])
#
    mstop = 1
    for k in range(0, ntot):
        msid = name_list[k]

        print("MSID: " + msid)

        fits = data_dir + group + '/' + msid + period + '_data.fits'
        cols  = ['time', msid, 'med', 'std', 'min', 'max', 'ylower',\
                 'yupper', 'rlower', 'rupper', 'dcount',\
                 'ylimlower', 'ylimupper', 'rlimlower', 'rlimupper', 'state']

        mstart = mstop
        mstop = mstart + 5
        #
        #--- the following quantities are not in the database; add default values
        #
        tlen = len(dlist[0])
        ylr = [0] * tlen
        yur = [0] * tlen
        rlr = [0] * tlen
        rur = [0] * tlen
        yl = [-9e9] * tlen
        yu = [9e9] * tlen
        rl = [-9e9] * tlen
        ru = [9e9] * tlen
        dc = [-999] * tlen
        state = ['none'] * tlen

        cdata = [dlist[0]]
        cdata = cdata + dlist[mstart:mstop] + [
            ylr, yur, rlr, rur, dc, yl, yu, rl, ru, state
        ]
        #
        #--- remove the old fits file and create a fresh one (no appending)
        #
        cmd = 'chmod 766 ' + fits
        os.system(cmd)
        cmd = 'rm -rf ' + fits
        os.system(cmd)

        ecf.create_fits_file(fits, cols, cdata)

    if period == '_short':
        nline = 'short data fits file update'
    else:
        nline = 'week data fits file update'

    return nline
Beispiel #21
0
def create_dea_fits_file(dfile, group, period, drange):
    """
    convert week and short time rdb data files into fits files
    input:  dfile   --- data file name
            group   --- group name
            period  --- '_week' or '_short'
            drange  --- deahk data number list
    output: <data_dir>/deahk<#>_week_data.fits
            <data_dir>/deahk<#>_short_data.fits
            return value: 'New data added' or 'No new data'
    """
#
#--- find today's date in seconds from 1998.1.1
#
    today = time.strftime("%Y:%j:00:00:00", time.gmtime())
    today = tcnv.axTimeMTA(today)
#
#--- set names; the deahk numbers may not be continuous
#
    name_list = []
    for k in drange:
        dname = 'deahk' + str(k)
        name_list.append(dname)
#
#--- how many dea entries
#
    ntot = len(drange)
#
#--- set the data cut time: a year ago for short term; no cut otherwise
#
    cut = 0
    if period == '_short':
        cut = today - 31536000.0  #--- a year ago
#
#--- read data
#
    with open(dfile, 'r') as f:
        data = [line.strip() for line in f.readlines()]
#
#--- how many columns in the data
#
    tot = len(re.split(r'\s+', data[0]))
#
#--- separate each column into a list; entries stay as strings
#
    dlist = []
    for k in range(0, tot):
        dlist.append([])

    chk = 0
    for ent in data:
        atemp = re.split(r'\s+', ent)
#
#--- keep only rows newer than the cut time with a full set of columns
#
        if float(atemp[0]) > cut and len(atemp) == tot:
            chk += 1
            for k in range(0, tot):
                dlist[k].append(atemp[k])
#
#--- if no new data, stop
#
    if chk == 0:
        return 'No new data'
#
#--- each fits file has 15 entries, but a half of them are dummy entries;
#--- each msid owns 5 consecutive data columns (dlist[mstart:mstop])
#
    mstop = 1
    for k in range(0, ntot):
        msid = name_list[k]

        print("MSID: " + msid)

        fits = data_dir + group + '/' + msid + period + '_data.fits'
        cols  = ['time', msid, 'med', 'std', 'min', 'max', 'ylower', 'yupper', 'rlower', 'rupper', 'dcount',\
                         'ylimlower', 'ylimupper', 'rlimlower', 'rlimupper']

        mstart = mstop
        mstop = mstart + 5
#
#--- the following quantities are not in the database; add default values
#
        tlen = len(dlist[0])
        ylr = [0] * tlen
        yur = [0] * tlen
        rlr = [0] * tlen
        rur = [0] * tlen
        yl = [-9e9] * tlen
        yu = [9e9] * tlen
        rl = [-9e9] * tlen
        ru = [9e9] * tlen
        dc = [-999] * tlen

        cdata = [dlist[0]]
        cdata = cdata + dlist[mstart:mstop] + [
            ylr, yur, rlr, rur, dc, yl, yu, rl, ru
        ]
#
#--- remove the old fits file and create a fresh one (no appending)
#
        cmd = 'rm -rf ' + fits
        os.system(cmd)

        ecf.create_fits_file(fits, cols, cdata)

    return 'New data added'
Beispiel #22
0
def create_long_term_dea_data(dfile, group, drange):
    """
    convert week time rdb data files into a long term data fits files
    input:  dfile   --- data file name
            group   --- group name
            drange  --- deahk data number list
    output: <data_dir>/deahk<#>_data.fits
            return value: 'New data added' or 'No new data'
    """
#
#--- set names; the deahk numbers may not be continuous
#
    name_list = []
    for k in drange:
        dname = 'deahk' + str(k)
        name_list.append(dname)
#
#--- how many dea entries
#
    ntot = len(drange)
#
#--- check the last entry date; start from the beginning if nothing is found
#
    efits = data_dir + group + '/' + name_list[0] + '_data.fits'

    if os.path.isfile(efits):
        ltime = udfs.find_the_last_entry_time(efits)
        try:
            ltime = find_starting_of_the_day(ltime) + 86400.0
            lchk = 1
        except:
            ltime = 0.0
            lchk = 0
    else:
        ltime = 0.0
        lchk = 0
#
#--- read data
#
    with open(dfile, 'r') as f:
        data = [line.strip() for line in f.readlines()]
#
#--- starting time and how many columns in the data
#
    atemp = re.split(r'\s+', data[0])
    tot = len(atemp)
    start = float(atemp[0])
#
#--- separate each column into a list
#
    dlist = []    #--- lists of daily averages of each column
    dsum = []     #--- sums of each column for the current day bin
    for k in range(0, tot):
        dlist.append([])
        dsum.append(0)

    chk = 0
    cnt = 0
    ntime = ltime + 86400.0
#
#--- skip the day bins that end before the first entry of the file
#
    while ntime < start:
        ltime = ntime
        ntime = ltime + 86400.0
    tlist = []

    for ent in data:
        atemp = re.split(r'\s+', ent)
        ftime = float(atemp[0])
        if ftime >= ltime:

            chk += 1
            if ftime < ntime and len(atemp) == tot:
                tlist.append(ftime)
                for k in range(0, tot):
                    dsum[k] += float(atemp[k])
#
#--- NOTE(review): cnt is incremented once per column, not once per row, so
#--- dsum[k] / cnt below is the row average divided by the column count.
#--- kept as-is for data continuity --- confirm whether this is intended.
#
                    cnt += 1
            else:
                if cnt == 0 or len(tlist) == 0:
                    ltime = ntime
                    ntime = ltime + 86400.0
                    continue
#
#--- take mid point for the time and take averages for the other quantities
#
                dlist[0].append(tlist[int(0.5 * len(tlist))])
                for k in range(1, tot):

                    dlist[k].append(dsum[k] / cnt)
                    dsum[k] = 0

                ltime = ntime
                ntime = ltime + 86400.0
                tlist = []
                cnt = 0
#
#--- if no new data, stop
#
    if chk == 0:
        return 'No new data'
#
#--- each fits file has 15 entries, but a half of them are dummy entries;
#--- each msid owns 5 consecutive data columns (dlist[mstart:mstop])
#
    mstop = 1
    for k in range(0, ntot):
        msid = name_list[k]

        print('MSID:  ' + msid)

        fits = data_dir + group + '/' + msid + '_data.fits'
        cols  = ['time', msid, 'med', 'std', 'min', 'max', 'ylower', 'yupper', 'rlower', 'rupper', 'dcount',\
                         'ylimlower', 'ylimupper', 'rlimlower', 'rlimupper']

        mstart = mstop
        mstop = mstart + 5
#
#--- the following quantities are not in the database; add default values
#
        tlen = len(dlist[0])
        ylr = [0] * tlen
        yur = [0] * tlen
        rlr = [0] * tlen
        rur = [0] * tlen
        yl = [-9e9] * tlen
        yu = [9e9] * tlen
        rl = [-9e9] * tlen
        ru = [9e9] * tlen
        dc = [-999] * tlen

        cdata = [dlist[0]]
        cdata = cdata + dlist[mstart:mstop] + [
            ylr, yur, rlr, rur, dc, yl, yu, rl, ru
        ]
#
#--- create a new fits file when there was no previous data
#
        if lchk == 0:
            ecf.create_fits_file(fits, cols, cdata)
#
#--- otherwise append to the existing fits file
#
        else:
            ecf.update_fits_file(fits, cols, cdata)

    return 'New data added'
def gratgen_categorize_data():
    """
    separate gratgen data into different categories
    input: none but use <data_dir>/Gratgen/*.fits and the <start> <stop>
           interval list files <house_keeping>/<category>
    output: <data_dir>/Gratgen/<Category>/*.fits (long/short/week data)
    """
    #
    #--- get the basic information
    #
    [udict, ddict, mta_db, mta_cross] = ecf.get_basic_info_dict()

    for msid in msid_list:
        cols = ['time', msid, 'med', 'std', 'min', 'max', 'ylower',\
                'yupper', 'rlower', 'rupper', 'dcount', 'ylimlower',\
                'ylimupper', 'rlimlower', 'rlimupper']
        #
        #--- limit information for this msid; passed to get_stat below
        #
        glim = ecf.get_limit(msid, 0, mta_db, mta_cross)

        for category in cname_list:
            print("Running: " + str(msid) + '<-->' + category)
            #
            #--- long, short, and week term output fits files
            #
            cfile1 = data_dir + 'Gratgen/' + category.capitalize(
            ) + '/' + msid + '_data.fits'
            cfile2 = data_dir + 'Gratgen/' + category.capitalize(
            ) + '/' + msid + '_short_data.fits'
            cfile3 = data_dir + 'Gratgen/' + category.capitalize(
            ) + '/' + msid + '_week_data.fits'
            #
            #--- cut times used when appending: keep all / a year / 10 days
            #
            stday = time.strftime("%Y:%j:00:00:00", time.gmtime())
            tcut1 = 0.0
            tcut2 = Chandra.Time.DateTime(
                stday).secs - 31622400.0  #--- a year ago
            tcut3 = Chandra.Time.DateTime(
                stday).secs - 864000.0  #--- 10 days ago
            #
            #--- find the last entry time of the existing long term data
            #
            if os.path.isfile(cfile1):
                tchk = ecf.find_the_last_entry_time(cfile1)
            else:
                tchk = 0
            #
            #--- read the <start> <stop> interval list for this category and
            #--- keep only the intervals that begin after the last recorded entry
            #
            ifile = house_keeping + category
            data = mcf.read_data_file(ifile)
            start = []
            stop = []
            for ent in data:
                atemp = re.split('\s+', ent)
                val1 = float(atemp[0])
                val2 = float(atemp[1])
                if val1 > tchk:
                    start.append(val1)
                    stop.append(val2)

            if len(start) == 0:
                continue
            #
            #--- fetch the msid values of each interval and concatenate them;
            #--- intervals shorter than 300 sec are widened first
            #
            for k in range(0, len(start)):
                diff = stop[k] - start[k]
                if diff < 300:
                    start[k] -= 100
                    stop[k] = start[k] + 300.

                data = fetch.MSID(msid, start[k], stop[k])

                if k == 0:
                    ttime = list(data.times)
                    tdata = list(data.vals)
                else:
                    ttime = ttime + list(data.times)
                    tdata = tdata + list(data.vals)

            if len(ttime) == 0:
                continue
            #
            #--- statistics at one day / one hour / five minute resolution
            #
            stat_out1 = get_stat(ttime, tdata, glim, 86400.0)
            stat_out2 = get_stat(ttime, tdata, glim, 3600.0)
            stat_out3 = get_stat(ttime, tdata, glim, 300.0)
            #
            #--- append when the fits files already exist; otherwise create new ones
            #
            if tchk > 0:
                ecf.update_fits_file(cfile1, cols, stat_out1, tcut=tcut1)
                ecf.update_fits_file(cfile2, cols, stat_out2, tcut=tcut2)
                ecf.update_fits_file(cfile3, cols, stat_out3, tcut=tcut3)
            else:
                ecf.create_fits_file(cfile1, cols, stat_out1, tcut=tcut1)
                ecf.create_fits_file(cfile2, cols, stat_out2, tcut=tcut2)
                ecf.create_fits_file(cfile3, cols, stat_out3, tcut=tcut3)
def update_database(msid,
                    group,
                    dtime,
                    data,
                    glim,
                    pstart=0,
                    pstop=0,
                    step=3600.0):
    """
    update/create fits data files of msid
    input:  msid    --- msid
            group   --- group name (sub directory name under data_dir)
            dtime   --- a list of time stamps of the data
            data    --- a list of data values
            glim    --- limit information passed to process_day_data
            pstart  --- starting time in seconds from 1998.1.1; default = 0 (find from the data)
            pstop   --- stopping time in seconds from 1998.1.1; default = 0 (find from the data)
            step    --- time interval of the short time data set; default 3600.0
    output: <msid>_data.fits, <msid>_short_data.fits, <msid>_week_data.fits
    """
    out_dir = data_dir + group.capitalize() + '/'
#
#--- make sure that the sub directory exists
#
    if not os.path.isdir(out_dir):
        cmd = 'mkdir ' + out_dir
        os.system(cmd)
#
#--- a trailing "tc" msid drops the last letter to form the data file name
#
    mc = re.search('tc', msid[-2:])
    if mc is not None:
        msidl = msid[:-1]
    else:
        msidl = msid

    cols  = ['time', msidl, 'med', 'std', 'min', 'max', 'ylower', 'yupper', 'rlower', 'rupper', 'dcount',\
             'ylimlower', 'ylimupper', 'rlimlower', 'rlimupper']

    fits = out_dir + msidl + '_data.fits'
    fits2 = out_dir + msidl + '_short_data.fits'
    fits3 = out_dir + msidl + '_week_data.fits'
#
#--- if the starting time and stopping time are given, use them.
#--- otherwise use today's date -1 day for the stopping time
#
    if pstart == 0:
        stday = time.strftime("%Y:%j:00:00:00", time.gmtime())
        stday = Chandra.Time.DateTime(
            stday).secs - 86400.0  #--- set the ending to the day before
    else:
        stday = pstop

    mago = stday - 31536000.0  #--- a year ago
    mago2 = stday - 604800.0  #--- a week ago
#
#--- if the fits files already exist, append the data  --------------------
#
    if os.path.isfile(fits):
#
#--- compute week/short/long term statistics of the new data
#
        [week_p, short_p, long_p] = process_day_data(msid,
                                                     dtime,
                                                     data,
                                                     glim,
                                                     step=step)
#
#--- add to the data to the long term fits file
#
        ecf.update_fits_file(fits, cols, long_p)
#
#--- remove the older data from the short term fits file, then append the new data
#
        try:
            udfs.remove_old_data(fits2, cols, mago)
            ecf.update_fits_file(fits2, cols, short_p)
        except:
            pass
#
#--- remove the older data from the week long data fits file, then append the new data
#
        udfs.remove_old_data(fits3, cols, mago2)
        ecf.update_fits_file(fits3, cols, week_p)
#
#--- if the fits files do not exist, create new ones ----------------------
#
    else:
        if pstart == 0:
            start = 48988799  #--- 1999:203:00:00:00
            stop = stday
        else:
            start = pstart
            stop = pstop
#
#--- one day step; a long term data
#
        [week_p, short_p, long_p] = process_day_data(msid,
                                                     dtime,
                                                     data,
                                                     glim,
                                                     step=step)
        try:
            ecf.create_fits_file(fits, cols, long_p)
        except:
            pass
#
#--- short term data: keep only the last year
#
        mago = stop - 31536000.0  #--- a year ago
        short_d = udfs.cut_the_data(short_p, mago)
        try:
            ecf.create_fits_file(fits2, cols, short_d)
        except:
            pass
#
#--- week long data: a zero cut time keeps the entire data set
#
        mago = 0.0
        week_d = udfs.cut_the_data(week_p, mago)

        try:
            ecf.create_fits_file(fits3, cols, week_d)
        except:
            pass
Beispiel #25
0
def extract_data():
    """
    extract hrc electronics statistics between 2017:05 and 2018:05 and
    create/update the long and short term fits files under Results/
    """
    mlen = len(msid_list)
    clen = 15
    chk = 0

    #for year in range(1999, 2018):
    for year in range(2017, 2019):
        for month in range(1, 13):
#
#--- the processing period runs from 2017 month 5 through 2018 month 5
#
            if year == 2017 and month < 5:
                continue
            if year == 2018 and month < 6:
                break

            if year == 2017 and month >= 5:  #---- short term start
                chk = 1

            print("Period: " + str(year) + ': ' + str(month))
#
#--- darray[msid index][column index] collects one month of daily stats
#
            darray = [[[] for _ in range(clen)] for _ in range(mlen)]
#
#--- the last day of the month; check a leap year
#
            if mcf.is_leapyear(year):
                lday = mday2[month - 1]
            else:
                lday = mday1[month - 1]

            lmon = str(month).zfill(2)

            for day in range(1, lday + 1):
#
#--- note: the first four days of each month are skipped
#
                if day < 5:
                    continue

                dstr = str(day).zfill(2)
#
#--- extract fits files for one day
#
                begin = str(year) + '-' + lmon + '-' + dstr + 'T00:00:00'
                end = str(year) + '-' + lmon + '-' + dstr + 'T23:59:59'

                flist = extract_hrchk(begin, end)
#
#--- combine extracted fits files
#
                comb_fits = combine_fits(flist)
#
#--- work on each msid
#
                for k in range(mlen):
                    msid = msid_list[k]
                    cdata = get_stat(comb_fits, msid)

                    for m in range(clen):
                        darray[k][m].append(cdata[m])
#
#--- short term data (past one year)
#
                    if chk > 0:
                        ddata = get_stat_short(comb_fits, msid)
                        cols  = ['time', msid, 'med', 'std', 'min', 'max', 'ylower',\
                                'yupper', 'rlower', 'rupper',\
                                'dcount', 'ylimlower', 'ylimupper', 'rlimlower', 'rlimupper']

                        ofits = 'Results/' + msid + '_short_data.fits'

                        if os.path.isfile(ofits):
                            ecf.update_fits_file(ofits, cols, ddata)
                        else:
                            ecf.create_fits_file(ofits, cols, ddata)
#
#--- for a long term data, create once a month
#
            for k in range(mlen):
                msid = msid_list[k]
                cols  = ['time', msid, 'med', 'std', 'min', 'max', 'ylower', 'yupper',\
                         'rlower', 'rupper', 'dcount', 'ylimlower', 'ylimupper',\
                         'rlimlower', 'rlimupper']

                cdata = darray[k]
                ofits = 'Results/' + msid + '_data.fits'

                if os.path.isfile(ofits):
                    ecf.update_fits_file(ofits, cols, cdata)
                else:
                    ecf.create_fits_file(ofits, cols, cdata)

        cmd = 'rm -f comb_data.fits'
        os.system(cmd)
def dea_full_data_update(chk):
    """
    update deahk search database
    input:  chk --- whether to request full data update: chk == 1:yes
    output: <deposit_dir>/Deahk_save/<group>/<msid>_full_data_<year>.fits
    """
    tyear = int(float(time.strftime("%Y", time.gmtime())))
#
#--- find all week data fits files
#
    cmd = 'ls ' + data_dir + 'Deahk_*/*_week_data.fits > ' + zspace
    os.system(cmd)
    data = mcf.read_data_file(zspace, remove=1)

    for ent in data:
        atemp = re.split('\/', ent)
        group = atemp[-2]
        btemp = re.split('_', atemp[-1])
        msid = btemp[0]
        print("MSID: " + str(msid) + ' in ' + group)

        [cols, tbdata] = ecf.read_fits_file(ent)
#
#--- use a local name other than "time" so the time module is not shadowed
#
        ttime = tbdata['time']
        tdata = tbdata[msid]
        cols = ['time', msid]
#
#--- regular data update
#
        if chk == 0:
#
#--- normal daily data update  (os.path.isfile: was mistyped "os.pathisfile")
#
            ofits = deposit_dir + 'Deahk_save/' + group + '/' + msid + '_full_data_'
            ofits = ofits + str(tyear) + '.fits'
            if os.path.isfile(ofits):
                ltime = ecf.find_the_last_entry_time(ofits)
#
#--- convert the year boundary to seconds; the time column is numeric and
#--- must not be compared against a date string
#
                ctime = Chandra.Time.DateTime(str(tyear + 1) + ':001:00:00:00').secs
                nchk = 0
#
#--- if the data is over the year boundary, fill up the last year and create
#--- a new one for the new year
#
            else:
                ofits = deposit_dir + 'Deahk_save/' + group + '/' + msid
                ofits = ofits + '_full_data_' + str(tyear - 1) + '.fits'
                nfits = deposit_dir + 'Deahk_save/' + group + '/' + msid
                nfits = nfits + '_full_data_' + str(tyear) + '.fits'

                ltime = ecf.find_the_last_entry_time(ofits)
                ctime = Chandra.Time.DateTime(str(tyear) + ':001:00:00:00').secs
                nchk = 1

            select = (ttime > ltime) & (ttime < ctime)
            stime = ttime[select]
            sdata = tdata[select]
            cdata = [stime, sdata]
            ecf.update_fits_file(ofits, cols, cdata)

            if nchk > 0:
                select = ttime >= ctime
                stime = ttime[select]
                sdata = tdata[select]
                cdata = [stime, sdata]
                ecf.create_fits_file(nfits, cols, cdata)
#
#--- start from the beginning (year 1999)
#
        else:
            for year in range(1999, tyear + 1):
                tstart = str(year) + ':001:00:00:00'
                tstart = Chandra.Time.DateTime(tstart).secs
                tstop = str(year + 1) + ':001:00:00:00'
                tstop = Chandra.Time.DateTime(tstop).secs

                select = (ttime >= tstart) & (ttime < tstop)
                stime = ttime[select]
                sdata = tdata[select]
                cdata = [stime, sdata]

                out = deposit_dir + 'Deahk_save/' + group + '/'
                if not os.path.isdir(out):
                    cmd = 'mkdir ' + out
                    os.system(cmd)  #--- previously the mkdir was built but never run

                out = out + msid + '_full_data_' + str(year) + '.fits'

                ecf.create_fits_file(out, cols, cdata)
def update_database(msid, group, glim, pstart, pstop, year, sdata=''):
    """
    update/create the yearly fits data file of msid under <data_dir>/Eleak/
    input:  msid    --- msid
            group   --- group name (currently unused; kept for call compatibility)
            glim    --- limit table handed to uds.process_day_data
            pstart  --- starting time in seconds from 1998.1.1
            pstop   --- stopping time in seconds from 1998.1.1
            year    --- year used in the output fits file name
            sdata   --- data column to be added as an independent data set;
                        '' (default) selects the tephin case
    output: <data_dir>/Eleak/<Msid>/<msid>_data<year>.fits
            returns the stat data column (week_p[1]) in the tephin case
    """
#
#--- column name lists; the statistics columns are always keyed to tephin
#
    cols  = ['time', 'tephin', 'med', 'std', 'min', 'max', 'ylower',\
             'yupper', 'rlower', 'rupper', 'dcount',\
             'ylimlower', 'ylimupper', 'rlimlower', 'rlimupper']

    cols2 = ['time', 'tephin',  msid, 'med', 'std', 'min', 'max',\
             'ylower', 'yupper', 'rlower', 'rupper',\
             'dcount', 'ylimlower', 'ylimupper', 'rlimlower', 'rlimupper']
#
#--- make sure that the sub directory exists
#
    out_dir = data_dir + 'Eleak/' + msid.capitalize() + '/'
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)

    fits = out_dir + msid + '_data' + str(year) + '.fits'
#
#--- extract the data and compute the daily statistics
#--- (only the first element of process_day_data's return is needed)
#
    out   = fetch.MSID(msid, pstart, pstop)
    tdata = out.vals
    ttime = out.times

    week_p = uds.process_day_data(msid, ttime, tdata, glim)[0]
#
#--- tephin case
#
    if sdata == '':
        if os.path.isfile(fits):
            ecf.update_fits_file(fits, cols, week_p)
        else:
            ecf.create_fits_file(fits, cols, week_p)

        return week_p[1]
#
#--- all other msids: insert sdata as its own column, padded (by repeating the
#--- last value) or trimmed to match the stat column length.
#--- fix: build a new list instead of appending to the caller's sdata in place
#
    else:
        slen = len(sdata)
        clen = len(week_p[1])
        if slen < clen:
            column = list(sdata) + [sdata[-1]] * (clen - slen)
        elif slen > clen:
            column = sdata[:clen]
        else:
            column = sdata

        new = week_p[:1] + [column] + week_p[1:]

        if os.path.isfile(fits):
            ecf.update_fits_file(fits, cols2, new)
        else:
            ecf.create_fits_file(fits, cols2, new)
#--- Beispiel #28
#--- 0
def update_hrcelec_data_hrchk():
    """
    update hrcelec data from archive data
    input:  none but read from data fits files from <data_dir>/Hrcelec/
    output: updated fits data files for each msid in msid_list:
            <dpath><msid>_data.fits        --- long term (one day interval)
            <dpath><msid>_short_data.fits  --- last year (one hour interval)
            <dpath><msid>_week_data.fits   --- last week (five minute interval)
    """
#
#--- set data cutting times
#
    stday = time.strftime("%Y:%j:00:00:00", time.gmtime())
    stday = Chandra.Time.DateTime(
        stday).secs - 86400.0  #--- set the ending to the day before
    mago1 = stday - 31536000.0  #--- a year ago
    mago2 = stday - 604800.0  #--- a week ago
#
#--- set the data extraction period to start on the day after the last extracted date
#
    [start, stop] = find_starting_date()
    print("Period: " + start + '<-->' + stop)  #--- fix: python3 print function
#
#--- extract hrchk fits files
#
    flist = extract_hrchk(start, stop)
#
#--- combine extracted fits files for the day
#
    outfits = 'comb_data.fits'
    comb_fits = ecf.combine_fits(flist, outfits)
#
#--- work on each msid
#
    for msid in msid_list:
        cols  = ['time', msid, 'med', 'std', 'min', 'max', 'ylower', 'yupper', 'rlower', 'rupper',\
            'dcount', 'ylimlower', 'ylimupper', 'rlimlower', 'rlimupper']
#
#--- long term data (one day interval)
#
        ofits = dpath + msid + '_data.fits'
        cdata = get_stat(comb_fits, msid)

        if os.path.isfile(ofits):
            ecf.update_fits_file(ofits, cols, cdata)
        else:
            ecf.create_fits_file(ofits, cols, cdata)
#
#--- short term data (one hour interval)
#
        ofits = dpath + msid + '_short_data.fits'
        cdata = get_stat_interval(comb_fits, msid, 3600.0)

        if os.path.isfile(ofits):
            ecf.update_fits_file(ofits, cols, cdata)
        else:
            ecf.create_fits_file(ofits, cols, cdata)
#
#--- remove data older than a year (best effort: a fresh file may have nothing to trim)
#
        try:
            uds.remove_old_data(ofits, cols, mago1)
        except Exception:
            pass
#
#--- week long data (five minute interval)
#
        ofits = dpath + msid + '_week_data.fits'
        cdata = get_stat_interval(comb_fits, msid, 300.0)

        if os.path.isfile(ofits):
            ecf.update_fits_file(ofits, cols, cdata)
        else:
            ecf.create_fits_file(ofits, cols, cdata)
#
#--- remove data older than a week (best effort)
#
        try:
            uds.remove_old_data(ofits, cols, mago2)
        except Exception:
            pass
#
#--- clean up the combined temporary fits file
#
    mcf.rm_file('comb_data.fits')
#--- Beispiel #29
#--- 0
def create_full_dea_data(dhead, group, drange, pyear):
    """
    convert week time rdb data files into long term data fits files
    input:  dhead   --- data file name header
            group   --- group name
            drange  --- deahk data number list
            pyear   --- year to create the fits data for
    output: <deposit_dir>/Deahk_save/<group>/deahk<#>_full_data_<pyear>.fits
            returns False when the rdb file holds no data
    """
#
#--- set msid names; the numbers in drange may not be continuous
#
    name_list = ['deahk' + str(k) for k in drange]
#
#--- how many dea entries
#
    ntot = len(drange)
#
#--- read data
#
    dfile = dhead + str(pyear) + '.rdb'
    data = mcf.read_data_file(dfile)
    if len(data) < 1:
        print("No data")  #--- fix: message typo ("No Ddata")
        return False
#
#--- how many columns in the data (taken from the first line)
#
    tot = len(re.split(r'\s+', data[0]))
#
#--- separate each column into a list, skipping short (broken) lines
#
    dlist = [[] for _ in range(tot)]
    for ent in data:
        atemp = re.split(r'\s+', ent)
        if len(atemp) < tot:
            continue

        for k in range(tot):
            dlist[k].append(float(atemp[k]))
#
#--- write one fits file per deahk msid; dlist[0] is the time column
#
    odir = deposit_dir + 'Deahk_save/' + group
    cmd = 'mkdir -p ' + odir
    os.system(cmd)

    for k in range(ntot):
        msid = name_list[k]
        print('MSID:  ' + msid)

        fits = odir + '/' + msid + '_full_data_' + str(pyear) + '.fits'
        cols = ['time', msid]
        cdata = [dlist[0], dlist[k + 1]]

        ecf.create_fits_file(fits, cols, cdata)