Example #1
def clean_up_stat_lists():
    """
    sort and clean up the statistics related files
    input:  none, but read from <data_dir>/Stats/*
    output: cleaned up data files
    """

    for part in ('*dead*', '*stat_results*', '*status*'):
#
#--- make a list of related file names
#
        cmd  = 'ls ' + data_dir + 'Stats/' + part + '  > ' + zspace
        os.system(cmd)

        slist = hcf.read_file_data(zspace, remove=1)
#
#--- update each file
#
        for lname in slist:
            data = hcf.read_file_data(lname)
#
#--- dead time file
#
            if part == '*dead*':
                update_ddate_list(data, lname)
#
#--- two others
#
            else:
                update_ldate_list(data, lname)
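The 'ls ... > zspace' / hcf.read_file_data(zspace, remove=1) idiom above, which most of the examples below also use, shells out only to collect file names. A minimal glob-based sketch of the same listing, assuming the module-level data_dir, hcf, update_ddate_list, and update_ldate_list from the example above:

import glob

def clean_up_stat_lists_glob():
    """
    same loop as above, but listing files with glob instead of 'ls > tempfile'
    """
    for part in ('*dead*', '*stat_results*', '*status*'):
#
#--- make a list of related file names without shelling out
#
        slist = sorted(glob.glob(data_dir + 'Stats/' + part))

        for lname in slist:
            data = hcf.read_file_data(lname)

            if part == '*dead*':
                update_ddate_list(data, lname)
            else:
                update_ldate_list(data, lname)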
Example #2
def remove_duplicate_stat_entries():
    """
    remove duplicated stat entries from stat files
    input:  none, but read from <inst>_stat_results
    output: cleaned up <inst>_stat_results
    """
#
#--- clean stat data table
#
    cmd = 'ls ' + stat_dir + '*stat_results > ' + zspace
    os.system(cmd)

    data = hcf.read_file_data(zspace, remove=1)
    
    for sfile in data:
        hcf.remove_duplicate_from_file(sfile)
#
#--- clean dead time table
#    
    cmd = 'ls ' + stat_dir + '*dead_time  > ' + zspace
    os.system(cmd)

    data = hcf.read_file_data(zspace, remove=1)
    
    for sfile in data:
        hcf.remove_duplicate_by_column(sfile, 0)
Example #3
def status_bit_report(year, month):
    """
    create status bit statistics tables
    input:  year    --- year  in yyyy form
            month   --- month in digit form
    output: hrc_status_stat_<time stamp> --- bit statistics of the corresponding evt1 file
            <inst name>_status_stat      --- bit statistics of all evt1 files of the <inst name>
    """

    #cmd  = 'ls ' +  data_dir + str(year) + str(m_list[month-1]) + '/*/hrcf*evt1.fits.gz > ' + zspace
    cmd  = 'ls ' +  data_dir + str(year) + str(m_list[month-1]) + '/*/* > ' + zspace
    os.system(cmd)

    data = hcf.read_file_data(zspace, remove=1)

    for efile in data:
        mc  = re.search('evt1.fits.gz', efile)
        if mc is None:
            continue
#
#--- find the output file names and the date of the data
#
        atemp = re.split('hrcf', efile)
        btemp = re.split('N', atemp[1])
        iname = atemp[0] + 'hrc_status_stat_' + str(btemp[0])  #--- name of the individual bits stat output
    
        atemp = re.split('\/', efile)
        hrc_n = stat_dir +  atemp[-2] + '_status_stat'          #--- name of the cumulative data file
        btemp = re.split('hrcf', atemp[-1])
        ctemp = re.split('N', btemp[1])
        date  = hcf.covertfrom1998sec2(int(float(ctemp[0])))

        run_status_bits(efile, iname, hrc_n, date)
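The string slicing in status_bit_report is easier to follow on a concrete path. The file name below is made up; only the hrcf<seconds from 1998.1.1>N<version> naming convention is taken from the code above:

import re

efile = '/data/hrc/2012JAN/hrc_i_115/hrcf453168000N001_evt1.fits.gz'   #--- hypothetical path

atemp = re.split('hrcf', efile)     #--- ['/data/hrc/2012JAN/hrc_i_115/', '453168000N001_evt1.fits.gz']
btemp = re.split('N', atemp[1])     #--- ['453168000', '001_evt1.fits.gz']
iname = atemp[0] + 'hrc_status_stat_' + str(btemp[0])
#--- iname: '/data/hrc/2012JAN/hrc_i_115/hrc_status_stat_453168000'

atemp = re.split('\/', efile)
hrc_n = atemp[-2] + '_status_stat'  #--- 'hrc_i_115_status_stat' (stat_dir is prepended in the function)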
Example #4
def read_condition(cfile):
    """
    read the data extraction condition file and also create the output fits file name
    input:  cfile       --- condition file name
    output: condition   --- a list of lists containing column name and the value range
            fits        --- output fits file name
    """
#
#--- read a condition file
#
    ifile = house_keeping + 'Selection_coditions/' +  cfile
    data  = hcf.read_file_data(ifile)

    condition = []
    for ent in data:
        if ent[0] == '#':
            continue

        atemp = re.split('=', ent)
        condition.append(atemp)
#
#--- create output fits file name
#
    test  = str(cfile)
    test2 = test[-2:]           #--- check the last two characters

    if test2 == '_1' or test2 == '_2':
        test = test[:-2]

    fits = test + '.fits'

    return [condition, fits]
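read_condition therefore expects one column=value_range entry per line, skips '#' comment lines, and derives the fits name from the condition file name. An illustration with invented content; the file name and the two conditions below are hypothetical:

#--- hypothetical contents of <house_keeping>/Selection_coditions/hrc_i_115_1:
#---
#---    # selection conditions for hrc i
#---    tscpos=-99616:-20000
#---    2shldart=0:20
#---
[condition, fits] = read_condition('hrc_i_115_1')
#--- condition: [['tscpos', '-99616:-20000'], ['2shldart', '0:20']]
#--- fits:      'hrc_i_115.fits'   (the trailing '_1' is dropped before '.fits' is appended)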
Example #5
def hrc_i_param(start):
    """
    set parameters for hrc i case
    input:  start   --- start time in second from 1998.1.1
    output: [ampsatfile, ampsfcorfile, badpixfile, degapfile, evtflatfile, hypfile, obsfile, tapfile, gainfile]
    """
    ampsatfile = calib_dir + 'sattest/hrciD1999-07-22sattestN0002.fits'
    ampsfcorfile = calib_dir + 'amp_sf_cor/hrciD1999-07-22amp_sf_corN0001.fits'
    badpixfile = house_keeping + 'hrcf10702_000N001_bpix1.fits'
    degapfile = calib_dir + 'gaplookup/hrciD1999-07-22gaplookupN0004.fits'
    evtflatfile = calib_dir + 'eftest/hrciD1999-07-22eftestN0001.fits'
    hypfile = calib_dir + 'fptest/hrciD1999-07-22fptestN0003.fits'
    tapfile = calib_dir + 'tapringtest/hrciD1999-07-22tapringN0002.fits'
    obsfile = house_keeping + 'obs.par'
    #
    #--- read gain selection list
    #
    infile = house_keeping + 'Gain_files/gain_selection'
    data = hcf.read_file_data(infile)
    #
    #--- select gain file name
    #
    for ent in data:
        atemp = re.split('\s+', ent)
        begin = int(float(atemp[0]))
        end = int(float(atemp[1]))

        if start >= begin and start < end:
            gainfile = calib_dir + 'gmap/' + atemp[2]
            break

    return [
        ampsatfile, ampsfcorfile, badpixfile, degapfile, evtflatfile, hypfile,
        obsfile, tapfile, gainfile
    ]
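The gain lookup in hrc_i_param assumes each line of <house_keeping>/Gain_files/gain_selection carries a start time, an end time (1.0e12 on the open-ended last row, as written by update_gain_selection_file in Example #10), and a gain map file name, separated by whitespace. The times and file names below are invented for illustration:

import re

#--- hypothetical gain_selection rows: start, end (seconds from 1998.1.1), gain map file
data = ['0\t1.57e8\thrciD1999-07-22sampgainN0001.fits',
        '1.57e8\t1.0e12\thrciD2003-01-01sampgainN0002.fits']

start = 2.0e8
for ent in data:
    atemp = re.split('\s+', ent)
    begin = int(float(atemp[0]))
    end   = int(float(atemp[1]))

    if start >= begin and start < end:
        gainfile = 'gmap/' + atemp[2]       #--- calib_dir prefix dropped for the illustration
        break

#--- gainfile: 'gmap/hrciD2003-01-01sampgainN0002.fits'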
Example #6
def create_cumulative_image():
    """
    create normalized combined image fits files
    input:  none but read from each directory
    output: combined image fits files, e.g. hrc_i_115_total_norm.fits.gz
            it also creates png files
    """

    for hdir in ['Hrc_i_115', 'Hrc_s_125_hi', 'Hrc_s_125']:

        head = hdir.lower()
        #
        #--- hrc_s_125 has three parts
        #
        if hdir == 'Hrc_s_125':
            p_list = ['1', '2', '3']
        else:
            p_list = ['']

        for part in p_list:
            #
            #--- lev 1 image
            #
            cmd = 'ls ' + data_dir + hdir + '/' + head + '*_norm' + part + '.fits.gz > ' + zspace
            os.system(cmd)
            #
            #--- exclude the previous combined image files (with "_total_") and instrument maps (with "_instmap_")
            #
            tlist = hcf.read_file_data(zspace, remove=1)
            slist = [x for x in tlist if 'total' not in x]
            flist = [x for x in slist if 'instmap' not in x]
            add_image_fits_data(flist, part)
            #
            #--- lev 2 image
            #
            cmd = 'ls ' + data_dir + hdir + '/' + head + '_lev2*_norm' + part + '.fits.gz > ' + zspace
            os.system(cmd)

            tlist = hcf.read_file_data(zspace, remove=1)
            flist = [x for x in tlist if 'total' not in x]
            add_image_fits_data(flist, part)
#
#--- create map png files and histogram png files
#
    hpm.plot_hrc_map('total')
Example #7
def copy_data():
    """
    copy stat results files and data files from the original location to html data directory
    input: original data in <data_dir>/Stats/ and <data_dir>/<hrc inst>/*fits.gz
    output: stat result files and data files in <html_dir>/Data/...
    """
    #
    #--- copy stat files
    #
    cmd = 'cp ' + data_dir + 'Stats/* ' + html_dir + 'Data/Stats/.'
    os.system(cmd)
    #
    #--- copy data fits files
    #
    for hdir in ['Hrc_i_115', 'Hrc_s_125', 'Hrc_s_125_hi']:

        odir = data_dir + hdir + '/'
        hdir = html_dir + 'Data/' + hdir + '/'

        cmd = 'ls ' + odir + '*.fits.gz > ' + zspace
        os.system(cmd)
        data = hcf.read_file_data(zspace, remove=1)
        #
        #--- remove a directory path to the file listed
        #
        data1 = get_the_last_part(data)

        cmd = 'ls ' + hdir + '*.fits.gz > ' + zspace
        os.system(cmd)
        data = hcf.read_file_data(zspace, remove=1)
        data2 = get_the_last_part(data)
        #
        #--- check which files are not in the html data directory
        #
        missing = list(set(data1) - set(data2))
        #
        #--- copy only the missing files
        #
        for ent in missing:
            cmd = 'cp ' + odir + '/' + ent + ' ' + hdir + '/.'
            os.system(cmd)
Example #8
    def test_find_latest_file(self):

        infile = house_keeping + '/Gain_files/gain_selection'
        dlist = hcf.read_file_data(infile)

        [ofile, stime] = find_latest_file(dlist)

        print "FILE NAME: " + ofile + '<--->' + str(stime)

        update_gain_selection_file()

        glist = get_gain_file_list()

        self.assertEquals(glist[-1], ofile)
Example #9
def get_evt1_list(year, hdir):
    """
    make a list of evt1 files for a year and a specified instrument
    input:  year    --- year of the data
            hdir    --- instrument name, e.g., hrc_i_115
    output: out     --- a list of the names of evt1 files with a full path
    """

    cmd = 'ls ' + data_dir + str(
        year) + '*/' + hdir + '/hrcf*evt1.fits* > ' + zspace
    os.system(cmd)

    out = hcf.read_file_data(zspace, remove=1)

    return out
Example #10
def update_gain_selection_file():
    """
    check whether a new gain file is added. if so, update gain_selection file
    input:  none, but read from <calib_dir>
    output: updated <house_keeping>/Gain_files/gain_selection
            also a copy of the gain file
    """
    #
    #--- read gain selection list
    #
    infile = house_keeping + 'Gain_files/gain_selection'
    data = hcf.read_file_data(infile)
    #
    #--- read gain file list from <calib_dir>
    #
    gdata = get_gain_file_list()
    #
    #--- check whether a new file is added. if so, go further
    #
    if len(gdata) > len(data):
        [dfile, dtime] = find_latest_file(data)
        [gfile, gtime] = find_latest_file(gdata)

        if gtime > dtime:
            fo = open(infile, 'w')
            for ent in data[:-1]:
                fo.write(ent)
                fo.write('\n')
#
#--- update the last line
#
            atemp = re.split('\s+', data[-1])
            line = atemp[0] + '\t' + str(gtime) + '\t' + atemp[2] + '\n'
            fo.write(line)
            #
            #--- and add the new line
            #
            line = str(gtime) + "\t1.0e12\t\t" + gfile + "\n"
            fo.write(line)

            fo.close()
            #
            #--- copy the file into Gain_files directory
            #
            cmd = 'cp ' + calib_dir + 'gmap/' + gfile + ' ' + house_keeping + 'Gain_files/. '
            os.system(cmd)
            cmd = 'gzip -df ' + house_keeping + 'Gain_files/*.gz'
            os.system(cmd)
Example #11
def test_data_creation(lyear, lmonth):
    """
    check whether any data files are created
    input:  lyear   --- year of the data
            lmonth  --- month of the data
    output: True or False
    """

    lmonth = tcnv.changeMonthFormat(lmonth)
    lmonth = lmonth.upper()
    cmd = 'ls ' + data_dir + str(lyear) + str(lmonth) + '/*/*evt1* > ' + zspace
    os.system(cmd)
    data = hcf.read_file_data(zspace, remove=1)
    if len(data) > 0:
        return True
    else:
        return False
Example #12
def read_dead_time_list(dead_list_name, tyear):
    """
    read the dead time list and return dead time corrected exposure times
    input:  dead_list_name      --- the name of the dead time list
            tyear               --- current year
    output: [y_list, e_list]    --- a list of years and a list of dead time corrected exposure times
    """

    y_list = []
    e_list = []

    ifile = data_dir + 'Stats/' + dead_list_name
    data = hcf.read_file_data(ifile)

    #
    #--- accumulate dead time corrected exposure time for each year
    #
    prev = 2000
    esum = 0
    for ent in data:
        atemp = re.split('\s+', ent)
        btemp = re.split('-', atemp[1])
        year = int(float(btemp[0]))

        try:
            val = int(float(atemp[4]))
        except:
            continue

        if year == prev:
            esum += val
        else:
            y_list.append(prev)
            dval = locale.format("%d", esum, grouping=True)
            e_list.append(dval)
            prev = year
            esum = val
#
#--- save the last entry year data (it can be a partial year)
#
    if val != 0:
        y_list.append(prev)
        dval = locale.format("%d", esum, grouping=True)
        e_list.append(dval)

    return [y_list, e_list]
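read_dead_time_list therefore assumes the second column of the dead time list starts with the year (split on '-') and the fifth column holds the dead time corrected exposure; the yearly sums come back as strings with thousands grouping. A tiny worked illustration with invented rows (the grouping assumes a locale with ',' separators has been set):

#--- hypothetical dead time list rows; only the 2nd and 5th columns matter here:
#---
#---    63072000    2000-01-05    1500    1450    35000
#---    65000000    2000-03-02    1800    1760    40000
#---    94694400    2001-01-03    1000     980    20000
#---
#--- read_dead_time_list('hrc_i_115_dead_time', 2001) would then return
#---    y_list: [2000, 2001]
#---    e_list: ['75,000', '20,000']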
Example #13
def get_dead_time(hdir, year, sec=''):
    """
    find the total exposure time for the year for a given instrument from the dead_time list
    input:  hdir    --- the name of the instrument, e.g., hrc_i_115
            year    --- year of the data
            sec     --- section of the chip; used only in hrc_s_125
    output: asum    --- total exposure time in seconds
    """

    if sec == '':
        efile = stat_dir + hdir + '_dead_time'
    else:
        efile = stat_dir + hdir + '_sec' + str(sec) + '_dead_time'

    data = hcf.read_file_data(efile)
    #
    #--- set the time span to be Jan 1 to Dec 31
    #
    start = str(year) + ':001:00:00:00'
    start = tcnv.axTimeMTA(start)

    stop = str(year + 1) + ':001:00:00:00'
    stop = tcnv.axTimeMTA(stop)
    #
    #--- add exposure time of the year
    #
    asum = 0.0
    for ent in data:
        atemp = re.split('\s+', ent)
        if atemp[0] == 'inf':
            continue

        stime = float(atemp[0])
        if stime < start:
            continue
        elif stime > stop:
            break
        try:
            val = float(atemp[4])
        except:
            continue

        asum += val

    return asum
Example #14
def get_gain_file_list():
    """
    read gain file list from <calib_dir>
    input:  none
    output: gdata   --- a list of gain files
    """

    cmd = 'ls ' + calib_dir + 'gmap/hrci*sampgain*fits > ' + zspace
    os.system(cmd)
    adata = hcf.read_file_data(zspace, remove=1)
    #
    #--- get only file name
    #
    gdata = []
    for ent in adata:
        atemp = re.split('hrciD', ent)
        name = 'hrciD' + atemp[1]
        gdata.append(name)

    return gdata
Example #15
def read_stat_data(infile):
    """
    read a stat data file and return a list of lists of data
    input:  infile    --- input file name (with a full path)
    output:  0: time            --- time in fractional year
             1: tstart          --- start time in seconds from 1998.1.1
             2: tstop           --- stop time in seconds from 1998.1.1
             3: duration        --- duration in seconds
             4: total_count     --- total counts
             5: count_per_sec   --- counts per second
             6: pha_mean        --- pha mean
             7: pha_median      --- pha median
             8: pha_sigma       --- pha sigma
             9: t_mean          --- total count rate mean
            10: t_median        --- total count rate median
            11: t_sigma         --- total count rate sigma
            12: v_mean          --- valid count rate mean
            13: v_median        --- valid count rate median
            14: v_sigma         --- valid count rate sigma
            15: s_mean          --- shield count rate mean
            16: s_median        --- shield count rate median
            17: s_sigma         --- shield count rate sigma
            18: anti_co_mean    --- anti-coincidence rate mean
            19: anti_co_median  --- anti-coincidence rate median
            20: anti_co_sigma   --- anti-coincidence rate sigma
            21: s2hvst          --- s2hvst value
            22: s2hvlv          --- s2hvlv value
            23: scint           --- scint 
            24: scint_std       --- scint sigma
    """

    data = hcf.read_file_data(infile)

    date = []
    tstart = []
    tstop = []
    duration = []
    total_count = []
    count_per_sec = []
    pha_mean = []
    pha_median = []
    pha_sigma = []
    t_mean = []
    t_median = []
    t_sigma = []
    v_mean = []
    v_median = []
    v_sigma = []
    s_mean = []
    s_median = []
    s_sigma = []
    anti_co_mean = []
    anti_co_median = []
    anti_co_sigma = []
    s2hvst = []
    s2hvlv = []
    scint = []
    scint_std = []
    time = []

    for ent in data:
        aval = re.split('\s+', ent)
        aval3 = float(aval[3])
        #
        #--- if the observation interval is shorter than 900 sec, drop the data set.
        #--- these data sets are not accurate enough.
        #
        if aval3 < 900:
            continue
        else:

            try:
                date.append(aval[0])
                start = float(aval[1])
                tstart.append(start)
                tstop.append(float(aval[2]))
                duration.append(aval3)
                total_count.append(float(aval[4]))
                count_per_sec.append(float(aval[5]))
                pha_mean.append(float(aval[6]))
                pha_median.append(float(aval[7]))
                pha_sigma.append(float(aval[8]))
                t_mean.append(float(aval[9]))
                t_median.append(float(aval[10]))
                t_sigma.append(float(aval[11]))
                v_mean.append(float(aval[12]))
                v_median.append(float(aval[13]))
                v_sigma.append(float(aval[14]))
                #
                #--- changing a plotting range for an easy view
                #
                s_mean.append(0.001 * float(aval[15]))
                s_median.append(0.001 * float(aval[16]))
                s_sigma.append(0.001 * float(aval[17]))

                anti_co_mean.append(float(aval[18]))
                anti_co_median.append(float(aval[19]))
                anti_co_sigma.append(float(aval[20]))

                s2hvst.append(int(float(aval[21])))
                s2hvlv.append(int(float(aval[22]) + 0.5))

                scint.append(float(aval[23]))
                scint_std.append(float(aval[24]))

                tval = tcnv.sectoFracYear(start)
                time.append(tval)
            except:
                continue

    return [time, tstart, tstop, duration, total_count, count_per_sec, \
            pha_mean,  pha_median, pha_sigma,  \
            t_mean, t_median, t_sigma, \
            v_mean, v_median, v_sigma, \
            s_mean, s_median, s_sigma, \
            anti_co_mean, anti_co_median, anti_co_sigma, \
            s2hvst, s2hvlv, scint, scint_std]
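Because read_stat_data returns 25 parallel lists, callers index the result by position; a minimal usage sketch (the file name is hypothetical, the indices follow the docstring above):

out = read_stat_data(data_dir + 'Stats/hrc_i_115_stat_results')

time     = out[0]       #--- fractional year, used for plotting
duration = out[3]       #--- seconds; rows shorter than 900 sec were already dropped
pha_mean = out[6]
s2hvst   = out[21]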
Example #16
def updata_data_selection_page():
    """
    update html gain file information and data_selection.html page
    input:  none but read two gain_selection files (from <house_keeping> and <html_dir>)
    output: <html_file>/Gain_files/gain_selection and gain fits file 
            <html_file>/data_selection.html
    """
    #
    #--- check whether the two gain list files differ. if so, a new gain file was found
    #
    gain_file1 = house_keeping + 'Gain_files/gain_selection'
    gain_file2 = html_dir + 'Data_save/Gain_files/gain_selection'

    cmd = 'diff ' + gain_file1 + ' ' + gain_file2 + ' >' + zspace
    os.system(cmd)

    if mcf.isFileEmpty(zspace) == 0:
        mcf.rm_file(zspace)

    else:
        mcf.rm_file(zspace)
        #
        #--- find which gain files are added
        #
        data = hcf.read_file_data(gain_file1)
        flist1 = []
        for ent in data:
            atemp = re.split('\s+', ent)
            flist1.append(atemp[-1])

        data2 = hcf.read_file_data(gain_file2)
        flist2 = []
        for ent in data2:
            atemp = re.split('\s+', ent)
            flist2.append(atemp[-1])
#
#--- add new gain file to <html_dir>/Data_save/Gain_files
#
        missing = list(set(flist1) - set(flist2))

        for ent in missing:
            cmd = 'cp -f  ' + house_keeping + 'Gain_files/' + ent + ' ' + html_dir + 'Data_save/Gain_files/.'
            os.system(cmd)

        cmd = 'cp  -f ' + gain_file1 + ' ' + gain_file2
        os.system(cmd)
        #
        #--- update data_selection.html page
        #
        line = ''
        #
        #--- create the table of the gain file list
        #
        for ent in data:
            atemp = re.split('\s+', ent)
            line = line + '<tr>\n\t<th>'
            if atemp[0] != '0':
                dtime1 = gformat_time(atemp[0])
                line = line + 'After ' + str(dtime1)

            if atemp[1] != '1.0e12':
                dtime2 = gformat_time(atemp[1])
                line = line + ' Before ' + str(dtime2)

            line = line + '</th>\n\t<td><a href="./Data_save/Gain_files/' + atemp[2]
            line = line + '">' + atemp[2] + '</a></td>\n</tr>\n'
#
#--- find the current date
#
        current_time = time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime())
        #
        #--- read the template and substitute the table and update date
        #
        template = house_keeping + 'Templates/data_selection_template'
        f = open(template, 'r')
        data = f.read()
        f.close()

        data = data.replace("#TABLE#", line)
        data = data.replace("#UPDATE#", current_time)
        #
        #--- print out the file
        #
        out = html_dir + 'data_selection.html'
        fo = open(out, 'w')
        fo.write(data)
        fo.close()
Example #17
def find_time_period():
    """
    find time period from existing data directory name
    input:  none but read from <data_dir>/2*
    output: syear   --- starting year
            smon    --- starting month
            eyear   --- ending year
            emon    --- ending month
            it also returns False if something is wrong (e.g. wrong time order)
    """
#
#--- find the last data set extracted
#
    cmd  = 'ls -d ' + data_dir + '2* > ' + zspace
    os.system(cmd)
    data = hcf.read_file_data(zspace, remove=1)

    syear = 1999
    smon  = 1

    if len(data) > 0:
        for ent in data:
            atemp = re.split('\/', ent)
            val   = atemp[-1]
#
#--- extract the year and month in digits from the directory name
#
            year = int(float(str(val[0:4])))
            lmon = val[4:]
    
            mon  = hcf.find_month(lmon)
#
#--- find the latest time
#
            if syear == year:
                if smon < mon:
                    syear = year
                    smon  = mon
            elif syear < year:
                syear = year
                smon  = mon
#
#--- find today's date
#
    today = time.localtime()

    eyear = int(float(today.tm_year))
    emon  = int(float(today.tm_mon))
#
#--- set the last month as we know this month's data are not filled yet
#
    emon -= 1

    if emon < 1:
        emon   = 12
        eyear -= 1
#
#--- quick error check
#
    if eyear < syear:
        return False

    elif eyear == syear:
        if smon >= emon:
            return False

    return [syear, smon, eyear, emon]
Example #18
def create_slide_map_pages():
    """
    create three sliding insert html pages
    input:  none
    output: hrc_i_115_slide.html, hrc_s_125_slide.html, and hrc_s_125_hi_slide.html in <html_dir>
    """

    for name in ['Hrc_i_115', 'Hrc_s_125_hi', 'Hrc_s_125']:
        lname = name.lower()
        #
        #--- first check whether there are any years which do not have any data
        #
        cmd = 'ls ' + data_dir + name + '/*evt1* > ' + zspace
        os.system(cmd)

        data = hcf.read_file_data(zspace, remove=1)
        y_list = []
        for ent in data:
            atemp = re.split('_', ent)
            y_list.append(int(float(atemp[-2])))

        tarray = set(numpy.array(y_list))
        y_list = list(tarray)
        y_list.sort()

        stime = tcnv.currentTime()
        tyear = stime[0]
        lyear = tyear - 1

        ymax = lyear
        if ymax < y_list[-1]:
            ymax = y_list[-1]
#
#--- pcnt is used to set the width of the table in pixel units
#--- each year is assigned 80 pixels of width
#
        pcnt = 80 * (ymax - 2000)

        outname = html_dir + lname + '_slide.html'

        line = '<!DOCTYPE html> \n'
        line = line + '<html> \n'
        line = line + '<head>\n\t <title>HRC Stowed Background Study ' + name
        line = line + ' Map Display</title>\n</head>\n'
        line = line + '<body>\n'
        line = line + '<table border=0 style="width:' + str(pcnt) + 'px">\n'
        line = line + '<tr> \n'

        for kyear in range(2000, ymax + 1):
            line = line + '<th>' + str(kyear) + '</th>\n'

        line = line + '</tr>\n'
        line = line + '<tr>\n'

        kmax = ymax - 2000
        for k in range(0, kmax + 1):
            kyear = 2000 + k
            if kyear in y_list:
                #
                #--- hrc s 125 has three sections
                #
                if name == 'Hrc_s_125':
                    line = line + '<th style="width:70px">\n'
                    line = line + '\t<a href="./Yearly/' + lname + '_year' + str(
                        kyear) + '.html"'
                    line = line + ' target="blank">\n'
                    for sec in range(1, 4):
                        line = line + '\t\t<img src="./Maps/' + name + '/Simage/' + lname + '_'
                        line = line + str(kyear) + '_thumb' + str(
                            sec) + '.png">\n'
                    line = line + '\n\t</a>\n</th>\n'
#
#--- hrc s 125 hi and hrc i 115 cases
#
                else:
                    if name == 'Hrc_s_125_hi':
                        line = line + '<th style="width:70px">\n'
                    else:
                        line = line + '<th>\n'
                    line = line + '\t<a href="./Yearly/' + lname + '_year' + str(
                        kyear) + '.html"'
                    line = line + ' target="blank">\n'
                    line = line + '\t\t<img src="./Maps/' + name + '/Simage/' + lname + '_'
                    line = line + str(kyear) + '_thumb.png">\n\t</a>\n</th>\n'
#
#--- if there is no data for the year, say so
#
            else:
                line = line + '<th>No<br /> Data</th>\n'

        line = line + '</tr>\n'
        line = line + '</table>\n'
        line = line + '</body>\n'
        line = line + '</html>\n'

        fo = open(outname, 'w')
        fo.write(line)
        fo.close()
Example #19
def update_stat_data_table():
    """
    create html pages to display stat results
    input:  none, but read from <data_dir>/Stats/<head>_stat_results
    output: <html_dir><head>_stats.html
    Table columns:
            0   --- Date
            1   --- Tstart
            2   --- Tstop
            3   --- Duration
            4   --- Total Counts
            5   --- Counts per Sec
            6   --- Mean of PHA Peak Position
            7   --- Median of PHA Peak Position
            8   --- Sigma of PHA Peak Position
            9   --- Mean of MCP Valid Count Rate
            10  --- Median of MCP Valid Count Rate
            11  --- Sigma of MCP Valid Count Rate
            12  --- Mean of MCP Total Count Rate
            13  --- Median of MCP Total Count Rate
            14  --- Sigma of MCP Total Count Rate
            15  --- Mean of MCP Shield Count Rate
            16  --- Median of MCP Shield Count Rate
            17  --- Sigma of MCP Shield Count Rate
            18  --- Mean of Valid/Total MCP Rate
            19  --- Median of Valid/Total MCP Rate
            20  --- Sigma of  Valid/Total MCP Rate
            21  --- S2HVST
            22  --- S2HVLV
            23  --- EPHIN Integrated Flux
    """
    #
    #--- the table will display values with either integer or float.
    #--- the following indices indicate how to round the value. if it is 0, an integer is used
    #--- pos : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]
    #
    r_ind = [
        0, 0, 0, 0, 5, 2, 2, 0, 3, 3, 0, 3, 3, 0, 3, 3, 0, 3, 3, 3, 3, 0, 0, 4
    ]
    #
    #--- read a template file
    #
    t_file = house_keeping + 'Templates/stat_table_template'
    f = open(t_file, 'r')
    template = f.read()
    f.close()
    #
    #--- loop over the three hrc devices. hrc_s_125 has 3 sections
    #
    for hname in ['hrc_i_115', 'hrc_s_125_hi', 'hrc_s_125']:
        if hname == 'hrc_s_125':
            s_list = ['_sec1_', '_sec2_', '_sec3_']
        else:
            s_list = ['_']

        for sec in s_list:
            #
            #--- make data table
            #
            ename = 'Stats/' + hname + sec + 'stat_results'
            fname = data_dir + ename
            data = hcf.read_file_data(fname)
            line = ''
            for ent in data:
                val = re.split('\s+', ent)
                line = line + '\t<tr>\n'
                line = line + '\t\t<td>' + val[0] + '</td>\n'
                for k in range(1, 24):

                    try:
                        fval = float(val[k])
                        if r_ind[k] == 0:
                            fval = str(int(fval))
                        else:
                            fval = str(round(fval, r_ind[k]))
                    except:
                        fval = 'NaN'

                    line = line + '\t\t<td>' + fval + '</td>\n'
#
#--- set output file name
#
            oname = html_dir + hname + sec + 'stats.html'
            title = hname + sec
            title = title.replace('_', ' ')
            title2 = title.upper()
            #
            #--- a link to the ascii data
            #
            adata = '<a href="./Data/' + ename + '">Open Plain Text Version</a>'
            #
            #--- today's date
            #
            tdate = time.strftime("%a, %d %b %Y", time.gmtime())
            #
            #--- insert the information to the template and print it out
            #
            out = template
            out = out.replace('#HTITLE#', title)
            out = out.replace('#MTITLE#', title2)
            out = out.replace('#ADATA#', adata)
            out = out.replace('#TABLE#', line)
            out = out.replace('#UPDATE#', tdate)

            fo = open(oname, 'w')
            fo.write(out)
            fo.close()
Example #20
def hrc_stowed_background(syear, smonth, eyear, emonth):
    """
    controlling script to set up directories and run all scripts
    input:  syear   --- starting year
            smonth  --- starting month
            eyear   --- ending year
            emonth  --- ending month
    output: evt0, evt1, ss0, and hk00 fits files corresponding to next_in_line condition
    """
#
#--- update hrc gain list
#
    rhp.update_gain_selection_file()
#
#--- run over the given time period
#
    for year in range(syear, eyear+1):
        for month in range(1, 13):
            if year == syear and month < smonth:
                continue
            elif year == eyear and month > emonth:
                break
#
#--- start and stop time in mm/dd/yy,hh:mm:ss format
#
            begin = hcf.conv_time_format(year, month)         
            end   = hcf.conv_time_format(year, month, next=1)
#
#--- start and stop time in seconds from 1998.1.1
#
            start = hcf.convertto1998sec(begin)
            stop  = hcf.convertto1998sec(end)
#
#--- make saving directory
#
            lmon   = hcf.find_month(month)
            outdir = data_dir  + str(year) + lmon + '/'

            if mcf.chkFile(outdir) == 0:
                cmd = 'mkdir ' + outdir
                os.system(cmd)

            for ent in comb_dirs:
                udir = outdir + ent
                if mcf.chkFile(udir) == 0:
                    cmd = 'mkdir ' +  udir
                    os.system(cmd)
#
#--- now run the main script
#
            #try:
            rnl.extract_next_in_line_data(begin, end, start, stop, outdir)
            #except:
            #    print 'Year: ' + str(year) + ' Month: ' + str(month) + '--- The process failed. '
            #    cmd  = 'mv tscpos_positive *period *fits ' + outdir  + ' 2> /dev/null'
            #    os.system(cmd)
            #    cmd  = 'gzip ' + outdir + '*fits ' + outdir + '*/*fits 2>/dev/null'
            #    os.system(cmd)
            #    continue
#
#--- move other files to appropriate directories
#
            cmd = 'mv tscpos_positive ' + outdir
            os.system(cmd)

            #cmd = 'ls *period > ' + zspace
            cmd = 'ls * > ' + zspace
            os.system(cmd)

            data = hcf.read_file_data(zspace, remove=1)

            for ent in data:
                mc = re.search('period', ent)
                if mc is None:
                    continue 

                atemp = re.split('hrc_', ent)
                btemp = re.split('_', atemp[1])
                dname = 'hrc_' + btemp[0] + '_' + btemp[1]
                mc    = re.search('_hi', ent)
                if mc is not None:
                    dname = dname + '_hi' 
                
                cmd = 'mv ' + ent + ' ' + outdir + dname + '/'
                os.system(cmd)

            cmd = 'rm -f *fits  ./Temp_dir/* 2> /dev/null'
            os.system(cmd)

            cmd  = 'gzip -f ' + outdir + '*/*.fits  2> /dev/null' 
            os.system(cmd)
#
#---- update stat tables
#
            hnt.hrc_nil_table(year, month)
            hnt.status_bit_report(year, month)
#
#---- clean up stat files
#
    clean_up_stat_lists()
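A minimal sketch of how the controlling function might be driven from the period returned by find_time_period (Example #17); the wrapper below is illustrative only, and assumes both functions are importable in the same scope:

if __name__ == '__main__':
#
#--- find the period not yet processed; find_time_period returns False on a bad time order
#
    period = find_time_period()

    if period is not False:
        [syear, smon, eyear, emon] = period
        hrc_stowed_background(syear, smon, eyear, emon)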