Example #1
def find_nth(fits_file='NA', cut=10):

    """
    find nth brightest value    input: fits file/ cut = upper limit
    """

    if fits_file == 'NA':
        fits_file = raw_input('Fits file name: ')
        cut       = int(raw_input('Where to Cut?: '))
#
#-- make histogram
#
    cmd1 = "/usr/bin/env PERL5LIB="
    cmd2 = ' dmimghist infile=' + fits_file + '  outfile=outfile.fits hist=1::1 strict=yes clobber=yes'
    cmd  = cmd1 + cmd2
    bash(cmd,  env=ascdsenv)

    
    cmd1 = "/usr/bin/env PERL5LIB="
    cmd2 =' dmlist infile=outfile.fits outfile=./zout opt=data'
    cmd  = cmd1 + cmd2
    bash(cmd,  env=ascdsenv)

    f    = open('./zout', 'r')
    data = [line.strip() for line in f.readlines()]
    f.close()
    os.system('rm outfile.fits ./zout')
#
#--- read bin # and its count rate
#
    hbin = []
    hcnt = []

    for ent in data:
        try:
            atemp = re.split('\s+|\t+', ent)
            if (len(atemp) > 3) and mtac.chkNumeric(atemp[1])  and mtac.chkNumeric(atemp[2])  and (int(atemp[4]) > 0):
                hbin.append(float(atemp[1]))
                hcnt.append(int(atemp[4]))
        except:
            pass

#
#--- checking the nth brightest position
#
    limit = cut - 1
    try:
        j = 0
        for i in range(len(hbin) - 1, 0, -1):
            if j == limit:
                val = i
                break
            else:
                if hcnt[i] > 0:                 #---- only when the value is larger than 0, record as count
                    j += 1

        return hbin[val]
    except:
        return 'I/INDEF'
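All of these examples rely on mtac.chkNumeric / mcf.chkNumeric from the MTA helper modules, which are not shown on this page. A minimal sketch of what such a check presumably does (an assumption, not the actual helper):

def chk_numeric(value):
    # return True if value can be read as a number (assumed behavior of chkNumeric)
    try:
        float(value)
        return True
    except (TypeError, ValueError):
        return False

print(chk_numeric('12.5'))   # True
print(chk_numeric('NA'))     # False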
Example #2
def find_nth(fits_file='NA', cut=10):
    """
    find nth brightest value    input: fits file/ cut = upper limit
    """

    if fits_file == 'NA':
        fits_file = raw_input('Fits file name: ')
        cut = int(raw_input('Where to Cut?: '))
#
#-- make histogram
#
    cmd1 = "/usr/bin/env PERL5LIB="
    cmd2 = ' dmimghist infile=' + fits_file + '  outfile=outfile.fits hist=1::1 strict=yes clobber=yes'
    cmd = cmd1 + cmd2
    bash(cmd, env=ascdsenv)

    cmd1 = "/usr/bin/env PERL5LIB="
    cmd2 = ' dmlist infile=outfile.fits outfile=./zout opt=data'
    cmd = cmd1 + cmd2
    bash(cmd, env=ascdsenv)

    f = open('./zout', 'r')
    data = [line.strip() for line in f.readlines()]
    f.close()
    os.system('rm outfile.fits ./zout')
    #
    #--- read bin # and its count rate
    #
    hbin = []
    hcnt = []

    for ent in data:
        try:
            atemp = re.split('\s+|\t+', ent)
            if (len(atemp) > 3) and mtac.chkNumeric(
                    atemp[1]) and mtac.chkNumeric(
                        atemp[2]) and (int(atemp[4]) > 0):
                hbin.append(float(atemp[1]))
                hcnt.append(int(atemp[4]))
        except:
            pass

#
#--- checking the nth brightest position
#
    limit = cut - 1
    try:
        j = 0
        for i in range(len(hbin) - 1, 0, -1):
            if j == limit:
                val = i
                break
            else:
                if hcnt[i] > 0:  #---- only when the value is larger than 0, record as count
                    j += 1

        return hbin[val]
    except:
        return 'I/INDEF'
Example #3
def find_10th(fits_file):
    """
    find 10th brightest value    input: fits file
    """
    #
    #-- make histogram
    #
    cmd = ' dmimghist infile=' + fits_file + '  outfile=outfile.fits hist=1::1 strict=yes clobber=yes'
    os.system(cmd)
    os.system('dmlist infile=outfile.fits outfile=./zout opt=data')

    f = open('./zout', 'r')
    data = [line.strip() for line in f.readlines()]
    f.close()
    os.system('rm outfile.fits ./zout')
    #
    #--- read bin # and its count rate
    #
    hbin = []
    hcnt = []

    for ent in data:
        try:
            atemp = re.split('\s+|\t+', ent)
            if (len(atemp) > 3) and mtac.chkNumeric(
                    atemp[1]) and mtac.chkNumeric(
                        atemp[2]) and (int(atemp[4]) > 0):
                hbin.append(float(atemp[1]))
                hcnt.append(int(atemp[4]))
        except:
            pass

#
#--- checking the 10th brightest position
#
    try:
        j = 0
        for i in range(len(hbin) - 1, 0, -1):
            if j == 9:
                val = i
                break
            else:
                if hcnt[i] > 0:  #---- only when the value is larger than 0, record as count
                    j += 1

        return hbin[val]
    except:
        return 'I/INDEF'
Example #4
def convert_to_ydate(time, startYear):
    """
    convert the time in second to ydate
    input:  time    --- time in seconds from 1998.1.1
            startYear   --- the year of the first data point
    output: xdate   --- a list of day of year
    """

    if isLeapYear(startYear) == 1:
        base = 366
    else:
        base = 365

    xdate = []
    for ent in time:
        if mcf.chkNumeric(ent) and ent != 'NA':
            ytime = tcnv.axTimeMTA(int(ent))
            atemp = re.split(':', ytime)
            year = int(float(atemp[0]))

            dofy = float(atemp[1]) + float(atemp[2]) / 24.0
            dofy = dofy + float(atemp[3]) / 1440.0 + float(atemp[4]) / 86400.0

            if year > startYear:
                dofy += base

            xdate.append(dofy)

    return xdate
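The conversion above leans on tcnv.axTimeMTA to produce a '<yyyy>:<ddd>:<hh>:<mm>:<ss>' string; the fractional day-of-year arithmetic itself is simple. A self-contained sketch of just that step (helper name is for illustration only):

def frac_ydate(ytime):
    # split '<yyyy>:<ddd>:<hh>:<mm>:<ss>' and return (year, fractional day of year)
    year, ddd, hh, mm, ss = [float(x) for x in ytime.split(':')]
    dofy = ddd + hh / 24.0 + mm / 1440.0 + ss / 86400.0
    return int(year), dofy

print(frac_ydate('2024:100:12:00:00'))   # (2024, 100.5)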
Example #5
def convert_to_fyear(cdate):
    """
    convert time format to fractional year
    input:  cdate   --- time in either <yyyy>-<mm>-<dd>T<hh>:<mm>:<ss> or seconds from 1998.1.1
    output: fyear   --- time in fractional year
    """

    try:
        mc = re.search('T', cdate)
    except:
        mc = None
#
#--- for the case the time format is: <yyyy>-<mm>-<dd>T<hh>:<mm>:<ss>
#
    if mc is not None:
        atemp = re.split('T', cdate)
        btemp = re.split('-', atemp[0])
        ctemp = re.split(':', atemp[1])
        year = float(btemp[0])
        mon = float(btemp[1])
        day = float(btemp[2])
        hh = float(ctemp[0])
        mm = float(ctemp[1])
        ss = float(ctemp[2])

        ydate = mon_list[int(mon) - 1] + day
        if tcnv.isLeapYear(year) == 1:
            if mon > 2:
                ydate += 1
#
#---- for the case the time format is seconds from 1998.1.1
#
    elif mcf.chkNumeric(cdate):
        out = Chandra.Time.DateTime(float(cdate)).date
        atemp = re.split(':', out)

        year = float(atemp[0])
        ydate = float(atemp[1])
        hh = float(atemp[2])
        mm = float(atemp[3])
        ss = float(atemp[4])

    else:
        atemp = re.split(':', cdate)
        year = float(atemp[0])
        ydate = float(atemp[1])
        hh = float(atemp[2])
        mm = float(atemp[3])
        ss = float(atemp[4])

    ydate = ydate + hh / 24.0 + mm / 1440.0 + ss / 86400.0

    if tcnv.isLeapYear(year) == 1:
        base = 366.0
    else:
        base = 365.0

    fyear = year + ydate / base

    return fyear
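For the calendar-date branch, the same fractional-year result can be obtained with only the standard library; a rough, self-contained sketch (not the module code above, and without the mon_list table):

import datetime

def to_fyear(cdate):
    # convert '<yyyy>-<mm>-<dd>T<hh>:<mm>:<ss>' to a fractional year
    t = datetime.datetime.strptime(cdate, '%Y-%m-%dT%H:%M:%S')
    ydate = t.timetuple().tm_yday + t.hour / 24.0 + t.minute / 1440.0 + t.second / 86400.0
    base = 366.0 if t.year % 4 == 0 and (t.year % 100 != 0 or t.year % 400 == 0) else 365.0
    return t.year + ydate / base

print(to_fyear('2024-07-01T00:00:00'))   # 2024.5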
Example #6
def hrc_gain_run(c_input):
    """
    extract hrc evt2 file, find the brightest object and create pha distribution
    this is a control script

    Input:  c_input     --- if it is an obsid, use it as the input;
                            otherwise, a list of new candidates is created from the database
    Output: <header>_pha.dat    --- pha distribution data
            <header>_gfit.png   --- a plot of pha data
            fitting_results     --- a table of fitted results
    """
    #
    #--- if an obsid is provided, analyze that; else get new obsids from the databases
    #
    if mcf.chkNumeric(c_input):
        candidate_list = [c_input]
    elif isinstance(c_input, list):
        candidate_list = c_input
    else:
        candidate_list = get_arlac_list()
#
#--- run the main script only when candidates exist
#
    if len(candidate_list) > 0:

        hgfv.hrc_gain_fit_voigt(candidate_list)
        hgtp.hrc_gain_trend_plot()
Example #7
def hrc_gain_run(c_input):

    """
    extract hrc evt2 file, find the brightest object and create pha distribution
    this is a control script

    Input:  c_input     --- if it is an obsid, use it as the input;
                            otherwise, a list of new candidates is created from the database
    Output: <header>_pha.dat    --- pha distribution data
            <header>_gfit.png   --- a plot of pha data
            fitting_results     --- a table of fitted results
    """
#
#--- if an obsid is provided, analyze that; else get new obsids from the databases
#
    if mcf.chkNumeric(c_input):
        candidate_list = [c_input]
    elif isinstance(c_input, list):
        candidate_list = c_input
    else:
        candidate_list = get_arlac_list()
#
#--- run the main script only when candidates exist
#
    if len(candidate_list) > 0:

        hgfv.hrc_gain_fit_voigt(candidate_list)
        hgtp.hrc_gain_trend_plot()
def find_two_sigma_value(fits):
#
#-- make histogram
#
    cmd1 = "/usr/bin/env PERL5LIB="
    cmd2 = ' dmimghist infile=' + fits + '  outfile=outfile.fits hist=1::1 strict=yes clobber=yes'
    cmd  = cmd1 + cmd2
    bash(cmd,  env=ascdsenv)

    cmd1 = "/usr/bin/env PERL5LIB="
    cmd2 = ' dmlist infile=outfile.fits outfile=' + zspace + ' opt=data'
    cmd  = cmd1 + cmd2
    bash(cmd,  env=ascdsenv)

    
    f = open(zspace, 'r')
    data = [line.strip() for line in f.readlines()]
    f.close()
    mcf.rm_file(zspace)
#
#--- read bin # and its count rate
#
    hbin = []
    hcnt = []
    vsum = 0

    for ent in data:
        atemp = re.split('\s+|\t+', ent)
        if mcf.chkNumeric(atemp[0]):
            hbin.append(float(atemp[1]))
            val = int(atemp[4])
            hcnt.append(val)
            vsum += val

#
#--- checking one, two, and three sigma counts
#

    if len(hbin) > 0:
        v68 = int(0.68 * vsum)
        v95 = int(0.95 * vsum)
        v99 = int(0.997 * vsum)
        sigma1 = -999
        sigma2 = -999
        sigma3 = -999
        acc = 0
        for i in range(0, len(hbin)):
            acc += hcnt[i]
            if acc > v68 and sigma1 < 0:
                sigma1 = hbin[i]
            elif acc > v95 and sigma2 < 0:
                sigma2 = hbin[i]
            elif acc > v99 and sigma3 < 0:
                sigma3 = hbin[i]
                break
    
        return (sigma1, sigma2, sigma3)
    
    else:
        return(0, 0, 0)
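The loop above walks the cumulative histogram until 68%, 95%, and 99.7% of the total counts are enclosed. A stripped-down, self-contained version of that search (illustration only; the dmimghist/dmlist part is left out):

def sigma_levels(hbin, hcnt):
    # return the bin values enclosing 68%, 95%, and 99.7% of the total counts
    vsum = sum(hcnt)
    targets = [0.68 * vsum, 0.95 * vsum, 0.997 * vsum]
    levels = []
    acc = 0
    k = 0
    for b, c in zip(hbin, hcnt):
        acc += c
        while k < 3 and acc > targets[k]:
            levels.append(b)
            k += 1
    return levels

print(sigma_levels(list(range(10)), [1] * 10))   # [6, 9, 9]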
def find_data_collection_interval():

    tlist = tcnv.currentTime(format='UTC')
    tyear = tlist[0]
    tyday = tlist[7]
    tdom  = tcnv.YdateToDOM(tyear, tyday)

    file  = data_dir + 'Disp_dir/hist_ccd3'
    f     = open(file, 'r')
    data  = [line.strip() for line in f.readlines()]
    f.close()

    chk   = 0
    k     = 1
    while(chk == 0):
        atemp = re.split('<>', data[len(data)-k])
        ldom  = atemp[0]
        if mcf.chkNumeric(ldom) == True:
            ldom = int(ldom)
            chk = 1
            break
        else:
            k += 1

    ldom += 1
    return(ldom, tdom)
Example #10
def check_time_format(intime):
    """
    return time in Chandra time
    input:  intime  --- time in <yyyy>:<ddd>:<hh>:<mm>:<ss> or <yyyy>-<mm>-<dd>T<hh>:<mm>:<ss> or chandra time
    output: time in chandra time (seconds from 1998.1.1)
    """
    mc1 = re.search('-', intime)
    mc2 = re.search(':', intime)
    #
    #--- it is already chandra format
    #
    if mcf.chkNumeric(intime):
        return int(float(intime))
#
#--- time in <yyyy>-<mm>-<dd>T<hh>:<mm>:<ss>
#
    elif mc1 is not None:
        mc2 = re.search('T', intime)
        if mc2 is not None:
            stime = mcf.convert_date_format(intime,
                                            ifmt='%Y-%m-%dT%H:%M:%S',
                                            ofmt='chandra')
        else:
            stime = mcf.convert_date_format(intime,
                                            ifmt='%Y-%m-%d',
                                            ofmt='chandra')

        return stime
#
#--- time in <yyyy>:<ddd>:<hh>:<mm>:<ss>
#
    elif mc2 is not None:

        return Chandra.Time.DateTime(intime).secs
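For reference, the '<yyyy>:<ddd>:<hh>:<mm>:<ss>' branch can be approximated with the standard library alone; this ignores the leap-second/TT offset that Chandra.Time handles, so it is a rough sketch rather than a replacement:

import datetime

def approx_chandra_secs(intime):
    # rough seconds since 1998-01-01T00:00:00 for '<yyyy>:<ddd>:<hh>:<mm>:<ss>' input
    t = datetime.datetime.strptime(intime, '%Y:%j:%H:%M:%S')
    return (t - datetime.datetime(1998, 1, 1)).total_seconds()

print(approx_chandra_secs('1998:002:00:00:00'))   # 86400.0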
Example #11
def find_data_collection_interval():

    tlist = tcnv.currentTime(format='UTC')
    tyear = tlist[0]
    tyday = tlist[7]
    tdom = tcnv.YdateToDOM(tyear, tyday)

    file = data_dir + 'Disp_dir/hist_ccd3'
    f = open(file, 'r')
    data = [line.strip() for line in f.readlines()]
    f.close()

    chk = 0
    k = 1
    while (chk == 0):
        atemp = re.split('<>', data[len(data) - k])
        ldom = atemp[0]
        if mcf.chkNumeric(ldom) == True:
            ldom = int(ldom)
            chk = 1
            break
        else:
            k += 1

    ldom += 1
    return (ldom, tdom)
Example #12
def set_format_for_col(name, cdata):
    """
    find a format of the input data and set column object
    input:  name    --- column name
            cdata   --- column data in numpy array form
    output: column object
    """
    test = str(list(cdata)[0])
    mc1 = re.search('True', test)
    mc2 = re.search('False', test)
    #
    #--- check whether the value is numeric
    #
    if mcf.chkNumeric(test):
        ft = 'E'
#
#--- check whether the value is logical
#
    elif (mc1 is not None) or (mc2 is not None):
        tcnt = len(list(cdata)[0])
        ft = str(tcnt) + 'L'
#
#--- all others are set as character set
#
    else:
        tcnt = len(test)
        ft = str(tcnt) + 'A'

    return fits.Column(name=name, format=ft, array=cdata)
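A self-contained illustration of the three FITS binary-table format families the function chooses between ('E' float, 'L' logical, 'nA' character), assuming astropy is available:

import numpy as np
from astropy.io import fits

c1 = fits.Column(name='value', format='E',  array=np.array([1.0, 2.5, 3.7]))
c2 = fits.Column(name='flag',  format='L',  array=np.array([True, False, True]))
c3 = fits.Column(name='label', format='5A', array=np.array(['aa', 'bb', 'cc']))

hdu = fits.BinTableHDU.from_columns([c1, c2, c3])
print(hdu.data['value'], hdu.data['flag'], hdu.data['label'])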
def find_two_sigma_value(fits):
    #
    #-- make histogram
    #
    cmd1 = "/usr/bin/env PERL5LIB="
    cmd2 = ' dmimghist infile=' + fits + '  outfile=outfile.fits hist=1::1 strict=yes clobber=yes'
    cmd = cmd1 + cmd2
    bash(cmd, env=ascdsenv)

    cmd1 = "/usr/bin/env PERL5LIB="
    cmd2 = ' dmlist infile=outfile.fits outfile=' + zspace + ' opt=data'
    cmd = cmd1 + cmd2
    bash(cmd, env=ascdsenv)

    f = open(zspace, 'r')
    data = [line.strip() for line in f.readlines()]
    f.close()
    mcf.rm_file(zspace)
    #
    #--- read bin # and its count rate
    #
    hbin = []
    hcnt = []
    vsum = 0

    for ent in data:
        atemp = re.split('\s+|\t+', ent)
        if mcf.chkNumeric(atemp[0]):
            hbin.append(float(atemp[1]))
            val = int(atemp[4])
            hcnt.append(val)
            vsum += val

#
#--- checking one, two, and three sigma counts
#

    if len(hbin) > 0:
        v68 = int(0.68 * vsum)
        v95 = int(0.95 * vsum)
        v99 = int(0.997 * vsum)
        sigma1 = -999
        sigma2 = -999
        sigma3 = -999
        acc = 0
        for i in range(0, len(hbin)):
            acc += hcnt[i]
            if acc > v68 and sigma1 < 0:
                sigma1 = hbin[i]
            elif acc > v95 and sigma2 < 0:
                sigma2 = hbin[i]
            elif acc > v99 and sigma3 < 0:
                sigma3 = hbin[i]
                break

        return (sigma1, sigma2, sigma3)

    else:
        return (0, 0, 0)
def check_time_format(intime):
    """
    return time in Chandra time
    input:  intime  --- time in <yyyy>:<ddd>:<hh>:<mm>:<ss> or <yyyy>-<mm>-<dd>T<hh>:<mm>:<ss> or chandra time
    output: time in chandra time (seconds from 1998.1.1)
    """

    mc1 = re.search('-', intime)
    mc2 = re.search(':', intime)
    #
    #--- it is already chandra format
    #
    if mcf.chkNumeric(intime):
        return int(float(intime))
#
#--- time in <yyyy>-<mm>-<dd>T<hh>:<mm>:<ss>
#
    elif mc1 is not None:
        mc2 = re.search('T', intime)
        if mc2 is not None:
            atemp = re.split('T', intime)
            btemp = re.split('-', atemp[0])
            year = int(float(btemp[0]))
            mon = int(float(btemp[1]))
            day = int(float(btemp[2]))
            ctemp = re.split(':', atemp[1])
            hrs = ctemp[0]
            mins = ctemp[1]
            secs = ctemp[2]

        else:
            btemp = re.split('-', intime)
            year = int(float(btemp[0]))
            mon = int(float(btemp[1]))
            day = int(float(btemp[2]))
            hrs = '00'
            mins = '00'
            secs = '00'

        yday = datetime.date(year, mon, day).timetuple().tm_yday

        cyday = str(yday)
        if yday < 10:
            cyday = '00' + cyday
        elif yday < 100:
            cyday = '0' + cyday

        ytime = btemp[0] + ':' + cyday + ':' + hrs + ':' + mins + ':' + secs

        return Chandra.Time.DateTime(ytime).secs
#
#--- time in <yyyy>:<ddd>:<hh>:<mm>:<ss>
#
    elif mc2 is not None:

        return Chandra.Time.DateTime(intime).secs
def generate_ephin_rate_plot(directory):

    """
    create ephin rate plots
    Input: directory --- a directory where the data is kept and the plot will be created
           <directory>/ephin_rate --- ephin data file
    Output: <directory>/ephin_rate.png
    """

    xname  = 'Time (DOM)'
    yname  = 'Count/Sec'

    file   = directory + '/ephin_rate'
    chk  = mcf.chkFile(file)
    if chk == 0:
        return ""

    f      = open(file, 'r')
    data   = [line.strip() for line in f.readlines()]
    f.close()

    dom   = []
    p4    = []
    e150  = []
    e300  = []
    e1300 = []
    for ent in data:
        atemp = re.split('\s+', ent)
        if mcf.chkNumeric(atemp[0]) and mcf.chkNumeric(atemp[1]):
            dom.append(float(atemp[0]))
            p4.append(float(atemp[1])    / 300.0)
            e150.append(float(atemp[2])  / 300.0)
            e300.append(float(atemp[3])  / 300.0)
            e1300.append(float(atemp[4]) / 300.0)

    x_set_list = [dom, dom,  dom,  dom]
    y_set_list = [p4,  e150, e300, e1300]
    yname_list = [yname, yname, yname, yname]
    title_list = ['P4', 'E150', 'E300', 'E1300']
    outname    = directory + '/ephin_rate.png'

    plot_multi_panel(x_set_list, y_set_list, xname, yname_list, title_list, outname)
Example #16
def generate_ephin_rate_plot(directory):
    """
    create ephin rate plots
    Input: directory --- a directory where the data is kept and the plot will be created
           <directory>/ephin_rate --- ephin data file
    Output: <directory>/ephin_rate.png
    """

    xname = 'Time (DOM)'
    yname = 'Count/Sec'

    file = directory + '/ephin_rate'
    chk = mcf.chkFile(file)
    if chk == 0:
        return ""

    f = open(file, 'r')
    data = [line.strip() for line in f.readlines()]
    f.close()

    dom = []
    p4 = []
    e150 = []
    e300 = []
    e1300 = []
    for ent in data:
        atemp = re.split('\s+', ent)
        if mcf.chkNumeric(atemp[0]) and mcf.chkNumeric(atemp[1]):
            dom.append(float(atemp[0]))
            p4.append(float(atemp[1]) / 300.0)
            e150.append(float(atemp[2]) / 300.0)
            e300.append(float(atemp[3]) / 300.0)
            e1300.append(float(atemp[4]) / 300.0)

    x_set_list = [dom, dom, dom, dom]
    y_set_list = [p4, e150, e300, e1300]
    yname_list = [yname, yname, yname, yname]
    title_list = ['P4', 'E150', 'E300', 'E1300']
    outname = directory + '/ephin_rate.png'

    plot_multi_panel(x_set_list, y_set_list, xname, yname_list, title_list,
                     outname)
def clean_the_input(line):
    """
    check if the input is numeric and, if so, round it to two decimals
    input:  line    --- input quantity
    output: line    --- if it is a numeric value, the value rounded to two decimals;
                        otherwise, the value is returned unchanged
    """

    if mcf.chkNumeric(line):
        line = str(ecf.round_up(float(line)))

    return line
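A self-contained equivalent, with the built-in round() standing in for ecf.round_up (an assumption about that helper's behavior):

def clean_the_input_sketch(line):
    # if line is numeric, return it rounded to two decimals as a string; otherwise return it unchanged
    try:
        return str(round(float(line), 2))
    except (TypeError, ValueError):
        return line

print(clean_the_input_sketch('3.14159'))   # 3.14
print(clean_the_input_sketch('N/A'))       # N/A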
Example #18
def clean_the_input(line):
    """
    check if the input is numeric and, if so, round it to two decimals
    input:  line    --- input quantity
    output: line    --- if it is a numeric value, the value rounded to two decimals;
                        otherwise, the value is returned unchanged
    """

    if mcf.chkNumeric(line):
        line = str(ecf.round_up(float(line)))

    return line
Example #19
def quickChandra_time(ent):
    """
    axTime3 replacement
    input:  ent --- either seconds from 1998.1.1 or date in <yyyy>:<ddd>:<hh>:<mm>:<ss>
    output: out --- either seconds from 1998.1.1 or date in <yyyy>:<ddd>:<hh>:<mm>:<ss>
    """

    if mcf.chkNumeric(ent):
        out = Chandra.Time.DateTime(float(ent)).date
    else:
        out = Chandra.Time.DateTime(str(ent)).secs

    return out
Example #20
    def checkValue(self, param, value):
        """
        for a given parameter and numeric value, return whether the value is in the range
        this also include 'NULL' value
        """
        vrange = self.showRange(
            param
        )  #--- a value or the list of possible values for the parameter
        v_list = re.split('\,', vrange)

        mchk = 0  #--- whether the value is in vrange: 1 if so; 2 means skip the secondary test
        ipos = 0  #--- default position, in case none of the branches below sets it

        if vrange == 'MUST':
            if ocf.null_value(value) != True:
                mchk = 1
                ipos = 0

        elif vrange == 'OPEN' or vrange == 'NA':
            mchk = 1
            ipos = 0
            if ocf.null_value(value):
                mchk = 2

        elif ocf.null_value(value):  #--- value is "NULL" case
            if ocf.find_pattern('NULL', vrange):
                mchk = 1
                ipos = 0

        elif mcf.chkNumeric(value):  #--- value is a numeric case
            for ipos in range(0, len(v_list)):
                if v_list[ipos] == 'OPEN':
                    mchk = 1
                    break
                elif ocf.find_pattern('<>', v_list[ipos]):
                    cond = re.split('<>', v_list[ipos])
                    vtest = float(value)
                    lower = float(cond[0])
                    upper = float(cond[1])
                    if vtest >= lower and vtest <= upper:
                        mchk = 1
                        break
        else:  #--- value is a word case
            for ipos in range(0, len(v_list)):
                if value == v_list[ipos]:
                    mchk = 1
                    break

        return [mchk, ipos]
def extract_hist_data(file):
    '''
    extract acis hist data from a fits file
    input: fits file name
    output: one-column histogram data

    '''

    tdata = Table.read(file, hdu=1)
    cols = tdata.columns
    sdata = tdata['COUNTS']
    hist_data = []
    for ent in sdata:
        if mcf.chkNumeric(ent):
            hist_data.append(float(ent))

    return hist_data
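The core of the function is just reading the COUNTS column of HDU 1 with astropy; a self-contained sketch (the file name in the commented call is hypothetical):

from astropy.table import Table

def read_counts(fits_file):
    # read the COUNTS column of HDU 1 as a list of floats
    tdata = Table.read(fits_file, hdu=1)
    return [float(v) for v in tdata['COUNTS']]

# print(read_counts('acis_hist_example.fits')[:5])   # hypothetical file name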
Example #22
def check_time_format(intime):

    mc1 = re.search('-', intime)
    mc2 = re.search(':', intime)

    if mcf.chkNumeric(intime):
        return int(float(intime))

    elif mc1 is not None:
        mc2 = re.search('T', intime)
        if mc2 is not None:
            atemp = re.split('T', intime)
            btemp = re.split('-', atemp[0])
            year = int(float(btemp[0]))
            mon = int(float(btemp[1]))
            day = int(float(btemp[2]))
            ctemp = re.split(':', atemp[1])
            hrs = ctemp[0]
            mins = ctemp[1]
            secs = ctemp[2]

        else:
            btemp = re.split('-', intime)
            year = int(float(btemp[0]))
            mon = int(float(btemp[1]))
            day = int(float(btemp[2]))
            hrs = '00'
            mins = '00'
            secs = '00'

        yday = datetime.date(year, mon, day).timetuple().tm_yday

        cyday = str(yday)
        if yday < 10:
            cyday = '00' + cyday
        elif yday < 100:
            cyday = '0' + cyday

        ytime = btemp[0] + ':' + cyday + ':' + hrs + ':' + mins + ':' + secs

        return Chandra.Time.DateTime(ytime).secs

    elif mc2 is not None:

        return Chandra.Time.DateTime(intime).secs
Example #23
def read_data(file):

    """
    read data from a given file
    Input:  file    --- input file name
    Output: date_list   --- a list of date
            ede_list    --- a list of ede value
            error_list  --- a list of computed ede error
    """

    f    = open(file, 'r')
    data = [line.strip() for line in f.readlines()]
    f.close()

    date_list  = []
    ede_list   = []
    error_list = []
    for ent in data:
        atemp = re.split('\s+', ent)
        if mcf.chkNumeric(atemp[0])== False:
            continue

        fwhm = float(atemp[2])
        ferr = float(atemp[3])
        ede  = float(atemp[4])
        dom  = float(atemp[10])
        (year, ydate) = tcnv.DOMtoYdate(dom)
        if tcnv.isLeapYear(year) == 1:
            base = 366
        else:
            base = 365
        fyear = year + ydate/base
        date_list.append(fyear)
        ede_list.append(ede)
#
#--- the error of EdE is computed using FWHM and its error value
#
        error = math.sqrt(ede*ede* ((ferr*ferr) / (fwhm*fwhm)))

        error_list.append(error)


    return [date_list, ede_list, error_list]
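The error expression above simplifies algebraically: sqrt(ede^2 * ferr^2 / fwhm^2) = ede * ferr / fwhm, i.e. the relative FWHM error is applied to EdE. A quick numeric check with made-up values:

import math

ede, fwhm, ferr = 950.0, 12.0, 0.6
error_full   = math.sqrt(ede * ede * ((ferr * ferr) / (fwhm * fwhm)))
error_simple = ede * ferr / fwhm
print(error_full, error_simple)   # both 47.5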
Example #24
def obsid_from_mp_report(nlist):
    """
    extract obsid from a given path to the data
             entry line looks like: "/data/mta/www/mp_reports/photons/acis/cti/62311_0"
    Input:  nlist      --- a list of paths to the data
    Output: obsid_list --- a list of obsids
    """

    obsid_list = []
    for ent in nlist:
        atemp = re.split('/', ent)
        btemp = re.split('_', atemp[len(atemp) - 1])
        #
        #--- check whether the obsid is really a number
        #
        if mcf.chkNumeric(btemp[0]):
            obsid_list.append(btemp[0])

    return obsid_list
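The same path parsing can be written with os.path.basename for a single entry; a self-contained sketch:

import os

def obsid_from_path(path):
    # return the obsid part of a path like '.../cti/62311_0', or None if it is not numeric
    head = os.path.basename(path).split('_')[0]
    return head if head.isdigit() else None

print(obsid_from_path('/data/mta/www/mp_reports/photons/acis/cti/62311_0'))   # 62311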
Example #25
def read_data(file):

    """
    read data from a given file
    Input:  file    --- input file name
    Output: date_list   --- a list of date
            ede_list    --- a list of ede value
            error_list  --- a list of computed ede error
    """

    f    = open(file, 'r')
    data = [line.strip() for line in f.readlines()]
    f.close()

    date_list  = []
    date_list2 = []
    ede_list   = []
    error_list = []
    for ent in data:
        atemp = re.split('\s+', ent)
        if mcf.chkNumeric(atemp[0])== False:
            continue

        fwhm  = float(atemp[2])
        ferr  = float(atemp[3])
        ede   = float(atemp[4])
        date  = atemp[5]
        sdate = float(atemp[6])
        fyear = change_time_format_fyear(date)

        date_list.append(fyear)
        date_list2.append(sdate)
        ede_list.append(ede)
#
#--- the error of EdE is computed using FWHM and its error value
#
        error = math.sqrt(ede*ede* ((ferr*ferr) / (fwhm*fwhm)))

        error_list.append(error)


    return [date_list, date_list2, ede_list, error_list]
def get_quad_cti(ent, pos = 0):

    """
    get the cti values or their error values from an input line
    Input:  ent     --- line containing cti values of quad0, 1, 2, 3 in positions 1, 2, 3, 4
            pos     --- if 0, cti value; if 1, error value
    Output: a list of 4 values, either the quad cti values or their errors
    """

    vals  = []
    atemp = re.split('\s+', ent)
    for i in range(1, 5):
        btemp = re.split('\+-', atemp[i])
        if mcf.chkNumeric(btemp[pos]):
            val = float(btemp[pos])
        else:
            val = -99999
        vals.append(val)

    return vals
Example #27
def read_data(file):
    """
    read data from a given file
    Input:  file    --- input file name
    Output: date_list   --- a list of date
            ede_list    --- a list of ede value
            error_list  --- a list of computed ede error
    """

    f = open(file, 'r')
    data = [line.strip() for line in f.readlines()]
    f.close()

    date_list = []
    ede_list = []
    error_list = []
    for ent in data:
        atemp = re.split('\s+', ent)
        if mcf.chkNumeric(atemp[0]) == False:
            continue

        fwhm = float(atemp[2])
        ferr = float(atemp[3])
        ede = float(atemp[4])
        dom = float(atemp[10])
        (year, ydate) = tcnv.DOMtoYdate(dom)
        if tcnv.isLeapYear(year) == 1:
            base = 366
        else:
            base = 365
        fyear = year + ydate / base
        date_list.append(fyear)
        ede_list.append(ede)
        #
        #--- the error of EdE is computed using FWHM and its error value
        #
        error = math.sqrt(ede * ede * ((ferr * ferr) / (fwhm * fwhm)))

        error_list.append(error)

    return [date_list, ede_list, error_list]
def read_xmm_orbit():
    """
    read xmm orbital elements and return list of distance to XMM
    input: none but read from /data/mta4/proj/rac/ops/ephem/TLE/xmm.spctrk
    output: [time, alt] time in seconds from 1998.1.1
                        alt  in km from the center of the Earth
    """
#
#--- read xmm orbital elements
#
    cxofile = "/data/mta4/proj/rac/ops/ephem/TLE/xmm.spctrk"
    f       = open(cxofile, 'r')
    data    = [line.strip() for line in f.readlines()]
    f.close()

    atime = []
    alt  = []
    chk  = 0
    for ent in data:
        atemp = re.split('\s+', ent)
        if mcf.chkNumeric(atemp[0]) == False:
           continue 
#
#--- compute the distance to xmm from the center of the earth
#
        try:
            x = float(atemp[6])
        except:
            continue

        y    = float(atemp[7])
        z    = float(atemp[8])

        dist = math.sqrt(x * x + y * y + z * z)
        alt.append(dist)
#
#--- convert time to seconds from 1998.1.1
#
        atime.append(float(atemp[0])- tcorrect)

    return[atime, alt]
Example #29
def read_data(infile, remove=0):

    try:
        f = open(infile, 'r')
        data = [line.strip() for line in f.readlines()]
        f.close()

        if remove == 1:
            mcf.rm_file(infile)

        out = []
        for ent in data:
            if mcf.chkNumeric(ent):
                val = int(float(ent))
            else:
                val = ent
            out.append(val)

        return out
    except:
        return []
Example #30
def extract_hist_data(file):
    '''
    extract acis hist data from a fits file
    input: fits file name
    output: one-column histogram data

    '''

    #
    #--- check whether the temp file exists. if so, remove it
    #
    cfile = exc_dir + 'zout'
    chk = mtac.chkFile(cfile)
    if chk > 0:
        cmd = 'rm ' + exc_dir + 'zout'
        os.system(cmd)
#
#--- extract data
#
    cmd = 'dmlist "' + file + '[cols counts]" outfile = ' + exc_dir + 'zout  opt=data'
    os.system(cmd)

    file = exc_dir + 'zout'
    f = open(file, 'r')
    data = [line.strip() for line in f.readlines()]
    f.close()

    cmd = 'rm ' + exc_dir + 'zout'
    os.system(cmd)

    hist_data = []
    for ent in data:
        atemp = re.split('\s+|\t+', ent)
        if mtac.chkNumeric(atemp[0]):
            hist_data.append(float(atemp[1]))

    return hist_data
def extract_ephin_data(file, out_dir, comp_test=''):

    """
    extract ephin data from a given data file name and save it in out_dir
    Input:  file    --- ephin data file name
            out_dir --- directory in which the data is saved
    Output: <out_dir>/ephin_rate --- ephin data (300 sec accumulation)
    """
#
#--- extract time and ccd id information from the given file
#
    data      = pyfits.getdata(file, 1)
    time_r    = data.field("TIME")
    scp4_r    = data.field("SCP4")
    sce150_r  = data.field("SCE150")
    sce300_r  = data.field("SCE300")
    sce1500_r = data.field("SCE1300")
#
#--- initialize
#
    diff       = 0
    chk        = 0
    ephin_data = []
#
#--- sdata[0]: scp4, sdata[1]: sce150, sdata[2]: sce300, and sdata[3]: sce1300
#
    sdata = [0 for x in range(0,4)]

#
#--- check each line and accumulate the counts in each 300 sec interval
#

    for k in range(0, len(time_r)):
        if mcf.chkNumeric(time_r[k]):
            ftime  = float(time_r[k])
            if ftime > 0:
                if chk == 0:
#                    for j in range(2, 6):
#                        sdata[j-2] += atemp[j]

                    if mcf.chkNumeric(scp4_r[k]) and mcf.chkNumeric(sce150_r[k]) \
                        and mcf.chkNumeric(sce300_r[k]) and mcf.chkNumeric(sce1500_r[k]):
                        sdata[0] += float(scp4_r[k])
                        sdata[1] += float(sce150_r[k])
                        sdata[2] += float(sce300_r[k])
                        sdata[3] += float(sce1500_r[k])

                    s_time = ftime
                    diff   = 0
                    chk    = 1
                elif diff >= 300.0:
#
#--- convert time in dom
#
                    dom = tcnv.stimeToDom(s_time)
#
#--- print out counts per 300 sec 
#
                    line = str(dom) + '\t' 
                    for j in range(0, 4):
                        line = line + str(sdata[j]) + '\t'
                        sdata[j] = 0
                    line = line + '\n'
                    ephin_data.append(line)
                    chk = 0
#
#--- re initialize for the next round
#
                    if mcf.chkNumeric(scp4_r[k]) and mcf.chkNumeric(sce150_r[k]) \
                        and mcf.chkNumeric(sce300_r[k]) and mcf.chkNumeric(sce1500_r[k]):
                        sdata[0] += float(scp4_r[k])
                        sdata[1] += float(sce150_r[k])
                        sdata[2] += float(sce300_r[k])
                        sdata[3] += float(sce1500_r[k])
                    s_time = ftime
                    diff   = 0
#
#--- accumulate the count until the 300 sec interval is reached
#
                else:
                    diff = ftime - s_time
                    if mcf.chkNumeric(scp4_r[k]) and mcf.chkNumeric(sce150_r[k]) \
                        and mcf.chkNumeric(sce300_r[k]) and mcf.chkNumeric(sce1500_r[k]):
                        sdata[0] += float(scp4_r[k])
                        sdata[1] += float(sce150_r[k])
                        sdata[2] += float(sce300_r[k])
                        sdata[3] += float(sce1500_r[k])
#
#--- for the case the last interval is less than 300 sec, 
#--- estimate the number of hits and adjust
#
    if diff > 0 and diff < 300:

        line = str(dom) + '\t' 

        ratio = 300.0 / diff
        for j in range(0, 4):
            var  = sdata[j] * ratio
            line = line + str(var) + '\t'

        line = line + '\n'
        ephin_data.append(line)
#
#--- if this is a test, return the result
#
    if comp_test == 'test':
        return ephin_data
#
#--- otherwise, print the result
#
    else:

        file = out_dir + '/ephin_rate'
        f    = open(file, 'a')
        for ent in ephin_data:
            f.write(ent)
        f.close()
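The accumulation loop above (and the similar one in extract_data further down) is fixed-width time binning; a compact, self-contained version of the same idea (the 300/diff rescaling of the final partial bin is left out):

def bin_counts(times, values, width=300.0):
    # sum values into fixed-width time bins; returns a list of (bin start time, summed value)
    bins = []
    if not times:
        return bins
    s_time = times[0]
    acc = 0.0
    for t, v in zip(times, values):
        if t - s_time >= width:
            bins.append((s_time, acc))
            s_time = t
            acc = 0.0
        acc += v
    bins.append((s_time, acc))   # last (possibly partial) interval
    return bins

print(bin_counts([0, 100, 200, 300, 400, 700], [1, 1, 1, 1, 1, 1]))
# [(0, 3.0), (300, 2.0), (700, 1.0)]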
Example #32
def extract_sim_data():

    """
    extract sim data from PRIMARYCCDM_*.*.tl
    input: none but read from <dumpdir>/PRIMARYCCDM_*.*.tl
    output: <outdir>sim_data.out
    """
#
#--- find the time of the last entry from the sim_data.out
#
    sfile = outdir + 'sim_data.out'
    f     = open(sfile, 'r')
    data = [line.strip() for line in f.readlines()]
    f.close()
#
#--- clean up the data; drop lines whose date starts with ":" e.g. :2014
#
    pdata = []
    for ent in data:
        if re.search('^:', ent):
            continue
        else:
            pdata.append(ent)

#
#--- the last entry values
#
    if len(pdata) > 0:
        atemp  = re.split('\s+', pdata[len(pdata)-1])
        ltime  = tcnv.axTimeMTA(atemp[0])               #--- converting time to sec from 1998.1.1
        time_2 = atemp[0]
        col1_2 = atemp[1]
        col2_2 = atemp[2]
        col3_2 = atemp[3]
    else:
        ltime  = 0
        time_2 = 0
        col1_2 = ''
        col2_2 = ''
        col3_2 = ''
#
#--- check whether input files exist
#
    cmd = 'ls -rt ' + dumpdir + 'PRIMARYCCDM_*.*.tl >' + zspace
    os.system(cmd)

    f    = open(zspace, 'r')
    data = [line.strip() for line in f.readlines()]
    f.close()
    cmd = 'rm ' + zspace
    os.system(cmd)

    dlen = len(data)

    if dlen < 1:
        exit(1)

#
#--- files exist; read the data from the last 40 files
#
    tlist = data[dlen-40:]

    for ent in tlist:
        cmd = 'cat ' +ent + ' >> ' + zspace
        os.system(cmd)

    f    = open(zspace, 'r')
    data = [line.strip() for line in f.readlines()]
    f.close()
    cmd = 'rm ' + zspace
    os.system(cmd)

    prev = ''
    fo = open('./temp_save', 'w')
#
#--- go though each data line
#
    for ent in data:
        try:
#
#--- expect the first letter of the data line is numeric (e.g. 2014).
#
            val = float(ent[0])         
        except:
            continue
#
#--- only data with "FMT" format will be used
#
        mc    = re.search('FMT', ent)
        if mc is None:
            continue

        atemp = re.split('\t+', ent)
#
#--- if there are less than 20 entries, something wrong; skip it
#
        if len(atemp) < 20:             
            continue
#
#--- convert time format
#
        time  = atemp[0]
        time  = time.strip();
        time  = time.replace(' ', ':')
        time  = time.replace(':::', ':00')
        time  = time.replace('::', ':0')
#
#--- if the time is exactly same as one before, skip it
#
        if time == time_2:
            continue
#
#--- if the time is already in the database, skip it
#
        stime = tcnv.axTimeMTA(time)
        if stime <= ltime:
            continue
#
#--- use only data which tscpos and fapos have numeric values
#
        tscpos = atemp[4].strip()
        fapos  = atemp[5].strip()

        if tscpos == "" or fapos == "":
            continue
        else:
            tscpos = int(float(tscpos))
            fapos  = int(float(fapos))

#        aopc   = atemp[11].strip()
#        if aopc == '':
#            aopc = '0'

        mpwm = atemp[12].strip()
        if mcf.chkNumeric(mpwm):
            mpwm = int(float(mpwm))
            mpwm = str(mpwm)
        else:
            mpwm = '0'


#
#--- we want to print only the beginning and ending of runs of identical data entries.
#--- skip the line if all three entries are the same as the one before, except the last one
#
        if col1_2 == tscpos and col2_2 == fapos and col3_2 == mpwm:
            time_2 = time
            continue

        line = time + '\t' + str(tscpos) + '\t' + str(fapos) + '\t' + mpwm + '\n'
        if line == prev:
            continue
        else:
            pline = time_2  + '\t' + str(col1_2) + '\t' + str(col2_2) + '\t' + str(col3_2) + '\n'
            fo.write(pline)
            fo.write(line)
            prev   = line
            time_2 = time
            col1_2 = tscpos
            col2_2 = fapos
            col3_2 = mpwm

    fo.close()

    sfile2 = sfile + '~'
    cmd    = 'cp  ' + sfile + ' ' + sfile2
    os.system(cmd)
    cmd    = 'cat ./temp_save >> ' + sfile
    os.system(cmd)
Example #33
def find_excess_file(lev = 'Lev2'):
    """
    find data with extremely high radiation and remove it.
    this is done mainly in Lev2; Lev1 copies the procedure used for Lev2
    input:  lev --- level. default Lev2 (other option is Lev1)
    output: excess radiation data fits files in ./lres/Save/.
    """

    if lev == 'Lev2':
        lres = s_dir + lev + '/Outdir/lres/'

        cmd  = 'ls ' + lres + 'mtaf*fits > ' + zspace
        os.system(cmd)
        data = scf.read_file(zspace, remove=1)
    
        cmd  = 'mkdir ' + lres + 'Save'
        os.system(cmd)

        for ent in data:
            cmd = 'dmlist ' + ent + ' opt=data > ' + zspace
            scf.run_ascds(cmd)

            out = scf.read_file(zspace, remove=1)
            ssoft   = 0.0
            soft    = 0.0
            med     = 0.0
            hard    = 0.0
            harder  = 0.0
            hardest = 0.0
            tot     = 0
            for val in out:
                atemp    = re.split('\s+', val)
                if mcf.chkNumeric(atemp[0]):
                    ssoft   += float(atemp[6])
                    soft    += float(atemp[7])
                    med     += float(atemp[8])
                    hard    += float(atemp[9])
                    harder  += float(atemp[10])
                    hardest += float(atemp[11])
                    tot     += 1
                else:
                    continue

            if tot > 1:
                ssoft   /= tot
                soft    /= tot
                med     /= tot
                hard    /= tot
                harder  /= tot
                hardest /= tot

            mc = re.search('acis6', ent)
            chk = 0
            if mc is not None:
                if (med > 200):
                    chk = 1
            else:
                if (soft > 500) or (med > 150):
                    chk = 1

            if chk > 0:
                cmd = 'mv ' + ent + ' ' + lres + 'Save/.'
                os.system(cmd)

    else:
#
#--- for Lev1, we move the files which were removed in Lev2. we assume that
#--- Lev2 has already been run through this function
#
        epath =  s_dir + '/Lev2/Outdir/lres/Save/'
        if os.listdir(epath) != []:

            cmd = 'ls ' + s_dir + '/Lev2/Outdir/lres/Save/*fits > ' + zspace
            os.system(cmd)
            data = scf.read_file(zspace, remove=1)
    
            l1_lres =  s_dir + '/Lev1/Outdir/lres/'
            l1_dir  =  l1_lres  + '/Save/'
            cmd     = 'mkdir ' + l1_dir
            os.system(cmd)
     
            for ent in data:
                atemp = re.split('mtaf', ent)
                btemp = re.split('N', atemp[1])
                mc = re.search('_', btemp[0])
                if mc is not None:
                    ctemp = re.split('_', btemp[0])
                    obsid = ctemp[0]
                else:
                    obsid = btemp[0]
    
                atemp = re.split('acis', ent)
                btemp = re.split('lres', atemp[1])
                ccd   = btemp[0]
                cid   = 'acis' + str(ccd) + 'lres_sibkg.fits'
    
                cmd = 'mv ' + l1_lres + 'mtaf' + obsid + '*' + cid + '  '  + l1_dir + '/.'
                os.system(cmd)
Example #34
def get_lines(grating):
    """
    extract line statistics for a given grating
    input:  grating --- hetg, metg, or letg
    output: acis_<grating>_<line>_data
    """
    #
    #--- read data file header
    #
    infile = house_keeping + 'data_header'
    f = open(infile, 'r')
    header = f.read()
    f.close()
    #
    #--- set which grating data to extract
    #
    if grating == 'hetg':
        cmd = 'ls ' + gdata_dir + '/*/*/obsid_*_L1.5_S1HEGp1_linelist.txt >' + zspace
        ofile = 'acis_hetg_'
        l_list = h_lines
    elif grating == 'metg':
        cmd = 'ls ' + gdata_dir + '/*/*/obsid_*_L1.5_S1MEGp1_linelist.txt >' + zspace
        ofile = 'acis_metg_'
        l_list = m_lines
    else:
        cmd = 'ls ' + gdata_dir + '/*/*/obsid_*_L1.5_S1LEGp1_linelist.txt >' + zspace
        ofile = 'hrc_letg_'
        l_list = l_lines

    os.system(cmd)
    d_list = read_data_file(zspace, remove=1)

    sdate_list = [[], [], [], [], [], [], []]
    line_list = [{}, {}, {}, {}, {}, {}, {}]
    lcnt = len(l_list)

    #
    #---- go through each file
    #
    for dfile in d_list:

        out = find_info(dfile)
        if out == 'na':
            continue
        else:
            [obsid, ltime, stime] = out
#
#--- extract line information. if energy or fwhm are either "*" or "NaN", skip
#
        data = read_data_file(dfile)
        for ent in data:
            atemp = re.split('\s+', ent.strip())
            if mcf.chkNumeric(atemp[0]):
                energy = atemp[2]
                fwhm = atemp[3]

                if energy == 'NaN':
                    continue

                if (fwhm == '*') or (fwhm == 'NaN'):
                    continue
                energy = adjust_digit(energy, 6)
                peak = float(energy)
                err = atemp[4]
                ede = atemp[5]
                line = str(
                    obsid
                ) + '\t' + energy + '\t' + fwhm + '\t' + err + '\t' + ede + '\t'
                line = line + str(ltime) + '\t' + str(int(stime)) + '\n'
                #
                #--- find the line value within +/-5 of the expected line center position
                #
                for k in range(0, lcnt):
                    center = l_list[k]
                    low = (center - 5) / 1000.0
                    top = (center + 5) / 1000.0
                    if (peak >= low) and (peak <= top):

                        sdate_list[k].append(stime)
                        line_list[k][stime] = line
#
#--- output file name
#
    for k in range(0, lcnt):
        val = str(l_list[k])
        if len(val) < 4:
            val = '0' + val

        odata = data_dir + ofile + val + '_data'
        fo = open(odata, 'w')
        fo.write(header)
        fo.write('\n')
        #
        #--- print out the data
        #
        slist = sdate_list[k]
        slist.sort()
        for sdate in slist:
            ent = line_list[k][sdate]
            fo.write(ent)
        fo.close()
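The +/-5 window test used when assigning a measured peak to an expected line center can be isolated as a small helper; a self-contained sketch with made-up center values:

def match_line(peak, centers, half_width=5.0):
    # return the first center whose +/- half_width window (after /1000 scaling) contains peak, or None
    for center in centers:
        low = (center - half_width) / 1000.0
        top = (center + half_width) / 1000.0
        if low <= peak <= top:
            return center
    return None

print(match_line(1.4723, [1022, 1473, 2290]))   # 1473  (hypothetical line centers)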
def update_data(msid, l_list, dset = 1, time=[], vals=[]):
    """
    update data for the given msid
    input:  msid    --- msid
            l_list  --- a list of list of [<start time>, <stop time>, <yellow min>, <yellow max>, <red min>, <red max>]
            dset    --- indicate which data set we are handling. 1: main, 2: secondary, i, s, off: hrc sub category
            time    --- a list of time entry for secondary case
            vals    --- a list of msid values for secondary case
    output: data for msid updated (<data_dir>/<msid>_data
    """
#
#--- set which data periods we need to process the data; start from the one after the last one
#
    periods = set_data_periods(msid)

    tc = 0
    for tlist in periods:
        start = tlist[0]
        stop  = tlist[1]
#
#--- occasionally the data period crosses a date when the limits change.
#--- if that happens, divide the period into before and after that date
#--- lperiods has at most two entries, but usually only one
#
        lperiods = set_limits_for_period(l_list, start, stop)

        if dset != 1:               #--- setting for mta comp case only
            kcnt = 0             
            dcnt = len(time)

        for limits in lperiods:

            tstart = limits[0]
            tstop  = limits[1]
            y_min  = limits[2]
            y_max  = limits[3]
            r_min  = limits[4]
            r_max  = limits[5]
#
#--- main trend case: need to extract data
#
            if dset == 1:
                sdata  = get_data(msid, tstart, tstop)
#
#--- secondary trend case: data are passed from the calling function
#
            else:
                sdata  = []
                for m in range(0, dcnt):
                    if (time[m] >= tstart) and (time[m] < tstop):
                        sdata.append(vals[m])
                    if time[m] >=tstop:
                        kcnt = m -1
                        break
#
#--- if the data is too small, just skip the period
#
            if len(sdata) < 5:
                continue
#
#--- compute statistics
#
            stat_results = compute_stats(sdata, y_min, y_max, r_min, r_max)
#
#--- special treatment for mta comp hrc values: it has i, s, or off suffix
#
            if mcf.chkNumeric(dset):
                oname = msid
            else:
                oname = msid + '_' + dset
#
#--- append data to the data file
#
            print_results(oname, start, stop, stat_results, y_min, y_max, r_min, r_max)
Example #36
            btemp = re.split('\)', atemp[1])
            ent = re.split('\,', btemp[0])
            temp = tcnv.DOMtoYdate(float(ent[0]))
            start_list.append(temp[1])

            temp = tcnv.DOMtoYdate(float(ent[1]))
            stop_list.append(temp[1])

        rad_zone = [start_list, stop_list]

    return rad_zone


#---------------------------------------------------------------------------------------------------

if __name__ == '__main__':

    #
    #--- if a period name (e.g. 2061206) is given, the script computes only for that period
    #--- otherwise, it will recompute the entire period
    #
    if len(sys.argv) == 2:
        if mcf.chkNumeric(sys.argv[1]):
            period = sys.argv[1]
        else:
            period = 'all'
    else:
        period = 'all'

    read_xmm_and_process(period)
def print_html_page(comp_test, in_year=1, in_mon=1):
    """
    driving function to print all html pages for ACIS Dose Plots
    Input:  comp_test --- test indicator. if it is "test", it will run the test version
            in_year/in_mon --- if in_year and in_mon are given, the file is created for 
                               that year/month, otherwise, the files are created in the current year/month
    Output: html pages in <web_dir> and <web_dir>/<mon_dir_name>  (e.g. JAN2013)
    """

#
#---  find today's date and convert them appropriately
#
    if comp_test == 'test':
        bchk  = 0
        tday  = 13
        umon  = 2
        uyear = 2013

        cmon  = tcnv.changeMonthFormat(umon)
        cmon  = cmon.upper()
        ldate = str(uyear) + '-' + str(umon) + '-' + str(tday)          #-- update date
    else:
#
#--- find today's date
#
        [uyear, umon, tday, hours, min, sec, weekday, yday, dst] = tcnv.currentTime()
#
#--- change month in digit into letters
#
        cmon  = tcnv.changeMonthFormat(umon)
        cmon  = cmon.upper()
        ldate = str(uyear) + '-' + str(umon) + '-' + str(tday)          #-- update date
#
#--- if year and month is given, create for that month. otherwise, create for this month
#
        bchk = 0
        if mcf.chkNumeric(in_year) and mcf.chkNumeric(in_mon):
            if in_year > 1900 and (in_mon >0 and in_mon < 13):
                bchk = 1
        if bchk > 0:
            uyear = in_year
            umon  = in_mon
            cmon  = tcnv.changeMonthFormat(umon)
            cmon  = cmon.upper()

    mon_dir_name = cmon + str(uyear)

#
#--- check whether this month's web page is already open
#
    dname = web_dir + mon_dir_name
    chk   = mcf.chkFile(dname)

    if chk > 0:
        if bchk == 0:
#
#-- create only when it is working for the current month
#
            print_main_html(ldate, uyear, umon)

        print_month_html(mon_dir_name, ldate, uyear, umon)

        print_png_html(mon_dir_name, ldate, uyear, umon)
#
#--- change permission level and the owner of the files
#
        cmd = 'chgrp mtagroup ' + web_dir + '/* ' + web_dir + '/*/*'
        os.system(cmd)
        cmd = 'chmod 755 '+ web_dir + '/* ' + web_dir + '/*/*'
        os.system(cmd)
def extract_data(file, out_dir, comp_test =''):

    """
    extract time and ccd_id from the fits file and create count rate data
    Input:  file    --- fits file data
            out_dir --- the directory in which data will be saved
    Output: ccd<ccd>--- 5 min accumulated count rate data file
    """
#
#--- extract time and ccd id information from the given file
#
    data      = pyfits.getdata(file, 0)
    time_col  = data.field('TIME')
    ccdid_col = data.field('CCD_ID')
#
#--- initialize
#
    diff  = 0
    chk   = 0
    ccd_c = [0  for x in range(0, 10)]
    ccd_h = [[] for x in range(0, 10)]
#
#--- check each line and count the number of ccd hits in each 300 sec interval
#
    for k in range(0, len(time_col)):

        if mcf.chkNumeric(time_col[k]) and mcf.chkNumeric(ccdid_col[k]) :
            ftime  = float(time_col[k])

            if ftime > 0:
                ccd_id = int(ccdid_col[k])

                if chk == 0:
                    ccd_c[ccd_id] += 1
                    s_time = ftime
                    diff   = 0
                    chk    = 1
                elif diff >= 300.0:
#
#--- convert time in dom
#
                    dom = tcnv.stimeToDom(s_time)
#
#--- print out counts per 300 sec 
#
                    for i in range(0, 10):
                        line = str(dom) + '\t' + str(ccd_c[i]) + '\n'
                        ccd_h[i].append(line)
#
#--- re initialize for the next round
#
                        ccd_c[i] = 0

                    ccd_c[ccd_id] += 1
                    s_time = ftime
                    diff   = 0
                    chk    = 0
#
#--- accumulate the count until the 300 sec interval is reached
#
                else:
                    diff = ftime - s_time
                    ccd_c[ccd_id] += 1
#
#--- for the case the last interval is less than 300 sec, 
#--- estimate the number of hits and adjust
#
    if diff > 0 and diff < 300:
        ratio = 300.0 / diff

        for i in range(0, 10):
            ccd_c[i] *= ratio

            line = str(dom) + '\t' + str(ccd_c[i]) + '\n'
            ccd_h[i].append(line)
#
#--- if this is a test, return output 
#
    if comp_test == 'test':

        return ccd_h
#
#--- otherwise, print out the results
#
    else:
        for i in range(0, 10):
            file = out_dir + '/ccd' + str(i)
            f    = open(file, 'a')

            for ent in ccd_h[i]:
                f.write(ent)
            f.close()
def generate_count_rate_plot(directory):

    """
    create count rate plots
    Input: directory --- the directory where data is located and the plot will be created
            <directory>/ccd<ccd> --- count rate data file
    Output: <directory>/acis_dose_ccd<ccd>.png
            <directory>/acis_dose_ccd_5_7.png
    """

    xname  = 'Time (DOM)'
    yname  = 'Count/Sec'

    data1_x = []
    data1_y = []
    data2_x = []
    data2_y = []
    data3_x = []
    data3_y = []
#
#--- plot count rates for each ccd
#
    for ccd in range(0, 10):
        file = directory + '/ccd' + str(ccd)
        chk  = mcf.chkFile(file)
        if chk == 0:
            continue

        f    = open(file, 'r')
        data = [line.strip() for line in f.readlines()]
        f.close()
        xdata = []
        ydata = []
        for ent in data:
            atemp = re.split('\s+', ent)
            if mcf.chkNumeric(atemp[0]) and mcf.chkNumeric(atemp[1]):
                xdata.append(float(atemp[0]))
#
#--- normalized to cnts/sec
#
                ydata.append(float(atemp[1]) / 300.0)

        title   = 'ACIS Count Rate: CCD' + str(ccd)
        outname = directory + '/acis_dose_ccd' + str(ccd) + '.png'

        plot_panel(xdata, ydata, xname, yname, title, outname)
#
#--- save data for three panel plot
#
        if ccd == 5:
            data1_x = xdata
            data1_y = ydata
        elif ccd == 6:
            data2_x = xdata
            data2_y = ydata
        elif ccd == 7:
            data3_x = xdata
            data3_y = ydata
#
#--- create three panel plot for ccd5, ccd6, and ccd7
#
    title1  = 'ACIS Count Rate: CCD5'
    title2  = 'ACIS Count Rate: CCD6'
    title3  = 'ACIS Count Rate: CCD7'
    outname = directory + '/acis_dose_ccd_5_7.png'

    x_set_list = [data1_x, data2_x, data3_x]
    y_set_list = [data1_y, data2_y, data3_y]
    yname_list = [yname,   yname,   yname]
    title_list = [title1,  title2, title3]

    plot_multi_panel(x_set_list, y_set_list, xname, yname_list, title_list, outname)
def convert_avg(input_list):
    """
    compute avg and std of each column entry in gradkodak for the given data set
    input: input_list   --- a list of lists. each sub-list contains dataseeker output for "_avg"
                            each entry is a dictionary which contains avg, std, min, max 
    output: a list of avg and std of each columns of gradkodak. first len(gradkodak)
            are avg of the columns and the next len(gradkodak) are std.
    """
    #
    #--- open the list of lists
    #
    (_4rt575t_avg, _4rt700t_avg, _4rt701t_avg, _4rt702t_avg, _4rt703t_avg, _4rt704t_avg,\
     _4rt705t_avg, _4rt706t_avg, _4rt707t_avg, _4rt708t_avg, _4rt709t_avg, _4rt710t_avg,\
     _4rt711t_avg, \
     ohrthr02_avg, ohrthr03_avg, ohrthr04_avg, ohrthr05_avg, ohrthr06_avg, ohrthr07_avg, \
     ohrthr08_avg, ohrthr09_avg, ohrthr10_avg, ohrthr11_avg, ohrthr12_avg, ohrthr13_avg, \
     ohrthr14_avg, ohrthr15_avg, ohrthr17_avg, ohrthr21_avg, ohrthr22_avg, ohrthr23_avg, \
     ohrthr24_avg, ohrthr25_avg, ohrthr26_avg, ohrthr27_avg, ohrthr28_avg, ohrthr29_avg, \
     ohrthr30_avg, ohrthr31_avg, ohrthr33_avg, ohrthr34_avg, ohrthr35_avg, ohrthr36_avg, \
     ohrthr37_avg, ohrthr39_avg, ohrthr40_avg, ohrthr42_avg, ohrthr44_avg, ohrthr45_avg, \
     ohrthr46_avg, ohrthr47_avg, ohrthr49_avg, ohrthr50_avg, ohrthr51_avg, ohrthr52_avg, \
     ohrthr53_avg, ohrthr54_avg, ohrthr55_avg, ohrthr56_avg, ohrthr57_avg, ohrthr58_avg, \
     ohrthr60_avg, ohrthr61_avg, \
     oobthr02_avg, oobthr03_avg, oobthr04_avg, oobthr05_avg, oobthr06_avg, oobthr07_avg, \
     oobthr08_avg, oobthr09_avg, oobthr10_avg, oobthr11_avg, oobthr12_avg, oobthr13_avg, \
     oobthr14_avg, oobthr15_avg, oobthr17_avg, oobthr18_avg, oobthr19_avg, oobthr20_avg, \
     oobthr21_avg, oobthr22_avg, oobthr23_avg, oobthr24_avg, oobthr25_avg, oobthr26_avg, \
     oobthr27_avg, oobthr28_avg, oobthr29_avg, oobthr30_avg, oobthr31_avg, oobthr33_avg, \
     oobthr34_avg, oobthr35_avg, oobthr36_avg, oobthr37_avg, oobthr38_avg, oobthr39_avg, \
     oobthr40_avg, oobthr41_avg, oobthr42_avg, oobthr43_avg, oobthr44_avg, oobthr45_avg, \
     oobthr48_avg, oobthr49_avg, oobthr50_avg, oobthr51_avg, oobthr52_avg, oobthr53_avg, \
     oobthr54_avg, oobthr55_avg, oobthr56_avg, oobthr57_avg, oobthr58_avg, oobthr59_avg, \
     oobthr60_avg, oobthr61_avg, oobthr62_avg, oobthr63_avg \
     ) = input_list
    #
    #--- get avg and std of the entries of the given list
    #
    hrmarange_list = range(2, 14) + range(21, 27) + [28, 29, 30, 33, 36, 37] \
                     + range(44, 49) + range(49, 54) + [55, 56]
    [hrmaavg, hrmadev] = get_avg_std('ohrthr', hrmarange_list, input_list)

    #------------

    hrmacs_list = range(6, 16) + [17, 25, 26, 29, 30, 31] + range(33, 38) \
                  + [39, 40] + range(50, 59) + [60, 61]
    [hrmacavg, hrmacdev] = get_avg_std('ohrthr', hrmacs_list, input_list)

    #-----------

    hrmaxgrad_list1 = [10, 11, 34, 35, 55, 56]
    [hrmaxgrd1, xxx] = get_avg_std('ohrthr', hrmaxgrad_list1, input_list)

    hrmaxgrad_list2 = [12, 13, 35, 37, 57, 58]
    [hrmaxgrd2, xxx] = get_avg_std('ohrthr', hrmaxgrad_list2, input_list)
    hrmaxgrd = hrmaxgrd1 - hrmaxgrd2
    #
    #--- compute std separately for the case where two outputs are added/subtracted after they are computed
    #
    dev_list = hrmaxgrad_list1 + hrmaxgrad_list2
    [z, hrmaxgrd_dev] = get_avg_std('ohrthr', dev_list, input_list)

    #-----------

    hrmarad1grd_list = [8, 31, 33, 52]
    [hrmarad1grd, xxx] = get_avg_std('ohrthr', hrmarad1grd_list, input_list)
    hrmarad2grd_list = [9, 53, 54]
    [hrmarad2grd, xxx] = get_avg_std('ohrthr', hrmarad2grd_list, input_list)
    hrmaradgrd = hrmarad1grd - hrmarad2grd

    dev_list = hrmarad1grd_list + hrmarad2grd_list
    [z, hrmaradgrd_dev] = get_avg_std('ohrthr', dev_list, input_list)

    #-----------

    obaavg_list = range(8, 33) + range(33, 42) + [44, 45]
    [obaavg, obadev] = get_avg_std('oobthr', obaavg_list, input_list)

    #-----------

    obacone_list = range(8, 16) + range(17, 31) + range(57, 62)
    [obaconeavg, obacone_dev] = get_avg_std('oobthr', obacone_list, input_list)

    #-----------
    #
    #--- for the case where two different msid headers are needed
    #
    fwblkhd_list1 = [62, 63]
    fwblkhd_list2 = [700, 712]
    [fwblkhdt, fwblkhdt_dev] = get_avg_std('oobthr',
                                           fwblkhd_list1,
                                           input_list,
                                           '_4rt',
                                           fwblkhd_list2,
                                           tail='t')

    aftblkhdt_list = [31, 33, 34]
    [aftblkhdt, aftblkhdt_dev] = get_avg_std('oobthr', aftblkhdt_list,
                                             input_list)

    obaaxgrd = fwblkhdt - aftblkhdt

    s_list = fwblkhd_list1 + aftblkhdt_list
    [z, obaaxgrd_dev] = get_avg_std('oobthr',
                                    s_list,
                                    input_list,
                                    '_4rt',
                                    fwblkhd_list2,
                                    tail='t')

    #-----------

    mzoba_list1 = [8, 19, 25, 31, 57, 60]
    mzoba_list2 = [575]
    [mzobacone, mzobacone_dev] = get_avg_std('oobthr',
                                             mzoba_list1,
                                             input_list,
                                             '_4rt',
                                             mzoba_list2,
                                             tail='t')

    pzoba_list = [13, 22, 23, 28, 29, 61]
    [pzobacone, pzobacone_dev] = get_avg_std('oobthr', pzoba_list, input_list)

    obadiagrad = mzobacone - pzobacone

    d_list = mzoba_list1 + pzoba_list
    [z, obadiagrad_dev] = get_avg_std('oobthr',
                                      d_list,
                                      input_list,
                                      '_4rt',
                                      mzoba_list2,
                                      tail='t')

    #-----------
    #
    #--- compute the range of the data
    #
    clist = range(2, 14) + range(21, 28) + [29, 30, 33, 36, 37, 42] \
            + range(45, 54) + [55, 56]
    [hrmarange, hrmarange_dev] = get_range('ohrthr', clist, input_list)

    clist = range(2, 8)
    [hrmastrutrnge, hrmastrutrnge_dev] = get_range('oobthr', clist, input_list)

    clist = range(42, 45)
    [tfterange, tfterange_dev] = get_range('oobthr', clist, input_list)

    clist = range(49, 55)
    [scstrutrnge, scstrutrnge_dev] = get_range('oobthr', clist, input_list)

    out = []
    for  val in [hrmaavg, hrmacavg, hrmaxgrd, hrmaradgrd, obaavg, obaconeavg, obaaxgrd, obadiagrad, fwblkhdt, \
            aftblkhdt, mzobacone, pzobacone, hrmarange, tfterange, hrmastrutrnge, scstrutrnge, \
            hrmadev, hrmacdev, hrmaxgrd_dev, hrmaradgrd_dev, obadev, obacone_dev, obaaxgrd_dev, obadiagrad_dev, fwblkhdt_dev, \
            aftblkhdt_dev, mzobacone_dev, pzobacone_dev, hrmarange_dev, tfterange_dev, hrmastrutrnge_dev, scstrutrnge_dev]:

        if mcf.chkNumeric(val) == False or str(val) == 'nan':
            val = -99.0

        out.append(val)

    return out
            atemp = re.split('\(', line)
            btemp = re.split('\)', atemp[1])
            ent   = re.split('\,', btemp[0])
            temp   = tcnv.DOMtoYdate(float(ent[0]))
            start_list.append(temp[1])

            temp   = tcnv.DOMtoYdate(float(ent[1]))
            stop_list.append(temp[1])

        rad_zone = [start_list, stop_list]
    
    return rad_zone

#---------------------------------------------------------------------------------------------------

if __name__ == '__main__':

#
#--- if a period name (e.g. 2061206) is given, the script computes only for that period
#--- otherwise, it recomputes all periods
#
    if len(sys.argv) == 2:
        if mcf.chkNumeric(sys.argv[1]):
            period = sys.argv[1]
        else:
            period = 'all'
    else:
        period = 'all'

    read_xmm_and_process(period)
def update_mta_comp_database():
    """
    update the database of mta computed msids
    input:  none but read from /data/mta4/Deriv/*fits files
    output: updated data file: <data_dir>/<msid>_data
    """
#
#--- get a list of data fits file names
#
    infile = house_keeping + 'mta_comp_fits_files'
    data   = ecf.read_file_data(infile)

    for fits in data:
#
#--- hrc has 4 different cases (all data, hrc i, hrc s, and off); tail indicates which case this one is
#--- if this is not hrc (or hrc all), tail = 2
#
        mc = re.search('hrc', fits)
        if mc is not None:
            atemp = re.split('_', fits)
            btemp = re.split('.fits', atemp[1])
            tail  =  btemp[0]
        else:
            tail  = 2

        [cols, tbdata] = ecf.read_fits_file(fits)

        time = []
        for ent in tbdata.field('time'):
            stime = float(ent)
#
#--- check whether the time is in dom 
#   
            if stime < 31536000:
                stime = ecf.dom_to_stime(float(ent))

            time.append(stime)

        for col in cols:
            col = col.lower()
#
#--- we need only *_avg columns
#
            mc = re.search('_avg', col)
            if mc is not None:

                vals = tbdata.field(col)
             
                ctime = []
                cvals = []
                for m in range(0, len(time)):
#
#--- skip the data value "nan" and dummy values (-999, -998, -99, 99, 998, 999)
#
                    if str(vals[m]) in  ['nan', 'NaN', 'NAN']:
                        continue

                    nval = float(vals[m])
                    if nval in [-999, -998, -99, 99, 998, 999]:
                        continue
                    else:
                        ctime.append(time[m])
                        cvals.append(nval)
    
                atemp = re.split('_', col)
                msid  = atemp[-2]

                if mcf.chkNumeric(tail):
                    oname = msid
                else:
                    oname = msid + '_' + tail
    
                print "MSID: " + str(oname)

                cmd = 'rm ' + data_dir + oname + '_data'
                os.system(cmd)
#
#--- read limit table for the msid
#
                l_list   = ecf.set_limit_list(msid)
                if len(l_list) == 0:
                    try:
                        l_list = mta_db[msid]
                    except:
                        l_list = []
    
                update_data(msid, l_list, dset = tail, time=ctime, vals=cvals)
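
The nan/dummy filtering loop above can also be written with numpy boolean masks. A minimal sketch of the same selection, assuming time and vals are plain numeric sequences (an alternative illustration, not the script's own code):

import numpy

def filter_dummy_values(time, vals, dummies=(-999, -998, -99, 99, 998, 999)):
    """
    drop nan entries and the listed dummy values; return cleaned (time, vals) lists
    """
    t = numpy.array(time, dtype=float)
    v = numpy.array(vals, dtype=float)
    mask = ~numpy.isnan(v)
    for d in dummies:
        mask &= (v != d)
    return t[mask].tolist(), v[mask].tolist()
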
def dateFormatCon(elm, *arg):

    "convert various date format into a tuple of (year, month, day, hours, minutes, second, ydate) "

    if mtac.chkNumeric(elm):             #--- for the case, inputs are digits, e.g, year, month, ...

        year = elm
        cnt = len(arg)
        if cnt == 1:
            ydate   = arg[0]
#
#--- check whether ydate is a fractional ydate or not; if so, compute hours, minutes, and seconds
#
            diff = arg[0] - int(ydate)
            if diff == 0:
                hours   = 0
                minutes = 0
                seconds = 0
            else:
                temp = 24 * diff
                hours   = int(temp)
                temp2 = 60 * (temp - hours)
                minutes = int(temp2)
                temp3 = 60  * (temp2 - minutes)
                seconds = int(temp3)

            (month, day) = changeYdateToMonDate(year, int(ydate))
        elif cnt == 2:
            month   = arg[0]
            day     = int(arg[1])
            hours   = 0                 #--- no time of day given; default to 00:00:00
            minutes = 0
            seconds = 0

            ydate   = findYearDate(year, month, day)
        elif cnt == 4:
            ydate   = arg[0]
            hours   = arg[1]
            minutes = arg[2]
            seconds = arg[3]

            (month, day) = changeYdateToMonDate(year, int(ydate))
        elif cnt == 5:
            month   = arg[0]
            day     = arg[1]
            hours   = arg[2]
            minutes = arg[3]
            seconds = arg[4]

            ydate   = findYearDate(year, month, day)
        else:
            ydate   = 1
            month   = 1
            day     = 1
            hours   = 0
            minutes = 0
            seconds = 0


    else:
        atemp = re.split('\s+', elm)
        m = re.search('\/', elm)
        m2= re.search('\,', elm)
        n = re.search('T',  elm)

        if len(atemp) == 6:                 #--- for the case, e.g: Wed Apr  4 10:34:32 EDT 2012
            year    = int(atemp[5])
            month   = changeMonthFormat(atemp[1])
            day     = int(atemp[2])
            btemp   = re.split(':', atemp[3])
            hours   = int(btemp[0])
            minutes = int(btemp[1])
            seconds = int(btemp[2])

            yday    =  float(day) + float(hours/24.0) + float(minutes/1440.0) + float(seconds/86400)
            ydate   = findYearDate(year, month, yday)

        elif (m is not None) and (m2 is not None):     #--- for the case, e.g. 03/28/12,00:00:00
            atemp   = re.split('\,', elm)
            btemp   = re.split('\/', atemp[0])
            year    = int(btemp[2])

            if year > 90 and year < 1900:
                year += 1900
            elif year < 90:
                year += 2000

            month   = int(btemp[0])
            day     = int(btemp[1])

            btemp   = re.split(':', atemp[1])
            hours   = int(btemp[0])
            minutes = int(btemp[1])
            seconds = int(btemp[2])

            yday    =  float(day) + float(hours/24.0) + float(minutes/1440.0) + float(seconds/86400)
            ydate   = findYearDate(year, month, yday)

        elif n is not None:                 #--- for the case, e.g. 03/28/12T00:00:00
            atemp   = re.split('T', elm)
            m1      = re.search('\/', elm)
            m2      = re.search('-', elm)
            if m1 is not None:
                btemp   = re.split('\/', atemp[0])
                year    = int(btemp[2])
    
                if year > 90 and year < 1900:
                    year += 1900
                elif year < 90:
                    year += 2000
    
                month   = int(btemp[0])
                day     = int(btemp[1])

            elif m2 is not None:
                btemp = re.split('-', atemp[0])
                year  = int(btemp[0])
                month = int(btemp[1])
                day   = int(btemp[2])
    
            btemp   = re.split(':', atemp[1])
            hours   = int(btemp[0])
            minutes = int(btemp[1])
            seconds = int(btemp[2])

            yday    =  float(day) + float(hours/24.0) + float(minutes/1440.0) + float(seconds/86400)
            ydate   = findYearDate(year, month, yday)


        else:
            atemp = re.split(':', elm)
            if len(atemp) == 6:                 #--- for the case yyyy:mm:dd:hh:mm:ss
                year    = int(atemp[0])
                month   = int(atemp[1])
                day     = int(atemp[2])
                hours   = int(atemp[3])
                minutes = int(atemp[4])
                seconds = int(atemp[5])
    
                yday    =  float(day) + float(hours/24.0) + float(minutes/1440.0) + float(seconds/86400)
                ydate   = findYearDate(year, month, yday)

            else:                               #--- for the case yyyy:yday:hh:mm:ss
                year    = int(atemp[0])
                ydate   = int(atemp[1])
                hours   = int(atemp[2])
                minutes = int(atemp[3])
                seconds = int(atemp[4])
                [month, day] = changeYdateToMonDate(year, ydate)
    
                yday    =  float(day) + float(hours/24.0) + float(minutes/1440.0) + float(seconds/86400)
                ydate   = findYearDate(year, month, yday)

    line = (year, month, day, hours, minutes, seconds, ydate)
    return line
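
The fractional year-date handling near the top of this example maps the fractional part of a day back into clock time. The same arithmetic, pulled out into a small self-contained helper (the function name is hypothetical):

def frac_ydate_to_hms(ydate):
    """
    split a fractional year date into (day of year, hours, minutes, seconds)
    """
    day     = int(ydate)
    frac    = ydate - day
    temp    = 24.0 * frac
    hours   = int(temp)
    temp2   = 60.0 * (temp - hours)
    minutes = int(temp2)
    seconds = int(60.0 * (temp2 - minutes))
    return (day, hours, minutes, seconds)

print(frac_ydate_to_hms(100.75))        # --> (100, 18, 0, 0)
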
Beispiel #44
0
def collect_potential_obs():
    """
    read mta mail archive and find potential ddt/too triggered email
    input:  none but read from /stage/mail/mta
    output: obsids  --- a list of obsids
            prpnum  --- a list of proposal numbers
            seqnum  --- a list of sequence numbers
            otype   --- a list of observation types (ddt or too)

    """
#
#--- a list of obsids currently in the lists
#
    obs_list = create_obsids_on_the_list()
#
#--- read email  archive
#
    ifile = m_dir + 'mta_mail'
    data  = read_data_file(ifile)

    chk    = 0
    kdate  = ''
    obsids = []
    prpnum = []
    seqnum = []
    otype  = []
    for ent in data:
        mc = re.search('Date:', ent)
        if mc is not None:
            kdate = ent

        if chk == 0:
            mc1 = re.search('Recently Approved ', ent)
            if mc1 is not None:
                mc = re.search('Subject', ent)
                if mc is not None:
                    mc = re.search('Re:', ent)
                    if mc is None:
                        chk = 1
            else:
                continue
        else:
            mc2 = re.search('From:',  ent)
            if mc2 is not None:
                chk = 0
                continue

            mc3 = re.search('Obsid ',  ent)
            mc4 = re.search('Obsids ', ent)
            if (mc3 is not None) or (mc4 is not None):
#
#--- notify the obsid only when the observation was triggered in the past two hours
#
#                if check_time_limit(kdate, lhr=2) == 0:
#                    continue

                atemp = re.split('\s+', ent)
                for wrd in atemp:
                    mc5 = re.search('\(', wrd)
                    if mc5 is not None:
                        break
                    else:
                        wrd = wrd.replace(',', '')
                        wrd = wrd.strip()
                        if mcf.chkNumeric(wrd):
                            val = int(float(wrd))
#
#--- check whether the obsid is already in too_list or ddt_list
#
                            if val in obs_list:
                                continue
#
#--- check repeater
#
                            if not (val in obsids):
                                obsids.append(val)
#
#--- get proposal # and seq #
#
                                atemp = re.split('#: ' , ent)
                                btemp = re.split(',', atemp[1])
                                prpnum.append(btemp[0].strip())
                
                                btemp = re.split('\)', atemp[2])
                                seqnum.append(btemp[0].strip())
                
                                mc = re.search('DDT', ent)
                                if mc is not None:
                                    otype.append('ddt')
                                else:
                                    otype.append('too')

                chk = 0
#
#--- if there is a new obs, notify
#
    if len(obsids) > 0:
        line = 'The following obsid is activated via email:\n\n'
        for  obsid in obsids:
            line = line + 'Obsid: ' + str(obsid) + '\n'
    
        fo   = open(zspace, 'w')
        fo.write(line)
        fo.close()
    
        cmd = 'cat ' + zspace + '| mailx -s "Subject:TEST!! TEST !! New TOO/DDT in Email" [email protected]'
        os.system(cmd)
    
        mcf.rm_file(zspace)

    
    return [obsids, prpnum, seqnum, otype]
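
read_data_file is a helper used throughout these examples but not listed here; judging from how it is called, it returns the stripped lines of a text file. A minimal sketch under that assumption:

def read_data_file(ifile):
    """
    read a file and return a list of its lines with leading/trailing blanks removed
    (assumed behavior of the helper used above)
    """
    f    = open(ifile, 'r')
    data = [line.strip() for line in f.readlines()]
    f.close()
    return data
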
Beispiel #45
0
def dateFormatCon(elm, *arg):

    "convert various date format into a tuple of (year, month, day, hours, minutes, second, ydate) "

    if mtac.chkNumeric(elm):  #--- for the case, inputs are digits, e.g, year, month, ...

        year = elm
        cnt = len(arg)
        if cnt == 1:
            ydate = arg[0]
            #
            #--- check whether ydate is a fractional ydate or not; if so, compute hours, minutes, and seconds
            #
            diff = arg[0] - int(ydate)
            if diff == 0:
                hours = 0
                minutes = 0
                seconds = 0
            else:
                temp = 24 * diff
                hours = int(temp)
                temp2 = 60 * (temp - hours)
                minutes = int(temp2)
                temp3 = 60 * (temp2 - minutes)
                seconds = int(temp3)

            (month, day) = changeYdateToMonDate(year, int(ydate))
        elif cnt == 2:
            month = arg[0]
            day = int(arg[1])
            hours = 0  #--- no time of day given; default to 00:00:00
            minutes = 0
            seconds = 0

            ydate = findYearDate(year, month, day)
        elif cnt == 4:
            ydate = arg[0]
            hours = arg[1]
            minutes = arg[2]
            seconds = arg[3]

            (month, day) = changeYdateToMonDate(year, int(ydate))
        elif cnt == 5:
            month = arg[0]
            day = arg[1]
            hours = arg[2]
            minutes = arg[3]
            seconds = arg[4]

            ydate = findYearDate(year, month, day)
        else:
            ydate = 1
            month = 1
            day = 1
            hours = 0
            minutes = 0
            seconds = 0

    else:
        atemp = re.split('\s+', elm)
        m = re.search('\/', elm)
        m2 = re.search('\,', elm)
        n = re.search('T', elm)

        if len(atemp) == 6:  #--- for the case, e.g: Wed Apr  4 10:34:32 EDT 2012
            year = int(atemp[5])
            month = changeMonthFormat(atemp[1])
            day = int(atemp[2])
            btemp = re.split(':', atemp[3])
            hours = int(btemp[0])
            minutes = int(btemp[1])
            seconds = int(btemp[2])

            yday = float(day) + float(hours / 24.0) + float(
                minutes / 1440.0) + float(seconds / 86400)
            ydate = findYearDate(year, month, yday)

        elif (m is not None) and (m2 is not None):  #--- for the case, e.g. 03/28/12,00:00:00
            atemp = re.split('\,', elm)
            btemp = re.split('\/', atemp[0])
            year = int(btemp[2])

            if year > 90 and year < 1900:
                year += 1900
            elif year < 90:
                year += 2000

            month = int(btemp[0])
            day = int(btemp[1])

            btemp = re.split(':', atemp[1])
            hours = int(btemp[0])
            minutes = int(btemp[1])
            seconds = int(btemp[2])

            yday = float(day) + float(hours / 24.0) + float(
                minutes / 1440.0) + float(seconds / 86400)
            ydate = findYearDate(year, month, yday)

        elif n is not None:  #--- for the case, e.g. 03/28/12T00:00:00
            atemp = re.split('T', elm)
            m1 = re.search('\/', elm)
            m2 = re.search('-', elm)
            if m1 is not None:
                btemp = re.split('\/', atemp[0])
                year = int(btemp[2])

                if year > 90 and year < 1900:
                    year += 1900
                elif year < 90:
                    year += 2000

                month = int(btemp[0])
                day = int(btemp[1])

            elif m2 is not None:
                btemp = re.split('-', atemp[0])
                year = int(btemp[0])
                month = int(btemp[1])
                day = int(btemp[2])

            btemp = re.split(':', atemp[1])
            hours = int(btemp[0])
            minutes = int(btemp[1])
            seconds = int(btemp[2])

            yday = float(day) + float(hours / 24.0) + float(
                minutes / 1440.0) + float(seconds / 86400)
            ydate = findYearDate(year, month, yday)

        else:
            atemp = re.split(':', elm)
            if len(atemp) == 6:  #--- for the case yyyy:mm:dd:hh:mm:ss
                year = int(atemp[0])
                month = int(atemp[1])
                day = int(atemp[2])
                hours = int(atemp[3])
                minutes = int(atemp[4])
                seconds = int(atemp[5])

                yday = float(day) + float(hours / 24.0) + float(
                    minutes / 1440.0) + float(seconds / 86400)
                ydate = findYearDate(year, month, yday)

            else:  #--- for the case yyyy:yday:hh:mm:ss
                year = int(atemp[0])
                ydate = int(atemp[1])
                hours = int(atemp[2])
                minutes = int(atemp[3])
                seconds = int(atemp[4])
                [month, day] = changeYdateToMonDate(year, ydate)

                yday = float(day) + float(hours / 24.0) + float(
                    minutes / 1440.0) + float(seconds / 86400)
                ydate = findYearDate(year, month, yday)

    line = (year, month, day, hours, minutes, seconds, ydate)
    return line
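
This is a second, re-formatted listing of dateFormatCon. For quick reference, the branching above dispatches on the shape of the input string; the same checks, reduced to a stand-alone classifier (illustration only, hypothetical function name):

import re

def guess_date_format(elm):
    """
    classify a date string the same way dateFormatCon does
    """
    if len(re.split('\s+', elm)) == 6:
        return 'unix date, e.g. "Wed Apr  4 10:34:32 EDT 2012"'
    if re.search('/', elm) and re.search(',', elm):
        return 'mm/dd/yy,hh:mm:ss'
    if re.search('T', elm):
        return 'mm/dd/yyThh:mm:ss or yyyy-mm-ddThh:mm:ss'
    if len(re.split(':', elm)) == 6:
        return 'yyyy:mm:dd:hh:mm:ss'
    return 'yyyy:ydate:hh:mm:ss'

print(guess_date_format('2012-03-28T00:00:00'))     # --> mm/dd/yyThh:mm:ss or yyyy-mm-ddThh:mm:ss
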
Beispiel #46
0
def find_coordinate(target, cyear=''):
    """
    find coordinates of the target 
    input:  target  --- target name
            cyear   --- the year of the observation; it can be given as a fractional year.
                        if it is not given, no correction for the proper motion is applied
    output: coordinate  [ra, dec] in decimal format.
    """

#
#--- set several initial values
#
    target = target.strip()
    target = target.replace(' ', '\ ')
    tra    = 'na'
    tdec   = 'na'
    pra    = 0
    pdec   = 0
    schk   = 0
#
#--- call simbad to get the coordinate data from simbad site
#
    cmd    = 'lynx -source http://simbad.u-strasbg.fr/simbad/sim-id\?output.format=ASCII\&Ident=' + target + '>' + zspace
    os.system(cmd)

    f      = open(zspace, 'r')
    data   = [line.strip() for line in f.readlines()]
    f.close()
    mcf.rm_file(zspace)
#
#--- read the coordinates and the proper motion
#
    tchk = 0
    pchk = 0
    for ent in data:
        mc1 = re.search('Coordinates', ent)
        mc2 = re.search('Proper', ent)
        if tchk == 0 and mc1 is not None:
            try:
                atemp = re.split(':', ent)
                btemp = re.split('\s+', atemp[1])
                ahr   = float(btemp[1])
                amin  = float(btemp[2])
                asec  = float(btemp[3])
                tra   = 15.0 * (ahr + amin / 60.0 + asec / 3600.0)
     
                deg   = float(btemp[4])
                dmin  = float(btemp[5])
                dsec  = float(btemp[6])
                sign  = 1
                if deg < 0:
                    sign = -1
     
                tdec  = abs(deg) + dmin / 60.0 + dsec / 3600.0
                tdec *= sign
                tchk += 1
            except:
                schk = 1
                break


        if pchk == 0 and mc2 is not None:
            try:
                atemp = re.split(':', ent)
                btemp = re.split('\s+', atemp[1])
                pra   = btemp[1]
                pdec  = btemp[2]
                if mcf.chkNumeric(pra):
                    pra = float(pra) / 3600.0 / 1000.0
                else:
                    pra  = 0.0
                if mcf.chkNumeric(pdec):
                    pdec = float(pdec) / 3600.0 / 1000.0
                else:
                    pdec = 0.0
    
                pchk += 1
            except:
                pass
        if tchk == 1 and pchk == 1:
            break

#
#--- if the year is given, correct for the proper motion
#
    if schk == 0 and mcf.chkNumeric(cyear):
        dyear = float(cyear) - 2000.0
        try:
            tra  += dyear * pra
            tdec += dyear * pdec
        except:
            pass
        

    return [tra, tdec]
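
The Simbad parsing above converts sexagesimal coordinates into decimal degrees. The conversion itself, as a stand-alone helper with the same arithmetic (hypothetical name; the example values are illustrative only):

def sexagesimal_to_deg(hr, hmin, hsec, deg, dmin, dsec):
    """
    convert ra (hh mm ss) and dec (dd mm ss) into decimal degrees
    """
    ra   = 15.0 * (hr + hmin / 60.0 + hsec / 3600.0)
    sign = -1.0 if deg < 0 else 1.0
    dec  = sign * (abs(deg) + dmin / 60.0 + dsec / 3600.0)
    return [ra, dec]

print(sexagesimal_to_deg(22.0, 8.0, 40.8, 45.0, 44.0, 32.0))    # --> [~332.17, ~45.742]
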
Beispiel #47
0
def hrc_gain_fit_gaus(c_input):
    """
    extract hrc evt2 file, find the brightest object and create pha distribution
    Input:  c_input     --- if it is an obsid, use it as the input
                            otherwise, a list of new candidates will be created based on the database
    Output: <header>_pha.dat    --- pha distribution data
            <header>_gfit.png   --- a plot of pha data
            fitting_results     --- a table of fitted results
    """
    #
    #--- if an obsid is provided, analyze that, else get new obsids from databases
    #

    if mcf.chkNumeric(c_input):
        candidate_list = [c_input]
    else:
        candidate_list = arlist.hrc_gain_find_ar_lac()

    if len(candidate_list) > 0:
        for obsid in candidate_list:
            file = extract_hrc_evt2(obsid)
            if file == 'na':
                continue
#
#--- get a file name header for the later use
#
            temp = re.split('N', file)
            hname = temp[0]
            #
            #--- extract information from the fits file header
            #
            [
                obsid, detnam, date_obs, date_end, tstart, tstop, ra_pnt,
                dec_pnt, ra_nom, dec_nom, roll_pnt, foc_len, defocus, sim_x,
                sim_y, sim_z
            ] = find_header_info(file)
            #
            #--- find the difference between the real AR Lac position and the nominal position so that we can determine how much area we should include
            #
            ra_diff = abs(ra - ra_nom) * 60.0
            dec_diff = abs(dec - dec_nom) * 60.0
            rad_diff = math.sqrt(ra_diff * ra_diff + dec_diff * dec_diff)

            if rad_diff < 10.0:
                fit_rad = 60.0
            else:
                fit_rad = 200.0
#
#--- find a location of the brightest object (assume it is AR Lac) in sky coordinates
#
            [x, y] = find_center(file)
            #
            #--- extract pha values in the given area
            #
            pha = extract_pha(file, x, y, fit_rad)
            #
            #--- create pha count distribution
            #
            pmax = max(pha) + 1
            pha_bin = [x for x in range(0, pmax)]
            pha_hist = [0 for x in range(0, pmax)]

            for ent in pha:
                pha_hist[ent] += 1
#
#--- print out the distribution results
#
            outfile = data_dir + hname + '_pha.dat'
            fo = open(outfile, 'w')
            for i in range(0, pmax):
                line = str(pha_bin[i]) + '\t' + str(pha_hist[i]) + '\n'
                fo.write(line)
            fo.close()
            #
            #--- find median point
            #
            med = find_med(pha_hist)
            #
            #--- fit a normal distribution on the data
            #
            [amp, center, width] = fit_gauss(pha_bin, pha_hist)
            #
            #--- print out the fitting result
            #
            outfile = house_keeping + 'fitting_results'

            copied_file = outfile + '~'
            cmd = 'cp ' + outfile + ' ' + copied_file
            os.system(cmd)

            fo = open(outfile, 'a')
            line = str(obsid) + '\t' + date_obs + '\t' + str(
                tstart) + '\t' + detnam + '\t' + str(ra_pnt) + '\t' + str(
                    dec_pnt) + '\t\t'
            line = line + str(round(ra_diff, 3)) + '\t' + str(
                round(dec_diff, 3)) + '\t' + str(round(
                    rad_diff, 3)) + '\t' + str(med) + '\t\t'
            line = line + str(round(center, 3)) + '\t' + str(round(
                amp, 3)) + '\t' + str(round(width, 3)) + '\t'
            line = line + str(roll_pnt) + '\t' + str(foc_len) + '\t' + str(
                defocus) + '\t'
            line = line + str(sim_x) + '\t' + str(sim_y) + '\t' + str(
                sim_z) + '\n'
            fo.write(line)
            fo.close()
            #
            #--- plot the data
            #
            outfile = plot_dir + hname + '_gfit.png'
            plot_gauss(pha_bin, pha_hist, amp, center, width, file, outfile)
            #
            #--- remove the evt2 file
            #
            mcf.rm_file(file)
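
fit_gauss is referenced above but defined elsewhere in this package. As an illustration only, a minimal Gaussian fit with the same [amp, center, width] return order, assuming scipy is available (the package's actual fitter may differ):

import numpy
from scipy.optimize import curve_fit

def gauss(x, amp, center, width):
    return amp * numpy.exp(-0.5 * ((x - center) / width) ** 2)

def fit_gauss(pha_bin, pha_hist):
    """
    fit a gaussian to the pha histogram and return [amp, center, width]
    """
    x = numpy.array(pha_bin, dtype=float)
    y = numpy.array(pha_hist, dtype=float)
#
#--- crude starting values: peak height, weighted mean, and a width estimate
#
    a0 = y.max()
    c0 = (x * y).sum() / y.sum()
    w0 = max(numpy.sqrt(((x - c0) ** 2 * y).sum() / y.sum()), 1.0)

    popt, pcov = curve_fit(gauss, x, y, p0=[a0, c0, w0])
    return list(popt)
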
Beispiel #48
0
def exclude_sources(fits):
    """
    remove the area around the main source and all point sources from data
    input:  fits        --- input fits file name
    output: out_name    --- source removed fits file (<header>_ccd<ccd>_cleaned.fits)
    """
#
#--- read which ccds are used and several other pieces of info from the fits header
#
    cmd = ' dmlist ' + fits + ' opt=head > ' + zspace
    scf.run_ascds(cmd)

    data = scf.read_file(zspace, remove=1)

    ccd_list = []
    for ent in data:
        mc = re.search('bias file used', ent)
        if mc is not None:
            atemp = re.split('CCD', ent)
            val   = atemp[1].strip()
            ccd_list.append(val)
            continue

        for name in ['SIM_X', 'SIM_Y', 'SIM_Z', 'RA_NOM', 'DEC_NOM', 'ROLL_NOM', 'RA_TARG', 'DEC_TARG']:
            mc = re.search(name, ent)
            if mc is not None:
                lname = name.lower()
                atemp = re.split('\s+', ent)
                val   = atemp[2].strip()
                exec "%s = %s" % (lname, val)

                break
#
#--- sort ccd list
#
    ccd_list.sort()
#
#--- guess a source center position in sky coordinates from the information extracted from the header
#
    cmd = ' dmcoords none none opt=cel '
    cmd = cmd + ' ra=' + str(ra_targ)  + ' dec=' + str(dec_targ )
    cmd = cmd + ' sim="' + str(sim_x) + ' ' +  str(sim_y) + ' ' + str(sim_z) + '" ' 
    cmd = cmd + ' detector=acis celfmt=deg '
    cmd = cmd + ' ra_nom=' + str(ra_nom) + ' dec_nom=' + str(dec_nom) + ' roll_nom=' + str(roll_nom) + ' ' 
    cmd = cmd + ' ra_asp=")ra_nom" dec_asp=")dec_nom" verbose=1 >' + zspace 

    scf.run_ascds(cmd)

    data = scf.read_file(zspace, remove=1)

    for ent in data:
        mc = re.search('SKY', ent)
        if mc is not None:
            atemp = re.split('\s+', ent)
            skyx  = atemp[1]
            skyy  = atemp[2]
            break
#
#-- keep a record of the source position for later use (e.g. for evt1 processing)
#
    o_fits     = fits.replace('.gz', '')
    coord_file = o_fits.replace('.fits', '_source_coord')
    ofile      = './Reg_files/' + coord_file
    line       = str(skyx) + ':' + str(skyy) + '\n'

    fo         = open(ofile, 'w')
    fo.write(line)
    fo.close()
#
#-- remove the 200 pix radius area around the source
#
    cmd = ' dmcopy "' + fits + '[exclude sky=circle(' + skyx + ',' + skyy + ',200)]" '
    cmd = cmd + ' outfile=source_removed.fits clobber="yes"'
    scf.run_ascds(cmd)
#
#--- get the file size: it will be used to measure the size of the removed area later.
#--- the assumption here is that x-rays hit the ccd evenly, which of course they do not,
#--- but this is the best guess we can get
#
    size = {}
    for ccd in ccd_list:
        cmd = ' dmcopy "' + fits + '[ccd_id=' + str(ccd) + ']" outfile=test.fits clobber=yes'
        scf.run_ascds(cmd)
        
        cmd  = 'ls -l test.fits > ' + zspace
        os.system(cmd)

        data = scf.read_file(zspace, remove=1)

        for line in data:
            atemp = re.split('\s+', line)
            if mcf.chkNumeric(atemp[4]):
                size[ccd] = int(float(atemp[4]))
            else:
                size[ccd] = int(float(atemp[3]))

        mcf.rm_file('test.fits')
#
#--- now separate the observation into individual ccds
#
    file_list = []
    for ccd in ccd_list:
        tail = '_ccd' + str(ccd) + '.fits'
        out  = o_fits.replace('.fits', tail)
        file_list.append(out)

        cmd = ' dmcopy "source_removed.fits[ccd_id=' + ccd + ']" outfile= ' + out + ' clobber=yes'
        scf.run_ascds(cmd)

    mcf.rm_file('source_removed.fits')
#
#--- process each ccd
#
    for pfits in file_list:
        reg_file = pfits.replace('.fits', '_block_src.reg')
#
#--- find point sources
#
        cmd = ' celldetect infile=' + pfits 
        cmd = cmd + ' outfile=acisi_block_src.fits regfile=acisi_block_src.reg clobber=yes'
        scf.run_ascds(cmd)

        data = scf.read_file('acisi_block_src.reg')
        
        exclude = []
        for ent in data:
            atemp =  re.split('\,', ent)
#
#--- increase the area covered around the sources 3 times to make sure leakage from a bright source is minimized
#
            val2 = float(atemp[2]) * 3
            val3 = float(atemp[3]) * 3
            line = atemp[0] + ',' + atemp[1] + ',' + str(val2) + ',' + str(val3) +',' + atemp[4]
            exclude.append(line)

        out_name = pfits.replace('.gz','')
        out_name = out_name.replace('.fits', '_cleaned.fits')
#
#--- if we actually found point sources, remove them from the ccds
#
        e_cnt = len(exclude)
        if e_cnt  > 0:
            cnt   = 0
            chk   = 0
            round = 0
            line  = ''
            while cnt < e_cnt:
#
#--- remove 6 sources at a time so that it won't tax memory too much
#
                for i in range(cnt, cnt + 6):
                    if i >= e_cnt:
                        chk += 1
                        break

                    if line == '':
                        line = exclude[i]
                    else:
                        line = line + '+' + exclude[i]

                cnt += 6
                if round == 0:
                    cmd = ' dmcopy "' + pfits + '[exclude sky=' + line +']" outfile=out.fits clobber="yes"'
                    scf.run_ascds(cmd)
                    round += 1
                else:
                    cmd = 'mv out.fits temp.fits'
                    os.system(cmd)
                    cmd = ' dmcopy "temp.fits[exclude sky=' + line +']" outfile=out.fits clobber="yes"'
                    scf.run_ascds(cmd)
                    round += 1

                if chk > 0:
                    break 
                else:
                    line = ''

            mcf.rm_file('temp.fits')
            cmd = 'mv out.fits ' + out_name
            os.system(cmd)
        else:
            cmd = 'cp ' + pfits + ' ' + out_name
            os.system(cmd)
#
#--- find the size of the cleaned-up file
#
        cmd = 'ls -l ' + out_name + '>' + zspace
        os.system(cmd)

        data = scf.read_file(zspace, remove=1)

        for line in data:
            atemp = re.split('\s+', line)
            if mcf.chkNumeric(atemp[4]):
                asize = float(atemp[4])
            else:
                asize = float(atemp[3])
    
        for pccd in range(0, 10):
            check = 'ccd' + str(pccd)
            mc  = re.search(check,  out_name)
            if mc is not None:
                break
#
#--- compute the ratio of the cleaned file to the original file; 1 - ratio is the portion that we removed
#--- from the original data
#
        ratio = asize / float(size[str(pccd)])
#
#--- record the ratio for later use
#
        fo   = open('./Reg_files/ratio_table', 'a')
        line = reg_file + ': ' + str(ratio) + '\n'
        fo.write(line)
        fo.close()
                    
        cmd = 'mv acisi_block_src.reg ./Reg_files/' + reg_file
        os.system(cmd)
        mcf.rm_file('acisi_block_src.fits')
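
The while loop above feeds dmcopy no more than six exclusion regions per call. The same batching, reduced to a stand-alone helper (hypothetical name):

def chunk_regions(exclude, size=6):
    """
    join region strings in groups of <size>, e.g. for repeated dmcopy exclude calls
    """
    chunks = []
    for i in range(0, len(exclude), size):
        chunks.append('+'.join(exclude[i:i + size]))
    return chunks

print(chunk_regions(['r1', 'r2', 'r3'], size=2))    # --> ['r1+r2', 'r3']
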
def full_range_plot():

    """
    create long term trending plots
    Input: none but all data are read from web_dir/<MON><YEAR> directories
    Output: <web_dir>/long_term_plot.png
            <web_dir>/month_avg_img.png
            <web_dir>/month_avg_spc.png
            <web_dir>/month_avg_bi.png
    """

    ccd5_x = []
    ccd5_y = []
    ccd6_x = []
    ccd6_y = []
    ccd7_x = []
    ccd7_y = []
    for ccd in range(0, 10):

        xname = 'mccd'+ str(ccd) + '_x'
        yname = 'mccd'+ str(ccd) + '_y'
        sname = 'mccd'+ str(ccd) + '_s'
        exec "%s = []" % (xname)
        exec "%s = []" % (yname)
        exec "%s = []" % (sname)

        cmd  = 'ls ' + web_dir + '/*/ccd' + str(ccd) + ' >' + zspace
        os.system(cmd)
        try:
            f    = open(zspace, 'r')
            data = [line.strip() for line in f.readlines()]
            f.close()
            mcf.rm_file(zspace)
        except:
            continue

        for ent in data:
            f     = open(ent, 'r')
            fdata = [line.strip() for line in f.readlines()]
            f.close()
    
            time = []
            sum  = 0
            sum2 = 0
            cnt  = 0
            for line in fdata:
                atemp = re.split('\s+', line)

                if mcf.chkNumeric(atemp[0]) and mcf.chkNumeric(atemp[1]):
                    xt = float(atemp[0])
                    yt = float(atemp[1]) 
                    if xt >= 0 and xt < 20000 and yt >= 0:
                        sum += yt
                        sum2+= yt * yt
                        cnt += 1
                        time.append(xt)
#
#--- for ccd 5, 6, and 7, we need 5 min average data (then normalized to cnt/sec)
#
                        if ccd  == 5:
                            ccd5_x.append(xt)
                            ccd5_y.append(yt/300.0)
                        if ccd  == 6:
                            ccd6_x.append(xt)
                            ccd6_y.append(yt/300.0)
                        if ccd  == 7:
                            ccd7_x.append(xt)
                            ccd7_y.append(yt /300.0)
#
#--- get monthly average
#
            date = int(0.5 * (min(time) + max(time)))
            avg  = sum / float(cnt) / 300.0
            sig  = sqrt(sum2 / float(cnt) / 90000.0 - avg * avg)

            xname = 'mccd'+ str(ccd) + '_x'
            yname = 'mccd'+ str(ccd) + '_y'
            sname = 'mccd'+ str(ccd) + '_s'
            exec "%s.append(%s)" % (xname , date)
            exec "%s.append(%s)" % (yname , avg)
            exec "%s.append(%s)" % (sname , sig)
#
#--- plotting starts here
#
    xname = 'Time (DOM)'
    yname = 'Counts/Sec'
#
#---- ccd 7 full history
#
    title = 'ACIS Count Rate: CCD 7'
    outname = web_dir + '/acis_ccd7_dose_plot.png'

    plot_panel(ccd7_x, ccd7_y, xname, yname, title, outname, autox='yes')
#
#--- long term plot (for ccd 5, 6, and 7)
#
    x_set_list = [ccd5_x, ccd6_x, ccd7_x]
    y_set_list = [ccd5_y, ccd6_y, ccd7_y]
    yname_list = [yname,  yname,  yname]
    title_list = ['CCD5', 'CCD6', 'CCD7']
    outname    = web_dir + '/long_term_plot.png'
    y_limit    = [1000, 1000, 1000]

    plot_multi_panel(x_set_list, y_set_list, xname, yname_list, title_list, outname, ylim =2, y_limit=y_limit, autox='yes')

#
#--- imaging ccds full history
#
    x_set_list = [mccd0_x, mccd1_x, mccd2_x, mccd3_x]
    y_set_list = [mccd0_y, mccd1_y, mccd2_y, mccd3_y]
    yname_list = [yname,  yname,  yname,  yname]
    title_list = ['CCD0', 'CCD1', 'CCD2', 'CCD3']
    outname    = web_dir + '/month_avg_img.png'

    plot_multi_panel(x_set_list, y_set_list, xname, yname_list, title_list, outname, linew=0, mrk='+', ylim=1, autox='yes')
#
#--- spectral ccds full history
#
    x_set_list = [mccd4_x, mccd6_x, mccd8_x, mccd9_x]
    y_set_list = [mccd4_y, mccd6_y, mccd8_y, mccd9_y]
    yname_list = [yname,  yname,  yname,  yname]
    title_list = ['CCD4', 'CCD6', 'CCD8', 'CCD9']
    outname    = web_dir + '/month_avg_spc.png'

    plot_multi_panel(x_set_list, y_set_list, xname, yname_list, title_list, outname, linew=0, mrk='+', ylim=1, autox='yes')
#
#--- backside ccds full history
#
    x_set_list = [mccd5_x, mccd7_x]
    y_set_list = [mccd7_y, mccd7_y]
    yname_list = [yname,  yname,  yname,  yname]
    title_list = ['CCD5', 'CCD7']
    outname    = web_dir + '/month_avg_bi.png'

    plot_multi_panel(x_set_list, y_set_list, xname, yname_list, title_list, outname, linew=0, mrk='+', ylim=1, autox='yes')

    plot_multi_panel(x_set_list, y_set_list, xname, yname_list, title_list, outname, autox='yes')
#
#--- write out monthly average data
#
    for ccd in range(0, 10):
        name = web_dir + 'monthly_avg_data_ccd' + str(ccd) + '.dat'
        f    = open(name, 'w')
        exec 'xdat = mccd%s_x' % (ccd)
        exec 'ydat = mccd%s_y' % (ccd)
        exec 'ysig = mccd%s_s' % (ccd)
        xdat =numpy.array(xdat)
        ydat =numpy.array(ydat)
        ysig =numpy.array(ysig)
        sorted_index = numpy.argsort(xdat)
        xsorted = xdat[sorted_index]
        ysorted = ydat[sorted_index]
        ssorted = ysig[sorted_index]

        for i in range(0, len(xsorted)):
            yrnded  = '%.3f' % ysorted[i]
            ysrnded = '%.3f' % ssorted[i]
            line  = str(xsorted[i]) + '\t' + yrnded + '+/-' + ysrnded + '\n'
            f.write(line)
        f.close()
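
The exec calls above build the per-ccd list names (mccd<ccd>_x and so on) dynamically. The same bookkeeping can be done with a dictionary keyed by ccd number, which avoids exec entirely; a minimal sketch of that alternative (not the script's own code):

mccd = {}
for ccd in range(0, 10):
    mccd[ccd] = {'x': [], 'y': [], 's': []}
#
#--- then, instead of the exec "%s.append(%s)" calls:
#---     mccd[ccd]['x'].append(date)
#---     mccd[ccd]['y'].append(avg)
#---     mccd[ccd]['s'].append(sig)
#
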
def read_unit_list():
    """
    read unit list and make into a dictionary form
    input: none but read from <house_keeping>/unit_list
    output: udict   --- a dictionary of <msid> <---> <unit>
    """
#
#--- read the main unit file and description of msid
#
    ulist = house_keeping + 'unit_list'
    data  = read_file_data(ulist)

    udict = {}
    ddict = read_description_from_mta_list()

    for ent in data:
        atemp = re.split('\s+', ent)
        try:
            udict[atemp[0]] = atemp[1]
        except:
            pass
#
#--- read the dataseeker unit list and replace entries if they are not the same
#
    ulist = house_keeping + 'dataseeker_entry_list'
    data  = read_file_data(ulist)
    for ent in data:
        if ent[0] == '#':
            continue
        atemp = re.split('\t+', ent)
        if len(atemp) < 3:
            continue

        msid =atemp[0].lower()
        if mcf.chkNumeric(atemp[2]) == False:
            if atemp[2] != '':
                udict[msid] =  atemp[2]
        else:
            try:
                test = udict[msid]
            except:
                udict[msid] =  ''
        ddict[msid] = atemp[-1]
#
#--- further, read the supplemental lists
#
    ulist = house_keeping + 'unit_supple'
    data  = read_file_data(ulist)
    for ent in data:
        atemp = re.split('\s+', ent)
        udict[atemp[0]] = atemp[1]

    dlist = house_keeping + 'description_supple'
    data  = read_file_data(dlist)
    for ent in data:
        atemp = re.split('\:\:', ent)
        msid  = atemp[0].strip()
        descr = atemp[1].strip()
        ddict[msid] = descr

    return [udict, ddict]
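
The two dictionaries returned above are keyed by msid, so downstream code can look up a unit and a description with plain dict access. A short, illustrative usage sketch (the msid and values below are made up):

udict = {'examplemsid': 'K'}
ddict = {'examplemsid': 'example temperature msid'}

msid  = 'examplemsid'
unit  = udict.get(msid, '')         # '' when the msid has no unit entry
descr = ddict.get(msid, 'na')       # 'na' when no description is listed
print(msid + ' [' + unit + '] : ' + descr)
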
def create_history_file(head):

    """
    create count history file and the information file containing current bad entry information
    Input:  head                --- ccd, hccd, or col to indicate which data to handle
    Output: <head>_ccd<ccd>_cnt --- count history data: <dom><><year:ydate><><cumulative cnt><><cnt for the day>
            <head>_ccd<ccd>_information --- current information on the bad entries, for example, lists of warm pixels,
                                            flickering pixels, totally new pixels, and all past and current warm pixels.

    """

    for ccd in range(0, 10):
#
#--- read the data file; head is either ccd, hccd, or col
#
        file = data_dir + 'Disp_dir/hist_' + head + str(ccd)
        data = mcf.readFile(file)

        bad_dat_list = []                       #--- save all bad data as elements
        bad_dat_save = []                       #--- save all bad data as a list for each day

        dom   = []
        ydate = []
        dcnt  = []                              #--- keep discrete count history
        ccnt  = []                              #--- keep cumulative count history
        new   = []                              #--- keep totally new bad entries in the last 5 days
        pcnt  = 0
        k     = 0
        tot   = len(data)

        for ent in data:
#
#--- read only data entries written in a correct format: <dom><><year>:<ydate><>:<bad_data>...
#
            atemp = re.split('<>', ent)
            chk1  = mcf.chkNumeric(atemp[0])
            btemp = re.split(':', atemp[1])
            chk2  = mcf.chkNumeric(btemp[1])

            if (chk1 == True) and (int(atemp[0]) > 0)  and (chk2 == True) and (int(btemp[1]) > 0):
                dom.append(atemp[0])
                ydate.append(atemp[1])
#
#--- check whether bad data are recorded for the given day
#
                if head == 'ccd' or head == 'hccd':
                    m1 = re.search('\(', atemp[2])
                else:
                    btemp = re.split(':', atemp[2])                 #--- case for warm columns
                    if mcf.chkNumeric(btemp[len(btemp) -1]):
                        m1 = 'OK'
                    else:
                        m1 = None

                if m1 is not None:
                    btemp = re.split(':', atemp[2])
                    if btemp != '':
                        dcnt.append(len(btemp))
#
#--- for the last five days, check whether any totally new bad entries exist
#
                    if k > tot - 5:
                        for test in btemp:
                            chk = 0
                            for comp in bad_dat_list:
                                if test == comp:
                                    chk = 1
                                    continue
                            if chk == 0:
                                new.append(test)

                    bad_dat_list = bad_dat_list + btemp
                    out          = list(set(bad_dat_list))

                    pcnt         = len(out)
                    ccnt.append(pcnt)
                    bad_dat_save.append(btemp)
                else:
                    dcnt.append(0)
                    bad_dat_save.append([])
                    ccnt.append(pcnt)

            k += 1                                                  #--- k is incremented to check the last 5 days
#
#--- find out which entries are warm/hot and flickering
#
        [warm, flick, b_list, p_list]=  find_warm_and_flickering(bad_dat_save)
#
#--- open output file to print current information
#
        line = data_dir + '/Disp_dir/'+ head + str(ccd) + '_information'
        fo   = open(line, 'w')

        fo.write("warm:\t")
        print_data(fo, warm)

        fo.write('flick:\t')
        print_data(fo, flick)

        fo.write('new:\t')
        out = list(set(new))
        print_data(fo, out)

        fo.write('past:\t')
        out = list(set(bad_dat_list))
        print_data(fo, out)

        fo.close()

#
#--- open output file to print out count history
#
        ofile = data_dir + 'Disp_dir/' + head + str(ccd) + '_cnt'
        fo    = open(ofile, 'w')

        for i in range(0, len(dom)):
            if i < 13:
                line = dom[i] + '<>' + ydate[i] + '<>' + str(ccnt[i]) + '<>'  + str(dcnt[i]) + '<>0<>0\n'
            else:
                line = dom[i] + '<>' + ydate[i] + '<>' + str(ccnt[i]) + '<>'  + str(dcnt[i]) + '<>'+ str(b_list[i-13]) + '<>' + str(p_list[i-13]) + '\n'

            fo.write(line)

        fo.close()
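
The cumulative count ccnt kept above is simply the number of distinct bad entries seen so far. The same idea, reduced to a stand-alone example (hypothetical helper name):

def cumulative_unique_counts(daily_lists):
    """
    given per-day lists of bad entries, return the running count of distinct entries
    """
    seen   = set()
    counts = []
    for day in daily_lists:
        seen.update(day)
        counts.append(len(seen))
    return counts

print(cumulative_unique_counts([['a'], ['a', 'b'], []]))    # --> [1, 2, 2]
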