Code example #1
def remove_na_from_lists(alist, blist):
    """
    keep only the pairs for which both entries are numeric
    input:  alist, blist    --- two lists of the same length
    output: [out1, out2]    --- the filtered lists
    """
    out1 = []
    out2 = []
    for k in range(len(alist)):
        if mcf.is_neumeric(alist[k]) and mcf.is_neumeric(blist[k]):
            out1.append(alist[k])
            out2.append(blist[k])

    return [out1, out2]
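Every example on this page calls mcf.is_neumeric() (presumably from the project's shared mta_common_functions module) to test whether a value can be treated as a number. A minimal sketch of what such a helper might look like, purely for orientation (hypothetical; the real implementation may differ):

def is_neumeric(val):
    """
    return True if val can be interpreted as a number (sketch, not the real mcf code)
    """
    try:
        float(val)
        return True
    except (TypeError, ValueError):
        return False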
Code example #2
File: acis_dose_get_data.py Project: chandra-mta/MTA
def find_10th(fits_file):
    """
    find 10th brightest value    
    input: fits_file    --- image fits file name
    output: 10th brightest value
    """
    #
    #--- make histogram
    #
    cmd = ' dmimghist infile=' + fits_file
    cmd = cmd + '  outfile=outfile.fits hist=1::1 strict=yes clobber=yes'
    expf.run_ascds(cmd)

    cmd = ' dmlist infile=outfile.fits outfile=./zout opt=data'
    expf.run_ascds(cmd)

    data = mcf.read_data_file('./zout', remove=1)
    mcf.rm_files('outfile.fits')
    #
    #--- read bin # and its count rate
    #
    hbin = []
    hcnt = []

    for ent in data:
        try:
            atemp = re.split('\s+|\t+', ent)
            if (len(atemp) > 3) and mcf.is_neumeric(atemp[1])  \
                    and mcf.is_neumeric(atemp[2])  and (int(atemp[4]) > 0):
                hbin.append(float(atemp[1]))
                hcnt.append(int(atemp[4]))
        except:
            pass
#
#--- checking the 10th brightest position
#
    try:
        j = 0
        for i in range(len(hbin) - 1, 0, -1):
            if j == 9:
                val = i
                break
            else:
                #
                #--- only when the value is larger than 0, record as count
                #
                if hcnt[i] > 0:
                    j += 1

        return hbin[val]
    except:
        return 'I/INDEF'
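The descending loop above is easy to misread: it walks the histogram from the brightest bin downward, counts populated bins, and returns the bin value just after the ninth populated one (or 'I/INDEF' when there are not enough). A standalone sketch of the underlying idea, written as a hypothetical helper rather than a drop-in replacement:

def tenth_brightest_bin(hbin, hcnt):
    """
    return the bin value of the 10th populated bin counted from the top (sketch)
    """
    hit = 0
    for b, c in zip(reversed(hbin), reversed(hcnt)):
        if c > 0:
            hit += 1
            if hit == 10:
                return b
    return 'I/INDEF'    # fewer than 10 populated bins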
Code example #3
def set_format_for_col(name, cdata):
    """
    find a format of the input data and set column object
    input:  name    --- column name
            cdata   --- column data in numpy array form
    output: column object
    """
    test = str(list(cdata)[0])
    mc1 = re.search('True', test)
    mc2 = re.search('False', test)
    #
    #--- check whether the value is numeric
    #
    if mcf.is_neumeric(test):
        ft = 'E'
    #
    #--- check whether the value is logical
    #
    elif (mc1 is not None) or (mc2 is not None):
        tcnt = len(list(cdata)[0])
        ft = str(tcnt) + 'L'
    #
    #--- all others are set as character set
    #
    else:
        tcnt = len(test)
        ft = str(tcnt) + 'A'

    return pyfits.Column(name=name, format=ft, array=cdata)
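For context, the column object returned above can be dropped straight into a binary table HDU. A minimal usage sketch with astropy.io.fits (imported here under the pyfits name the examples use; the column name and data are made up):

import numpy as np
from astropy.io import fits as pyfits

cdata = np.array([1.0, 2.5, 3.7])
col = pyfits.Column(name='rate', format='E', array=cdata)   # 'E' --- 32-bit float column
hdu = pyfits.BinTableHDU.from_columns([col])
hdu.writeto('example.fits', overwrite=True)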
Code example #4
def find_processed_data(inst):
    """
    find the hrc obsids which are already re-processed
    input:  inst    --- instrument designation: "i" or "s"
    output: out     --- a list of obsids
    """
    if inst == 'i':
        data_dir = '/data/hrc/i/'
    else:
        data_dir = '/data/hrc/s/'

    cmd = 'ls -d ' + data_dir + '/* > ' + zspace
    os.system(cmd)
    data = mcf.read_data_file(zspace, remove=1)

    out = []
    for ent in data:
        atemp = re.split('\/', ent)
        try:
            val = int(float(atemp[-1]))
        except:
            continue
        if mcf.is_neumeric(val):
            out.append(val)
#
#--- remove duplicates
#
    oset = set(out)
    out = list(oset)

    return out
Code example #5
def adjust_vega_position(cyear=''):
    """
    adjust Vega position with proper motion
    input:  cyear   --- the year of the observation; it can be a fractional year.
                        if it is not given, epoch 2000.0 is used (no proper motion correction)
    output: coordinate  [ra, dec] in decimal format.
    """
    if not mcf.is_neumeric(cyear):
        cyear = 2000.0
#
#--- set several initial values
#
    pfile = house_keeping + 'vega_pos'
    with open(pfile, 'r') as f:
        out = f.read()
    atemp = re.split('::', out)
    tra = float(atemp[0])
    tdec = float(atemp[1])
    pra = float(atemp[2]) / 3600.0 / 1000.0
    pdec = float(atemp[3]) / 3600.0 / 1000.0

    dyear = float(cyear) - 2000.0

    tra += dyear * pra
    tdec += dyear * pdec

    return [tra, tdec]
Code example #6
File: exposureFunctions.py Project: chandra-mta/MTA
def three_sigma_values(fits_file):
    """
    find 1, 2, and 3 sigma values of an image fits file
    input:  fits_file   --- input image fits file name
    output: sigma1  --- one sigma value
            sigma2  --- two sigma value
            sigma3  --- three sigma value
    """
#
#--- make histogram
#
    cmd = ' dmimghist infile=' + fits_file
    cmd = cmd + '  outfile=outfile.fits hist=1::1 strict=yes clobber=yes'
    run_ascds(cmd)
    
    cmd = ' dmlist infile=outfile.fits outfile=' + zspace + ' opt=data'
    run_ascds(cmd)
    
    data = mcf.read_data_file(zspace, remove=1)
#
#--- read bin # and its count rate
#
    hbin = []
    hcnt = []
    vsum = 0
    
    for ent in data:
        atemp = re.split('\s+|\t+', ent)
        if mcf.is_neumeric(atemp[0]):
            hbin.append(float(atemp[1]))
            val = int(atemp[4])
            hcnt.append(val)
            vsum += val
#
#--- checking one, two and three sigma counts
#

    if len(hbin) > 0:
        v68    = int(0.68 * vsum)
        v95    = int(0.95 * vsum)
        v99    = int(0.997 * vsum)
        sigma1 = -999
        sigma2 = -999
        sigma3 = -999
        acc= 0
        for i in range(0, len(hbin)):
            acc += hcnt[i]
            if acc > v68 and sigma1 < 0:
                sigma1 = hbin[i]
            elif acc > v95 and sigma2 < 0:
                sigma2 = hbin[i]
            elif acc > v99 and sigma3 < 0:
                sigma3 = hbin[i]
                break
    
        return [sigma1, sigma2, sigma3]
    
    else:
        return [0, 0, 0]
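The running-sum loop above looks for the bins at which the cumulative histogram passes 68%, 95% and 99.7% of the total counts. The same search can be written compactly with numpy; a standalone sketch under the assumption that hbin and hcnt are plain lists as built above (not part of the original module):

import numpy as np

def sigma_levels(hbin, hcnt):
    """
    return the bin values where the cumulative counts first exceed 68%, 95% and 99.7% (sketch)
    """
    hbin = np.asarray(hbin, dtype=float)
    hcnt = np.asarray(hcnt, dtype=float)
    if hbin.size == 0 or hcnt.sum() <= 0:
        return [0, 0, 0]
    cum = np.cumsum(hcnt) / hcnt.sum()
    out = []
    for frac in (0.68, 0.95, 0.997):
        idx = min(int(np.searchsorted(cum, frac, side='right')), hbin.size - 1)
        out.append(hbin[idx])
    return out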
Code example #7
def find_file_and_mp(n_list, nchk=0):
    """
    find all *.or files and their mp for given directory
    input:  n_list  --- a list of directories
            nchk    --- an indicator of a certain operation (check the next month case)
    output: f_list  --- a list of directories
            u_list  --- a list of mp responsible for the directories
    """
    #
    #--- find all files with .or ending
    #
    f_list = []
    u_list = []
    for idir in n_list:
        atemp = re.split('\s+', idir)
        tdir = atemp[-1]
        line = ''
        chk = tdir + '/input/'
        if os.path.isdir(chk):
            if check_orfiles(chk):
                line = line + chk + '*.or '

        chk = tdir + '/scheduled/'
        if os.path.isdir(chk):
            if check_orfiles(chk):
                line = line + chk + '*.or '

        chk = tdir + '/pre_scheduled/'
        if os.path.isdir(chk):
            if check_orfiles(chk):
                line = line + chk + '*.or '

        cmd = 'ls -lrt ' + line + '> ' + zspace
        os.system(cmd)

        sd_list = mcf.read_data_file(zspace, remove=1)
        sd_list.reverse()
        #
        #--- use the latest valid file
        #
        for dfile in sd_list:
            mc = re.search('\.or', dfile)
            if mc is None:
                continue

            try:
                atemp = re.split('\/', dfile)
                btemp = re.split('\.', atemp[-1])
                ctemp = re.split('_', btemp[0])
                chk = ctemp[1][0]
            except:
                continue

            if mcf.is_neumeric(chk) or (nchk == 0 and (chk in chr_list)):
                dtemp = re.split('\s+', dfile)
                f_list.append(dtemp[-1])
                u_list.append(dtemp[2])

    return [f_list, u_list]
Code example #8
def extract_date_from_old_html(ifile):
    """
    read the original html page created by flt_run_pipe
    input:  ifile   --- original html file
    output: save    --- a list of lists of data
            tsave   --- a list of starting and stopping times in seconds from 1998.1.1
    """
    data = mcf.read_data_file(ifile)
    #
    #--- save the readings in a matrix; initialize with 0.0
    #
    save = []
    tsave = []
    for k in range(0, 14):
        alist = []
        for m in range(0, 7):
            alist.append(0.0)

        save.append(alist)
#
#--- find the section of each instrument by looking for the instrument name
#
    dchk = 0
    for ent in data:
        #
        #--- checking starting and stopping time
        #
        if dchk < 2:
            dmc = re.search('Seconds since', ent)
            if dmc is not None:
                atemp = re.split('\s+', ent)
                stime = float(atemp[0])
                tsave.append(stime)
                dchk += 1

        for n in range(0, 14):
            mc = re.search(inst_list[n], ent)
            if mc is not None:
                k = n
                m = 0
                break
#
#--- assume that the value appears without any html coding around
#
        if mcf.is_neumeric(ent):
            if m < 7:
                save[k][m] = float(ent)
                m += 1

    return save, tsave
Code example #9
def chk_proccess_status(inst, hlist):
    """
    check whether new processed data actually exist, and if so send out a notification
    input:  inst    --- instrument: i or s
            hlist   --- a list of obsid
    output: email sent out 
    """

    if inst == 'i':
        data_dir = '/data/hrc/i/'
    else:
        data_dir = '/data/hrc/s/'

    cmd = 'ls ' + data_dir + '* > ' + zspace
    os.system(cmd)
    data = mcf.read_data_file(zspace, remove=1)
    d_list = []
    for ent in data:
        if mcf.is_neumeric(ent):
            d_list.append(int(float(ent)))

    done = []
    for obsid in hlist:
        if obsid in d_list:
            done.append(obsid)

    if len(done) > 0:
        line = 'Following obsids are processed for hrc-' + str(inst) + ':\n'
        for obsid in done:
            line = line + '\t' + str(obsid) + '\n'
#
#--- change the status of processed data
#
            cmd = 'chgrp -R hat ' + data_dir + str(obsid)
            os.system(cmd)
            cmd = 'find ' + data_dir + ' -type d -user isobe -exec chmod a+rx,ug+w,o-w {} \\;'
            os.system(cmd)
            cmd = 'chmod -fR a+r,g+w,o-w ' + data_dir + str(obsid)
            os.system(cmd)


        with open(zspace, 'w') as fo:
            fo.write(line)

        cmd = 'cat ' + zspace + ' |mailx -s "Subject: HRC Obs Re-processed" [email protected]'
        os.system(cmd)
        cmd = 'cat ' + zspace + ' |mailx -s "Subject: HRC Obs Re-processed" [email protected]'
        os.system(cmd)

        mcf.rm_files(zspace)
Code example #10
def extract_hist_data(ifile):
    """
    extracting acis hist data from fits file 
    input:  ifile       ---  fits file name
    output: hist_data   --- a list of histogram data
    """
    tdata = Table.read(ifile, hdu=1)
    cols = tdata.columns
    sdata = tdata['COUNTS']

    hist_data = []
    for ent in sdata:
        if mcf.is_neumeric(ent):
            hist_data.append(float(ent))

    return hist_data
Code example #11
def correct_hrc_file_naming():
    """
    correct wrong naming of data files
        example: hrcf59_evt2.fits ---> hrcf00059_evt2.fits
    input:  none, but read from /data/hrc/<inst>/
    output: corrected file names
    """
    for inst in ['s', 'i']:
        cmd = 'ls -d /data/hrc/'+ inst + '/* > ' + zspace
        os.system(cmd)
    
        data = mcf.read_data_file(zspace, remove=1)
        obsid_list = []
        for ent in data:
            atemp = re.split('\/', ent)
            obsid = atemp[-1]
            if mcf.is_neumeric(obsid):
                correct_naming(obsid, inst)
Code example #12
def hrc_gain_run(c_input):
    """
    extract hrc evt2 file, find the brightest object and create pha distribution
    this is a control script

    Input:  c_input     --- if it is an obsid, use it as the input;
                            otherwise, a list of new candidates will be created based on the database
    Output: <header>_pha.dat    --- pha distribution data
            <header>_gfit.png   --- a plot of pha data
            fitting_results     --- a table of fitted results
    """
#
#--- if an obsid is provided, analyze it; otherwise get new obsids from the databases
#
    if mcf.is_neumeric(c_input):
        candidate_list = [c_input]

    elif isinstance(c_input, list):
        candidate_list = c_input

    else:
        out = ead.extract_calb_arlac()
        candidate_list = out[0] + out[1]
#
#--- run the main script only when candidates exist
#
    if len(candidate_list) > 0:
#
#--- analyze data
#
        chk = hgfv.hrc_gain_fit_voigt(candidate_list)
#
#--- create plots
#
        if chk == True:
            obt.order_by_time()
            hgtp.hrc_gain_trend_plot()
    
            send_notification()
            
            cmd = 'rm -rf ' + exc_dir + 'hrcf*fits*'
            os.system(cmd)
Code example #13
def read_ede_data(ifile):
    """
    read data from a given file
    Input:  ifile       --- input file name
    Output: date_list   --- a list of date
            ede_list    --- a list of ede value
            error_list  --- a list of computed ede error
    """
    data = mcf.read_data_file(ifile)

    date_list = []
    date_list2 = []
    ede_list = []
    error_list = []
    for ent in data:
        atemp = re.split('\s+', ent)
        if not mcf.is_neumeric(atemp[0]):
            continue

        fwhm = float(atemp[2])
        ferr = float(atemp[3])
        ede = float(atemp[4])
        date = atemp[5]
        sdate = float(atemp[6])

        stime = Chandra.Time.DateTime(date).secs
        fyear = mcf.chandratime_to_fraq_year(stime)

        date_list.append(fyear)
        date_list2.append(sdate)
        ede_list.append(ede)
        #
        #--- the error of EdE is computed using FWHM and its error value
        #
        error = math.sqrt(ede * ede * ((ferr * ferr) / (fwhm * fwhm)))

        error_list.append(error)

    return [date_list, date_list2, ede_list, error_list]
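Since E/dE is proportional to 1/FWHM, the relative error of EdE equals the relative error of the FWHM, so the square-root expression above collapses to a simple ratio. A quick standalone check with made-up numbers (not part of the module):

import math

fwhm, ferr, ede = 12.0, 0.6, 900.0                                   # made-up values
err_full   = math.sqrt(ede * ede * (ferr * ferr) / (fwhm * fwhm))    # the form used above
err_simple = ede * ferr / fwhm                                       # the equivalent ratio
assert math.isclose(err_full, err_simple)                            # both give 45.0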
Code example #14
def adjust_hz43_position(date):
    """
    adjust HZ43 position with proper motion
            13 16 21.85329::+29 05 55.3793::-157.96::-110.23
    input:  date    in <yyyy>-<mm>-<dd>T<hh>:<mm>:<ss>
                        if it is not given, no correction for proper motion is applied
    output: coordinate  [ra, dec] in decimal format.
    """
    #
    #--- evt1 DATE-OBS to fractional year
    #
    if mcf.is_neumeric(date):
        cyear = float(date)
    else:
        cyear = hcf.convert_time_to_fyear(date)
#
#--- set several initial values
#
    pfile = house_keeping + 'hz43_pos'
    with open(pfile, 'r') as f:
        out = f.read()
    atemp = re.split('::', out)
    tra = float(atemp[0])
    tdec = float(atemp[1])
    pra = float(atemp[2]) / 3600.0 / 1000.0
    pdec = float(atemp[3]) / 3600.0 / 1000.0

    dyear = float(cyear) - 2000.0

    tra += dyear * pra
    tdec += dyear * pdec
    #
    #--- convert the format to sexagesimal notation
    #
    [ra, dec] = hcf.convert_ra_dec(tra, tdec)

    return [ra, dec]
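Both position routines apply the same arithmetic: a proper motion given in milliarcseconds per year is converted to degrees per year (divide by 1000 and then by 3600) and scaled by the number of years elapsed since epoch 2000.0. A standalone sketch with values loosely based on the HZ43 entry quoted in the docstring above:

# catalogue position in degrees, proper motion in mas/yr (approximate HZ43 values)
tra, tdec = 199.09106, 29.09872
pm_ra, pm_dec = -157.96, -110.23

cyear = 2023.5                                   # fractional year of the observation
dyear = cyear - 2000.0                           # years since the J2000 epoch

ra  = tra  + dyear * pm_ra  / 3600.0 / 1000.0    # mas/yr -> deg/yr, then scale by elapsed years
dec = tdec + dyear * pm_dec / 3600.0 / 1000.0
print(ra, dec)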
Code example #15
def create_data_tables(table):
    """
    read input table of observations and create data tables
    input:  table   --- a file which contains a list of observations
                        either obsid list or a list in which the first column is obsid
    output: <data_dir>/hrc_<det/pos>_results 
    """
    data = mcf.read_data_file(table)

    for ent in data:
        if mcf.is_neumeric(ent):
            obsid = ent.strip()
        else:
            atemp = re.split('\s+', ent)
            obsid = atemp[0]

        if not mcf.is_neumeric(obsid):
            continue

        print(str(obsid))

        fits = hcf.run_arc5gl(0, 0, obsid=obsid, level='2', filetype='evt2')

        if fits == False:
            write_on_skip_file(obsid)
            print("Data is not extracted")
            continue
#
#--- if there are multiple output, use only first one
#
        if isinstance(fits, list):
            fits = fits[0]

        try:
            out = extract_count_stats(fits)
        except:
            cmd = 'rm -f ' + fits + '*'
            os.system(cmd)
            write_on_skip_file(obsid)
            print("Analysis Failed")
            continue

        if out[-1] < 0:
            cmd = 'rm -f ' + fits + '*'
            os.system(cmd)
            write_on_skip_file(obsid)
            print("No Output")
            continue

        line = str(obsid) + '\t'

        if float(obsid) < 1000:
            line = line + '\t'

        line = line + str(fits) + '\t'
        line = line + out[7] + '\t'
        line = line + '%2.1f' % round(out[6], 1) + '\t'
        line = line + '%2.2f' % round(out[5], 2) + '\t'
        line = line + '%2.2f' % round(out[8], 2) + '\t'
        line = line + '%2.4f' % round(out[9], 4) + '\n'

        if out[-1] == 0:
            outfile = data_dir + 'hrc_s_0_results'
        if out[-1] == 1:
            outfile = data_dir + 'hrc_s_10_results'
        if out[-1] == 2:
            outfile = data_dir + 'hrc_s_25_results'
        if out[-1] == 3:
            outfile = data_dir + 'hrc_s_m10_results'
        if out[-1] == 4:
            outfile = data_dir + 'hrc_s_m25_results'

        if out[-1] == 10:
            outfile = data_dir + 'hrc_i_0_results'
        if out[-1] == 11:
            outfile = data_dir + 'hrc_i_10_results'
        if out[-1] == 12:
            outfile = data_dir + 'hrc_i_25_results'
        if out[-1] == 13:
            outfile = data_dir + 'hrc_i_m10_results'
        if out[-1] == 14:
            outfile = data_dir + 'hrc_i_m25_results'

        with open(outfile, 'a') as fo:
            fo.write(line)

        cmd = 'rm -f *fits*'
        os.system(cmd)
Code example #16
def check_time_format(intime):
    """
    return time in Chandra time
    input:  intime  --- time in <yyyy>:<ddd>:<hh>:<mm>:<ss> 
                        or <yyyy>-<mm>-<dd>T<hh>:<mm>:<ss> or chandra time
    output: year    --- the year
            time in chandra time (seconds from 1998.1.1)
    """
    mc1 = re.search('-', intime)
    mc2 = re.search(':', intime)
    #
    #--- it is already in chandra time format
    #
    if mcf.is_neumeric(intime):
        out = Chandra.Time.DateTime(intime).date
        atemp = re.split(':', out)
        year = int(atemp[0])
        return [year, int(float(intime))]
#
#--- time in <yyyy>-<mm>-<dd>T<hh>:<mm>:<ss>
#
    elif mc1 is not None:
        mc2 = re.search('T', intime)
        if mc2 is not None:
            atemp = re.split('T', intime)
            btemp = re.split('-', atemp[0])
            year = int(float(btemp[0]))
            mon = int(float(btemp[1]))
            day = int(float(btemp[2]))
            ctemp = re.split(':', atemp[1])
            hrs = ctemp[0]
            mins = ctemp[1]
            secs = ctemp[2]

        else:
            btemp = re.split('-', intime)
            year = int(float(btemp[0]))
            mon = int(float(btemp[1]))
            day = int(float(btemp[2]))
            hrs = '00'
            mins = '00'
            secs = '00'

        yday = datetime.date(year, mon, day).timetuple().tm_yday

        cyday = str(yday)
        if yday < 10:
            cyday = '00' + cyday
        elif yday < 100:
            cyday = '0' + cyday

        ytime = btemp[0] + ':' + cyday + ':' + hrs + ':' + mins + ':' + secs

        return [year, Chandra.Time.DateTime(ytime).secs]
#
#--- time in <yyyy>:<ddd>:<hh>:<mm>:<ss>
#
    elif mc2 is not None:

        atemp = re.split(':', intime)
        year = int(atemp[0])
        return [year, Chandra.Time.DateTime(intime).secs]
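The manual zero-padding of the day-of-year above can be done in one step with strftime; a small standalone sketch of the same conversion using only the standard library (hypothetical helper name):

import datetime

def ymd_to_doy_string(year, mon, day):
    """
    return the zero-padded day-of-year string for a calendar date (sketch)
    """
    return datetime.date(year, mon, day).strftime('%j')

print(ymd_to_doy_string(2024, 2, 5))    # prints '036'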
Code example #17
def run_process(hrc):
    """
    a control script to run reprocess csh scripts 
    input:  hrc: either "hrc_i" or "hrc_s"
    output: hrc_i_list  --- a list of hrc i obsids which need to be re-processed
            hrc_s_list  --- a list of hrc s obsids which need to be re-processed
            <data_dir>/<obsid>    --- re-processed data directory
    """
    #
    #--- set conditions for either hrc-i or hrc s
    #
    if hrc == 'hrc_i':
        out_list = 'hrc_i_list'
        data_dir = '/data/hrc/i/'
        inst = 'i'
    else:
        out_list = 'hrc_s_list'
        data_dir = '/data/hrc/s/'
        inst = 's'
#
#--- make a list of obsids
#
    cmd = 'ls -d ' + data_dir + '* > ' + zspace
    os.system(cmd)
    data = mcf.read_data_file(zspace, remove=1)
    hlist = []
    for ent in data:
        atemp = re.split('\/', ent)
        obsid = atemp[-1]
        if mcf.is_neumeric(obsid):
            hlist.append(obsid)

#    if hrc == 'hrc_i':
#        print("HRC I : " + str(hlist))
#    else:
#        print("HRC S : " + str(hlist))
#
    for obsid in hlist:
        obsid = str(int(float(obsid)))
        with open(out_list, 'w') as fo:
            fo.write(str(obsid) + '\n')
        cmd = 'rm -rf ' + data_dir + obsid + '/analysis/*'
        os.system(cmd)
        #
        #--- extract fits data needed for analysis
        #
        chk = extract_hrc_data(obsid, data_dir)
        if chk == False:
            print("Not all data are available")
            continue

        if hrc == 'hrc_i':
            cmd = 'csh -f ' + bin_dir + 'repro_all_new.csh hrc_i_list'
        else:
            cmd = 'csh -f ' + bin_dir + 'repro_all_S_new.csh hrc_s_list'

        try:
            run_ciao(cmd)
            cdir = data_dir + '/' + str(obsid)
            if os.path.isdir(cdir):
                cmd = 'chgrp -R hat ' + cdir
                os.system(cmd)
                cmd = 'chmod -R 775 ' + cdir
                os.system(cmd)
#
#--- directory name should be 5 digit
#
            test = int(float(obsid))
            if test < 10000:
                chk = mcf.add_leading_zero(obsid, 5)
                odir = data_dir + '/' + str(chk)
                if os.path.isdir(odir):
                    cmd = 'rm -rf ' + odir
                    os.system(cmd)
                    cmd = 'mv ' + cdir + ' ' + odir
                    os.system(cmd)
                else:
                    cmd = 'mv ' + cdir + ' ' + odir
                    os.system(cmd)
        except:
            pass

        mcf.rm_files(out_list)
        correct_naming(obsid, inst)
Code example #18
File: extract_line_stat.py Project: chandra-mta/MTA
def get_lines(grating):
    """
    extract line statistics for a given grating
    input:  grating --- hetg, metg, or letg
    output: acis_<grating>_<line>_data
    """
    #
    #--- read data file header
    #
    infile = house_keeping + 'data_header'
    with open(infile, 'r') as f:
        header = f.read()
#
#--- set which grating data to extract
#
    if grating == 'hetg':
        cmd = 'ls ' + gdata_dir + '/*/*/obsid_*_L1.5_S1HEGp1_linelist.txt >' + zspace
        ofile = 'acis_hetg_'
        l_list = h_lines
    elif grating == 'metg':
        cmd = 'ls ' + gdata_dir + '/*/*/obsid_*_L1.5_S1MEGp1_linelist.txt >' + zspace
        ofile = 'acis_metg_'
        l_list = m_lines
    else:
        cmd = 'ls ' + gdata_dir + '/*/*/obsid_*_L1.5_S1LEGp1_linelist.txt >' + zspace
        ofile = 'hrc_letg_'
        l_list = l_lines

    os.system(cmd)
    d_list = mcf.read_data_file(zspace, remove=1)

    sdate_list = [[], [], [], [], [], [], []]
    line_list = [{}, {}, {}, {}, {}, {}, {}]
    lcnt = len(l_list)
    #
    #--- go through each file
    #
    for dfile in d_list:

        out = find_obs_date(dfile)
        if out == 'na':
            continue
        else:
            [obsid, ltime, stime] = out
#
#--- extract line information. if energy or fwhm are either "*" or "NaN", skip
#
        data = mcf.read_data_file(dfile)
        for ent in data:
            atemp = re.split('\s+', ent.strip())
            if mcf.is_neumeric(atemp[0]):
                energy = atemp[2]
                fwhm = atemp[3]

                if energy == 'NaN':
                    continue

                if (fwhm == '*') or (fwhm == 'NaN'):
                    continue
                energy = mcf.add_tailing_zero(energy, 6)
                peak = float(energy)
                err = atemp[4]
                ede = atemp[5]
                line = str(
                    obsid
                ) + '\t' + energy + '\t' + fwhm + '\t' + err + '\t' + ede + '\t'
                line = line + str(ltime) + '\t' + str(int(stime)) + '\n'
                #
                #--- find the line value within +/-5 of the expected line center position
                #
                for k in range(0, lcnt):
                    center = l_list[k]
                    low = (center - 5) / 1000.0
                    top = (center + 5) / 1000.0
                    if (peak >= low) and (peak <= top):

                        sdate_list[k].append(stime)
                        line_list[k][stime] = line
#
#--- output file name
#
    for k in range(0, lcnt):
        #
        #--- print out the data
        #
        slist = sdate_list[k]
        slist.sort()

        line = header + '\n'
        for sdate in slist:
            line = line + line_list[k][sdate]

        val = str(l_list[k])
        if len(val) < 4:
            val = '0' + val

        odata = data_dir + ofile + val + '_data'
        with open(odata, 'w') as fo:
            fo.write(line)
Code example #19
def find_sigma_values(fits):
    """
    find 1, 2, and 3 sigma values of the given data
    input:  fits    --- image fits file name
    output: (sigma1, sigma2, sigma3)
    """
    #
    #--- make histogram
    #
    cmd1 = "/usr/bin/env PERL5LIB="
    cmd2 = ' dmimghist infile=' + fits
    cmd2 = cmd2 + '  outfile=outfile.fits hist=1::1 strict=yes clobber=yes'
    cmd = cmd1 + cmd2
    bash(cmd, env=ascdsenv)

    cmd1 = "/usr/bin/env PERL5LIB="
    cmd2 = ' dmlist infile=outfile.fits outfile=' + zspace + ' opt=data'
    cmd = cmd1 + cmd2
    bash(cmd, env=ascdsenv)

    data = mcf.read_data_file(zspace, remove=1)
    #
    #--- read bin # and its count rate
    #
    hbin = []
    hcnt = []
    vsum = 0

    for ent in data:
        atemp = re.split('\s+|\t+', ent)
        if mcf.is_neumeric(atemp[0]):
            hbin.append(float(atemp[1]))
            val = int(atemp[4])
            hcnt.append(val)
            vsum += val
#
#--- checking one, two and three sigma counts
#
    if len(hbin) > 0:
        v68 = int(0.68 * vsum)
        v95 = int(0.95 * vsum)
        v99 = int(0.997 * vsum)
        sigma1 = -999
        sigma2 = -999
        sigma3 = -999
        acc = 0
        for i in range(0, len(hbin)):
            acc += hcnt[i]
            if acc > v68 and sigma1 < 0:
                sigma1 = hbin[i]

            elif acc > v95 and sigma2 < 0:
                sigma2 = hbin[i]

            elif acc > v99 and sigma3 < 0:
                sigma3 = hbin[i]
                break

        return (sigma1, sigma2, sigma3)

    else:
        return (0, 0, 0)
Code example #20
File: create_html_page.py Project: chandra-mta/HRC
def plot_data(data, outname, pos):
    """
    plotting data
    input:  data    --- data
            outname --- output file name
            pos     --- indicator of the aiming position
    output  outname --- png plot of the data
    """
    date = []
    cnt = []
    err = []
    for ent in data:
        atemp = re.split('\s+', ent)
        if not mcf.is_neumeric(atemp[-1]):
            continue
        try:
            ytime = hcf.convert_time_to_fyear(atemp[2],
                                              tformat='%Y-%m-%dT%H:%M:%S')

            exp = float(atemp[3])
            val = float(atemp[4])
            if val < 10:
                continue
            val /= exp
            sig = float(atemp[5]) / exp

            date.append(ytime)
            cnt.append(val)
            err.append(sig)
        except:
            continue
#
#--- fit a linear line
#
    [a, b, sa, sb] = line_fit(date, cnt, err)
    #
    #--- set min max
    #
    xmin = 1999.0
    ta = time.localtime()
    xmax = ta.tm_year + 1
    ymin = 0.0
    ymax = 6
    #
    #--- start plotting
    #
    plt.close('all')
    mpl.rcParams['font.size'] = 9
    props = font_manager.FontProperties(size=9)

    ax = plt.subplot(111)
    ax.set_autoscale_on(False)
    ax.set_xbound(xmin, xmax)
    ax.set_xlim(xmin=xmin, xmax=xmax, auto=False)
    ax.set_ylim(ymin=ymin, ymax=ymax, auto=False)

    plt.errorbar(date, cnt, yerr=err, fmt='o', lw=1)
    plt.xlabel('Time (year)')
    plt.ylabel('Source Rate (cnt/s)')
    #
    #--- plot fitting line
    #
    start = a + b * xmin
    stop = a + b * xmax
    plt.plot([xmin, xmax], [start, stop], lw=1, color='blue')
    #
    #--- write the fitting equation
    #
    xdiff = xmax - xmin
    ydiff = ymax - ymin
    xpos = xmin + 0.1 * xdiff
    ypos = ymax - 0.1 * ydiff
    ac = '%1.2f' % (round(a, 2))
    if abs(b) < 0.01:
        bc = '%1.4f' % (round(abs(b), 4))
        ec = '%1.4f' % (round(sb, 4))
    else:
        bc = '%1.2f' % (round(abs(b), 2))
        ec = '%1.2f' % (round(sb, 2))
    if b > 0:
        text = '(Source Rate) = ' + ac + ' + (' + bc + '+/-' + ec + ') * Time'
    else:
        text = '(Source Rate) = ' + ac + ' - (' + bc + '+/-' + ec + ') * Time'
    plt.text(xpos, ypos, text)
    #
    #--- set the size of the plotting area in inch (width: 10.0in, height 5 in)
    #
    fig = matplotlib.pyplot.gcf()
    fig.set_size_inches(10.0, 5.0)

    #
    #--- save the plot in png format
    #
    plt.savefig(outname, format='png', dpi=100)
Code example #21
def find_obsids(ifile, o_dict, m_list, mp_person):
    """
    find obsids and their related information
    input:  ifile       --- a file name
            o_dict      --- a dictionary to keep the information about the obsids
            m_list      --- a list of obsids
            mp_person   --- mp person responsible for the data period
    output: o_dict      --- updated data dictionary
            m_list      --- updated list of obsids
    """
    data = mcf.read_data_file(ifile)

    cstep = 0  #--- indicator to show which area of the file
    ochk = 0  #--- how many times passed "OBS," marker
    ichk = 0  #--- how many times passed "ID" marker
    for ent in data:
        #
        #--- after 'SeqNbr' marker, there is a summary table
        #
        if cstep == 0:
            mc1 = re.search('SeqNbr', ent)
#
#--- the summary table finishes with the following marker
#
        if cstep == 1:
            mc2 = re.search('OR QUICK LOOK END', ent)

        if cstep == 2:
            mc3 = re.search('OBS\,', ent)
            mc4 = re.search('ID', ent)
            mc5 = re.search('CAL', ent)
            mc6 = re.search('HETG', ent)
            mc7 = re.search('LETG', ent)
            mc8 = re.search('ACIS', ent)
            mc9 = re.search('HRC', ent)

        if cstep == 0 and mc1 is not None:
            cstep = 1
#
#--- here we are reading the summary table
#
        elif cstep == 1:
            atemp = re.split('\s+', ent)
            if mcf.is_neumeric(atemp[0]):
                try:
                    pid = int(float(atemp[1]))
                except:
                    continue
                m_list.append(pid)
                #
                #--- the name of the target is in columns 18 to 40
                #
                name = ent[18:40]
                name = name.strip()
                o_dict[pid] = [atemp[0], name, 'None', 'CAL', mp_person]

            if mc2 is not None:
                cstep = 2
#
#--- now read more information about the obsids
#
        elif cstep == 2:
            if mc3 is not None:
                ochk += 1
            if mc4 is not None:
                ent = ent.strip()
                #
                #--- find which obsid information are presented next
                #
                atemp = re.split('\,', ent)
                btemp = re.split('ID=', atemp[0])
                pid = int(float(btemp[1]))

                ichk += 1
#
#--- only when ochk and ichk are equal, collect needed info
#
            if ochk == ichk:
                if mc5 is not None:
                    o_dict = update_dict(o_dict, pid, 1, 'CAL')
                if mc6 is not None:
                    o_dict = update_dict(o_dict, pid, 2, 'HETG')
                if mc7 is not None:
                    o_dict = update_dict(o_dict, pid, 2, 'LETG')
                if mc8 is not None:
                    o_dict = update_dict(o_dict, pid, 3, 'ACIS')
                if mc9 is not None:
                    o_dict = update_dict(o_dict, pid, 3, 'HRC')

    return [o_dict, m_list]
Code example #22
        obsid   = atemp[1]
        yoffset = int(round(float(atemp[10])))
        zoffset = int(round(float(atemp[11])))
        odict[obsid] = ['Hrc I', yoffset, zoffset]

    for ent in data2:
        atemp = re.split('\s+', ent)
        obsid   = atemp[1]
        yoffset = int(round(float(atemp[10])))
        zoffset = int(round(float(atemp[11])))
        odict[obsid] = ['Hrc S', yoffset, zoffset]

    return odict

#--------------------------------------------------------------------

if __name__ == '__main__':

    if len(sys.argv) > 1:
        obsid = sys.argv[1]
        obsid = obsid.strip()
        if mcf.is_neumeric(obsid):
            obsid = int(float(obsid))
            create_profile_plot(obsid)
        else:
            run_for_all_profile_plot(obsid)
    else:
        run_for_all_profile_plot()


Code example #23
File: acis_dose_get_data.py Project: chandra-mta/MTA
def acis_dose_get_data(startYear='', startMonth='', stopYear='', stopMonth=''):
    """
    extract ACIS evt1 data from a month and create combined image file. 
    input:  startYear   --- year of starting time
            startMonth  --- month of starting time
            stopYear    --- year of stopping time
            stopMonth   --- month of stopping time
    """
    if startYear == '' or startMonth == '' or stopYear == '' or stopMonth == '':

        startYear = input('Start Year: ')
        startYear = int(float(startYear))
        startMonth = input('Start Month: ')
        startMonth = int(float(startMonth))

        stopYear = input('Stop Year: ')
        stopYear = int(float(stopYear))
        stopMonth = input('Stop Month: ')
        stopMonth = int(float(stopMonth))
#
#--- start extracting the data for the year/month period
#
    for year in range(startYear, stopYear + 1):
        #
        #--- create a list of month appropriate for the year
        #
        month_list = expf.make_month_list(year, startYear, stopYear,
                                          startMonth, stopMonth)

        for month in month_list:
            smon = mcf.add_leading_zero(month)
            start = str(year) + '-' + smon + '-01T00:00:00'

            nextMonth = month + 1
            nyear = year
            if nextMonth > 12:
                nextMonth = 1
                nyear += 1
            smon = mcf.add_leading_zero(nextMonth)
            stop = str(nyear) + '-' + smon + '-01T00:00:00'
            #
            #--- using ar5gl, get a list of file names
            #
            line = 'operation=browse\n'
            line = line + 'dataset=flight\n'
            line = line + 'detector=acis\n'
            line = line + 'level=1\n'
            line = line + 'filetype=evt1\n'
            line = line + 'tstart=' + start + '\n'
            line = line + 'tstop=' + stop + '\n'
            line = line + 'go\n'

            fitsList = mcf.run_arc5gl_process(line)
            #
            #--- extract each evt1 file, extract the central part, and combine them into a one file
            #
            for fits in fitsList:
                print("FITS File: " + fits)
                line = 'operation=retrieve\n'
                line = line + 'dataset=flight\n'
                line = line + 'detector=acis\n'
                line = line + 'level=1\n'
                line = line + 'filetype=evt1\n'
                line = line + 'filename=' + fits + '\n'
                line = line + 'go\n'

                out = mcf.run_arc5gl_process(line)
                #
                #--- check whether the fits file was actually extracted and, if so, unzip the file
                #
                if len(out) < 1:
                    continue
                cmd = 'gzip -d ' + out[0]
                os.system(cmd)

                line = fits + '[EVENTS][bin tdetx=2800:5200:1, tdety=1650:4150:1][option type=i4]'
                #
                #--- create an image file
                #
                ichk = expf.create_image(line, 'ztemp.fits')
                #
                #--- combined images
                #
                if ichk > 0:
                    expf.combine_image('ztemp.fits', 'total.fits')

                mcf.rm_files(fits)
                mcf.rm_files('ztemp.fits')
#
#--- rename the file
#
            lyear = str(year)
            lmon = mcf.add_leading_zero(month)
            outfile = './ACIS_' + lmon + '_' + lyear + '_full.fits'
            cmd = 'mv total.fits ' + outfile
            os.system(cmd)
            #
            #--- trim the extreme values
            #
            upper = find_10th(outfile)
            outfile2 = './ACIS_' + lmon + '_' + lyear + '.fits'
            if mcf.is_neumeric(upper):
                cmd = ' dmimgthresh infile=' + outfile + ' outfile=' + outfile2
                cmd = cmd + ' cut="0:' + str(upper) + '" value=0 clobber=yes'
                expf.run_ascds(cmd)
            else:
                cmd = 'cp -f ' + outfile + ' ' + outfile2
                os.system(cmd)

            cmd = 'gzip ' + outfile
            os.system(cmd)
            #
            #--- move the full image to the data dir; keep the other in <exc_dir> to be used to create cumulative files
            #
            cmd = 'mv ' + outfile + '* ' + mon_acis_dir + '/.'
            os.system(cmd)
Code example #24
def create_img_html(year='', month=''):
    """
    create htmls to display exposure map.
    input:  year    --- year, if it is not given, the last month is used
            month   --- month, if it is not given, the last month is used
    output: <web_dir>/Image/HRC<inst>/Month/HRC<inst>_<mm>_<yyyy>_<sec>.html
            <web_dir>/Image/HRC<inst>/Cumulative/HRC<inst>_08_1999_<mm>_<yyyy>_<sec>.html
    """
    #
    #--- find the current year/month
    #
    atemp = time.strftime('%Y:%m', time.gmtime())
    [cyear, cmonth] = re.split(':', atemp)
    cyear = int(float(cyear))
    cmonth = int(float(cmonth))
    chk = 0
    #
    #--- if the year and month were passed, use them; otherwise set them to those of the last month
    #
    if mcf.is_neumeric(year):
        year = int(float(year))
        month = int(float(month))
        if year < cyear:
            chk = 1
    else:
        year = cyear
        month = cmonth - 1
        if month < 1:
            month = 12
            year -= 1
#
#--- set one month before and one month after
#
    pyear = year
    pmonth = month - 1
    if pmonth < 1:
        pmonth = 12
        pyear -= 1

    nyear = year
    nmonth = month + 1
    if nmonth > 12:
        nmonth = 1
        nyear += 1
#
#--- set link dates
#
    ldate = mcf.add_leading_zero(month) + '_' + str(year)
    pdate = mcf.add_leading_zero(pmonth) + '_' + str(pyear)
    ndate = mcf.add_leading_zero(nmonth) + '_' + str(nyear)

    monthly = read_template('mon_img_page')
    for inst in ('S', 'I'):
        if inst == 'S':
            cstop = 10
            odir = web_img_s_dir + 'Month/'
        else:
            cstop = 9
            odir = web_img_i_dir + 'Month/'

        for sec in range(0, cstop):
            #
            #--- create image file link
            #
            png = 'HRC' + inst + '_' + ldate + '_' + str(sec) + '.png'
            cpath = web_dir + 'Image/HRC' + inst + '/Month/' + png
            if os.path.isfile(cpath):
                pnglink = './' + png
            else:
                pnglink = './no_data.png'
#
#--- create link paths
#
            pfile = './HRC' + inst + '_' + pdate + '_' + str(sec) + '.html'
            nfile = './HRC' + inst + '_' + ndate + '_' + str(sec) + '.html'
            psfile = './HRC' + inst + '_' + ldate + '_' + str(sec -
                                                              1) + '.html'
            nsfile = './HRC' + inst + '_' + ldate + '_' + str(sec +
                                                              1) + '.html'
            #
            #--- section link
            #
            if sec == 0:
                seclink = '<a href="' + nsfile + '">Next Section</a><br />'
            elif sec == cstop - 1:
                seclink = '<a href="' + psfile + '">Prev Section</a><br />'
            else:
                seclink = '<a href="' + psfile + '">Prev Section</a>  '
                seclink = seclink + '<a href="' + nsfile + '">next Section</a><br />'
#
#--- time order link
#
            if year == 1999 and month == 8:
                tolink = '<a href="' + nfile + '">Next Month</a><br />'
            elif chk == 0:
                tolink = '<a href="' + pfile + '">Prev Month</a><br /> '
            else:
                tolink = '<a href="' + pfile + '">Prev Month</a>  '
                tolink = tolink + '<a href="' + nfile + '">Next Month</a><br />'
#
#--- section main link
#
            sublink = '../../../Sub_html/hrc' + inst.lower() + '_' + str(
                sec) + '.html'
            #
            #--- cumulative link
            #
            cumlink = '<a href="../Cumulative/HRC' + inst + '_08_1999_' + ldate
            cumlink = cumlink + '_' + str(sec) + '.html">Cumulative Plot</a>'
            #
            #--- replace texts in the template
            #
            otemp = monthly
            otemp = otemp.replace("#YEAR#", str(year))
            otemp = otemp.replace("#MONTH#", mcf.add_leading_zero(month))
            otemp = otemp.replace("#INST#", inst)
            otemp = otemp.replace("#SEC#", str(sec))
            otemp = otemp.replace("#PNGLINK#", pnglink)
            otemp = otemp.replace("#LATEST#", ldate)
            otemp = otemp.replace("#CUMLINK#", cumlink)
            otemp = otemp.replace("#SECLINK#", seclink)
            otemp = otemp.replace("#TOLINK#", tolink)
            otemp = otemp.replace("#SUBLINK#", sublink)
            #
            #--- set the output file name
            #
            ofile = odir + 'HRC' + inst + '_' + ldate + '_' + str(
                sec) + '.html'
            with open(ofile, 'w') as fo:
                fo.write(otemp)
#
#--- cumulative page
#
    cumulative = read_template('cum_img_page')
    for inst in ('S', 'I'):
        if inst == 'S':
            cstop = 10
            odir = web_img_s_dir + 'Cumulative/'
        else:
            cstop = 9
            odir = web_img_i_dir + 'Cumulative/'

        for sec in range(0, cstop):
            #
            #--- create link paths
            #
            pfile = './HRC' + inst + '_08_1999_' + pdate + '_' + str(
                sec) + '.html'
            nfile = './HRC' + inst + '_08_1999_' + ndate + '_' + str(
                sec) + '.html'
            psfile = './HRC' + inst + '_08_1999_' + ldate + '_' + str(
                sec - 1) + '.html'
            nsfile = './HRC' + inst + '_08_1999_' + ldate + '_' + str(
                sec + 1) + '.html'
            #
            #--- section link
            #
            if sec == 0:
                seclink = '<a href="' + nsfile + '">next Section</a><br />'
            elif sec == cstop - 1:
                seclink = '<a href="' + psfile + '">Prev Section</a><br />'
            else:
                seclink = '<a href="' + psfile + '">Prev Section</a>  '
                seclink = seclink + '<a href="' + nsfile + '">next Section</a><br />'
#
#--- time order link
#
            if year == 1999 and month == 8:
                tolink = '<a href="' + nfile + '">Next Month</a><br />'
            elif chk == 0:
                tolink = '<a href="' + pfile + '">Prev Month</a><br /> '
            else:
                tolink = '<a href="' + pfile + '">Prev Month</a>  '
                tolink = tolink + '<a href="' + nfile + '">Next Month</a><br />'
#
#--- section main link
#
            sublink = '../../../Sub_html/hrc' + inst.lower() + '_' + str(
                sec) + '.html'
            #
            #--- month link
            #
            monlink = '<a href="../Month/HRC' + inst + '_' + ldate + '_' + str(
                sec) + '.html">'
            monlink = monlink + 'Month Plot</a>'
            #
            #--- replace texts in the template
            #
            otemp = cumulative
            otemp = otemp.replace("#YEAR#", str(year))
            otemp = otemp.replace("#MONTH#", mcf.add_leading_zero(month))
            otemp = otemp.replace("#INST#", inst)
            otemp = otemp.replace("#SEC#", str(sec))
            otemp = otemp.replace("#LATEST#", ldate)
            otemp = otemp.replace("#MONLINK#", monlink)
            otemp = otemp.replace("#SECLINK#", seclink)
            otemp = otemp.replace("#TOLINK#", tolink)
            otemp = otemp.replace("#SUBLINK#", sublink)
            #
            #--- set the output file name
            #
            ofile = odir + 'HRC' + inst + '_08_1999_' + ldate + '_' + str(
                sec) + '.html'
            with open(ofile, 'w') as fo:
                fo.write(otemp)
Code example #25
File: compute_vega_count.py Project: chandra-mta/HRC
def create_data_tables(table):
    """
    read input table of observations and create data tables
    input:  table   --- a file which contains a list of observations
                        either obsid list or a list in which the first column is obsid
    output: <data_dir>/hrc_<det/pos>_results 
    """
    data = mcf.read_data_file(table)

    for ent in data:
        if mcf.is_neumeric(ent):
            obsid = ent.strip()
        else:
            atemp = re.split('\s+', ent)
            obsid = atemp[0]

        if not mcf.is_neumeric(obsid):
            continue

        print(obsid)

        fits = hcf.run_arc5gl(0,
                              0,
                              obsid=str(obsid),
                              level='1',
                              filetype='evt1')
        out = extract_count_stats(fits)

        if out[-1] < 0:
            continue

        line = str(obsid) + '\t'

        if float(obsid) < 1000:
            line = line + '\t'

        line = line + str(fits) + '\t'
        line = line + out[7] + '\t'
        line = line + '%2.1f' % round(out[6], 1) + '\t'
        line = line + '%2.2f' % round(out[5], 2) + '\t'
        line = line + '%2.2f' % round(out[8], 2) + '\t'
        line = line + '%2.4f' % round(out[9], 4) + '\n'

        if out[-1] == 10:
            outfile = data_dir + 'hrc_i_results'
        if out[-1] == 0:
            outfile = data_dir + 'hrc_s_0_results'
        if out[-1] == 1:
            outfile = data_dir + 'hrc_s_10_results'
        if out[-1] == 2:
            outfile = data_dir + 'hrc_s_25_results'
        if out[-1] == 3:
            outfile = data_dir + 'hrc_s_m10_results'
        if out[-1] == 4:
            outfile = data_dir + 'hrc_s_m25_results'

        with open(outfile, 'a') as fo:
            fo.write(line)

        cmd = 'rm  -f ' + fits
        os.system(cmd)
Code example #26
File: extract_sim_data.py Project: chandra-mta/MTA
def extract_sim_data():
    """
    extract sim data from PRIMARYCCDM_*.*.tl
    input: none but read from <dump_dir>/PRIMARYCCDM_*.*.tl
    output: <data_dir>sim_data.out
    """
    #
    #--- find the time of the last entry from the sim_data.out
    #
    sfile = data_dir + 'sim_data.out'
    data = mcf.read_data_file(sfile)
    #
    #--- clean up the data; drop the lines whose date starts with ":" (e.g. :2014)
    #
    pdata = []
    for ent in data:
        if re.search('^:', ent):
            continue
        else:
            pdata.append(ent)
#
#--- the values of the last entry
#
    if len(pdata) > 0:
        atemp = re.split('\s+', pdata[-1])
        ltime = int(float(Chandra.Time.DateTime(atemp[0]).secs))
        time_2 = atemp[0]
        col1_2 = atemp[1]
        col2_2 = atemp[2]
        col3_2 = atemp[3]
    else:
        ltime = 0
        time_2 = 0
        col1_2 = ''
        col2_2 = ''
        col3_2 = ''
#
#--- check whether input files exists
#
    cmd = 'ls -rt ' + dump_dir + 'PRIMARYCCDM_*.*.tl >' + zspace
    os.system(cmd)
    data = mcf.read_data_file(zspace, remove=1)

    dlen = len(data)
    if dlen < 1:
        exit(1)

#
#--- files exist. read the data from the last 40 files
#
    if dlen > 40:
        tlist = data[-40:]
    else:
        tlist = data

    for ent in tlist:
        cmd = 'cat ' + ent + ' >> ' + zspace
        os.system(cmd)

    data = mcf.read_data_file(zspace, remove=1)
    #
    #--- go through each data line
    #
    prev = ''
    sline = ''
    for ent in data:
        #
        #--- expect the first letter of the data line is numeric (e.g. 2014).
        #
        try:
            val = float(ent[0])
        except:
            continue
#
#--- only data with "FMT" format will be used
#
        mc = re.search('FMT', ent)
        if mc is None:
            continue
#
#--- if there are less than 20 entries, something wrong; skip it
#
        atemp = re.split('\t+', ent)
        if len(atemp) < 20:
            continue
#
#--- convert time format
#
        ttime = atemp[0]
        ttime = ttime.strip()
        ttime = ttime.replace(' ', ':')
        ttime = ttime.replace(':::', ':00')
        ttime = ttime.replace('::', ':0')
        #
        #--- if the time is exactly same as one before, skip it
        #
        if ttime == time_2:
            continue
#
#--- if the time is already in the database, skip it
#
        stime = int(float(Chandra.Time.DateTime(ttime).secs))
        if stime <= ltime:
            continue
#
#--- use only data which tscpos and fapos have numeric values
#
        tscpos = atemp[4].strip()
        fapos = atemp[5].strip()

        if tscpos == "" or fapos == "":
            continue
        else:
            if (mcf.is_neumeric(tscpos)) and (mcf.is_neumeric(fapos)):
                tscpos = str(int(float(tscpos)))
                fapos = str(int(float(fapos)))
            else:
                continue

        mpwm = atemp[12].strip()
        if mcf.is_neumeric(mpwm):
            mpwm = int(float(mpwm))
            mpwm = str(mpwm)
        else:
            mpwm = '0'
#
#--- we want to print only the beginning and ending of runs of identical data entries.
#--- skip the line if all three entries are the same as the one before, except the last one
#
        if (col1_2 == tscpos) and (col2_2 == fapos) and (col3_2 == mpwm):
            time_2 = ttime
            continue

        line = ttime + '\t' + str(tscpos) + '\t' + str(fapos) + '\t' + str(
            mpwm) + '\n'
        if line == prev:
            continue
        else:
            pline = time_2 + '\t' + str(col1_2) + '\t' + str(
                col2_2) + '\t' + str(col3_2) + '\n'
            sline = sline + pline + line

            prev = line
            time_2 = ttime
            col1_2 = tscpos
            col2_2 = fapos
            col3_2 = mpwm

    with open('./temp_save', 'w') as fo:
        fo.write(sline)

    sfile2 = sfile + '~'
    cmd = 'cp  ' + sfile + ' ' + sfile2
    os.system(cmd)
    cmd = 'cat ./temp_save >> ' + sfile
    os.system(cmd)

    mcf.rm_files('./temp_save')
Code example #27
def create_history_file(head):
    """
    create count history file and the information file containing current bad entry information
    input:  head                --- ccd, hccd, or col to indicate which data to handle
    output: <head>_ccd<ccd>_cnt --- count history data:
                                    <stime><><year>:<ydate><><cumulative cnt><><cnt for the day>
            <head>_ccd<ccd>_information --- current information of the bad entries. 
                                            Example, list of warm pixels, flickering pixels, 
                                                      totally new pixels, 
                                                      and all past and current warm pixels.
    """
    for ccd in range(0, 10):
        #
        #--- read data file head is either ccd, hccd, or col
        #
        ifile = data_dir + 'hist_' + head + str(ccd)
        data = mcf.read_data_file(ifile)

        bad_dat_list = []  #--- save all bad data as elements
        bad_dat_save = []  #--- save all bad data as a list for each day

        stime = []
        ydate = []
        dcnt = []  #--- keep discrete count history
        ccnt = []  #--- keep cumulative count history
        new = []  #--- keep totally new bad entries in the last 5 days
        pcnt = 0
        k = 0
        tot = len(data)

        for ent in data:
            #
            #--- read only data entries written in a correct format: <stime><><year>:<ydate><>:<bad_data>...
            #
            atemp = re.split('<>', ent)
            chk1 = mcf.is_neumeric(atemp[0])
            btemp = re.split(':', atemp[1])
            chk2 = mcf.is_neumeric(btemp[1])

            if (chk1 == True) and (int(
                    atemp[0]) > 0) and (chk2 == True) and (int(btemp[1]) > 0):
                stime.append(atemp[0])
                ydate.append(atemp[1])
                #
                #--- check whether bad data are recorded for the given day
                #
                if head == 'ccd' or head == 'hccd':
                    m1 = re.search('\(', atemp[2])
                else:
                    btemp = re.split(':', atemp[2])  #--- case for warm columns
                    if mcf.is_neumeric(btemp[len(btemp) - 1]):
                        m1 = 'OK'
                    else:
                        m1 = None

                if m1 is not None:
                    btemp = re.split(':', atemp[2])
                    if btemp != '':
                        dcnt.append(len(btemp))
#
#--- for the last five days, check whether any totally new bad entries exist
#
                    if k > tot - 5:
                        for test in btemp:
                            chk = 0
                            for comp in bad_dat_list:
                                if test == comp:
                                    chk = 1
                                    continue
                            if chk == 0:
                                new.append(test)

                    bad_dat_list = bad_dat_list + btemp
                    out = list(set(bad_dat_list))

                    pcnt = len(out)
                    ccnt.append(pcnt)
                    bad_dat_save.append(btemp)
                else:
                    dcnt.append(0)
                    bad_dat_save.append([])
                    ccnt.append(pcnt)

            k += 1  #--- k is incremented to check the last 5 days
#
#--- find out which entries are warm/hot and flickering
#
        [warm, flick, b_list, p_list] = find_warm_and_flickering(bad_dat_save)
        #
        #--- open output file to print current information
        #
        ofile = data_dir + '/' + head + str(ccd) + '_information'
        with open(ofile, 'w') as fo:
            fo.write("warm:\t")
            print_data(fo, warm)

            fo.write('flick:\t')
            print_data(fo, flick)

            fo.write('new:\t')
            out = list(set(new))
            print_data(fo, out)

            fo.write('past:\t')
            out = list(set(bad_dat_list))
            print_data(fo, out)

#
#--- open output file to print out count history
#
        line = ''
        for i in range(0, len(stime)):
            if i < 13:
                line = line + stime[i] + '<>' + ydate[i] + '<>' + str(
                    ccnt[i]) + '<>'
                line = line + str(dcnt[i]) + '<>0<>0\n'
            else:
                line = line + stime[i] + '<>' + ydate[i] + '<>' + str(
                    ccnt[i]) + '<>'
                line = line + str(dcnt[i]) + '<>' + str(
                    b_list[i - 13]) + '<>' + str(p_list[i - 13]) + '\n'

        ofile = data_dir + '' + head + str(ccd) + '_cnt'
        with open(ofile, 'w') as fo:
            fo.write(line)
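The nested comparison loop above that looks for entries never seen before is effectively a set difference; a compact standalone sketch of the same idea with made-up entries:

# entries found in the last few days vs. everything recorded so far (made-up data)
recent_entries = ['(123,45)', '(200,17)', '(88,9)']
bad_dat_list   = ['(123,45)', '(77,3)']

# totally new entries: present recently but never recorded before
new = sorted(set(recent_entries) - set(bad_dat_list))
print(new)    # ['(200,17)', '(88,9)']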