示例#1
0
def update_html(update):
    """
    Run the html page update when it is required.

    input:  update  --- if positive, force the update without checking
                        whether this year's html file already exists
    output: none, but updated html pages (in <web_dir>)
    """
    #
    #--- current year
    #
    year = time.localtime().tm_year

    if update > 0:
        #
        #--- forced update
        #
        run_update(year)
    else:
        #
        #--- otherwise run the update only when no html file with this
        #--- year in its name is found in <web_dir>
        #
        os.system('ls ' + web_dir + '*.html > ' + zspace)
        with open(zspace, 'r') as f:
            out = f.read()
        mcf.rm_files(zspace)

        if re.search(str(year), out) is None:
            run_update(year)
def extract_data(start, stop):
    """
    extract data to compute HRMA focus plots
    input:  start   ---- start time in the format of mm/dd/yy (e.g. 05/01/15)
            stop    ---- stop time in the format of mm/dd/yy
    output: acis*evt2.fits.gz, hrc*evt2.fits.gz
            return value: [fits_a, fits_h] --- acis and hrc fits lists
    """
    #
    #--- remove any fits files left over from a previous run
    #
    os.system('ls * > ' + zspace)
    with open(zspace, 'r') as f:
        chk = f.read()
    mcf.rm_files(zspace)

    if re.search('fits', chk) is not None:
        os.system('rm *fits*')
    #
    #--- default to a one month interval when no period is given
    #
    if start == '':
        [start, stop] = set_interval()
    #
    #--- extract acis and hrc evt2 files
    #
    fits_a = create_fits_list('acis', start, stop)
    fits_h = create_fits_list('hrc', start, stop)

    return [fits_a, fits_h]
示例#3
0
def print_cti_results(out_type, elm, ccd, content):
    """
    print out selected/corrected cti data to an appropriate file
    Input:  out_type    --- directory name under <data_dir>
            elm         --- the name of element (al, mn, ti)
            ccd         --- ccd #
            content     --- a table list. each line is already terminated by "\n"
    Output: <data_dir>/<out_type>/<elm>_ccd<ccd#>
    """
    sline = ''
    for ent in content:
        sline = sline + ent
        #
        #--- just in a case, the line is not terminated by '\n', add it.
        #--- (fixed: the original re.search('\n', ent) matched a newline
        #---  anywhere in the entry, so a line with an embedded newline
        #---  but no terminator was left unterminated)
        #
        if not ent.endswith('\n'):
            sline = sline + '\n'

    if sline != '':
        ifile = data_dir + '/' + out_type + '/' + elm + '_ccd' + str(ccd)
        mcf.rm_files(ifile)

        with open(ifile, 'w') as fo:
            fo.write(sline)
示例#4
0
def clean_otg_tl():
    """
    cleaning up OTG TLsave directory
    input:  none
    output: none.
    """
    #
    #--- list the *.tl* files, newest first
    #
    os.system('ls -t  ' + save_dir + '*.tl* > ' + zspace)
    files = mcf.read_data_file(zspace, remove=1)
    dlen = len(files)
    #
    #--- keep only the newest 100 files; remove the rest
    #
    if dlen > 100:
        for ent in files[100:]:
            try:
                mcf.rm_files(ent)
            except:
                break
    #
    #--- gzip the files ranked 51st to 100th, unless already compressed
    #
    if dlen > 50:
        for ent in files[50:100]:
            try:
                if "gz" not in ent:
                    os.system('gzip -fq  ' + ent)
            except:
                break
示例#5
0
def combine_fits(flist, outname):
    """
    combine fits files in the list
    input:  flist   --- a list of fits file names
            outname --- a outputfits file name
    output: outname --- a combined fits file
    """
    #
    #--- the first file becomes the base of the combined output
    #
    mcf.rm_files(outname)
    os.system('mv ' + flist[0] + ' ' + outname)
    #
    #--- append the remaining tables one by one; a failed append
    #--- skips that file and leaves the output untouched
    #
    for fits in flist[1:]:
        try:
            mfits.appendFitsTable(outname, fits, 'temp.fits')
        except:
            continue

        os.system('mv temp.fits ' + outname)
        os.system('rm -f ' + fits)
    #
    #--- clean up any compressed fits files left in the work directory
    #
    os.system('rm -rf *fits.gz')

    return outname
示例#6
0
def remove_old_data(fits, cols, cut):
    """
    remove the data older than the cut time
    input:  fits    --- fits file name
            cols    --- a list of column names
            cut     --- cut time in seconds from 1998.1.1
    output: updated fits file
    """
    f = pyfits.open(fits)
    data = f[1].data
    f.close()
    #
    #--- find the first position at/after the cut time.
    #--- fixed: when every entry is older than the cut, pos previously
    #--- stayed 0 and nothing was removed; now all rows are dropped,
    #--- which matches the stated purpose of the function
    #
    dtime = list(data['time'])
    pos = len(dtime)
    for k in range(0, len(dtime)):
        if dtime[k] >= cut:
            pos = k
            break
    #
    #--- keep only the data at/after the cut time
    #
    udata = []
    for col in cols:
        udata.append(list(data[col][pos:]))

    mcf.rm_files(fits)
    ecf.create_fits_file(fits, cols, udata)
示例#7
0
def extract_acis_count_rate(year, month, dir_name):
    """
    extract acis count rate data
    input:  year        --- year
            month       --- month
            dir_name    --- output dir name
    output: <dir_name>/ccd<#ccd>
    """
    #
    #--- process every data fits file of the month
    #
    for ifile in get_data_list_from_archive(year, month):
        #
        #--- build the arc5gl script to retrieve the evt1 file
        #
        line = 'operation=retrieve\n'
        line += 'dataset=flight\n'
        line += 'detector=acis\n'
        line += 'level=1\n'
        line += 'filetype=evt1\n'
        line += 'filename=' + ifile + '\n'
        line += 'go\n'

        run_arc5gl(line)
        #
        #--- decompress the retrieved file
        #
        os.system('gzip -d ' + ifile + '.gz')
        #
        #--- extract data and update/create the count rate data
        #
        extract_data(ifile, dir_name)

        mcf.rm_files(ifile)
示例#8
0
def check_data_exist(hdir):
    """
    check all needed fits data are extracted
    input:  hdir    --- directory where the data will be kept
    output: True/False. if False, the mail notification is also sent out
    """
    #
    #--- collect the file listing of the data directory
    #
    os.system('ls ' + hdir + '*/* > ' + zspace)
    with open(zspace, 'r') as f:
        out = f.read()
    mcf.rm_files(zspace)
    #
    #--- every one of these file types must appear in the listing
    #
    needed = ['dtf1', 'fov', 'bpix1', 'evt1', 'msk1', 'mtl1',
              'std_dtfstat1', 'std_flt1', 'asol']
    for name in needed:
        if re.search(name, out) is not None:
            continue
        #
        #--- a required file type is missing: notify by email and stop
        #
        line = 'Some data files are missing and the re-process is terminated. Check: ' + hdir + '\n'
        with open(zspace, 'w') as fo:
            fo.write(line)

        cmd = 'cat ' + zspace + ' | mailx -s "Subject: hrc re-process failed" [email protected]'
        os.system(cmd)

        return False

    return True
示例#9
0
def fitsImgSection(ifile, x1, x2, y1, y2, outname, extension=0, clobber='no'):
    """
    extract a x by y section of fits image file
    Input:  ifile       --- input fits image file name
            x1, x2      --- x range
            y1, y2      --- y range
            outname     --- output fits image name
            extension   --- extension #. default = 0
            clobber     --- clobber or not. default = no
    Output: outname     --- fits image file of size x by y
    """
    #
    #--- remove the old output first when clobber is requested
    #
    m1 = re.search('y', clobber)
    m2 = re.search('Y', clobber)
    if (m1 is not None) or (m2 is not None):
        mcf.rm_files(outname)

    t = pyfits.open(ifile)
    data = t[extension].data

    xsize = abs(x2 - x1)
    ysize = abs(y2 - y1)

    output = numpy.matrix(numpy.zeros([ysize, xsize]))
    #
    #--- copy the requested section in a single vectorized slice instead
    #--- of the original per-pixel double python loop (same values, C speed).
    #--- the guard preserves the original behavior for a reversed range,
    #--- which produced an all-zero image of abs() size
    #
    if (x2 > x1) and (y2 > y1):
        output[:, :] = data[y1:y2, x1:x2]

    header = pyfits.getheader(ifile)
    pyfits.writeto(outname, output, header)

    t.close()
示例#10
0
def comp_stat(line, year, month, outfile):
    """
    compute statistics and print them out
    input:  line    --- command line, year, month, and output file name
                        command line is used by dmcopy to extract a specific location
                            Example: ACIS_04_2012.fits.gz[1:1024,1:256]
            year    --- year
            month   --- month
            outfile --- output file name
    output: outfile --- stat results in outfile
    """
    #
    #--- cut out the requested region
    #
    expf.run_ascds(' dmcopy ' + line + ' temp.fits clobber="yes"')
    #
    #-- to avoid get min from outside of the edge of a CCD
    #
    expf.run_ascds(' dmimgthresh infile=temp.fits  outfile=zcut.fits  cut="0:1e10" value=0 clobber=yes')
    #
    #-- find avg, min, max and deviation
    #
    [avg, minv, minp, maxv, maxp, dev] = extract_stat_result('zcut.fits')
    #
    #-- find the one sigma and two sigma count rate:
    #
    [sigma1, sigma2, sigma3] = expf.three_sigma_values('zcut.fits')
    #
    #--- write the results out and clean up the work files
    #
    print_stat(avg, minv, minp, maxv, maxp, dev,
               sigma1, sigma2, sigma3, year, month, outfile)

    mcf.rm_files('temp.fits')
    mcf.rm_files('zcut.fits')
示例#11
0
    def test_combine_evt1_files(self):
        """
        combine all hrc_i_115 evt1 files of year 2000 and verify the
        tstart/tstop header values of the combined file
        """
        #
        #--- make a list of all evt1 file of the year
        #
        year = 2000
        hdir = 'hrc_i_115'
        e_list = get_evt1_list(year, hdir)
        if len(e_list) == 0:
            print("Something wrong in getting files")
        #
        #--- combined all evt1 files of the year
        #
        oname = 'test_combined_evt1.fits'
        hcf.combine_fits_files(e_list, oname)

        tstart = hcf.read_header_value(oname, 'tstart')
        tstop = hcf.read_header_value(oname, 'tstop')

        tstart = int(float(tstart))
        tstop = int(float(tstop))
        #
        #--- fixed: assertEquals is a deprecated alias of assertEqual
        #--- (removed in python 3.12)
        #
        self.assertEqual(tstart, 65961070)
        self.assertEqual(tstop, 93190294)

        mcf.rm_files(oname)
def convert_acistemp_into_c():
    """
    convert all acistemp fits files in K into that of C
    input:  none, but read from <data_dir>/Acistemp/*fits
    output: converted fits files in Compaciscent/*fits
    """
    outdir = data_dir2 + '/Compaciscent/'
    #
    #--- collect the list of fits files to convert
    #
    os.system('ls ' + data_dir + '/Acistemp/*fits* > ' + zspace)
    fits_list = mcf.readFile(zspace)
    mcf.rm_files(zspace)

    for fits in fits_list:
        #
        #--- the msid is the leading token of the file name
        #
        fname = re.split('\/', fits)[-1]
        msid = re.split('_', fname)[0]

        flist = pyfits.open(fits)
        fdata = flist[1].data
        #
        #--- convert each column from Kelvin to Celsius in place
        #
        for col in [msid] + bcols:
            fdata[col] = fdata[col] - 273.15

        flist[1].data = fdata

        outfile = outdir + fname
        mcf.rm_files(outfile)
        flist.writeto(outfile)
示例#13
0
def run_arc5gl(line, out=''):
    """
    run arc5gl command
    input:  line    --- arc5gl command lines (the script content)
            out     --- output file name; default: "" --- no output file
    output: results of the command (fits files retrieved into the current
            directory, or stdout redirected into <out>)
    """
    #
    #--- write the arc5gl command script to a temporary file
    #
    with open(zspace, 'w') as fo:
        fo.write(line)

    try:
        #
        #--- primary arc5gl location
        #
        cmd = '/proj/sot/ska/bin/arc5gl -user isobe -script ' + zspace
        if out != '':
            cmd = cmd + '> ' + out

        os.system(cmd)
    except:
        #
        #--- NOTE(review): os.system returns an exit status rather than
        #--- raising on command failure, so these fallback branches are
        #--- unlikely to ever run --- confirm whether falling back on a
        #--- failed command was the intent
        #
        try:
            #
            #--- secondary arc5gl location
            #
            cmd = '/proj/axaf/simul/bin/arc5gl -user isobe -script ' + zspace
            if out != '':
                cmd = cmd + '> ' + out
            os.system(cmd)
        except:
            #
            #--- last resort: run under the ascds environment with a
            #--- cleared PERL5LIB
            #
            cmd1 = "/usr/bin/env PERL5LIB= "
            cmd2 = '/proj/axaf/simul/bin/arc5gl -user isobe -script ' + zspace
            if out != '':
                cmd2 = cmd2 + '> ' + out
            cmd = cmd1 + cmd2
            bash(cmd, env=ascdsenv)
    #
    #--- remove the temporary script file
    #
    mcf.rm_files(zspace)
示例#14
0
def run_arc5gl(tstart, tstop, ftype):
    """
    run arc5gl
    input:  tstart  --- start time
            tstop   --- stop time
            ftype   --- file type
    output: extracted fits files
    """
    #
    #--- assemble the arc5gl retrieval script
    #
    line = 'operation=retrieve\n'
    line += 'tstart=' + str(tstart) + '\n'
    line += 'tstop=' + str(tstop) + '\n'
    line += 'dataset=flight\n'
    line += 'level=0\n'
    line += 'detector=hrc\n'
    line += 'subdetector=eng\n'
    line += 'filetype=' + ftype + '\n'
    line += 'go\n'

    with open(zspace, 'w') as fo:
        fo.write(line)
    #
    #--- run arc5gl; fall back to the simul copy under the ascds environment
    #
    try:
        os.system(' /proj/sot/ska/bin/arc5gl -user isobe -script ' + zspace)
    except:
        cmd1 = "/usr/bin/env PERL5LIB="
        cmd2 = ' /proj/axaf/simul/bin/arc5gl -user isobe -script ' + zspace
        bash(cmd1 + cmd2, env=ascdsenv)

    mcf.rm_files(zspace)
示例#15
0
def add_coldata_to_fits(ofits1, ofits2, col_names, outfile):
    """
    adding data from the second fits file to the first fits file
    input:  ofits1      --- the first fits file
            ofits2      --- the second fits file
            col_names   --- a list of column names which you want to copy from the
                            second fits file
            outfile     --- output fits file name
    output: outfile     --- resulted fits file
    """
    def _make_columns(cols, table, names):
        #
        #--- build pyfits Column objects for the given column names,
        #--- copying name/format/unit and the column data
        #
        clist = []
        for col in names:
            cent = cols[col]
            clist.append(pyfits.Column(name=cent.name,
                                       format=cent.format,
                                       unit=cent.unit,
                                       array=table[col]))
        return clist

    otable = pyfits.open(ofits1)[1].data
    ocols = otable.columns
    atable = pyfits.open(ofits2)[1].data
    acols = atable.columns
    #
    #--- columns of the first file which are not overlapped with the
    #--- data to be added
    #
    ocol_names = numpy.setdiff1d(ocols.names, col_names)
    oldcols = pyfits.ColDefs(_make_columns(ocols, otable, ocol_names))
    #
    #--- columns to be copied from the second file
    #
    newcols = pyfits.ColDefs(_make_columns(acols, atable, col_names))
    #
    #--- combine the two column sets and create the new fits file
    #
    hdu = pyfits.BinTableHDU.from_columns(oldcols + newcols)

    mcf.rm_files(outfile)
    hdu.writeto(outfile)
示例#16
0
def check_cmd_diff(start='', stop=''):
    """
    compare backstop commands and hrc hk record to find mismatched cases
    and create info file containing the mismatch info, if there is the mismatch
    input:  start   --- start time; when '', the most recent two days are used
            stop    --- stop time
    output: notification sent out when mismatches are found
    """
    #
    #--- remove email notificaiton file if it still exists
    #
    mcf.rm_files(exc_dir + 'cmd_diff_cont')
    #
    #--- if start and stop dates are not given, find today's date and set start and stop time
    #
    if start == '':
        today = time.strftime('%Y:%j:00:00:00', time.gmtime())
        stop = Chandra.Time.DateTime(today).secs
        start = stop - 86400.0 * 2.0  #--- 2 days ago
    #
    #--- extract needed information from hrc hk files (extracted from archive)
    #
    hk_dict = read_hk_data(start, stop)
    #
    #--- read backstop data file for a given period
    #
    bs_dict = read_bs_data(start, stop)
    #
    #--- compare the two data sets in both directions and send out email
    #
    mismatch1 = compare_two_dicts(bs_dict, hk_dict, offset1=0, offset2=180)
    mismatch2 = compare_two_dicts(hk_dict, bs_dict, offset1=180, offset2=180)

    out1 = create_notification(mismatch1, pchk=1)
    out2 = create_notification(mismatch2, pchk=2)

    send_out_notification(out1, out2)
示例#17
0
def extract_stat_fits_file(obsid, out_dir='./'):
    """
    extract acis stat fits files using arc5gl
    Input:  obsid   --- obsid
            out_dir --- a directory in which the fits file is deposited. default is "./"
    Output: acis stat fits file (decompressed) in out_dir
            data    --- a list of fits files extracted
    """
    #
    #--- write the arc5gl retrieval script
    #
    line = 'operation=retrieve\n'
    line = line + 'dataset=flight\n'
    line = line + 'detector=acis\n'
    line = line + 'level=1\n'
    line = line + 'filetype=expstats\n'
    line = line + 'obsid=' + str(obsid) + '\n'
    line = line + 'go\n'

    with open(zspace, 'w') as fo:
        fo.write(line)
    try:
        #
        #--- run arc5gl, trying the alternate locations on failure
        #
        try:
            cmd = ' /proj/sot/ska/bin/arc5gl -user isobe -script ' + zspace
            os.system(cmd)
        except:
            try:
                cmd = ' /proj/axaf/simul/bin/arc5gl -user isobe -script ' + zspace
                os.system(cmd)
            except:
                cmd1 = "/usr/bin/env PERL5LIB= "
                cmd2 = ' /proj/axaf/simul/bin/arc5gl -user isobe -script ' + zspace
                cmd = cmd1 + cmd2
                bash(cmd, env=ascdsenv)
        #
        #--- check whether the stat fits file actually arrived
        #
        cmd = 'ls ' + exc_dir + '> ' + zspace
        os.system(cmd)
        with open(zspace, 'r') as f:
            test = f.read()

        mcf.rm_files(zspace)

        m1 = re.search('stat1.fits.gz', test)
        if m1 is not None:
            #
            #--- move the file to out_dir, decompress it, and return the list
            #
            cmd = 'mv ' + exc_dir + '/*stat1.fits.gz ' + out_dir + '/.'
            os.system(cmd)

            cmd = 'gzip -d ' + out_dir + '/*stat1.fits.gz'
            os.system(cmd)

            cmd = 'ls ' + out_dir + '/*' + str(
                obsid) + '*stat1.fits > ' + zspace
            os.system(cmd)

            data = mcf.read_data_file(zspace, remove=1)

            return data
        else:
            return []
    except:
        #
        #--- fixed: was mcf.rm_file (typo), which would raise
        #--- AttributeError; every other call site uses rm_files
        #
        mcf.rm_files(zspace)
        return []
def get_data_with_dataseeker(tstart, tstop, col_list):
    """
    extract data using dataseeker
    input:  tstart  --- starting time in seconds from 1998.1.1
            tstop   --- stopping time in seconds from 1998.1.1
            col_list    --- data name to be extracted (without _ or _avg part)
    output: save        --- a list of lists of data, including time list
    """
    #
    #--- dataseeker needs an (empty) in-file; create it when missing
    #
    if not os.path.isfile('test'):
        os.system('touch test')
    #
    #--- build the dataseeker command; each column name starts with '_'
    #--- and ends with '_avg'
    #
    acols = ['_' + col + '_avg' for col in col_list]

    cmd2 = 'dataseeker.pl infile=test outfile=temp.fits '
    cmd2 = cmd2 + 'search_crit="columns=' + ','.join(acols)
    cmd2 = cmd2 + ' timestart=' + str(tstart) + ' timestop=' + str(tstop) + '"'
    cmd2 = cmd2 + ' loginFile=' + house_keeping + 'loginfile '
    #
    #--- run the dataseeker command under ascds environment
    #
    bash('/usr/bin/env PERL5LIB="" ' + cmd2, env=ascdsenv)
    #
    #--- read the data and create a list of lists, time column first
    #
    hrd = pyfits.open('temp.fits')
    data = hrd[1].data
    hrd.close()

    save = [data['time']]
    for col in col_list:
        save.append(data[col + '_avg'])
    #
    #--- clean up
    #
    mcf.rm_files('test')
    mcf.rm_files('temp.fits')

    return save
示例#19
0
def update_violation_page(line, line2, color):
    """
    update violation pages
    input:  line    --- lower violation table contents
            line2   --- upper violation table contents
            color   --- yellow or red
    output: <web_dir>/<color>_violation.html
    """
    title = color.capitalize() + ' Violation'
    #
    #--- read the html templates and fill in the common placeholders
    #
    head = chs.read_template('html_head')
    jscript = chs.read_template('java_script_deposit')
    tstyle = chs.read_template('two_col_style')
    tail = chs.read_template('html_close')

    head = head.replace('#MSID#', title)
    head = head.replace('#JAVASCRIPT#', jscript)
    head = head.replace('#STYLE#', tstyle)
    #
    #--- the page links to the page of the other color
    #
    if color == 'yellow':
        ocolor = 'red'
        other = 'Go to Red Violation'
    else:
        ocolor = 'yellow'
        other = 'Go to Yellow Violation'
    #
    #--- assemble the page content
    #
    parts = [
        head + '<h2 style="background-color:' + color + ';">' + title + '</h2>\n',
        '<div style="text-align:right;">\n',
        '<a href="https://cxc.cfa.harvard.edu/mta/MSID_Trends/mta_trending_main.html">',
        'Back to Top</a>\n',
        '<br />\n',
        '<a href="https://cxc.cfa.harvard.edu/mta/MSID_Trends/',
        ocolor + '_violation.html">',
        other + '</a>\n',
        '</div>\n',
        '<div class="row">\n',
        '<div class="column">\n',
        '<h3>Lower Violation</h3>\n',
        '<table border=1 cellpadding=2>\n',
        line,
        '</table>\n',
        '</div>\n',
        '<div class="column2">\n',
        '<h3>Upper Violation</h3>\n',
        '<table border=1 cellpadding=2>\n',
        line2,
        '</table>\n',
        '</div>\n',
        '</div>\n',
        tail,
    ]
    tline = ''.join(parts)
    #
    #--- print out the page
    #
    page = web_dir + color + '_violation.html'
    mcf.rm_files(page)

    with open(page, 'w') as fo:
        fo.write(tline)
示例#20
0
def selectTableData(ifile,
                    colname,
                    condition,
                    outname,
                    extension=1,
                    clobber='no'):
    """
    select data for a given colname and the condition and create a new table fits file
    Input:      ifile    --- input table fits file
                colname  --- column name
                condition--- condition of the data selection
                            if the selection is the interval, the format is in <start>:<stop>
                            if it is equal use: ==<value>
                            if it is not equal: !=<value>
                outname  --- output file name
                extension--- extension #. default = 1
                clobber  --- overwrite the file if exists. if not given, 'no'
    """
    m1 = re.search('y', clobber)
    m2 = re.search('Y', clobber)

    if (m1 is not None) or (m2 is not None):
        mcf.rm_files(outname)

    t = pyfits.open(ifile)
    tdata = t[extension].data

    mc = re.search(':', condition)
    mc1 = re.search('\!', condition)
    chk = 0
    if mc is not None:
        #
        #--- interval selection: <start>:<stop>
        #
        atemp = re.split(':', condition)
        start = float(atemp[0])
        stop = float(atemp[1])

        mask = tdata.field(colname) >= start
        modt = tdata[mask]
        mask = modt.field(colname) <= stop
        modt2 = modt[mask]

    elif mc1 is not None:
        #
        #--- "not equal" selection
        #
        condition = condition.replace('!=', "")
        mask = tdata.field(colname) != condition
        modt2 = tdata[mask]

    else:
        #
        #--- "equal" selection
        #--- NOTE(review): condition is a str at this point, so the
        #--- isinstance branch can never fire --- confirm intent
        #
        condition = condition.replace('==', "")
        if isinstance(condition, (float, int)):
            mask = tdata.field(colname) == condition
            modt2 = tdata[mask]
        else:
            #
            #--- fixed: the original referenced the undefined name
            #--- 'tbdata' here (NameError); the table is 'tdata'
            #
            data = select_data_with_logical_mask(tdata, colname, condition)
            chk = 1

    header = pyfits.getheader(ifile)
    if chk == 0:
        data = pyfits.BinTableHDU(modt2, header)

    data.writeto(outname)
示例#21
0
def find_available_deph_data():
    """
    create a list of potential new data file name
    input:  none, but read from /dsops/GOT/aux/DEPH.dir/
    output: cdata   --- a list of the data file names
    """
    #
    #--- find current time
    #
    today = time.strftime('%Y:%j', time.gmtime())
    atemp = re.split(':', today)
    year = int(float(atemp[0]))
    ydate = int(float(atemp[1]))

    #--- two-digit year used in the DEPH file names (e.g. '24' for 2024)
    syear = str(year)
    tyear = syear[2] + syear[3]
    #
    #--- first 20 days of the year, we also check the last year input data
    #
    #--- cmd1 probes the full DEPH directory into ./zlocal; the result is
    #--- used below to decide whether the year-specific listing (cmd)
    #--- should be run at all
    cmd1 = 'ls /dsops/GOT/aux/DEPH.dir/DE*.EPH >> ./zlocal'
    if ydate < 20:
        lyear = year - 1
        slyear = str(lyear)
        ltyear = slyear[2] + slyear[3]

        #--- start zspace with last year's files
        cmd = 'ls /dsops/GOT/aux/DEPH.dir/DE' + ltyear + '*.EPH >  ' + zspace
        os.system(cmd)

        #--- NOTE(review): cmd is (re)assigned here but os.system runs cmd1
        #--- (the probe). cmd is only executed below after the probe confirms
        #--- that DE<tyear> files exist --- presumably to avoid an empty-glob
        #--- ls error; confirm this is the intended flow
        cmd = 'ls /dsops/GOT/aux/DEPH.dir/DE' + tyear + '*.EPH >> ' + zspace
        os.system(cmd1)
        with open('./zlocal', 'r') as f:
            zchk = f.read()
        os.system('rm -f ./zlocal')

        #--- append this year's files only when the probe shows they exist
        zwd = 'DE' + str(tyear)
        mc = re.search(zwd, zchk)
        if mc is not None:
            os.system(cmd)
    else:
        #--- same probe-then-list pattern, but only for the current year
        cmd = 'ls /dsops/GOT/aux/DEPH.dir/DE' + tyear + '*.EPH >  ' + zspace
        os.system(cmd1)
        with open('./zlocal', 'r') as f:
            zchk = f.read()
        os.system('rm -f ./zlocal')

        zwd = 'DE' + str(tyear)
        mc = re.search(zwd, zchk)
        if mc is not None:
            os.system(cmd)
    #
    #--- read the collected file list; on any failure return an empty list
    #
    try:
        cdata = mcf.read_data_file(zspace, remove=1)
    except:
        cdata = []
        mcf.rm_files(zspace)

    return cdata
def check_file_update_date():
    """
    find the files which are not updated for a while
    input:  none, but read from <data_dir>
    output: if there are problems, mail wil be sent out
    """
    #
    #--- the files listed in <house_keeping>/ignore_list are not updated
    #
    ignore = mcf.read_data_file(house_keeping + 'ignore_list')
    #
    #--- list all fits files under <data_dir>
    #
    os.system('ls ' + data_dir + '*/*fits > ' + zspace)
    data = mcf.read_data_file(zspace, remove=1)
    #
    #--- cut time: eight days before the beginning of today
    #
    stday = time.strftime("%Y:%j:00:00:00", time.gmtime())
    stday = Chandra.Time.DateTime(stday).secs - 86400.0 * 8.0
    #
    #--- collect files whose modification time precedes the cut
    #
    save = [ent for ent in data
            if find_modified_time(ent, stday) < 0 and ent not in ignore]

    if len(save) > 0:
        line = 'Following files are not updated more than a week\n\n'
        for ent in save:
            line = line + ent + '\n'

        with open(zspace, 'w') as fo:
            fo.write(line)

        cmd = 'cat ' + zspace + ' | mailx -s "Subject: MTA Trending data update problem!" '
        cmd = cmd + '*****@*****.**'
        os.system(cmd)

        mcf.rm_files(zspace)
    else:
        line = 'Secondary data update finished: '
        line = line + time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()) + '\n'

        with open(zspace, 'w') as fo:
            fo.write(line)

        cmd = 'cat ' + zspace + ' | mailx -s "Subject: Secondary Data Update" [email protected]'
        os.system(cmd)

        mcf.rm_files(zspace)
示例#23
0
def remove_older_files(flist, cdate):
    """
    remove files older than cdate
    input:  flist   --- a list of files
            cdate   --- a cut of date
    output: none
    """
    for ofile in flist:
        #
        #--- drop the file when its time stamp precedes the cut date
        #
        if find_time(ofile) < cdate:
            mcf.rm_files(ofile)
示例#24
0
def chkNewHigh(old_list, new_list, event, dname):
    """
    sending out email to warn that there are value violatioins
    Input: old_list:   old violation table
           new_list:   new violation table
           event:      event name
           dname:      column name to be used
    Output: none, but a warning email is sent when new violations appear
    """
    wchk = 0
    #
    #--- compare old and new list and if there are new entries, save them in "alart"
    #
    alart = []
    for ent in new_list:
        chk = 0
        ntemp = re.split('\t+|\s+', ent)
        for comp in old_list:
            otemp = re.split('\t+|\s+', comp)
            #
            #--- an entry is "known" when the whole line or the leading
            #--- token (obsid) matches an old entry
            #
            if (ent == comp) or (ntemp[0] == otemp[0]):
                chk = 1
                break
        if chk == 0:
            alart.append(ent)
            wchk += 1
    #
    #--- if there is violations, send out email
    #
    if wchk > 0:
        line = 'ACIS Science Run issued the following warning(s)\n\n'
        line = line + "The following observation has a " + event + "Rate in today's science run\n\n"
        line = line + 'obsid   target                  start time  int time        '
        line = line + 'inst    ccd     grat    ' + dname + '\n'
        line = line + '------------------------------------------------------------'
        line = line + '-------------------------------------------\n'
        for ent in alart:
            line = line + ent + '\n'

        line = line + '\nPlese check: https://cxc.cfa.harvard.edu/mta_days/mta_acis_sci_run/'
        line = line + 'science_run.html\n'
        line = line + '\n or MIT ACIS Site: https://acisweb.mit.edu/asc/\n'

        #
        #--- fixed: was f.wirte (typo) which raised AttributeError and
        #--- prevented the warning from ever being written out
        #
        with open(zspace, 'w') as f:
            f.write(line)

        cmd = 'cat ' + zspace + '|mailx -s \"Subject: ACIS Science Run Alert<>'
        cmd = cmd + event + 'Rate" isobe\@head.cfa.harvard.edu'
        #        cmd = 'cat ' + zspace + '|mailx -s \"Subject: ACIS Science Run Alert<>'
        #        cmd = cmd    + event  + 'Rate" isobe\@head.cfa.harvard.edu  '
        #        cmd = cmd    + swolk\@head.cfa.harvard.edu acisdude\@head.cfa.harvard.edu"'

        os.system(cmd)

        mcf.rm_files(zspace)
示例#25
0
def select_data_period(fits, start, stop, htype):
    """
    select the data under the periods and create a new fits file
    input:  fits    --- original fits file name
            start   --- a list of starting time of the period to be selected
            stop    --- a list of stopping time of the period to be selected
            htype   --- the tail of the output file (i, s, or off)
    output: <data_dir>/Hrcelec_<htype>/<fits file name>
            returns False when no data fell in any period
    """
    flist = pyfits.open(fits)
    fdata = flist[1].data

    mlen = len(start)
    #
    #--- select data in each period
    #
    save = ''
    for k in range(0, mlen):
        try:
            mask = fdata['time'] >= start[k]
            out = fdata[mask]
        except:
            #
            #--- NOTE(review): on failure this assigns the 'time' column,
            #--- not the full table --- the later out['time'] would then
            #--- also fail and out2 becomes that column; confirm this
            #--- fallback is intended
            #
            out = fdata['time']
        try:
            mask = out['time'] < stop[k]
            out2 = out[mask]
        except:
            out2 = out
        #
        #--- concatinate the data to the arrays
        #
        if k == 0:
            save = out2
        else:
            save = numpy.concatenate((save, out2), axis=0)
    #
    #--- write out the data into a fits file
    #
    #--- save is still the initial '' when mlen == 0; the str() comparison
    #--- also tolerates save being a numpy array
    if str(
            save
    ) == '':  #--- occasionally there is no data (e.g. weekly has not been filled)
        return False

    hdu = pyfits.BinTableHDU(data=save)

    #--- output goes to the Hrcelec_<htype> variant of the input path
    rname = 'Hrcelec_' + htype
    outname = fits.replace('Hrcelec', rname)

    mcf.rm_files(outname)
    hdu.writeto(outname)

    flist.close()
示例#26
0
def run_process(obsid, hrc):
    """
    a control script to run reprocess csh scripts
    input:  obsid --- obsid to re-process
            hrc: either "hrc_i" or "hrc_s"
    output: <data_dir>/<obsid>    --- re-processed data direcotry
    """
    #
    #--- set conditions for either hrc-i or hrc s
    #
    if hrc == 'hrc_i':
        out_list = 'hrc_i_list'
        data_dir = '/data/hrc/i/'
        inst = 'i'
    else:
        out_list = 'hrc_s_list'
        data_dir = '/data/hrc/s/'
        inst = 's'
    #
    #--- record the obsid to process
    #
    with open(out_list, 'w') as fo:
        fo.write(str(obsid) + '\n')
    #
    #--- extract fits data needed for analysis
    #
    if extract_hrc_data(obsid, data_dir) == False:
        print("Not all data are available")
        exit(1)
    #
    #--- run the re-process script; fix permissions on the result directory
    #
    cmd = 'csh -f ' + bin_dir + 'repro_all_new.csh ' + out_list
    try:
        run_ciao(cmd)
        cdir = data_dir + '/' + str(obsid)
        if os.path.isdir(cdir):
            os.system('chgrp -R hat ' + cdir)
            os.system('chmod -R 775 ' + cdir)
    except:
        pass

    mcf.rm_files(out_list)

    chk_proccess_status(inst, [obsid])
示例#27
0
def find_10th(fits_file):
    """
    find 10th brightest value
    input: fits_file    --- image fits file name
    output: 10th brightest value, or 'I/INDEF' when fewer than 10
            populated histogram bins exist or parsing fails
    """
#
#--- make a histogram of the image with dmimghist
#
    cmd = ' dmimghist infile=' + fits_file
    cmd = cmd + '  outfile=outfile.fits hist=1::1 strict=yes clobber=yes'
    expf.run_ascds(cmd)

    cmd = ' dmlist infile=outfile.fits outfile=./zout opt=data'
    expf.run_ascds(cmd)

    data = mcf.read_data_file('./zout', remove=1)
    mcf.rm_files('outfile.fits')
#
#--- read the bin value (column 1); keep only bins whose count (column 4)
#--- is positive.  len(atemp) > 4 guards the atemp[4] access directly
#--- (the original used > 3 and relied on the except to swallow IndexError)
#
    hbin = []
    for ent in data:
        try:
            atemp = re.split(r'\s+|\t+', ent)
            if (len(atemp) > 4) and mcf.is_neumeric(atemp[1])  \
                    and mcf.is_neumeric(atemp[2]) and (int(atemp[4]) > 0):
                hbin.append(float(atemp[1]))
        except (ValueError, IndexError):
            pass
#
#--- dmlist emits the bins in ascending order, so the 10th brightest
#--- populated bin is the 10th entry from the end.  (the original counted
#--- down with range(len-1, 0, -1), which never reaches index 0 and so
#--- wrongly returned 'I/INDEF' when exactly 10 populated bins exist)
#
    if len(hbin) >= 10:
        return hbin[-10]

    return 'I/INDEF'
示例#28
0
def find_observation(start, stop, lev):
    """
    find information about observations in the time period
    input:  start   --- starting time in the format of <yyyy>-<mm>-<dd>T<hh>:<mm>:<ss>
            stop    --- stopping time in the format of <yyyy>-<mm>-<dd>T<hh>:<mm>:<ss>
            lev     --- data level
    output: ./acis_obs  --- a file of the observation information lines,
                            sorted by observation time (written only when
                            at least one observation is found)
    """
#
#--- run arc5gl to get the observation list
#
    data = run_arc5gl_browse(start, stop, lev)
#
#--- pull the obsid out of each acis file name: acisf<obsid>_... or
#--- acisf<obsid>N...
#
    obsid_list = []
    for ent in data:
        mc = re.search('acisf', ent)
        if mc is not None:
            atemp = re.split('acisf', ent)
            btemp = re.split('_', atemp[1])
            obsid = btemp[0]
            mc = re.search('N', obsid)
            if mc is not None:
                ctemp = re.split('N', obsid)
                obsid = ctemp[0]
            obsid_list.append(obsid)
#
#--- remove duplicates
#
    obsid_list = list(set(obsid_list))
#
#--- open database and extract data for each obsid; skip obsids with no
#--- record.  NOTE(review): the original tested "out != NULL" -- NULL is
#--- not defined in python (NameError unless defined elsewhere as a
#--- module global), so the check is written against None here
#
    save = {}
    tlist = []
    for obsid in obsid_list:
        out = get_data_from_db(obsid)
        if out is not None:
            [tsec, line] = out
            tlist.append(tsec)
            save[tsec] = line

    tlist.sort()

    if len(tlist) > 0:
        mcf.rm_files('./acis_obs')
        with open('./acis_obs', 'w') as fo:
            for ent in tlist:
                fo.write(save[ent])
示例#29
0
def get_data(start, stop, year, msid_list, out_dir):
    """
    update eph l1 related data for the given data period
    input:  start   --- start time in seconds from 1998.1.1
            stop    --- stop time in seconds from 1998.1.1
            year    --- data extracted year
            msid_list   --- list of msids
            out_dir --- output directory (expected to end with '/')
    output: <out_dir>/<msid>_full_data_<year>.fits
    """
    print(str(start) + '<-->' + str(stop))
#
#--- build the arc5gl request for ephin hk level 0 data
#
    line = 'operation=retrieve\n'
    line = line + 'dataset = flight\n'
    line = line + 'detector = ephin\n'
    line = line + 'level = 0\n'
    line = line + 'filetype =ephhk \n'
    line = line + 'tstart = ' + str(start) + '\n'
    line = line + 'tstop = ' + str(stop) + '\n'
    line = line + 'go\n'

    data_list = mcf.run_arc5gl_process(line)
#
#--- make sure the output directory exists (hoisted out of the loops;
#--- the original re-checked this once per msid per fits file)
#
    if not os.path.isdir(out_dir):
        cmd = 'mkdir ' + out_dir
        os.system(cmd)
#
#--- append the data to the local fits data files
#
    for fits in data_list:

        [cols, tbdata] = ecf.read_fits_file(fits)

        tdata = tbdata['time']          # renamed from 'time' to avoid shadowing the time module

        for col in msid_list:
            mdata = tbdata[col]
            cdata = [tdata, mdata]
            ocols = ['time', col.lower()]

            ofits = out_dir + col.lower() + '_full_data_' + str(year) + '.fits'
            if os.path.isfile(ofits):
                ecf.update_fits_file(ofits, ocols, cdata)
            else:
                ecf.create_fits_file(ofits, ocols, cdata)

        mcf.rm_files(fits)
示例#30
0
def remove_duplicate(h_list, sdir):
    """
    remove duplicated fits files
    input:  h_list  --- a list of head part of the fits files
            sdir    --- 'primary' or 'secondary'
    output: cleaned up 'primary' or 'secondary' directory
    """
    for head in h_list:
#
#--- list all fits files matching this head pattern into a temp file
#
        os.system('ls ./' + sdir + '/*' + head + '* > ' + zspace)
        matched = mcf.read_data_file(zspace, remove=1)
#
#--- keep only the last entry of the ls output; remove every earlier one
#
        if len(matched) > 1:
            for dup in matched[:-1]:
                mcf.rm_files(dup)