Example #1
def update_full_archfiles_db3(dat, logger, msid_files, opt):
    # Update the archfiles.db3 database to include the associated archive files
    server_file = msid_files['archfiles'].abs
    logger.debug(f'Updating {server_file}')

    def as_python(val):
        try:
            return val.item()
        except AttributeError:
            return val

    with timing_logger(logger, f'Updating {server_file}', 'info', 'info'):
        with DBI(dbi='sqlite', server=server_file) as db:
            for archfile in dat['archfiles']:
                vals = {name: as_python(archfile[name]) for name in archfile.dtype.names}
                logger.debug(f'Inserting {vals["filename"]}')
                if not opt.dry_run:
                    try:
                        db.insert(vals, 'archfiles')
                    except sqlite3.IntegrityError as err:
                        # Expected exception for archfiles already in the table
                        assert 'UNIQUE constraint failed: archfiles.filename' in str(err)

            if not opt.dry_run:
                db.commit()
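
# A minimal, self-contained sketch of the insert-and-skip-duplicates pattern above,
# using a throwaway SQLite file.  The table definition here is hypothetical, and
# ``db.execute`` for creating it is assumed to be available alongside the
# ``insert``/``commit`` methods used in this example.
import sqlite3
from Ska.DBI import DBI

with DBI(dbi='sqlite', server='/tmp/archfiles_demo.db3') as db:
    db.execute('create table if not exists archfiles '
               '(filename text unique, filetime int)')
    rows = [{'filename': 'f1.fits', 'filetime': 100},
            {'filename': 'f1.fits', 'filetime': 100}]   # duplicate on purpose
    for vals in rows:
        try:
            db.insert(vals, 'archfiles')
        except sqlite3.IntegrityError:
            pass  # filename already in the table, matching the check above
    db.commit()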
Example #2
    def cmd_states_fetch(self, tbegin, tend):
        """
        Search the TA database and retrieve all the command
        state data between the given start/stop times.

        Returns a numpy structured array with the following fields and dtypes:

             ('datestart', '|S21'),
             ('datestop', '|S21'),
             ('tstart', '<f8'),
             ('tstop', '<f8'),
             ('obsid', '<i8'),
             ('power_cmd', '|S10'),
             ('si_mode', '|S8'),
             ('pcad_mode', '|S4'),
             ('vid_board', '<i8'),
             ('clocking', '<i8'),
             ('fep_count', '<i8'),
             ('ccd_count', '<i8'),
             ('simpos', '<i8'),
             ('simfa_pos', '<i8'),
             ('pitch', '<f8'),
             ('ra', '<f8'),
             ('dec', '<f8'),
             ('roll', '<f8'),
             ('q1', '<f8'),
             ('q2', '<f8'),
             ('q3', '<f8'),
             ('q4', '<f8'),
             ('trans_keys', '|S48'),
             ('hetg', '|S4'),
             ('letg', '|S4'),
             ('dither', '|S4')

        """
        # convert the begin and end times into DateTime objects for the sybase query
        tstart = DateTime(tbegin)
        tstop = DateTime(tend)
        #
        # form the query for everything, starting from tstart date to now
        #
        query = """select * from cmd_states where datestart >= '%s'
                   and datestop <= '%s' order by datestart asc """ % (
            tstart.date, tstop.date)
        #
        # set up a read to the data base
        #
        aca_read_db = DBI(dbi='sybase',
                          server='sybase',
                          user='******',
                          database='aca')

        #  Fetch all the data
        self.cmd_states = aca_read_db.fetchall(query)

        return self.cmd_states
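
# A standalone sketch of the same date-bounded query, with credentials redacted
# as in the example above; Chandra.Time.DateTime is assumed for the date handling
# and the result is wrapped in an astropy Table for convenience.
from astropy.table import Table
from Chandra.Time import DateTime
from Ska.DBI import DBI

tstart = DateTime('2021:001:00:00:00')
tstop = DateTime('2021:002:00:00:00')
query = ("select * from cmd_states where datestart >= '%s' "
         "and datestop <= '%s' order by datestart asc"
         % (tstart.date, tstop.date))
db = DBI(dbi='sybase', server='sybase', user='******', database='aca')
cmd_states = Table(db.fetchall(query))
print(cmd_states['datestart', 'datestop', 'obsid', 'pcad_mode'])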
Example #3
def hrc_gain_test_obs(new_obs, test=''):
    """
    Find new AR Lac observations from an HRC obsid list
    Input:  new_obs  --- a list of HRC obsids
            test     --- a test indicator; if it is anything other than "", run in test mode
    Output: "./candidate_list" --- a file listing obsids of new AR Lac observations
            candidate_list     --- the same list, also returned to the caller
    """

    if test == "":
        f1 = open('./candidate_list', 'w')

        file = house_keeping + 'hrc_obsid_list'
        file2 = house_keeping + 'hrc_obsid_list~'
        cmd = 'cp -f ' + file + ' ' + file2
        os.system(cmd)
        f2 = open(file, 'a')

    candidate_list = []
    for obsid in new_obs:
        #
        #--- open sql database and extract data we need
        #
        db = DBI(dbi='sybase',
                 server=db_server,
                 user=db_user,
                 passwd=db_passwd,
                 database='axafocat')

        cmd = 'select obsid,targid,seq_nbr,targname,grating,instrument from target where obsid=' + obsid
        query_results = db.fetchall(cmd)
        if len(query_results) == 0:
            continue
        query_results = Table(query_results)

        targname = query_results['targname'].data[0]
        #
        #--- if the observation is AR Lac, write it down in candidate_list
        #
        m1 = re.search('arlac', targname.lower())
        if m1 is not None:
            line = obsid + '\n'
            candidate_list.append(obsid)

            if test == '':
                f1.write(line)
                f2.write(line)

    if test == '':
        f1.close()
        f2.close()

    return candidate_list
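
# Hypothetical call in test mode (no files are written when ``test`` is non-empty).
# The obsids below are made up, and the module-level db_server / db_user /
# db_passwd / house_keeping settings are assumed to be configured elsewhere.
new_obs = ['23456', '23457']
arlac_obsids = hrc_gain_test_obs(new_obs, test='yes')
print(arlac_obsids)   # obsids whose target name matches 'arlac'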
Example #4
def update_sync_data_full(content, logger, row):
    """
    Update full-resolution sync data including archfiles for index table ``row``

    This generates a gzipped pickle file containing a dict with sync update values
    for all available MSIDs in this chunk of ``content`` telemetry.  The dict has an
    ``archfiles`` key (structured ndarray of archfiles rows) plus {msid}.quality,
    {msid}.data, {msid}.row0 and {msid}.row1 entries for each MSID.

    :param content: content type
    :param logger: global logger
    :param row: archfile row
    :return: None
    """
    ft = fetch.ft
    ft['interval'] = 'full'

    outfile = Path(sync_files['data'].abs)
    if outfile.exists():
        logger.verbose(f'Skipping {outfile}, already exists')
        return

    out = {}
    msids = list(fetch.all_colnames[content]) + ['TIME']

    # row['filetime0'] and row['filetime1'] are the *inclusive* `filetime` stamps
    # for the archfiles to be included in this row.  Rows do not overlap, so the
    # selection below uses inclusive bounds on both ends.
    with DBI(dbi='sqlite', server=fetch.msid_files['archfiles'].abs) as dbi:
        query = (f'select * from archfiles '
                 f'where filetime >= {row["filetime0"]} '
                 f'and filetime <= {row["filetime1"]} '
                 f'order by filetime ')
        archfiles = dbi.fetchall(query)
        out['archfiles'] = archfiles

    # Row slice indexes into full-resolution MSID h5 files.  All MSIDs share the
    # same row0:row1 range.
    row0 = row['row0']
    row1 = row['row1']

    # Go through each MSID and collect values
    n_msids = 0
    for msid in msids:
        ft['msid'] = msid
        filename = fetch.msid_files['msid'].abs
        if not Path(filename).exists():
            logger.debug(f'No MSID file for {msid} - skipping')
            continue

        n_msids += 1
        with tables.open_file(filename, 'r') as h5:
            out[f'{msid}.quality'] = h5.root.quality[row0:row1]
            out[f'{msid}.data'] = h5.root.data[row0:row1]
            out[f'{msid}.row0'] = row0
            out[f'{msid}.row1'] = row1

    n_rows = row1 - row0
    logger.info(
        f'Writing {outfile} with {n_rows} rows of data and {n_msids} msids')

    outfile.parent.mkdir(exist_ok=True, parents=True)
    # TODO: increase compression to max (gzip?)
    with gzip.open(outfile, 'wb') as fh:
        pickle.dump(out, fh)
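
# Sketch of reading back the gzipped pickle written above, assuming ``outfile``
# points at an existing sync data file (the path below is hypothetical).  The keys
# follow the dict built in this function: 'archfiles' plus '{msid}.data',
# '{msid}.quality', '{msid}.row0' and '{msid}.row1' for each MSID.
import gzip
import pickle
from pathlib import Path

outfile = Path('sync/acis4eng/2021-07-01/full.pkl.gz')  # hypothetical
with gzip.open(outfile, 'rb') as fh:
    out = pickle.load(fh)

print(out['archfiles'].dtype.names)
print(out['TIME.row0'], out['TIME.row1'], out['TIME.data'][:5])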
Example #5
def update_index_file(index_file, opt, logger):
    """Update the top-level index file of data available in the sync archive

    :param index_file: Path of index ECSV file
    :param opt: options
    :param logger: output logger
    :return: index table (astropy Table)
    """
    if index_file.exists():
        # Start time of last update contained in the sync repo (if it exists), but do not look
        # back more than max_lookback days.  This is relevant for rarely sampled
        # content like cpe1eng.
        filetime0 = (DateTime(opt.date_stop) - opt.max_lookback).secs

        index_tbl = Table.read(index_file)
        if len(index_tbl) == 0:
            # Need to start with a fresh index_tbl since the string column will end up
            # with a length=1 string (date_id) and add_row later will give the wrong result.
            index_tbl = None
        else:
            filetime0 = max(filetime0, index_tbl['filetime1'][-1])
    else:
        # For initial index file creation use the --date-start option
        index_tbl = None
        filetime0 = DateTime(opt.date_start).secs

    max_secs = int(opt.max_days * 86400)
    time_stop = DateTime(opt.date_stop).secs

    # Step through the archfiles table entries and collect them into groups of up
    # to --max-days based on file time stamp (which is an integer in CXC secs).
    rows = []
    filename = fetch.msid_files['archfiles'].abs
    logger.verbose(f'Opening archfiles {filename}')
    with DBI(dbi='sqlite', server=filename) as dbi:
        while True:
            filetime1 = min(filetime0 + max_secs, time_stop)
            logger.verbose(
                f'select from archfiles '
                f'filetime > {DateTime(filetime0).fits[:-4]} {filetime0} '
                f'filetime <= {DateTime(filetime1).fits[:-4]} {filetime1} ')
            archfiles = dbi.fetchall(f'select * from archfiles '
                                     f'where filetime > {filetime0} '
                                     f'and filetime <= {filetime1} '
                                     f'order by filetime ')

            # Found new archfiles?  If so get a new index table row for them.
            if len(archfiles) > 0:
                rows.append(get_row_from_archfiles(archfiles))
                filedates = DateTime(archfiles['filetime']).fits
                logger.verbose(f'Got {len(archfiles)} archfiles rows from '
                               f'{filedates[0]} to {filedates[-1]}')

            filetime0 = filetime1

            # Stop if already queried out to the end of desired time range
            if filetime1 >= time_stop:
                break

    if not rows:
        logger.info(f'No updates available for content {fetch.ft["content"]}')
        return index_tbl

    # Create table from scratch or add new rows.  In normal processing there
    # will just be one row per run.
    if index_tbl is None:
        index_tbl = Table(rows)
    else:
        for row in rows:
            index_tbl.add_row(row)

    if not index_file.parent.exists():
        logger.info(f'Making directory {index_file.parent}')
        index_file.parent.mkdir(exist_ok=True, parents=True)

    msg = check_index_tbl_consistency(index_tbl)
    if msg:
        msg += '\n'
        msg += '\n'.join(index_tbl.pformat(max_lines=-1, max_width=-1))
        logger.error(f'Index table inconsistency: {msg}')
        return None

    logger.info(f'Writing {len(rows)} row(s) to index file {index_file}')
    index_tbl.write(index_file, format='ascii.ecsv')

    return index_tbl
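
# Sketch of inspecting the index file written above (the path is hypothetical).
# The 'filetime0'/'filetime1' columns follow the usage earlier in this function;
# other columns come from get_row_from_archfiles and are not shown here.
from astropy.table import Table

index_tbl = Table.read('sync/acis4eng/index.ecsv', format='ascii.ecsv')
print(f'{len(index_tbl)} rows of sync updates')
print(index_tbl['filetime0', 'filetime1'][-3:])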
Example #6
import os
import tempfile
from datetime import datetime
import cPickle

from astropy.table import Table
from astropy.io import fits
from Ska.Numpy import smooth, interpolate
from Ska.Shell import bash, tcsh_shell, getenv
from Ska.DBI import DBI
from Ska.engarchive import fetch
import Quaternion
import Ska.quatutil

REDO = False
MTIME = 1457707041.222744
sqlaca = DBI(dbi='sybase', user='******')

#XRAY_DATA = '/data/aca/archive/xray_for_periscope'
projdir = '/proj/sot/ska/analysis/periscope_tilt_2016'
XRAY_DATA = os.path.join(projdir, 'auto')
ciao_env = getenv("source /soft/ciao/bin/ciao.csh", shell='tcsh')


def get_on_axis_bright(srctable, x_center, y_center, limit=180):
    """
    Given a source table from celldetect and the X/Y aimpoint center for the
    observation, return a reduced source list containing just the brightest
    source within radius ``limit``.

    :param srctable: table of sources containing at least X, Y, and NET_COUNTS for each source
    :param x_center: rough aimpoint X center from evt2 file
    :param y_center: rough aimpoint Y center from evt2 file
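
# The example is truncated here.  Below is a minimal sketch of the selection the
# docstring describes (brightest source within radius ``limit`` of the aimpoint),
# assuming ``srctable`` is an astropy Table with X, Y and NET_COUNTS columns; it is
# an illustration, not the implementation from the original module.
import numpy as np

def brightest_within_limit(srctable, x_center, y_center, limit=180):
    dist = np.hypot(srctable['X'] - x_center, srctable['Y'] - y_center)
    near = srctable[dist < limit]
    if len(near) == 0:
        return None
    return near[np.argmax(near['NET_COUNTS'])]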
Example #7
def radial_sym(y, z):
    yc, zc = hist_center(y, z)
    t, p = cart_to_polar(y - yc, z - zc)
    h = np.histogram(np.degrees(p), bins=np.arange(-180, 190, 30))
    #    raise ValueError
    radtest = (h[0] * 1.0 / len(p)) > (3 * 1.0 / len(h[0]))
    print(h[0])
    centertest = (np.count_nonzero(t < 1.0) * 1.0 / len(t)) < .5
    center_hist = np.histogram(t, bins=np.arange(0, 5, .2))
    return np.any(radtest) | centertest | (np.argmax(center_hist[0]) == 0)
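
# Hypothetical check of radial symmetry for a simulated centroid cloud.  This
# assumes the hist_center and cart_to_polar helpers used above are defined
# earlier in the original script (they are not shown in this excerpt).
yag = np.random.normal(0, 0.3, 500)
zag = np.random.normal(0, 0.3, 500)
print(radial_sym(yag, zag))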


COUNT_LIMIT = 2500

acadb = DBI(server='sybase', dbi='sybase', user='******')
DATADIR = 'auto'

obs_srcs = glob(os.path.join(DATADIR, "obs*/picked_src.dat"))
srcs = []
for src_file in obs_srcs:
    src = Table.read(src_file, format='ascii')
    if src['NET_COUNTS'] < COUNT_LIMIT:
        continue
    src_dir = os.path.dirname(src_file)
    stat_file = os.path.join(src_dir, 'point_stat.dat')
    stat = False
    if os.path.exists(stat_file):
        stat_text = open(stat_file).read().strip()
        if stat_text == 'True':
            stat = True