Example #1
def plot_repeat_star_mags(repeats):
    plt.close(1)
    plt.figure(1, figsize=(6, 4))

    aca_db = DBI(server='sybase', dbi='sybase', user='******')
    for agasc_id in repeats['id']:
        print(agasc_id)
        # `cache` is a module-level dict of previously fetched trak_stats rows
        if agasc_id in cache:
            obsdata = cache[agasc_id]
        else:
            obsdata = aca_db.fetchall("select * from trak_stats_data where id = {} "
                                      "order by kalman_tstart".format(agasc_id))
            cache[agasc_id] = obsdata
        years = DateTime(obsdata['kalman_tstart']).frac_year
        scatter = np.random.uniform(-0.5, 0.5, size=len(years))
        dmags = obsdata['aoacmag_mean'] - np.median(obsdata['aoacmag_mean'])
        plt.plot(years + scatter, dmags, '.', label='ID {}'.format(agasc_id))
    aca_db.conn.close()

    plt.xlabel('Year')
    plt.ylabel('Delta Mag')
    plt.grid()
    plt.ylim(-0.1, 0.1)
    plt.legend(loc='upper left', fontsize=10)
    plt.title('ACA Responsivity')
    plt.tight_layout()
    plt.savefig('responsivity.png')
Example #2
def get_repeats(n_repeats=300):  # default gives us 4 stars
    aca_db = DBI(server='sybase', dbi='sybase', user='******')
    repeats = aca_db.fetchall("select id, count(id) as num_obs from trak_stats_data "
                              "group by id having (count(id) > {})".format(n_repeats))
    repeats = repeats[repeats['id'] >= 20]
    aca_db.conn.close()

    return repeats
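
The two snippets above appear designed to be used together; a minimal driver sketch, assuming both functions live in the same module (Example #1 expects a module-level cache dict, created here) and that the Sybase account is available:

# Module-level cache assumed by plot_repeat_star_mags in Example #1
cache = {}

if __name__ == '__main__':
    repeats = get_repeats(n_repeats=300)  # stars tracked in more than 300 observations
    plot_repeat_star_mags(repeats)        # writes responsivity.png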
Example #3
    def cmd_states_fetch(self, tbegin, tend):
        """
        Search the TA database and retrieve all the command
                         state data between the given start/stop times.

        Returned - numpy array. Data types are:

             Data item and type
             ------------------
             ('datestart', '|S21'),
             ('datestop', '|S21'),
             ('tstart', '<f8'),
             ('tstop', '<f8'),
             ('obsid', '<i8'),
             ('power_cmd', '|S10'),
             ('si_mode', '|S8'),
             ('pcad_mode', '|S4'),
             ('vid_board', '<i8'),
             ('clocking', '<i8'),
             ('fep_count', '<i8'),
             ('ccd_count', '<i8'),
             ('simpos', '<i8'),
             ('simfa_pos', '<i8'),
             ('pitch', '<f8'),
             ('ra', '<f8'),
             ('dec', '<f8'),
             ('roll', '<f8'),
             ('q1', '<f8'),
             ('q2', '<f8'),
             ('q3', '<f8'),
             ('q4', '<f8'),
             ('trans_keys', '|S48'),
             ('hetg', '|S4'),
             ('letg', '|S4'),
             ('dither', '|S4')

        """
        # convert begin and end into sybase query tstart and tstop
        tstart = DateTime(tbegin)
        tstop = DateTime(tend)
        #
        # form the query for everything between tstart and tstop
        #
        query = """select * from cmd_states where datestart >= '%s'
                   and datestop <= '%s' order by datestart asc """ % (
            tstart.date, tstop.date)
        #
        # set up a read to the data base
        #
        aca_read_db = DBI(dbi='sybase',
                          server='sybase',
                          user='******',
                          database='aca')

        #  Fetch all the data
        self.cmd_states = aca_read_db.fetchall(query)

        return self.cmd_states
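
The return value is a plain numpy structured array, so callers can filter and index on any of the fields listed in the docstring. A minimal consumption sketch (the `fetcher` instance standing in for whatever class defines this method is hypothetical, and the date range is illustrative):

import numpy as np

# `fetcher` is a hypothetical instance of the class that defines cmd_states_fetch
states = fetcher.cmd_states_fetch('2023:001:00:00:00', '2023:010:00:00:00')

durations = states['tstop'] - states['tstart']
print('{} states, median duration {:.1f} s, pitch {:.1f} to {:.1f} deg'.format(
    len(states), np.median(durations), states['pitch'].min(), states['pitch'].max()))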
Example #4
def db_state0(starttime):
    stime = DateTime(starttime)
    db = DBI(dbi='sybase', server='sybase', user='******', database='aca')
    query = """select * from cmd_states
               where datestart =
               (select max(datestart) from cmd_states
                where datestart < '%s')""" % stime.date
    state0 = db.fetchone(query)
    return state0
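
A brief usage sketch, assuming DBI and DateTime are imported as in the other examples and the Sybase server is reachable; fetchone returns a single row keyed by column name:

# Commanded state in effect just before an (illustrative) time
state0 = db_state0('2023:200:00:00:00')
print(state0['datestart'], state0['obsid'], state0['pcad_mode'])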
Example #5
    def cmd_states_fetch(self, tbegin, tend):
        """
        Search the TA database and retrieve all the command
                         state data between the given start/stop times.
     
        Returned - numpy array. Data types are:
     
             Data item and type
             ------------------
             ('datestart', '|S21'), 
             ('datestop', '|S21'), 
             ('tstart', '<f8'), 
             ('tstop', '<f8'), 
             ('obsid', '<i8'), 
             ('power_cmd', '|S10'), 
             ('si_mode', '|S8'), 
             ('pcad_mode', '|S4'), 
             ('vid_board', '<i8'), 
             ('clocking', '<i8'), 
             ('fep_count', '<i8'), 
             ('ccd_count', '<i8'), 
             ('simpos', '<i8'), 
             ('simfa_pos', '<i8'), 
             ('pitch', '<f8'), 
             ('ra', '<f8'), 
             ('dec', '<f8'), 
             ('roll', '<f8'), 
             ('q1', '<f8'), 
             ('q2', '<f8'), 
             ('q3', '<f8'), 
             ('q4', '<f8'),
             ('trans_keys', '|S48'),
             ('hetg', '|S4'), 
             ('letg', '|S4'), 
             ('dither', '|S4')

        """
        # convert begin and end into sybase query tstart and tstop
        tstart = Chandra.Time.DateTime(tbegin)
        tstop = Chandra.Time.DateTime(tend)
        #
        # form the query for everything between tstart and tstop
        #
        query = """select * from cmd_states where datestart >= '%s' and datestop <= '%s' order by datestart asc """ % (
            tstart.date,
            tstop.date,
        )
        #
        # set up a read to the data base
        #
        aca_read_db = DBI(dbi="sybase", server="sybase", user="******", database="aca")

        #  Fetch all the data
        self.cmd_states = aca_read_db.fetchall(query)

        return self.cmd_states
Example #6
def main(loadseg_rdb_dir, dryrun=False, test=False,
         dbi='sqlite', server='db_base.db3', database=None, user=None, verbose=False):
    """
    Command Load Segment Table Updater
    
    Read RDB table from SKA arc iFOT events area and update load_segments table
    Meant to be run as a cron task with no arguments.

    Details:
    Reads most recent RDB file from arc data iFOT events load_segments directory.
    Checks loads in that file for overlap and prolonged separations
    Removes outdated table entries
    Inserts new and newly modified entries

    Note that dryrun mode does not show timelines which *would* be updated,
    as an update to the load_segments table must happen prior to get_timelines()

    """

    dbh = DBI(dbi=dbi, server=server, database=database, user=user, verbose=verbose)
    ch = logging.StreamHandler()
    ch.setLevel(logging.WARN)
    if verbose:
        ch.setLevel(logging.DEBUG)
    log.addHandler(ch)
    if dryrun:
        log.info("LOAD_SEG INFO: Running in dryrun mode")
    loadseg_dir = loadseg_rdb_dir
    # get the loads from the arc ifot area
    all_rdb_files = glob.glob(os.path.join(loadseg_dir, "*"))
    rdb_file = max(all_rdb_files)
    log.debug("LOAD_SEG DEBUG: Updating from %s" % rdb_file)
    orig_rdb_loads = Ska.Table.read_ascii_table(rdb_file, datastart=3)
    ifot_loads = rdb_to_db_schema( orig_rdb_loads )
    if len(ifot_loads):
        # make any scripted edits to the tables of parsed files to override directory
        # mapping
        import fix_tl_processing
        fix_tl_processing.repair(dbh)
        # make any scripted edits to the load segments table
        import fix_load_segments
        ifot_loads = fix_load_segments.repair(ifot_loads)
        max_timelines_id = dbh.fetchone(
            'SELECT max(id) AS max_id FROM timelines')['max_id'] or 0
        if max_timelines_id == 0 and not test:
            raise ValueError("TIMELINES: no timelines in database.")
        update_loads_db( ifot_loads, dbh=dbh, test=test, dryrun=dryrun )    
        db_loads = dbh.fetchall("""select * from load_segments 
                                   where datestart >= '%s' order by datestart   
                                  """ % ( ifot_loads[0]['datestart'] )
                                )
        update_timelines_db(loads=db_loads, dbh=dbh, max_id=max_timelines_id,
                            dryrun=dryrun, test=test)

    log.removeHandler(ch)
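
A dry-run invocation sketch; the RDB directory path below is hypothetical and the sqlite defaults match the function signature:

# Hypothetical iFOT load_segments RDB directory; dryrun makes no table changes
main('/proj/sot/ska/data/arc/iFOT_events/load_segment',
     dryrun=True, dbi='sqlite', server='db_base.db3', verbose=True)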
Example #7
def hrc_gain_test_obs(new_obs, test=''):
    """
    find new AR Lac observations from an HRC obsid list
    Input: new_obs  --- a list of hrc obsids
           test     --- a test indicator. if it is other than "", test will run
    Output: "./candidate_list"  which lists obsids of new AR Lac observations
            candidate_list      it also returns the same list
    """

    if test == "":
        f1 = open('./candidate_list', 'w')

        file = house_keeping + 'hrc_obsid_list'
        file2 = house_keeping + 'hrc_obsid_list~'
        cmd = 'cp -f ' + file + ' ' + file2
        os.system(cmd)
        f2 = open(file, 'a')

    candidate_list = []
    for obsid in new_obs:
        #
        #--- open sql database and extract data we need
        #
        db = DBI(dbi='sybase',
                 server=db_server,
                 user=db_user,
                 passwd=db_passwd,
                 database='axafocat')

        cmd = 'select obsid,targid,seq_nbr,targname,grating,instrument from target where obsid=' + obsid
        query_results = db.fetchall(cmd)
        if len(query_results) == 0:
            continue
        query_results = Table(query_results)

        targname = query_results['targname'].data[0]
        #
        #--- if the observation is AR Lac, write it down in candidate_list
        #
        m1 = re.search('arlac', targname.lower())
        if m1 is not None:
            line = obsid + '\n'
            candidate_list.append(obsid)

            if test == '':
                f1.write(line)
                f2.write(line)

    if test == '':
        f1.close()
        f2.close()

    return candidate_list
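
A short usage sketch, assuming the module-level house_keeping path and database credentials are configured; the obsids below are made up, and a non-empty test flag keeps the function from writing any files:

new_obs = ['23456', '23457', '23458']            # hypothetical new HRC obsids (strings)
ar_lac = hrc_gain_test_obs(new_obs, test='yes')  # test mode: nothing written to disk
print(ar_lac)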
Example #8
def update_full_archfiles_db3(dat, logger, msid_files, opt):
    # Update the archfiles.db3 database to include the associated archive files
    server_file = msid_files['archfiles'].abs
    logger.debug(f'Updating {server_file}')

    def as_python(val):
        try:
            return val.item()
        except AttributeError:
            return val

    with timing_logger(logger, f'Updating {server_file}', 'info', 'info'):
        with DBI(dbi='sqlite', server=server_file) as db:
            for archfile in dat['archfiles']:
                vals = {name: as_python(archfile[name]) for name in archfile.dtype.names}
                logger.debug(f'Inserting {vals["filename"]}')
                if not opt.dry_run:
                    try:
                        db.insert(vals, 'archfiles')
                    except sqlite3.IntegrityError as err:
                        # Expected exception for archfiles already in the table
                        assert 'UNIQUE constraint failed: archfiles.filename' in str(err)

            if not opt.dry_run:
                db.commit()
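
The insert loop above leans on the table's UNIQUE constraint to reject archive files that were already ingested. A stripped-down sketch of that pattern using only the standard-library sqlite3 module (table layout and file names are invented for illustration):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE archfiles (filename TEXT UNIQUE, filetime INTEGER)')

for row in [('f1.fits', 100), ('f1.fits', 100), ('f2.fits', 200)]:
    try:
        conn.execute('INSERT INTO archfiles VALUES (?, ?)', row)
    except sqlite3.IntegrityError as err:
        # Duplicate filename: expected for already-ingested files, so just skip it
        assert 'UNIQUE constraint failed' in str(err)
conn.commit()

print(conn.execute('SELECT count(*) FROM archfiles').fetchone())  # (2,)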
Example #9
def update_sync_data_full(content, logger, row):
    """
    Update full-resolution sync data including archfiles for index table ``row``

    This generates a gzipped pickle file containing a dict with sync update values
    for all available MSIDs in this chunk of ``content`` telemetry.  The dict has an
    ``archfiles`` key (structured ndarray of archfiles rows) plus, for each MSID,
    keys ``{msid}.quality``, ``{msid}.data``, ``{msid}.row0`` and ``{msid}.row1``.

    :param content: content type
    :param logger: global logger
    :param row: archfile row
    :return: None
    """
    ft = fetch.ft
    ft['interval'] = 'full'

    outfile = Path(sync_files['data'].abs)
    if outfile.exists():
        logger.verbose(f'Skipping {outfile}, already exists')
        return

    out = {}
    msids = list(fetch.all_colnames[content]) + ['TIME']

    # row['filetime0'] and row['filetime1'] are the *inclusive* `filetime` stamps
    # for the archfiles to be included in this row.  The ranges for successive
    # rows do not overlap, so the inclusive (>= / <=) selection below cannot
    # double-count archfiles.
    with DBI(dbi='sqlite', server=fetch.msid_files['archfiles'].abs) as dbi:
        query = (f'select * from archfiles '
                 f'where filetime >= {row["filetime0"]} '
                 f'and filetime <= {row["filetime1"]} '
                 f'order by filetime ')
        archfiles = dbi.fetchall(query)
        out['archfiles'] = archfiles

    # Row slice indexes into full-resolution MSID h5 files.  All MSIDs share the
    # same row0:row1 range.
    row0 = row['row0']
    row1 = row['row1']

    # Go through each MSID and collect values
    n_msids = 0
    for msid in msids:
        ft['msid'] = msid
        filename = fetch.msid_files['msid'].abs
        if not Path(filename).exists():
            logger.debug(f'No MSID file for {msid} - skipping')
            continue

        n_msids += 1
        with tables.open_file(filename, 'r') as h5:
            out[f'{msid}.quality'] = h5.root.quality[row0:row1]
            out[f'{msid}.data'] = h5.root.data[row0:row1]
            out[f'{msid}.row0'] = row0
            out[f'{msid}.row1'] = row1

    n_rows = row1 - row0
    logger.info(
        f'Writing {outfile} with {n_rows} rows of data and {n_msids} msids')

    outfile.parent.mkdir(exist_ok=True, parents=True)
    # TODO: increase compression to max (gzip?)
    with gzip.open(outfile, 'wb') as fh:
        pickle.dump(out, fh)
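
A reader-side sketch showing how the gzipped pickle written above could be unpacked; the file path is hypothetical, and 'TIME' is used because it is always appended to the MSID list:

import gzip
import pickle

# Hypothetical path to a sync data file written by update_sync_data_full
with gzip.open('sync/acis4eng/2020-001T0000/full.pkl.gz', 'rb') as fh:
    out = pickle.load(fh)

print(out['archfiles'].dtype.names)   # archfiles rows bundled with this chunk
msid = 'TIME'
print(out[f'{msid}.row0'], out[f'{msid}.row1'], len(out[f'{msid}.data']))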
Example #10
def update_index_file(index_file, opt, logger):
    """Update the top-level index file of data available in the sync archive

    :param index_file: Path of index ECSV file
    :param opt: options
    :param logger: output logger
    :return: index table (astropy Table)
    """
    if index_file.exists():
        # Start time of last update contained in the sync repo (if it exists), but do not look
        # back more than max_lookback days.  This is relevant for rarely sampled
        # content like cpe1eng.
        filetime0 = (DateTime(opt.date_stop) - opt.max_lookback).secs

        index_tbl = Table.read(index_file)
        if len(index_tbl) == 0:
            # Need to start with a fresh index_tbl since the string column will end up
            # with a length=1 string (date_id) and add_row later will give the wrong result.
            index_tbl = None
        else:
            filetime0 = max(filetime0, index_tbl['filetime1'][-1])
    else:
        # For initial index file creation use the --date-start option
        index_tbl = None
        filetime0 = DateTime(opt.date_start).secs

    max_secs = int(opt.max_days * 86400)
    time_stop = DateTime(opt.date_stop).secs

    # Step through the archfile files entries and collect them into groups of up
    # to --max-days based on file time stamp (which is an integer in CXC secs).
    rows = []
    filename = fetch.msid_files['archfiles'].abs
    logger.verbose(f'Opening archfiles {filename}')
    with DBI(dbi='sqlite', server=filename) as dbi:
        while True:
            filetime1 = min(filetime0 + max_secs, time_stop)
            logger.verbose(
                f'select from archfiles '
                f'filetime > {DateTime(filetime0).fits[:-4]} {filetime0} '
                f'filetime <= {DateTime(filetime1).fits[:-4]} {filetime1} ')
            archfiles = dbi.fetchall(f'select * from archfiles '
                                     f'where filetime > {filetime0} '
                                     f'and filetime <= {filetime1} '
                                     f'order by filetime ')

            # Found new archfiles?  If so get a new index table row for them.
            if len(archfiles) > 0:
                rows.append(get_row_from_archfiles(archfiles))
                filedates = DateTime(archfiles['filetime']).fits
                logger.verbose(f'Got {len(archfiles)} archfiles rows from '
                               f'{filedates[0]} to {filedates[-1]}')

            filetime0 = filetime1

            # Stop if already queried out to the end of desired time range
            if filetime1 >= time_stop:
                break

    if not rows:
        logger.info(f'No updates available for content {fetch.ft["content"]}')
        return index_tbl

    # Create table from scratch or add new rows.  In normal processing there
    # will just be one row per run.
    if index_tbl is None:
        index_tbl = Table(rows)
    else:
        for row in rows:
            index_tbl.add_row(row)

    if not index_file.parent.exists():
        logger.info(f'Making directory {index_file.parent}')
        index_file.parent.mkdir(exist_ok=True, parents=True)

    msg = check_index_tbl_consistency(index_tbl)
    if msg:
        msg += '\n'
        msg += '\n'.join(index_tbl.pformat(max_lines=-1, max_width=-1))
        logger.error(f'Index table inconsistency: {msg}')
        return None

    logger.info(f'Writing {len(rows)} row(s) to index file {index_file}')
    index_tbl.write(index_file, format='ascii.ecsv')

    return index_tbl
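
Consumers of the sync archive read the index back as an astropy Table; a minimal sketch (the path is hypothetical; 'date_id' and 'filetime1' are columns referenced in the code above):

from astropy.table import Table

# Hypothetical location of the index file written by update_index_file
index_tbl = Table.read('sync/acis4eng/index.ecsv', format='ascii.ecsv')
print(index_tbl['date_id', 'filetime1'])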
Example #11
#!/usr/bin/env python

import sys
from Ska.DBI import DBI
from Chandra.Time import DateTime
import numpy as np

dbh = DBI(dbi='sybase', server='sybase', user='******')
mp = '/data/mpcrit1/mplogs'

t = DateTime()
if len(sys.argv) > 1:
    t = DateTime(sys.argv[1])
timelines = dbh.fetchall(
"""select * from timelines
   where datestart <= '%(date)s' and datestop > '%(date)s'"""
% {'date': t.date})

if not len(timelines):
    timelines = dbh.fetchall(
"""select * from timelines
   where ( datestart = (
       select max(datestart) from timelines where datestart < '%(date)s'))
    or ( datestart = (
       select min(datestart) from timelines where datestart > '%(date)s'))"""
% {'date': t.date})

for tdir in np.unique(timelines['dir']):
    print "file://%s%s%s" % (mp, tdir, 'starcheck.html')
Example #12
def radial_sym(y, z):
    yc, zc = hist_center(y, z)
    t, p = cart_to_polar(y - yc, z - zc)
    h = np.histogram(np.degrees(p), bins=np.arange(-180, 190, 30))
    radtest = (h[0] * 1.0 / len(p)) > (3 * 1.0 / len(h[0]))
    print(h[0])
    centertest = (np.count_nonzero(t < 1.0) * 1.0 / len(t)) < .5
    center_hist = np.histogram(t, bins=np.arange(0, 5, .2))
    return np.any(radtest) | centertest | (np.argmax(center_hist[0]) == 0)

COUNT_LIMIT = 2500


acadb = DBI(server='sybase', dbi='sybase', user='******')
DATADIR = 'auto'

obs_srcs = glob(os.path.join(DATADIR, "obs*/picked_src.dat"))
srcs = []
for src_file in obs_srcs:
    src = Table.read(src_file, format='ascii')
    if src['NET_COUNTS'] < COUNT_LIMIT:
        continue
    src_dir = os.path.dirname(src_file)
    stat_file = os.path.join(src_dir, 'point_stat.dat')
    stat = False
    if os.path.exists(stat_file):
        stat_text = open(stat_file).read().strip()
        if stat_text == 'True':
            stat = True
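
A quick synthetic check of radial_sym, assuming hist_center and cart_to_polar from the surrounding module are importable; the Gaussian point cloud below is purely illustrative:

import numpy as np

np.random.seed(0)
yag = np.random.normal(0.0, 0.5, size=2000)   # arcsec offsets, roughly symmetric
zag = np.random.normal(0.0, 0.5, size=2000)

# True would flag an azimuthally lopsided or overly centrally peaked distribution
print(radial_sym(yag, zag))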
Example #13
def radial_sym(y, z):
    yc, zc = hist_center(y, z)
    t, p = cart_to_polar(y - yc, z - zc)
    h = np.histogram(np.degrees(p), bins=np.arange(-180, 190, 30))
    radtest = (h[0] * 1.0 / len(p)) > (3 * 1.0 / len(h[0]))
    print(h[0])
    centertest = (np.count_nonzero(t < 1.0) * 1.0 / len(t)) < .5
    center_hist = np.histogram(t, bins=np.arange(0, 5, .2))
    return np.any(radtest) | centertest | (np.argmax(center_hist[0]) == 0)


COUNT_LIMIT = 2500

acadb = DBI(server='sybase', dbi='sybase', user='******')
DATADIR = 'auto'

obs_srcs = glob(os.path.join(DATADIR, "obs*/picked_src.dat"))
srcs = []
for src_file in obs_srcs:
    src = Table.read(src_file, format='ascii')
    if src['NET_COUNTS'] < COUNT_LIMIT:
        continue
    src_dir = os.path.dirname(src_file)
    stat_file = os.path.join(src_dir, 'point_stat.dat')
    stat = False
    if os.path.exists(stat_file):
        stat_text = open(stat_file).read().strip()
        if stat_text == 'True':
            stat = True
Example #14
    parser.add_argument("--stop", help="Stop time for roll/temp checks.  Default to March past end of cycle.")
    parser.add_argument("--redo", action="store_true", help="Redo processing even if complete and up-to-date")
    opt = parser.parse_args()
    return opt


opt = get_options()
OUTDIR = opt.out
if not os.path.exists(OUTDIR):
    os.makedirs(OUTDIR)
CYCLE = opt.cycle
LABEL = "Outstanding Targets"
PLANNING_LIMIT = opt.planning_limit
TASK_DATA = os.path.join(os.environ["SKA"], "data", "aca_lts_eval")

db = DBI(dbi="sybase", server="sqlsao", database="axafocat", user="******")
query = """SELECT t.obsid, t.ra, t.dec,
t.y_det_offset as y_offset, t.z_det_offset as z_offset,
t.approved_exposure_time, t.instrument, t.grating, t.obs_ao_str
FROM target t
WHERE
((t.status='unobserved' OR t.status='partially observed' OR t.status='untriggered' OR t.status='scheduled')
AND NOT(t.ra = 0 AND t.dec = 0)
AND NOT(t.ra IS NULL OR t.dec IS NULL))
ORDER BY t.obsid"""

targets = Table(db.fetchall(query))
targets.write(os.path.join(OUTDIR, "requested_targets.txt"), format="ascii.fixed_width_two_line")


stop = DateTime("{}-03-15".format(2000 + CYCLE))
def retrieve_perigee_telem(start='2009:100:00:00:00.000',
                           stop=None,
                           pass_data_dir='.',
                           redo=False):
    """
    Retrieve perigee pass and other 8x8 image telemetry.

    Telemetry is stored in directories named by datestart in the PASS_DATA
    directory.
    The file pass_times.txt in each directory contains the time range that
    has been queried for 8x8 image data

    :param start: Chandra.Time compatible time for beginning of range
    :param stop: Chandra.Time compatible time for end of range
    :rtype: list of updated directories
    """

    tstart = DateTime(start)
    # default tstop is now; otherwise use the supplied stop time
    tstop = DateTime(time.time(), format='unix') if stop is None else DateTime(stop)

    log.info("retrieve_perigee_telem(): Checking for current telemetry from %s"

             % tstart.date)

    pass_time_file = 'pass_times.txt'
    aca_db = DBI(dbi='sybase', server='sybase',
                 user='******', database='aca')
    obsids = aca_db.fetchall("""SELECT obsid,obsid_datestart,obsid_datestop
                                from observations
                                where obsid_datestart > '%s'
                                and obsid_datestart < '%s' order by obsid_datestart"""
                             % (tstart.date, tstop.date))

    # Get contiguous ER chunks, which are largely perigee passes
    chunks = []
    chunk = {'start': None,
             'stop': None}
    for obsid in obsids:
        # If an OR, end a "chunk" of ERs unless undefined
        # (this should only append on the first OR after one or more ERs)
        if obsid['obsid'] < 40000:
            if chunk['start'] is not None and chunk['stop'] is not None:
                chunks.append(chunk.copy())
                chunk = {'start': None,
                         'stop': None}
        else:
            if chunk['start'] is None:
                chunk['start'] = obsid['obsid_datestart']
            chunk['stop'] = obsid['obsid_datestop']

    pass_dirs = []
    # for each ER chunk get telemetry
    for chunk in chunks:
        er_start = chunk['start']
        er_stop = chunk['stop']
        log.debug("checking for %s pass" % er_start)
        er_year = DateTime(er_start).year
        year_dir = os.path.join(pass_data_dir, "%s" % er_year)
        if not os.access(year_dir, os.R_OK):
            os.mkdir(year_dir)
        pass_dir = os.path.join(pass_data_dir, "%s" % er_year, er_start)
        if not os.access(pass_dir, os.R_OK):
            os.mkdir(pass_dir)
        if (DateTime(er_stop).secs - DateTime(er_start).secs > 86400 * 2):
            if not os.path.exists(os.path.join(pass_dir, 'warned.txt')):
                log.warn("Skipping %s pass, more than 48 hours long" % er_start)
                continue
        pass_dirs.append(pass_dir)
        made_timefile = os.path.exists(os.path.join(pass_dir, pass_time_file))
        if made_timefile:
            pass_done = Ska.Table.read_ascii_table(
                os.path.join(pass_dir, pass_time_file))
            if ((pass_done['obsid_datestart'] == er_start)
                    & (pass_done['obsid_datestop'] == er_stop)):
                log.debug("%s times match" % pass_dir)
                continue
            else:
                log.info("pass %s exists but needs updating" % er_start)
                redo = True
        if not made_timefile or redo:
            f = open(os.path.join(pass_dir, pass_time_file), 'w')
            f.write("obsid_datestart,obsid_datestop\n")
            f.write("%s,%s\n" % (er_start, er_stop))
            f.close()
    return pass_dirs
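
An invocation sketch, assuming the Sybase server is reachable; the pass-data directory is hypothetical:

# Hypothetical PASS_DATA root; one subdirectory per ER chunk is created under it
pass_dirs = retrieve_perigee_telem(start='2023:001:00:00:00.000',
                                   pass_data_dir='/data/aca/perigee/PASS_DATA',
                                   redo=False)
for pass_dir in pass_dirs:
    print(pass_dir)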
Example #16
import os
import shelve
import tempfile
from datetime import datetime
import cPickle

import numpy as np
from astropy.table import Table
from astropy.io import fits
from astropy.time import Time
import pyyaks.logger
from Chandra.Time import DateTime
from Ska.Numpy import smooth, interpolate
from Ska.Shell import bash, tcsh_shell, getenv
from Ska.DBI import DBI
from Ska.engarchive import fetch
import Quaternion
import Ska.quatutil

REDO = False
MTIME = 1457707041.222744
sqlaca = DBI(dbi='sybase', user='******')

#XRAY_DATA = '/data/aca/archive/xray_for_periscope'
projdir = '/proj/sot/ska/analysis/periscope_tilt_2016'
XRAY_DATA = os.path.join(projdir, 'auto')
ciao_env = getenv("source /soft/ciao/bin/ciao.csh", shell='tcsh')


def get_on_axis_bright(srctable, x_center, y_center, limit=180):
    """
    Given source table from celldetect and the X/Y center for the observation
    return a reduced source list with just the brightest source within radius limit

    :param srctable: table of sources containing at least X, Y, and NET_COUNTS for each source
    :param x_center: rough aimpoint X center from evt2 file
    :param y_center: rough aimpoint Y center from evt2 file
    :param limit: radius within which to keep the brightest source
    :return: reduced source list with the single brightest source inside ``limit``
    """
    ...


loglevel = pyyaks.logger.INFO
logger = pyyaks.logger.get_logger(name='update_aimpoint_data', level=loglevel,
                                  format="%(asctime)s %(message)s")

# Get options
opt = get_opt()
stop = DateTime(opt.stop)
start = stop - 10 if (opt.start is None) else DateTime(opt.start)
logger.info('Processing from {} to {}'.format(start.date, stop.date))

# Define file names
h5_file = os.path.join(opt.data_root, 'aimpoint_asol_values.h5')
obsid_file = os.path.join(opt.data_root, 'aimpoint_obsid_index.shelve')

# Get obsids in date range
db = DBI(dbi='sqlite', server='/data/aca/archive/obspar/archfiles.db3')
obs = db.fetchall('select obsid, tstart from archfiles where tstart > {}'
                  ' and tstart < {}'
                  .format(start.secs, stop.secs))
db.conn.close()

# Get unique obsids and then sort by tstart
idx = np.unique(obs['obsid'], return_index=True)[1]
obs = Table(obs[idx])
obs.sort('tstart')
obs['datestart'] = Time(obs['tstart'], format='cxcsec').yday
obs.pprint(max_lines=-1)

obsid_index = shelve.open(obsid_file)

# Go through obsids and either process or skip