Example #1
def get_state_data(tstart, tstop):
    """ Get states where 'vid_board', 'clocking', 'fep_count', 'pcad_mode' are constant.

    Args:
        tstart (int, float, string): Start time, using Chandra.Time epoch
        tstop (int, float, string): Stop time, using Chandra.Time epoch

    Returns:
        (numpy.ndarray): state data

    """
    keys = [
        'pitch', 'off_nom_roll', 'ccd_count', 'fep_count', 'clocking',
        'vid_board', 'pcad_mode', 'simpos'
    ]
    state_data = states.get_states(tstart,
                                   tstop,
                                   state_keys=keys,
                                   merge_identical=True)

    # Convert 'trans_keys' elements from 'TransKeysSet' objects to strings for compatibility with the 'reduce_states'
    # function in the 'Chandra.cmd_states' package
    state_data['trans_keys'] = [str(val) for val in state_data['trans_keys']]
    state_data['tstart'] = DateTime(
        state_data['datestart']).secs  # Add start time in seconds as 'tstart'
    state_data['tstop'] = DateTime(
        state_data['datestop']).secs  # Add stop time in seconds as 'tstop'

    # relying on 'pcad_mode' to ensure attitude does not change significantly within a dwell
    state_data = reduce_states(
        state_data,
        ['ccd_count', 'fep_count', 'clocking', 'vid_board', 'pcad_mode'])

    return state_data
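
A minimal usage sketch (the week-long time range and the downstream selection are illustrative, not from the source):

state_data = get_state_data('2020:001:00:00:00', '2020:008:00:00:00')
dwell_secs = state_data['tstop'] - state_data['tstart']      # durations from the added columns
npnt_states = state_data[state_data['pcad_mode'] == 'NPNT']  # keep only normal-point dwells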
Example #2
def del_stats(colname, time0, interval):
    """Delete all rows in ``interval`` stats file for column ``colname`` that
    occur after time ``time0`` - ``interval``.  This is used to fix problems
    that result from a file misorder.  Subsequent runs of update_stats will
    refresh the values correctly.
    """
    dt = {'5min': 328, 'daily': 86400}[interval]

    ft['msid'] = colname
    ft['interval'] = interval
    stats_file = msid_files['stats'].abs
    if not os.path.exists(stats_file):
        raise IOError('Stats file {} not found'.format(stats_file))

    logger.info('Fixing stats file %s after time %s', stats_file,
                DateTime(time0).date)

    stats = tables.openFile(stats_file,
                            mode='a',
                            filters=tables.Filters(complevel=5,
                                                   complib='zlib'))
    index0 = time0 // dt - 1
    indexes = stats.root.data.col('index')[:]
    row0 = np.searchsorted(indexes, [index0])[0] - 1
    if opt.dry_run:
        n_del = len(stats.root.data) - row0
    else:
        n_del = stats.root.data.removeRows(row0, len(stats.root.data))
    logger.info('Deleted %d rows from row %s (%s) to end', n_del, row0,
                DateTime(indexes[row0] * dt).date)
    stats.close()
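
For reference, each stats row index maps to a time bin of width dt, so index * dt recovers the bin start time; a short check of that arithmetic for the daily interval (the seconds value is taken from Example #6):

dt = 86400                                      # daily stats bin width in seconds
time0 = DateTime('2012:001:00:00:00.000').secs  # 441763266.184
index0 = time0 // dt - 1                        # same arithmetic as del_stats
DateTime(index0 * dt).date                      # bin start just before time0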
Example #3
    def test_start_day(self):
        self.assertEqual(
            DateTime('1996365.010203').day_start().iso,
            '1996-12-30 00:00:00.000')
        self.assertEqual(
            DateTime('1996367.010203').day_start().iso,
            '1997-01-01 00:00:00.000')
Example #4
def test_sun_vec_versus_telemetry():
    """
    Test sun vector values `pitch` and `off_nominal_roll` versus flight telemetry.
    Includes the load maneuver at 2017:349:20:52:37.719 in DEC1117 with a large pitch
    and off_nominal_roll change (from around zero to -17 deg).

    State values are within 1.5 degrees of telemetry.
    """

    state_keys = ['pitch', 'off_nom_roll']
    start, stop = '2017:349:10:00:00', '2017:350:10:00:00'
    cmds = commands.get_cmds(start, stop)
    rk = states.get_states(state_keys=state_keys,
                           cmds=cmds,
                           merge_identical=True)[-20:-1]

    tstart = DateTime(rk['datestart']).secs
    tstop = DateTime(rk['datestop']).secs
    tmid = (tstart + tstop) / 2

    # Pitch from telemetry
    dat = fetch.Msid('pitch', tstart[0] - 100, tstop[-1] + 100)
    dat.interpolate(times=tmid)
    delta = np.abs(dat.vals - rk['pitch'])
    assert np.max(rk['pitch']) - np.min(rk['pitch']) > 75  # Big maneuver
    assert np.all(delta < 1.5)

    # Off nominal roll (not roll from ra,dec,roll) from telemetry
    dat = fetch.Msid('roll', tstart[0] - 100, tstop[-1] + 100)
    dat.interpolate(times=tmid)
    delta = np.abs(dat.vals - rk['off_nom_roll'])
    assert np.max(rk['off_nom_roll']) - np.min(
        rk['off_nom_roll']) > 20  # Large range
    assert np.all(delta < 1.5)
Example #5
File: aca_l0.py Project: sot/mica
    def _fetch_by_time(self, range_tstart, range_tstop):
        logger.info("Fetching %s from %s to %s" %
                    ('ACA L0 Data', DateTime(range_tstart).date,
                     DateTime(range_tstop).date))
        archfiles = self._get_archive_files(DateTime(range_tstart),
                                            DateTime(range_tstop))
        return archfiles
Example #6
    def test_secs(self):
        self.assertEqual('%.3f' % DateTime(20483020.).secs, '20483020.000')
        self.assertEqual(DateTime(20483020.).date, '1998:238:01:42:36.816')
        self.assertEqual(
            DateTime('2012:001:00:00:00.000').secs, 441763266.18399996)
        self.assertEqual(
            DateTime(473385667.18399996).date, '2013:001:00:00:00.000')
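
The conversions exercised by these tests generalize; a small sketch of the round trips involved (values as in the assertions above):

from Chandra.Time import DateTime

d = DateTime('2013:001:00:00:00.000')
d.secs                          # 473385667.184 (CXC seconds: TT seconds since 1998.0)
d.iso                           # '2013-01-01 00:00:00.000'
d.day_start().date              # midnight at the start of the same day
(d + 1).date                    # day arithmetic, as used in Example #26
DateTime(473385667.184).date    # and back from seconds to the date format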
Example #7
def get_ifot(event_type, start=None, stop=None, props=[], columns=[], timeout=TIMEOUT, types={}):
    start = DateTime('1998:001' if start is None else start)
    stop = DateTime(stop)
    event_props = '.'.join([event_type] + props)

    params = odict(r='home',
                   t='qserver',
                   format='tsv',
                   tstart=start.date,
                   tstop=stop.date,
                   e=event_props,
                   ul='7',
                   )
    if columns:
        params['columns'] = ','.join(columns)

    # Get the TSV data for the iFOT event table
    url = ROOTURL + URLS['ifot']
    response = requests.get(url, auth=get_auth(), params=params, timeout=timeout)

    # Normalize the returned TSV text before parsing
    text = response.text
    text = re.sub(r'\r\n', ' ', text)
    lines = [x for x in text.split('\t\n') if x.strip()]

    converters = {key: [ascii.convert_numpy(getattr(np, type_))]
                  for key, type_ in types.items()}
    dat = ascii.read(lines, format='tab', guess=False, converters=converters,
                     fill_values=None)
    return dat
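
A hedged usage sketch; the event type and property below are placeholders for illustration, not verified iFOT fields:

dat = get_ifot('CAP', start='2020:001', stop='2020:032', props=['TITLE'])
dat.colnames  # columns of the returned astropy Table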
Example #8
def main():
    opt = get_opt()
    pitchs = list(range(45, 170, 5))
    model_spec = json.load(open(opt.model_spec, 'r'))

    plt.close(1)
    plt.figure(1, figsize=(6, 4))

    years = np.arange(opt.start_year, opt.stop_year + 0.0001, opt.dt_year)
    times = DateTime(years, format='frac_year').secs

    for tstart in times:
        pitch_temps = []
        for pitch in pitchs:
            model = calc_model(tstart, pitch, model_spec)
            pitch_temps.append(model.comp['pftank2t'].mvals[-1])
        pitch_temps = np.array(pitch_temps) * 1.8 + 32  # In degF
        label = '{}'.format(DateTime(tstart).date[:8])
        plt.plot(pitchs, pitch_temps, lw=1.5, label=label)

    plt.legend(loc='best')
    plt.grid(True)
    plt.ylabel('PFTANK2T (degF)')
    plt.title('Tank settling temperature')
    plt.tight_layout()
    if opt.plot_file:
        plt.savefig(opt.plot_file)
Example #9
def get_unique_orbit_cmds(orbit_cmds):
    """
    Given list of ``orbit_cmds`` find the quasi-unique set.  In the event of a
    replan/reopen or other schedule oddity, it can happen that there are multiple cmds
    that describe the same orbit event.  Since the detailed timing might change between
    schedule runs, cmds are considered the same if the date is within 3 minutes.
    """
    if len(orbit_cmds) == 0:
        return []

    # Sort by (event_type, date)
    orbit_cmds.sort(key=lambda y: (y['params']['EVENT_TYPE'], y['date']))

    uniq_cmds = [orbit_cmds[0]]
    # Step through one at a time and add to uniq_cmds only if the candidate is
    # "different" from uniq_cmds[-1].
    for cmd in orbit_cmds:
        last_cmd = uniq_cmds[-1]
        if (cmd['params']['EVENT_TYPE'] == last_cmd['params']['EVENT_TYPE']
                and abs(DateTime(cmd['date']).secs - DateTime(last_cmd['date']).secs) < 180):
            # Same event as last (even if date is a bit different).  Now if this one
            # has a larger timeline_id that means it is from a more recent schedule, so
            # use that one.
            if cmd['timeline_id'] > last_cmd['timeline_id']:
                uniq_cmds[-1] = cmd
        else:
            uniq_cmds.append(cmd)

    uniq_cmds.sort(key=lambda y: y['date'])

    return uniq_cmds
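
A toy illustration of the de-duplication rule; the command dicts are fabricated and carry only the keys the function actually reads:

cmds = [{'date': '2020:010:01:00:00.000', 'timeline_id': 1,
         'params': {'EVENT_TYPE': 'PERIGEE'}},
        {'date': '2020:010:01:02:00.000', 'timeline_id': 2,   # same event 2 min later;
         'params': {'EVENT_TYPE': 'PERIGEE'}}]                # newer timeline wins
uniq = get_unique_orbit_cmds(cmds)
assert len(uniq) == 1 and uniq[0]['timeline_id'] == 2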
Example #10
    def test_init_from_mxDateTime(self):
        if HAS_MX_DATETIME:
            mxd = DateTime('1999-01-01 12:13:14').mxDateTime
            self.assertEqual(DateTime(mxd).fits, '1999-01-01T12:14:18.184')
            self.assertEqual(
                DateTime(mxd).mxDateTime.strftime('%c'),
                'Fri Jan  1 12:13:14 1999')
Example #11
def get_eclipse_text(eclfile, t1, t2):

    #----------------------------------
    # Import eclipse data
    eclipse = ecl.read_eclipse_file(eclfile)
    # eclipse = ecl.read_eclipse_file('./ECLIPSE_HISTORY_2014.txt')

    #----------------------------------
    # Look for eclipses that fall in the current week
    ecllist = []
    for k in eclipse['eclipse_nums']:
        tstart = eclipse[k]['entrancepenumbra']['startsec']
        if 'exitpenumbra' in eclipse[k]:
            # This is used for a normal eclipse
            tstop = eclipse[k]['exitpenumbra']['stopsec']
        else:
            # This is used for a penumbral eclipse
            tstop = eclipse[k]['entrancepenumbra']['stopsec']

        if (tstart >= DateTime(t1).secs) & (tstop <= DateTime(t2).secs):
            ecllist.append(k)

    if ecllist:
        eclipselines = writeEclipseText(eclipse, ecllist)
    else:
        eclipselines = 'None.'

    return eclipselines
Example #12
def get_bad_mask(tlm):
    mask = np.zeros(len(tlm), dtype='bool')
    for interval in characteristics.bad_times:
        bad = ((tlm['date'] >= DateTime(interval['start']).secs)
               & (tlm['date'] < DateTime(interval['stop']).secs))
        mask[bad] = True
    return mask
Example #13
    def fetch(self, msid, attr='vals', method='linear'):
        """Get data from the Chandra engineering archive.

        Parameters
        ----------
        msid :

        attr :
             (Default value = 'vals')
        method :
             (Default value = 'linear')

        Returns
        -------

        """
        tpad = DEFAULT_DT * 5.0
        datestart = DateTime(self.tstart - tpad).date
        datestop = DateTime(self.tstop + tpad).date
        logger.info('Fetching msid: %s over %s to %s' %
                    (msid, datestart, datestop))
        try:
            import Ska.engarchive.fetch_sci as fetch
            tlm = fetch.MSID(msid, datestart, datestop, stat='5min')
            tlm.filter_bad_times()
        except ImportError:
            raise ValueError('Ska.engarchive.fetch not available')
        if tlm.times[0] > self.tstart or tlm.times[-1] < self.tstop:
            raise ValueError('Fetched telemetry does not span model start and '
                             'stop times for {}'.format(msid))
        vals = Ska.Numpy.interpolate(getattr(tlm, attr),
                                     tlm.times,
                                     self.times,
                                     method=method)
        return vals
Example #14
    def psf_time_anal(sources, title):
        """
        Time analysis - refit only the current year, somehow remember or
        read from file coefficients for the previous years? They will change
        if we change the source detection algorithm?
        """

        if self.year is None:
            current_year = DateTime().year
        else:
            current_year = self.year
        # years = np.arange(1999, current_year + 1)
        years = [current_year]

        for year in years:
            self.year = year
            flt1 = DateTime(sources['TSTART']).date >= str(year)
            flt2 = DateTime(sources['TSTART']).date < str(year + 1)
            flt_sources = sources[flt1 & flt2]
            if len(flt_sources) > NUM_FIT_SRC:
                flt_sources.sort('DIST_ARCSEC')
                x = flt_sources['DIST_ARCSEC']
                y = flt_sources['PSF_ARCSEC']
                self.popt, self.pcov = curve_fit(psf_function, x, y)
                popt, pcov = curve_fit(psf_function_a0, x, y)
                self.on_axis = popt[0]
                self.on_axis_error = pcov[0][0]
                self.x = x
                self.y = y
Example #15
def get_states(datestart, datestop):
    """Get states exactly covering date range

    :param datestart: start date
    :param datestop: stop date
    :returns: np recarray of states
    """
    get_states_func = get_states_kadi if opt.dbi == 'kadi' else get_states_dbi
    states = get_states_func(datestart, datestop)

    # Add power columns to states and tlm
    states = Ska.Numpy.add_column(states, 'power', get_power(states))

    # Set start and end state date/times to match telemetry span.  Extend the
    # state durations by a small amount because of a precision issue converting
    # to date and back to secs.  (The reference tstop could be just over the
    # 0.001 precision of date and thus cause an out-of-bounds error when
    # interpolating state values).
    states[0].tstart = DateTime(datestart).secs - 0.01
    states[0].datestart = DateTime(states[0].tstart).date
    states[-1].tstop = DateTime(datestop).secs + 0.01
    states[-1].datestop = DateTime(states[-1].tstop).date

    return states
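
The 0.01 sec padding compensates for the millisecond precision of the date format; a quick illustration of the round trip it guards against (times arbitrary):

t = DateTime('2012:001:00:00:00.000').secs + 0.0004  # a reference tstop between millisecond ticks
t_rt = DateTime(DateTime(t).date).secs               # secs -> date -> secs round trip
assert abs(t_rt - t) < 1e-3                          # can differ by up to the date precision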
Example #16
def interpolate_orbit_points(orbit_points, name):
    """
    Linearly interpolate across any gaps for ``name`` orbit_points.
    """
    if len(orbit_points) == 0:
        return []

    ok = orbit_points['name'] == name
    ops = orbit_points[ok]
    # Get the indexes of missing orbits
    idxs = np.flatnonzero(np.diff(ops['orbit_num']) > 1)
    new_orbit_points = []
    for idx in idxs:
        op0 = ops[idx]
        op1 = ops[idx + 1]
        orb_num0 = op0['orbit_num']
        orb_num1 = op1['orbit_num']
        time0 = DateTime(op0['date']).secs
        time1 = DateTime(op1['date']).secs
        for orb_num in range(orb_num0 + 1, orb_num1):
            time = time0 + (orb_num - orb_num0) / (orb_num1 -
                                                   orb_num0) * (time1 - time0)
            date = DateTime(time).date
            new_orbit_point = (date, name, orb_num, op0['descr'])
            logger.info('Adding new orbit point {}'.format(new_orbit_point))
            new_orbit_points.append(new_orbit_point)

    return new_orbit_points
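
The explicit formula above is ordinary linear interpolation in orbit number; an equivalent numpy one-liner (not what the source uses) would be:

time = np.interp(orb_num, [orb_num0, orb_num1], [time0, time1])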
Example #17
def get_telem_values(tstart, msids, days=14, dt=32.8, name_map={}):
    """
    Fetch last ``days`` of available ``msids`` telemetry values before
    time ``tstart``.

    :param tstart: start time for telemetry (secs)
    :param msids: fetch msids list
    :param days: length of telemetry request before ``tstart``
    :param dt: sample time (secs)
    :param name_map: dict mapping msid to recarray col name
    :returns: np recarray of requested telemetry values from fetch
    """
    tstart = DateTime(tstart).secs
    start = DateTime(tstart - days * 86400).date
    stop = DateTime(tstart).date
    logger.info('Fetching telemetry between %s and %s' % (start, stop))
    msidset = fetch.Msidset(msids, start, stop)
    start = max(x.times[0] for x in msidset.values())
    stop = min(x.times[-1] for x in msidset.values())
    msidset.interpolate(dt, start, stop)

    # Require at least 10 interpolated samples (about 5 minutes), else raise
    if len(msidset.times) < 10:
        raise ValueError('Found no telemetry within %d days of %s' %
                         (days, str(tstart)))

    outnames = ['date'] + [name_map.get(x, x) for x in msids]
    out = np.rec.fromarrays([msidset.times] + [msidset[x].vals for x in msids],
                            names=outnames)
    return out
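
A hedged usage sketch; the MSID names and the column rename are placeholders:

tlm = get_telem_values('2020:100:00:00:00', ['pitch', 'tcylaft6'],
                       days=7, name_map={'tcylaft6': 'cyl_temp'})
tlm['date']      # interpolated sample times (CXC seconds)
tlm['cyl_temp']  # 'tcylaft6' values, renamed per name_map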
Example #18
def download_data(start=None, stop=None):
    """
    Extract data to compute HRMA focus plots.

    :param start, stop: any DateTime compatible time formats
    :returns: fetches acis*evt2.fits.gz, hrc*evt2.fits.gz files
              from the archive and stores them in the current
              directory
    """

    # Remove directory 'param' and all files that it contains
    if os.path.isdir('param'):
        rmtree('param')
    os.makedirs('param')

    # check whether previous fits files are still around
    # if yes, remove them
    fits_files = glob('*fits*')
    for file_ in fits_files:
        os.remove(file_)

    # if time interval is not defined, set the relevant 1 month interval
    if start is None or stop is None:
        [start, stop] = set_interval()

    start = DateTime(start).date
    stop = DateTime(stop).date

    # extract acis and hrc evt2 files
    for inst in ('acis', 'hrc'):
        run_arc(inst, start, stop)
Example #19
def test_interpolate_msid_times():
    start = '2008:002:21:48:00'
    stop = '2008:002:21:50:00'
    dat = fetch.MSID('aoattqt1', start, stop)
    dt = 10.0
    times = dat.tstart + np.arange((dat.tstop - dat.tstart) // dt + 3) * dt
    dat.interpolate(times=times)
    assert np.allclose(dat.vals, [
        -0.33072634, -0.33072637, -0.33072674, -0.33072665, -0.33073477,
        -0.330761, -0.33080694, -0.33089434, -0.33089264, -0.33097442,
        -0.33123678
    ])

    assert np.all(DateTime(dat.times).date == DATES_EXPECT3)

    dat = fetch.MSID('aogyrct1', start, stop)
    dat.interpolate(times=times)
    assert np.all(dat.vals == [
        -22247, -21117, -19988, -18839, -17468, -15605, -13000, -9360, -4052,
        2752, 10648
    ])

    assert np.all(DateTime(dat.times).date == DATES_EXPECT3)

    dat = fetch.MSID('aopcadmd', start, stop)
    dat.interpolate(times=times)
    assert np.all(dat.vals == [
        'NPNT', 'NPNT', 'NMAN', 'NMAN', 'NMAN', 'NMAN', 'NMAN', 'NMAN', 'NMAN',
        'NMAN', 'NMAN'
    ])

    assert np.all(DateTime(dat.times).date == DATES_EXPECT3)
Example #20
def main():
    """
    Program-level main.  This will normally be called via the update_archive.py wrapper
    script installed in $SKA/share/eng_archive.
    """
    # Allow for a cmd line option --date-start.  If supplied then loop the
    # effective value of opt.date_now from date_start to the cmd line
    # --date-now in steps of --max-lookback-time
    if opt.date_start is None:
        date_nows = [opt.date_now]
    else:
        t_starts = np.arange(
            DateTime(opt.date_start).secs,
            DateTime(opt.date_now).secs, opt.max_lookback_time * 86400.)
        date_nows = [DateTime(t).date for t in t_starts]
        date_nows.append(opt.date_now)

        # Drop the first date_now because that is covered by the second entry
        # minus the extended max_lookback_time (below).
        date_nows = date_nows[1:]

        # Increase max_lookback_time by 50%, but by no less than 2 days and no more than
        # 10 days.
        opt.max_lookback_time += min(max(opt.max_lookback_time * 0.5, 2), 10)

    for date_now in date_nows:
        opt.date_now = date_now
        main_loop()
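
A small worked illustration of the chunking logic with arbitrary dates (--date-start=2020:001, --date-now=2020:031, --max-lookback-time=10), yielding pseudo 'now' dates roughly every 10 days and ending at date-now:

t_starts = np.arange(DateTime('2020:001:00:00:00').secs,
                     DateTime('2020:031:00:00:00').secs, 10 * 86400.)
date_nows = [DateTime(t).date for t in t_starts] + ['2020:031:00:00:00']
date_nows = date_nows[1:]  # drop the first; it is covered by the extended lookback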
Example #21
    def test_stop_day(self):
        self.assertEqual(
            DateTime('1996365.010203').day_end().iso,
            '1996-12-31 00:00:00.000')
        self.assertEqual(
            DateTime('1996366.010203').day_end().iso,
            '1997-01-01 00:00:00.000')
Example #22
def make_archfiles_db(filename, content_def):
    # Do nothing if it is already there
    if os.path.exists(filename):
        return

    datestart = DateTime(DateTime(opt.start).secs - 60)
    tstart = datestart.secs
    tstop = tstart
    year, doy = datestart.date.split(':')[:2]
    times, indexes = derived.times_indexes(tstart, tstop,
                                           content_def['time_step'])

    logger.info('Creating db {}'.format(filename))
    archfiles_def = open(Path(__file__).parent / 'archfiles_def.sql').read()
    db = Ska.DBI.DBI(dbi='sqlite', server=filename)
    db.execute(archfiles_def)
    archfiles_row = dict(
        filename='{}:0:1'.format(content_def['content']),
        filetime=0,
        year=year,
        doy=doy,
        tstart=tstart,
        tstop=tstop,
        rowstart=0,
        rowstop=0,
        startmjf=indexes[0],  # really index0
        stopmjf=indexes[-1],  # really index1
        date=datestart.date)
    db.insert(archfiles_row, 'archfiles')
Example #23
File: fit.py Project: sot/xijafit
    def save_snapshot(self, fit_stat=None, method=None):
        """Save a snapshot of fit statistics.

        :param fit_stat: Manual way to pass fit statistic (may not be necessary in future)
        :param method: Manual way to pass method (may not be necessary in future)
        """
        if fit_stat is None:
            fit_stat = self.model.calc_stat()

        pattern = r'Final fit statistic\s*=\s*([0-9.e]+\+?\d*)\D+(\d+)'
        found = re.findall(pattern, self.sherpa_log_capture_string.getvalue())
        if found:
            final_eval_num = found[-1][-1]
        else:
            final_eval_num = None

        snapshot = {}
        for pars in self.model.pars:
            snapshot[pars['full_name']] = {
                k: pars[k]
                for k in ('frozen', 'min', 'max', 'val')
            }

        snapshot['fit_stat'] = fit_stat
        snapshot['final_eval_num'] = final_eval_num
        snapshot['tstart'] = DateTime(self.model.tstart).date
        snapshot['tstop'] = DateTime(self.model.tstop).date
        snapshot['method'] = method
        snapshot['date'] = DateTime().date

        self.snapshots.append(snapshot)
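
The regular expression targets Sherpa's fit summary; a minimal check against a log line of the form it expects (the sample line is fabricated, not captured Sherpa output):

log = 'Final fit statistic   = 4.19979e+04 at function evaluation 297'
re.findall(r'Final fit statistic\s*=\s*([0-9.e]+\+?\d*)\D+(\d+)', log)
# -> [('4.19979e+04', '297')]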
Example #24
def mups_2_temps_xout_2():

    close('all')

    t1 = '2013:231:11:00:00'
    t2 = '2013:231:12:45:00'

    msids = ['AOVBM2FS', 'PM2THV1T', 'PM2THV2T']

    data = fetch.Msidset(msids, t1, t2, stat=None)
    b2_exp = data['PM2THV1T'].vals + 3

    xticks = np.linspace(DateTime(t1).secs, DateTime(t2).secs, 11)
    xticklabels = [DateTime(t).date[5:17] for t in xticks]

    fig = plt.figure(figsize=[18, 9], facecolor='w')
    rect = [0.06, 0.15, 0.88, 0.75]
    ax1 = fig.add_axes(rect)

    ax1.plot(data['AOVBM2FS'].times,
             data['AOVBM2FS'].raw_vals,
             color=[0.4, 0.4, 0.4])
    ax1.set_ylim(-1, 12)
    ax1.set_yticks([0])
    ax1.set_yticklabels(['AOVBM2FS'], rotation=45)
    for t in ax1.yaxis.get_ticklines():
        t.set_visible(False)
    ax1.set_xticks(xticks)
    ax1.set_xticklabels(xticklabels, rotation=45, ha='right')
    ax1.set_xlim(xticks[0], xticks[-1])

    ax2 = fig.add_axes(rect, frameon=False)
    ax2.plot(data['PM2THV1T'].times,
             data['PM2THV1T'].vals,
             color="#56B4E9",
             linewidth=4.0,
             label='2A Actual')
    ax2.plot(data['PM2THV2T'].times,
             data['PM2THV2T'].vals,
             'm',
             linewidth=4.0,
             label='2B Actual')
    ax2.plot(data['PM2THV1T'].times,
             b2_exp,
             'm:',
             linewidth=4.0,
             label='2B Predicted w/o Firings')
    ax2.legend(loc='upper left')
    ax2.set_ylim(100, 150)
    ax2.yaxis.set_label_position('right')
    ax2.yaxis.tick_right()
    ax2.set_ylabel('MUPS-2 Temperatures [deg F]')
    ax2.set_xticks(xticks)
    ax2.set_xticklabels('')
    ax2.set_xlim(xticks[0], xticks[-1])

    title('MUPS B-Side Activations and MUPS 2B Temperature - 2013:231 Firing')

    fig.savefig('2013_231_mups2_temps_expected.png')
Example #25
File: aca_hdr3.py Project: sot/mica
    def __init__(self, msids, start, stop):
        super(MSIDset, self).__init__()
        self.tstart = DateTime(start).secs
        self.tstop = DateTime(stop).secs
        self.datestart = DateTime(self.tstart).date
        self.datestop = DateTime(self.tstop).date
        slot_datas = {}
        slots = set(slot_for_msid(confirm_msid(msid)) for msid in msids)
        for slot in slots:
            # get the 8x8 data
            tstop = self.tstop + 33.0  # Major frame of padding
            slot_data = aca_l0.get_slot_data(
                self.tstart, tstop, slot,
                imgsize=[8], columns=ACA_DTYPE_NAMES)

            # Find samples where the time stamp changes by a value other than 4.1 secs
            # (which is the value for 8x8 readouts).  In that case there must have been a
            # break in L0 decom, typically due to a change to 4x4 or 6x6 data.
            #  t[0] = 1.0
            #  t[1] = 5.1   <= This record could be bad, as indicated by the gap afterward
            #  t[2, 3] = 17.4, 21.5
            # To form the time diffs first add `tstop` to the end so that if 8x8 data
            # does not extend through `tstop` then the last record gets chopped.
            dt = np.diff(np.concatenate([slot_data['TIME'], [tstop]]))
            bad = np.abs(dt - 4.1) > 1e-3
            slot_data[bad] = ma.masked

            # Chop off the padding
            i_stop = np.searchsorted(slot_data['TIME'], self.tstop, side='right')
            slot_data = slot_data[:i_stop]

            # explicitly unmask useful columns
            slot_data['TIME'].mask = ma.nomask
            slot_data['IMGSIZE'].mask = ma.nomask
            slot_data['FILENAME'].mask = ma.nomask
            slot_datas[slot] = slot_data
        # make a shared time ndarray that is the union of the time sets in the
        # slots.  The ACA L0 telemetry has the same timestamps across slots,
        # so the only differences here are caused by different times in
        # non-TRAK across the slots (usually SRCH differences at the beginning
        # of the observation)
        shared_time = np.unique(np.concatenate([
            slot_datas[slot]['TIME'].data for slot in slots]))
        for msid in msids:
            hdr3_msid = confirm_msid(msid)
            slot = slot_for_msid(hdr3_msid)
            full_data = ma.zeros(len(shared_time),
                                 dtype=slot_datas[slot].dtype)
            full_data.mask = ma.masked
            fd_idx = search_both_sorted(shared_time,
                                        slot_datas[slot]['TIME'])
            full_data[fd_idx] = slot_datas[slot]
            # make a data dictionary to feed to the MSID constructor
            slot_data = {'vals': HDR3_DEF[hdr3_msid]['value'](full_data),
                         'desc': HDR3_DEF[hdr3_msid]['desc'],
                         'longdesc': HDR3_DEF[hdr3_msid]['longdesc'],
                         'times': shared_time,
                         'hdr3_msid': hdr3_msid}
            self[msid] = MSID(msid, start, stop, slot_data)
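
The gap-detection trick above (append tstop before diffing so a truncated final record is also flagged) in isolation, using the made-up times from the comment:

times = np.array([1.0, 5.1, 17.4, 21.5])        # 8x8 sample times with a decom break after 5.1
tstop = 25.6
dt = np.diff(np.concatenate([times, [tstop]]))  # [4.1, 12.3, 4.1, 4.1]
bad = np.abs(dt - 4.1) > 1e-3                   # flags only the record just before the gap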
Example #26
def check_content(outdir, content, msids=None):
    outdir = Path(outdir)
    if outdir.exists():
        shutil.rmtree(outdir)

    print()
    print(f'Test dir: {outdir}')

    if msids is None:
        msids = CONTENTS[content]

    basedir_ref = outdir / 'orig'
    basedir_test = outdir / 'test'

    basedir_ref.mkdir(parents=True)
    basedir_test.mkdir(parents=True)

    # Make a local hard-link copy of select parts (content and msids) of the
    # "official" cheta archive data (nominally $SKA/data/engarchive) in basedir_ref.
    # This hard-link repo serves as the source for making the sync repo, so this
    # is faster/lighter.
    make_linked_local_archive(basedir_ref, content, msids)

    # Make the sync repo, using basedir_ref as input data and outputting the
    # sync/ dir to basedir_test.
    with set_fetch_basedir(basedir_ref):
        make_sync_repo(basedir_test, content)

    # Make stubs of archive content, meaning filled with mostly zeros until just
    # before the test start date, then some real data to get the sync'ing going.
    make_stub_content(content,
                      date=DateTime(START) - 2,
                      basedir_stub=basedir_test,
                      basedir_ref=basedir_ref,
                      msids=msids)

    date_stop = (DateTime(STOP) + 2).date

    print(f'Updating client archive {content}')
    with set_fetch_basedir(basedir_test):
        update_client_archive.main([f'--content={content}',
                                    f'--log-level={LOG_LEVEL}',
                                    f'--date-stop={date_stop}',
                                    f'--data-root={basedir_test}',
                                    f'--sync-root={basedir_test}'])

    print(f'Checking {content} {msids}')
    for stat in None, '5min', 'daily':
        for msid in msids:
            fetch.times_cache['key'] = None
            with set_fetch_basedir(basedir_test):
                dat_stub = fetch.Msid(msid, START, STOP, stat=stat)

            fetch.times_cache['key'] = None
            with set_fetch_basedir(basedir_ref):
                dat_orig = fetch.Msid(msid, START, STOP, stat=stat)

            for attr in dat_orig.colnames:
                assert np.all(getattr(dat_stub, attr) == getattr(dat_orig, attr))
Example #27
File: aca_l0.py Project: sot/mica
    def __init__(self, msid, slot, start, stop):
        self.tstart = DateTime(start).secs
        self.tstop = DateTime(stop).secs
        self.datestart = DateTime(self.tstart).date
        self.datestop = DateTime(self.tstop).date
        self.slot = slot
        self._check_msid(msid)
        self._get_data()
Example #28
File: aca_l0.py Project: sot/mica
    def __init__(self, msids, start, stop):
        super(MSIDset, self).__init__()
        self.tstart = DateTime(start).secs
        self.tstop = DateTime(stop).secs
        self.datestart = DateTime(self.tstart).date
        self.datestop = DateTime(self.tstop).date
        for msid in msids:
            self[msid] = MSID(msid, self.tstart, self.tstop)
Example #29
def get_trak_cat_from_telem(start, stop, cmd_quat):
    start = DateTime(start)
    stop = DateTime(stop)
    msids = [
        "{}{}".format(m, i)
        for m in ['AOACYAN', 'AOACZAN', 'AOACFID', 'AOIMAGE', 'AOACFCT']
        for i in range(0, 8)
    ]
    telem = fetch.MSIDset(
        ['AOACASEQ', 'CORADMEN', 'AOPCADMD', 'AONSTARS', 'AOKALSTR'] + msids,
        start, stop)
    att = fetch.MSIDset(['AOATTQT{}'.format(i) for i in [1, 2, 3, 4]], start,
                        stop)
    cat = {}
    for slot in range(0, 8):
        track = telem['AOACFCT{}'.format(slot)].vals == 'TRAK'
        fid = telem['AOACFID{}'.format(slot)].vals == 'FID '
        star = telem['AOIMAGE{}'.format(slot)].vals == 'STAR'
        n = 30
        if np.count_nonzero(track) < n:
            continue
        if np.any(fid & track):
            cat[slot] = {
                'type': 'FID',
                'yag': telem['AOACYAN{}'.format(slot)].vals[fid & track][0],
                'zag': telem['AOACZAN{}'.format(slot)].vals[fid & track][0]
            }
        else:
            n_samples = np.count_nonzero(track & star)
            if n_samples < (n + 4):
                continue
            # If there is tracked data with a star, let's try to get our n samples from about
            # the middle of the range
            mid_point = int(n_samples / 2.)
            yags = []
            zags = []
            for sample in range(mid_point - int(n / 2.),
                                mid_point + int(n / 2.)):
                qref = Quat(
                    normalize([
                        att['AOATTQT{}'.format(i)].vals[track & star][sample]
                        for i in [1, 2, 3, 4]
                    ]))
                ra, dec = yagzag2radec(
                    telem['AOACYAN{}'.format(slot)].vals[track & star][sample]
                    / 3600.,
                    telem['AOACZAN{}'.format(slot)].vals[track & star][sample]
                    / 3600., qref)
                yag, zag = radec2yagzag(ra, dec, cmd_quat)
                yags.append(yag)
                zags.append(zag)
            # This doesn't detect MON just yet
            cat[slot] = {
                'type': 'STAR',
                'yag': np.median(yags) * 3600.,
                'zag': np.median(zags) * 3600.
            }
    return cat, telem
Example #30
def write_report(thermal_msid_checks_file, t1, t2):
    """ Write Thermal Weekly Report

    :param thermal_msid_checks_file: Name of pickle file listing msids and related information
    :param t1: string containing start time in HOSC format
    :param t2: string containing stop time in HOSC format

    Note, in the past:
    thermal_msid_checks_file = 'thermalmsiddata.pkl'
    """
    thermdict, missing, notinarchive = pickle.load(
        open(thermal_msid_checks_file, 'rb'))

    t1 = DateTime(t1).date
    t2 = DateTime(t2).date

    dayrange = (t1[:9] + '-' + t2[:9]).replace(':', '')

    power = get_average_tel_power(t1, t2)

    eclfile = pathjoin(AXAFDATA, 'ECLIPSE.txt')
    ecltext = get_eclipse_text(eclfile, t1, t2)

    allviolations, missingmsids, checkedmsids = check_violations(
        thermdict, t1, t2)

    # 3shtren and 4csdhav are not decommed correctly in the CXC archive
    if '3shtren' in allviolations.keys():
        _ = allviolations.pop('3shtren')

    if '4csdhav' in allviolations.keys():
        _ = allviolations.pop('4csdhav')

    limitchanges = check_limit_changes(t1, t2)

    html_limit_change_table = 'None'

    env = ja.Environment(loader=ja.FileSystemLoader(
        home +
        '/AXAFLIB/thermal_weekly_report/thermal_weekly_report/templates'))

    template = env.get_template('thermal_weekly_template.htm')
    webpage = template.render(startday=t1,
                              endday=t2,
                              dayrange=dayrange,
                              power=str('%5.1f' % power),
                              eclipse=ecltext,
                              violations=allviolations,
                              limitchanges=limitchanges)

    reportfilename = 'THERMAL_Weekly_' + dayrange + '.htm'
    outfile = open(reportfilename, 'w+')
    outfile.writelines(webpage)
    outfile.close()

    print('    Saved weekly report to {0}\n'.format(reportfilename))

    return reportfilename
Example #31
File: aca_l0.py Project: sot/mica
    def update(self):
        """
        Retrieve ACA0 telemetry files from the CXC archive, store in the
        Ska/ACA archive, and update database of files.
        """
        contentdir = self.data_root
        if not os.path.exists(contentdir):
            os.makedirs(contentdir)
        if not os.path.exists(self.temp_root):
            os.makedirs(self.temp_root)
        archdb = os.path.join(contentdir, 'archfiles.db3')
        # if the database of the archived files does not exist,
        # or is empty, make it
        if not os.path.exists(archdb) or os.stat(archdb).st_size == 0:
            logger.info("creating archfiles db from %s"
                        % self.sql_def)
            db_sql = os.path.join(os.environ['SKA_DATA'],
                                  'mica', self.sql_def)
            db_init_cmds = open(db_sql).read()
            with Ska.DBI.DBI(**self.db) as db:
                db.execute(db_init_cmds, commit=True)
        if self.start:
            datestart = DateTime(self.start)
        else:
            # Get datestart as the most-recent file time from archfiles table
            # will need min-of-max-slot-datestart
            with Ska.DBI.DBI(**self.db) as db:
                last_time = min([db.fetchone(
                            "select max(filetime) from archfiles where slot = %d"
                            % s)['max(filetime)'] for s in range(0, 8)])
                if last_time is None:
                    raise ValueError(
                        "No files in archive to do update-since-last-run mode.\n"
                        + "Please specify a time with --start")
                datestart = DateTime(last_time)
        datestop = DateTime(self.stop)
        padding_seconds = 10000
        # loop over the specified time range in chunks of
        # days_at_once in seconds with some padding
        for tstart in np.arange(datestart.day_start().secs,
                                datestop.day_end().secs,
                                self.days_at_once * 86400):
            # set times for a chunk
            range_tstart = tstart - padding_seconds
            range_tstop = tstart + self.days_at_once * 86400
            if range_tstop > datestop.day_end().secs:
                range_tstop = datestop.day_end().secs
            range_tstop += padding_seconds
            # make a temporary directory
            tmpdir = Ska.File.TempDir(dir=self.temp_root)
            dirname = tmpdir.name
            logger.debug("Files save to temp dir %s" % dirname)
            # get the files, store in file archive, and record in database
            with Ska.File.chdir(dirname):
                fetched_files = self._fetch_by_time(range_tstart, range_tstop)
                self._insert_files(fetched_files)

        timestamp_file = os.path.join(self.data_root, 'last_timestamp.txt')
        # get list of missing files since the last time the tool ingested
        # files.  If this is first run of the tool, check from the start of
        # the requested time range
        if (os.path.exists(timestamp_file)
                and os.stat(timestamp_file).st_size > 0):
            cda_checked_timestamp = open(timestamp_file).read().rstrip()
        else:
            cda_checked_timestamp = DateTime(self.start).date
        missing_datetime = DateTime(cda_checked_timestamp)
        missing_files, last_ingest_date = \
            self._get_missing_archive_files(missing_datetime,
                                            only_new=True)
        # update the file to have up through the last confirmed good file
        # even before we try to fetch missing ones
        open(timestamp_file, 'w').write("%s" % last_ingest_date)

        if len(missing_files):
            logger.info("Found %d missing individual files"
                        % len(missing_files))
            # make a temporary directory
            tmpdir = Ska.File.TempDir(dir=self.temp_root)
            dirname = tmpdir.name
            logger.info("File save to temp dir %s" % dirname)
            with Ska.File.chdir(dirname):
                fetched_files, ingest_times = \
                    self._fetch_individual_files(missing_files)
                self._insert_files(fetched_files)

            last_ingest_date = missing_files[-1]['ingest_date']
            # update the file to have up through the last confirmed good file
            # even before we try to fetch missing ones
            open(timestamp_file, 'w').write("%s" % last_ingest_date)
        else:
            logger.info("No missing files")