Example #1
    def get_msid_attrs(self, tstart, tstop, msid, msid_args):
        """Get attributes for computed MSID: ``vals``, ``bads``, ``times``

        :param tstart: start time (CXC secs)
        :param tstop: stop time (CXC secs)
        :param msid: full MSID name e.g. cmd_state_pitch_1000
        :param msid_args: tuple of regex match groups: (state_key, dt)

        :returns: dict of MSID attributes
        """
        from kadi.commands.states import get_states
        from Chandra.Time import date2secs
        import numpy as np

        state_key = msid_args[0]
        dt = 1.025 * int(msid_args[1])
        states = get_states(tstart, tstop, state_keys=[state_key])

        tstart = date2secs(states['datestart'][0])
        tstops = date2secs(states['datestop'])

        times = np.arange(tstart, tstops[-1], dt)
        vals = states[state_key].view(np.ndarray)

        indexes = np.searchsorted(tstops, times)

        out = {
            'vals': vals[indexes],
            'times': times,
            'bads': np.zeros(len(times), dtype=bool),
            'unit': None
        }

        return out
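
For context, here is a minimal sketch of how a computed-MSID name could be parsed into the (state_key, dt) regex groups that get_msid_attrs receives. The exact pattern is an assumption inferred from the docstring and the int(msid_args[1]) conversion above.

import re

# Hypothetical pattern for names like 'cmd_state_pitch_1000' (assumed
# from the (state_key, dt) regex groups described in the docstring).
MSID_MATCH = re.compile(r'^cmd_state_(\w+)_(\d+)$')

match = MSID_MATCH.match('cmd_state_pitch_1000')
if match:
    msid_args = match.groups()      # ('pitch', '1000')
    state_key = msid_args[0]        # commanded state key for get_states
    dt = 1.025 * int(msid_args[1])  # sample spacing in seconds
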
Example #2
    def get_validation_states(self, datestart, datestop):
        """
        Get states for validation of the thermal model.

        Parameters
        ----------
        datestart : string
            The start date to grab states afterward.
        datestop : string
            The end date to grab states before.

        Returns
        -------
        astropy.table.Table
            Table of commanded states with the first and last state
            boundaries adjusted to exactly span the requested interval.
        """
        start = CxoTime(datestart)
        stop = CxoTime(datestop)
        self.logger.info('Getting commanded states between %s - %s' %
                         (start.date, stop.date))

        states = kadi_states.get_states(start,
                                        stop,
                                        state_keys=STATE_KEYS,
                                        merge_identical=True)

        # Set start and end state date/times to match telemetry span.  Extend the
        # state durations by a small amount because of a precision issue converting
        # to date and back to secs.  (The reference tstop could be just over the
        # 0.001 precision of date and thus cause an out-of-bounds error when
        # interpolating state values).
        dt = 0.01 / 86400
        states['tstart'][0] = (start - dt).secs
        states['datestart'][0] = (start - dt).date
        states['tstop'][-1] = (stop + dt).secs
        states['datestop'][-1] = (stop + dt).date

        return states
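
To see why the small padding above is needed: date strings carry millisecond precision, so a secs -> date -> secs round trip can shift a boundary time slightly. A standalone sketch of the effect (the input time is arbitrary):

from cxotime import CxoTime

t = CxoTime(700000000.0001234)  # CXC seconds
t_rt = CxoTime(t.date)          # round trip through the date string
print(abs(t_rt.secs - t.secs))  # up to ~5e-4 sec, hence the 0.01 s pad
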
Example #3
def get_state_data(tstart, tstop):
    """ Get states where 'vid_board', 'clocking', 'fep_count', 'pcad_mode' are constant.

    Args:
        tstart (int, float, string): Start time, using Chandra.Time epoch
        tstop (int, float, string): Stop time, using Chandra.Time epoch

    Returns:
        (numpy.ndarray): state data

    """
    keys = [
        'pitch', 'off_nom_roll', 'ccd_count', 'fep_count', 'clocking',
        'vid_board', 'pcad_mode', 'simpos'
    ]
    state_data = states.get_states(tstart,
                                   tstop,
                                   state_keys=keys,
                                   merge_identical=True)

    # Convert 'trans_keys' elements from 'TransKeysSet' objects to strings for compatibility with the 'reduce_states'
    # function in the 'Chandra.cmd_states' package
    state_data['trans_keys'] = [str(val) for val in state_data['trans_keys']]
    # Add start/stop times in CXC seconds as 'tstart'/'tstop'
    state_data['tstart'] = DateTime(state_data['datestart']).secs
    state_data['tstop'] = DateTime(state_data['datestop']).secs

    # relying on 'pcad_mode' to ensure attitude does not change significantly within a dwell
    state_data = reduce_states(
        state_data,
        ['ccd_count', 'fep_count', 'clocking', 'vid_board', 'pcad_mode'])

    return state_data
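
For reference, a minimal pure-NumPy sketch of the state-reduction idea: keep the first row plus any row where at least one tracked key changed. This is an illustration only, not the Chandra.cmd_states implementation (a complete version would also extend the surviving row's tstop/datestop over the dropped rows).

import numpy as np

def reduce_by_keys(states, keys):
    """Drop rows where all `keys` match the previous row (sketch)."""
    same = np.ones(len(states) - 1, dtype=bool)
    for key in keys:
        same &= states[key][1:] == states[key][:-1]
    keep = np.concatenate([[True], ~same])  # always keep the first row
    return states[keep]
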
Example #4
    def _get_cmd_states(self):
        if not hasattr(self, '_cmd_states'):
            import kadi.commands.states as kadi_states
            logger.info('Getting kadi commanded states over %s to %s' %
                        (self.datestart, self.datestop))
            states = kadi_states.get_states(self.datestart, self.datestop)
            self._cmd_states = kadi_states.interpolate_states(
                states, self.times).as_array()

        return self._cmd_states
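
The hasattr guard above is a hand-rolled lazy cache. An equivalent sketch using functools.cached_property (Python 3.8+; the class and attribute names here are assumptions):

from functools import cached_property

import kadi.commands.states as kadi_states

class CmdStates:
    """Sketch: lazily computed, cached commanded states."""

    def __init__(self, datestart, datestop, times):
        self.datestart = datestart
        self.datestop = datestop
        self.times = times

    @cached_property
    def cmd_states(self):
        states = kadi_states.get_states(self.datestart, self.datestop)
        return kadi_states.interpolate_states(states, self.times).as_array()
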
Example #5
 @classmethod
 def from_kadi_states(cls, tstart, tstop, state_keys=None):
     from kadi.commands import states
     tstart = get_time(tstart)
     tstop = get_time(tstop)
     if state_keys is not None:
         state_keys = ensure_list(state_keys)
     t = states.get_states(tstart,
                           tstop,
                           state_keys=state_keys,
                           merge_identical=True).as_array()
     return cls(t)
Example #6
 @classmethod
 def from_commands(cls, tstart, tstop, cmds=None, state_keys=None):
     from kadi import commands
     from kadi.commands import states
     tstart = get_time(tstart)
     tstop = get_time(tstop)
     if cmds is None:
         cmds = commands.get_cmds(tstart, tstop)
     continuity = states.get_continuity(tstart, state_keys)
     t = states.get_states(cmds=cmds,
                           continuity=continuity,
                           state_keys=state_keys,
                           merge_identical=True).as_array()
     return cls(t)
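
The key difference from from_kadi_states is that get_states(cmds=...) needs an explicit initial state, supplied via get_continuity. A usage sketch of that call on its own (date and keys are arbitrary):

from kadi.commands import states

# State of the selected keys at the given date, found by propagating
# recent commands (a sketch).
continuity = states.get_continuity('2022:001:00:00:00',
                                   state_keys=['obsid', 'pcad_mode'])
print(continuity['obsid'], continuity['pcad_mode'])
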
Example #7
def get_states_kadi(datestart, datestop):
    # Local import for speed and for namespace clarity
    from kadi.commands.states import get_states

    logger.info('Using kadi.commands.states to get cmd_states')
    logger.info('Getting commanded states between %s - %s' %
                (datestart, datestop))

    states = get_states(datestart, datestop)
    states['tstart'] = DateTime(states['datestart']).secs
    states['tstop'] = DateTime(states['datestop']).secs

    # Convert to recarray and return
    sa = states.as_array()
    rsa = np.recarray(sa.shape, dtype=sa.dtype, names=sa.dtype.names, buf=sa.data)

    return rsa
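
A behavior-equivalent shortcut for the recarray conversion above: the structured ndarray from Table.as_array can simply be viewed as np.recarray, sharing the same buffer.

import numpy as np
from kadi.commands.states import get_states

states = get_states('2022:001', '2022:002')
sa = states.as_array()      # structured ndarray from the astropy Table
rsa = sa.view(np.recarray)  # same data, attribute-style field access
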
Example #9
def update():
    recent_obs = get_states(start=CxoTime.now() - 7,
                            state_keys=['obsid'],
                            merge_identical=True)
    for obs in recent_obs['obsid']:
        process_obsids([int(obs)])  # int() keeps JSON serialization happy downstream
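
The int() cast matters because NumPy integer scalars are not JSON serializable; a quick demonstration:

import json

import numpy as np

obsid = np.int64(26273)
# json.dumps(obsid) raises TypeError: Object of type int64 is not JSON serializable
print(json.dumps(int(obsid)))  # "26273"
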
Example #10
def main():
    opt, args = get_options()
    if not os.path.exists(opt.outdir):
        os.mkdir(opt.outdir)

    config_logging(opt.outdir, opt.verbose)

    # Store info relevant to processing for use in outputs
    proc = dict(
        run_user=os.environ['USER'],
        run_time=time.ctime(),
        errors=[],
    )
    logger.info(
        '#####################################################################'
    )
    logger.info(
        '# %s run at %s by %s' %
        (os.path.dirname(__file__), proc['run_time'], proc['run_user']))
    logger.info('# version = %s' % VERSION)
    logger.info('# characteristics version = %s' % characteristics.VERSION)
    logger.info(
        '#####################################################################\n'
    )

    logger.info('Command line options:\n%s\n' % pformat(opt.__dict__))

    # Reference time for processing (defaults to now if run_start_time not set)
    tnow = DateTime(opt.run_start_time).secs
    tstart = tnow

    # Get telemetry for the `opt.days` days (nominally 3 weeks) prior to tstart
    tlm = get_telem_values(tstart, [
        'sim_z', 'dp_pitch', 'aoacaseq', 'aodithen', 'cacalsta', 'cobsrqid',
        'aofunlst', 'aopcadmd', '4ootgsel', '4ootgmtn', 'aocmdqt1', 'aocmdqt2',
        'aocmdqt3', '1de28avo', '1deicacu', '1dp28avo', '1dpicacu', '1dp28bvo',
        '1dpicbcu'
    ],
                           days=opt.days,
                           name_map={
                               'sim_z': 'tscpos',
                               'cobsrqid': 'obsid'
                           })

    tlm['tscpos'] = tlm['tscpos'] * -397.7225924607
    outdir = opt.outdir
    states = get_states(tlm[0].date, tlm[-1].date)
    write_states(opt, states)
    tlm = Ska.Numpy.add_column(tlm, 'power', smoothed_power(tlm))

    # Get bad time intervals
    bad_time_mask = get_bad_mask(tlm)

    # Interpolate states onto the tlm.date grid
    state_vals = cmd_states.interpolate_states(states, tlm['date'])

    # "Forgive" dither intervals with dark current replicas
    # This will also exclude dither disables that are in cmd states for standard dark cals
    dark_mask = np.zeros(len(tlm), dtype='bool')
    dark_times = []
    # Find dither "disable" states from tlm
    dith_disa_states = logical_intervals(tlm['date'],
                                         tlm['aodithen'] == 'DISA')
    for state in dith_disa_states:
        # Index back into telemetry for each of these constant dither disable states
        idx0 = np.searchsorted(tlm['date'], state['tstart'], side='left')
        idx1 = np.searchsorted(tlm['date'], state['tstop'], side='right')
        # If any samples have aca calibration flag, mark interval for exclusion.
        if np.any(tlm['cacalsta'][idx0:idx1] != 'OFF '):
            dark_mask[idx0:idx1] = True
            dark_times.append({
                'start': state['datestart'],
                'stop': state['datestop']
            })

    # Calculate the 4th term of the commanded quaternions
    cmd_q4 = np.sqrt(
        np.abs(1.0 - tlm['aocmdqt1']**2 - tlm['aocmdqt2']**2 -
               tlm['aocmdqt3']**2))
    raw_tlm_q = np.vstack(
        [tlm['aocmdqt1'], tlm['aocmdqt2'], tlm['aocmdqt3'],
         cmd_q4]).transpose()

    # Calculate angle/roll differences in state cmd vs tlm cmd quaternions
    raw_state_q = np.vstack([state_vals[n]
                             for n in ['q1', 'q2', 'q3', 'q4']]).transpose()
    tlm_q = normalize(raw_tlm_q)
    # only use values that aren't NaNs
    good = ~np.isnan(np.sum(tlm_q, axis=-1))
    # and are in NPNT
    npnt = tlm['aopcadmd'] == 'NPNT'
    # and are in KALM after the first 2 samples of the transition
    not_kalm = tlm['aoacaseq'] != 'KALM'
    kalm = ~(not_kalm | np.hstack([[False, False], not_kalm[:-2]]))
    # and aren't during momentum unloads or in the first 2 samples after unloads
    unload = tlm['aofunlst'] != 'NONE'
    no_unload = ~(unload | np.hstack([[False, False], unload[:-2]]))
    ok = good & npnt & kalm & no_unload & ~bad_time_mask
    state_q = normalize(raw_state_q)
    dot_q = np.sum(tlm_q[ok] * state_q[ok], axis=-1)
    dot_q[dot_q > 1] = 1
    angle_diff = np.degrees(2 * np.arccos(dot_q))
    angle_diff = np.min([angle_diff, 360 - angle_diff], axis=0)
    roll_diff = Quat(tlm_q[ok]).roll - Quat(state_q[ok]).roll
    roll_diff = np.min([roll_diff, 360 - roll_diff], axis=0)

    for msid in MODE_SOURCE:
        tlm_col = np.zeros(len(tlm))
        state_col = np.zeros(len(tlm))
        for mode, idx in zip(MODE_MSIDS[msid], count()):
            tlm_col[tlm[MODE_SOURCE[msid]] == mode] = idx
            state_col[state_vals[msid] == mode] = idx
        tlm = Ska.Numpy.add_column(tlm, msid, tlm_col)
        state_vals = Ska.Numpy.add_column(state_vals, "{}_pred".format(msid),
                                          state_col)

    for msid in ['letg', 'hetg']:
        txt = np.repeat('RETR', len(tlm))
        # use a combination of the select telemetry and the insertion telem to
        # approximate the state_vals values
        txt[(tlm['4ootgsel'] == msid.upper())
            & (tlm['4ootgmtn'] == 'INSE')] = 'INSE'
        tlm_col = np.zeros(len(tlm))
        state_col = np.zeros(len(tlm))
        for mode, idx in zip(MODE_MSIDS[msid], count()):
            tlm_col[txt == mode] = idx
            state_col[state_vals[msid] == mode] = idx
        tlm = Ska.Numpy.add_column(tlm, msid, tlm_col)
        state_vals = Ska.Numpy.add_column(state_vals, "{}_pred".format(msid),
                                          state_col)

    diff_only = {
        'pointing': {
            'diff': angle_diff * 3600,
            'date': tlm['date'][ok]
        },
        'roll': {
            'diff': roll_diff * 3600,
            'date': tlm['date'][ok]
        }
    }

    pred = {
        'dp_pitch': state_vals.pitch,
        'obsid': state_vals.obsid,
        'dither': state_vals['dither_pred'],
        'pcad_mode': state_vals['pcad_mode_pred'],
        'letg': state_vals['letg_pred'],
        'hetg': state_vals['hetg_pred'],
        'tscpos': state_vals.simpos,
        'power': state_vals.power,
        'pointing': 1,
        'roll': 1
    }

    plots_validation = []
    valid_viols = []
    logger.info('Making validation plots and quantile table')
    quantiles = (1, 5, 16, 50, 84, 95, 99)
    # store lines of quantile table in a string and write out later
    quant_table = ''
    quant_head = ",".join(['MSID'] + ["quant%d" % x for x in quantiles])
    quant_table += quant_head + "\n"
    for fig_id, msid in enumerate(sorted(pred)):
        plot = dict(msid=msid.upper())
        fig = plt.figure(10 + fig_id, figsize=(7, 3.5))
        fig.clf()
        scale = SCALES.get(msid, 1.0)
        ax = None
        if msid not in diff_only:
            if msid in MODE_MSIDS:
                state_msid = np.zeros(len(tlm))
                for mode, idx in zip(MODE_MSIDS[msid], count()):
                    state_msid[state_vals[msid] == mode] = idx
                ticklocs, fig, ax = plot_cxctime(tlm['date'],
                                                 tlm[msid],
                                                 fig=fig,
                                                 fmt='-r')
                ticklocs, fig, ax = plot_cxctime(tlm['date'],
                                                 state_msid,
                                                 fig=fig,
                                                 fmt='-b')
                plt.yticks(range(len(MODE_MSIDS[msid])), MODE_MSIDS[msid])
            else:
                ticklocs, fig, ax = plot_cxctime(tlm['date'],
                                                 tlm[msid] / scale,
                                                 fig=fig,
                                                 fmt='-r')
                ticklocs, fig, ax = plot_cxctime(tlm['date'],
                                                 pred[msid] / scale,
                                                 fig=fig,
                                                 fmt='-b')
        else:
            ticklocs, fig, ax = plot_cxctime(diff_only[msid]['date'],
                                             diff_only[msid]['diff'] / scale,
                                             fig=fig,
                                             fmt='-k')
        plot['diff_only'] = msid in diff_only
        ax.set_title(TITLE[msid])
        ax.set_ylabel(LABELS[msid])
        xlims = ax.get_xlim()
        ylims = ax.get_ylim()

        bad_times = list(characteristics.bad_times)

        # Add the time intervals of dark current calibrations that have been excluded from
        # the diffs to the "bad_times" for validation so they also can be marked with grey
        # rectangles in the plot.  This is only really visible with interactive/zoomed plot.
        if msid in ['dither', 'pcad_mode']:
            bad_times.extend(dark_times)

        # Add "background" grey rectangles for excluded time regions to vs-time plot
        for bad in bad_times:
            bad_start = cxc2pd([DateTime(bad['start']).secs])[0]
            bad_stop = cxc2pd([DateTime(bad['stop']).secs])[0]
            if not ((bad_stop >= xlims[0]) & (bad_start <= xlims[1])):
                continue
            rect = matplotlib.patches.Rectangle((bad_start, ylims[0]),
                                                bad_stop - bad_start,
                                                ylims[1] - ylims[0],
                                                alpha=.2,
                                                facecolor='black',
                                                edgecolor='none')
            ax.add_patch(rect)

        filename = msid + '_valid.png'
        outfile = os.path.join(outdir, filename)
        logger.info('Writing plot file %s' % outfile)
        plt.tight_layout()
        plt.margins(0.05)
        fig.savefig(outfile)
        plot['lines'] = filename

        if msid not in diff_only:
            ok = ~bad_time_mask
            if msid in ['dither', 'pcad_mode']:
                # For these two validations also ignore intervals during a dark current calibration
                ok &= ~dark_mask
            diff = tlm[msid][ok] - pred[msid][ok]
        else:
            diff = diff_only[msid]['diff']

    # Sort the diffs since they are only used in aggregate (quantiles, histograms)
        diff = np.sort(diff)

        # if there are only a few residuals, don't bother with histograms
        if msid.upper() in validation_scale_count:
            plot['samples'] = len(diff)
            plot['diff_count'] = np.count_nonzero(diff)
            plot['n_changes'] = 1 + np.count_nonzero(pred[msid][1:] -
                                                     pred[msid][0:-1])
            if (plot['diff_count'] <
                (plot['n_changes'] * validation_scale_count[msid.upper()])):
                plots_validation.append(plot)
                continue
            # if the diff count exceeds the allowed limit, add a validation violation
            else:
                viol = {
                    'msid': "{}_diff_count".format(msid),
                    'value': plot['diff_count'],
                    'limit': plot['n_changes'] * validation_scale_count[msid.upper()],
                    'quant': None,
                }
                valid_viols.append(viol)
                logger.info(
                    'WARNING: %s %d discrete diffs exceed limit of %d' %
                    (msid, plot['diff_count'],
                     plot['n_changes'] * validation_scale_count[msid.upper()]))

        # Make quantiles
        if (msid != 'obsid'):
            quant_line = "%s" % msid
            for quant in quantiles:
                quant_val = diff[(len(diff) * quant) // 100]
                plot['quant%02d' % quant] = FMTS[msid] % quant_val
                quant_line += (',' + FMTS[msid] % quant_val)
            quant_table += quant_line + "\n"

        for histscale in ('lin', 'log'):
            fig = plt.figure(20 + fig_id, figsize=(4, 3))
            fig.clf()
            ax = fig.gca()
            ax.hist(diff / scale, bins=50, log=(histscale == 'log'))
            ax.set_title(msid.upper() + ' residuals: telem - cmd states',
                         fontsize=11)
            ax.set_xlabel(LABELS[msid])
            fig.subplots_adjust(bottom=0.18)
            plt.tight_layout()
            filename = '%s_valid_hist_%s.png' % (msid, histscale)
            outfile = os.path.join(outdir, filename)
            logger.info('Writing plot file %s' % outfile)
            fig.savefig(outfile)
            plot['hist' + histscale] = filename

        plots_validation.append(plot)

    filename = os.path.join(outdir, 'validation_quant.csv')
    logger.info('Writing quantile table %s' % filename)
    with open(filename, 'w') as f:
        f.write(quant_table)

    # If run_start_time is specified this is likely for regression testing
    # or other debugging.  In this case write out the full predicted and
    # telemetered dataset as a pickle.
    if opt.run_start_time:
        filename = os.path.join(outdir, 'validation_data.pkl')
        logger.info('Writing validation data %s' % filename)
        with open(filename, 'wb') as f:
            pickle.dump({'pred': pred, 'tlm': tlm}, f, protocol=-1)

    valid_viols.extend(make_validation_viols(plots_validation))
    if len(valid_viols) > 0:
        # generate daily plot url if outdir in expected year/day format
        daymatch = re.match(r'.*(\d{4})/(\d{3})', opt.outdir)
        if daymatch:
            url = os.path.join(URL, daymatch.group(1), daymatch.group(2))
            logger.info('validation warning(s) at %s' % url)
        else:
            logger.info('validation warning(s) in output at %s' % opt.outdir)

    write_index_rst(opt, proc, plots_validation, valid_viols)
    rst_to_html(opt, proc)
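
The pointing residual above is the standard quaternion separation angle, theta = 2 * arccos(q_tlm . q_state). A self-contained check with synthetic unit quaternions (a sketch, not the script's data):

import numpy as np

q1 = np.array([0.0, 0.0, 0.0, 1.0])  # identity attitude
ang = np.radians(0.01)               # 0.01 deg rotation about z
q2 = np.array([0.0, 0.0, np.sin(ang / 2), np.cos(ang / 2)])

dot = min(q1 @ q2, 1.0)  # clip, like dot_q[dot_q > 1] = 1 above
print(np.degrees(2 * np.arccos(dot)))  # ~0.01
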
Example #11
def get_starcheck_catalog_at_date(date, starcheck_db=None, timelines_db=None):
    """
    For a given date, return a dictionary describing the starcheck catalog that should apply.
    The content of that dictionary is from the database tables that parsed the starcheck report.
    A catalog is defined as applying, in this function, to any time from the end of the
    previous dwell through the end of the dwell in which the catalog was used.

    Star catalog dictionary with keys:

    - cat: catalog rows as astropy.table
    - manvr: list of maneuvers to this attitude
    - pred_temp: predicted ACA CCD temperature
    - warnings: list of warnings below catalog in starcheck output
    - obs: dictionary of observation target and pointing information
    - mp_dir: directory with products that are the source of this catalog data
    - status: string describing status of that observation, described below.

    Status:

    - ran: observation was observed
    - planned: observation in a not-approved future schedule
    - approved: observation in an approved future schedule (ingested in timelines/cmd_states)
    - ran_pretimelines: ran, but before timelines database starts
    - timelines_gap: after timelines database start but missing data
    - no starcat: in the database but has no star catalog

    :param date: Chandra.Time compatible date
    :param starcheck_db: optional handle to already-open starcheck database
    :param timelines_db: optional handle to already-open timelines database
    :returns: dictionary with starcheck content described above
    """
    date = DateTime(date).date
    if starcheck_db is None:
        starcheck_db = Ska.DBI.DBI(**DEFAULT_CONFIG['starcheck_db'])
    db = starcheck_db
    if timelines_db is None:
        timelines_db = Ska.DBI.DBI(**DEFAULT_CONFIG['timelines_db'])
    last_tl = timelines_db.fetchone(
        "select max(datestop) as datestop from timelines")['datestop']
    first_tl = timelines_db.fetchone(
        "select min(datestart) as datestart from timelines")['datestart']
    # Check kadi to get the first dwell that *ends* after the given time
    dwells = events.dwells.filter(stop__gte=date, subset=slice(None, 1))
    # if we're outside of timelines or not yet in kadi, just try from the starcheck database
    if date > last_tl or date < first_tl:
        # Get one entry that is the last one before the specified time, in the most
        # recently ingested products directory
        starcheck = db.fetchone("""select * from starcheck_obs, starcheck_id
               where mp_starcat_time <= '{}' and mp_starcat_time > '{}'
               and starcheck_id.id = starcheck_obs.sc_id
               order by sc_id desc, mp_starcat_time desc """.format(
            date, (DateTime(date) - 1).date))
        if starcheck:
            cat_info = get_starcheck_catalog(starcheck['obsid'],
                                             mp_dir=starcheck['dir'])
            if date < first_tl:
                cat_info['status'] = 'ran_pretimelines'
            if date > last_tl:
                cat_info['status'] = 'planned'
            return cat_info

    # We want to search for legitimate commanding that would cover the time when a star
    # catalog would have been commanded for this dwell.  This is generally the time range
    # between the end of the previous dwell and the beginning of this dwell.  However, if
    # there are multiple dwells from one maneuver, use the beginning of NMM from that
    # maneuver; otherwise, use the end of the last dwell.  Don't use nman_start time by default
    # because that doesn't appear to work if the catalog was commanded in a nonstandard
    # nmm sequence like dark cal.

    # There is a tiny window of time in cmd_states but not yet in kadi, but this code tries to
    # grab the dwell and maneuver that would be related to a date in that range
    if date < last_tl and len(dwells) == 0:
        pcad_states = get_states(start=DateTime(date) - 2,
                                 state_keys=['pcad_mode'],
                                 merge_identical=True)
        dwell = pcad_states[(pcad_states['pcad_mode'] == 'NPNT')
                            & (pcad_states['datestop'] >= date)][0]
        manvr = pcad_states[
            (pcad_states['pcad_mode'] == 'NMAN')
            & (pcad_states['datestop'] <= dwell['datestart'])][-1]
        start_cat_search = manvr['datestart']
        dwell_start = dwell['datestart']
    else:
        # If we have a dwell from kadi, use it to search for commanding
        dwell = dwells[0]
        dwell_start = dwell.start
        # Try to use the beginning of the previous nman period to define when the catalog
        # should have been commanded.  If there are multiple dwells for attitude, try to
        # use nman_start if available.
        if dwell.manvr.n_dwell > 1 and dwell.manvr.nman_start is not None:
            start_cat_search = dwell.manvr.nman_start
        else:
            start_cat_search = dwell.get_previous().stop

    timelines = timelines_db.fetchall(
        """select * from timeline_loads where scs < 131
           and datestop > '{}' and datestart < '{}' order by datestart""".
        format(start_cat_search, dwell_start))
    for timeline in timelines[::-1]:
        starchecks = db.fetchall("""select * from starcheck_obs, starcheck_id
               where dir = '{}'
               and mp_starcat_time >= '{}'
               and mp_starcat_time <= '{}' and mp_starcat_time <= '{}'
               and starcheck_id.id = starcheck_obs.sc_id
               order by mp_starcat_time """.format(timeline['mp_dir'],
                                                   timeline['datestart'],
                                                   timeline['datestop'],
                                                   dwell_start))
        # The last one should be the one before beginning of the dwell
        if len(starchecks):
            # Use the obsid and the known products directory with the more generic
            # get_starcheck_catalog to fetch the right catalog from the database
            cat_info = get_starcheck_catalog(starchecks[-1]['obsid'],
                                             mp_dir=starchecks[-1]['dir'])
            cat_info['status'] = 'ran' if date < DateTime().date else 'approved'
            return cat_info
    return None
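
For reference, the kadi events query used above asks for at most one dwell whose stop time is at or after the given date; a usage sketch:

from kadi import events

# First dwell that *ends* after the given time; subset limits the
# query to a single result.
dwells = events.dwells.filter(stop__gte='2022:100:00:00:00',
                              subset=slice(None, 1))
if len(dwells) > 0:
    print(dwells[0].start, dwells[0].stop)
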
Example #12
    def get_prediction_states(self, tbegin):
        """
        Get the states used for the prediction.  This includes both the
        states from the review load backstop file and all the
        states between the latest telemetry data and the beginning
        of that review load backstop file.

        The review load backstop commands have already been obtained, and
        telemetry from 21 days back through the latest available in Ska
        has been fetched.

        So now the task is to backchain through the loads and assemble
        any states missing between the end of telemetry and the start
        of the review load.

        Parameters
        ----------
        tbegin : string
            The starting date/time from which to obtain states for
            prediction. This is tlm['date'][-5], i.e. approximately
            30 minutes before the end of the fetched telemetry.
        """
        # If an OFLS directory has been specified, get the backstop commands
        # stored in the backstop file in that directory

        # Ok ready to start the collection of continuity commands
        #
        # Make a copy of the Review Load Commands. This will have
        # Continuity commands concatenated to it and will be the final product

        import copy

        # List of dict representing commands at this point
        bs_cmds = copy.copy(self.bs_cmds)

        # Capture the start time of the review load
        bs_start_time = bs_cmds[0]['time']

        # Capture the path to the ofls directory
        present_ofls_dir = copy.copy(self.backstop_file)

        # So long as the earliest command in bs_cmds is after the state0 time
        # (which is the same as tbegin), keep concatenating continuity commands
        # to bs_cmds based upon the type of load. Note that as you march back in
        # time along the load chain, "present_ofls_dir" will change.

        # WHILE
        # The big while loop that backchains through previous loads and concatenates the
        # proper load sections to the review load.
        while CxoTime(tbegin).secs < bs_start_time:

            # Read the Continuity information of the present ofls directory
            cont_load_path, present_load_type, scs107_date = self.BSC.get_continuity_file_info(
                present_ofls_dir)

            #---------------------- NORMAL ----------------------------------------
            # If the load type is "normal" then grab the continuity command
            # set and concatenate those commands to the start of bs_cmds
            if present_load_type.upper() == 'NORMAL':
                # Obtain the continuity load commands
                cont_bs_cmds, cont_bs_name = self.BSC.get_bs_cmds(
                    cont_load_path)

                # Combine the continuity commands with the bs_cmds. The result
                # is stored in bs_cmds
                bs_cmds = self.BSC.CombineNormal(cont_bs_cmds, bs_cmds)

                # Reset the backstop collection start time for the While loop
                bs_start_time = bs_cmds[0]['time']
                # Now point the operative ofls directory to the Continuity directory
                present_ofls_dir = cont_load_path

            #---------------------- TOO ----------------------------------------
            # If the load type is "too" then grab the continuity command
            # set and concatenate those commands to the start of bs_cmds
            elif present_load_type.upper() == 'TOO':
                # Obtain the continuity load commands
                cont_bs_cmds, cont_bs_name = self.BSC.get_bs_cmds(
                    cont_load_path)

                # Combine the continuity commands with the bs_cmds
                bs_cmds = self.BSC.CombineTOO(cont_bs_cmds, bs_cmds)

                # Reset the backstop collection start time for the While loop
                bs_start_time = bs_cmds[0]['time']
                # Now point the operative ofls directory to the Continuity directory
                present_ofls_dir = cont_load_path

            #---------------------- STOP ----------------------------------------
            # If the load type is "STOP" then grab the continuity command
            # set and concatenate those commands to the start of bs_cmds
            # Take into account the SCS-107 commands which shut ACIS down
            # and any LTCTI run
            elif present_load_type.upper() == 'STOP':

                # Obtain the continuity load commands
                cont_bs_cmds, cont_bs_name = self.BSC.get_bs_cmds(
                    cont_load_path)

                # CombineSTOP the continuity commands with the bs_cmds
                bs_cmds = self.BSC.CombineSTOP(cont_bs_cmds, bs_cmds,
                                               scs107_date)

                # Reset the backstop collection start time for the While loop
                bs_start_time = bs_cmds[0]['time']
                # Now point the operative ofls directory to the Continuity directory
                present_ofls_dir = cont_load_path

            #---------------------- SCS-107 ----------------------------------------
            # If the load type is "SCS-107" then grab the continuity command
            # set and concatenate those commands to the start of bs_cmds.
            # Take into account the SCS-107 commands which shut ACIS down
            # and any LTCTI run.
            elif present_load_type.upper() == 'SCS-107':
                # Obtain the continuity load commands
                cont_bs_cmds, cont_bs_name = self.BSC.get_bs_cmds(
                    cont_load_path)
                # Store the continuity bs commands as a chunk in the chunk list

                # Obtain the CONTINUITY load Vehicle-Only file
                vo_bs_cmds, vo_bs_name = self.BSC.get_vehicle_only_bs_cmds(
                    cont_load_path)

                # Combine107 the continuity commands with the bs_cmds
                bs_cmds = self.BSC.Combine107(cont_bs_cmds, vo_bs_cmds,
                                              bs_cmds, scs107_date)

                # Reset the backstop collection start time for the While loop
                bs_start_time = bs_cmds[0]['time']
                # Now point the operative ofls directory to the Continuity directory
                present_ofls_dir = cont_load_path

        # Convert backstop commands from a list of dict to a CommandTable and
        # store in self.
        bs_cmds = kadi.commands.CommandTable(bs_cmds)
        self.bs_cmds = bs_cmds

        # Clip commands to tbegin
        bs_cmds = bs_cmds[bs_cmds['date'] > tbegin]

        # Scheduled stop time is the end of propagation, either the explicit
        # time as a pseudo-command in the loads or the last backstop command time.
        # Use the last such command if any are found (which is always the case
        # since backstop 6.9).
        ok = bs_cmds['event_type'] == 'SCHEDULED_STOP_TIME'
        sched_stop = bs_cmds['date'][ok][-1] if np.any(
            ok) else bs_cmds['date'][-1]

        # Convert the assembled command history into commanded states
        # corresponding to the commands. This includes continuity commanding
        # from the end of telemetry along with the in-review load backstop
        # commands.
        states = kadi_states.get_states(cmds=bs_cmds,
                                        start=tbegin,
                                        stop=sched_stop,
                                        state_keys=STATE_KEYS)

        # Make the column order match legacy Chandra.cmd_states.
        states = states[sorted(states.colnames)]

        # Get the first state as a dict.
        state0 = {key: states[0][key] for key in states.colnames}

        self.logger.debug(f"state0 at {CxoTime(state0['tstart']).date} "
                          f"is\n{pformat(state0)}")

        return states, state0
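
Stripped of the per-load-type details, the while loop above follows a single generic backchaining pattern. A sketch with all helper names hypothetical:

def backchain(bs_cmds, tbegin_secs, ofls_dir, get_info, get_cmds, combine):
    # Prepend continuity commands until the earliest command time is at
    # or before tbegin (mirrors the while loop above).
    while tbegin_secs < bs_cmds[0]['time']:
        cont_dir, load_type, scs107_date = get_info(ofls_dir)
        cont_cmds = get_cmds(cont_dir)
        bs_cmds = combine(load_type, cont_cmds, bs_cmds, scs107_date)
        ofls_dir = cont_dir
    return bs_cmds
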
Example #13
    def get_prediction_states(self, tbegin):
        """
        Get the states used for the prediction.

        Parameters
        ----------
        tbegin : string
            The starting date/time from which to obtain states for
            prediction.
        """
        # This is a kadi.commands.CommandTable (subclass of astropy Table)
        bs_cmds = self.bs_cmds
        bs_dates = bs_cmds['date']

        # Running loads termination time is the last time of "current running
        # loads" (or in the case of a safing action, "current approved load
        # commands" in kadi commands) which should be included in propagation.
        # Starting from around 2020-April (backstop 6.9) this is included as a
        # command in the loads, while prior to that we just use the first
        # command in the backstop loads.
        ok = bs_cmds['event_type'] == 'RUNNING_LOAD_TERMINATION_TIME'
        if np.any(ok):
            rltt = CxoTime(bs_dates[ok][0])
        else:
            # Handle the case of old loads (prior to backstop 6.9) where there
            # is no RLTT.  If the first command is AOACRSTD this indicates the
            # beginning of a maneuver ATS which may overlap by 3 mins with the
            # previous loads because of the AOACRSTD command. So move the RLTT
            # forward by 3 minutes (exactly 180.0 sec). If the first command is
            # not AOACRSTD then that command time is used as RLTT.
            if bs_cmds['tlmsid'][0] == 'AOACRSTD':
                rltt = CxoTime(bs_cmds['time'][0] + 180)
            else:
                rltt = CxoTime(bs_cmds['date'][0])

        # Scheduled stop time is the end of propagation, either the explicit
        # time as a pseudo-command in the loads or the last backstop command time.
        ok = bs_cmds['event_type'] == 'SCHEDULED_STOP_TIME'
        sched_stop = CxoTime(bs_dates[ok][0] if np.any(ok) else bs_dates[-1])

        self.logger.info(f'RLTT = {rltt.date}')
        self.logger.info(f'sched_stop = {sched_stop.date}')

        # Get currently running (or approved) commands from tbegin up to and
        # including commands at RLTT
        cmds = kadi.commands.get_cmds(tbegin, rltt, inclusive_stop=True)

        # Add in the backstop commands
        cmds = cmds.add_cmds(bs_cmds)

        # Get the states for available commands, boxed by tbegin / sched_stop.
        # The merge_identical=False is for compatibility with legacy Chandra.cmd_states,
        # but this could probably be set to True.
        states = kadi_states.get_states(cmds=cmds,
                                        start=tbegin,
                                        stop=sched_stop,
                                        state_keys=STATE_KEYS,
                                        merge_identical=False)

        # Make the column order match legacy Chandra.cmd_states.
        states = states[sorted(states.colnames)]

        # Get the first state as a dict.
        state0 = {key: states[0][key] for key in states.colnames}

        return states, state0
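
Pulling the pieces above together, a hypothetical end-to-end sketch (the dates are stand-ins for the parsed RLTT and scheduled stop time):

import kadi.commands
from kadi.commands import states as kadi_states

tbegin = '2022:100:00:00:00'
rltt = '2022:102:00:00:00'        # stand-in for RLTT from the loads
sched_stop = '2022:109:00:00:00'  # stand-in for SCHEDULED_STOP_TIME

# Approved commands through RLTT; in the real flow the review load
# backstop commands would then be merged in via cmds.add_cmds(bs_cmds).
cmds = kadi.commands.get_cmds(tbegin, rltt, inclusive_stop=True)
states = kadi_states.get_states(cmds=cmds, start=tbegin, stop=sched_stop,
                                merge_identical=False)
print(states[0])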