def get_prediction_data(self, tstart, tstop, T_init, att_data, cmd_states):
    times = self._eng_match_times(tstart, tstop, 328.0)
    states = interpolate_states(cmd_states, times)
    if T_init is None:
        msid_vals = fetch.MSID(self.msid, tstart, tstop, stat='5min', filter_bad=True)
        msid_vals.interpolate(times=times)
        msid_vals = msid_vals.vals
    else:
        msid_vals = T_init * np.ones_like(times)
    combined_dict = {'msid_times': times, 'msid_vals': msid_vals}
    att_times = att_data.pop("times")
    d_sun = Ska.Numpy.interpolate(att_data.pop("d_sun"), att_times, times, method="linear")
    combined_dict['d_sun'] = d_sun
    for input in self.inputs:
        if input in att_data:
            combined_dict[input] = Ska.Numpy.interpolate(att_data[input], att_times, times,
                                                         method="linear")
        elif input == "sim_z":
            combined_dict["sim_z"] = -0.0025143153015598743 * states["simpos"]
        elif input in pwr_states:
            combined_dict[input] = states[input]
    return pd.DataFrame(combined_dict)
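As a quick consistency check (not from the source itself), the SIM-Z conversion factor used above is the reciprocal of the steps conversion factor applied to telemetered sim_z in the validation scripts further down:

import numpy as np

# The factor above (-0.0025143153015598743) and the factor applied to
# telemetered sim_z below (-397.7225924607) are reciprocals of each other.
print(np.isclose(0.0025143153015598743 * 397.7225924607, 1.0))  # True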
def get_prediction_data(self, times, T_init, att_data, cmd_states):
    states = interpolate_states(cmd_states, times)
    combined_dict = {'msid_times': times,
                     'msid_vals': T_init * np.ones_like(times),
                     'phase': make_phase(times)}
    combined_dict.update(att_data)
    for key in states:
        if key in self.inputs:
            combined_dict[key] = states[key]
    return combined_dict
def test_pitch_2017():
    """
    Test pitch for 100 days in 2017.  Includes 2017:066, 068, 090 anomalies.
    This is done by interpolating states (at 200 second intervals) because the
    pitch generation differs slightly between kadi and Chandra.cmd_states.
    (See test_states_2017 note).

    Make sure that pitch matches to within 0.5 deg in all samples, and
    0.05 deg during NPNT.
    """
    rcstates, rkstates = get_states_test('2017:060', '2017:160', ['pcad_mode', 'pitch'])
    rcstates['tstop'] = DateTime(rcstates['datestop']).secs
    rkstates['tstop'] = DateTime(rkstates['datestop']).secs

    times = np.arange(rcstates['tstop'][0], rcstates['tstop'][-2], 200.0)
    rci = cmd_states.interpolate_states(rcstates, times)
    rki = cmd_states.interpolate_states(rkstates, times)

    dp = np.abs(rci['pitch'] - rki['pitch'])
    assert np.all(dp < 0.5)
    assert np.all(rci['pcad_mode'] == rki['pcad_mode'])
    ok = rci['pcad_mode'] == 'NPNT'
    assert np.all(dp[ok] < 0.05)
def main():
    import numpy as np
    from scipy.signal import medfilt

    opt, args = get_options()

    if 'tlm' not in globals():
        print 'Reading telemetry'
        tlm = Ska.Table.read_ascii_table('t/tlm2002_2008.dat', delimiters=[','])

    db = Ska.DBI.DBI(dbi=opt.dbi, server=opt.server)
    datestart = '2002:010:00:00:00'
    datestop = '2009:001:00:00:00'

    if 'states' not in globals():
        print 'Getting states'
        states = db.fetchall("""SELECT * from cmd_states
                                WHERE datestart > '%s' AND datestop < '%s'"""
                             % (datestart, datestop))

    ok = (tlm.date > states[0].tstart) & (tlm.date < states[-1].tstop)
    tlm = tlm[ok]

    state_vals = cmd_states.interpolate_states(states, tlm.date)

    simdiff = medfilt(tlm.tscpos - state_vals.simpos, 5)
    bad = abs(simdiff) > 5000.
    bad_state_idxs = np.unique(np.searchsorted(states.tstop, tlm[bad].date))
    for bad_state in states[bad_state_idxs]:
        ok = (tlm.date >= bad_state.tstart) & (tlm.date <= bad_state.tstop)
        simpos = np.median(tlm[ok].tscpos)
        cmd = "UPDATE cmd_states SET simpos=%d WHERE datestart='%s'" % (simpos, bad_state.datestart)
        print cmd
        db.execute(cmd)

    pitchdiff = medfilt(tlm.aosares1 - state_vals.pitch, 9)
    bad = abs(pitchdiff) > 5.
    bad_state_idxs = np.unique(np.searchsorted(states.tstop, tlm[bad].date))
    for bad_state in states[bad_state_idxs]:
        ok = (tlm.date >= bad_state.tstart) & (tlm.date <= bad_state.tstop)
        pitch = np.median(tlm[ok].aosares1)
        cmd = "UPDATE cmd_states SET pitch=%f WHERE datestart='%s'" % (pitch, bad_state.datestart)
        print cmd
        db.execute(cmd)

    db.commit()
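A self-contained illustration (synthetic data, not from the source) of why the residuals are median-filtered before thresholding: an isolated telemetry glitch is suppressed, while a sustained disagreement with the commanded state survives filtering and gets flagged.

import numpy as np
from scipy.signal import medfilt

vals = np.zeros(20)
vals[5] = 9999.0      # isolated glitch: removed by the 5-point median filter
vals[10:] = 8000.0    # sustained offset: survives filtering and is flagged
print(np.abs(medfilt(vals, 5)) > 5000.0)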
def get_cmd_states(self, datestart, datestop, times):
    tstart = DateTime(datestart).secs - 50.0 * 328.0
    tstop = DateTime(datestop).secs + 50.0 * 328.0
    states = fetch_states(tstart, tstop, dbi="hdf5")
    cmd_states = interpolate_states(states, times)
    return cmd_states
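For orientation, a minimal self-contained sketch of the lookup that interpolate_states performs (an assumption about its behavior, not the library code): each sample time is assigned the command state whose [tstart, tstop) interval contains it, given sorted, contiguous states.

import numpy as np

states = np.rec.fromrecords([(0.0, 100.0, 75000), (100.0, 250.0, -99616)],
                            names=['tstart', 'tstop', 'simpos'])
times = np.array([10.0, 99.0, 150.0, 240.0])
idx = np.searchsorted(states.tstop, times, side='right')
print(states.simpos[idx])  # [ 75000  75000 -99616 -99616]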
if 'tlm' not in globals():
    print 'Reading telemetry'
    tlm = Ska.Table.read_ascii_table('t/tlm%d.dat' % year)  # or ','

# db = Ska.DBI.DBI(dbi='sqlite', server='db_base.db3')
db = Ska.DBI.DBI(dbi='sybase')

datestart = '%d:365' % (year - 1)
datestop = '%d:001' % (year + 1)

if 'states' not in globals():
    print 'Getting states'
    states = db.fetchall("""SELECT * from cmd_states
                            WHERE datestop > '%s' AND datestart < '%s'"""
                         % (datestart, datestop))

state_vals = cmd_states.interpolate_states(states, tlm.date)
diff = medfilt(tlm.aosares1 - state_vals.pitch, 9)

figure(1, figsize=(5.5, 4))
clf()
plot_cxctime(tlm.date, diff, fmt='.-b')
title('AOSARES1 - states.pitch (%d)' % year)
ylabel('degrees')
savefig('t/cmp_pitch_%d.png' % year)

figure(2, figsize=(5.5, 4))
clf()
hist(diff, bins=50, log=True)
title('AOSARES1 - states.pitch (%d)' % year)
xlabel('degrees')
def main(opt):
    opt, args = get_options()
    if not os.path.exists(opt.outdir):
        os.mkdir(opt.outdir)

    config_logging(opt.outdir, opt.verbose)

    # Store info relevant to processing for use in outputs
    proc = dict(run_user=os.environ['USER'],
                run_time=time.ctime(),
                errors=[],
                )
    logger.info('#####################################################################')
    logger.info('# %s run at %s by %s'
                % (os.path.dirname(__file__), proc['run_time'], proc['run_user']))
    logger.info('# version = %s' % VERSION)
    logger.info('# characteristics version = %s' % characteristics.VERSION)
    logger.info('#####################################################################\n')

    logger.info('Command line options:\n%s\n' % pformat(opt.__dict__))

    # Connect to database (NEED TO USE aca_read)
    tnow = DateTime(opt.run_start_time).secs
    tstart = tnow

    # Get temperature telemetry for 3 weeks prior to min(tstart, NOW)
    tlm = get_telem_values(tstart,
                           ['sim_z', 'dp_pitch', 'aoacaseq', 'aodithen', 'cacalsta',
                            'cobsrqid', 'aofunlst', 'aopcadmd', '4ootgsel', '4ootgmtn',
                            'aocmdqt1', 'aocmdqt2', 'aocmdqt3', '1de28avo', '1deicacu',
                            '1dp28avo', '1dpicacu', '1dp28bvo', '1dpicbcu'],
                           days=opt.days,
                           name_map={'sim_z': 'tscpos', 'cobsrqid': 'obsid'})

    tlm['tscpos'] = tlm['tscpos'] * -397.7225924607
    outdir = opt.outdir
    states = get_states(tlm[0].date, tlm[-1].date)
    write_states(opt, states)
    tlm = Ska.Numpy.add_column(tlm, 'power', smoothed_power(tlm))

    # Get bad time intervals
    bad_time_mask = get_bad_mask(tlm)

    # Interpolate states onto the tlm.date grid
    state_vals = cmd_states.interpolate_states(states, tlm['date'])

    # "Forgive" dither intervals with dark current replicas
    # This will also exclude dither disables that are in cmd states for standard dark cals
    dark_mask = np.zeros(len(tlm), dtype='bool')
    dark_times = []
    # Find dither "disable" states from tlm
    dith_disa_states = logical_intervals(tlm['date'], tlm['aodithen'] == 'DISA')
    for state in dith_disa_states:
        # Index back into telemetry for each of these constant dither disable states
        idx0 = np.searchsorted(tlm['date'], state['tstart'], side='left')
        idx1 = np.searchsorted(tlm['date'], state['tstop'], side='right')
        # If any samples have aca calibration flag, mark interval for exclusion.
        if np.any(tlm['cacalsta'][idx0:idx1] != 'OFF '):
            dark_mask[idx0:idx1] = True
            dark_times.append({'start': state['datestart'],
                               'stop': state['datestop']})

    # Calculate the 4th term of the commanded quaternions
    cmd_q4 = np.sqrt(np.abs(1.0 - tlm['aocmdqt1']**2 - tlm['aocmdqt2']**2
                            - tlm['aocmdqt3']**2))
    raw_tlm_q = np.vstack([tlm['aocmdqt1'], tlm['aocmdqt2'], tlm['aocmdqt3'],
                           cmd_q4]).transpose()
    # Calculate angle/roll differences in state cmd vs tlm cmd quaternions
    raw_state_q = np.vstack([state_vals[n] for n in ['q1', 'q2', 'q3', 'q4']]).transpose()
    tlm_q = normalize(raw_tlm_q)
    # only use values that aren't NaNs
    good = np.isnan(np.sum(tlm_q, axis=-1)) == False
    # and are in NPNT
    npnt = tlm['aopcadmd'] == 'NPNT'
    # and are in KALM after the first 2 sample of the transition
    not_kalm = tlm['aoacaseq'] != 'KALM'
    kalm = (not_kalm | np.hstack([[False, False], not_kalm[:-2]])) == False
    # and aren't during momentum unloads or in the first 2 samples after unloads
    unload = tlm['aofunlst'] != 'NONE'
    no_unload = (unload | np.hstack([[False, False], unload[:-2]])) == False
    ok = good & npnt & kalm & no_unload & ~bad_time_mask
    state_q = normalize(raw_state_q)
    dot_q = np.sum(tlm_q[ok] * state_q[ok], axis=-1)
    dot_q[dot_q > 1] = 1
    angle_diff = np.degrees(2 * np.arccos(dot_q))
    angle_diff = np.min([angle_diff, 360 - angle_diff], axis=0)
    roll_diff = Quat(tlm_q[ok]).roll - Quat(state_q[ok]).roll
    roll_diff = np.min([roll_diff, 360 - roll_diff], axis=0)

    for msid in MODE_SOURCE:
        tlm_col = np.zeros(len(tlm))
        state_col = np.zeros(len(tlm))
        for mode, idx in zip(MODE_MSIDS[msid], count()):
            tlm_col[tlm[MODE_SOURCE[msid]] == mode] = idx
            state_col[state_vals[msid] == mode] = idx
        tlm = Ska.Numpy.add_column(tlm, msid, tlm_col)
        state_vals = Ska.Numpy.add_column(state_vals, "{}_pred".format(msid), state_col)

    for msid in ['letg', 'hetg']:
        txt = np.repeat('RETR', len(tlm))
        # use a combination of the select telemetry and the insertion telem to
        # approximate the state_vals values
        txt[(tlm['4ootgsel'] == msid.upper()) & (tlm['4ootgmtn'] == 'INSE')] = 'INSE'
        tlm_col = np.zeros(len(tlm))
        state_col = np.zeros(len(tlm))
        for mode, idx in zip(MODE_MSIDS[msid], count()):
            tlm_col[txt == mode] = idx
            state_col[state_vals[msid] == mode] = idx
        tlm = Ska.Numpy.add_column(tlm, msid, tlm_col)
        state_vals = Ska.Numpy.add_column(state_vals, "{}_pred".format(msid), state_col)

    diff_only = {'pointing': {'diff': angle_diff * 3600,
                              'date': tlm['date'][ok]},
                 'roll': {'diff': roll_diff * 3600,
                          'date': tlm['date'][ok]}}

    pred = {'dp_pitch': state_vals.pitch,
            'obsid': state_vals.obsid,
            'dither': state_vals['dither_pred'],
            'pcad_mode': state_vals['pcad_mode_pred'],
            'letg': state_vals['letg_pred'],
            'hetg': state_vals['hetg_pred'],
            'tscpos': state_vals.simpos,
            'power': state_vals.power,
            'pointing': 1,
            'roll': 1}

    plots_validation = []
    valid_viols = []
    logger.info('Making validation plots and quantile table')
    quantiles = (1, 5, 16, 50, 84, 95, 99)
    # store lines of quantile table in a string and write out later
    quant_table = ''
    quant_head = ",".join(['MSID'] + ["quant%d" % x for x in quantiles])
    quant_table += quant_head + "\n"
    for fig_id, msid in enumerate(sorted(pred)):
        plot = dict(msid=msid.upper())
        fig = plt.figure(10 + fig_id, figsize=(7, 3.5))
        fig.clf()
        scale = SCALES.get(msid, 1.0)
        ax = None
        if msid not in diff_only:
            if msid in MODE_MSIDS:
                state_msid = np.zeros(len(tlm))
                for mode, idx in zip(MODE_MSIDS[msid], count()):
                    state_msid[state_vals[msid] == mode] = idx
                ticklocs, fig, ax = plot_cxctime(tlm['date'], tlm[msid], fig=fig, fmt='-r')
                ticklocs, fig, ax = plot_cxctime(tlm['date'], state_msid, fig=fig, fmt='-b')
                plt.yticks(range(len(MODE_MSIDS[msid])), MODE_MSIDS[msid])
            else:
                ticklocs, fig, ax = plot_cxctime(tlm['date'], tlm[msid] / scale,
                                                 fig=fig, fmt='-r')
                ticklocs, fig, ax = plot_cxctime(tlm['date'], pred[msid] / scale,
                                                 fig=fig, fmt='-b')
        else:
            ticklocs, fig, ax = plot_cxctime(diff_only[msid]['date'],
                                             diff_only[msid]['diff'] / scale,
                                             fig=fig, fmt='-k')
        plot['diff_only'] = msid in diff_only
        ax.set_title(TITLE[msid])
        ax.set_ylabel(LABELS[msid])
        xlims = ax.get_xlim()
        ylims = ax.get_ylim()

        bad_times = list(characteristics.bad_times)

        # Add the time intervals of dark current calibrations that have been excluded from
        # the diffs to the "bad_times" for validation so they also can be marked with grey
        # rectangles in the plot.  This is only really visible with interactive/zoomed plot.
        if msid in ['dither', 'pcad_mode']:
            bad_times.extend(dark_times)

        # Add "background" grey rectangles for excluded time regions to vs-time plot
        for bad in bad_times:
            bad_start = cxc2pd([DateTime(bad['start']).secs])[0]
            bad_stop = cxc2pd([DateTime(bad['stop']).secs])[0]
            if not ((bad_stop >= xlims[0]) & (bad_start <= xlims[1])):
                continue
            rect = matplotlib.patches.Rectangle((bad_start, ylims[0]),
                                                bad_stop - bad_start,
                                                ylims[1] - ylims[0],
                                                alpha=.2,
                                                facecolor='black',
                                                edgecolor='none')
            ax.add_patch(rect)

        filename = msid + '_valid.png'
        outfile = os.path.join(outdir, filename)
        logger.info('Writing plot file %s' % outfile)
        plt.tight_layout()
        plt.margins(0.05)
        fig.savefig(outfile)
        plot['lines'] = filename

        if msid not in diff_only:
            ok = ~bad_time_mask
            if msid in ['dither', 'pcad_mode']:
                # For these two validations also ignore intervals during a dark current
                # calibration
                ok &= ~dark_mask
            diff = tlm[msid][ok] - pred[msid][ok]
        else:
            diff = diff_only[msid]['diff']

        # Sort the diffs in-place because we're just using them in aggregate
        diff = np.sort(diff)

        # if there are only a few residuals, don't bother with histograms
        if msid.upper() in validation_scale_count:
            plot['samples'] = len(diff)
            plot['diff_count'] = np.count_nonzero(diff)
            plot['n_changes'] = 1 + np.count_nonzero(pred[msid][1:] - pred[msid][0:-1])
            if (plot['diff_count'] <
                    (plot['n_changes'] * validation_scale_count[msid.upper()])):
                plots_validation.append(plot)
                continue
            # if the msid exceeds the diff count, add a validation violation
            else:
                viol = {'msid': "{}_diff_count".format(msid),
                        'value': plot['diff_count'],
                        'limit': plot['n_changes'] * validation_scale_count[msid.upper()],
                        'quant': None,
                        }
                valid_viols.append(viol)
                logger.info('WARNING: %s %d discrete diffs exceed limit of %d'
                            % (msid, plot['diff_count'],
                               plot['n_changes'] * validation_scale_count[msid.upper()]))

        # Make quantiles
        if (msid != 'obsid'):
            quant_line = "%s" % msid
            for quant in quantiles:
                quant_val = diff[(len(diff) * quant) // 100]
                plot['quant%02d' % quant] = FMTS[msid] % quant_val
                quant_line += (',' + FMTS[msid] % quant_val)
            quant_table += quant_line + "\n"

        for histscale in ('lin', 'log'):
            fig = plt.figure(20 + fig_id, figsize=(4, 3))
            fig.clf()
            ax = fig.gca()
            ax.hist(diff / scale, bins=50, log=(histscale == 'log'))
            ax.set_title(msid.upper() + ' residuals: telem - cmd states', fontsize=11)
            ax.set_xlabel(LABELS[msid])
            fig.subplots_adjust(bottom=0.18)
            plt.tight_layout()
            filename = '%s_valid_hist_%s.png' % (msid, histscale)
            outfile = os.path.join(outdir, filename)
            logger.info('Writing plot file %s' % outfile)
            fig.savefig(outfile)
            plot['hist' + histscale] = filename

        plots_validation.append(plot)

    filename = os.path.join(outdir, 'validation_quant.csv')
    logger.info('Writing quantile table %s' % filename)
    f = open(filename, 'w')
    f.write(quant_table)
    f.close()

    # If run_start_time is specified this is likely for regression testing
    # or other debugging.  In this case write out the full predicted and
    # telemetered dataset as a pickle.
    if opt.run_start_time:
        filename = os.path.join(outdir, 'validation_data.pkl')
        logger.info('Writing validation data %s' % filename)
        f = open(filename, 'w')
        pickle.dump({'pred': pred, 'tlm': tlm}, f, protocol=-1)
        f.close()

    valid_viols.extend(make_validation_viols(plots_validation))
    if len(valid_viols) > 0:
        # generate daily plot url if outdir in expected year/day format
        daymatch = re.match('.*(\d{4})/(\d{3})', opt.outdir)
        if daymatch:
            url = os.path.join(URL, daymatch.group(1), daymatch.group(2))
            logger.info('validation warning(s) at %s' % url)
        else:
            logger.info('validation warning(s) in output at %s' % opt.outdir)

    write_index_rst(opt, proc, plots_validation, valid_viols)
    rst_to_html(opt, proc)
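The quantile table above uses a simple index-based percentile on the sorted residuals. A self-contained check (synthetic data, not from the source) showing that this convention tracks np.percentile for large samples:

import numpy as np

np.random.seed(0)
diff = np.sort(np.random.normal(size=10000))
for quant in (1, 5, 16, 50, 84, 95, 99):
    print(quant, diff[(len(diff) * quant) // 100], np.percentile(diff, quant))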
def write_states_json(fn, fig, ax, states, start, stop, now, next_comm,
                      fluences, fluence_times, p3s, p3_times, p3_avg,
                      hrcs, hrc_times):
    """
    Generate JSON data file that contains all the annotation values used in
    the javascript-driven annotated plot on Replan Central.  This creates a
    data structure with state values for each 10-minute time step along the
    X-axis of the plot.  All of the hard work (formatting etc) is done here
    so the javascript is very simple.
    """
    formats = {'ra': '{:10.4f}',
               'dec': '{:10.4f}',
               'roll': '{:10.4f}',
               'pitch': '{:8.2f}',
               'obsid': '{:5d}',
               }
    start = start - 1
    tstop = (stop + 1).secs
    tstart = DateTime(start.date[:8] + ':00:00:00').secs
    times = np.arange(tstart, tstop, 600)
    pds = cxc2pd(times)  # Convert from CXC time to plotdate times

    # Set up matplotlib transforms
    data_to_disp = ax.transData.transform
    ax_to_disp = ax.transAxes.transform
    disp_to_ax = ax.transAxes.inverted().transform
    disp_to_fig = fig.transFigure.inverted().transform

    disp_xy = ax_to_disp([(0, 0), (1, 1)])
    fig_xy = disp_to_fig(disp_xy)
    data = {'ax_x': fig_xy[:, 0].tolist(),
            'ax_y': fig_xy[:, 1].tolist()}

    outs = []
    now_idx = 0
    now_secs = now.secs
    state_names = ('obsid', 'simpos', 'pitch', 'ra', 'dec', 'roll',
                   'pcad_mode', 'si_mode', 'power_cmd', 'letg', 'hetg')

    # Get all the state values that occur within the range of the plot
    disp_xy = data_to_disp([(pd, 0.0) for pd in pds])
    ax_xy = disp_to_ax(disp_xy)
    ok = (ax_xy[:, 0] > 0.0) & (ax_xy[:, 0] < 1.0)
    times = times[ok]
    pds = pds[ok]
    state_vals = interpolate_states(states, times)

    # Set the current values
    p3_now = p3s[-1]
    hrc_now = hrcs[-1]
    fluence_now = fluences[0]
    fluences = Ska.Numpy.interpolate(fluences, fluence_times, times)
    p3s = Ska.Numpy.interpolate(p3s, p3_times, times)
    hrcs = Ska.Numpy.interpolate(hrcs, hrc_times, times)

    # Iterate through each time step and create corresponding data structure
    # with pre-formatted values for display in the output table.
    NOT_AVAIL = 'N/A'
    for time, pd, state_val, fluence, p3, hrc in izip(times, pds, state_vals,
                                                      fluences, p3s, hrcs):
        out = {}
        out['date'] = date_zulu(time)
        for name in state_names:
            val = state_val[name].tolist()
            fval = formats.get(name, '{}').format(val)
            out[name] = re.sub(' ', ' ', fval)
        out['ccd_fep'] = '{}, {}'.format(state_val['ccd_count'], state_val['fep_count'])
        out['vid_clock'] = '{}, {}'.format(state_val['vid_board'], state_val['clocking'])
        out['si'] = get_si(state_val['simpos'])
        out['now_dt'] = get_fmt_dt(time, now_secs)
        if time < now_secs:
            now_idx += 1
            out['fluence'] = '{:.2f}e9'.format(fluence_now)
            out['p3'] = '{:.0f}'.format(p3) if p3 > 0 else NOT_AVAIL
            out['hrc'] = '{:.0f}'.format(hrc)
        else:
            out['fluence'] = '{:.2f}e9'.format(fluence)
            out['p3'] = '{:.0f}'.format(p3_now) if p3_now > 0 else NOT_AVAIL
            out['hrc'] = '{:.0f}'.format(hrc_now)
        outs.append(out)

    data['states'] = outs
    data['now_idx'] = now_idx
    data['now_date'] = date_zulu(now)
    data['p3_avg_now'] = '{:.0f}'.format(p3_avg) if p3_avg > 0 else NOT_AVAIL
    data['p3_now'] = '{:.0f}'.format(p3_now) if p3_now > 0 else NOT_AVAIL
    data['hrc_now'] = '{:.0f}'.format(hrc_now)

    track = next_comm['track_local']['value']
    data['track_time'] = (' ' + track[15:19] + track[:4] + ' ' + track[10:13])
    data['track_dt'] = get_fmt_dt(next_comm['bot_date']['value'], now_secs)
    data['track_station'] = '{}-{}'.format(next_comm['site']['value'],
                                           next_comm['station']['value'][4:6])
    data['track_activity'] = next_comm['activity']['value'][:14]

    # Finally write this all out as a simple javascript program that defines a single
    # variable ``data``.
    with open(fn, 'w') as f:
        f.write('var data = {}'.format(json.dumps(data)))
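Since the output is a javascript assignment rather than plain JSON, a hypothetical reader (not part of the source) can recover the data by stripping the "var data = " prefix:

import json

def read_states_js(fn):
    # Strip the "var data = " prefix and parse the remaining JSON payload.
    with open(fn) as f:
        text = f.read()
    return json.loads(text.split('=', 1)[1])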
def main(): """ Generate the Replan Central timeline plot. """ import matplotlib.patches import matplotlib.pyplot as plt from Ska.Matplotlib import plot_cxctime # TODO: refactor this into smaller functions where possible. # Basic setup. Set times and get input states, radzones and comms. now = DateTime('2012:249:00:35:00' if args.test else None) now = DateTime(now.date[:14] + ':00') # truncate to 0 secs start = now - 1.0 stop = start + args.hours / 24.0 states = fetch_states(start, stop, server='/proj/sot/ska/data/cmd_states/cmd_states.h5') radzones = get_radzones() comms = get_comms() # Get the ACIS ops fluence estimate and current 2hr avg flux fluence_date, fluence0 = get_fluence(ACIS_FLUENCE_FILE) if fluence_date.secs < now.secs: fluence_date = now avg_flux = get_avg_flux(ACE_RATES_FILE) # Get the realtime ACE P3 and HRC proxy values over the time range goes_x_times, goes_x_vals = get_goes_x(start.secs, now.secs) p3_times, p3_vals = get_ace_p3(start.secs, now.secs) hrc_times, hrc_vals = get_hrc(start.secs, now.secs) # For testing: inject predefined values for different scenarios if args.test_scenario: p3_vals, avg_flux, fluence0 = get_test_vals( args.test_scenario, p3_times, p3_vals, avg_flux, fluence0) # Compute the predicted fluence based on the current 2hr average flux. fluence_times = np.arange(fluence_date.secs, stop.secs, args.dt) rates = np.ones_like(fluence_times) * max(avg_flux, 0.0) * args.dt fluence = calc_fluence(fluence_times, fluence0, rates, states) zero_fluence_at_radzone(fluence_times, fluence, radzones) # Initialize the main plot figure plt.rc('legend', fontsize=10) fig = plt.figure(1, figsize=(9, 5)) fig.clf() fig.patch.set_alpha(0.0) ax = fig.add_axes(AXES_LOC, axis_bgcolor='w') ax.yaxis.tick_right() ax.yaxis.set_label_position('right') ax.yaxis.set_offset_position('right') ax.patch.set_alpha(1.0) # Plot lines at 1.0 and 2.0 (10^9) corresponding to fluence yellow # and red limits. Also plot the fluence=0 line in black. x0, x1 = cxc2pd([fluence_times[0], fluence_times[-1]]) plt.plot([x0, x1], [0.0, 0.0], '-k') plt.plot([x0, x1], [1.0, 1.0], '--b', lw=2.0) plt.plot([x0, x1], [2.0, 2.0], '--r', lw=2.0) # Draw dummy lines off the plot for the legend lx = [fluence_times[0], fluence_times[-1]] ly = [-1, -1] plot_cxctime(lx, ly, '-k', lw=3, label='None', fig=fig, ax=ax) plot_cxctime(lx, ly, '-r', lw=3, label='HETG', fig=fig, ax=ax) plot_cxctime(lx, ly, '-c', lw=3, label='LETG', fig=fig, ax=ax) # Make a z-valued curve where the z value corresponds to the grating state. 
x = cxc2pd(fluence_times) y = fluence z = np.zeros(len(fluence_times), dtype=np.int) for state in states: ok = ((state['tstart'] < fluence_times) & (fluence_times <= state['tstop'])) if state['hetg'] == 'INSR': z[ok] = 1 elif state['letg'] == 'INSR': z[ok] = 2 plot_multi_line(x, y, z, [0, 1, 2], ['k', 'r', 'c'], ax) # Plot 10, 50, 90 percentiles of fluence p3_slope = get_p3_slope(p3_times, p3_vals) if p3_slope is not None and avg_flux > 0: p3_fits, p3_samps, fluences = cfd.get_fluences( os.path.join(args.data_dir, 'ACE_hourly_avg.npy')) hrs, fl10, fl50, fl90 = cfd.get_fluence_percentiles( avg_flux, p3_slope, p3_fits, p3_samps, fluences, args.min_flux_samples, args.max_slope_samples) fluence_hours = (fluence_times - fluence_times[0]) / 3600.0 for fl_y, linecolor in zip((fl10, fl50, fl90), ('-g', '-b', '-r')): fl_y = Ska.Numpy.interpolate(fl_y, hrs, fluence_hours) rates = np.diff(fl_y) fl_y_atten = calc_fluence(fluence_times[:-1], fluence0, rates, states) zero_fluence_at_radzone(fluence_times[:-1], fl_y_atten, radzones) plt.plot(x0 + fluence_hours[:-1] / 24.0, fl_y_atten, linecolor) # Set x and y axis limits x0, x1 = cxc2pd([start.secs, stop.secs]) plt.xlim(x0, x1) y0 = -0.45 y1 = 2.55 plt.ylim(y0, y1) id_xs = [] id_labels = [] # Draw comm passes next_comm = None for comm in comms: t0 = DateTime(comm['bot_date']['value']).secs t1 = DateTime(comm['eot_date']['value']).secs pd0, pd1 = cxc2pd([t0, t1]) if pd1 >= x0 and pd0 <= x1: p = matplotlib.patches.Rectangle((pd0, y0), pd1 - pd0, y1 - y0, alpha=0.2, facecolor='r', edgecolor='none') ax.add_patch(p) id_xs.append((pd0 + pd1) / 2) id_labels.append('{}:{}'.format(comm['station']['value'][4:6], comm['track_local']['value'][:9])) if (next_comm is None and DateTime(comm['bot_date']['value']).secs > now.secs): next_comm = comm # Draw radiation zones for rad0, rad1 in radzones: t0 = DateTime(rad0).secs t1 = DateTime(rad1).secs if t0 < stop.secs and t1 > start.secs: if t0 < start.secs: t0 = start.secs if t1 > stop.secs: t1 = stop.secs pd0, pd1 = cxc2pd([t0, t1]) p = matplotlib.patches.Rectangle((pd0, y0), pd1 - pd0, y1 - y0, alpha=0.2, facecolor='b', edgecolor='none') ax.add_patch(p) # Draw now line plt.plot(cxc2pd([now.secs, now.secs]), [y0, y1], '-g', lw=4) id_xs.extend(cxc2pd([now.secs])) id_labels.append('NOW') # Add labels for obsids id_xs.extend(cxc2pd([start.secs])) id_labels.append(str(states[0]['obsid'])) for s0, s1 in zip(states[:-1], states[1:]): if s0['obsid'] != s1['obsid']: id_xs.append(cxc2pd([s1['tstart']])[0]) id_labels.append(str(s1['obsid'])) plt.grid() plt.ylabel('Attenuated fluence / 1e9') plt.legend(loc='upper center', labelspacing=0.15) lineid_plot.plot_line_ids(cxc2pd([start.secs, stop.secs]), [y1, y1], id_xs, id_labels, ax=ax, box_axes_space=0.14, label1_size=10) # Plot observed GOES X-ray rates and limits pd = cxc2pd(goes_x_times) lgoesx = log_scale(goes_x_vals * 1e8) plt.plot(pd, lgoesx, '-m', alpha=0.3, lw=1.5) plt.plot(pd, lgoesx, '.m', mec='m', ms=3) # Plot observed ACE P3 rates and limits lp3 = log_scale(p3_vals) pd = cxc2pd(p3_times) ox = cxc2pd([start.secs, now.secs]) oy1 = log_scale(12000.) plt.plot(ox, [oy1, oy1], '--b', lw=2) oy1 = log_scale(55000.) 
plt.plot(ox, [oy1, oy1], '--r', lw=2) plt.plot(pd, lp3, '-k', alpha=0.3, lw=3) plt.plot(pd, lp3, '.k', mec='k', ms=3) # Plot observed HRC shield proxy rates and limits pd = cxc2pd(hrc_times) lhrc = log_scale(hrc_vals) plt.plot(pd, lhrc, '-c', alpha=0.3, lw=3) plt.plot(pd, lhrc, '.c', mec='c', ms=3) # Draw SI state times = np.arange(start.secs, stop.secs, 300) state_vals = interpolate_states(states, times) y_si = -0.23 x = cxc2pd(times) y = np.zeros_like(times) + y_si z = np.zeros_like(times, dtype=np.float) # 0 => ACIS z[state_vals['simpos'] < 0] = 1.0 # HRC plot_multi_line(x, y, z, [0, 1], ['c', 'r'], ax) dx = (x1 - x0) * 0.01 plt.text(x1 + dx, y_si, 'HRC/ACIS', ha='left', va='center', size='small') # Draw log scale y-axis on left ax2 = fig.add_axes(AXES_LOC, axis_bgcolor='w', frameon=False) ax2.set_autoscale_on(False) ax2.xaxis.set_visible(False) ax2.set_xlim(0, 1) ax2.set_yscale('log') ax2.set_ylim(np.power(10.0, np.array([y0, y1]) * 2 + 1)) ax2.set_ylabel('ACE flux / HRC proxy / GOES X-ray') ax2.text(-0.015, 2.5e3, 'M', ha='right', color='m', weight='demibold') ax2.text(-0.015, 2.5e4, 'X', ha='right', color='m', weight='semibold') # Draw dummy lines off the plot for the legend lx = [0, 1] ly = [1, 1] ax2.plot(lx, ly, '-k', lw=3, label='ACE') ax2.plot(lx, ly, '-c', lw=3, label='HRC') ax2.plot(lx, ly, '-m', lw=3, label='GOES-X') ax2.legend(loc='upper left', labelspacing=0.15) plt.draw() plt.savefig(os.path.join(args.data_dir, 'timeline.png')) write_states_json(os.path.join(args.data_dir, 'timeline_states.js'), fig, ax, states, start, stop, now, next_comm, fluence, fluence_times, p3_vals, p3_times, avg_flux, hrc_vals, hrc_times)
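plot_multi_line is not shown in this excerpt; below is a minimal sketch of one way such a z-coded colored curve could be drawn with matplotlib's LineCollection (an assumption about its behavior, not the actual helper):

import numpy as np
from matplotlib.collections import LineCollection

def plot_multi_line_sketch(x, y, z, z_values, colors, ax):
    # Color each segment of the (x, y) curve by the z value of its first point.
    points = np.column_stack([x, y]).reshape(-1, 1, 2)
    segments = np.concatenate([points[:-1], points[1:]], axis=1)
    seg_colors = [colors[z_values.index(int(zv))] for zv in z[:-1]]
    ax.add_collection(LineCollection(segments, colors=seg_colors))
    ax.autoscale_view()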
def main(opt):
    opt, args = get_options()
    if not os.path.exists(opt.outdir):
        os.mkdir(opt.outdir)

    config_logging(opt.outdir, opt.verbose)

    # Store info relevant to processing for use in outputs
    proc = dict(run_user=os.environ['USER'],
                run_time=time.ctime(),
                errors=[],
                )
    logger.info('#####################################################################')
    logger.info('# %s run at %s by %s'
                % (os.path.dirname(__file__), proc['run_time'], proc['run_user']))
    logger.info('# version = %s' % VERSION)
    logger.info('# characteristics version = %s' % characteristics.VERSION)
    logger.info('#####################################################################\n')

    logger.info('Command line options:\n%s\n' % pformat(opt.__dict__))

    # Connect to database (NEED TO USE aca_read)
    tnow = DateTime(opt.run_start_time).secs
    tstart = tnow

    # Get temperature telemetry for 3 weeks prior to min(tstart, NOW)
    tlm = get_telem_values(tstart,
                           ['sim_z', 'dp_pitch', 'aoacaseq', 'aodithen', 'cobsrqid',
                            'aofunlst', 'aopcadmd', '4ootgsel', '4ootgmtn', 'aocmdqt1',
                            'aocmdqt2', 'aocmdqt3', '1de28avo', '1deicacu', '1dp28avo',
                            '1dpicacu', '1dp28bvo', '1dpicbcu'],
                           days=opt.days,
                           name_map={'sim_z': 'tscpos', 'cobsrqid': 'obsid'})

    tlm['tscpos'] = tlm['tscpos'] * -397.7225924607
    outdir = opt.outdir
    states = get_states(tlm[0].date, tlm[-1].date)
    write_states(opt, states)
    tlm = Ska.Numpy.add_column(tlm, 'power', smoothed_power(tlm))

    # Get bad time intervals
    bad_time_mask = get_bad_mask(tlm)

    # Interpolate states onto the tlm.date grid
    state_vals = cmd_states.interpolate_states(states, tlm['date'])

    # Calculate the 4th term of the commanded quaternions
    cmd_q4 = np.sqrt(np.abs(1.0 - tlm['aocmdqt1']**2 - tlm['aocmdqt2']**2
                            - tlm['aocmdqt3']**2))
    raw_tlm_q = np.vstack([tlm['aocmdqt1'], tlm['aocmdqt2'], tlm['aocmdqt3'],
                           cmd_q4]).transpose()
    # Calculate angle/roll differences in state cmd vs tlm cmd quaternions
    raw_state_q = np.vstack([state_vals[n] for n in ['q1', 'q2', 'q3', 'q4']]).transpose()
    tlm_q = normalize(raw_tlm_q)
    # only use values that aren't NaNs
    good = np.isnan(np.sum(tlm_q, axis=-1)) == False
    # and are in NPNT
    npnt = tlm['aopcadmd'] == 'NPNT'
    # and are in KALM after the first 2 sample of the transition
    not_kalm = tlm['aoacaseq'] != 'KALM'
    kalm = (not_kalm | np.hstack([[False, False], not_kalm[:-2]])) == False
    # and aren't during momentum unloads or in the first 2 samples after unloads
    unload = tlm['aofunlst'] != 'NONE'
    no_unload = (unload | np.hstack([[False, False], unload[:-2]])) == False
    ok = good & npnt & kalm & no_unload & ~bad_time_mask
    state_q = normalize(raw_state_q)
    dot_q = np.sum(tlm_q[ok] * state_q[ok], axis=-1)
    dot_q[dot_q > 1] = 1
    angle_diff = np.degrees(2 * np.arccos(dot_q))
    angle_diff = np.min([angle_diff, 360 - angle_diff], axis=0)
    roll_diff = Quat(tlm_q[ok]).roll - Quat(state_q[ok]).roll
    roll_diff = np.min([roll_diff, 360 - roll_diff], axis=0)

    for msid in MODE_SOURCE:
        tlm_col = np.zeros(len(tlm))
        state_col = np.zeros(len(tlm))
        for mode, idx in zip(MODE_MSIDS[msid], count()):
            tlm_col[tlm[MODE_SOURCE[msid]] == mode] = idx
            state_col[state_vals[msid] == mode] = idx
        tlm = Ska.Numpy.add_column(tlm, msid, tlm_col)
        state_vals = Ska.Numpy.add_column(state_vals, "{}_pred".format(msid), state_col)

    for msid in ['letg', 'hetg']:
        txt = np.repeat('RETR', len(tlm))
        # use a combination of the select telemetry and the insertion telem to
        # approximate the state_vals values
        txt[(tlm['4ootgsel'] == msid.upper()) & (tlm['4ootgmtn'] == 'INSE')] = 'INSE'
        tlm_col = np.zeros(len(tlm))
        state_col = np.zeros(len(tlm))
        for mode, idx in zip(MODE_MSIDS[msid], count()):
            tlm_col[txt == mode] = idx
            state_col[state_vals[msid] == mode] = idx
        tlm = Ska.Numpy.add_column(tlm, msid, tlm_col)
        state_vals = Ska.Numpy.add_column(state_vals, "{}_pred".format(msid), state_col)

    diff_only = {'pointing': {'diff': angle_diff * 3600,
                              'date': tlm['date'][ok]},
                 'roll': {'diff': roll_diff * 3600,
                          'date': tlm['date'][ok]}}

    pred = {'dp_pitch': state_vals.pitch,
            'obsid': state_vals.obsid,
            'dither': state_vals['dither_pred'],
            'pcad_mode': state_vals['pcad_mode_pred'],
            'letg': state_vals['letg_pred'],
            'hetg': state_vals['hetg_pred'],
            'tscpos': state_vals.simpos,
            'power': state_vals.power,
            'pointing': 1,
            'roll': 1}

    plots_validation = []
    valid_viols = []
    logger.info('Making validation plots and quantile table')
    quantiles = (1, 5, 16, 50, 84, 95, 99)
    # store lines of quantile table in a string and write out later
    quant_table = ''
    quant_head = ",".join(['MSID'] + ["quant%d" % x for x in quantiles])
    quant_table += quant_head + "\n"
    for fig_id, msid in enumerate(sorted(pred)):
        plot = dict(msid=msid.upper())
        fig = plt.figure(10 + fig_id, figsize=(7, 3.5))
        fig.clf()
        scale = SCALES.get(msid, 1.0)
        ax = None
        if msid not in diff_only:
            if msid in MODE_MSIDS:
                state_msid = np.zeros(len(tlm))
                for mode, idx in zip(MODE_MSIDS[msid], count()):
                    state_msid[state_vals[msid] == mode] = idx
                ticklocs, fig, ax = plot_cxctime(tlm['date'], tlm[msid], fig=fig, fmt='-r')
                ticklocs, fig, ax = plot_cxctime(tlm['date'], state_msid, fig=fig, fmt='-b')
                plt.yticks(range(len(MODE_MSIDS[msid])), MODE_MSIDS[msid])
            else:
                ticklocs, fig, ax = plot_cxctime(tlm['date'], tlm[msid] / scale,
                                                 fig=fig, fmt='-r')
                ticklocs, fig, ax = plot_cxctime(tlm['date'], pred[msid] / scale,
                                                 fig=fig, fmt='-b')
        else:
            ticklocs, fig, ax = plot_cxctime(diff_only[msid]['date'],
                                             diff_only[msid]['diff'] / scale,
                                             fig=fig, fmt='-k')
        plot['diff_only'] = msid in diff_only
        ax.set_title(TITLE[msid])
        ax.set_ylabel(LABELS[msid])
        xlims = ax.get_xlim()
        ylims = ax.get_ylim()

        for bad in characteristics.bad_times:
            bad_start = cxc2pd([DateTime(bad['start']).secs])[0]
            bad_stop = cxc2pd([DateTime(bad['stop']).secs])[0]
            if not ((bad_stop >= xlims[0]) & (bad_start <= xlims[1])):
                continue
            rect = matplotlib.patches.Rectangle((bad_start, ylims[0]),
                                                bad_stop - bad_start,
                                                ylims[1] - ylims[0],
                                                alpha=.2,
                                                facecolor='black',
                                                edgecolor='none')
            ax.add_patch(rect)

        filename = msid + '_valid.png'
        outfile = os.path.join(outdir, filename)
        logger.info('Writing plot file %s' % outfile)
        plt.tight_layout()
        fig.savefig(outfile)
        plot['lines'] = filename

        if msid not in diff_only:
            diff = tlm[msid][~bad_time_mask] - pred[msid][~bad_time_mask]
            diff = np.sort(diff)
        else:
            diff = np.sort(diff_only[msid]['diff'])

        # if there are only a few residuals, don't bother with histograms
        if msid.upper() in validation_scale_count:
            plot['samples'] = len(diff)
            plot['diff_count'] = np.count_nonzero(diff)
            plot['n_changes'] = 1 + np.count_nonzero(pred[msid][1:] - pred[msid][0:-1])
            if (plot['diff_count'] <
                    (plot['n_changes'] * validation_scale_count[msid.upper()])):
                plots_validation.append(plot)
                continue
            # if the msid exceeds the diff count, add a validation violation
            else:
                viol = {'msid': "{}_diff_count".format(msid),
                        'value': plot['diff_count'],
                        'limit': plot['n_changes'] * validation_scale_count[msid.upper()],
                        'quant': None,
                        }
                valid_viols.append(viol)
                logger.info('WARNING: %s %d discrete diffs exceed limit of %d'
                            % (msid, plot['diff_count'],
                               plot['n_changes'] * validation_scale_count[msid.upper()]))

        # Make quantiles
        if (msid != 'obsid'):
            quant_line = "%s" % msid
            for quant in quantiles:
                quant_val = diff[(len(diff) * quant) // 100]
                plot['quant%02d' % quant] = FMTS[msid] % quant_val
                quant_line += (',' + FMTS[msid] % quant_val)
            quant_table += quant_line + "\n"

        for histscale in ('lin', 'log'):
            fig = plt.figure(20 + fig_id, figsize=(4, 3))
            fig.clf()
            ax = fig.gca()
            ax.hist(diff / scale, bins=50, log=(histscale == 'log'))
            ax.set_title(msid.upper() + ' residuals: telem - cmd states', fontsize=11)
            ax.set_xlabel(LABELS[msid])
            fig.subplots_adjust(bottom=0.18)
            plt.tight_layout()
            filename = '%s_valid_hist_%s.png' % (msid, histscale)
            outfile = os.path.join(outdir, filename)
            logger.info('Writing plot file %s' % outfile)
            fig.savefig(outfile)
            plot['hist' + histscale] = filename

        plots_validation.append(plot)

    filename = os.path.join(outdir, 'validation_quant.csv')
    logger.info('Writing quantile table %s' % filename)
    f = open(filename, 'w')
    f.write(quant_table)
    f.close()

    # If run_start_time is specified this is likely for regression testing
    # or other debugging.  In this case write out the full predicted and
    # telemetered dataset as a pickle.
    if opt.run_start_time:
        filename = os.path.join(outdir, 'validation_data.pkl')
        logger.info('Writing validation data %s' % filename)
        f = open(filename, 'w')
        pickle.dump({'pred': pred, 'tlm': tlm}, f, protocol=-1)
        f.close()

    valid_viols.extend(make_validation_viols(plots_validation))
    if len(valid_viols) > 0:
        # generate daily plot url if outdir in expected year/day format
        daymatch = re.match('.*(\d{4})/(\d{3})', opt.outdir)
        if daymatch:
            url = os.path.join(URL, daymatch.group(1), daymatch.group(2))
            logger.info('validation warning(s) at %s' % url)
        else:
            logger.info('validation warning(s) in output at %s' % opt.outdir)

    write_index_rst(opt, proc, plots_validation, valid_viols)
    rst_to_html(opt, proc)
def make_validation_plots(opt, tlm, db):
    """
    Make validation output plots.

    :param outdir: output directory
    :param tlm: telemetry
    :param db: database handle
    :returns: list of plot info including plot file names
    """
    outdir = opt.outdir
    states = get_states(tlm[0].date, tlm[-1].date, db)
    tlm = Ska.Numpy.add_column(tlm, "power", smoothed_power(tlm))

    T_dea0 = np.mean(tlm["1pdeaat"][:10])
    T_pin0 = np.mean(tlm["1pin1at"][:10])

    # Create array of times at which to calculate PSMC temperatures, then do it.
    logger.info("Calculating PSMC thermal model for validation")
    T_pin, T_dea = twodof.calc_twodof_model(states, T_pin0, T_dea0, tlm.date,
                                            characteristics.model_par)

    # Interpolate states onto the tlm.date grid
    state_vals = cmd_states.interpolate_states(states, tlm.date)
    pred = {"1pdeaat": T_dea,
            "1pin1at": T_pin,
            "aosares1": state_vals.pitch,
            "tscpos": state_vals.simpos,
            "power": state_vals.power,
            }
    labels = {"1pdeaat": "Degrees (C)",
              "1pin1at": "Degrees (C)",
              "aosares1": "Pitch (degrees)",
              "tscpos": "SIM-Z (steps/1000)",
              "power": "ACIS power (watts)",
              }
    scales = {"tscpos": 1000.0}
    fmts = {"1pdeaat": "%.2f", "1pin1at": "%.2f", "aosares1": "%.3f",
            "power": "%.2f", "tscpos": "%d"}

    plots = []
    logger.info("Making PSMC model validation plots and quantile table")
    quantiles = (1, 5, 16, 50, 84, 95, 99)
    # store lines of quantile table in a string and write out later
    quant_table = ""
    quant_head = ",".join(["MSID"] + ["quant%d" % x for x in quantiles])
    quant_table += quant_head + "\n"
    for fig_id, msid in enumerate(sorted(pred)):
        plot = dict(msid=msid.upper())
        fig = plt.figure(10 + fig_id, figsize=(7, 3.5))
        fig.clf()
        scale = scales.get(msid, 1.0)
        ticklocs, fig, ax = plot_cxctime(tlm.date, tlm[msid] / scale, fig=fig, fmt="-r")
        ticklocs, fig, ax = plot_cxctime(tlm.date, pred[msid] / scale, fig=fig, fmt="-b")
        ax.set_title(msid.upper() + " validation")
        ax.set_ylabel(labels[msid])
        filename = msid + "_valid.png"
        outfile = os.path.join(outdir, filename)
        logger.info("Writing plot file %s" % outfile)
        fig.savefig(outfile)
        plot["lines"] = filename

        # Make quantiles
        diff = np.sort(tlm[msid] - pred[msid])
        quant_line = "%s" % msid
        for quant in quantiles:
            quant_val = diff[(len(diff) * quant) // 100]
            plot["quant%02d" % quant] = fmts[msid] % quant_val
            quant_line += "," + fmts[msid] % quant_val
        quant_table += quant_line + "\n"

        for histscale in ("log", "lin"):
            fig = plt.figure(20 + fig_id, figsize=(4, 3))
            fig.clf()
            ax = fig.gca()
            ax.hist(diff / scale, bins=50, log=(histscale == "log"))
            ax.set_title(msid.upper() + " residuals: data - model")
            ax.set_xlabel(labels[msid])
            fig.subplots_adjust(bottom=0.18)
            filename = "%s_valid_hist_%s.png" % (msid, histscale)
            outfile = os.path.join(outdir, filename)
            logger.info("Writing plot file %s" % outfile)
            fig.savefig(outfile)
            plot["hist" + histscale] = filename

        plots.append(plot)

    filename = os.path.join(outdir, "validation_quant.csv")
    logger.info("Writing quantile table %s" % filename)
    f = open(filename, "w")
    f.write(quant_table)
    f.close()

    # If run_start_time is specified this is likely for regression testing
    # or other debugging.  In this case write out the full predicted and
    # telemetered dataset as a pickle.
    if opt.run_start_time:
        filename = os.path.join(outdir, "validation_data.pkl")
        logger.info("Writing validation data %s" % filename)
        f = open(filename, "w")
        pickle.dump({"pred": pred, "tlm": tlm}, f, protocol=-1)
        f.close()

    return plots
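A hypothetical consumer of the returned list (names assumed from the dict keys set above): each entry carries the MSID name, plot file names and formatted quantile strings, so a short summary can be printed directly.

plots = make_validation_plots(opt, tlm, db)
for plot in plots:
    print(plot['msid'], plot.get('quant50'), plot['lines'])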
                    stream=sys.stdout)

tlm = fetch.MSIDset(['4HPOSARO', '4LPOSARO'], '2009:010', '2010:210', stat='5min')

# db = Ska.DBI.DBI(dbi='sybase')
datestart = DateTime('2008:360').date
datestop = DateTime('2010:220').date

if 1 or 'states' not in globals():
    print 'Getting states'
    db = Ska.DBI.DBI(dbi='sqlite', server='db_base.db3')
    states = db.fetchall("""SELECT * from cmd_states
                            WHERE datestop > '%s' AND datestart < '%s'"""
                         % (datestart, datestop))

state_vals_h = cmd_states.interpolate_states(states, tlm['4HPOSARO'].times)
state_vals_l = cmd_states.interpolate_states(states, tlm['4LPOSARO'].times)
hetg_state_pos = np.where(state_vals_h['hetg'] == 'RETR', 78.0, 6.0)
letg_state_pos = np.where(state_vals_l['letg'] == 'RETR', 77.0, 6.0)

diff = medfilt(tlm['4HPOSARO'].vals - hetg_state_pos, 9)

figure(1, figsize=(5.5, 4))
clf()
plot_cxctime(tlm['4HPOSARO'].times, diff, fmt='.-b')
title('4HPOSARO - states.hetg')
ylabel('degrees')
savefig('t/cmp_hetg.png')

figure(2, figsize=(5.5, 4))
clf()