for ftype in masks:
    # Write one CSV table per failure type, one row per trend time bin.
    # 'with' guarantees the file is closed even if a bin raises NoStarError
    # (original used open()/close() and leaked the handle on error).
    with open("bymonth_data_%s.txt" % ftype, 'w') as dtable:
        dtable.write("time,rate,err,n_stars,n_fail,err_hi,err_low\n")
        curr_unit = trend_start_unit
        while curr_unit != now_unit:
            # 'trange' (not 'range') to avoid shadowing the builtin.
            trange = timerange(curr_unit)
            range_mask = ((stars['tstart'] >= DateTime(trange['start']).secs)
                          & (stars['tstart'] < DateTime(trange['stop']).secs))
            range_stars = stars[range_mask]
            range_fail = masks[ftype][range_mask]
            if not len(range_stars):
                raise NoStarError("No stars in range")
            n_stars = len(range_stars)
            n_failed = len(np.flatnonzero(range_fail))
            # '* 1.0' forces float division (Python-2 safe, matches the
            # idiom used elsewhere in this file).
            fail_rate = n_failed * 1.0 / n_stars
            err_high, err_low = high_low_rate(n_failed, n_stars)
            # Report the rate at the bin midpoint in fractional years.
            mid_frac = ((DateTime(trange['start']).frac_year
                         + DateTime(trange['stop']).frac_year) / 2)
            dtable.write("%.2f,%.6f,%.6f,%d,%d,%.4f,%.4f\n"
                         % (mid_frac, fail_rate,
                            np.max([err_high, err_low]),
                            n_stars, n_failed, err_high, err_low))
            # Reuse 'trange' instead of recomputing timerange(curr_unit).
            next_range = get_next(trange)
            curr_unit = in_range(trend_type, next_range['start'])
def star_info(stars, predictions, bad_thresh, obc_bad_thresh,
              tname, range_datestart, range_datestop, outdir):
    """
    Generate a report dictionary for the time range.

    :param stars: recarray of all stars available in the table for the range
    :param predictions: dict of predicted failure rates, keyed '<ftype>_rate'
    :param bad_thresh: fraction-not-tracking threshold for 'bad_trak'
    :param obc_bad_thresh: f_obc_bad fraction threshold for 'obc_bad'
    :param tname: timerange string (e.g. 2010-M05)
    :param range_datestart: Chandra.Time DateTime of start of reporting interval
    :param range_datestop: Chandra.Time DateTime of end of reporting interval
    :param outdir: directory receiving the per-failure-type HTML star lists
    :rtype: dict of report values
    :raises NoStarError: if no stars fall in the range
    """
    def _human_date(dt):
        # Reformat a 'YYYYMonDD...' caldate string as 'YYYY-Mon-DD'.
        cal = dt.caldate
        return "{}-{}-{}".format(cal[0:4], cal[4:7], cal[7:9])

    def _flat_fail(star):
        # Flat dict for make_fail_html describing one failed star
        # (was duplicated inline in two places in the original).
        return dict(id=star['agasc_id'],
                    obsid=star['obsid'],
                    mag=star['mag_aca'],
                    mag_obs=star['aoacmag_mean'],
                    bad_track=(1.0 - star['f_track']),
                    obc_bad_status=star['f_obc_bad'],
                    color=star['color'])

    rep = {'datestring': tname,
           'datestart': DateTime(range_datestart).date,
           'datestop': DateTime(range_datestop).date,
           'human_date_start': _human_date(range_datestart),
           'human_date_stop': _human_date(range_datestop)}

    rep['n_stars'] = len(stars)
    rep['fail_types'] = []
    if not len(stars):
        raise NoStarError("No acq stars in range")

    # Boolean selections for each failure category.
    fail_stars = dict(
        bad_trak=stars[(1.0 - stars['f_track']) > bad_thresh],
        obc_bad=stars[stars['f_obc_bad'] > obc_bad_thresh],
        no_trak=stars[stars['f_track'] == 0])

    fail_types = ['bad_trak', 'no_trak', 'obc_bad']
    for ftype in fail_types:
        trep = {}
        trep['type'] = ftype
        trep['n_stars'] = len(fail_stars[ftype])
        # '* 1.0' forces float division (Python-2 safe).
        trep['rate'] = len(fail_stars[ftype]) * 1.0 / rep['n_stars']
        trep['rate_err_high'], trep['rate_err_low'] = high_low_rate(
            trep['n_stars'], rep['n_stars'])
        trep['n_stars_pred'] = predictions['%s_rate' % ftype] * rep['n_stars']
        trep['rate_pred'] = predictions['%s_rate' % ftype]
        # Poisson probability of seeing this few / this many failures
        # given the predicted count.
        trep['p_less'] = scipy.stats.poisson.cdf(
            trep['n_stars'], trep['n_stars_pred'])
        trep['p_more'] = 1 - scipy.stats.poisson.cdf(
            trep['n_stars'] - 1, trep['n_stars_pred'])
        flat_fails = [_flat_fail(star) for star in fail_stars[ftype]]
        outfile = os.path.join(outdir, "%s_stars_list.html" % ftype)
        trep['fail_url'] = "%s_stars_list.html" % ftype
        rep['fail_types'].append(trep)
        make_fail_html(flat_fails, outfile)

    rep['by_mag'] = []
    # Looping first over mag and then over fail type for a better
    # data structure.  'bin_width' (not 'bin') avoids shadowing the builtin.
    bin_width = 0.1
    for tmag_start in np.arange(10.0, 10.8, 0.1):
        mag_range_stars = stars[
            (stars['mag_aca'] >= tmag_start)
            & (stars['mag_aca'] < (tmag_start + bin_width))]
        mag_rep = dict(mag_start=tmag_start,
                       mag_stop=(tmag_start + bin_width),
                       n_stars=len(mag_range_stars))
        for ftype in fail_types:
            mag_range_fails = fail_stars[ftype][
                (fail_stars[ftype]['mag_aca'] >= tmag_start)
                & (fail_stars[ftype]['mag_aca'] < (tmag_start + bin_width))]
            flat_fails = [_flat_fail(star) for star in mag_range_fails]
            failed_star_file = "%s_%.1f_stars_list.html" % (ftype, tmag_start)
            make_fail_html(flat_fails, os.path.join(outdir, failed_star_file))
            mag_rep["%s_n_stars" % ftype] = len(mag_range_fails)
            mag_rep["%s_fail_url" % ftype] = failed_star_file
            if len(mag_range_stars) == 0:
                # No stars in this mag bin: report a zero rate rather
                # than dividing by zero.
                mag_rep["%s_rate" % ftype] = 0
            else:
                mag_rep["%s_rate" % ftype] = (len(mag_range_fails) * 1.0
                                              / len(mag_range_stars))
        rep['by_mag'].append(mag_rep)
    return rep
now = mx.DateTime.now() data[range_type] = {} for mag in mag_ranges: t = t0.copy() data[range_type][mag] = [] while t['stop'] < now: new_t = get_next(t) t = new_t range_gui = stars[(stars['kalman_tstart'] >= DateTime(new_t['start']).secs) & (stars['kalman_tstop'] < DateTime(new_t['stop']).secs) & (stars['mag_exp'] < mag_ranges[mag]['faint']) & (stars['mag_exp'] >= mag_ranges[mag]['bright'])] if len(range_gui) > 1: bad_trak_frac = np.mean(range_gui['not_tracking_samples'] / range_gui['n_samples']) bad_trak = range_gui[range_gui['not_tracking_samples'] / range_gui['n_samples'] > bad_thresh] bad_trak_err_high, bad_trak_err_low = high_low_rate(len(bad_trak), len(range_gui)) obc_bad = range_gui[range_gui['obc_bad_status_samples'] / range_gui['n_samples'] > obc_bad_thresh] obc_bad_err_high, obc_bad_err_low = high_low_rate(len(obc_bad), len(range_gui)) no_trak = range_gui[range_gui['not_tracking_samples'] == range_gui['n_samples']] no_trak_err_high, no_trak_err_low = high_low_rate(len(no_trak), len(range_gui)) entry = [((DateTime(new_t['start']).secs + DateTime(new_t['stop']).secs)/2), (DateTime((DateTime(new_t['start']).secs + DateTime(new_t['stop']).secs)/2).frac_year), new_t['year'], new_t['subid'], np.mean(range_gui['mag_exp']), bad_trak_frac, (len(bad_trak) / len(range_gui)), bad_trak_err_high, bad_trak_err_low, (len(obc_bad) / len(range_gui)),
(all_acq["tstart"] >= DateTime(new_t["start"]).secs) & (all_acq["tstop"] < DateTime(new_t["stop"]).secs) & (all_acq["mag"] < mag_ranges[mag]["faint"]) & (all_acq["mag"] >= mag_ranges[mag]["bright"]) ] good = range_acqs[range_acqs["obc_id"] == "ID"] bad = range_acqs[range_acqs["obc_id"] == "NOID"] n50_mean = np.mean(range_acqs["n50"]) n75_mean = np.mean(range_acqs["n75"]) n100_mean = np.mean(range_acqs["n100"]) n125_mean = np.mean(range_acqs["n125"]) n150_mean = np.mean(range_acqs["n150"]) n200_mean = np.mean(range_acqs["n200"]) n1000_mean = np.mean(range_acqs["n1000"]) if len(range_acqs): err_high, err_low = high_low_rate(len(bad), len(range_acqs)) data[range_type][mag].append( [ ((DateTime(new_t["start"]).secs + DateTime(new_t["stop"]).secs) / 2), (DateTime((DateTime(new_t["start"]).secs + DateTime(new_t["stop"]).secs) / 2).frac_year), new_t["year"], new_t["subid"], (len(bad) / len(range_acqs)), err_high, err_low, (n50_mean / (1024 * 1024)), (n75_mean / (1024 * 1024)), (n100_mean / (1024 * 1024)), (n125_mean / (1024 * 1024)), (n150_mean / (1024 * 1024)), (n200_mean / (1024 * 1024)),