def extract_ovl_vconfigs(rank_frames, channame, traindir, start, end, metric='eff/dt'):
    """
    returns a dictionary mapping active vconfigs to segments
    does NOT include "none" channel

    Returns (vconfigs, configs, column_map):
      vconfigs -- ordered list of (config_tuple, [segStart, segEnd])
      configs  -- dict: config_tuple -> cleaned segment list clipped to [start, end]
      column_map -- index of each field within a config tuple

    Raises ValueError if a unique vetolist file cannot be found for a rank
    frame, if `metric` is not one of 'eff/dt'/'vsig'/'useP', or if a config
    ends up with zero livetime.
    """
    vconfigs = []
    for rnkfr in rank_frames:
        trained, calib = idq.extract_timeseries_ranges(rnkfr)
        classifier = idq.extract_fap_name(rnkfr)

        # exactly one vetolist file is expected per training range
        vetolist = glob.glob("%s/%d_%d/ovl/ovl/*vetolist.eval" % (traindir, trained[0], trained[1]))
        if len(vetolist) != 1:
            raise ValueError(
                "trouble finding a single vetolist file for : %s" % rnkfr)
        vetolist = vetolist[0]
        v = event.loadstringtable(vetolist)

        # rank 0 maps to the sentinel "no active channel" config
        rankmap = {0: [(None, None, None, None, 0, 0)]}
        for line in v:
            metric_exp = float(line[ovl.vD['metric_exp']])
            if metric == 'eff/dt':
                rnk = ovl.effbydt_to_rank(metric_exp)
            elif metric == 'vsig':
                rnk = ovl.vsig_to_rank(metric_exp)
            elif metric == 'useP':
                rnk = ovl.useP_to_rank(metric_exp)
            else:
                raise ValueError("metric=%s not understood" % metric)
            # several configs may share the same rank; setdefault replaces the
            # deprecated dict.has_key branching
            rankmap.setdefault(rnk, []).append(
                (line[ovl.vD['vchan']],
                 float(line[ovl.vD['vthr']]),
                 float(line[ovl.vD['vwin']]),
                 metric, metric_exp, rnk))

        for key, value in rankmap.items():
            rankmap[key] = tuple(value)

        # pull the rank timeseries for this frame and clip to [start, end]
        t, ts = idq.combine_gwf([rnkfr], [channame])
        t = t[0]
        truth = (start <= t) * (t <= end)
        t = t[truth]
        ts = ts[0][truth]

        if not len(ts):  # no samples within [start, end] for this frame
            continue

        # walk the timeseries and emit a segment each time the config changes
        configs = rankmap[ts[0]]
        segStart = t[0]
        for T, TS in zip(t, ts):
            if rankmap[TS] != configs:
                vconfigs.append((configs, [segStart, T]))
                segStart = T
                configs = rankmap[TS]
        # close the final segment, extended by one sample step
        vconfigs.append((configs, [segStart, T + t[1] - t[0]]))

    # collect segments per config, then clean and clip to [start, end]
    configs = {}
    for vconfig, seg in vconfigs:
        configs.setdefault(vconfig, []).append(seg)
    for key, value in configs.items():
        value = event.andsegments([event.fixsegments(value), [[start, end]]])
        if event.livetime(value):
            configs[key] = event.fixsegments(value)
        else:
            raise ValueError("somehow picked up a config with zero livetime...")

    return vconfigs, configs, {
        "vchan": 0,
        "vthr": 1,
        "vwin": 2,
        "metric": 3,
        "metric_exp": 4,
        "rank": 5
    }
# NOTE(review): this chunk starts mid-expression (the tail of a list
# comprehension whose head -- "faps = [fap for fap in idq.get_all_files_in_range(" --
# is missing) and ends on a dangling "if opts.verbose:". It duplicates the
# complete "#===..." chunk further down in this file and looks like a
# formatting-tool/paste artifact; confirm against version control before removing.
realtimedir, opts.start, opts.end, pad=0, suffix='.gwf') if ('fap' in fap) and ( opts.classifier == idq.extract_fap_name(fap)) and event.livetime( event.andsegments([[idq.extract_start_stop(fap, suffix=".gwf")], idqsegs])) ] ### compute total time covered #T = event.livetime( [idq.extract_start_stop(fap, suffix='.gwf') for fap in faps] )*1.0 T = event.livetime(idqsegs) * 1.0 ### combine timeseries and generate segments if opts.verbose: print "generating segments from %d fap files" % (len(faps)) segs = dict((fapThr, [[], 1.0]) for fapThr in opts.FAPthr) t, ts = idq.combine_gwf(faps, [fap_channame]) for t, ts in zip(t, ts): t, ts = idq.timeseries_in_segments(t, ts, idqsegs) for fapThr in opts.FAPthr: s, minFAP = idq.timeseries_to_segments( t, -ts, -fapThr) # we want FAP <= FAPthr <--> -FAP >= -FAPthr s = event.andsegments( [s, idqsegs] ) ### necessary because of how timeseries_to_segments may interact with timeseries_in_segments segs[fapThr][0] += s if minFAP != None: segs[fapThr][1] = min(segs[fapThr][1], -minFAP) if opts.verbose:
# NOTE(review): plotting/calibration chunk collapsed onto one physical line.
# It sets up rank/FAP axes, picks a reference time `to`, and -- when the
# calibration mode is 'dat' -- loads combined FAP and FAP-UpperLimit
# timeseries and clips them to the plotting window. The trailing for-loop is
# truncated (its body ends abruptly at "dur = ..."); the remainder is not in
# view, so the code is left byte-identical. Appears duplicated by the next
# chunk -- confirm against version control.
rnk_ax = fig.add_axes(isp.rank_splittimeseries_axpos) fap_ax = fig.add_axes(isp.fap_splittimeseries_axpos) fap_figax = (fig, fap_ax) rnk_figax = (fig, rnk_ax) if opts.gps: to = opts.gps # rnk_ax.plot( np.zeros((2, )), rnk_ax.get_ylim(), ':k', linewidth=2, alpha=0.5) # rnk_ax.text( 0.0, 0.8, "%.3f"%opts.gps, ha='center', va='center') else: to = opts.plotting_gps_start ### check calibration type -> include UpperLimits when possible if config.get('calibration', 'mode') == 'dat': f_times, f_timeseries = idq.combine_gwf(fap_filenames, [fap_channame, fapUL_channame]) fUL_timeseries = [f[1] for f in f_timeseries] f_timeseries = [f[0] for f in f_timeseries] ### write combined data to disk if opts.verbose: print "writing combined fap frames to disk" for t, ts, tS in zip(f_times, f_timeseries, fUL_timeseries): truth = (opts.plotting_gps_start <= t) * (t <= opts.plotting_gps_end) t = t[truth] ts = ts[truth] tS = tS[truth] start = int(t[0]) dt = t[1] - t[0] dur = int(len(t) * dt)
# NOTE(review): near-byte duplicate of the previous chunk (only whitespace
# around operators differs) -- evidently the pre-formatting version of the
# same region. Also truncated mid-loop at "dur = ...", so left byte-identical.
# One of the two copies should be removed once verified against version control.
rnk_ax = fig.add_axes(isp.rank_splittimeseries_axpos) fap_ax = fig.add_axes(isp.fap_splittimeseries_axpos) fap_figax = (fig, fap_ax) rnk_figax = (fig, rnk_ax) if opts.gps: to = opts.gps # rnk_ax.plot( np.zeros((2, )), rnk_ax.get_ylim(), ':k', linewidth=2, alpha=0.5) # rnk_ax.text( 0.0, 0.8, "%.3f"%opts.gps, ha='center', va='center') else: to = opts.plotting_gps_start ### check calibration type -> include UpperLimits when possible if config.get('calibration','mode')=='dat': f_times, f_timeseries = idq.combine_gwf(fap_filenames, [fap_channame, fapUL_channame]) fUL_timeseries = [f[1] for f in f_timeseries] f_timeseries = [f[0] for f in f_timeseries] ### write combined data to disk if opts.verbose: print "writing combined fap frames to disk" for t, ts, tS in zip(f_times, f_timeseries, fUL_timeseries): truth = (opts.plotting_gps_start <= t)*(t <= opts.plotting_gps_end) t = t[truth] ts = ts[truth] tS = tS[truth] start = int(t[0]) dt = t[1]-t[0] dur = int(len(t)*dt)
def extract_ovl_vconfigs( rank_frames, channame, traindir, start, end, metric='eff/dt' ):
    """
    returns a dictionary mapping active vconfigs to segments
    does NOT include "none" channel

    Output is (vconfigs, configs, column-index map):
      vconfigs -- ordered list of (config tuple, [segStart, segEnd])
      configs  -- config tuple -> cleaned segment list restricted to [start, end]
      the trailing dict names the position of each field within a config tuple
    """
    vconfigs = []
    for rnkfr in rank_frames:
        trained, calib = idq.extract_timeseries_ranges( rnkfr )
        classifier = idq.extract_fap_name( rnkfr )

        ### exactly one vetolist is expected for this training range
        vetolist = glob.glob( "%s/%d_%d/ovl/ovl/*vetolist.eval"%(traindir, trained[0], trained[1]) )
        if len(vetolist) != 1:
            raise ValueError( "trouble finding a single vetolist file for : %s"%rnkfr )
        vetolist = vetolist[0]
        v = event.loadstringtable( vetolist )

        ### rank 0 <-> no channel active
        rankmap = { 0:[(None, None, None, None, 0, 0)] }
        for line in v:
            metric_exp = float(line[ovl.vD['metric_exp']])
            if metric == 'eff/dt':
                rnk = ovl.effbydt_to_rank( metric_exp )
            elif metric == 'vsig':
                rnk = ovl.vsig_to_rank( metric_exp )
            elif metric == 'useP':
                rnk = ovl.useP_to_rank( metric_exp )
            else:
                raise ValueError("metric=%s not understood"%metric)
            config = (line[ovl.vD['vchan']], float(line[ovl.vD['vthr']]), float(line[ovl.vD['vwin']]), metric, metric_exp, rnk)
            if rnk in rankmap: ### "in" instead of the deprecated dict.has_key
                rankmap[rnk].append( config )
            else:
                rankmap[rnk] = [ config ]

        for key, value in rankmap.items():
            rankmap[key] = tuple(value)

        ### load the rank timeseries and keep only [start, end]
        t, ts = idq.combine_gwf( [rnkfr], [channame])
        t = t[0]
        truth = (start <= t)*(t <= end)
        t = t[truth]
        ts = ts[0][truth]

        if not len(ts): ### no samples within the requested window
            continue

        ### emit a segment whenever the active config changes
        configs = rankmap[ts[0]]
        segStart = t[0]
        for T, TS in zip(t, ts):
            if rankmap[TS] != configs:
                vconfigs.append( (configs, [segStart, T] ) )
                segStart = T
                configs = rankmap[TS]
        vconfigs.append( (configs, [segStart, T+t[1]-t[0]] ) ) ### close the final segment, padded by one sample step

    ### gather segments per config, then clean and clip
    configs = {}
    for vconfig, seg in vconfigs:
        if vconfig in configs:
            configs[vconfig].append( seg )
        else:
            configs[vconfig] = [ seg ]
    for key, value in configs.items():
        value = event.andsegments( [event.fixsegments( value ), [[start,end]] ] )
        if event.livetime( value ):
            configs[key] = event.fixsegments( value )
        else:
            raise ValueError("somehow picked up a config with zero livetime...")

    return vconfigs, configs, {"vchan":0, "vthr":1, "vwin":2, "metric":3, "metric_exp":4, "rank":5}
#=================================================================================================== ### Find all FAP files if opts.verbose: print "finding all fap*gwf files" faps = [fap for fap in idq.get_all_files_in_range( realtimedir, opts.start, opts.end, pad=0, suffix='.gwf') if ('fap' in fap) and (opts.classifier==idq.extract_fap_name( fap )) and event.livetime(event.andsegments([[idq.extract_start_stop(fap, suffix=".gwf")], idqsegs])) ] ### compute total time covered #T = event.livetime( [idq.extract_start_stop(fap, suffix='.gwf') for fap in faps] )*1.0 T = event.livetime( idqsegs )*1.0 ### combine timeseries and generate segments if opts.verbose: print "generating segments from %d fap files"%(len(faps)) segs = dict( (fapThr, [[], 1.0]) for fapThr in opts.FAPthr ) t, ts = idq.combine_gwf(faps, [fap_channame]) for t, ts in zip(t, ts): t, ts = idq.timeseries_in_segments( t, ts, idqsegs ) for fapThr in opts.FAPthr: s, minFAP = idq.timeseries_to_segments(t, -ts, -fapThr) # we want FAP <= FAPthr <--> -FAP >= -FAPthr s = event.andsegments( [s, idqsegs] ) ### necessary because of how timeseries_to_segments may interact with timeseries_in_segments segs[fapThr][0] += s if minFAP!=None: segs[fapThr][1] = min(segs[fapThr][1], -minFAP) if opts.verbose: print "computing associated deadtimes" dt = [event.livetime(segs[fapThr][0])/T for fapThr in opts.FAPthr] maxFAP = [segs[fapThr][1] for fapThr in opts.FAPthr]
# NOTE(review): rank->fap calibration chunk collapsed onto one physical line.
# It writes the list of rank files to a cache file, loads rank timeseries
# (npy or gwf depending on opts.mode), and restricts them to idqsegs. The
# chunk is truncated: the final "for t, ts in zip(times, ranks):" has no loop
# body in view, and `r` is defined elsewhere -- left byte-identical.
# Duplicated (pre-formatting) by a later chunk starting with a dangling "elif".
### write list of dats to cache file cache = idq.cache(output_dir, classifier, "_rankcache%s" % usertag) logger.info('writing list of rank files to %s' % cache) f = open(cache, 'w') for rank in ranksD[classifier]: print >> f, rank f.close() logger.info( ' analyzing rank timeseries to obtain mapping from rank->fap') ### load in timeseries if opts.mode == "npy": _times, timeseries = idq.combine_ts(ranksD[classifier], n=1) else: ### opts.mode=="gwf" _times, timeseries = idq.combine_gwf( ranksD[classifier], [channameD[classifier]['rank']]) times = [] ranks = [] for t, ts in zip(_times, timeseries): _t, _ts = idq.timeseries_in_segments(t, ts, idqsegs) if len(_ts): times.append(_t) ranks.append(_ts) ### need to compute deadsecs for every rank in r -> function call (probably within calibration module)! crank = [] for _r in r: dsec = 0 for t, ts in zip(times, ranks):
# NOTE(review): segment-XML chunk collapsed onto one physical line. It builds
# a SegmentTable, attaches segdef/segsum/segtab to xml_element, then iterates
# classifiers, loading FAP timeseries from frames and creating one SegmentDef
# row per FAP threshold. Truncated mid-"for FAPthr" loop (row is never added
# to a table in view), so left byte-identical. A later chunk starting with
# "])" duplicates this region -- confirm against version control.
segtab = lsctables.New( lsctables.SegmentTable, columns=["process_id", "segment_def_id", "segment_id", "start_time", "start_time_ns", "end_time", "end_time_ns"] ) xml_element.appendChild( segdef ) xml_element.appendChild( segsum ) xml_element.appendChild( segtab ) ### iterate through classifiers for classifier in opts.classifier: logger.info('Begin: generating segments for %s'%classifier) faps = fapsD[classifier] logger.info(' found %d files'%(len(faps))) ### need to load in time-series from frames here! chan = idq.channame(ifo, classifier, "%s_fap"%usertag) t, ts = idq.combine_gwf(faps, [chan]) ### loads in the data from frames logger.info(' found %d continous segments'%(len(t))) ### set up segdef row fap2segdef_id = {} for FAPthr in opts.FAPthr: segdef_id = segdef.get_next_id() segdef_row = lsctables.SegmentDef() segdef_row.process_id = proc_id segdef_row.segment_def_id = segdef_id segdef_row.ifos = ifo segdef_row.name = classifier segdef_row.version = 1 segdef_row.comment = 'FAPthr=%.9e'%FAPthr
# NOTE(review): pre-formatting duplicate of the earlier "### write list of
# dats to cache file" chunk. It begins with a dangling
# 'elif (opts.mode == "npy") or (opts.mode == "gwf"):' whose matching `if` is
# not in view, and is likewise truncated at the trailing
# "for t, ts in zip(times, ranks):" -- left byte-identical; one of the two
# copies should be removed once verified against version control.
elif (opts.mode == "npy") or (opts.mode == "gwf"): ### write list of dats to cache file cache = idq.cache(output_dir, classifier, "_rankcache%s"%usertag) logger.info('writing list of rank files to %s'%cache) f = open(cache, 'w') for rank in ranksD[classifier]: print >>f, rank f.close() logger.info(' analyzing rank timeseries to obtain mapping from rank->fap') ### load in timeseries if opts.mode == "npy": _times, timeseries = idq.combine_ts(ranksD[classifier], n=1) else: ### opts.mode=="gwf" _times, timeseries = idq.combine_gwf(ranksD[classifier], [channameD[classifier]['rank']]) times = [] ranks = [] for t, ts in zip(_times, timeseries): _t, _ts = idq.timeseries_in_segments(t, ts, idqsegs) if len(_ts): times.append( _t ) ranks.append( _ts ) ### need to compute deadsecs for every rank in r -> function call (probably within calibration module)! crank = [] for _r in r: dsec = 0 for t, ts in zip(times, ranks):
# NOTE(review): formatted duplicate of the earlier "segtab = lsctables.New("
# chunk. It starts mid-expression with a stray "])" (the head of the
# lsctables.New call is missing) and is truncated inside the same
# "for FAPthr" loop -- left byte-identical; reconcile the two copies against
# version control rather than editing either in place.
]) xml_element.appendChild(segdef) xml_element.appendChild(segsum) xml_element.appendChild(segtab) ### iterate through classifiers for classifier in opts.classifier: logger.info('Begin: generating segments for %s' % classifier) faps = fapsD[classifier] logger.info(' found %d files' % (len(faps))) ### need to load in time-series from frames here! chan = idq.channame(ifo, classifier, "%s_fap" % usertag) t, ts = idq.combine_gwf(faps, [chan]) ### loads in the data from frames logger.info(' found %d continous segments' % (len(t))) ### set up segdef row fap2segdef_id = {} for FAPthr in opts.FAPthr: segdef_id = segdef.get_next_id() segdef_row = lsctables.SegmentDef() segdef_row.process_id = proc_id segdef_row.segment_def_id = segdef_id segdef_row.ifos = ifo segdef_row.name = classifier segdef_row.version = 1 segdef_row.comment = 'FAPthr=%.9e' % FAPthr