Example #1
### iterate over gps and load timing differences
trgdata = {}
for gps in args:
    print "gps : %.9f"%(gps)

    minwin = opts.window

    ### go find triggers
    if opts.verbose:
        print "\tdiscoverying KW triggers within [%.9f, %.9f]"%(gps-opts.window, gps+opts.window)

    ### figure out which files you want
    filenames = []
    coverage = []
    for gdsdir in kwgdsdirs:
        for filename in idq.get_all_files_in_range(gdsdir, gps-opts.window, gps+opts.window, pad=0, suffix=".trg"):
            seg = idq.extract_start_stop(filename, suffix=".trg")
            if not event.livetime(event.andsegments([coverage, [seg]])):
                coverage = event.fixsegments( coverage + [seg] )
                filenames.append( filename )

    ### figure out the extent of the coverage
    if len(event.include([[gps]], coverage, tcent=0)) == 0:
        if opts.force:
            if opts.verbose:
                print "no triggers found for gps : %.3f"%(gps)
            continue
        else:
            raise ValueError("no triggers found for gps : %.3f"%(gps))
    for s, e in coverage:
        if s < gps:
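### (the snippet above is truncated mid-loop)
### A minimal, self-contained sketch of the segment arithmetic the loop above
### relies on. These pure-Python stand-ins illustrate what event.fixsegments,
### event.andsegments, and event.livetime appear to do here (merge segment
### lists, intersect them, sum their durations); they are illustrative
### assumptions, not the event module's actual implementation.

def fixsegments_sketch(segs):
    """merge overlapping or abutting [start, stop] segments into a sorted list"""
    merged = []
    for s, e in sorted(segs):
        if merged and s <= merged[-1][1]:
            merged[-1][1] = max(merged[-1][1], e)
        else:
            merged.append([s, e])
    return merged

def andsegments_sketch(seglists):
    """intersect a list of segment lists"""
    result = seglists[0]
    for other in seglists[1:]:
        out = []
        for s1, e1 in result:
            for s2, e2 in other:
                s, e = max(s1, s2), min(e1, e2)
                if s < e:
                    out.append([s, e])
        result = out
    return result

def livetime_sketch(segs):
    """total duration covered by a segment list"""
    return sum(e - s for s, e in segs)

### usage mirroring the loop above: keep a file only when its segment adds
### coverage we do not already have
coverage = [[0.0, 10.0]]
seg = [5.0, 15.0]
if not livetime_sketch(andsegments_sketch([coverage, [seg]])):
    coverage = fixsegments_sketch(coverage + [seg])
print(coverage)  # [[0.0, 10.0]] -- the segment overlaps existing coverage, so it is skipped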
Example #2
rank_channame = idq.channame(ifo, opts.classifier, "%s_rank" % tag)
fap_channame = idq.channame(ifo, opts.classifier, "%s_fap" % tag)
fapUL_channame = idq.channame(ifo, opts.classifier, "%s_fapUL" % tag)

#===================================================================================================

# get all *.gwf files in range

if opts.verbose:
    print "Finding relevant *.gwf files"
rank_filenames = []
fap_filenames = []
all_files = idq.get_all_files_in_range(realtimedir,
                                       opts.plotting_gps_start,
                                       opts.plotting_gps_end,
                                       pad=0,
                                       suffix='.gwf')
for filename in all_files:
    if opts.classifier == idq.extract_fap_name(
            filename):  # and ifo in filename: ### this last bit not needed?
        if 'rank' in filename:
            rank_filenames.append(filename)
        if 'fap' in filename:
            fap_filenames.append(filename)

rank_filenames.sort()
fap_filenames.sort()
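
### A hedged sketch of the discovery step used throughout these scripts,
### assuming the *-GPSSTART-DURATION.suffix naming that the calls to
### idq.extract_start_stop imply. It illustrates the apparent contract of
### idq.get_all_files_in_range (return every file whose [start, stop) span
### intersects the query range); it is not the library's actual code.
import os

def extract_start_stop_sketch(path, suffix='.gwf'):
    """parse '...-GPSSTART-DURATION<suffix>' into (start, stop)"""
    stem = os.path.basename(path)[:-len(suffix)]
    fields = stem.split('-')
    start, dur = int(fields[-2]), int(fields[-1])
    return start, start + dur

def get_all_files_in_range_sketch(dirname, start, end, suffix='.gwf'):
    keep = []
    for root, _, names in os.walk(dirname):
        for name in names:
            if name.endswith(suffix):
                s, e = extract_start_stop_sketch(name, suffix=suffix)
                if s < end and e > start:  # half-open overlap test
                    keep.append(os.path.join(root, name))
    return sorted(keep)

print(extract_start_stop_sketch('H1_idq_mla-1126259400-64.gwf'))  # (1126259400, 1126259464)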

if (not rank_filenames) or (not fap_filenames):  # we couldn't find either rank or fap files
    # exit gracefully
    if opts.verbose:
        print "no iDQ timeseries for %s at %s"%(opts.classifier, ifo)

#---

gdbdir = config.get('gdb general', 'main_gdb_dir')

if not opts.skip_gracedb_upload:
    if config.has_option('gdb general', 'gdb_url'):
        gracedb = GraceDb(config.get('gdb general', 'gdb_url'))
    else:
        gracedb = GraceDb()

##########################################
### Find relevant files
##########################################

if opts.verbose:
    print 'Finding relevant *glitch*.xml files'
gchxml_filenames = sorted([filename for filename in 
                          idq.get_all_files_in_range(realtimedir, opts.start, opts.end, pad=0, suffix='.xml.gz') 
                          if opts.classifier == idq.extract_xml_name(filename) 
                          and 'glitch' in filename
                          and ifo in filename])
                        
if not gchxml_filenames:
    # no files found, print the message, and exit
    if not opts.skip_gracedb_upload:
        gracedb.writeLog(opts.gracedb_id, message="No iDQ glitch tables from "+opts.classifier+" available for the candidate at "+ifo)
    print "No glitch files found, exiting."
    sys.exit(0)

if opts.verbose:
    print "Found:"
    for filename in gchxml_filenames:
        print '\t' + filename
parser.add_option("", "--gps-start-time", default="", type="string",help="GPS start time of the period  to be processed")
parser.add_option("", "--gps-end-time", default="", type="string",help="GPS end time of the period  to be processed")
parser.add_option("", "--output-file",type="string", default="auxmvc_samples.pat", help="path and name of the output file to save auxmvc feature vectors")
parser.add_option("", "--verbose", action="store_true", default=False, help="run in verbose mode")
(opts,args) = parser.parse_args()


main_channel = opts.main_channel

if opts.gps_start_time and opts.gps_end_time:
	gps_start_time = int(opts.gps_start_time)
	gps_end_time = int(opts.gps_end_time)

# get list of trigger files
if opts.trigger_dir:
	trig_files = idq.get_all_files_in_range(opts.trigger_dir, gps_start_time, gps_end_time, pad=0, suffix='.trg')
else:
	trig_files = glob.glob(opts.trigger_files)

if not trig_files:
	print "Warning: Empty list of trigger files, exiting without doing anything."
	sys.exit(0)

if opts.verbose: 
	print "Loading triggers ..."
# load triggers from files
trigger_dict = event.loadkwm(trig_files) 
if main_channel not in trigger_dict:
    trigger_dict[main_channel] = []
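
### A sketch of what event.loadkwm appears to return: a dict mapping channel
### name -> list of trigger rows. The row layout assumed here (whitespace-
### separated numeric fields with the channel name in the last column) is an
### illustrative assumption, not the definitive KW .trg format.

def loadkwm_sketch(paths, trigs_dict=None):
    trigs = trigs_dict if trigs_dict is not None else {}
    for path in paths:
        for line in open(path):
            fields = line.split()
            if not fields or fields[0].startswith('#'):
                continue  # skip blank/comment lines
            channel = fields[-1]
            trigs.setdefault(channel, []).append([float(x) for x in fields[:-1]])
    return trigs

### the guard above ("if main_channel not in trigger_dict") then lets
### downstream code iterate safely even when the channel produced no triggers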

if opts.verbose:
#---

fap_channame = idq.channame(ifo, opts.classifier, "%s_fap" % tag)
fapUL_channame = idq.channame(ifo, opts.classifier, "%s_fapUL" % tag)

flavor = config.get(opts.classifier, 'flavor')
if config.has_option(opts.classifier, 'plotting_label'):
    plotting_label = config.get(opts.classifier, 'plotting_label')
else:
    plotting_label = opts.classifier

#===================================================================================================

### Find all FAP files
if opts.verbose:
    print "finding all fap*gwf files"
faps = [
    fap for fap in idq.get_all_files_in_range(
        realtimedir, opts.start, opts.end, pad=0, suffix='.gwf')
    if ('fap' in fap) and (
        opts.classifier == idq.extract_fap_name(fap)) and event.livetime(
            event.andsegments([[idq.extract_start_stop(fap, suffix=".gwf")],
                               idqsegs]))
]

### compute total time covered
#T = event.livetime( [idq.extract_start_stop(fap, suffix='.gwf') for fap in faps] )*1.0
T = event.livetime(idqsegs) * 1.0

### combine timeseries and generate segments
if opts.verbose:
    print "generating segments from %d fap files" % (len(faps))
segs = dict((fapThr, [[], 1.0]) for fapThr in opts.FAPthr)
t, ts = idq.combine_gwf(faps, [fap_channame])
#===================================================================================================

rank_channame  = idq.channame(ifo, opts.classifier, "%s_rank"%tag)
fap_channame   = idq.channame(ifo, opts.classifier, "%s_fap"%tag)
fapUL_channame = idq.channame(ifo, opts.classifier, "%s_fapUL"%tag)

#===================================================================================================

# get all *.gwf files in range

if opts.verbose:
    print "Finding relevant *.gwf files"
rank_filenames = []
fap_filenames = []
all_files = idq.get_all_files_in_range(realtimedir, opts.plotting_gps_start, opts.plotting_gps_end, pad=0, suffix='.gwf')
for filename in all_files:
    if opts.classifier == idq.extract_fap_name(filename): # and ifo in filename: ### this last bit not needed?
        if 'rank' in filename:
            rank_filenames.append(filename)
        if 'fap' in filename:
            fap_filenames.append(filename)

rank_filenames.sort()
fap_filenames.sort()

if (not rank_filenames) or (not fap_filenames): # we couldn't find either rank or fap files
    # exit gracefully
    if opts.verbose:
        print "no iDQ timeseries for %s at %s"%(opts.classifier, ifo)
    if not opts.skip_gracedb_upload:

#---

    if opts.ignore_science_segments:
        idqseg_path = idq.idqsegascii(output_dir, '', gpsstart - lookback, lookback+stride)
    else:
        idqseg_path = idq.idqsegascii(output_dir, '_%s'%dq_name, gpsstart - lookback, lookback+stride)
    f = open(idqseg_path, 'w')
    for seg in idqsegs:
        print >> f, seg[0], seg[1]
    f.close()

    #===============================================================================================
    # update mappings via uroc files
    #===============================================================================================

    ### find all *dat files, bin them according to classifier
    ### needed for opts.mode=="dat" and KDE estimates
    logger.info('finding all *dat files')
    datsD = defaultdict( list )
    for dat in idq.get_all_files_in_range(realtimedir, gpsstart-lookback, gpsstart+stride, pad=0, suffix='.dat' ):
        datsD[idq.extract_dat_name( dat )].append( dat )

    ### throw away any un-needed files
    for key in datsD.keys():
        if key not in classifiers:
            datsD.pop(key) 
        else: ### throw out files that don't contain any science time
            datsD[key] = [ dat for dat in datsD[key] if event.livetime(event.andsegments([idqsegs, [idq.extract_start_stop(dat, suffix='.dat')]])) ]

    if opts.mode=="npy": ### need rank files
        ### find all *rank*npy.gz files, bin them according to classifier
        logger.info('  finding all *rank*.npy.gz files')
        ranksD = defaultdict( list )
        for rank in [rank for rank in  idq.get_all_files_in_range(realtimedir, gpsstart-lookback, gpsstart+stride, pad=0, suffix='.npy.gz') if "rank" in rank]:
            ranksD[idq.extract_fap_name( rank )].append( rank ) ### should just work...
#---

### write segment file
if opts.ignore_science_segments:
    idqseg_path = idq.idqsegascii(opts.output_dir, '', startgps, stride)
else:
    idqseg_path = idq.idqsegascii(opts.output_dir, '_%s'%dq_name, startgps , stride)
f = open(idqseg_path, 'w')
for seg in idqsegs:
    print >> f, seg[0], seg[1]
f.close()
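
### The idqsegs file written above holds two whitespace-separated GPS values
### per line, so reading it back is the mirror image. A minimal sketch:
def read_idqseg_ascii(path):
    segs = []
    for line in open(path):
        fields = line.split()
        if len(fields) >= 2:
            segs.append([float(fields[0]), float(fields[1])])
    return segs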

#========================
# go findeth the frame data
#========================
logger.info('  finding all *fap*.gwf files')
fapsD = defaultdict( list )
for fap in [fap for fap in  idq.get_all_files_in_range(realtimedir, lookup_startgps, lookup_endgps, pad=0, suffix='.gwf') if "fap" in fap]:
    fapsD[idq.extract_fap_name( fap )].append( fap )

### throw away files we will never need
for key in fapsD.keys():
    if key not in opts.classifier: ### throw away unwanted files
        fapsD.pop(key)
    else: ### keep only files that overlap with scisegs
        fapsD[key] = [ fap for fap in fapsD[key] if event.livetime(event.andsegments([idqsegs, [idq.extract_start_stop(fap, suffix='.gwf')]])) ]

#========================
# iterate through classifiers -> generate segments
#========================

### set up xml document
from glue.ligolw import ligolw
#---

wait = opts.gps + delay - int(idq.nowgps())
if wait > 0:
    print 'waiting %.1f seconds before processing' % wait
    time.sleep(wait)

# =================================================
#
# finding KW.trg files
#
# =================================================

padding = max(float(myconf['padding']),
              float(config.get('build_auxmvc_vectors', 'time-window')))

kwdirname = '%s/%s/' % (config.get('general', 'gdsdir'), kwbasename)
kwfilenames = idq.get_all_files_in_range(kwdirname, opts.gps - padding,
        opts.gps + padding, pad=0, suffix='.trg')

if not len(kwfilenames):
    raise StandardError('no KW files found!')

print 'loading KW triggers:\n', kwfilenames[0]
kwdict = event.loadkwm(kwfilenames[0])
for kwfilename in kwfilenames[1:]:
    print kwfilename
    kwdict = event.loadkwm(kwfilename, trigs_dict=kwdict)

# require GW channel in trigger dict

if gwchannel not in kwdict:
    kwdict[gwchannel] = []
#---

gdbdir = config.get('gdb general', 'main_gdb_dir')

if not opts.skip_gracedb_upload:
    if config.has_option('gdb general', 'gdb_url'):
        gracedb = GraceDb(config.get('gdb general', 'gdb_url'))
    else:
        gracedb = GraceDb()

##########################################
### Find relevant files
##########################################

if opts.verbose:
    print 'Finding relevant *glitch*.xml files'
gchxml_filenames = sorted([
    filename for filename in idq.get_all_files_in_range(
        realtimedir, opts.start, opts.end, pad=0, suffix='.xml.gz')
    if opts.classifier == idq.extract_xml_name(filename)
    and 'glitch' in filename and ifo in filename
])

if not gchxml_filenames:
    # no files found, print the message, and exit
    if not opts.skip_gracedb_upload:
        gracedb.writeLog(opts.gracedb_id,
                         message="No iDQ glitch tables from " +
                         opts.classifier +
                         " available for the candidate at " + ifo)
    print "No glitch files found, exiting."
    sys.exit(0)

if opts.verbose:
Example #11
                  action="store_true",
                  default=False,
                  help="run in verbose mode")
(opts, args) = parser.parse_args()

main_channel = opts.main_channel

if opts.gps_start_time and opts.gps_end_time:
    gps_start_time = int(opts.gps_start_time)
    gps_end_time = int(opts.gps_end_time)

# get list of trigger files
if opts.trigger_dir:
    trig_files = idq.get_all_files_in_range(opts.trigger_dir,
                                            gps_start_time,
                                            gps_end_time,
                                            pad=0,
                                            suffix='.trg')
else:
    trig_files = glob.glob(opts.trigger_files)

if not trig_files:
    print "Warning: Empty list of trigger files, exiting without doing anything."
    sys.exit(0)

if opts.verbose:
    print "Loading triggers ..."
# load triggers from files
trigger_dict = event.loadkwm(trig_files)
if main_channel not in trigger_dict:
    trigger_dict[main_channel] = []
#---

    # check that gracedb id is given
    if not opts.gracedb_id:
        print "A GraceDB ID must be specified to upload data. Please use the --gracedb-id option."
        sys.exit(1)


# #########################################
# ## Find relevant files
# #########################################

if opts.verbose:
    print "Finding relevant *glitch*.xml files"
gchxml_filenames = sorted(
    [
        filename
        for filename in idq.get_all_files_in_range(opts.input_dir, opts.start, opts.end, pad=0, suffix=".xml.gz")
        if opts.classifier == idq.extract_xml_name(filename) and "glitch" in filename and opts.ifo in filename
    ]
)

if not gchxml_filenames:
    # no files found, print the message, and exit
    if not opts.skip_gracedb_upload:
        gracedb.writeLog(
            opts.gracedb_id,
            message="No iDQ glitch tables data from "
            + opts.classifier
            + " available for the candidate  at "
            + opts.ifo,
        )
    print "No glitch files found, exiting."
Example #13
    channels = f.read().splitlines()
print(channels)

locktimes = DataQualityFlag.query_dqsegdb(flag, gpsstart, gpsstop)
print("Locktimes")
print(locktimes.active)
for i, seg in enumerate(locktimes.active):  #Loop through lock segments
    print("Locksegment %d: %d-%d" % (i, seg[0], seg[1]))
    if (seg[1] -
            seg[0]) <= fftlength:  #Ignore lock segments that are too short
        continue
    ### Get triggers
    print("Collecting triggers for labelling")
    trig_files = idq.get_all_files_in_range(trigger_dir,
                                            seg[0],
                                            seg[1],
                                            pad=0,
                                            suffix='.trg')
    trigger_dict = event.loadkwm(trig_files)
    trigger_dict.include([[seg[0], seg[1]]])
    if trigger_dict[gwchannel]:
        trigger_dict.apply_signif_threshold(threshold=signif_threshold,
                                            channels=[gwchannel])
        darmtrg = trigger_dict.get_triggers_from_channel(gwchannel)
        auxdata = TimeSeriesDict.get(
            channels, seg[0], seg[1], frametype='L1_R',
            verbose=True)  #Generate a dictionary for each
        for key, value in auxdata.iteritems():
            #print(value)
            value = value.whiten(fftlength, overlap)  # whiten the data; whiten() returns a new series
            if value.sample_rate.value == samplerate:  #Convert all channels to the same samplingrate
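### The loop above is cut off mid-branch. A hedged, self-contained sketch of
### the step it builds toward (whiten each channel, then bring everything to
### a common sample rate), shown here on synthetic data. Note that gwpy's
### whiten() returns a *new* TimeSeries rather than modifying in place, so
### the result must be captured. This illustrates the intent the comments
### describe; it is not the original script's elided code.
import numpy
from gwpy.timeseries import TimeSeries

fftlength, overlap, samplerate = 4, 2, 256
raw = TimeSeries(numpy.random.normal(size=16384), sample_rate=1024)

white = raw.whiten(fftlength, overlap)     # whitened copy of the input
if white.sample_rate.value != samplerate:  # unify the sampling rate
    white = white.resample(samplerate)
print(white.sample_rate)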
#---

                  )
parser.add_option('', '--output-file', type='string',
                  default='training_samples.pat',
                  help='full path and name of the output file into which the training samples will be saved'
                  )
parser.add_option('', '--verbose', action='store_true', default=False,
                  help='run in verbose mode')

(opts, args) = parser.parse_args()

gps_start_time = int(opts.gps_start_time)
gps_end_time = int(opts.gps_end_time)

# get all *.pat files in the specified range

patfiles = idq.get_all_files_in_range(opts.source_directory,
        gps_start_time, gps_end_time, suffix='.pat')

if len(patfiles) == 0:
    print 'No *.pat files found in the gps range ' \
        + str(gps_start_time) + ' - ' + str(gps_end_time)
    print 'Exiting with status 2.'
    sys.exit(2)

# load auxmvc vector samples

auxmvc_samples = auxmvc_utils.ReadMVSCTriggers(patfiles,
        Classified=False)

if opts.dq_segments:

    # load dq segments
#---

    # check that gracedb id is given
    if not opts.gracedb_id:
        print "A GraceDB ID must be specified to upload data. Please use the --gracedb-id option."
        sys.exit(1)



# #########################################
# ## Find relevant files
# #########################################

if opts.verbose:
    print 'Finding relevant *glitch*.xml files'
gchxml_filenames = sorted([filename for filename in
                          idq.get_all_files_in_range(opts.input_dir, opts.start, opts.end, pad=0, suffix='.xml.gz')
                          if opts.classifier in filename.split('/')[-1]
                          and 'glitch' in filename
                          and opts.ifo in filename])

if not gchxml_filenames:
    # no files found, print the message, and exit
    if not opts.skip_gracedb_upload:
        gracedb.writeLog(opts.gracedb_id, message="No iDQ glitch tables from "+opts.classifier+" available for the candidate at "+opts.ifo)
    print "No glitch files found, exiting."
    sys.exit(0)

if opts.verbose:
    print "Found:"
    for filename in gchxml_filenames:
        print '\t' + filename
Example #16
        gracedb = GraceDb()
    # check that gracedb id is given
    if not opts.gracedb_id:
        print "GraceDB ID must be specified for enabling correct uploading of the data. Please use --gracedb-id option."
        sys.exit(1)



# #########################################
# ## Find relevant files
# #########################################

if opts.verbose:
    print 'Finding relevant *glitch*.xml files'
gchxml_filenames = sorted([filename for filename in 
                          idq.get_all_files_in_range(opts.input_dir, opts.start, opts.end, pad=0, suffix='.xml.gz') 
                          if opts.classifier == idq.extract_xml_name(filename) 
                          and 'glitch' in filename
                          and opts.ifo in filename])
                        
if not gchxml_filenames:
    # no files found, print the message, and exit
    if not opts.skip_gracedb_upload:
        gracedb.writeLog(opts.gracedb_id, message="No iDQ glitch tables from "+opts.classifier+" available for the candidate at "+opts.ifo)
    print "No glitch files found, exiting."
    sys.exit(0)

if opts.verbose:
    print "Found:"
    for filename in gchxml_filenames:
        print '\t' + filename
Example #17
rank_channame = idq.channame(opts.ifo, opts.classifier, "%s_rank" % opts.tag)
fap_channame = idq.channame(opts.ifo, opts.classifier, "%s_fap" % opts.tag)
fapUL_channame = idq.channame(opts.ifo, opts.classifier, "%s_fapUL" % opts.tag)

#===================================================================================================

# get all *.npy.gz files in range

if opts.verbose:
    print "Finding relevant *.npy.gz files"
rank_filenames = []
fap_filenames = []
all_files = idq.get_all_files_in_range(opts.input_dir,
                                       opts.plotting_gps_start,
                                       opts.plotting_gps_end,
                                       pad=0,
                                       suffix='.npy.gz')
for filename in all_files:
    if opts.classifier == idq.extract_fap_name(
            filename
    ):  # and opts.ifo in filename: ### this last bit not needed?
        if 'rank' in filename:
            rank_filenames.append(filename)
        if 'fap' in filename:
            fap_filenames.append(filename)

rank_filenames.sort()
fap_filenames.sort()

if (not rank_filenames) or (not fap_filenames):  # we couldn't find either rank or fap files
    # exit gracefully
    if opts.verbose:
        print "no iDQ timeseries for %s at %s"%(opts.classifier, opts.ifo)
Example #18
### look up KW trg files that intersect segs
if opts.verbose:
    print("finding relevant kw_trgfiles")
kw_trgfiles = []
### iterate over different configurations used in training
for kwconf, dirname in eval(config.get(
        'general', 'kw')).items():  ### this is kinda ugly...
    if opts.verbose:
        print(
            "  searching for KW trgfiles corresponding to %s in %s within [%.3f, %.3f]"
            % (kwconf, dirname, segs[0][0], segs[-1][1]))

    ### iterate over all trg files found in that directory
    for trgfile in idq.get_all_files_in_range(dirname,
                                              segs[0][0],
                                              segs[-1][1],
                                              pad=0,
                                              suffix='.trg'):
        ### check whether there is some overlap
        ### not guaranteed if there are gaps between min and max gps times
        if event.livetime(
                event.andsegments(
                    [[idq.extract_start_stop(trgfile, suffix='.trg')], segs])):
            if opts.verbose:
                print("    kept : " + trgfile)
            kw_trgfiles.append(trgfile)

        elif opts.verbose:
            print("    discarded : " + trgfile)

#---
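
### Why the explicit overlap check above matters: querying a directory over
### [segs[0][0], segs[-1][1]] can return files that sit in the gaps *between*
### the requested segments. A tiny illustration, using a pure-Python overlap
### test as an assumed stand-in for event.livetime/event.andsegments:

def overlaps(seg, segs):
    s1, e1 = seg
    return any(max(s1, s2) < min(e1, e2) for s2, e2 in segs)

segs = [[100, 110], [200, 210]]       # two disjoint analysis segments
print(overlaps([150, 160], segs))     # inside the min/max span, but in a gap: False
print(overlaps([205, 215], segs))     # genuinely overlapping: True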
segs = event.fixsegments([[t-win, t+win] for t in gps]) ### the segments in which we need KW triggers

#---

### look up KW trg files that intersect segs
if opts.verbose:
    print( "finding relevant kw_trgfiles" )
kw_trgfiles = []
### iterate over different configurations used in training
for kwconf, dirname in eval(config.get('general', 'kw')).items(): ### this is kinda ugly...
    if opts.verbose:
        print( "  searching for KW trgfiles corresponding to %s in %s within [%.3f, %.3f]"%(kwconf, dirname, segs[0][0], segs[-1][1]) )

    ### iterate over all trg files found in that directory
    for trgfile in idq.get_all_files_in_range(dirname, segs[0][0], segs[-1][1], pad=0, suffix='.trg'):
        ### check whether there is some overlap 
        ### not guaranteed if there are gaps between min and max gps times
        if event.livetime(event.andsegments([[idq.extract_start_stop(trgfile, suffix='.trg')], segs])): 
            if opts.verbose:
                print( "    kept : "+trgfile )
            kw_trgfiles.append( trgfile )

        elif opts.verbose:
            print( "    discarded : "+trgfile )

#---

if opts.verbose:
    print( "evaluating %d times using %d KW trgfiles"%(Ngps, len(kw_trgfiles) ) )
### set up output pointers
#---

def check_calibartion(realtimedir, start, end, classifier, FAPthrs, verbose=False):
    """checks the pipeline's calibration at each FAPthr in FAPthrs"""

    # =================================================
    # grab idq_segments so we compute meaningful livetimes
    # =================================================

    if verbose:
        print 'getting idq_segments'

    idq_segs = idq.get_idq_segments(realtimedir, start, end,
                                    suffix='.npy.gz')
    idq_livetime = event.livetime(idq_segs)

    # =================================================
    # grab relevant data
    # =================================================

    if verbose:
        print 'looking for *_fap_*.npy.gz files in', realtimedir

    fapfilenames = [filename for filename in
                    idq.get_all_files_in_range(realtimedir, start, end,
                    pad=0, suffix='.npy.gz') if '_fap_' in filename
                    and classifier in filename]

    if verbose:
        print 'discovered %d files' % len(fapfilenames)
        print 'building time-series'

    (times, timeseries) = idq_gdb_utils.combine_ts(fapfilenames)

    # =================================================
    # check calibration
    # =================================================

    segments = []
    deadtimes = []
    statedFAPs = []
    for FAPthr in FAPthrs:
        if verbose:
            print 'computing segments for FAPthr =', FAPthr

        segs = []
        max_statedFAP = None
        for (t, ts) in zip(times, timeseries):
            (_segs, _min_ts) = timeseries_to_segments(t, -ts, -FAPthr)  # we want FAP <= FAPthr <--> -FAP >= -FAPthr
            segs += _segs
            if _min_ts is not None:
                statedFAP = -_min_ts
                if max_statedFAP < statedFAP:
                    max_statedFAP = statedFAP

        segs = event.andsegments([segs, idq_segs])
        segments.append(segs)
        deadtimes.append(1.0 * event.livetime(segs) / idq_livetime)
        statedFAPs.append(max_statedFAP)

    return (idq_segs, segments, deadtimes, statedFAPs)
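
### A hedged usage sketch for the function above (keeping its original
### spelling); the directory, GPS range, and thresholds are placeholders.
### For a well-calibrated pipeline the observed deadtime at each threshold
### should track the stated FAP, so printing them side by side is the
### natural check:
#
#   FAPthrs = [0.1, 0.01, 0.001]
#   idq_segs, segments, deadtimes, statedFAPs = check_calibartion(
#       '/path/to/realtimedir', 1126250000, 1126260000, 'ovl', FAPthrs,
#       verbose=True)
#   for FAPthr, dt, stated in zip(FAPthrs, deadtimes, statedFAPs):
#       print('FAPthr=%.3g deadtime=%.3g statedFAP=%s' % (FAPthr, dt, stated))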
#---

rank_channame  = idq.channame(ifo, opts.classifier, "%s_rank"%tag)
fap_channame   = idq.channame(ifo, opts.classifier, "%s_fap"%tag)
fapUL_channame = idq.channame(ifo, opts.classifier, "%s_fapUL"%tag)

flavor = config.get(opts.classifier, 'flavor')
if config.has_option(opts.classifier, 'plotting_label'):
    plotting_label = config.get(opts.classifier, 'plotting_label')
else:
    plotting_label = opts.classifier

#===================================================================================================

### Find all FAP files
if opts.verbose:
    print "finding all fap*gwf files"
faps = [fap for fap in idq.get_all_files_in_range( realtimedir, opts.start, opts.end, pad=0, suffix='.gwf') if ('fap' in fap) and (opts.classifier==idq.extract_fap_name( fap )) and event.livetime(event.andsegments([[idq.extract_start_stop(fap, suffix=".gwf")], idqsegs])) ]

### compute total time covered
#T = event.livetime( [idq.extract_start_stop(fap, suffix='.gwf') for fap in faps] )*1.0
T = event.livetime( idqsegs )*1.0

### combine timeseries and generate segments
if opts.verbose:
    print "generating segments from %d fap files"%(len(faps))
segs = dict( (fapThr, [[], 1.0]) for fapThr in opts.FAPthr )
t, ts = idq.combine_gwf(faps, [fap_channame])
for t, ts in zip(t, ts):

    t, ts = idq.timeseries_in_segments( t, ts, idqsegs )

    for fapThr in opts.FAPthr:
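### The loop above is truncated. A self-contained sketch of the thresholding
### it builds toward: turn a sampled FAP timeseries into the segments where
### FAP <= threshold, then measure the fractional deadtime. The uniform
### sampling and names here are illustrative assumptions, not the elided code.
import numpy

def fap_to_segments_sketch(times, values, thr):
    """segments where values <= thr, assuming uniformly sampled times"""
    dt = times[1] - times[0]
    segs = []
    for t0, v in zip(times, values):
        if v <= thr:
            if segs and segs[-1][1] == t0:
                segs[-1][1] = t0 + dt  # extend the running segment
            else:
                segs.append([t0, t0 + dt])
    return segs

times = numpy.arange(0, 10, 1.0)
fap = numpy.array([1, .5, .01, .005, .2, .001, 1, .009, .5, 1])
segs = fap_to_segments_sketch(times, fap, 0.01)
print(segs)                                # [[2.0, 4.0], [5.0, 6.0], [7.0, 8.0]]
print(sum(e - s for s, e in segs) / 10.0)  # fractional deadtime: 0.4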
Example #22
    'full path and name of the output file into which the training samples will be saved'
)
parser.add_option('',
                  '--verbose',
                  action='store_true',
                  default=False,
                  help='run in verbose mode')

(opts, args) = parser.parse_args()

gps_start_time = int(opts.gps_start_time)
gps_end_time = int(opts.gps_end_time)

# get all *.pat files in the specified range
patfiles = idq.get_all_files_in_range(opts.source_directory,
                                      gps_start_time,
                                      gps_end_time,
                                      suffix='.pat')

if opts.verbose:
    print "found %d pat files" % len(patfiles)

if opts.dq_segments:
    # load dq segments
    if opts.verbose:
        print "reading segments from %s" % opts.dq_segments

    (dq_segments, covered_segments) = \
        idq.extract_dq_segments(open(opts.dq_segments, 'r'),
                                opts.dq_segments_name)

    # sort and merge segments
Example #23
    f = open(idqseg_path, 'w')
    for seg in idqsegs:
        print >> f, seg[0], seg[1]
    f.close()

    #===============================================================================================
    # update mappings via uroc files
    #===============================================================================================

    ### find all *dat files, bin them according to classifier
    ### needed for opts.mode=="dat" and KDE estimates
    logger.info('finding all *dat files')
    datsD = defaultdict(list)
    for dat in idq.get_all_files_in_range(realtimedir,
                                          gpsstart - lookback,
                                          gpsstart + stride,
                                          pad=0,
                                          suffix='.dat'):
        datsD[idq.extract_dat_name(dat)].append(dat)

    ### throw away any un-needed files
    for key in datsD.keys():
        if key not in classifiers:
            datsD.pop(key)
        else:  ### throw out files that don't contain any science time
            datsD[key] = [
                dat for dat in datsD[key] if event.livetime(
                    event.andsegments([
                        idqsegs, [idq.extract_start_stop(dat, suffix='.dat')]
                    ]))
            ]
#===================================================================================================

rank_channame = idq.channame(opts.ifo, opts.classifier, "%s_rank"%opts.tag)
fap_channame = idq.channame(opts.ifo, opts.classifier, "%s_fap"%opts.tag)
fapUL_channame = idq.channame(opts.ifo, opts.classifier, "%s_fapUL"%opts.tag)

#===================================================================================================

# get all *.npy.gz files in range

if opts.verbose:
    print "Finding relevant *.npy.gz files"
rank_filenames = []
fap_filenames = []
all_files = idq.get_all_files_in_range(opts.input_dir, opts.plotting_gps_start,
    opts.plotting_gps_end, pad=0, suffix='.npy.gz')
for filename in all_files:
    if opts.classifier == idq.extract_fap_name(filename): # and opts.ifo in filename: ### this last bit not needed?
        if 'rank' in filename:
            rank_filenames.append(filename)
        if 'fap' in filename:
            fap_filenames.append(filename)

rank_filenames.sort()
fap_filenames.sort()

if (not rank_filenames) or (not fap_filenames): # we couldn't find either rank or fap files
    # exit gracefully
    if opts.verbose:
        print "no iDQ timeseries for %s at %s"%(opts.classifier, opts.ifo)
    if not opts.skip_gracedb_upload:
Example #25
### iterate over gps and load timing differences
trgdata = {}
for gps in args:
    print "gps : %.9f" % (gps)

    minwin = opts.window

    ### go find triggers
    if opts.verbose:
        print "\tdiscoverying KW triggers within [%.9f, %.9f]" % (gps - opts.window, gps + opts.window)

    ### figure out which files you want
    filenames = []
    coverage = []
    for gdsdir in kwgdsdirs:
        for filename in idq.get_all_files_in_range(gdsdir, gps - opts.window, gps + opts.window, pad=0, suffix=".trg"):
            seg = idq.extract_start_stop(filename, suffix=".trg")
            if not event.livetime(event.andsegments([coverage, [seg]])):
                coverage = event.fixsegments(coverage + [seg])
                filenames.append(filename)

    ### figure out the extent of the coverage
    if len(event.include([[gps]], coverage, tcent=0)) == 0:
        if opts.force:
            if opts.verbose:
                print "no triggers found for gps : %.3f" % (gps)
            continue
        else:
            raise ValueError("no triggers found for gps : %.3f" % (gps))
    for s, e in coverage:
        if s < gps:
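### The loop above is truncated. Given the earlier `minwin = opts.window`, a
### plausible reading (an assumption, not the original code) is that the
### window shrinks to the span around gps that the discovered files actually
### cover. A self-contained sketch of that idea:

def covered_window_sketch(gps, coverage, window):
    """largest symmetric half-width w <= window with [gps-w, gps+w] covered"""
    minwin = window
    for s, e in coverage:
        if s <= gps <= e:
            minwin = min(minwin, gps - s, e - gps)
    return minwin

print(covered_window_sketch(100.0, [[90.0, 130.0]], 32.0))  # -> 10.0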
#---

rank_channame  = idq.channame(ifo, opts.classifier, "%s_rank"%tag)
fap_channame   = idq.channame(ifo, opts.classifier, "%s_fap"%tag)
fapUL_channame = idq.channame(ifo, opts.classifier, "%s_fapUL"%tag)

flavor = config.get(opts.classifier, 'flavor')
if config.has_option(opts.classifier, 'plotting_label'):
    plotting_label = config.get(opts.classifier, 'plotting_label')
else:
    plotting_label = opts.classifier

#===================================================================================================

### Find all FAP files
if opts.verbose:
    print "finding all fap*gwf files"
faps = [fap for fap in idq.get_all_files_in_range( realtimedir, opts.start, opts.end, pad=0, suffix='.gwf') if ('fap' in fap) and (opts.classifier==idq.extract_fap_name( fap )) and event.livetime(event.andsegments([[idq.extract_start_stop(fap, suffix=".gwf")], idqsegs])) ]

### compute total time covered
#T = event.livetime( [idq.extract_start_stop(fap, suffix='.gwf') for fap in faps] )*1.0
T = event.livetime( idqsegs )*1.0

### combine timeseries and generate segments
if opts.verbose:
    print "generating segments from %d fap files"%(len(faps))
segs = dict( (fapThr, [[], 1.0]) for fapThr in opts.FAPthr )
t, ts = idq.combine_gwf(faps, [fap_channame])
for t, ts in zip(t, ts):

    t, ts = idq.timeseries_in_segments( t, ts, idqsegs )

    for fapThr in opts.FAPthr:
Example #27
            utils.write_filename(xmldoc, seg_file, gz=seg_file.endswith(".gz"))

            (scisegs, coveredseg) = idq.extract_dq_segments(seg_file, config.get('get_science_segments', 'include'))

        except Exception as e:
            traceback.print_exc()
            logger.info("ERROR: segment generation failed. Skipping this summary period.")

            gpsstart += stride
            continue

    #=============================================
    # generating summary datfiles filtered by segments
    #=============================================
    ### get all relevant datfiles
    datfiles = idq.get_all_files_in_range(realtimedir, gpsstart, gpsstart+stride, suffix=".dat")

    for classifier in classifiers:
        ### follow standard datfile naming conventions 
        summary_dat = "%s/%s_%s_0-0_%sSummary-%d-%d.dat"%(this_sumdir, kwbasename, classifier, usertag, gpsstart, stride)

        logger.info("generating summary dat file for %s : %s"%(classifier, summary_dat))

        columns = ['GPS', 'i', 'rank', "signif", "SNR"]
        if classifier == "ovl":
            columns += ["vchan", "vthr", "vwin"]

        ### write columns to summary dat
        file_obj = open(summary_dat, "w")
        print >> file_obj, " ".join(columns)
#---

if opts.ignore_science_segments:
    idqseg_path = idq.idqsegascii(opts.output_dir, '', startgps, stride)
else:
    idqseg_path = idq.idqsegascii(opts.output_dir, '_%s' % dq_name, startgps,
                                  stride)
f = open(idqseg_path, 'w')
for seg in idqsegs:
    print >> f, seg[0], seg[1]
f.close()

#========================
# go findeth the frame data
#========================
logger.info('  finding all *fap*.gwf files')
fapsD = defaultdict(list)
for fap in [
        fap for fap in idq.get_all_files_in_range(
            realtimedir, lookup_startgps, lookup_endgps, pad=0, suffix='.gwf')
        if "fap" in fap
]:
    fapsD[idq.extract_fap_name(fap)].append(fap)

### throw away files we will never need
for key in fapsD.keys():
    if key not in opts.classifier:  ### throw away unwanted files
        fapsD.pop(key)
    else:  ### keep only files that overlap with scisegs
        fapsD[key] = [
            fap for fap in fapsD[key] if event.livetime(
                event.andsegments(
                    [idqsegs, [idq.extract_start_stop(fap, suffix='.gwf')]]))
        ]