def create_string_sngl_is_vetoed_function(connection, veto_segments_name=None):
    """
    Creates a function named string_sngl_is_vetoed in the database at
    connection.  The function accepts three parameters --- the
    instrument name, and the integer and integer nanoseconds components
    of a time --- and returns true if the instrument is vetoed at that
    time or false otherwise.  veto_segments_name sets the name of the
    segment lists used to define the vetoes.  If veto_segments_name is
    None then a no-op function is created that always returns False.

    Note:  this function requires ligo.lw.dbtables and
    ligo.lw.utils.segments to be imported as dbtables and
    ligolwsegments respectively.
    """
    if veto_segments_name is None:
        connection.create_function(
            "string_sngl_is_vetoed", 3,
            lambda instrument, peak_time, peak_time_ns: False)
        return
    xmldoc = dbtables.get_xml(connection)
    seglists = ligolwsegments.segmenttable_get_by_name(
        xmldoc, veto_segments_name).coalesce()
    xmldoc.unlink()

    def is_vetoed(instrument, peak_time, peak_time_ns, seglists=seglists):
        return instrument in seglists and dbtables.lsctables.LIGOTimeGPS(
            peak_time, peak_time_ns) in seglists[instrument]

    connection.create_function("string_sngl_is_vetoed", 3, is_vetoed)
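# Hedged usage sketch (not part of the original source):  it assumes a
# LIGO_LW SQLite trigger document named "triggers.sqlite" containing a
# sngl_burst table and a veto segment list named "vetoes";  the file name,
# segment list name, and query below are illustrative only.
import sqlite3

connection = sqlite3.connect("triggers.sqlite")
create_string_sngl_is_vetoed_function(connection, veto_segments_name="vetoes")

# once registered, the function can be used directly in SQL to filter
# vetoed triggers at query time
cursor = connection.execute("""
SELECT COUNT(*) FROM sngl_burst
WHERE NOT string_sngl_is_vetoed(ifo, peak_time, peak_time_ns)
""")
print("unvetoed triggers:", cursor.fetchone()[0])
connection.close()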
def load_segments(filename, name, verbose = False):
    if verbose:
        print("loading \"%s\" segments ... " % name, end=' ', file=sys.stderr)
    connection = sqlite3.connect(filename)
    segs = ligolw_segments.segmenttable_get_by_name(dbtables.get_xml(connection), name).coalesce()
    connection.close()
    if verbose:
        print("done.", file=sys.stderr)
        for ifo in segs:
            print("loaded %d veto segment(s) for %s totalling %g s" % (len(segs[ifo]), ifo, float(abs(segs[ifo]))), file=sys.stderr)
    return segs
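# Hedged usage sketch (not part of the original source):  the file name
# "triggers.sqlite" and the segment list name "vetoes" are illustrative.
veto_segs = load_segments("triggers.sqlite", "vetoes", verbose = True)
for ifo, seglist in veto_segs.items():
    print("%s: %g s of veto time" % (ifo, float(abs(seglist))))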
# it's not hard to modify the veto segments in the .xml to be just
# those that intersect the search summary segments.  That way, if
# multiple documents are inserted into the same database, or merged
# with ligolw_add, the veto lists will not get duplicated.
#

if not ligolw_segments.has_segment_tables(xmldoc):
    if options.verbose:
        print("warning: no segment definitions found, vetoes will not be applied", file=sys.stderr)
    vetoes = None
elif not ligolw_segments.has_segment_tables(xmldoc, name = options.vetoes_name):
    if options.verbose:
        print("warning: document contains segment definitions but none named \"%s\", vetoes will not be applied" % options.vetoes_name, file=sys.stderr)
    vetoes = None
else:
    vetoes = ligolw_segments.segmenttable_get_by_name(xmldoc, options.vetoes_name).coalesce()

#
# Run coincidence algorithm.
#

thinca.ligolw_thinca(
    xmldoc,
    process_id = process.process_id,
    delta_t = options.threshold,
    ntuple_comparefunc = ntuple_comparefunc,
    seglists = None,  # FIXME
    veto_segments = vetoes,
    min_instruments = options.min_instruments,
    verbose = options.verbose
)
def __init__(self, connection, live_time_program, search="excesspower", veto_segments_name=None): """ Compute and record some summary information about the database. Call this after all the data has been inserted, and before you want any of this information. """ self.connection = connection self.xmldoc = dbtables.get_xml(connection) # find the tables try: self.sngl_burst_table = lsctables.SnglBurstTable.get_table( self.xmldoc) except ValueError: self.sngl_burst_table = None try: self.sim_burst_table = lsctables.SimBurstTable.get_table( self.xmldoc) except ValueError: self.sim_burst_table = None try: self.coinc_def_table = lsctables.CoincDefTable.get_table( self.xmldoc) self.coinc_table = lsctables.CoincTable.get_table(self.xmldoc) self.time_slide_table = lsctables.TimeSlideTable.get_table( self.xmldoc) except ValueError: self.coinc_def_table = None self.coinc_table = None self.time_slide_table = None try: self.multi_burst_table = lsctables.MultiBurstTable.get_table( self.xmldoc) except ValueError: self.multi_burst_table = None # get the segment lists self.seglists = ligolw_search_summary.segmentlistdict_fromsearchsummary( self.xmldoc, live_time_program).coalesce() self.instruments = set(self.seglists.keys()) if veto_segments_name is not None: self.vetoseglists = ligolw_segments.segmenttable_get_by_name( self.xmldoc, veto_segments_name).coalesce() else: self.vetoseglists = ligolw_segments.segments.segmentlistdict() # determine a few coinc_definer IDs # FIXME: don't hard-code the numbers if self.coinc_def_table is not None: try: self.bb_definer_id = self.coinc_def_table.get_coinc_def_id( search, 0, create_new=False) except KeyError: self.bb_definer_id = None try: self.sb_definer_id = self.coinc_def_table.get_coinc_def_id( search, 1, create_new=False) except KeyError: self.sb_definer_id = None try: self.sce_definer_id = self.coinc_def_table.get_coinc_def_id( search, 2, create_new=False) except KeyError: self.sce_definer_id = None try: self.scn_definer_id = self.coinc_def_table.get_coinc_def_id( search, 3, create_new=False) except KeyError: self.scn_definer_id = None else: self.bb_definer_id = None self.sb_definer_id = None self.sce_definer_id = None self.scn_definer_id = None
def __init__(self, options):
    """!
    Initialize a DataSourceInfo class instance from command line
    options specified by append_options()
    """
    ## A list of possible, valid data sources ("frames", "framexmit", "lvshm", "white", "silence")
    self.data_sources = set(("framexmit", "lvshm", "frames", "white", "silence", "white_live"))
    self.live_sources = set(("framexmit", "lvshm", "white_live"))
    assert self.live_sources <= self.data_sources

    # Sanity check the options
    if options.data_source not in self.data_sources:
        raise ValueError("--data-source must be one of %s" % ", ".join(self.data_sources))
    if options.data_source == "frames" and options.frame_cache is None:
        raise ValueError("--frame-cache must be specified when using --data-source=frames")
    if options.frame_segments_file is not None and options.data_source != "frames":
        raise ValueError("can only give --frame-segments-file if --data-source=frames")
    if options.frame_segments_name is not None and options.frame_segments_file is None:
        raise ValueError("can only specify --frame-segments-name if --frame-segments-file is given")
    if not (options.channel_list or options.channel_name):
        raise ValueError("must specify a channel list in the form --channel-list=/path/to/file or --channel-name=H1:AUX-CHANNEL-NAME:RATE --channel-name=H1:SOMETHING-ELSE:RATE")
    if options.channel_list and options.channel_name:
        raise ValueError("cannot specify both --channel-list and --channel-name; use one or the other")

    ## Generate a dictionary of requested channels from channel INI file

    # known/permissible values of safety and fidelity flags
    self.known_safety = set(("safe", "unsafe", "unsafeabove2kHz", "unknown"))
    self.known_fidelity = set(("clean", "flat", "glitchy", "unknown"))

    # ensure safety and fidelity options are valid
    options.safety_include = set(options.safety_include)
    options.fidelity_exclude = set(options.fidelity_exclude)
    for safety in options.safety_include:
        assert safety in self.known_safety, '--safety-include=%s is not understood.  Must be one of %s' % (safety, ", ".join(self.known_safety))
    for fidelity in options.fidelity_exclude:
        assert fidelity in self.known_fidelity, '--fidelity-exclude=%s is not understood.  Must be one of %s' % (fidelity, ", ".join(self.known_fidelity))

    # dictionary of the requested channels, e.g., {"H1:LDAS-STRAIN": 16384, "H1:ODC-LARM": 2048}
    if options.channel_list:
        name, self.extension = options.channel_list.rsplit('.', 1)
        if self.extension == 'ini':
            self.channel_dict = channel_dict_from_channel_ini(options)
        else:
            self.channel_dict = channel_dict_from_channel_file(options.channel_list)
    elif options.channel_name:
        self.extension = 'none'
        self.channel_dict = channel_dict_from_channel_list(options.channel_name)

    # set instrument; it is assumed all channels from a given channel list are from the same instrument
    self.instrument = self.channel_dict[next(iter(self.channel_dict))]['ifo']

    # set the maximum number of streams to be run by a single pipeline
    self.max_streams = options.max_streams

    # set the frequency range considered when splitting channels into multiple frequency bands.
    # If a channel's sampling rate doesn't fall within this range, it will not be split into multiple bands.
    self.max_sample_rate = options.max_sample_rate
    self.min_sample_rate = options.min_sample_rate

    # split up channels requested into partitions for serial processing
    if options.equal_subsets:
        self.channel_subsets = partition_channels_to_equal_subsets(self.channel_dict, self.max_streams, self.min_sample_rate, self.max_sample_rate)
    else:
        self.channel_subsets = partition_channels_to_subsets(self.channel_dict, self.max_streams, self.min_sample_rate, self.max_sample_rate)

    ## A dictionary for shared memory partition, e.g., {"H1": "LHO_Data", "H2": "LHO_Data", "L1": "LLO_Data", "V1": "VIRGO_Data"}
    self.shm_part_dict = {"H1": "LHO_Data", "H2": "LHO_Data", "L1": "LLO_Data", "V1": "VIRGO_Data"}
    if options.shared_memory_partition is not None:
        self.shm_part_dict.update(datasource.channel_dict_from_channel_list(options.shared_memory_partition))

    ## options for shared memory
    self.shm_assumed_duration = options.shared_memory_assumed_duration
    self.shm_block_size = options.shared_memory_block_size  # NOTE: should this be incorporated into options.block_size? currently only used for offline data sources

    ## A dictionary of framexmit addresses
    self.framexmit_addr = framexmit_ports["CIT"]
    if options.framexmit_addr is not None:
        self.framexmit_addr.update(datasource.framexmit_dict_from_framexmit_list(options.framexmit_addr))
    self.framexmit_iface = options.framexmit_iface

    ## Analysis segment.  Default is None
    self.seg = None

    ## Set latency output
    self.latency_output = options.latency_output

    if options.gps_start_time is not None:
        if options.gps_end_time is None:
            raise ValueError("must provide both --gps-start-time and --gps-end-time")
        try:
            start = LIGOTimeGPS(options.gps_start_time)
        except ValueError:
            raise ValueError("invalid --gps-start-time '%s'" % options.gps_start_time)
        try:
            end = LIGOTimeGPS(options.gps_end_time)
        except ValueError:
            raise ValueError("invalid --gps-end-time '%s'" % options.gps_end_time)
        if start >= end:
            raise ValueError("--gps-start-time must be < --gps-end-time: %s < %s" % (options.gps_start_time, options.gps_end_time))
        ## Segment from gps start and stop time if given
        self.seg = segments.segment(LIGOTimeGPS(options.gps_start_time), LIGOTimeGPS(options.gps_end_time))
    elif options.gps_end_time is not None:
        raise ValueError("must provide both --gps-start-time and --gps-end-time")
    elif options.data_source not in self.live_sources:
        raise ValueError("--gps-start-time and --gps-end-time must be specified when --data-source not one of %s" % ", ".join(sorted(self.live_sources)))

    if options.frame_segments_file is not None:
        ## Frame segments from a user defined file
        self.frame_segments = ligolw_segments.segmenttable_get_by_name(ligolw_utils.load_filename(options.frame_segments_file, contenthandler=ligolw_segments.LIGOLWContentHandler), options.frame_segments_name).coalesce()
        if self.seg is not None:
            # Clip frame segments to seek segment if it exists
            # (not required, just saves some memory and I/O overhead)
            self.frame_segments = segments.segmentlistdict((instrument, seglist & segments.segmentlist([self.seg])) for instrument, seglist in self.frame_segments.items())
    else:
        ## if no frame segments provided, set them to an empty segment list dictionary
        self.frame_segments = segments.segmentlistdict({self.instrument: None})

    ## frame cache file
    self.frame_cache = options.frame_cache

    ## block size in bytes to read data from disk
    self.block_size = options.block_size

    ## Data source, one of python.datasource.DataSourceInfo.data_sources
    self.data_source = options.data_source

    # FIXME: this is ugly, but we have to protect against busted shared memory partitions
    if self.data_source == "lvshm":
        import subprocess
        subprocess.call(["smrepair", "--bufmode", "5", self.shm_part_dict[self.instrument]])
def load_veto_segments(filename, verbose = False, contenthandler = None):
    return ligolw_segments.segmenttable_get_by_name(ligolw_utils.load_filename(filename, verbose = verbose, contenthandler = contenthandler), "sngl_burst_veto").coalesce()
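# Hedged usage sketch (not part of the original source):  the file name
# "vetoes.xml.gz" is illustrative;  the content handler is the one used
# elsewhere in these snippets for segment documents.
veto_seglists = load_veto_segments(
    "vetoes.xml.gz",
    verbose = True,
    contenthandler = ligolw_segments.LIGOLWContentHandler
)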
#
# get chunk lengths from the values in the ini file
#

short_segment_duration = config_parser.getint('lalapps_StringSearch', 'short-segment-duration')
pad = config_parser.getint('lalapps_StringSearch', 'pad')
min_segment_length = config_parser.getint('pipeline', 'segment-length')  # not including pad at each end
trig_overlap = config_parser.getint('pipeline', 'trig_overlap')
overlap = short_segment_duration / 2 + 2 * pad  # FIXME: correct?

#
# get the instruments and raw segments
#

instruments = lsctables.instrumentsproperty.get(config_parser.get('pipeline', 'ifos'))
segments_cache = set([CacheEntry(None, "SEG", None, "file://localhost" + os.path.abspath(options.segments_file))])
seglists = ligolw_segments.segmenttable_get_by_name(ligolw_utils.load_filename(options.segments_file, contenthandler = ligolw_segments.LIGOLWContentHandler, verbose = options.verbose), options.segments_name).coalesce()

# remove extra instruments
for instrument in set(seglists) - instruments:
    if options.verbose:
        print("warning: ignoring segments for '%s' found in '%s'" % (instrument, options.segments_file), file=sys.stderr)
    del seglists[instrument]

# check for missing instruments
if not instruments.issubset(set(seglists)):
    raise ValueError("segment lists retrieved from '%s' missing segments for instruments %s" % (options.segments_file, ", ".join(instruments - set(seglists))))

# now rely on seglists' keys to provide the instruments
del instruments

#
# Using time slide information, construct segment lists describing times
# requiring trigger construction.
#