def ReadSimInspiralFromFiles(fileList, verbose=False):
    """
    Read the simInspiral tables from a list of files

    @param fileList: list of input files
    @param verbose: print ligolw_add progress
    """
    simInspiralTriggers = None

    lsctables.use_in(ExtractSimInspiralTableLIGOLWContentHandler)
    for thisFile in fileList:
        doc = utils.load_filename(
            thisFile, gz=(thisFile or "stdin").endswith(".gz"),
            verbose=verbose,
            contenthandler=ExtractSimInspiralTableLIGOLWContentHandler)
        # extract the sim inspiral table; get_table raises ValueError if
        # the document contains no sim_inspiral table
        try:
            simInspiralTable = lsctables.SimInspiralTable.get_table(doc)
        except ValueError:
            simInspiralTable = None
        if simInspiralTriggers and simInspiralTable:
            simInspiralTriggers.extend(simInspiralTable)
        elif not simInspiralTriggers:
            simInspiralTriggers = simInspiralTable

    return simInspiralTriggers
def read_segfile_xml(segfile, verbose):
    """
    Read segment file in ligolw xml type and return in
    glue.segments.segmentlist format.
    """
    from glue.ligolw import ligolw, utils, lsctables, table
    lsctables.use_in(ligolw.LIGOLWContentHandler)

    def ContentHandler(xmldoc):
        return ligolw.LIGOLWContentHandler(xmldoc, lambda name, attrs:
                   (name == ligolw.Table.tagName) and
                   (table.StripTableName(attrs["Name"]) in ["segment"]))
    utils.ContentHandler = ContentHandler

    xmldoc = utils.load_url(segfile, verbose=verbose,
                            gz=segfile.endswith(".gz"),
                            contenthandler=ligolw.LIGOLWContentHandler)
    seg_list = segmentlist()
    # the content handler above keeps only the segment table, so query
    # that table for the start/end times
    rows = table.get_table(xmldoc, lsctables.SegmentTable.tableName)
    for row in rows:
        seg_list.append(segment(row.start_time, row.end_time))
    xmldoc.unlink()
    return seg_list
def get_segment_definer_comments(xml_file, include_version=True): """Returns a dict with the comment column as the value for each segment""" from glue.ligolw.ligolw import LIGOLWContentHandler as h lsctables.use_in(h) # read segment definer table xmldoc, digest = ligolw_utils.load_fileobj( xml_file, gz=xml_file.name.endswith(".gz"), contenthandler=h) seg_def_table = table.get_table(xmldoc, lsctables.SegmentDefTable.tableName) # put comment column into a dict comment_dict = {} for seg_def in seg_def_table: if include_version: full_channel_name = ':'.join( [str(seg_def.ifos), str(seg_def.name), str(seg_def.version)]) else: full_channel_name = ':'.join( [str(seg_def.ifos), str(seg_def.name)]) comment_dict[full_channel_name] = seg_def.comment return comment_dict
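# A minimal usage sketch for get_segment_definer_comments above. The file
# name "segments.xml" is hypothetical; note the function expects an open
# file object (it reads .name to detect gzip), not a path.
with open("segments.xml", "rb") as xml_file:
    comments = get_segment_definer_comments(xml_file)
for flag, comment in comments.items():
    print(flag, "->", comment)  # e.g. "H1:DMT-ANALYSIS_READY:1 -> <comment>"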
def open_xmldoc(fobj, **kwargs): """Try and open an existing LIGO_LW-format file, or create a new Document Parameters ---------- fobj : `str`, `file` file path or open file object to read **kwargs other keyword arguments to pass to :func:`~glue.ligolw.utils.load_filename`, or :func:`~glue.ligolw.utils.load_fileobj` as appropriate Returns -------- xmldoc : :class:`~glue.ligolw.ligolw.Document` either the `Document` as parsed from an existing file, or a new, empty `Document` """ from glue.ligolw.lsctables import use_in from glue.ligolw.ligolw import (Document, LIGOLWContentHandler) from glue.ligolw.utils import load_filename, load_fileobj try: # try and load existing file if isinstance(fobj, string_types): kwargs.setdefault('contenthandler', use_in(LIGOLWContentHandler)) return load_filename(fobj, **kwargs) if isinstance(fobj, FILE_LIKE): kwargs.setdefault('contenthandler', use_in(LIGOLWContentHandler)) return load_fileobj(fobj, **kwargs)[0] except (OSError, IOError): # or just create a new Document return Document()
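# A minimal usage sketch for open_xmldoc, assuming the surrounding module
# defines the string_types and FILE_LIKE constants the function relies on.
# The path "events.xml" is hypothetical; if it cannot be read, an empty
# Document is returned rather than an error raised.
xmldoc = open_xmldoc("events.xml")
print(len(xmldoc.childNodes))  # 0 for a freshly created, empty Document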
def find_version_xml(segfile, seg, verbose): """ Find out the version of the flag for the given seg. """ from glue.ligolw import ligolw, utils, lsctables, table lsctables.use_in(ligolw.LIGOLWContentHandler) def ContentHandler(xmldoc): return ligolw.LIGOLWContentHandler(xmldoc, lambda name, attrs:\ (name == ligolw.Table.tagName) and\ (table.StripTableName(attrs["Name"]) in ["segment_definer","segment_summary"])) utils.ContentHandler = ContentHandler xmldoc = utils.load_url(segfile, verbose=verbose, gz=segfile.endswith(".gz"), contenthandler=ligolw.LIGOLWContentHandler) for n, table_elem in enumerate(xmldoc.getElements(lambda e:\ (e.tagName == ligolw.Table.tagName))): if n == 0: definer = {} for row in table_elem: if row.name != "RESULT": definer[str( row.segment_def_id).split(":")[-1]] = row.version if n == 1: for row in table_elem: if seg[0] >= row.start_time and seg[1] <= row.end_time: if str(row.segment_def_id).split( ":")[-1] in definer.keys(): xmldoc.unlink() return definer[str(row.segment_def_id).split(":")[-1]]
def parse_veto_definer(veto_def_filename): """ Parse a veto definer file from the filename and return a dictionary indexed by ifo and veto definer category level. Parameters ---------- veto_def_filename: str The path to the veto definer file Returns: parsed_definition: dict Returns a dictionary first indexed by ifo, then category level, and finally a list of veto definitions. """ from glue.ligolw import table, lsctables, utils as ligolw_utils from glue.ligolw.ligolw import LIGOLWContentHandler as h lsctables.use_in(h) indoc = ligolw_utils.load_filename(veto_def_filename, False, contenthandler=h) veto_table = table.get_table(indoc, 'veto_definer') ifo = veto_table.getColumnByName('ifo') name = veto_table.getColumnByName('name') version = numpy.array(veto_table.getColumnByName('version')) category = numpy.array(veto_table.getColumnByName('category')) start = numpy.array(veto_table.getColumnByName('start_time')) end = numpy.array(veto_table.getColumnByName('end_time')) start_pad = numpy.array(veto_table.getColumnByName('start_pad')) end_pad = numpy.array(veto_table.getColumnByName('end_pad')) data = {} for i in range(len(veto_table)): if ifo[i] not in data: data[ifo[i]] = {} # The veto-definer categories are weird! Hardware injections are stored # in "3" and numbers above that are bumped up by one (although not # often used any more). So we remap 3 to H and anything above 3 to # N-1. 2 and 1 correspond to 2 and 1 (YAY!) if category[i] > 3: curr_cat = "CAT_{}".format(category[i]-1) elif category[i] == 3: curr_cat = "CAT_H" else: curr_cat = "CAT_{}".format(category[i]) if curr_cat not in data[ifo[i]]: data[ifo[i]][curr_cat] = [] veto_info = {'name': name[i], 'version': version[i], 'start': start[i], 'end': end[i], 'start_pad': start_pad[i], 'end_pad': end_pad[i], } data[ifo[i]][curr_cat].append(veto_info) return data
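# Sketch of how the dictionary returned by parse_veto_definer is laid out,
# using a hypothetical veto definer file name. Hardware injections
# (category 3) land under "CAT_H"; categories above 3 are shifted down by
# one, so category 4 becomes "CAT_3", and so on.
vetoes = parse_veto_definer("H1L1V1-VETO_DEFINER.xml")  # hypothetical path
for cat, definitions in vetoes.get("H1", {}).items():
    print(cat, [(v['name'], v['version']) for v in definitions])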
def open_xmldoc(f, **kwargs): """Try and open an existing LIGO_LW-format file, or create a new Document """ from glue.ligolw.lsctables import use_in from glue.ligolw.ligolw import Document from glue.ligolw.utils import load_filename, load_fileobj use_in(kwargs['contenthandler']) try: # try and load existing file if isinstance(f, string_types): return load_filename(f, **kwargs) if isinstance(f, FILE_LIKE): return load_fileobj(f, **kwargs)[0] except (OSError, IOError): # or just create a new Document return Document()
def fromsegmentxml(file, dict=False, id=None):
    """
    Read a glue.segments.segmentlist from the file object file containing an
    xml segment table.

    Arguments:

      file : file object
        file object for segment xml file

    Keyword Arguments:

      dict : [ True | False ]
        returns a glue.segments.segmentlistdict containing coalesced
        glue.segments.segmentlists keyed by seg_def.name for each entry
        in the contained segment_def_table. Default False

      id : int
        returns a glue.segments.segmentlist object containing only those
        segments matching the given segment_def_id integer
    """
    # load xmldocument and SegmentDefTable and SegmentTables
    xmldoc, digest = utils.load_fileobj(file, gz=file.name.endswith(".gz"),
                                        contenthandler=lsctables.use_in(
                                            ligolw.LIGOLWContentHandler))

    seg_def_table = lsctables.SegmentDefTable.get_table(xmldoc)
    seg_table = lsctables.SegmentTable.get_table(xmldoc)

    if dict:
        segs = segments.segmentlistdict()
    else:
        segs = segments.segmentlist()

    seg_id = {}
    for seg_def in seg_def_table:
        seg_id[int(seg_def.segment_def_id)] = str(seg_def.name)
        if dict:
            segs[str(seg_def.name)] = segments.segmentlist()

    for seg in seg_table:
        if dict:
            segs[seg_id[int(seg.segment_def_id)]]\
                .append(segments.segment(seg.start_time, seg.end_time))
            continue
        if id is not None:
            # when an id is given, keep only the matching segments and
            # skip everything else
            if int(seg.segment_def_id) == id:
                segs.append(segments.segment(seg.start_time, seg.end_time))
            continue
        segs.append(segments.segment(seg.start_time, seg.end_time))

    if dict:
        for seg_name in seg_id.values():
            segs[seg_name] = segs[seg_name].coalesce()
    else:
        segs = segs.coalesce()

    xmldoc.unlink()

    return segs
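# A minimal usage sketch for fromsegmentxml, with a hypothetical file name.
# With dict=True each flag in the segment_definer table gets its own
# coalesced segmentlist.
with open("segments.xml", "rb") as segfile:  # hypothetical file
    seg_dict = fromsegmentxml(segfile, dict=True)
for flag, seglist in seg_dict.items():
    print(flag, abs(seglist))  # abs() of a segmentlist is its livetime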
def load_xml_file(filename): """Wrapper to ligolw's utils.load_filename""" xml_doc = utils.load_filename(filename, gz=filename.endswith("gz"), contenthandler=lsctables.use_in( ligolw.LIGOLWContentHandler)) return xml_doc
def readCoincInspiralFromFiles(fileList, statistic=None):
    """
    read in the Sngl and SimInspiralTables from a list of files
    if Sngls are found, construct coincs, add injections (if any)
    also return Sims (if any)
    @param fileList: list of input files
    @param statistic: instance of coincStatistic, to use in creating coincs
    """
    if not fileList:
        return coincInspiralTable(), None

    if not isinstance(statistic, coincStatistic):
        raise TypeError("invalid statistic, must be coincStatistic")

    sims = None
    coincs = None

    lsctables.use_in(ExtractCoincInspiralTableLIGOLWContentHandler)
    for thisFile in fileList:
        doc = utils.load_filename(
            thisFile, gz=(thisFile or "stdin").endswith(".gz"),
            contenthandler=ExtractCoincInspiralTableLIGOLWContentHandler)
        # extract the sim inspiral table; get_table raises ValueError if
        # the table is absent
        try:
            simInspiralTable = lsctables.SimInspiralTable.get_table(doc)
            if sims:
                sims.extend(simInspiralTable)
            else:
                sims = simInspiralTable
        except ValueError:
            simInspiralTable = None

        # extract the sngl inspiral table, construct coincs
        try:
            snglInspiralTable = lsctables.SnglInspiralTable.get_table(doc)
        except ValueError:
            snglInspiralTable = None
        if snglInspiralTable:
            coincFromFile = coincInspiralTable(snglInspiralTable, statistic)
            if simInspiralTable:
                coincFromFile.add_sim_inspirals(simInspiralTable)
            if coincs:
                coincs.extend(coincFromFile)
            else:
                coincs = coincFromFile

        doc.unlink()

    return coincs, sims
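# A minimal usage sketch for readCoincInspiralFromFiles. The statistic
# name "snr" and file name are illustrative; consult the coincStatistic
# class for the constructor arguments it actually accepts.
stat = coincStatistic("snr")
coincs, sims = readCoincInspiralFromFiles(["THINCA-FILE.xml.gz"], stat)
if coincs is not None:
    print(len(coincs), "coincs read;", len(sims) if sims else 0, "injections")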
def select_segments_by_definer(segment_file, segment_name=None, ifo=None): """ Return the list of segments that match the segment name Parameters ---------- segment_file: str path to segment xml file segment_name: str Name of segment ifo: str, optional Returns ------- seg: list of segments """ from glue.ligolw.ligolw import LIGOLWContentHandler as h lsctables.use_in(h) indoc = ligolw_utils.load_filename(segment_file, False, contenthandler=h) segment_table = table.get_table(indoc, 'segment') seg_def_table = table.get_table(indoc, 'segment_definer') def_ifos = seg_def_table.getColumnByName('ifos') def_names = seg_def_table.getColumnByName('name') def_ids = seg_def_table.getColumnByName('segment_def_id') valid_id = [] for def_ifo, def_name, def_id in zip(def_ifos, def_names, def_ids): if ifo and ifo != def_ifo: continue if segment_name and segment_name != def_name: continue valid_id += [def_id] start = numpy.array(segment_table.getColumnByName('start_time')) start_ns = numpy.array(segment_table.getColumnByName('start_time_ns')) end = numpy.array(segment_table.getColumnByName('end_time')) end_ns = numpy.array(segment_table.getColumnByName('end_time_ns')) start, end = start + 1e-9 * start_ns, end + 1e-9 * end_ns did = segment_table.getColumnByName('segment_def_id') keep = numpy.array([d in valid_id for d in did]) if sum(keep) > 0: return start_end_to_segments(start[keep], end[keep]) else: return segmentlist([])
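# A minimal usage sketch for select_segments_by_definer, with hypothetical
# file and flag names: pull out only the H1 analysis-ready segments.
science = select_segments_by_definer("H1-SEGMENTS.xml",
                                     segment_name="DMT-ANALYSIS_READY",
                                     ifo="H1")
print(len(science), "segments totalling", abs(science), "seconds")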
def load_xml_table(file_name, table_name): """Load xml table from file.""" xml_doc = utils.load_filename( file_name, gz=file_name.endswith("gz"), contenthandler=glsctables.use_in(LIGOLWContentHandler)) return get_table(xml_doc, table_name)
def load_table(sim_file): xml_doc = utils.load_filename(sim_file, contenthandler=lsctables.use_in( ligolw.LIGOLWContentHandler)) sim_inspiral_table = table.get_table(xml_doc, lsctables.SimInspiralTable.tableName) return sim_inspiral_table
def build_content_handler(parent, filter_func): """Build a `~xml.sax.handler.ContentHandler` with a given filter """ from glue.ligolw.lsctables import use_in class _ContentHandler(parent): # pylint: disable=too-few-public-methods def __init__(self, document): super(_ContentHandler, self).__init__(document, filter_func) return use_in(_ContentHandler)
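# A minimal usage sketch for build_content_handler: build a handler that
# parses only the sngl_inspiral table. PartialLIGOLWContentHandler passes
# each element's tag name and attributes to the filter, and
# lsctables.IsTableProperties tests them against a table class.
from glue.ligolw.ligolw import PartialLIGOLWContentHandler
from glue.ligolw import lsctables

def _sngl_inspiral_only(name, attrs):
    return lsctables.IsTableProperties(lsctables.SnglInspiralTable,
                                       name, attrs)

handler = build_content_handler(PartialLIGOLWContentHandler,
                                _sngl_inspiral_only)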
def read_multiinspiral_timeslides_from_files(file_list): """ Read time-slid multiInspiral tables from a list of files """ multis = None time_slides = [] contenthandler = glsctables.use_in(LIGOLWContentHandler) for this_file in file_list: doc = utils.load_filename(this_file, gz=this_file.endswith("gz"), contenthandler=contenthandler) # Extract the time slide table time_slide_table = get_table(doc, lsctables.TimeSlideTable.tableName) slide_mapping = {} curr_slides = {} for slide in time_slide_table: curr_id = int(slide.time_slide_id) if curr_id not in curr_slides: curr_slides[curr_id] = {} curr_slides[curr_id][slide.instrument] = slide.offset elif slide.instrument not in curr_slides[curr_id]: curr_slides[curr_id][slide.instrument] = slide.offset for slide_id, offset_dict in curr_slides.items(): try: # Is the slide already in the list and where? offset_index = time_slides.index(offset_dict) slide_mapping[slide_id] = offset_index except ValueError: # If not then add it time_slides.append(offset_dict) slide_mapping[slide_id] = len(time_slides) - 1 # Extract the multi inspiral table try: multi_inspiral_table = get_table(doc, 'multi_inspiral') # Remap the time slide IDs for multi in multi_inspiral_table: new_id = slide_mapping[int(multi.time_slide_id)] multi.time_slide_id = gilwdchar( f"time_slide:time_slide_id:{new_id}") if multis: multis.extend(multi_inspiral_table) else: multis = multi_inspiral_table except Exception as exc: err_msg = "Unable to read a time-slid multiInspiral table " err_msg += f"from {this_file}." raise RuntimeError(err_msg) from exc return multis, time_slides
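# A minimal usage sketch for read_multiinspiral_timeslides_from_files, with
# hypothetical file names. The returned time_slides list holds one offset
# dictionary per unique slide, e.g. [{'H1': 0.0, 'L1': 5.0}, ...], and the
# multi_inspiral rows have their time_slide_ids remapped to match it.
multis, time_slides = read_multiinspiral_timeslides_from_files(
    ["H1L1-COINC-SLIDES-1.xml.gz", "H1L1-COINC-SLIDES-2.xml.gz"])
print(len(time_slides), "unique slides,", len(multis or []), "triggers")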
def start_end_from_segments(segment_file): """ Return the start and end time arrays from a segment file. Parameters ---------- segment_file: xml segment file Returns ------- start: numpy.ndarray end: numpy.ndarray """ from glue.ligolw.ligolw import LIGOLWContentHandler as h lsctables.use_in(h) indoc = ligolw_utils.load_filename(segment_file, False, contenthandler=h) segment_table = table.get_table(indoc, lsctables.SegmentTable.tableName) start = numpy.array(segment_table.getColumnByName('start_time')) start_ns = numpy.array(segment_table.getColumnByName('start_time_ns')) end = numpy.array(segment_table.getColumnByName('end_time')) end_ns = numpy.array(segment_table.getColumnByName('end_time_ns')) return start + start_ns * 1e-9, end + end_ns * 1e-9
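# A minimal usage sketch for start_end_from_segments, with a hypothetical
# file name. Times come back as float GPS seconds, nanoseconds included.
start, end = start_end_from_segments("H1-SEGMENTS.xml")
print(len(start), "segments,", (end - start).sum(), "seconds of livetime")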
def ReadSnglInspiralFromFiles(fileList, verbose=False, filterFunc=None):
    """
    Read the SnglInspiralTables from a list of files.
    If filterFunc is not None, only keep triggers for which filterFunc
    evaluates to True.  Ex.: filterFunc=lambda sng: sng.snr >= 6.0

    @param fileList: list of input files
    @param verbose: print progress
    """
    # NOTE: this function no longer carries out event ID mangling (AKA
    # reassignment). Please adjust calling codes accordingly!
    # This means that identical event IDs produced by lalapps_thinca in
    # non-slide files having the same GPS start time will stay identical,
    # affecting zerolag and injection runs made over the same data.
    #
    # In consequence, if the calling code is going to reconstruct coincs
    # from the sngl event IDs, and if these include multiple injection
    # runs, coinc finding should be done one file at a time - see the
    # readCoincInspiralFromFiles function in CoincInspiralUtils.py

    sngls = lsctables.New(lsctables.SnglInspiralTable,
                          columns=lsctables.SnglInspiralTable.loadcolumns)

    lsctables.use_in(ExtractSnglInspiralTableLIGOLWContentHandler)
    for i, fname in enumerate(fileList):
        if verbose:
            print(str(i + 1) + "/" + str(len(fileList)) + ": ")
        xmldoc = utils.load_filename(
            fname, verbose=verbose,
            contenthandler=ExtractSnglInspiralTableLIGOLWContentHandler)
        try:
            sngl_table = lsctables.SnglInspiralTable.get_table(xmldoc)
            if filterFunc is not None:
                iterutils.inplace_filter(filterFunc, sngl_table)
        except ValueError:  # some xml files have no sngl table, that's OK
            sngl_table = None
        if sngl_table:
            sngls.extend(sngl_table)

        xmldoc.unlink()  # free memory

    return sngls
def read_ligolw(source, contenthandler=None, verbose=False, non_lsc_tables_ok=True): """Read one or more LIGO_LW format files Parameters ---------- source : `str`, `file`, `list` one or more open files or file paths to read contenthandler : `~xml.sax.handler.ContentHandler`, optional content handler used to parse document verbose : `bool`, optional be verbose when reading files, default: `False` non_lsc_tables_ok : `bool`, optional if `False` error on unrecognised tables in documents, default: `True` Returns ------- xmldoc : :class:`~glue.ligolw.ligolw.Document` the document object as parsed from the file(s) """ from glue.ligolw.ligolw import (Document, LIGOLWContentHandler) from glue.ligolw import types from glue.ligolw.lsctables import use_in from glue.ligolw.utils.ligolw_add import ligolw_add # mock ToPyType to link to numpy dtypes topytype = types.ToPyType.copy() for key in types.ToPyType: if key in types.ToNumPyType: types.ToPyType[key] = numpy.dtype(types.ToNumPyType[key]).type # set default content handler if contenthandler is None: contenthandler = use_in(LIGOLWContentHandler) # read one or more files into a single Document try: return ligolw_add(Document(), file_list(source), contenthandler=contenthandler, verbose=verbose, non_lsc_tables_ok=non_lsc_tables_ok) finally: # replace ToPyType types.ToPyType = topytype
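# A minimal usage sketch for read_ligolw, with hypothetical file names:
# merge several files into one Document via ligolw_add, parsing column
# data straight into numpy types.
xmldoc = read_ligolw(["bank-part1.xml.gz", "bank-part2.xml.gz"],
                     verbose=True)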
def check_segment_availability(grb_name, grb_time, query_start, query_end,
                               offset, ifo, segmentName):
    '''
    Searches +/- offset from GRB time to download the latest segment lists
    then extracts times and puts them into a txt file.
    '''
    args = {'grb_name': grb_name, 'query_start': query_start,
            'query_end': query_end, 'ifo': ifo, 'segmentName': segmentName}
    cmd = ("ligolw_segment_query --database --query-segments "
           "--include-segments '{segmentName}' "
           "--gps-start-time {query_start} --gps-end-time {query_end} "
           "> ./segments{ifo}_grb{grb_name}.xml").format(**args)
    print('>>', cmd)
    print()
    process = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    output, err = process.communicate()

    # try to open the file
    try:
        doc = utils.load_filename(
            "segments{ifo}_grb{grb_name}.xml".format(**args),
            contenthandler=lsctables.use_in(ligolw.LIGOLWContentHandler))
    except Exception:
        raise IOError(
            "Error reading file: segments{ifo}_grb{grb_name}.xml".format(
                **args))

    # extract the segment list from segment:table and store in a txt file
    segs = table.get_table(doc, "segment")
    seglist = segments.segmentlist(
        segments.segment(s.start_time, s.end_time) for s in segs)
    segmentsUtils.tosegwizard(
        open("{ifo}-science_grb{grb_name}.txt".format(**args), 'w'),
        seglist, header=True)

    print(">> %s segments +/-%ds from %ds found:" % (ifo, offset, grb_time))
    for s in segs:
        print("Start:", s.start_time, "End:", s.end_time,
              "Duration:", s.end_time - s.start_time)
    print()
    return
def ReadMultiInspiralFromFiles(fileList):
    """
    Read the multiInspiral tables from a list of files
    @param fileList: list of input files
    """
    if not fileList:
        return multiInspiralTable(), None

    multis = None

    for thisFile in fileList:
        doc = utils.load_filename(
            thisFile, gz=(thisFile or "stdin").endswith(".gz"),
            contenthandler=lsctables.use_in(ligolw.LIGOLWContentHandler))
        # extract the multi inspiral table; get_table raises ValueError if
        # the table is absent
        try:
            multiInspiralTable = lsctables.MultiInspiralTable.get_table(doc)
            if multis:
                multis.extend(multiInspiralTable)
            else:
                multis = multiInspiralTable
        except ValueError:
            multiInspiralTable = None
    return multis
def loadSingleBurst(files, trigs_dict=None): """ loads snglburst tables (produced by Omicron) into trgdict object files - is the list of file names """ if type(files) is str: files = [files] if trigs_dict is None: trigs_dict = trigdict() for file in files: for row in table.get_table( ligolw_utils.load_filename(file, contenthandler=lsctables.use_in( ligolw.LIGOLWContentHandler)), lsctables.SnglBurstTable.tableName): channel = "%s-%s_%s" % (row.ifo, row.channel.replace( "-", "_"), row.search) tcent = row.peak_time + 1e-9 * row.peak_time_ns tstart = row.start_time + 1e-9 * row.start_time_ns dur = row.duration fpeak = row.peak_frequency fcent = row.central_freq bndwth = row.bandwidth amp = row.amplitude snr = row.snr conf = row.confidence chi2 = row.chisq chi2_dof = row.chisq_dof trigger = [ tstart, dur, tcent, fpeak, fcent, bndwth, amp, snr, conf, chi2, chi2_dof ] if channel in trigs_dict.channels(): trigs_dict[channel].append( trigger) ### SingleBurst trigger structure else: trigs_dict[channel] = [trigger] return trigs_dict
from optparse import OptionParser from lal.utils import CacheEntry from glue.ligolw import ligolw from glue.ligolw import lsctables from glue.ligolw import utils from glue.ligolw.utils import process as ligolw_process from lalburst import git_version from lalburst import bucluster lsctables.use_in(ligolw.LIGOLWContentHandler) __author__ = "Kipp Cannon <*****@*****.**>" __version__ = "git id %s" % git_version.id __date__ = git_version.date # # ============================================================================= # # Command Line # # ============================================================================= #
""" cache = lal.Cache() for file in fileList: AddFileToCache(file, cache) return(cache) class SummValueContentHandler(ligolw.PartialLIGOLWContentHandler): """ Content handler that only reads in the SummValue table """ def __init__(self, xmldoc): ligolw.PartialLIGOLWContentHandler.__init__(self, xmldoc, lambda name, attrs: lsctables.IsTableProperties(lsctables.SummValueTable, name, attrs)) try: lsctables.use_in(SummValueContentHandler) except AttributeError: # old glue did not allow .use_in(). # FIXME: remove when we can require the latest version of glue pass def initialise(opts, name, version = None): """ Create suffix and prefix that will be used to name the output files. 'version' is outdated and not used anymore. @param opts : the user arguments (user_tag, gps_end_time and gps_start_time are used). @param name: name of the calling function/executable @return prefix
def parse_veto_definer(veto_def_filename, ifos): """ Parse a veto definer file from the filename and return a dictionary indexed by ifo and veto definer category level. Parameters ---------- veto_def_filename: str The path to the veto definer file ifos: str The list of ifos for which we require information from the veto definer file Returns -------- parsed_definition: dict Returns a dictionary first indexed by ifo, then category level, and finally a list of veto definitions. """ from glue.ligolw import table, lsctables, utils as ligolw_utils from glue.ligolw.ligolw import LIGOLWContentHandler as h lsctables.use_in(h) data = {} for ifo_name in ifos: data[ifo_name] = {} data[ifo_name]['CAT_H'] = [] for cat_num in range(1, 5): data[ifo_name]['CAT_{}'.format(cat_num)] = [] indoc = ligolw_utils.load_filename(veto_def_filename, False, contenthandler=h) veto_table = table.get_table(indoc, 'veto_definer') ifo = veto_table.getColumnByName('ifo') name = veto_table.getColumnByName('name') version = numpy.array(veto_table.getColumnByName('version')) category = numpy.array(veto_table.getColumnByName('category')) start = numpy.array(veto_table.getColumnByName('start_time')) end = numpy.array(veto_table.getColumnByName('end_time')) start_pad = numpy.array(veto_table.getColumnByName('start_pad')) end_pad = numpy.array(veto_table.getColumnByName('end_pad')) for i in range(len(veto_table)): if ifo[i] not in data: continue # The veto-definer categories are weird! Hardware injections are stored # in "3" and numbers above that are bumped up by one (although not # often used any more). So we remap 3 to H and anything above 3 to # N-1. 2 and 1 correspond to 2 and 1 (YAY!) if category[i] > 3: curr_cat = "CAT_{}".format(category[i]-1) elif category[i] == 3: curr_cat = "CAT_H" else: curr_cat = "CAT_{}".format(category[i]) veto_info = {'name': name[i], 'version': version[i], 'full_name': name[i]+':'+str(version[i]), 'start': start[i], 'end': end[i], 'start_pad': start_pad[i], 'end_pad': end_pad[i], } data[ifo[i]][curr_cat].append(veto_info) return data
""" import os, copy import urlparse import logging from glue import segments, lal from glue.ligolw import utils, table, lsctables, ligolw from pycbc.workflow.core import OutSegFile, File, FileList, make_analysis_dir from pycbc.frame import datafind_connection class ContentHandler(ligolw.LIGOLWContentHandler): pass lsctables.use_in(ContentHandler) def setup_datafind_workflow(workflow, scienceSegs, outputDir, segFilesList, tag=None): """ Setup datafind section of the workflow. This section is responsible for generating, or setting up the workflow to generate, a list of files that record the location of the frame files needed to perform the analysis. There could be multiple options here, the datafind jobs could be done at run time or could be put into a dag. The subsequent jobs will know what was done here from the OutFileList containing the datafind jobs (and the Dagman nodes if appropriate. For now the only implemented option is to generate the datafind files at runtime. This module can also check if the frameFiles actually exist, check whether the obtained segments line up with the original ones and update the science segments to reflect missing data files.
def ReadMultiInspiralTimeSlidesFromFiles(fileList,
                                         generate_output_tables=False):
    """
    Read time-slid multiInspiral tables from a list of files
    @param fileList: list of input files
    """
    if not fileList:
        return multiInspiralTable(), None

    multis = None
    timeSlides = []

    segmentDict = {}
    for thisFile in fileList:
        doc = utils.load_filename(thisFile,
                                  gz=(thisFile or "stdin").endswith(".gz"),
                                  contenthandler=lsctables.use_in(
                                      ligolw.LIGOLWContentHandler))
        # Extract the time slide table
        timeSlideTable = table.get_table(doc,
                                         lsctables.TimeSlideTable.tableName)
        slideMapping = {}
        currSlides = {}
        # NOTE: I think some of this is duplicated in the glue definition of
        # the time slide table. Probably should move over to that
        for slide in timeSlideTable:
            currID = int(slide.time_slide_id)
            if currID not in currSlides:
                currSlides[currID] = {}
                currSlides[currID][slide.instrument] = slide.offset
            elif slide.instrument not in currSlides[currID]:
                currSlides[currID][slide.instrument] = slide.offset

        for slideID, offsetDict in currSlides.items():
            try:
                # Is the slide already in the list and where?
                offsetIndex = timeSlides.index(offsetDict)
                slideMapping[slideID] = offsetIndex
            except ValueError:
                # If not then add it
                timeSlides.append(offsetDict)
                slideMapping[slideID] = len(timeSlides) - 1

        # Get the mapping table
        segmentMap = {}
        timeSlideMapTable = table.get_table(
            doc, lsctables.TimeSlideSegmentMapTable.tableName)
        for entry in timeSlideMapTable:
            segmentMap[int(entry.segment_def_id)] = int(entry.time_slide_id)

        # Extract the segment table
        segmentTable = table.get_table(doc, lsctables.SegmentTable.tableName)
        for entry in segmentTable:
            currSlideId = segmentMap[int(entry.segment_def_id)]
            currSeg = entry.get()
            if slideMapping[currSlideId] not in segmentDict:
                segmentDict[slideMapping[currSlideId]] = \
                    segments.segmentlist()
            segmentDict[slideMapping[currSlideId]].append(currSeg)
            segmentDict[slideMapping[currSlideId]].coalesce()

        # extract the multi inspiral table; a missing table is a real error
        # here, so let get_table's exception propagate
        multiInspiralTable = table.get_table(
            doc, lsctables.MultiInspiralTable.tableName)
        # Remap the time slide IDs
        for multi in multiInspiralTable:
            newID = slideMapping[int(multi.time_slide_id)]
            multi.time_slide_id = ilwd.ilwdchar(
                "time_slide:time_slide_id:%d" % (newID))
        if multis:
            multis.extend(multiInspiralTable)
        else:
            multis = multiInspiralTable

    if not generate_output_tables:
        return multis, timeSlides, segmentDict
    else:
        # Make a new time slide table
        timeSlideTab = lsctables.New(lsctables.TimeSlideTable)

        for slideID, offsetDict in enumerate(timeSlides):
            for instrument in offsetDict.keys():
                currTimeSlide = lsctables.TimeSlide()
                currTimeSlide.instrument = instrument
                currTimeSlide.offset = offsetDict[instrument]
                currTimeSlide.time_slide_id = ilwd.ilwdchar(
                    "time_slide:time_slide_id:%d" % (slideID))
                currTimeSlide.process_id = ilwd.ilwdchar(
                    "process:process_id:%d" % (0))
                timeSlideTab.append(currTimeSlide)

        # Make a new mapping table
        timeSlideSegMapTab = lsctables.New(lsctables.TimeSlideSegmentMapTable)

        for i in range(len(timeSlides)):
            currMapEntry = lsctables.TimeSlideSegmentMap()
            currMapEntry.time_slide_id = ilwd.ilwdchar(
                "time_slide:time_slide_id:%d" % (i))
            currMapEntry.segment_def_id = ilwd.ilwdchar(
                "segment_def:segment_def_id:%d" % (i))
            timeSlideSegMapTab.append(currMapEntry)

        # Make a new segment table
        newSegmentTable = lsctables.New(lsctables.SegmentTable)

        segmentIDCount = 0
        for i in range(len(timeSlides)):
            currSegList = segmentDict[i]
            for seg in currSegList:
                currSegment = lsctables.Segment()
                currSegment.segment_id = ilwd.ilwdchar(
                    "segment:segment_id:%d" % (segmentIDCount))
                segmentIDCount += 1
                currSegment.segment_def_id = ilwd.ilwdchar(
                    "segment_def:segment_def_id:%d" % (i))
                currSegment.process_id = ilwd.ilwdchar(
                    "process:process_id:%d" % (0))
                currSegment.set(seg)
                currSegment.creator_db = -1
                currSegment.segment_def_cdb = -1
                newSegmentTable.append(currSegment)
        return (multis, timeSlides, segmentDict, timeSlideTab,
                newSegmentTable, timeSlideSegMapTab)
def setup_roq(cp):
    """
    Generates cp objects with the different ROQs applied
    """
    use_roq = False
    if cp.has_option('paths', 'roq_b_matrix_directory') or cp.has_option(
            'paths', 'computeroqweights'):
        if not cp.has_option('analysis', 'roq'):
            print("Warning: If you are attempting to enable ROQ by "
                  "specifying roq_b_matrix_directory or computeroqweights,\n"
                  "please use analysis.roq in your config file in future. "
                  "Enabling ROQ.")
            cp.set('analysis', 'roq', True)
    if not cp.getboolean('analysis', 'roq'):
        yield cp
        return  # nothing more to yield when ROQ is disabled
    from numpy import genfromtxt, array
    path = cp.get('paths', 'roq_b_matrix_directory')
    if not os.path.isdir(path):
        print("The ROQ directory %s does not seem to exist\n" % path)
        sys.exit(1)
    use_roq = True
    roq_paths = os.listdir(path)
    roq_params = {}
    roq_force_flow = None

    if cp.has_option('lalinference', 'roq_force_flow'):
        roq_force_flow = cp.getfloat('lalinference', 'roq_force_flow')
        print("WARNING: Forcing the f_low to ", str(roq_force_flow), "Hz")
        print("WARNING: Overwriting user choice of flow, srate, seglen, "
              "and (mc_min, mc_max and q-min) or "
              "(mass1_min, mass1_max, mass2_min, mass2_max)")

    def key(item):  # to order the ROQ bases
        return float(item[1]['seglen'])

    coinc_xml_obj = None
    row = None

    # Get file object of coinc.xml
    if opts.gid is not None:
        from ligo.gracedb.rest import GraceDb
        gid = opts.gid
        cwd = os.getcwd()
        if cp.has_option('analysis', 'service-url'):
            client = GraceDb(cp.get('analysis', 'service-url'))
        else:
            client = GraceDb()
        coinc_xml_obj = ligolw_utils.load_fileobj(
            client.files(gid, "coinc.xml"),
            contenthandler=lsctables.use_in(ligolw.LIGOLWContentHandler))[0]
    elif cp.has_option('input', 'coinc-xml'):
        coinc_xml_obj = ligolw_utils.load_fileobj(
            open(cp.get('input', 'coinc-xml'), "rb"),
            contenthandler=lsctables.use_in(ligolw.LIGOLWContentHandler))[0]

    # Get sim_inspiral from injection file
    if cp.has_option('input', 'injection-file'):
        print("Only 0-th event in the XML table will be considered while "
              "running with ROQ\n")
        row = lsctables.SimInspiralTable.get_table(
            ligolw_utils.load_filename(cp.get('input', 'injection-file'),
                                       contenthandler=lsctables.use_in(
                                           ligolw.LIGOLWContentHandler)))[0]

    roq_bounds = pipe_utils.Query_ROQ_Bounds_Type(path, roq_paths)
    if roq_bounds == 'chirp_mass_q':
        print('ROQ has bounds in chirp mass and mass-ratio')
        mc_priors, trigger_mchirp = pipe_utils.get_roq_mchirp_priors(
            path, roq_paths, roq_params, key,
            coinc_xml_obj=coinc_xml_obj, sim_inspiral=row)
    elif roq_bounds == 'component_mass':
        print('ROQ has bounds in component masses')
        # get component mass bounds, then compute the chirp mass that can
        # be safely covered; further below we pass along the component mass
        # bounds to the sampler, not the tighter chirp-mass, q bounds
        m1_priors, m2_priors, trigger_mchirp = \
            pipe_utils.get_roq_component_mass_priors(
                path, roq_paths, roq_params, key,
                coinc_xml_obj=coinc_xml_obj, sim_inspiral=row)
        mc_priors = {}
        for (roq, m1_prior), (roq2, m2_prior) in zip(m1_priors.items(),
                                                     m2_priors.items()):
            mc_priors[roq] = sorted([
                pipe_utils.mchirp_from_components(m1_prior[1], m2_prior[0]),
                pipe_utils.mchirp_from_components(m1_prior[0], m2_prior[1])
            ])

    if cp.has_option('lalinference', 'trigger_mchirp'):
        trigger_mchirp = float(cp.get('lalinference', 'trigger_mchirp'))
    roq_mass_freq_scale_factor = pipe_utils.get_roq_mass_freq_scale_factor(
        mc_priors, trigger_mchirp, roq_force_flow)
    if roq_mass_freq_scale_factor != 1.:
        print(
            'WARNING: Rescaling ROQ basis, please ensure it is allowed with the model used.'
        )

    # If the true chirp mass is unknown, add variations over the mass bins
    if opts.gid is not None or (opts.injections is not None or cp.has_option(
            'input', 'injection-file')) or cp.has_option(
                'lalinference', 'trigger_mchirp') or cp.has_option(
                    'input', 'coinc-xml'):
        for mc_prior in mc_priors:
            mc_priors[mc_prior] = array(mc_priors[mc_prior])
        # find mass bin containing the trigger
        trigger_bin = None
        for roq in roq_paths:
            if (mc_priors[roq][0] * roq_mass_freq_scale_factor
                    <= trigger_mchirp
                    <= mc_priors[roq][1] * roq_mass_freq_scale_factor):
                trigger_bin = roq
                print('Prior in Mchirp will be ['
                      + str(mc_priors[roq][0] * roq_mass_freq_scale_factor)
                      + ','
                      + str(mc_priors[roq][1] * roq_mass_freq_scale_factor)
                      + '] to contain the trigger Mchirp '
                      + str(trigger_mchirp))
                break
        roq_paths = [trigger_bin]
    else:
        for mc_prior in mc_priors:
            mc_priors[mc_prior] = array(
                mc_priors[mc_prior]) * roq_mass_freq_scale_factor

    # write the master configparser
    cur_basedir = cp.get('paths', 'basedir')
    masterpath = os.path.join(cur_basedir, 'config.ini')
    with open(masterpath, 'w') as cpfile:
        cp.write(cpfile)

    for roq in roq_paths:
        this_cp = configparser.ConfigParser()
        this_cp.optionxform = str
        this_cp.read(masterpath)
        basedir = this_cp.get('paths', 'basedir')
        for dirs in 'basedir', 'daglogdir', 'webdir':
            val = this_cp.get('paths', dirs)
            newval = os.path.join(val, roq)
            mkdirs(newval)
            this_cp.set('paths', dirs, newval)
        this_cp.set('paths', 'roq_b_matrix_directory',
                    os.path.join(cp.get('paths', 'roq_b_matrix_directory'),
                                 roq))
        flow = roq_params[roq]['flow'] / roq_mass_freq_scale_factor
        srate = 2. * roq_params[roq]['fhigh'] / roq_mass_freq_scale_factor
        # if srate > 8192:
        #     srate = 8192
        seglen = roq_params[roq]['seglen'] * roq_mass_freq_scale_factor
        this_cp.set('engine', 'srate', str(srate))
        this_cp.set('engine', 'seglen', str(seglen))
        if this_cp.has_option('lalinference', 'flow'):
            tmp = this_cp.get('lalinference', 'flow')
            tmp = eval(tmp)
            ifos = tmp.keys()
        else:
            tmp = {}
            ifos = eval(this_cp.get('analysis', 'ifos'))
        for i in ifos:
            tmp[i] = flow
        this_cp.set('lalinference', 'flow', str(tmp))
        if roq_bounds == 'chirp_mass_q':
            mc_min = mc_priors[roq][0] * roq_mass_freq_scale_factor
            mc_max = mc_priors[roq][1] * roq_mass_freq_scale_factor
            # params.dat uses the convention q>1 so our q_min is the
            # inverse of their qmax
            q_min = 1. / float(roq_params[roq]['qmax'])
            this_cp.set('engine', 'chirpmass-min', str(mc_min))
            this_cp.set('engine', 'chirpmass-max', str(mc_max))
            this_cp.set('engine', 'q-min', str(q_min))
            this_cp.set('engine', 'comp-min',
                        str(max(roq_params[roq]['compmin']
                                * roq_mass_freq_scale_factor,
                                mc_min * pow(1 + q_min, 1. / 5.)
                                * pow(q_min, 2. / 5.))))
            this_cp.set('engine', 'comp-max',
                        str(mc_max * pow(1 + q_min, 1. / 5.)
                            * pow(q_min, -3. / 5.)))
        elif roq_bounds == 'component_mass':
            m1_min = m1_priors[roq][0]
            m1_max = m1_priors[roq][1]
            m2_min = m2_priors[roq][0]
            m2_max = m2_priors[roq][1]
            this_cp.set('engine', 'mass1-min', str(m1_min))
            this_cp.set('engine', 'mass1-max', str(m1_max))
            this_cp.set('engine', 'mass2-min', str(m2_min))
            this_cp.set('engine', 'mass2-max', str(m2_max))
        yield this_cp
#from sbank import git_version FIXME from lalinspiral.sbank.bank import Bank from lalinspiral.sbank.tau0tau3 import proposals from lalinspiral.sbank.psds import noise_models, read_psd, get_PSD from lalinspiral.sbank.waveforms import waveforms, SnglInspiralTable import lal import lalsimulation as lalsim class ContentHandler(ligolw.LIGOLWContentHandler): pass lsctables.use_in(ContentHandler) usage = """ lalapps_cbc_sbank: This program generates a template bank for compact binary searches covering a given region of mass and spin parameter space. The program supports the waveform approximants listed below and is designed to be easily extensible to other waveform approximants as they become available (see waveforms.py for details). Supported template approximants: \t%s Example command lines: ** Generate a template bank of positively aligned-spin
def load_injections(fname): from glue.ligolw import lsctables, utils, ligolw lsctables.use_in(ligolw.LIGOLWContentHandler) xmldoc = utils.load_filename(fname, contenthandler=ligolw.LIGOLWContentHandler) return lsctables.SimInspiralTable.get_table(xmldoc)
    def startElement(self, name, attrs):
        if 'Name' in attrs and table.Table.TableName(
                attrs['Name']) == self.tabname:
            self.tableElementName = name
            # Got the right table, let's see if it's the right event
            ligolw.LIGOLWContentHandler.startElement(self, name, attrs)
            self.intable = True
        elif self.intable:  # We are in the correct table
            ligolw.LIGOLWContentHandler.startElement(self, name, attrs)

    def endElement(self, name):
        if self.intable:
            ligolw.LIGOLWContentHandler.endElement(self, name)
        if self.intable and name == self.tableElementName:
            self.intable = False

lsctables.use_in(LIGOLWContentHandlerExtractSimBurstTable)

posterior_name_to_sim_burst_extractor = {
    'frequency': lambda sb: sb.frequency,
    'duration': lambda sb: sb.duration,
    'quality': lambda sb: sb.q,
    'hrss': lambda sb: sb.hrss,
    'psi': lambda sb: sb.psi,
    'time': lambda sb: sb.time_geocent_gps + 1e-9 * sb.time_geocent_gps_ns,
    'ra': lambda sb: sb.ra,
    'dec': lambda sb: sb.dec,
    'polar_angle': lambda sb: sb.pol_ellipse_angle,
    'polar_eccentricity': lambda sb: sb.pol_ellipse_e,
    'alpha': lambda sb: sb.pol_ellipse_angle,
}
parser.add_option('--injXML', action='store', type='string', dest='injxml',
                  help='sim_inspiral XML file for injections')
parser.add_option('--outdir', action='store', type='string',
                  help='output directory')
parser.add_option('--postsamples', action='store', type='string',
                  default='posterior_samples.dat',
                  help='filename for posterior samples files')
parser.add_option('--par', action='append', default=[], type='string',
                  help='parameter names for the p-p plot')
parser.add_option('--skyPPfolder', action='store', dest='skypp',
                  type='string', default=None,
                  help='Path to folder containing png/pdf with 2D skyarea '
                       'PP plots')
(options, args) = parser.parse_args()

injs = lsctables.SimInspiralTable.get_table(
    utils.load_filename(options.injxml,
                        contenthandler=lsctables.use_in(
                            ligolw.LIGOLWContentHandler)))
if options.par == []:
    parameters = ['m1', 'm2', 'mc', 'eta', 'q', 'theta_jn', 'a1', 'a2',
                  'tilt1', 'tilt2', 'phi12', 'phi_jl', 'ra', 'dec',
                  'distance', 'time', 'phi_orb', 'psi']
else:
    parameters = options.par

try:
    os.mkdir(options.outdir)
except OSError:  # output directory already exists
    pass

pvalues = {}
posfiles = args
Ninj = 0
for index, posfile in enumerate(posfiles):
} # # Remove everything between the dashed lines once we get rid of xml # ----------------------------------------------------------------------------- # from glue.ligolw import utils as ligolw_utils from glue.ligolw import ligolw, table, lsctables # dummy class needed for loading LIGOLW files class LIGOLWContentHandler(ligolw.LIGOLWContentHandler): pass lsctables.use_in(LIGOLWContentHandler) # Map parameter names used in pycbc to names used in the sim_inspiral # table, if they are different sim_inspiral_map = { 'ra': 'longitude', 'dec': 'latitude', 'approximant': 'waveform', } def set_sim_data(inj, field, data): """Sets data of a SimInspiral instance.""" try: sim_field = sim_inspiral_map[field] except KeyError:
from glue.ligolw import ligolw, lsctables, table, utils import matplotlib matplotlib.use('Agg') import matplotlib.mlab as mlab import matplotlib.pyplot as plt import pycbc.pnutils import pycbc.events from pycbc.waveform import get_td_waveform, frequency_from_polarizations, amplitude_from_polarizations import lal logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO) class DefaultContentHandler(ligolw.LIGOLWContentHandler): pass lsctables.use_in(DefaultContentHandler) parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--coinc-file', type=str, required=True, help='HDF file containing coincident CBC triggers') parser.add_argument('--single-ifo-trigs', type=str, required=True, help='HDF file containing single IFO CBC triggers') parser.add_argument('--ifo', type=str, required=True, help='IFO, L1 or H1') parser.add_argument('--tmpltbank-file', type=str, required=True, help='HDF file containing template information for CBC search') parser.add_argument('--output-file', type=str, required=True, help='Full path to output file') parser.add_argument('--loudest-event-number', type=int, required=True, default=1, help='Script will plot the Nth loudest coincident trigger') parser.add_argument('--omicron-dir', type=str, required=True,
import matplotlib.mlab as mlab import matplotlib.pyplot as plt from pycbc.workflow.segment import fromsegmentxml import pycbc.pnutils import pycbc.events from pycbc.waveform import get_td_waveform, frequency_from_polarizations, amplitude_from_polarizations import lal logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO) class DefaultContentHandler(ligolw.LIGOLWContentHandler): pass lsctables.use_in(DefaultContentHandler) parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--coinc-file', type=str, required=True, help='HDF file containing coincident CBC triggers') parser.add_argument('--single-ifo-trigs', type=str, required=True, help='HDF file containing single IFO CBC triggers') parser.add_argument('--ifo', type=str, required=True, help='IFO, L1 or H1') parser.add_argument( '--tmpltbank-file', type=str, required=True,
help="") ( opts , args ) = parser.parse_args() required_opts = ["name", "time", "padding_time", "config_file", "injection_config", "log_path"] for opt in required_opts: if getattr(opts, opt) is None: raise ValueError("--%s is a required option" % opt) if not opts.grb_file and (not opts.time or not opts.name): raise ValueError("Either a valid GRB xml file must be specified or the GPS time and name of the GRB!") ############################################################################## # find available data if opts.grb_file: xmldoc = utils.load_filename(opts.grb_file, gz=opts.grb_file.endswith('.gz'), contenthandler = lsctables.use_in(ligolw.LIGOLWContentHandler)) ext_table = lsctables.ExtTriggersTable.get_table(xmldoc) grb_time = ext_table[0].start_time grb_name = os.path.basename(opts.grb_file)[3:-4] grb_ra = ext_table[0].event_ra grb_dec = ext_table[0].event_dec else: grb_name = opts.name[0] grb_time = int(opts.time) grb_ra = float(opts.ra) grb_dec = float(opts.dec) exttrig_config_file, grb_ifolist, onSourceSegment, offSourceSegment = exttrig_dataquery.exttrig_dataquery(grb_name, grb_time, grb_ra, grb_dec, opts.offset, opts.config_file, opts.extend, opts.useold, opts.make_plots, opts.make_xml) ############################################################################## # create the config parser object and exttrig_dataquery ini file
from glue.ligolw import ligolw, lsctables
from glue.ligolw import utils
import lalinference.cmap
from lalinference import fits
from lalinference import plot
import healpy as hp
import numpy as np
import os
import pickle
import sky_area.sky_area_clustering as sac
import matplotlib.pyplot as pp


class LIGOLWContentHandler(ligolw.LIGOLWContentHandler):
    pass

lsctables.use_in(LIGOLWContentHandler)


def plot_skymap(output, skypost, pixresol=np.pi/180.0, nest=True, inj=None,
                fast=True):
    # choose the smallest power-of-two nside at or below the target resolution
    nside = 1
    while hp.nside2resol(nside) > pixresol:
        nside *= 2

    pix_post = skypost.as_healpix(nside, nest=nest, fast=fast)

    fig = pp.figure(frameon=False)
    ax = pp.subplot(111, projection='astro mollweide')
    ax.cla()
    ax.grid()
    plot.healpix_heatmap(pix_post, nest=nest, vmin=0.0,
                         vmax=np.max(pix_post), cmap=pp.get_cmap('cylon'))

    if inj is not None:
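# The while loop above selects the smallest power-of-two HEALPix nside whose
# pixel size does not exceed the requested resolution. A standalone check
# (requires healpy; the printed values are illustrative):
import healpy as hp
import numpy as np

target = np.pi / 180.0  # one degree in radians
nside = 1
while hp.nside2resol(nside) > target:
    nside *= 2
print("nside=%d -> %.1f arcmin" % (nside, hp.nside2resol(nside, arcmin=True)))
# nside=64 -> 55.0 arcmin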
def createInjectionFile(hipe_dir, cp, cpinj, injrun, injection_segment,
                        source_file, ipn_gps=None, usertag=None,
                        verbose=False):
    """
    Creates a master injection file containing all injections for this run.
    Also reads the file and returns its contents.
    """
    cpinj = copy.deepcopy(cpinj)

    # get the number of injections to be made
    for opt in ['exttrig-inj-start', 'exttrig-inj-stop']:
        value = int(cpinj.get(injrun, opt))
        cpinj.remove_option(injrun, opt)
        if 'start' in opt:
            injStart = value
        else:
            injEnd = value
    seed = hash_n_bits(hipe_dir, 31)
    numberInjections = injEnd - injStart + 1  # e.g., 1 through 5000 inclusive

    # get the jitter parameters
    if cpinj.has_option(injrun, "jitter-skyloc"):
        jitter_sigma_deg = cpinj.getfloat(injrun, "jitter-skyloc")
        cpinj.remove_option(injrun, "jitter-skyloc")
    else:
        jitter_sigma_deg = None

    # check if the specific Fermi systematic error needs to
    # be added to the location jittering
    if cpinj.has_option(injrun, "jitter-skyloc-fermi"):
        jitter_skyloc_fermi = cpinj.getboolean(injrun, "jitter-skyloc-fermi")
        cpinj.remove_option(injrun, "jitter-skyloc-fermi")
    else:
        jitter_skyloc_fermi = False

    # check if we should align the total angular momentum
    if cpinj.has_option(injrun, "align-total-spin"):
        align_total_spin = cpinj.getboolean(injrun, "align-total-spin")
        cpinj.remove_option(injrun, "align-total-spin")
    else:
        align_total_spin = False

    # set all the arguments
    argument = []
    for (opt, value) in cpinj.items(injrun):
        argument.append("--%s %s" % (opt, value))

    # add arguments on times and time-intervals
    interval = abs(injection_segment)
    injInterval = interval / numberInjections
    argument.append(" --gps-start-time %d" % injection_segment[0])
    argument.append(" --gps-end-time %d" % injection_segment[1])
    argument.append(" --time-interval %f" % injInterval)
    argument.append(" --time-step %f" % injInterval)
    argument.append(" --seed %d" % seed)
    argument.append(" --user-tag %s" % usertag)

    # set output file as exttrig-file or IPN file with IPN GPS time
    if ipn_gps:
        argument.append(" --ipn-gps-time %d" % ipn_gps)
    else:
        argument.append(" --exttrig-file %s" % source_file)

    # execute the command
    executable = cp.get("condor", "inspinj")
    arguments = " ".join(argument)
    inspiralutils.make_external_call(executable + " " + arguments,
                                     show_command=verbose)

    # recreate the output filename
    injFile = "HL-INJECTIONS_" + str(seed)
    if usertag is not None:
        injFile += "_" + usertag
    injFile += "-%d-%d.xml" % (injection_segment[0], abs(injection_segment))

    # move it into the GRB directory to avoid clutter
    new_injFile = hipe_dir + "/" + injFile
    os.rename(injFile, new_injFile)

    # jitter the sky locations of the injections
    if jitter_sigma_deg is not None:
        # rename the original, then have ligolw_cbc_jitter_skyloc create
        # a new one
        os.rename(new_injFile, new_injFile + ".prejitter")
        cmd = ["ligolw_cbc_jitter_skyloc"]
        if jitter_skyloc_fermi:
            cmd.append("--apply-fermi-error")
        cmd.extend(["--jitter-sigma-deg", str(jitter_sigma_deg),
                    "--output-file", new_injFile,
                    new_injFile + ".prejitter"])
        if verbose:
            print(" ".join(cmd))
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            raise subprocess.CalledProcessError(
                p.returncode, "%s: %s" % (" ".join(cmd), err))

    # rotate the binary so that total angular momentum has the current
    # inclination
    if align_total_spin:
        # rename the original then have ligolw_cbc_align_total_spin create
        # a new one
        os.rename(new_injFile, new_injFile + ".prealign")
        cmd = ["ligolw_cbc_align_total_spin", "--output-file", new_injFile,
               new_injFile + ".prealign"]
        if verbose:
            print(" ".join(cmd))
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            raise subprocess.CalledProcessError(
                p.returncode, "%s: %s" % (" ".join(cmd), err))

    # read in the file and the tables
    doc = utils.load_filename(
        new_injFile,
        contenthandler=lsctables.use_in(ligolw.LIGOLWContentHandler))
    sims = lsctables.SimInspiralTable.get_table(doc)

    return sims, injInterval, numberInjections, new_injFile
print >> index_html_obj, "Target Channel: %s" % gwchannel
index_html_obj.close()

#=============================================
# sciseg query
#=============================================
if opts.ignore_science_segments:
    logger.info("ignoring science segments")
    scisegs = [[gpsstart, gpsstart+stride]]
else:
    logger.info("generating science segments")
    try:
        seg_xml_file = idq.segment_query(
            config, gpsstart, gpsstart+stride,
            url=config.get("get_science_segments", "segdb"))

        lsctables.use_in(ligolw.LIGOLWContentHandler)
        xmldoc = utils.load_fileobj(
            seg_xml_file, contenthandler=ligolw.LIGOLWContentHandler)[0]

        seg_file = "%s/science_segments-%d-%d.xml.gz" % (
            this_sumdir, int(gpsstart), int(stride))
        logger.info("writing science segments to file : %s" % seg_file)
        utils.write_filename(xmldoc, seg_file, gz=seg_file.endswith(".gz"))

        (scisegs, coveredseg) = idq.extract_dq_segments(
            seg_file, config.get('get_science_segments', 'include'))
    except Exception as e:
        traceback.print_exc()
        logger.info("ERROR: segment generation failed. "
                    "Skipping this summary period.")
        gpsstart += stride
        continue
def check_segment_availability(grb_name, grb_time, query_start, query_end,
                               offset, ifo, segmentName):
    '''
    Searches +/- offset from the GRB time to download the latest segment
    lists, then extracts the times and writes them into a txt file.
    '''
    args = {'grb_name': grb_name,
            'query_start': query_start,
            'query_end': query_end,
            'ifo': ifo,
            'segmentName': segmentName}
    cmd = ("ligolw_segment_query --database --query-segments "
           "--include-segments '{segmentName}' "
           "--gps-start-time {query_start} --gps-end-time {query_end} "
           "> ./segments{ifo}_grb{grb_name}.xml").format(**args)
    print '>>', cmd
    print
    process = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    output, err = process.communicate()

    # try to open the file
    try:
        doc = utils.load_filename(
            "segments{ifo}_grb{grb_name}.xml".format(**args),
            contenthandler=lsctables.use_in(ligolw.LIGOLWContentHandler))
    except Exception:
        raise IOError(
            "Error reading file: segments{ifo}_grb{grb_name}.xml".format(
                **args))

    # extract the segment list from segment:table and store in a txt file
    segs = table.get_table(doc, "segment")
    seglist = segments.segmentlist(
        segments.segment(s.start_time, s.end_time) for s in segs)
    segmentsUtils.tosegwizard(
        open("{ifo}-science_grb{grb_name}.txt".format(**args), 'w'),
        seglist, header=True)

    print ">> %s segments +/-%ds from %ds found:" % (ifo, offset, grb_time)
    for s in segs:
        print "Start:", s.start_time, "End:", s.end_time, \
            "Duration:", s.end_time - s.start_time
    print

    return
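# Illustrative call; the GRB metadata, GPS window and science flag name are
# placeholders, and the ligolw_segment_query executable must be on the PATH.
check_segment_availability(grb_name="100316D", grb_time=953161199,
                           query_start=953161199 - 2500,
                           query_end=953161199 + 2500,
                           offset=2500, ifo="H1",
                           segmentName="H1:DMT-SCIENCE:4")
# writes segmentsH1_grb100316D.xml and H1-science_grb100316D.txt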
def table_from_file(f, tablename, columns=None, filt=None,
                    contenthandler=None, nproc=1, verbose=False):
    """Read a `~glue.ligolw.table.Table` from a LIGO_LW file.

    Parameters
    ----------
    f : `file`, `str`, `CacheEntry`, `list`, `Cache`
        object representing one or more files. One of

        - an open `file`
        - a `str` pointing to a file path on disk
        - a formatted `~lal.utils.CacheEntry` representing one file
        - a `list` of `str` file paths
        - a formatted `~glue.lal.Cache` representing many files

    tablename : `str`
        name of the table to read.

    columns : `list`, optional
        list of column name strings to read, default all.

    filt : `function`, optional
        function by which to `filter` events. The callable must accept as
        input a row of the table and return `True`/`False`.

    contenthandler : `~glue.ligolw.ligolw.LIGOLWContentHandler`
        SAX content handler for parsing LIGO_LW documents.

    nproc : `int`, optional
        number of parallel processes to use when reading from a cache,
        default 1.

    verbose : `bool`, optional
        print progress information, default `False`.

    Returns
    -------
    table : `~glue.ligolw.table.Table`
        `Table` of data with given columns filled
    """
    from glue.ligolw.ligolw import Document
    from glue.ligolw import (table, lsctables)
    from glue.ligolw.utils.ligolw_add import ligolw_add

    # find table class
    tableclass = lsctables.TableByName[table.Table.TableName(tablename)]

    # get content handler
    if contenthandler is None:
        contenthandler = get_partial_contenthandler(tableclass)

    # allow cache multiprocessing
    if nproc != 1:
        return tableclass.read(f, columns=columns,
                               contenthandler=contenthandler,
                               nproc=nproc, format='cache')
    lsctables.use_in(contenthandler)

    # set columns to read
    if columns is not None:
        _oldcols = tableclass.loadcolumns
        tableclass.loadcolumns = columns

    # generate Document and populate
    files = file_list(f)
    xmldoc = Document()
    ligolw_add(xmldoc, files, non_lsc_tables_ok=True,
               contenthandler=contenthandler, verbose=verbose)

    # extract table
    out = tableclass.get_table(xmldoc)
    if verbose:
        gprint('%d rows found in %s table' % (len(out), out.tableName))

    # filter output
    if filt:
        if verbose:
            gprint('filtering rows ...', end=' ')
        try:
            out_ = out.copy()
        except AttributeError:
            out_ = table.new_from_template(out)
        out_.extend(filter(filt, out))
        out = out_
        if verbose:
            gprint('%d rows remaining\n' % len(out))

    # reset loadcolumns and return
    if columns is not None:
        tableclass.loadcolumns = _oldcols

    return out
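# Example use of table_from_file (a sketch; the file name is a placeholder):
# read two columns of a sngl_inspiral table and keep only the loud rows.
events = table_from_file('H1-INSPIRAL-100000-100.xml.gz', 'sngl_inspiral',
                         columns=['end_time', 'snr'],
                         filt=lambda row: row.snr > 8,
                         verbose=True)
print('%d triggers with SNR > 8' % len(events))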
def exttrig_dataquery(grb_name, grb_time, grb_ra, grb_dec, offset,
                      config_file, extend=False, useold=False,
                      make_plots=False, make_xml=False):
    '''
    Finds science time of all available IFOs.
    '''
    ##########################################################################
    # get segment duration and minimum amount of science time
    ##########################################################################

    # read the configuration file
    cp = ConfigParser.ConfigParser()
    cp.read(config_file)

    # define hardcoded variables
    basic_ifolist = ifolist = ['H1', 'H2', 'L1', 'V1']
    catlist = [1, 2, 3]
    sensitivity_dict = {"H1": 1, "L1": 2, "H2": 3, "V1": 4, "G1": 5}

    # get segment length from configuration file
    pad_data = int(cp.get('data', 'pad-data'))
    if cp.has_option('data', 'segment-duration'):
        blockDuration = segmentDuration = psdDuration = \
            int(cp.get('data', 'segment-duration'))
    elif cp.has_option('data', 'segment-length'):
        blockDuration = segmentDuration = psdDuration = \
            int(cp.get('data', 'segment-length')) \
            / int(cp.get('data', 'sample-rate'))
    else:
        raise ValueError("EXIT: Cannot find segment-duration in [data] "
                         "section of configuration file!")

    # get sample rate
    if cp.has_option('data', 'sample-rate'):
        sampleRate = int(cp.get('data', 'sample-rate'))
        print ">> Sample rate has been set to: %d" % sampleRate
        print
    else:
        print (">> ERROR: Need to specify sample-rate in [data] section of "
               "configuration file in order to calculate inputs for "
               "downstream processes.")
        sys.exit()

    # if not extend option then need to get block duration
    if not extend:
        if cp.has_option('data', 'block-duration'):
            blockDuration = int(cp.get('data', 'block-duration'))
        elif cp.has_option('data', 'segment-length'):
            s_length = int(cp.get('data', 'segment-length'))
            s_num = int(cp.get('data', 'number-of-segments'))
            s_rate = int(cp.get('data', 'sample-rate'))
            s_overlap = int(cp.get('inspiral', 'segment-overlap'))
            # calculate blockDuration: the stretch (in seconds) covered by
            # s_num overlapping segments of s_length samples each
            blockDuration = (s_length * s_num - (s_num - 1) * s_overlap) \
                / s_rate
        else:
            raise ValueError("EXIT: Cannot find block-duration in [data] "
                             "section of configuration file! Either set "
                             "block-duration or use --extend option.")
    # calculate the minimum amount of science time needed, and the length of
    # the quanta to be added on both ends of the analysis time
    minscilength = blockDuration + 2 * pad_data
    quanta = segmentDuration / 2

    # if extending beyond minscilength, add segments of quanta length to
    # each end of the segment
    print ">> Minimum science segment length is: %ss" % minscilength
    print
    if extend:
        print ">> Will extend minimum science segment by quanta of: %ss" % quanta
        print

    ##########################################################################
    # get list of segments for each IFO and put in science txt file
    ##########################################################################

    if not useold:
        # external call to ligolw_segment_query
        query_start = int(grb_time - offset)
        query_end = int(grb_time + offset)
        for ifo in ifolist:
            if cp.has_option('segments', '%s-segments' % ifo.lower()):
                segmentName = cp.get('segments', '%s-segments' % ifo.lower())
                check_segment_availability(grb_name, grb_time, query_start,
                                           query_end, offset, ifo,
                                           segmentName)

    ##########################################################################
    # get veto files
    ##########################################################################

    if not useold:
        # get and read veto definer file
        veto_file_url = cp.get('exttrig', 'cvs_veto_definer')
        veto_file_path, headers = urllib.urlretrieve(
            veto_file_url, os.path.basename(veto_file_url))

        # do ligolw_segments_from_cats
        deltat = 500
        args = {'start_time': int(grb_time - offset - deltat),
                'end_time': int(grb_time + offset + deltat),
                'veto_file_path': veto_file_path}
        cmd = ("ligolw_segments_from_cats --database "
               "--veto-file={veto_file_path} --separate-categories "
               "--gps-start-time {start_time} --gps-end-time {end_time} "
               "--output-dir=. --individual-results").format(**args)
        print '>>', cmd
        print
        process = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        output, err = process.communicate()

        # rename the veto files for easier handling
        veto_files = glob.glob(
            './*VETOTIME_CAT*{start_time}*xml'.format(**args))
        for filename in veto_files:
            p = filename.split('-')
            newname = "%s-%s_grb%s.xml" % (p[0], p[1], grb_name)
            shutil.move(filename, newname)

    ##########################################################################
    # look in txt files and find segment with onsource and minscilength
    ##########################################################################

    # create segment that is +/- offset of GRB time
    onsource = [grb_time - int(cp.get('exttrig', 'onsource_left')),
                grb_time + int(cp.get('exttrig', 'onsource_right'))]
    onSourceSegment = segments.segment(onsource[0], onsource[1])

    # get segments in science txt files; see if the segment length is at
    # least minscilength. If not, discard it; if so, put it in segdict[ifo]
    # and keep ifo in ifolist. basic_segdict keeps the full lists for
    # plotting, so it must be a separate dict rather than an alias of segdict.
    segdict = segments.segmentlistdict()
    basic_segdict = segments.segmentlistdict()
    for ifo in ifolist:
        # check configuration file
        if not cp.has_option('segments', '%s-segments' % ifo.lower()):
            continue
        # find segment with onsource and check it is at least minscilength
        ifo_segfile = '%s-science_grb%s.txt' % (ifo, grb_name)
        if os.path.exists(ifo_segfile):
            tmplist = segmentsUtils.fromsegwizard(open(ifo_segfile))
            try:
                s = tmplist.find(onSourceSegment)
            except ValueError:
                # if onsource not in segments then move on to next IFO
                continue
            if abs(tmplist[s]) >= minscilength:
                segdict[ifo] = segments.segmentlist([tmplist[s]])
            basic_segdict[ifo] = segments.segmentlist([s for s in tmplist])
    ifolist = segdict.keys()

    if len(ifolist) < 2:
        print "EXIT: Less than 2 interferometers have available data!"
        sys.exit()

    ##########################################################################
    # apply vetoes
    ##########################################################################

    print ">> Vetoes that overlap with science segments:"
    for ifo in ifolist:
        # flag; True if IFO not vetoed
        cat_flag = True
        for cat in catlist:
            # create list and check for overlaps
            xmlsegfile = "./%s-VETOTIME_CAT%s_grb%s.xml" % (ifo, cat,
                                                            grb_name)
            if os.path.exists(xmlsegfile) and cat_flag:
                testseg = segments.segment([segdict[ifo][0][0],
                                            segdict[ifo][0][1]])
                list_overlaps = []

                # load the content of the veto-file
                xmldoc = utils.load_filename(
                    xmlsegfile, gz=False,
                    contenthandler=lsctables.use_in(
                        ligolw.LIGOLWContentHandler))
                segs = lsctables.SegmentTable.get_table(xmldoc)
                segdefs = lsctables.SegmentDefTable.get_table(xmldoc)

                # create a mapping between the segments and their definitions
                defdict = {}
                for segdef in segdefs:
                    defdict[segdef.segment_def_id] = segdef.name

                # find veto segments that intersect the science segment of
                # the IFO containing the onsource
                for seg in segs:
                    s = segments.segment(seg.start_time, seg.end_time)
                    if testseg.intersects(s):
                        id = seg.segment_def_id
                        list_overlaps.append(
                            [defdict[id], seg.start_time, seg.end_time])

                # cut veto CAT1 segments out of science segment; a CAT1, 2
                # or 3 veto inside the onsource vetoes the whole IFO
                for name, segstart, segend in list_overlaps:
                    print "CAT%s IFO %s, Start: %d End: %d because %s" % (
                        cat, ifo, segstart, segend, name)
                    s = segments.segment(segstart, segend)
                    if onSourceSegment.intersects(s):
                        segdict.pop(ifo, None)
                        cat_flag = False
                        break
                if cat == 1:
                    vetoes = segments.segmentlist(
                        segments.segment(s[1], s[2]) for s in list_overlaps)
                    segdict[ifo] -= vetoes

    # get list of IFOs
    ifolist = segdict.keys()
    print

    if len(ifolist) < 2:
        print "EXIT: After vetoes, less than 2 interferometers have available data!"
        sys.exit()

    ##########################################################################
    # determine segment to be analyzed
    ##########################################################################

    # sort from most sensitive to least sensitive
    def sensitivity_cmp(ifo1, ifo2):
        return cmp(sensitivity_dict[ifo1], sensitivity_dict[ifo2])
    ifolist.sort(sensitivity_cmp)

    # now try getting off-source segments: start trying with all IFOs, then
    # work through subsets, beginning with the most sensitive combinations
    test_combos = itertools.chain(
        *itertools.imap(lambda n: iterutils.choices(ifolist, n),
                        xrange(len(ifolist), 1, -1)))

    off_source_segment = None
    the_ifo_combo = []
    for ifo_combo in test_combos:
        # find coincident science time of IFOs
        trial_seglist = segdict.intersection(ifo_combo)
        if abs(trial_seglist) < minscilength:
            print "EXIT: IFOs do not overlap enough for minscilength", \
                abs(trial_seglist)
            sys.exit()

        # find segment with grb_time inside
        try:
            super_seg = trial_seglist[
                trial_seglist.find(onSourceSegment)].contract(pad_data)
        except ValueError:
            print "EXIT: ValueError with super_seg"
            sys.exit()
        if onSourceSegment not in super_seg:
            print "EXIT: onsource not in super_seg"
            sys.exit()

        # find int division of onsource time intervals before and after grb
        tplus = (super_seg[1] - onSourceSegment[1])
        tminus = (onSourceSegment[0] - super_seg[0])

        # get minimum number of onsource time intervals in offsource
        tmin = (minscilength - 2 * pad_data - abs(onSourceSegment))

        # cut to get minscilength
        if tplus + tminus > tmin:
            half_max = tmin // 2
            if tplus < half_max:
                print ">> Left sticks out so cut it."
                remainder = tmin - tplus
                tminus = min(remainder, tminus)
            elif tminus < half_max:
                print ">> Right sticks out so cut it."
                remainder = tmin - tminus
                tplus = min(remainder, tplus)
            else:
                print ">> Both sides stick out so cut as symmetrically as possible."
                tminus = half_max
                tplus = tmin - half_max  # odd trial sticks out on right

        # if the available time cannot cover tmin, no off-source is possible
        if tplus + tminus < tmin:
            offsource = None
        temp_segment = segments.segment(
            (onSourceSegment[0] - tminus - pad_data,
             onSourceSegment[1] + tplus + pad_data))

        if temp_segment is not None:
            offsource = temp_segment
            ifolist = list(ifo_combo)

            if extend:
                # extend as many adjacent 128 second blocks as possible
                begin_time = offsource[0] - quanta * (
                    abs(super_seg[0] - offsource[0]) // quanta)
                end_time = offsource[1] + quanta * (
                    abs(super_seg[1] - offsource[1]) // quanta)
                offsource = segments.segment((begin_time, end_time))
            break
    print

    # check length is at least minscilength
    if abs(offsource) < minscilength:
        print abs(offsource), minscilength
        print "EXIT: Calculated offsource segment but less than minscilength!"
        sys.exit()

    # if fewer than two detectors can be used then exit
    if len(ifolist) < 2:
        print "EXIT: Calculated offsource segment but less than two IFOs!"
        sys.exit()

    # check edge case
    if abs(offsource[0] - onsource[0]) < pad_data \
            or abs(offsource[1] - onsource[1]) < pad_data:
        print ("WARNING: GRB time close to edge of offsource. "
               "It is within the padding time.")

    # concatenate "H1L1V1", etc.
    ifolist.sort()
    ifotag = "".join(ifolist)
    print ">> Offsource segment for %s GRB is:" % ifotag
    print "Start:", offsource[0], "End:", offsource[1], \
        "Duration:", offsource[1] - offsource[0], \
        "Left:", grb_time - offsource[0], "Right:", offsource[1] - grb_time
    print

    ##########################################################################
    # output
    ##########################################################################

    # write analyse txt files
    for ifo in basic_ifolist:
        analysisFP = open('%s-analyse_grb%s.txt' % (ifo, grb_name), 'w')
        analysisFP.write('# seg\t start \t stop \t duration\n')
        if ifo in ifolist:
            analysisFP.write('0\t %d\t %d\t %d\n' % (
                offsource[0], offsource[1], offsource[1] - offsource[0]))

    # calculate blockDuration
    blockDuration = int(abs(offsource[0] - offsource[1])) - 2 * pad_data

    # calculate psdDuration: the largest power of two such that
    # blockDuration/psdDuration >= psdRatio. Could have used a binary &
    # operator that is faster, but this is more user-friendly I believe.
    min_psdDuration = int(cp.get('exttrig', 'min-psd-length'))
    psdRatio = int(cp.get('exttrig', 'psd-ratio'))
    psdDuration = 2**int(numpy.log2(blockDuration / psdRatio))
    if psdDuration < min_psdDuration:
        print ("EXIT: PSD segment duration is too short. It is %ds but "
               "needs to be at least %ds in length."
               % (psdDuration, min_psdDuration))
        sys.exit()

    # some downstream processes (e.g. lalapps_tmpltbank) cannot handle
    # these inputs
    if cp.has_option('data', 'segment-duration'):
        cp.remove_option('data', 'segment-duration')
        cp.remove_option('data', 'block-duration')
    # some downstream processes (e.g. lalapps_tmpltbank) require these
    # options to run
    print (">> Using sample rate of %d to calculate inputs for downstream "
           "processes." % sampleRate)
    print
    segmentLength = segmentDuration * sampleRate
    # subtract 1 because one segment length is overlapped
    segmentCount = blockDuration / (segmentDuration / 2) - 1
    segmentOverlap = segmentLength / 2
    cp.set('data', 'segment-length', segmentLength)
    cp.set('data', 'number-of-segments', segmentCount)
    cp.set('inspiral', 'segment-overlap', segmentOverlap)

    # set values for [coh_PTF_inspiral] section in configuration file
    cp.set('coh_PTF_inspiral', 'block-duration', blockDuration)
    cp.set('coh_PTF_inspiral', 'segment-duration', segmentDuration)
    cp.set('coh_PTF_inspiral', 'psd-segment-duration', psdDuration)
    cp.set('coh_PTF_inspiral', 'pad-data', pad_data)
    f = open('grb%s.ini' % grb_name, 'w')
    cp.write(f)
    f.close()

    print (">> The [data] section of the configuration file has been "
           "edited with the following values:")
    print "sample-rate=", sampleRate
    print "segment-length=", segmentLength
    print "number-of-segments=", segmentCount
    print "segment-overlap=", segmentOverlap
    print
    print (">> The [coh_PTF_inspiral] section of the configuration file "
           "has been edited with the following values:")
    print "block-duration =", blockDuration
    print "segment-duration =", segmentDuration
    print "psd-segment-duration =", psdDuration
    print "pad-data =", pad_data
    print

    # plot segments
    offSourceSegment = segments.segment(offsource[0], offsource[1])
    plot_window = segments.segment(grb_time - offset, grb_time + offset)
    plot_segments(basic_segdict, onSourceSegment, offSourceSegment, grb_time,
                  plot_window, "segment_plot_%s.png" % grb_name, grb_name)

    # make xml file
    if make_xml:
        # create a new xml document with an ExtTriggers Table
        xmldoc = ligolw.Document()
        xmldoc.appendChild(ligolw.LIGO_LW())
        tbl = lsctables.New(lsctables.ExtTriggersTable)
        xmldoc.childNodes[-1].appendChild(tbl)

        # set the values we need
        row = lsctables.ExtTriggersTable()
        row.process_id = None
        row.det_alts = None
        row.det_band = None
        row.det_fluence = None
        row.det_fluence_int = None
        row.det_name = None
        row.det_peak = None
        row.det_peak_int = None
        row.det_snr = ''
        row.email_time = 0
        row.event_dec = float(grb_dec)
        row.event_dec_err = 0.0
        row.event_epoch = ''
        row.event_err_type = ''
        row.event_ra = float(grb_ra)
        row.event_ra_err = 0.0
        row.start_time = grb_time
        row.start_time_ns = 0
        row.event_type = ''
        row.event_z = 0.0
        row.event_z_err = 0.0
        row.notice_comments = ''
        row.notice_id = ''
        row.notice_sequence = ''
        row.notice_time = 0
        row.notice_type = ''
        row.notice_url = ''
        row.obs_fov_dec = 0.0
        row.obs_fov_dec_width = 0.0
        row.obs_fov_ra = 0.0
        row.obs_fov_ra_width = 0.0
        row.obs_loc_ele = 0.0
        row.obs_loc_lat = 0.0
        row.obs_loc_long = 0.0
        row.ligo_fave_lho = 0.0
        row.ligo_fave_llo = 0.0
        row.ligo_delay = 0.0
        row.event_number_gcn = 9999
        row.event_number_grb = grb_name
        row.event_status = 0

        # insert into the table and write file
        tbl.extend([row])
        filename = 'grb%s.xml' % grb_name
        utils.write_filename(xmldoc, filename)

    # plot all vetoes
    if make_plots:
        vetodict = segments.segmentlistdict()
        for cat in catlist:
            for ifo in ifolist:
                vetofile = "%s-VETOTIME_CAT%s_grb%s.xml" % (ifo, cat,
                                                            grb_name)
                xmldoc = utils.load_filename(
                    vetofile, gz=False,
                    contenthandler=lsctables.use_in(
                        ligolw.LIGOLWContentHandler))
                segs = lsctables.SegmentTable.get_table(xmldoc)
                segdefs = lsctables.SegmentDefTable.get_table(xmldoc)
                vetodict[ifo] = segments.segmentlist(
                    segments.segment(s.start_time, s.end_time) for s in segs)

            if vetodict:
                plot_segments(vetodict, onSourceSegment, offSourceSegment,
plot_window, "veto_plot_CAT%s_%s.png"%(cat,grb_name), "%s CAT%s"%(grb_name, cat)) # return return 'grb%s.ini'%grb_name, ifolist, onSourceSegment, offSourceSegment