def get_segment_definer_comments(xml_file, include_version=True):
    """Returns a dict with the comment column as the value for each segment

    Parameters
    ----------
    xml_file : file object
        Open LIGOLW XML segment file; gzip compression is inferred from
        the file object's ``name`` attribute.
    include_version : bool
        If True, keys are formatted ``IFO:NAME:VERSION``; otherwise
        ``IFO:NAME``.

    Returns
    -------
    dict
        Maps the full channel name of each segment definer row to its
        comment column.
    """
    from pycbc_glue.ligolw.ligolw import LIGOLWContentHandler as h
    lsctables.use_in(h)

    # read segment definer table
    xmldoc, _ = ligolw_utils.load_fileobj(
        xml_file, gz=xml_file.name.endswith(".gz"), contenthandler=h)
    seg_def_table = table.get_table(
        xmldoc, lsctables.SegmentDefTable.tableName)

    # put comment column into a dict
    comment_dict = {}
    for row in seg_def_table:
        name_parts = [str(row.ifos), str(row.name)]
        if include_version:
            name_parts.append(str(row.version))
        comment_dict[':'.join(name_parts)] = row.comment
    return comment_dict
def get_segment_definer_comments(xml_file, include_version=True):
    """Returns a dict with the comment column as the value for each segment

    Parameters
    ----------
    xml_file : file object
        Open LIGOLW XML segment file; gzip compression is inferred from
        the file object's ``name`` attribute.
    include_version : bool
        If True, keys are formatted ``IFO:NAME:VERSION``; otherwise
        ``IFO:NAME``.

    Returns
    -------
    dict
        Maps the full channel name of each segment definer row to its
        comment column.
    """
    from pycbc_glue.ligolw.ligolw import LIGOLWContentHandler as h
    lsctables.use_in(h)

    # read segment definer table; the returned digest is unused, so bind
    # it to _ for consistency with the sibling implementation
    xmldoc, _ = ligolw_utils.load_fileobj(
        xml_file, gz=xml_file.name.endswith(".gz"), contenthandler=h)
    seg_def_table = table.get_table(
        xmldoc, lsctables.SegmentDefTable.tableName)

    # put comment column into a dict
    comment_dict = {}
    for seg_def in seg_def_table:
        if include_version:
            full_channel_name = ':'.join([str(seg_def.ifos),
                                          str(seg_def.name),
                                          str(seg_def.version)])
        else:
            full_channel_name = ':'.join([str(seg_def.ifos),
                                          str(seg_def.name)])
        comment_dict[full_channel_name] = seg_def.comment
    return comment_dict
def select_segments_by_definer(segment_file, segment_name=None, ifo=None):
    """ Return the list of segments that match the segment name

    Parameters
    ----------
    segment_file: str
        path to segment xml file

    segment_name: str
        Name of segment
    ifo: str, optional

    Returns
    -------
    seg: list of segments
    """
    from pycbc_glue.ligolw.ligolw import LIGOLWContentHandler as h
    lsctables.use_in(h)
    indoc = ligolw_utils.load_filename(segment_file, False, contenthandler=h)
    segment_table = table.get_table(indoc, 'segment')
    seg_def_table = table.get_table(indoc, 'segment_definer')

    # Collect the definer ids whose ifo/name match the requested filters;
    # a filter of None matches everything.
    valid_id = []
    definer_rows = zip(seg_def_table.getColumnByName('ifos'),
                       seg_def_table.getColumnByName('name'),
                       seg_def_table.getColumnByName('segment_def_id'))
    for row_ifo, row_name, row_id in definer_rows:
        if ifo and ifo != row_ifo:
            continue
        if segment_name and segment_name != row_name:
            continue
        valid_id.append(row_id)

    # Combine the integer-second and nanosecond columns into float GPS times.
    start = numpy.array(segment_table.getColumnByName('start_time'))
    start = start + 1e-9 * numpy.array(
        segment_table.getColumnByName('start_time_ns'))
    end = numpy.array(segment_table.getColumnByName('end_time'))
    end = end + 1e-9 * numpy.array(
        segment_table.getColumnByName('end_time_ns'))

    # Keep only the segments belonging to a matching definer.
    did = segment_table.getColumnByName('segment_def_id')
    keep = numpy.array([d in valid_id for d in did])
    if sum(keep) > 0:
        return start_end_to_segments(start[keep], end[keep])
    return segmentlist([])
def select_segments_by_definer(segment_file, segment_name=None, ifo=None):
    """ Return the list of segments that match the segment name

    Parameters
    ----------
    segment_file: str
        path to segment xml file

    segment_name: str
        Name of segment
    ifo: str, optional

    Returns
    -------
    seg: list of segments
    """
    from pycbc_glue.ligolw.ligolw import LIGOLWContentHandler as h
    lsctables.use_in(h)
    indoc = ligolw_utils.load_filename(segment_file, False, contenthandler=h)
    segment_table = table.get_table(indoc, 'segment')
    seg_def_table = table.get_table(indoc, 'segment_definer')

    def_ifos = seg_def_table.getColumnByName('ifos')
    def_names = seg_def_table.getColumnByName('name')
    def_ids = seg_def_table.getColumnByName('segment_def_id')

    # Definer ids passing the optional ifo / segment-name filters
    # (a None filter accepts every row).
    valid_id = [def_id
                for def_ifo, def_name, def_id
                in zip(def_ifos, def_names, def_ids)
                if not (ifo and ifo != def_ifo)
                and not (segment_name and segment_name != def_name)]

    # Build float GPS times from the second + nanosecond columns.
    start = numpy.array(segment_table.getColumnByName('start_time'))
    start_ns = numpy.array(segment_table.getColumnByName('start_time_ns'))
    end = numpy.array(segment_table.getColumnByName('end_time'))
    end_ns = numpy.array(segment_table.getColumnByName('end_time_ns'))
    start = start + 1e-9 * start_ns
    end = end + 1e-9 * end_ns

    # Select only segments tied to a matching definer row.
    did = segment_table.getColumnByName('segment_def_id')
    keep = numpy.array([d in valid_id for d in did])
    if sum(keep) > 0:
        return start_end_to_segments(start[keep], end[keep])
    else:
        return segmentlist([])
def start_end_from_segments(segment_file):
    """ Return the start and end time arrays from a segment file.

    Parameters
    ----------
    segment_file: xml segment file

    Returns
    -------
    start: numpy.ndarray
    end: numpy.ndarray
    """
    from pycbc_glue.ligolw.ligolw import LIGOLWContentHandler as h
    lsctables.use_in(h)

    indoc = ligolw_utils.load_filename(segment_file, False, contenthandler=h)
    segment_table = table.get_table(indoc, lsctables.SegmentTable.tableName)

    # Each GPS time is stored as an integer-second column plus a
    # nanosecond column; combine them into float arrays.
    col = segment_table.getColumnByName
    start = numpy.array(col('start_time')) + 1e-9 * numpy.array(col('start_time_ns'))
    end = numpy.array(col('end_time')) + 1e-9 * numpy.array(col('end_time_ns'))
    return start, end
def start_end_from_segments(segment_file):
    """ Return the start and end time arrays from a segment file.

    Parameters
    ----------
    segment_file: xml segment file

    Returns
    -------
    start: numpy.ndarray
    end: numpy.ndarray
    """
    from pycbc_glue.ligolw.ligolw import LIGOLWContentHandler as h
    lsctables.use_in(h)

    indoc = ligolw_utils.load_filename(segment_file, False, contenthandler=h)
    segment_table = table.get_table(indoc, lsctables.SegmentTable.tableName)

    # Read the integer-second and nanosecond components of the start
    # and end times, then combine them into float GPS times.
    start_sec = numpy.array(segment_table.getColumnByName('start_time'))
    start_ns = numpy.array(segment_table.getColumnByName('start_time_ns'))
    end_sec = numpy.array(segment_table.getColumnByName('end_time'))
    end_ns = numpy.array(segment_table.getColumnByName('end_time_ns'))

    return start_sec + start_ns * 1e-9, end_sec + end_ns * 1e-9
def ReadSimInspiralFromFiles(fileList, verbose=False):
    """
    Read the simInspiral tables from a list of files

    @param fileList: list of input files
    @param verbose: print ligolw_add progress
    @return: a single SimInspiralTable holding the rows of every input
        file, or None if no input file contained a sim_inspiral table
    """
    simInspiralTriggers = None

    lsctables.use_in(ExtractSimInspiralTableLIGOLWContentHandler)
    for thisFile in fileList:
        doc = utils.load_filename(
            thisFile, gz=(thisFile or "stdin").endswith(".gz"),
            verbose=verbose,
            contenthandler=ExtractSimInspiralTableLIGOLWContentHandler)
        # extract the sim inspiral table; a file may legitimately lack one,
        # in which case get_table raises ValueError.  Catch only that, so
        # genuine errors (I/O, parse failures) still propagate.
        try:
            simInspiralTable = table.get_table(
                doc, lsctables.SimInspiralTable.tableName)
        except ValueError:
            simInspiralTable = None
        if simInspiralTriggers and simInspiralTable:
            simInspiralTriggers.extend(simInspiralTable)
        elif not simInspiralTriggers:
            simInspiralTriggers = simInspiralTable

    return simInspiralTriggers
from pycbc_glue.ligolw import ligolw, table, lsctables
from pycbc.types import float64, float32, TimeSeries
from pycbc.detector import Detector

# Map a sample dtype to the matching LAL injection routine.
injection_func_map = {
    np.dtype(float32): sim.SimAddInjectionREAL4TimeSeries,
    np.dtype(float64): sim.SimAddInjectionREAL8TimeSeries
}

# dummy class needed for loading LIGOLW files
class LIGOLWContentHandler(ligolw.LIGOLWContentHandler):
    pass

lsctables.use_in(LIGOLWContentHandler)


def legacy_approximant_name(apx):
    """Convert the old style xml approximant name to a name and phase_order.

    Alex: I hate this function. Please delete this when we use Collin's
    new tables.
    """
    apx = str(apx)
    try:
        order = sim.GetOrderFromString(apx)
    # Narrowed from a bare except: only the string-parsing failure should
    # fall back to the default order; KeyboardInterrupt etc. must propagate.
    except Exception:
        print("Warning: Could not read phase order from string, using default")
        order = -1
    name = sim.GetStringFromApproximant(sim.GetApproximantFromString(apx))
    return name, order
kmin, kmax = get_cutoff_indices( self.min_f_lower or self.f_lower, self.end_frequency, self.delta_f, N) self.sslice = slice(kmin, kmax) self.sigma_view = self[self.sslice].squared_norm() * 4.0 * self.delta_f if not hasattr(psd, 'invsqrt'): psd.invsqrt = 1.0 / psd[self.sslice] self._sigmasq[key] = self.sigma_view.inner(psd.invsqrt) return self._sigmasq[key] # dummy class needed for loading LIGOLW files class LIGOLWContentHandler(ligolw.LIGOLWContentHandler): pass lsctables.use_in(LIGOLWContentHandler) # helper function for parsing approximant strings def boolargs_from_apprxstr(approximant_strs): """Parses a list of strings specifying an approximant and where that approximant should be used into a list that can be understood by FieldArray.parse_boolargs. Parameters ---------- apprxstr : (list of) string(s) The strings to parse. Each string should be formatted `APPRX:COND`, where `APPRX` is the approximant and `COND` is a string specifying where it should be applied (see `FieldArgs.parse_boolargs` for examples of conditional strings). The last string in the list may exclude a conditional argument, which is the same as specifying ':else'.
from __future__ import print_function import os, copy import urlparse import logging from pycbc_glue import segments, lal from pycbc_glue.ligolw import utils, table, lsctables, ligolw from pycbc.workflow.core import SegFile, File, FileList, make_analysis_dir from pycbc.frame import datafind_connection class ContentHandler(ligolw.LIGOLWContentHandler): pass lsctables.use_in(ContentHandler) def setup_datafind_workflow(workflow, scienceSegs, outputDir, seg_file=None, tags=None): """ Setup datafind section of the workflow. This section is responsible for generating, or setting up the workflow to generate, a list of files that record the location of the frame files needed to perform the analysis. There could be multiple options here, the datafind jobs could be done at run time or could be put into a dag. The subsequent jobs will know what was done here from the OutFileList containing the datafind jobs (and the Dagman nodes if appropriate.
matplotlib.use('Agg') import matplotlib.mlab as mlab import matplotlib.pyplot as plt import pycbc.pnutils import pycbc.events from pycbc.waveform import get_td_waveform, frequency_from_polarizations, amplitude_from_polarizations import lal logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO) class DefaultContentHandler(ligolw.LIGOLWContentHandler): pass lsctables.use_in(DefaultContentHandler) parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--coinc-file', type=str, required=True, help='HDF file containing coincident CBC triggers') parser.add_argument('--single-ifo-trigs', type=str, required=True, help='HDF file containing single IFO CBC triggers') parser.add_argument('--ifo', type=str, required=True, help='IFO, L1 or H1') parser.add_argument( '--tmpltbank-file', type=str, required=True,
from pycbc_glue.ligolw import ligolw, lsctables, table, utils import matplotlib matplotlib.use('Agg') import matplotlib.mlab as mlab import matplotlib.pyplot as plt import pycbc.pnutils import pycbc.events from pycbc.waveform import get_td_waveform, frequency_from_polarizations, amplitude_from_polarizations import lal logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO) class DefaultContentHandler(ligolw.LIGOLWContentHandler): pass lsctables.use_in(DefaultContentHandler) parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--coinc-file', type=str, required=True, help='HDF file containing coincident CBC triggers') parser.add_argument('--single-ifo-trigs', type=str, required=True, help='HDF file containing single IFO CBC triggers') parser.add_argument('--ifo', type=str, required=True, help='IFO, L1 or H1') parser.add_argument('--tmpltbank-file', type=str, required=True, help='HDF file containing template information for CBC search') parser.add_argument('--output-file', type=str, required=True, help='Full path to output file') parser.add_argument('--loudest-event-number', type=int, required=True, default=1, help='Script will plot the Nth loudest coincident trigger') parser.add_argument('--omicron-dir', type=str, required=True,
https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/ahope/datafind.html """ from __future__ import print_function import os, copy import urlparse import logging from pycbc_glue import segments, lal from pycbc_glue.ligolw import utils, table, lsctables, ligolw from pycbc.workflow.core import SegFile, File, FileList, make_analysis_dir from pycbc.frame import datafind_connection class ContentHandler(ligolw.LIGOLWContentHandler): pass lsctables.use_in(ContentHandler) def setup_datafind_workflow(workflow, scienceSegs, outputDir, seg_file=None, tags=None): """ Setup datafind section of the workflow. This section is responsible for generating, or setting up the workflow to generate, a list of files that record the location of the frame files needed to perform the analysis. There could be multiple options here, the datafind jobs could be done at run time or could be put into a dag. The subsequent jobs will know what was done here from the OutFileList containing the datafind jobs (and the Dagman nodes if appropriate. For now the only implemented option is to generate the datafind files at runtime. This module can also check if the frameFiles actually exist, check whether the obtained segments line up with the original ones and update the science segments to reflect missing data files.