Example #1
def get_LVAdata_from_file(filename, as_dict=False):
  """
  this function takes the name of an xml file containing a single LVAlertTable
  and it returns:
  host: the machine the payload file was created on
  full_path: the full path to (and including) the payload file
  general_dir: the directory in gracedb that the output of your code should
               be written to
  uid: the gracedb unique id associated with the event in the LVAlertTable
  """
  doc = utils.load_filename(filename)
  lvatable = table.get_table(doc, LVAlertTable.tableName)
  file = lvatable[0].file
  uid = lvatable[0].uid
  data_loc = lvatable[0].temp_data_loc

  if as_dict:
    return {
      "file" : lvatable[0].file,
      "uid" : lvatable[0].uid,
      "data_loc" : lvatable[0].temp_data_loc,
      "description" : lvatable[0].description,
      "alert_type" : lvatable[0].alert_type,
    }

  return file, uid, data_loc  
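A minimal usage sketch for the helper above; the alert file name is hypothetical and not part of the original source.

# Hypothetical payload file written by an LVAlert listener.
payload_file, uid, data_loc = get_LVAdata_from_file("lvalert_payload.xml")
print("event %s: payload %s (%s)" % (uid, payload_file, data_loc))

# The dictionary form also exposes the description and alert_type columns.
info = get_LVAdata_from_file("lvalert_payload.xml", as_dict=True)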
Example #2
    def from_filenames(cls, filenames, name, verbose=False):
        """
		Convenience function to deserialize
		CoincParamsDistributions objects from a collection of XML
		files and return their sum.  The return value is a
		two-element tuple.  The first element is the deserialized
		and summed CoincParamsDistributions object, the second is a
		segmentlistdict indicating the interval of time spanned by
		the out segments in the search_summary rows matching the
		process IDs that were attached to the
		CoincParamsDistributions objects in the XML.
		"""
        self = None
        for n, filename in enumerate(filenames, 1):
            if verbose:
                print >> sys.stderr, "%d/%d:" % (n, len(filenames)),
            xmldoc = ligolw_utils.load_filename(
                filename, verbose=verbose, contenthandler=cls.contenthandler)
            if self is None:
                self = cls.from_xml(xmldoc, name)
                seglists = lsctables.SearchSummaryTable.get_table(
                    xmldoc).get_out_segmentlistdict(set([self.process_id
                                                         ])).coalesce()
            else:
                other = cls.from_xml(xmldoc, name)
                self += other
                seglists |= lsctables.SearchSummaryTable.get_table(
                    xmldoc).get_out_segmentlistdict(set([other.process_id
                                                         ])).coalesce()
                del other
            xmldoc.unlink()
        return self, seglists
Example #3
def parse_psd_file(filestr, fvals):
    """
    Map the user-provided PSD file string into a function to be called as PSD(f).
    """

    if not os.path.isfile(filestr):
        try:
            psd_func = getattr(lalsimulation, filestr)
            return numpy.array(map(psd_func, fvals))
        except AttributeError:
            pass

    try:
        xmldoc = utils.load_filename(filestr, contenthandler=PSDContentHandler)
        psd = read_psd_xmldoc(xmldoc).values()[0]
        f = numpy.arange(0, len(psd.data) * psd.deltaF, psd.deltaF)
        psd = psd.data
    except:
        # FIXME: ugh!
        try:
            f, psd = numpy.loadtxt(filestr, unpack=True)
        except:
            exit("Can't parse PSD specifier %s as function or file." % filestr)

    def anon_interp(newf):
        return numpy.interp(newf, f, psd)

    return numpy.array(map(anon_interp, fvals))
Example #4
def reference_psds_for_filename(filename):
    xmldoc = ligolw_utils.load_filename(
        filename, contenthandler=lal.series.PSDContentHandler)
    psds = lal.series.read_psd_xmldoc(xmldoc)
    return dict(
        (key, timing.InterpolatedPSD(filter.abscissa(psd), psd.data.data))
        for key, psd in psds.iteritems() if psd is not None)
Example #5
def ReadSnglInspiralFromFiles(fileList, verbose=False):
  """
  Read the SnglInspiralTables from a list of files

  @param fileList: list of input files
  @param verbose: print progress
  """
  # NOTE: this function no longer carries out event ID mangling (AKA
  # reassignment). Please adjust calling codes accordingly!
  # This means that identical event IDs produced by lalapps_thinca in
  # non-slide files having the same GPS start time will stay identical,
  # affecting zerolag and injection runs made over the same data.
  #
  # In consequence, if the calling code is going to reconstruct coincs
  # from the sngl event IDs, and if these include multiple injection
  # runs, coinc finding should be done one file at a time - see the
  # readCoincInspiralFromFiles function in CoincInspiralUtils.py

  sngls = lsctables.New(lsctables.SnglInspiralTable, \
      columns=lsctables.SnglInspiralTable.loadcolumns)

  for i,file in enumerate(fileList):
    if verbose: print str(i+1)+"/"+str(len(fileList))+": "
    xmldoc = utils.load_filename(file, verbose=verbose)
    try:
      sngl_table = table.get_table(xmldoc, lsctables.SnglInspiralTable.tableName)
    except ValueError: #some xml files have no sngl table, that's OK
      sngl_table = None
    if sngl_table: sngls.extend(sngl_table)

    xmldoc.unlink()    #free memory

  return sngls
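A short usage sketch (the trigger file names are hypothetical): concatenate the single-inspiral triggers from several files into one table.

# Hypothetical list of LIGO_LW files containing sngl_inspiral tables.
trigger_files = ["H1-INSPIRAL-100.xml.gz", "L1-INSPIRAL-100.xml.gz"]
sngls = ReadSnglInspiralFromFiles(trigger_files, verbose=True)
print("loaded %d triggers" % len(sngls))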
Example #6
def coinc_without_inj(coinc, tmpdir):
    """Produce a coinc.xml file with the found coincs stripped out."""
    filename = str(tmpdir / 'coinc_without_inj.xml')
    xmldoc = ligolw_utils.load_filename(coinc, contenthandler=ContentHandler)

    # Prune coinc_def table
    coinc_def_table = lsctables.CoincDefTable.get_table(xmldoc)
    included = [row for row in coinc_def_table
                if row.search_coinc_type == InspiralCoincDef.search_coinc_type
                and row.search == InspiralCoincDef.search]
    included_coinc_def_ids = {row.coinc_def_id for row in included}
    coinc_def_table[:] = included

    # Prune coinc table
    coinc_table = lsctables.CoincTable.get_table(xmldoc)
    included = [row for row in coinc_table
                if row.coinc_def_id in included_coinc_def_ids]
    included_coinc_ids = {row.coinc_event_id for row in included}
    coinc_table[:] = included

    # Prune coinc_map table
    coinc_map_table = lsctables.CoincMapTable.get_table(xmldoc)
    coinc_map_table[:] = [row for row in coinc_map_table
                          if row.coinc_event_id in included_coinc_ids]

    ligolw_utils.write_filename(xmldoc, filename)
    return filename
Example #7
def get_loud_trigs(fList, veto_file, new_snr_cut):
    """ Return a list(s) of single inspiral triggers that are above the
        new snr threshold for every combination of file in the file list
        and application of veto in the veto file list.
    """
    trigs = lsctables.New(lsctables.SnglInspiralTable)
    searched_segs = segments.segmentlist()
    for fname in fList:
        xmldoc = utils.load_filename(fname,
                                     gz=True,
                                     contenthandler=DefaultContentHandler)
        tbl = lsctables.table.get_table(xmldoc,
                                        lsctables.SnglInspiralTable.tableName)
        trigs.extend(
            [tbl[i] for i in (tbl.get_new_snr() > new_snr_cut).nonzero()[0]])
        search_summary = lsctables.table.get_table(
            xmldoc, lsctables.SearchSummaryTable.tableName)
        searched_segs += search_summary.get_outlist()

    if isinstance(veto_file, list):
        # If we have multiple veto files, return results for applying each one
        lt = []
        tg = []
        for vf in veto_file:
            veto_segs = get_segments_from_xml(vf)
            segs_after_veto = searched_segs - veto_segs
            print vf, 'livetime', abs(segs_after_veto)
            tg.append(trigs.veto(veto_segs))
            lt.append(abs(segs_after_veto))
        return tg, lt
    else:
        veto_segs = get_segments_from_xml(veto_file)
        segs_after_veto = searched_segs - veto_segs
        print veto_file, 'livetime', abs(segs_after_veto)
        return trigs.veto(veto_segs), abs(segs_after_veto)
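A hedged usage sketch: with a single veto file the function returns one trigger table and one livetime, with a list of veto files it returns parallel lists. All file names here are hypothetical.

# Single veto file: one table of loud triggers plus the post-veto livetime.
loud, livetime = get_loud_trigs(["H1-TRIGGERS.xml.gz"], "H1-VETO_CAT2.xml", new_snr_cut=6.0)

# List of veto files: one entry per veto file in each returned list.
loud_list, livetime_list = get_loud_trigs(["H1-TRIGGERS.xml.gz"],
                                          ["H1-VETO_CAT2.xml", "H1-VETO_CAT3.xml"], 6.0)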
Example #8
def check_segment_availability(grb_name, grb_time, query_start, query_end, offset, ifo, segmentName):
  '''
  Searches +/- offset from GRB time to download the latest segment lists then extracts times and puts them into a txt file.
  '''
  args = {'grb_name'    : grb_name,
          'query_start' : query_start,
          'query_end'   : query_end,
          'ifo'         : ifo,
          'segmentName' : segmentName}
  cmd  = "ligolw_segment_query --database --query-segments --include-segments '{segmentName}' --gps-start-time {query_start} --gps-end-time {query_end} > ./segments{ifo}_grb{grb_name}.xml".format(**args)
  print '>>',cmd
  print
  process    = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
  output,err = process.communicate()

  # try to open the file
  try:
    doc = utils.load_filename("segments{ifo}_grb{grb_name}.xml".format(**args), contenthandler = lsctables.use_in(ligolw.LIGOLWContentHandler))
  except:
    raise IOError, "Error reading file: segments{ifo}_grb{grb_name}.xml".format(**args)

  # extract the segment list from segment:table and store in a txt file
  segs = table.get_table(doc, "segment")
  seglist = segments.segmentlist(segments.segment(s.start_time, s.end_time) for s in segs)
  segmentsUtils.tosegwizard(file("{ifo}-science_grb{grb_name}.txt".format(**args),'w'),seglist,header = True)

  print ">> %s segments +/-%ds from %ds found:"%(ifo,offset,grb_time)
  for s in segs:
    print "Start:",s.start_time,"End:",s.end_time,"Duration:",s.end_time-s.start_time
  print

  return
Example #9
def retrieve_event_from_coinc(fname_coinc):
    from glue.ligolw import lsctables, table, utils
    from RIFT import lalsimutils
    event_dict = {}
    samples = table.get_table(
        utils.load_filename(fname_coinc, contenthandler=lalsimutils.cthdler),
        lsctables.SnglInspiralTable.tableName)
    event_duration = 4  # default
    ifo_list = []
    for row in samples:
        m1 = row.mass1
        m2 = row.mass2
        ifo_list.append(row.ifo)
        try:
            event_duration = row.event_duration  # may not exist
        except:
            print(" event_duration field not in XML ")
    event_dict["m1"] = row.mass1
    event_dict["m2"] = row.mass2
    event_dict["s1z"] = row.spin1z
    event_dict["s2z"] = row.spin2z
    event_dict["tref"] = row.end_time + 1e-9 * row.end_time_ns
    event_dict["IFOs"] = list(set(ifo_list))
    event_dict["SNR"] = row.snr
    return event_dict
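A minimal usage sketch; the coinc file name is hypothetical.

# Hypothetical coinc.xml produced by a search pipeline.
event = retrieve_event_from_coinc("coinc.xml")
print(event["m1"], event["m2"], event["tref"], event["IFOs"], event["SNR"])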
Example #10
	def from_filenames(cls, filenames, name, verbose = False):
		"""
		Convenience function to deserialize
		CoincParamsDistributions objects from a collection of XML
		files and return their sum.  The return value is a
		two-element tuple.  The first element is the deserialized
		and summed CoincParamsDistributions object, the second is a
		segmentlistdict indicating the interval of time spanned by
		the out segments in the search_summary rows matching the
		process IDs that were attached to the
		CoincParamsDistributions objects in the XML.
		"""
		self = None
		for n, filename in enumerate(filenames, 1):
			if verbose:
				print >>sys.stderr, "%d/%d:" % (n, len(filenames)),
			xmldoc = ligolw_utils.load_filename(filename, verbose = verbose, contenthandler = cls.contenthandler)
			if self is None:
				self = cls.from_xml(xmldoc, name)
				seglists = lsctables.SearchSummaryTable.get_table(xmldoc).get_out_segmentlistdict(set([self.process_id])).coalesce()
			else:
				other = cls.from_xml(xmldoc, name)
				self += other
				seglists |= lsctables.SearchSummaryTable.get_table(xmldoc).get_out_segmentlistdict(set([other.process_id])).coalesce()
				del other
			xmldoc.unlink()
		return self, seglists
Example #11
    def do_start(self):
        """GstBaseSrc->start virtual method"""

        xml_location = self.get_property("xml-location")
        if xml_location is None:
            self.error(
                "xml-location property is unset, cannot load template bank")
            return False

        self.__templates = list(
            lsctables.SnglInspiralTable.get_table(
                utils.load_filename(xml_location)))

        start_time = self.get_property("start-time")
        duration = self.get_property("duration")
        end_time = start_time + duration
        min_waiting_time = self.get_property("min-waiting-time")
        max_waiting_time = self.get_property("max-waiting-time")

        self.__last_time = start_time
        self.__stream_end_time = end_time
        self.__triggertimes = [
            self.make_trigger_times(start_time, end_time, min_waiting_time,
                                    max_waiting_time)
            for i in range(len(self.__templates))
        ]
        self.__ntriggers = 0

        #self.src_pads().next().push_event(gst.event_new_new_segment(False, 1.0, gst.FORMAT_TIME, start_time, end_time, start_time))

        return True
Example #12
def ReadSimInspiralFromFiles(fileList, verbose=False):
    """
  Read the simInspiral tables from a list of files

  @param fileList: list of input files
  @param verbose: print ligolw_add progress
  """
    simInspiralTriggers = None

    lsctables.use_in(ExtractSimInspiralTableLIGOLWContentHandler)
    for thisFile in fileList:
        doc = utils.load_filename(
            thisFile,
            gz=(thisFile or "stdin").endswith(".gz"),
            verbose=verbose,
            contenthandler=ExtractSimInspiralTableLIGOLWContentHandler)
        # extract the sim inspiral table
        try:
            simInspiralTable = lsctables.SimInspiralTable.get_table(doc)
        except:
            simInspiralTable = None
        if simInspiralTriggers and simInspiralTable:
            simInspiralTriggers.extend(simInspiralTable)
        elif not simInspiralTriggers:
            simInspiralTriggers = simInspiralTable

    return simInspiralTriggers
Example #13
def open_xmldoc(fobj, **kwargs):
    """Try and open an existing LIGO_LW-format file, or create a new Document

    Parameters
    ----------
    fobj : `str`, `file`
        file path or open file object to read

    **kwargs
        other keyword arguments to pass to
        :func:`~glue.ligolw.utils.load_filename`, or
        :func:`~glue.ligolw.utils.load_fileobj` as appropriate

    Returns
    --------
    xmldoc : :class:`~glue.ligolw.ligolw.Document`
        either the `Document` as parsed from an existing file, or a new, empty
        `Document`
    """
    from glue.ligolw.lsctables import use_in
    from glue.ligolw.ligolw import (Document, LIGOLWContentHandler)
    from glue.ligolw.utils import load_filename, load_fileobj
    try:  # try and load existing file
        if isinstance(fobj, string_types):
            kwargs.setdefault('contenthandler', use_in(LIGOLWContentHandler))
            return load_filename(fobj, **kwargs)
        if isinstance(fobj, FILE_LIKE):
            kwargs.setdefault('contenthandler', use_in(LIGOLWContentHandler))
            return load_fileobj(fobj, **kwargs)[0]
    except (OSError, IOError):  # or just create a new Document
        return Document()
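A minimal usage sketch: the call returns the parsed Document when the (hypothetical) file exists, or a new empty Document otherwise.

# Hypothetical path; a missing file falls through to an empty Document.
xmldoc = open_xmldoc("events.xml")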
Example #14
 def getAuxChannels(self, inputList):
     intermediateTable = {
         "type": [],
         "ifo": [],
         "qscan_time": [],
         "qscan_dir": [],
         "channel_name": [],
         "peak_time": [],
         "peak_frequency": [],
         "peak_q": [],
         "peak_significance": [],
         "peak_amplitude": [],
     }
     try:
         doc = utils.load_filename(
             inputList[0] + "/summary.xml", verbose=True, gz=False, xmldoc=None, contenthandler=None
         )
         qscanTable = table.get_table(doc, "qscan:summary:table")
     except:
         print >> sys.stderr, "failed to read" + inputList[0] + "/summary.xml"
         return intermediateTable
     for channel in qscanTable:
         for param in self.paramMaps:
             intermediateTable[param[0]].append(eval("channel." + param[1]))
         intermediateTable["qscan_dir"].append(inputList[0])
         # if len(inputList) == 4:
         intermediateTable["qscan_time"].append(inputList[1])
         intermediateTable["type"].append(inputList[2])
         intermediateTable["ifo"].append(inputList[3])
     return intermediateTable
Example #15
def parse_psd_file(filestr, fvals):
    """
    Map the user-provided PSD file string into a function to be called as PSD(f).
    """ 
  
    if not os.path.isfile(filestr):
        try:
            psd_func = getattr(lalsimulation, filestr)
            return numpy.array(map(psd_func, fvals))
        except AttributeError:
            pass

    try:
        xmldoc = utils.load_filename(filestr, contenthandler=PSDContentHandler)
        psd = read_psd_xmldoc(xmldoc).values()[0]
        f = numpy.arange(0, len(psd.data)*psd.deltaF, psd.deltaF)
        psd = psd.data
    except:
        # FIXME: ugh!
        try:
            f, psd = numpy.loadtxt(filestr, unpack=True)
        except:
           exit("Can't parse PSD specifier %s as function or file." % filestr)

    def anon_interp(newf):
        return numpy.interp(newf, f, psd)
    return numpy.array(map(anon_interp, fvals))
Example #16
    def parse(self, stop_on=[]):
        from glue.ligolw import table
        from glue.ligolw import lsctables
        from glue.ligolw import utils

        xml_doc = utils.load_filename(self.path, gz=False)
        print xml_doc
Example #17
def loadSingleBurst( files, trigs_dict=None):
    """
    loads snglburst tables (produced by Omicron) into trgdict object
    files - is the list of file names
    """
    if type(files) is str:
        files = [files]
    if trigs_dict is None:
        trigs_dict = trigdict()
    for file in files:
        for row in table.get_table( ligolw_utils.load_filename(file, contenthandler=lsctables.use_in(ligolw.LIGOLWContentHandler)), lsctables.SnglBurstTable.tableName ):
            channel = "%s-%s_%s"%(row.ifo, row.channel.replace("-","_"), row.search)
            tcent = row.peak_time + 1e-9*row.peak_time_ns
            tstart = row.start_time + 1e-9*row.start_time_ns
            dur = row.duration
            fpeak = row.peak_frequency
            fcent = row.central_freq
            bndwth = row.bandwidth
            amp = row.amplitude
            snr = row.snr
            conf = row.confidence
            chi2 = row.chisq
            chi2_dof = row.chisq_dof

            trigger = [tstart, dur, tcent, fpeak, fcent, bndwth, amp, snr, conf, chi2, chi2_dof]

            if channel in trigs_dict.channels():
                trigs_dict[channel].append( trigger ) ### SingleBurst trigger structure
            else:
                trigs_dict[channel] = [ trigger ]

    return trigs_dict
Example #18
def get_loud_trigs(fList, veto_file, new_snr_cut):
    """ Return a list(s) of single inspiral triggers that are above the
        new snr threshold for every combination of file in the file list
        and application of veto in the veto file list.
    """
    trigs = lsctables.New(lsctables.SnglInspiralTable)
    searched_segs = segments.segmentlist()
    for fname in fList:
        xmldoc = utils.load_filename(fname, gz=True, contenthandler=DefaultContentHandler)
        tbl = lsctables.SnglInspiralTable.get_table(xmldoc)
        trigs.extend([tbl[i] for i in (tbl.get_new_snr() > new_snr_cut).nonzero()[0]])
        search_summary = lsctables.SearchSummaryTable.get_table(xmldoc)
        searched_segs += search_summary.get_outlist()

    if isinstance(veto_file, list):
        # If we have multiple veto files, return results for applying each one
        lt = []
        tg = []
        for vf in veto_file:
            veto_segs = get_segments_from_xml(vf)
            segs_after_veto = searched_segs - veto_segs
            print vf, 'livetime', abs(segs_after_veto)
            tg.append(trigs.veto(veto_segs))
            lt.append(abs(segs_after_veto))
        return tg, lt
    else:
        veto_segs = get_segments_from_xml(veto_file)
        segs_after_veto = searched_segs - veto_segs
        print veto_file, 'livetime', abs(segs_after_veto)
        return trigs.veto(veto_segs), abs(segs_after_veto)
Example #19
def get_combined_array(tablename, childnode):
  # FIXME assumes that all the xml files have the same binned array tables
  # Figure out the shape of the arrays in the file, make an array with one more
  # dimension, the number of files from sys.argv[1:]
  xmldoc = utils.load_filename(sys.argv[1], verbose=True, gz = (sys.argv[1] or "stdin").endswith(".gz"))
  xmldoc = xmldoc.childNodes[0]
  A  = rate.binned_array_from_xml(xmldoc.childNodes[childnode], tablename) 
  bins = rate.bins_from_xml(xmldoc.childNodes[childnode])
  out = numpy.zeros((len(sys.argv[1:]),)+A.array.shape,dtype="float")
  # Read the data
  for i, f in enumerate(sys.argv[1:]):
    xmldoc = utils.load_filename(f, verbose=True, gz = (f or "stdin").endswith(".gz"))
    xmldoc = xmldoc.childNodes[0]
    out[i] = rate.binned_array_from_xml(xmldoc.childNodes[childnode], tablename).array
  A.array = numpy.zeros(A.array.shape)
  return bins, out, A 
Example #20
def query_segments_xml(xml_location, gps_start, gps_end, spec):
    """
	Retrieve the segment table from a location, and clip segments to (gps_start, gps_end). If spec is given, retrieve only segments with this definer, otherwise, get all of them.
	"""
    if spec is None:
        spec = True
    else:
        #ifo, definer, version = spec.split(":")
        definer = spec.split(":")
        ifo, definer, version = definer[0], ":".join(
            definer[1:-1]), definer[-1]
    xmldoc = utils.load_filename(xml_location)
    segment_definer = table.get_table(xmldoc,
                                      lsctables.SegmentDefTable.tableName)
    # FIXME: ifo in ifos? What does a segment for a set of ifos even mean?
    seg_def_id = [
        sd.segment_def_id for sd in segment_definer
        if spec and ifo in sd.get_ifos() and definer == sd.name
    ]
    if len(seg_def_id) != 1:
        raise ValueError("Need exactly one definer row for %s:%s:%s, got %d" %
                         (ifo, definer, version, len(seg_def_id)))
    seg_def_id = seg_def_id[0]

    segment = table.get_table(xmldoc, lsctables.SegmentTable.tableName)
    return segmentlist(
        [s.get() for s in segment if s.segment_def_id == seg_def_id])
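A hedged usage sketch; the file name, GPS times and definer spec are hypothetical placeholders, with the spec in the IFO:NAME:VERSION form the function expects.

# Hypothetical segment file and definer specification.
science_segs = query_segments_xml("segments.xml", 1126051217, 1137254417,
                                  "H1:DMT-ANALYSIS_READY:1")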
Example #21
def open_xmldoc(fobj, **kwargs):
    """Try and open an existing LIGO_LW-format file, or create a new Document

    Parameters
    ----------
    fobj : `str`, `file`
        file path or open file object to read

    **kwargs
        other keyword arguments to pass to
        :func:`~glue.ligolw.utils.load_filename`, or
        :func:`~glue.ligolw.utils.load_fileobj` as appropriate

    Returns
    --------
    xmldoc : :class:`~glue.ligolw.ligolw.Document`
        either the `Document` as parsed from an existing file, or a new, empty
        `Document`
    """
    from glue.ligolw.lsctables import use_in
    from glue.ligolw.ligolw import (Document, LIGOLWContentHandler)
    from glue.ligolw.utils import load_filename, load_fileobj
    try:  # try and load existing file
        if isinstance(fobj, string_types):
            kwargs.setdefault('contenthandler', use_in(LIGOLWContentHandler))
            return load_filename(fobj, **kwargs)
        if isinstance(fobj, FILE_LIKE):
            kwargs.setdefault('contenthandler', use_in(LIGOLWContentHandler))
            return load_fileobj(fobj, **kwargs)[0]
    except (OSError, IOError):  # or just create a new Document
        return Document()
Example #22
def parse_veto_definer(veto_def_filename):
    """ Parse a veto definer file from the filename and return a dictionary
    indexed by ifo and veto definer category level.

    Parameters
    ----------
    veto_def_filename: str
        The path to the veto definer file

    Returns:
        parsed_definition: dict
            Returns a dictionary first indexed by ifo, then category level, and
            finally a list of veto definitions.
    """
    from glue.ligolw import table, lsctables, utils as ligolw_utils
    from glue.ligolw.ligolw import LIGOLWContentHandler as h
    lsctables.use_in(h)

    indoc = ligolw_utils.load_filename(veto_def_filename, False,
                                       contenthandler=h)
    veto_table = table.get_table(indoc, 'veto_definer')

    ifo = veto_table.getColumnByName('ifo')
    name = veto_table.getColumnByName('name')
    version = numpy.array(veto_table.getColumnByName('version'))
    category = numpy.array(veto_table.getColumnByName('category'))
    start = numpy.array(veto_table.getColumnByName('start_time'))
    end = numpy.array(veto_table.getColumnByName('end_time'))
    start_pad = numpy.array(veto_table.getColumnByName('start_pad'))
    end_pad = numpy.array(veto_table.getColumnByName('end_pad'))

    data = {}
    for i in range(len(veto_table)):
        if ifo[i] not in data:
            data[ifo[i]] = {}

        # The veto-definer categories are weird! Hardware injections are stored
        # in "3" and numbers above that are bumped up by one (although not
        # often used any more). So we remap 3 to H and anything above 3 to
        # N-1. 2 and 1 correspond to 2 and 1 (YAY!)
        if category[i] > 3:
            curr_cat = "CAT_{}".format(category[i]-1)
        elif category[i] == 3:
            curr_cat = "CAT_H"
        else:
            curr_cat = "CAT_{}".format(category[i])

        if curr_cat not in data[ifo[i]]:
            data[ifo[i]][curr_cat] = []

        veto_info = {'name': name[i],
                     'version': version[i],
                     'start': start[i],
                     'end': end[i],
                     'start_pad': start_pad[i],
                     'end_pad': end_pad[i],
                     }
        data[ifo[i]][curr_cat].append(veto_info)
    return data
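A short usage sketch (the veto-definer file name is hypothetical): the returned dictionary is indexed by ifo and then by category label.

# Hypothetical veto-definer file.
vetoes = parse_veto_definer("H1L1-VETO_DEFINER.xml")
for veto in vetoes.get("H1", {}).get("CAT_1", []):
    print(veto["name"], veto["start"], veto["end"])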
Example #23
def load_xml_file(filename):
    """Wrapper to ligolw's utils.load_filename"""

    xml_doc = utils.load_filename(filename, gz=filename.endswith("gz"),
                                  contenthandler=lsctables.use_in(
                                      ligolw.LIGOLWContentHandler))

    return xml_doc
Example #24
    def __init__(self, filename, approximant=None, **kwds):
        self.indoc = ligolw_utils.load_filename(
            filename, False, contenthandler=LIGOLWContentHandler)
        self.table = table.get_table(self.indoc,
                                     lsctables.SnglInspiralTable.tableName)
        self.extra_args = kwds

        self.approximant_str = approximant
Example #25
 def reference_psds_for_filename(filename):
     xmldoc = ligolw_utils.load_filename(
         filename, contenthandler=lal.series.PSDContentHandler)
     psds = lal.series.read_psd_xmldoc(xmldoc, root_name=None)
     return {
         key: timing.InterpolatedPSD(filter.abscissa(psd), psd.data.data,
             f_high_truncate=opts.f_high_truncate)
         for key, psd in psds.items() if psd is not None}
Example #26
 def reference_psds_for_filename(filename):
     xmldoc = ligolw_utils.load_filename(
         filename, contenthandler=lal.series.PSDContentHandler)
     psds = lal.series.read_psd_xmldoc(xmldoc, root_name=None)
     return {
         key: timing.InterpolatedPSD(filter.abscissa(psd), psd.data.data,
             f_high_truncate=opts.f_high_truncate)
         for key, psd in psds.items() if psd is not None}
Example #27
File: bank.py Project: lenona/pycbc
    def __init__(self, filename, approximant=None, **kwds):
        self.indoc = ligolw_utils.load_filename(
            filename, False, contenthandler=LIGOLWContentHandler)
        self.table = table.get_table(
            self.indoc, lsctables.SnglInspiralTable.tableName)
        self.extra_args = kwds  

        self.approximant_str = approximant
Example #28
    def get_coincs_from_coinctable(self, files):
        """
    read data from coinc tables (xml format)
    
    FIXME: currently assumes one coinc per file!!!
    """
        for file in files:
            coinc = CoincData()
            xmldoc = utils.load_filename(file)
            sngltab = tab.get_table(xmldoc,
                                    lsctables.SnglInspiralTable.tableName)
            coinc.set_snr(dict((row.ifo, row.snr) for row in sngltab))
            coinc.set_gps(
                dict((row.ifo, LIGOTimeGPS(row.get_end())) for row in sngltab))
            #FIXME: this is put in place to deal with eff_distance = 0
            # needs to be fixed upstream in the pipeline
            effDs = list((row.ifo, row.eff_distance) for row in sngltab)
            for eD in effDs:
                if eD[1] == 0.:
                    effDs.append((eD[0], 1.))
                    effDs.remove(eD)
            coinc.set_effDs(dict(effDs))
            #      coinc.set_effDs(dict((row.ifo,row.eff_distance) for row in sngltab))
            coinc.set_masses(dict((row.ifo, row.mass1) for row in sngltab), \
                             dict((row.ifo, row.mass2) for row in sngltab))
            ctab = tab.get_table(xmldoc,
                                 lsctables.CoincInspiralTable.tableName)
            #FIXME: ignoring H2 for now, but should be dealt in a better way
            allifos = list(ctab[0].get_ifos())
            try:
                allifos.remove('H2')
            except ValueError:
                pass
            coinc.set_ifos(allifos)
            if ctab[0].false_alarm_rate is not None:
                coinc.set_FAR(ctab[0].false_alarm_rate)

            try:
                simtab = tab.get_table(xmldoc,
                                       lsctables.SimInspiralTable.tableName)
                row = simtab[0]
                effDs_inj = {}
                for ifo in coinc.ifo_list:
                    if ifo == 'H1':
                        effDs_inj[ifo] = row.eff_dist_h
                    elif ifo == 'L1':
                        effDs_inj[ifo] = row.eff_dist_l
                    elif ifo == 'V1':
                        effDs_inj[ifo] = row.eff_dist_v
                dist_inj = row.distance
                coinc.set_inj_params(row.latitude,row.longitude,row.mass1,row.mass2, \
                                     dist_inj,effDs_inj)
                coinc.is_injection = True
            #FIXME: name the exception!
            except:
                pass

            self.append(coinc)
Example #29
def readHorizonDistanceFromSummValueTable(
        fList, verbose=False, contenthandler=SummValueContentHandler):
    """
  read in the SummValueTables from a list of files and return the
  horizon distance versus total mass

  @param fList:   list of input files
  @param verbose: boolean (default False)
  """

    output = {}
    massOutput = {}
    count = 0
    if len(fList) == 0:
        return output

    # for each file in the list
    for thisFile in fList:
        if verbose:
            print str(count + 1) + "/" + str(len(fList)) + " " + thisFile
        count = count + 1
        massNum = 0

        doc = utils.load_filename(thisFile, contenthandler=contenthandler)
        try:
            summ_value_table = table.get_table(
                doc, lsctables.SummValueTable.tableName)
        except ValueError:
            print "ValueError in readHorizonDistanceFromSummValueTable while reading summvalue table from file ", thisFile
            return output, massOutput

        # if no summ_value table was filled, then simply return
        if summ_value_table is None:
            return output, massOutput

        # else
        for row in summ_value_table:
            # we should find a name "inspiral_effective_distance"
            if row.name == 'inspiral_effective_distance':
                # it may be that the file read is an inspiral file containing only the BNS information
                if (row.comment == '1.40_1.40_8.00') or (row.comment
                                                         == '1.4_1.4_8'):
                    if not output.has_key(row.ifo):
                        output[row.ifo] = lsctables.New(
                            lsctables.SummValueTable)
                    output[row.ifo].append(row)
                # or a template bank containing a whole list of inspiral_effective_distance
                else:
                    if not massOutput.has_key(row.ifo):
                        massOutput[row.ifo] = [
                            lsctables.New(lsctables.SummValueTable)
                        ]
                    if len(massOutput[row.ifo]) < massNum + 1:
                        massOutput[row.ifo].append(
                            lsctables.New(lsctables.SummValueTable))
                    massOutput[row.ifo][massNum].append(row)
                    massNum += 1
    return output, massOutput
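A minimal usage sketch; the input file name is hypothetical. The first dictionary holds the BNS horizon-distance rows per ifo, the second the per-template-bank rows.

# Hypothetical inspiral output file containing summ_value rows.
output, massOutput = readHorizonDistanceFromSummValueTable(["H1-INSPIRAL.xml"], verbose=True)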
Example #30
def read_triggers(trigger_file):
    from glue.ligolw import array, param, ligolw, table, lsctables, utils
    class ContentHandler(ligolw.LIGOLWContentHandler): pass
    for module in [array, param, table, lsctables]:
        module.use_in(ContentHandler)

    xml_doc = utils.load_filename(trigger_file.path,
                                  contenthandler=ContentHandler)
    return table.get_table(xml_doc, lsctables.SnglBurstTable.tableName)
Example #31
def load_table(sim_file):

    xml_doc = utils.load_filename(sim_file,
                                  contenthandler=lsctables.use_in(
                                      ligolw.LIGOLWContentHandler))

    sim_inspiral_table = table.get_table(xml_doc,
                                         lsctables.SimInspiralTable.tableName)

    return sim_inspiral_table
Example #32
  def get_coincs_from_coinctable(self,files):
    """
    read data from coinc tables (xml format)
    
    FIXME: currently assumes one coinc per file!!!
    """
    for file in files:
      coinc = CoincData()
      xmldoc = utils.load_filename(file, contenthandler=ligolw.LIGOLWContentHandler)
      sngltab = lsctables.SnglInspiralTable.get_table(xmldoc)
      coinc.set_snr(dict((row.ifo, row.snr) for row in sngltab))
      coinc.set_gps(dict((row.ifo, lal.LIGOTimeGPS(row.get_end())) for row in sngltab))
      #FIXME: this is put in place to deal with eff_distance = 0
      # needs to be fixed upstream in the pipeline
      effDs = list((row.ifo,row.eff_distance) for row in sngltab)
      for eD in effDs:
        if eD[1] == 0.:
          effDs.append((eD[0],1.))
          effDs.remove(eD)
      coinc.set_effDs(dict(effDs))
#      coinc.set_effDs(dict((row.ifo,row.eff_distance) for row in sngltab))
      coinc.set_masses(dict((row.ifo, row.mass1) for row in sngltab), \
                       dict((row.ifo, row.mass2) for row in sngltab))
      ctab = lsctables.CoincInspiralTable.get_table(xmldoc)
      #FIXME: ignoring H2 for now, but should be dealt in a better way
      allifos = list(ctab[0].get_ifos())
      try:
        allifos.remove('H2')
      except ValueError:
        pass
      coinc.set_ifos(allifos)
      if ctab[0].false_alarm_rate is not None:
        coinc.set_FAR(ctab[0].false_alarm_rate)

      try:
        simtab = lsctables.SimInspiralTable.get_table(xmldoc)
        row = simtab[0]
        effDs_inj = {}
        for ifo in coinc.ifo_list:
          if ifo == 'H1':
            effDs_inj[ifo] = row.eff_dist_h
          elif ifo == 'L1':
            effDs_inj[ifo] = row.eff_dist_l
          elif ifo == 'V1':
            effDs_inj[ifo] = row.eff_dist_v
        dist_inj = row.distance
        coinc.set_inj_params(row.latitude,row.longitude,row.mass1,row.mass2, \
                             dist_inj,effDs_inj)
        coinc.is_injection = True
      #FIXME: name the exception!
      except:
        pass

      self.append(coinc)
Example #33
def get_segment_summary_times(scienceFile, segmentName):
    """
    This function will find the times for which the segment_summary is set
    for the flag given by segmentName.

    Parameters
    -----------
    scienceFile : SegFile
        The segment file that we want to use to determine this.
    segmentName : string
        The DQ flag to search for times in the segment_summary table.

    Returns
    ---------
    summSegList : glue.segments.segmentlist
        The times that are covered in the segment summary table.
    """
    # Parse the segmentName
    segmentName = segmentName.split(':')
    if not len(segmentName) in [2, 3]:
        raise ValueError("Invalid channel name %s." % (segmentName))
    ifo = segmentName[0]
    channel = segmentName[1]
    version = ''
    if len(segmentName) == 3:
        version = int(segmentName[2])

    # Load the filename
    xmldoc = utils.load_filename(
        scienceFile.cache_entry.path,
        gz=scienceFile.cache_entry.path.endswith("gz"),
        contenthandler=ContentHandler)

    # Get the segment_def_id for the segmentName
    segmentDefTable = table.get_table(xmldoc, "segment_definer")
    for entry in segmentDefTable:
        if (entry.ifos == ifo) and (entry.name == channel):
            if len(segmentName) == 2 or (entry.version == version):
                segDefID = entry.segment_def_id
                break
    else:
        raise ValueError("Cannot find channel %s in segment_definer table."\
                         %(segmentName))

    # Get the segmentlist corresponding to this segmentName in segment_summary
    segmentSummTable = table.get_table(xmldoc, "segment_summary")
    summSegList = segments.segmentlist([])
    for entry in segmentSummTable:
        if entry.segment_def_id == segDefID:
            segment = segments.segment(entry.start_time, entry.end_time)
            summSegList.append(segment)
    summSegList.coalesce()

    return summSegList
Example #34
def get_combined_array(tablename, childnode):
    # FIXME assumes that all the xml files have the same binned array tables
    # Figure out the shape of the arrays in the file, make an array with one more
    # dimension, the number of files from sys.argv[1:]
    xmldoc = utils.load_filename(sys.argv[1],
                                 verbose=True,
                                 gz=(sys.argv[1] or "stdin").endswith(".gz"))
    xmldoc = xmldoc.childNodes[0]
    A = rate.binned_array_from_xml(xmldoc.childNodes[childnode], tablename)
    bins = rate.bins_from_xml(xmldoc.childNodes[childnode])
    out = numpy.zeros((len(sys.argv[1:]), ) + A.array.shape, dtype="float")
    # Read the data
    for i, f in enumerate(sys.argv[1:]):
        xmldoc = utils.load_filename(f,
                                     verbose=True,
                                     gz=(f or "stdin").endswith(".gz"))
        xmldoc = xmldoc.childNodes[0]
        out[i] = rate.binned_array_from_xml(xmldoc.childNodes[childnode],
                                            tablename).array
    A.array = numpy.zeros(A.array.shape)
    return bins, out, A
Example #35
def get_segment_summary_times(scienceFile, segmentName):
    """
    This function will find the times for which the segment_summary is set
    for the flag given by segmentName.

    Parameters
    -----------
    scienceFile : SegFile
        The segment file that we want to use to determine this.
    segmentName : string
        The DQ flag to search for times in the segment_summary table.

    Returns
    ---------
    summSegList : glue.segments.segmentlist
        The times that are covered in the segment summary table.
    """
    # Parse the segmentName
    segmentName = segmentName.split(':')
    if not len(segmentName) in [2,3]:
        raise ValueError("Invalid channel name %s." %(segmentName))
    ifo = segmentName[0]
    channel = segmentName[1]
    version = ''
    if len(segmentName) == 3:
        version = int(segmentName[2])

    # Load the filename
    xmldoc = utils.load_filename(scienceFile.cache_entry.path,
                             gz=scienceFile.cache_entry.path.endswith("gz"),
                             contenthandler=ContentHandler)

    # Get the segment_def_id for the segmentName
    segmentDefTable = table.get_table(xmldoc, "segment_definer")
    for entry in segmentDefTable:
        if (entry.ifos == ifo) and (entry.name == channel):
            if len(segmentName) == 2 or (entry.version==version):
                segDefID = entry.segment_def_id
                break
    else:
        raise ValueError("Cannot find channel %s in segment_definer table."\
                         %(segmentName))

    # Get the segmentlist corresponding to this segmentName in segment_summary
    segmentSummTable = table.get_table(xmldoc, "segment_summary")
    summSegList = segments.segmentlist([])
    for entry in segmentSummTable:
        if entry.segment_def_id == segDefID:
            segment = segments.segment(entry.start_time, entry.end_time)
            summSegList.append(segment)
    summSegList.coalesce()

    return summSegList
Example #36
def readHorizonDistanceFromSummValueTable(fList, verbose=False, contenthandler=SummValueContentHandler):
  """
  read in the SummValueTables from a list of files and return the
  horizon distance versus total mass

  @param fList:   list of input files
  @param verbose: boolean (default False)
  """

  output = {}
  massOutput = {}
  count = 0
  if len(fList) == 0:
    return output

  # for each file in the list
  for thisFile in fList:
    if verbose:
      print str(count+1)+"/"+str(len(fList))+" " + thisFile
    count = count+1
    massNum = 0

    doc = utils.load_filename(thisFile, contenthandler = contenthandler)
    try:
      summ_value_table = table.get_table(doc, lsctables.SummValueTable.tableName)
    except ValueError:
      print "ValueError in readHorizonDistanceFromSummValueTable while reading summvalue table from file ", thisFile
      return output,massOutput

    # if no summ_value table was filled, then simply return
    if summ_value_table is None:
      return output,massOutput

    # else
    for row in summ_value_table:
      # we should find a name "inspiral_effective_distance"
      if row.name == 'inspiral_effective_distance':
        # it may be that the file read is an inspiral file containing only the BNS information
        if (row.comment == '1.40_1.40_8.00') or (row.comment == '1.4_1.4_8'):
          if not output.has_key(row.ifo):
            output[row.ifo] = lsctables.New(lsctables.SummValueTable)
          output[row.ifo].append(row)
        # or a template bank containing a whole list of inspiral_effective_distance
        else:
          if not massOutput.has_key(row.ifo):
            massOutput[row.ifo] = [lsctables.New(lsctables.SummValueTable)]
          if len(massOutput[row.ifo]) < massNum + 1:
            massOutput[row.ifo].append(lsctables.New(lsctables.SummValueTable))
          massOutput[row.ifo][massNum].append(row)
          massNum += 1
  return output,massOutput
Example #37
def rapidpe_to_hdf(basegrp, bankfile, samplefiles):
    # import (or look up template bank)
    #bank_xmldoc = utils.load_filename(args.tmplt_bank_file, contenthandler=ligolw.LIGOLWContentHandler)
    bank_xmldoc = utils.load_filename(bankfile, contenthandler=ligolw.LIGOLWContentHandler)
    try:
        tmplt_bank = lsctables.SimInspiralTable.get_table(bank_xmldoc)
    except:
        tmplt_bank = lsctables.SnglInspiralTable.get_table(bank_xmldoc)

    for sample_file in samplefiles:
        print "Processing %s" % sample_file

        # Get tables from xmldoc
        xmldoc = utils.load_filename(sample_file, contenthandler=ligolw.LIGOLWContentHandler)
        process_params = lsctables.ProcessParamsTable.get_table(xmldoc)
        samples = lsctables.SimInspiralTable.get_table(xmldoc)
        run_result = lsctables.SnglInspiralTable.get_table(xmldoc)[0]

        # Get intrinsic parameter used
        intr_prms = get_intr_prms_from_pp_table(process_params)

        # Identify intrinisic ID
        tmplt_id = obtain_tmplt_id(tmplt_bank, **intr_prms)

        # Append sample data and metadata
        subgrp = basegrp.create_group(str(tmplt_id))
        xmlutils.append_samples_to_hdf5_group(subgrp, samples)

        def pp_table_to_dict(pptable):
            return dict([(pp.param, pp.value) for pp in pptable])

        run_info = pp_table_to_dict(process_params)
        for key in ("snr", "tau0", "ttotal"):
            run_info[KEY_MAPPING[key]] = getattr(run_result, key)

        xmlutils.append_metadata_to_hdf5_group(subgrp, run_info)

        xmldoc.unlink()
Example #38
def get_refpsd_xml(xml):
    retdict = dict()
    xmldoc = ligolw_utils.load_filename(xml, contenthandler = PSDContentHandler, verbose = True)
    root_name = u"psd"
    xmldoc, = (elem for elem in xmldoc.getElementsByTagName(ligolw.LIGO_LW.tagName) if elem.hasAttribute(u"Name") and elem.Name == root_name)
    for elem in xmldoc.getElementsByTagName(ligolw.LIGO_LW.tagName):
        if elem.hasAttribute(u"Name") and elem.Name == u"REAL8FrequencySeries":
            ifo = ligolw_param.get_pyvalue(elem, u"instrument")
            # t, = elem.getElementsByTagName(ligolw.Time.tagName)
            a, = elem.getElementsByTagName(ligolw.Array.tagName)
            # dims = a.getElementsByTagName(ligolw.Dim.tagName)
            # f0 = ligolw_param.get_param(elem, u"f0")
            retdict[str(ifo)] = a.array
    return retdict
Example #39
def open_xmldoc(f, **kwargs):
    """Try and open an existing LIGO_LW-format file, or create a new Document
    """
    from glue.ligolw.lsctables import use_in
    from glue.ligolw.ligolw import Document
    from glue.ligolw.utils import load_filename, load_fileobj
    use_in(kwargs['contenthandler'])
    try:  # try and load existing file
        if isinstance(f, string_types):
            return load_filename(f, **kwargs)
        if isinstance(f, FILE_LIKE):
            return load_fileobj(f, **kwargs)[0]
    except (OSError, IOError):  # or just create a new Document
        return Document()
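A minimal usage sketch: unlike the earlier variant, this one requires the caller to supply a contenthandler keyword. The file name is hypothetical.

from glue.ligolw.ligolw import LIGOLWContentHandler

# Hypothetical path; an explicit contenthandler must be passed.
xmldoc = open_xmldoc("events.xml", contenthandler=LIGOLWContentHandler)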
Example #40
def read_ligolw(filepath, table_name, columns=None):
    from . import utils
    # read table into GLUE LIGO_LW
    if columns:
        TableType = lsctables.TableByName[table_name]
        _oldcols = TableType.loadcolumns
        TableType.loadcolumns = columns
    if isinstance(filepath, basestring):
        xmldoc = ligolw_utils.load_filename(filepath)
    else:
        xmldoc,_ = ligolw_utils.load_fileobj(filepath)
    out = ligolw_table.get_table(xmldoc, table_name)
    if columns:
        TableType.loadcolumns = _oldcols
    return utils.to_table(out, columns=columns)
Example #41
def _read_xml(f, fallbackpath=None):
    if f is None:
        doc = filename = None
    elif isinstance(f, Element):
        doc = f
        filename = ''
    elif isinstance(f, str):
        try:
            doc = load_filename(f, contenthandler=ContentHandler)
        except IOError as e:
            if e.errno == errno.ENOENT and fallbackpath and \
                    not os.path.isabs(f):
                f = os.path.join(fallbackpath, f)
                doc = load_filename(f, contenthandler=ContentHandler)
            else:
                raise
        filename = f
    else:
        doc, _ = load_fileobj(f, contenthandler=ContentHandler)
        try:
            filename = f.name
        except AttributeError:
            filename = ''
    return doc, filename
Example #42
def _read_xml(f):
    if f is None:
        doc = filename = None
    elif isinstance(f, Element):
        doc = f
        filename = ''
    elif isinstance(f, six.string_types):
        doc = load_filename(f, contenthandler=_ContentHandler)
        filename = f
    else:
        doc, _ = load_fileobj(f, contenthandler=_ContentHandler)
        try:
            filename = f.name
        except AttributeError:
            filename = ''
    return doc, filename
Example #43
def _read_xml(f):
    if f is None:
        doc = filename = None
    elif isinstance(f, Element):
        doc = f
        filename = ''
    elif isinstance(f, six.string_types):
        doc = load_filename(f, contenthandler=_ContentHandler)
        filename = f
    else:
        doc, _ = load_fileobj(f, contenthandler=_ContentHandler)
        try:
            filename = f.name
        except AttributeError:
            filename = ''
    return doc, filename
Example #44
def readCoincInspiralFromFiles(fileList, statistic=None):
    """
  read in the Sngl and SimInspiralTables from a list of files
  if Sngls are found, construct coincs, add injections (if any)
  also return Sims (if any)
  @param fileList: list of input files
  @param statistic: instance of coincStatistic, to use in creating coincs
  """
    if not fileList:
        return coincInspiralTable(), None

    if not (isinstance(statistic, coincStatistic)):
        raise TypeError, "invalid statistic, must be coincStatistic"

    sims = None
    coincs = None

    lsctables.use_in(ExtractCoincInspiralTableLIGOLWContentHandler)
    for thisFile in fileList:
        doc = utils.load_filename(
            thisFile,
            gz=(thisFile or "stdin").endswith(".gz"),
            contenthandler=ExtractCoincInspiralTableLIGOLWContentHandler)
        # extract the sim inspiral table
        try:
            simInspiralTable = lsctables.SimInspiralTable.get_table(doc)
            if sims: sims.extend(simInspiralTable)
            else: sims = simInspiralTable
        except:
            simInspiralTable = None

        # extract the sngl inspiral table, construct coincs
        try:
            snglInspiralTable = lsctables.SnglInspiralTable.get_table(doc)
        except:
            snglInspiralTable = None
        if snglInspiralTable:
            coincFromFile = coincInspiralTable(snglInspiralTable, statistic)
            if simInspiralTable:
                coincFromFile.add_sim_inspirals(simInspiralTable)
            if coincs: coincs.extend(coincFromFile)
            else: coincs = coincFromFile

        doc.unlink()

    return coincs, sims
Example #45
def check_segment_availability(grb_name, grb_time, query_start, query_end,
                               offset, ifo, segmentName):
    '''
  Searches +/- offset from GRB time to download the latest segment lists then extracts times and puts them into a txt file.
  '''
    args = {
        'grb_name': grb_name,
        'query_start': query_start,
        'query_end': query_end,
        'ifo': ifo,
        'segmentName': segmentName
    }
    cmd = "ligolw_segment_query --database --query-segments --include-segments '{segmentName}' --gps-start-time {query_start} --gps-end-time {query_end} > ./segments{ifo}_grb{grb_name}.xml".format(
        **args)
    print '>>', cmd
    print
    process = subprocess.Popen([cmd],
                               shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    output, err = process.communicate()

    # try to open the file
    try:
        doc = utils.load_filename(
            "segments{ifo}_grb{grb_name}.xml".format(**args),
            contenthandler=lsctables.use_in(ligolw.LIGOLWContentHandler))
    except:
        raise IOError, "Error reading file: segments{ifo}_grb{grb_name}.xml".format(
            **args)

    # extract the segment list from segment:table and store in a txt file
    segs = table.get_table(doc, "segment")
    seglist = segments.segmentlist(
        segments.segment(s.start_time, s.end_time) for s in segs)
    segmentsUtils.tosegwizard(file(
        "{ifo}-science_grb{grb_name}.txt".format(**args), 'w'),
                              seglist,
                              header=True)

    print ">> %s segments +/-%ds from %ds found:" % (ifo, offset, grb_time)
    for s in segs:
        print "Start:", s.start_time, "End:", s.end_time, "Duration:", s.end_time - s.start_time
    print

    return
Example #46
def select_segments_by_definer(segment_file, segment_name=None, ifo=None):
    """ Return the list of segments that match the segment name
    
    Parameters
    ----------
    segment_file: str
        path to segment xml file
    
    segment_name: str
        Name of segment
    ifo: str, optional
    
    Returns
    -------
    seg: list of segments
    """
    from glue.ligolw.ligolw import LIGOLWContentHandler as h
    lsctables.use_in(h)
    indoc = ligolw_utils.load_filename(segment_file, False, contenthandler=h)
    segment_table = table.get_table(indoc, 'segment')

    seg_def_table = table.get_table(indoc, 'segment_definer')
    def_ifos = seg_def_table.getColumnByName('ifos')
    def_names = seg_def_table.getColumnByName('name')
    def_ids = seg_def_table.getColumnByName('segment_def_id')

    valid_id = []
    for def_ifo, def_name, def_id in zip(def_ifos, def_names, def_ids):
        if ifo and ifo != def_ifo:
            continue
        if segment_name and segment_name != def_name:
            continue
        valid_id += [def_id]

    start = numpy.array(segment_table.getColumnByName('start_time'))
    start_ns = numpy.array(segment_table.getColumnByName('start_time_ns'))
    end = numpy.array(segment_table.getColumnByName('end_time'))
    end_ns = numpy.array(segment_table.getColumnByName('end_time_ns'))
    start, end = start + 1e-9 * start_ns, end + 1e-9 * end_ns
    did = segment_table.getColumnByName('segment_def_id')

    keep = numpy.array([d in valid_id for d in did])
    if sum(keep) > 0:
        return start_end_to_segments(start[keep], end[keep])
    else:
        return segmentlist([])
Example #47
def get_segments_from_xml(filename):
    """ Return a segmentlist of every segment in an XML file with a
        segments table.
    """

    # read XML file
    seg_xml = utils.load_filename(filename, contenthandler=DefaultContentHandler)

    # get the segment table
    seg_table = lsctables.SegmentTable.get_table(seg_xml)

    # loop over segments table to get all the segments
    segs = segments.segmentlist()
    for seg in seg_table:
        segs.append(segments.segment(seg.start_time, seg.end_time))

    return segs
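A minimal usage sketch with a hypothetical veto file name.

# Hypothetical veto segment file.
veto_segs = get_segments_from_xml("H1-VETOTIME_CAT2.xml")
print("total vetoed time: %s s" % abs(veto_segs.coalesce()))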
Example #48
 def getAuxChannels(self,inputList):
   intermediateTable = {'type':[],'ifo':[],'qscan_time':[],'qscan_dir':[],'channel_name':[],'peak_time':[],'peak_frequency':[],'peak_q':[],'peak_significance':[],'peak_amplitude':[]}
   try:
     doc = utils.load_filename(inputList[0] + "/summary.xml",verbose=True,gz=False,xmldoc=None,contenthandler=None)
     qscanTable = table.get_table(doc, "qscan:summary:table")
   except:
     print >> sys.stderr, "failed to read" + inputList[0] + "/summary.xml"
     return intermediateTable
   for channel in qscanTable:
     for param in self.paramMaps:
       intermediateTable[param[0]].append(eval('channel.' + param[1]))
     intermediateTable['qscan_dir'].append(inputList[0])
     #if len(inputList) == 4:
     intermediateTable['qscan_time'].append(inputList[1])
     intermediateTable['type'].append(inputList[2])
     intermediateTable['ifo'].append(inputList[3])
   return intermediateTable
Example #49
def select_segments_by_definer(segment_file, segment_name=None, ifo=None):
    """ Return the list of segments that match the segment name

    Parameters
    ----------
    segment_file: str
        path to segment xml file

    segment_name: str
        Name of segment
    ifo: str, optional

    Returns
    -------
    seg: list of segments
    """
    from glue.ligolw.ligolw import LIGOLWContentHandler as h; lsctables.use_in(h)
    indoc = ligolw_utils.load_filename(segment_file, False, contenthandler=h)
    segment_table  = table.get_table(indoc, 'segment')

    seg_def_table = table.get_table(indoc, 'segment_definer')
    def_ifos = seg_def_table.getColumnByName('ifos')
    def_names = seg_def_table.getColumnByName('name')
    def_ids = seg_def_table.getColumnByName('segment_def_id')

    valid_id = []
    for def_ifo, def_name, def_id in zip(def_ifos, def_names, def_ids):
        if ifo and ifo != def_ifo:
            continue
        if segment_name and segment_name != def_name:
            continue
        valid_id += [def_id]

    start = numpy.array(segment_table.getColumnByName('start_time'))
    start_ns = numpy.array(segment_table.getColumnByName('start_time_ns'))
    end = numpy.array(segment_table.getColumnByName('end_time'))
    end_ns = numpy.array(segment_table.getColumnByName('end_time_ns'))
    start, end = start + 1e-9 * start_ns, end + 1e-9 * end_ns
    did = segment_table.getColumnByName('segment_def_id')

    keep = numpy.array([d in valid_id for d in did])
    if sum(keep) > 0:
        return start_end_to_segments(start[keep], end[keep])
    else:
        return segmentlist([])
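A minimal usage sketch; the file name and flag name are hypothetical placeholders.

# Hypothetical segment file and data-quality flag.
sci = select_segments_by_definer("segments.xml",
                                 segment_name="DMT-ANALYSIS_READY", ifo="H1")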
Example #50
    def __init__(self, filename, filter_length, delta_f, f_lower,
                 dtype, out=None, approximant=None, **kwds):
        self.out = out
        self.dtype = dtype
        self.f_lower = f_lower
        self.approximant = approximant
        self.filename = filename
        self.delta_f = delta_f
        # length of the corresponding time-domain vector and its sample spacing
        self.N = (filter_length - 1) * 2
        self.delta_t = 1.0 / (self.N * self.delta_f)
        self.filter_length = filter_length
        # index of the lowest frequency bin retained by the filter
        self.kmin = int(f_lower / delta_f)

        # read the sngl_inspiral template bank from the LIGO_LW XML file
        self.indoc = ligolw_utils.load_filename(
            filename, False, contenthandler=LIGOLWContentHandler)
        self.table = table.get_table(
            self.indoc, lsctables.SnglInspiralTable.tableName)
        self.extra_args = kwds  
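A hedged instantiation sketch; TemplateBank is only a placeholder name for whatever class this __init__ belongs to, and the bank file, frequency resolution, and dtype are illustrative values:

import numpy
bank = TemplateBank("H1-BANK.xml.gz", filter_length=1048577, delta_f=1.0 / 256,
                    f_lower=30.0, dtype=numpy.complex64, approximant="SPAtmplt")
print("%d templates, delta_t = %g s" % (len(bank.table), bank.delta_t))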
Exemplo n.º 51
0
def load_likelihood_data(filenames, verbose = False):
	coinc_params = None
	seglists = None
	for n, filename in enumerate(filenames, 1):
		if verbose:
			print >>sys.stderr, "%d/%d:" % (n, len(filenames)),
		xmldoc = ligolw_utils.load_filename(filename, verbose = verbose, contenthandler = StringCoincParamsDistributions.contenthandler)
		this_coinc_params = StringCoincParamsDistributions.from_xml(xmldoc, u"string_cusp_likelihood")
		this_seglists = lsctables.SearchSummaryTable.get_table(xmldoc).get_out_segmentlistdict(set([this_coinc_params.process_id])).coalesce()
		xmldoc.unlink()
		if coinc_params is None:
			coinc_params = this_coinc_params
		else:
			coinc_params += this_coinc_params
		if seglists is None:
			seglists = this_seglists
		else:
			seglists |= this_seglists
	return coinc_params, seglists
def load_time_slides(filename, verbose = False, gz = False):
	"""
	Load a time_slide table from the LIGO Light Weight XML file named
	filename, or stdin if filename is None.  Extra verbosity is printed
	if verbose is True, and the file is gzip decompressed while reading
	if gz is True.  The output is returned as a dictionary, mapping each
	time slide ID to a dictionary providing a mapping of instrument to
	offset for that time slide.

	Note that a side effect of this function is that the ID generator
	associated with the TimeSlideTable class in glue.ligolw.lsctables
	is synchronized with the result, so that the next ID it generates
	will be immediately following the IDs listed in the dictionary
	returned by this function.
	"""
	time_slide_table = table.get_table(utils.load_filename(filename, verbose = verbose, gz = (filename or "stdin")[-3:] == ".gz"), lsctables.TimeSlideTable.tableName)
	time_slides = time_slide_table.as_dict()
	time_slide_table.sync_next_id()
	return time_slides
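A brief usage sketch for load_time_slides; the file name is a placeholder, and the returned dictionary maps each time_slide_id to an {instrument: offset} dictionary as the docstring describes:

time_slides = load_time_slides("TIME_SLIDES.xml.gz", verbose=True)
for ts_id, offsets in time_slides.items():
    print("%s -> %s" % (ts_id, offsets))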
Exemplo n.º 53
0
def load_likelihood_data(filenames, verbose = False):
	coinc_params = None
	seglists = None
	for n, filename in enumerate(filenames, 1):
		if verbose:
			print("%d/%d:" % (n, len(filenames)), end=' ', file=sys.stderr)
		xmldoc = ligolw_utils.load_filename(filename, verbose = verbose, contenthandler = StringCoincParamsDistributions.LIGOLWContentHandler)
		this_coinc_params = StringCoincParamsDistributions.from_xml(xmldoc, u"string_cusp_likelihood")
		this_seglists = lsctables.SearchSummaryTable.get_table(xmldoc).get_out_segmentlistdict(lsctables.ProcessTable.get_table(xmldoc).get_ids_by_program(u"lalapps_string_meas_likelihood")).coalesce()
		xmldoc.unlink()
		if coinc_params is None:
			coinc_params = this_coinc_params
		else:
			coinc_params += this_coinc_params
		if seglists is None:
			seglists = this_seglists
		else:
			seglists |= this_seglists
	return coinc_params, seglists
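A hedged usage sketch for the Python 3 variant above; the file names are placeholders, and abs() of each glue segmentlist gives its total live time:

coinc_params, seglists = load_likelihood_data(
    ["likelihood-0.xml.gz", "likelihood-1.xml.gz"], verbose=True)
for instrument, seglist in seglists.items():
    print("%s: %s s analyzed" % (instrument, abs(seglist)))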
Exemplo n.º 55
0
def readCoincInspiralFromFiles(fileList,statistic=None):
  """
  read in the Sngl and SimInspiralTables from a list of files
  if Sngls are found, construct coincs, add injections (if any)
  also return Sims (if any)
  @param fileList: list of input files
  @param statistic: instance of coincStatistic, to use in creating coincs
  """
  if not fileList:
    return coincInspiralTable(), None

  if not (isinstance(statistic,coincStatistic)):
    raise TypeError, "invalid statistic, must be coincStatistic"

  sims = None
  coincs = None

  lsctables.use_in(ExtractCoincInspiralTableLIGOLWContentHandler)
  for thisFile in fileList:
    doc = utils.load_filename(thisFile, gz = (thisFile or "stdin").endswith(".gz"), contenthandler=ExtractCoincInspiralTableLIGOLWContentHandler)
    # extract the sim inspiral table
    try: 
      simInspiralTable = \
          table.get_table(doc, lsctables.SimInspiralTable.tableName)
      if sims: sims.extend(simInspiralTable)
      else: sims = simInspiralTable
    except: simInspiralTable = None

    # extract the sngl inspiral table, construct coincs
    try: snglInspiralTable = \
      table.get_table(doc, lsctables.SnglInspiralTable.tableName)
    except: snglInspiralTable = None
    if snglInspiralTable:
      coincFromFile = coincInspiralTable(snglInspiralTable,statistic)
      if simInspiralTable: 
        coincFromFile.add_sim_inspirals(simInspiralTable) 
      if coincs: coincs.extend(coincFromFile)
      else: coincs = coincFromFile

    doc.unlink()

  return coincs, sims
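A hedged usage sketch for readCoincInspiralFromFiles; the file name is a placeholder and the coincStatistic constructor is assumed to take a statistic name such as "snr", as in pylal's CoincInspiralUtils:

stat = coincStatistic("snr")  # assumed constructor signature
coincs, sims = readCoincInspiralFromFiles(["H1L1-COINCS.xml.gz"], statistic=stat)
# coincs is a coincInspiralTable (or None), sims a SimInspiralTable (or None)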
Exemplo n.º 56
0
def ReadMultiInspiralFromFiles(fileList):
  """
  Read the multiInspiral tables from a list of files
  @param fileList: list of input files
  """
  if not fileList:
    return multiInspiralTable()

  multis = None

  for thisFile in fileList:
    doc = utils.load_filename(thisFile,
        gz=(thisFile or "stdin").endswith(".gz"), contenthandler = lsctables.use_in(ligolw.LIGOLWContentHandler))
    # extract the multi inspiral table
    try:
      multiInspiralTable = lsctables.MultiInspiralTable.get_table(doc)
      if multis: multis.extend(multiInspiralTable)
      else: multis = multiInspiralTable
    except: multiInspiralTable = None
  return multis
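A short usage sketch for ReadMultiInspiralFromFiles; the file name is a placeholder:

multis = ReadMultiInspiralFromFiles(["H1L1V1-MULTI_INSPIRAL.xml.gz"])
if multis is not None:
    print("%d multi-inspiral rows read" % len(multis))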
Exemplo n.º 57
0
def query_segments_xml(xml_location, gps_start, gps_end, spec):
    """
	Retrieve the segment table from a location, and clip segments to (gps_start, gps_end). If spec is given, retrieve only segments with this definer, otherwise, get all of them.
	"""
    if spec is None:
        spec = True
    else:
        # ifo, definer, version = spec.split(":")
        definer = spec.split(":")
        ifo, definer, version = definer[0], ":".join(definer[1:-1]), definer[-1]
    xmldoc = utils.load_filename(xml_location)
    segment_definer = table.get_table(xmldoc, lsctables.SegmentDefTable.tableName)
    # FIXME: ifo in ifos? What does a segment for a set of ifos even mean?
    seg_def_id = [sd.segment_def_id for sd in segment_definer
                  if spec is True or (ifo in sd.get_ifos() and definer == sd.name)]
    if len(seg_def_id) != 1:
        raise ValueError("Need exactly one definer row for %s:%s:%s, got %d" % (ifo, definer, version, len(seg_def_id)))
    seg_def_id = seg_def_id[0]

    segment = table.get_table(xmldoc, lsctables.SegmentTable.tableName)
    return segmentlist([s.get() for s in segment if s.segment_def_id == seg_def_id])
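A hedged usage sketch for query_segments_xml; the file path, GPS times, and definer spec ("IFO:NAME:VERSION") are placeholders. Note that the snippet shown returns every segment matching the definer; any clipping to the requested GPS window would have to happen outside this excerpt:

segs = query_segments_xml("segments.xml", 1126051217, 1126137617,
                          "H1:DMT-ANALYSIS_READY:1")
print("%d matching segments" % len(segs))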