Code Example #1
File: ligolw_sky_map.py Project: Solaro/lalsuite
def gracedb_sky_map(
        coinc_file, psd_file, waveform, f_low, min_distance=None,
        max_distance=None, prior_distance_power=None, nside=-1):
    # LIGO-LW XML imports.
    from . import ligolw
    from glue.ligolw import table as ligolw_table
    from glue.ligolw import utils as ligolw_utils
    from glue.ligolw import lsctables
    import lal.series

    # Determine approximant, amplitude order, and phase order from the waveform string.
    approximant, amplitude_order, phase_order = \
        timing.get_approximant_and_orders_from_string(waveform)

    # Read input file.
    xmldoc, _ = ligolw_utils.load_fileobj(
        coinc_file, contenthandler=ligolw.LSCTablesContentHandler)

    # Locate the tables that we need.
    coinc_inspiral_table = ligolw_table.get_table(xmldoc,
        lsctables.CoincInspiralTable.tableName)
    coinc_map_table = ligolw_table.get_table(xmldoc,
        lsctables.CoincMapTable.tableName)
    sngl_inspiral_table = ligolw_table.get_table(xmldoc,
        lsctables.SnglInspiralTable.tableName)

    # Locate the sngl_inspiral rows that we need.
    coinc_inspiral = coinc_inspiral_table[0]
    coinc_event_id = coinc_inspiral.coinc_event_id
    event_ids = [coinc_map.event_id for coinc_map in coinc_map_table
        if coinc_map.coinc_event_id == coinc_event_id]
    sngl_inspirals = [next(sngl_inspiral for sngl_inspiral in sngl_inspiral_table
        if sngl_inspiral.event_id == event_id) for event_id in event_ids]
    instruments = set(sngl_inspiral.ifo for sngl_inspiral in sngl_inspirals)

    # Read PSDs.
    if psd_file is None:
        psds = None
    else:
        xmldoc, _ = ligolw_utils.load_fileobj(
            psd_file, contenthandler=lal.series.PSDContentHandler)
        psds = lal.series.read_psd_xmldoc(xmldoc)

        # Rearrange PSDs into the same order as the sngl_inspirals.
        psds = [psds[sngl_inspiral.ifo] for sngl_inspiral in sngl_inspirals]

        # Interpolate PSDs.
        psds = [timing.InterpolatedPSD(filter.abscissa(psd), psd.data.data)
            for psd in psds]

    # TOA+SNR sky localization
    prob, epoch, elapsed_time = ligolw_sky_map(sngl_inspirals, approximant,
        amplitude_order, phase_order, f_low,
        min_distance, max_distance, prior_distance_power,
        nside=nside, psds=psds)

    return prob, epoch, elapsed_time, instruments
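A minimal usage sketch (the file names, waveform string, and distance prior below are illustrative placeholders, not values taken from the project):

# Hypothetical call; 'coinc.xml' and 'psd.xml' are placeholder paths.
with open('coinc.xml', 'rb') as coinc_file, open('psd.xml', 'rb') as psd_file:
    prob, epoch, elapsed_time, instruments = gracedb_sky_map(
        coinc_file, psd_file, 'TaylorF2threePointFivePN', 30.0,
        min_distance=1.0, max_distance=500.0, prior_distance_power=2)
print('localized with %s in %g s' % (', '.join(sorted(instruments)), elapsed_time))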
Code Example #2
File: sky_map.py Project: johnveitch/lalsuite
def gracedb_sky_map(
        coinc_file, psd_file, waveform, f_low, min_distance=None,
        max_distance=None, prior_distance_power=None,
        method="toa_phoa_snr", nside=-1, chain_dump=None,
        phase_convention='antifindchirp', f_high_truncate=1.0,
        enable_snr_series=False):
    # Read input file.
    xmldoc, _ = ligolw_utils.load_fileobj(
        coinc_file, contenthandler=ligolw.LSCTablesAndSeriesContentHandler)

    # Locate the tables that we need.
    coinc_inspiral_table = ligolw_table.get_table(xmldoc,
        lsctables.CoincInspiralTable.tableName)
    coinc_map_table = ligolw_table.get_table(xmldoc,
        lsctables.CoincMapTable.tableName)
    sngl_inspiral_table = ligolw_table.get_table(xmldoc,
        lsctables.SnglInspiralTable.tableName)

    # Locate the sngl_inspiral rows that we need.
    coinc_inspiral = coinc_inspiral_table[0]
    coinc_event_id = coinc_inspiral.coinc_event_id
    event_ids = [coinc_map.event_id for coinc_map in coinc_map_table
        if coinc_map.coinc_event_id == coinc_event_id]
    sngl_inspirals = [next((sngl_inspiral for sngl_inspiral in sngl_inspiral_table
        if sngl_inspiral.event_id == event_id)) for event_id in event_ids]
    instruments = {sngl_inspiral.ifo for sngl_inspiral in sngl_inspirals}

    # Try to load complex SNR time series.
    snrs = ligolw.snr_series_by_sngl_inspiral_id_for_xmldoc(xmldoc)
    try:
        snrs = [snrs[sngl.event_id] for sngl in sngl_inspirals]
    except KeyError:
        snrs = None

    # Read PSDs.
    xmldoc, _ = ligolw_utils.load_fileobj(
        psd_file, contenthandler=lal.series.PSDContentHandler)
    psds = lal.series.read_psd_xmldoc(xmldoc, root_name=None)

    # Rearrange PSDs into the same order as the sngl_inspirals.
    psds = [psds[sngl_inspiral.ifo] for sngl_inspiral in sngl_inspirals]

    # Interpolate PSDs.
    psds = [timing.InterpolatedPSD(filter.abscissa(psd), psd.data.data,
            f_high_truncate=f_high_truncate)
        for psd in psds]

    # Run sky localization
    prob, epoch, elapsed_time = ligolw_sky_map(sngl_inspirals, waveform, f_low,
        min_distance, max_distance, prior_distance_power, method=method,
        nside=nside, psds=psds, phase_convention=phase_convention,
        chain_dump=chain_dump, snr_series=snrs,
        enable_snr_series=enable_snr_series)

    return prob, epoch, elapsed_time, instruments
Code Example #3
File: sky_map.py Project: mattpitkin/lalsuite
def gracedb_sky_map(
        coinc_file, psd_file, waveform, f_low, min_distance=None,
        max_distance=None, prior_distance_power=None,
        method="toa_phoa_snr", nside=-1, chain_dump=None,
        phase_convention='antifindchirp', f_high_truncate=1.0):
    # LIGO-LW XML imports.
    from . import ligolw
    from glue.ligolw import table as ligolw_table
    from glue.ligolw import utils as ligolw_utils
    from glue.ligolw import lsctables
    import lal.series

    # Read input file.
    xmldoc, _ = ligolw_utils.load_fileobj(
        coinc_file, contenthandler=ligolw.LSCTablesContentHandler)

    # Locate the tables that we need.
    coinc_inspiral_table = ligolw_table.get_table(xmldoc,
        lsctables.CoincInspiralTable.tableName)
    coinc_map_table = ligolw_table.get_table(xmldoc,
        lsctables.CoincMapTable.tableName)
    sngl_inspiral_table = ligolw_table.get_table(xmldoc,
        lsctables.SnglInspiralTable.tableName)

    # Locate the sngl_inspiral rows that we need.
    coinc_inspiral = coinc_inspiral_table[0]
    coinc_event_id = coinc_inspiral.coinc_event_id
    event_ids = [coinc_map.event_id for coinc_map in coinc_map_table
        if coinc_map.coinc_event_id == coinc_event_id]
    sngl_inspirals = [next((sngl_inspiral for sngl_inspiral in sngl_inspiral_table
        if sngl_inspiral.event_id == event_id)) for event_id in event_ids]
    instruments = set(sngl_inspiral.ifo for sngl_inspiral in sngl_inspirals)

    # Read PSDs.
    xmldoc, _ = ligolw_utils.load_fileobj(
        psd_file, contenthandler=lal.series.PSDContentHandler)
    psds = lal.series.read_psd_xmldoc(xmldoc)

    # Rearrange PSDs into the same order as the sngl_inspirals.
    psds = [psds[sngl_inspiral.ifo] for sngl_inspiral in sngl_inspirals]

    # Interpolate PSDs.
    psds = [timing.InterpolatedPSD(filter.abscissa(psd), psd.data.data,
            f_high_truncate=f_high_truncate)
        for psd in psds]

    # Run sky localization
    prob, epoch, elapsed_time = ligolw_sky_map(sngl_inspirals, waveform, f_low,
        min_distance, max_distance, prior_distance_power, method=method,
        nside=nside, psds=psds, phase_convention=phase_convention,
        chain_dump=chain_dump)

    return prob, epoch, elapsed_time, instruments
Code Example #4
File: veto.py Project: aburony1970/pycbc
def get_segment_definer_comments(xml_file, include_version=True):
    """Returns a dict with the comment column as the value for each segment"""

    from glue.ligolw.ligolw import LIGOLWContentHandler as h
    lsctables.use_in(h)

    # read segment definer table
    xmldoc, _ = ligolw_utils.load_fileobj(xml_file,
                                        gz=xml_file.name.endswith(".gz"),
                                        contenthandler=h)
    seg_def_table = table.get_table(xmldoc,
                                    lsctables.SegmentDefTable.tableName)

    # put comment column into a dict
    comment_dict = {}
    for seg_def in seg_def_table:
        if include_version:
            full_channel_name = ':'.join([str(seg_def.ifos),
                                          str(seg_def.name),
                                          str(seg_def.version)])
        else:
            full_channel_name = ':'.join([str(seg_def.ifos),
                                          str(seg_def.name)])

        comment_dict[full_channel_name] = seg_def.comment

    return comment_dict
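A minimal usage sketch (the path is a placeholder; the function expects an open file object, since it checks xml_file.name for a ".gz" suffix):

# Hypothetical call; 'segments.xml' is a placeholder path.
with open('segments.xml', 'rb') as xml_file:
    comments = get_segment_definer_comments(xml_file)
for channel, comment in comments.items():
    print(channel, '->', comment)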
Code Example #5
def fromsegmentxml(file, dict=False, id=None):
    """
    Read a glue.segments.segmentlist from the file object file containing an
    xml segment table.

    Arguments:

      file : file object
        file object for segment xml file

    Keyword Arguments:

      dict : [ True | False ]
        returns a glue.segments.segmentlistdict containing coalesced
        glue.segments.segmentlists keyed by seg_def.name for each entry in the
        contained segment_def_table. Default False
      id : int
        returns a glue.segments.segmentlist object containing only those
        segments matching the given segment_def_id integer
    """

    # load xmldocument and SegmentDefTable and SegmentTables
    xmldoc, digest = utils.load_fileobj(file,
                                        gz=file.name.endswith(".gz"),
                                        contenthandler=lsctables.use_in(
                                            ligolw.LIGOLWContentHandler))
    seg_def_table = lsctables.SegmentDefTable.get_table(xmldoc)
    seg_table = lsctables.SegmentTable.get_table(xmldoc)

    if dict:
        segs = segments.segmentlistdict()
    else:
        segs = segments.segmentlist()

    seg_id = {}
    for seg_def in seg_def_table:
        seg_id[int(seg_def.segment_def_id)] = str(seg_def.name)
        if dict:
            segs[str(seg_def.name)] = segments.segmentlist()

    for seg in seg_table:
        if dict:
            segs[seg_id[int(seg.segment_def_id)]]\
                .append(segments.segment(seg.start_time, seg.end_time))
            continue
        if id is not None:
            if int(seg.segment_def_id) == id:
                segs.append(segments.segment(seg.start_time, seg.end_time))
            continue
        segs.append(segments.segment(seg.start_time, seg.end_time))

    if dict:
        for seg_name in seg_id.values():
            segs[seg_name] = segs[seg_name].coalesce()
    else:
        segs = segs.coalesce()

    xmldoc.unlink()

    return segs
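A minimal usage sketch (the path is a placeholder; note that the dict keyword shadows the builtin of the same name):

# Hypothetical call; 'segments.xml' is a placeholder path.
with open('segments.xml', 'r') as f:
    seg_dict = fromsegmentxml(f, dict=True)
for name, seglist in seg_dict.items():
    print(name, abs(seglist), 'seconds of livetime')  # abs() of a segmentlist is its total duration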
Code Example #6
File: veto.py Project: ueno-phys/pycbc
def get_segment_definer_comments(xml_file, include_version=True):
    """Returns a dict with the comment column as the value for each segment"""

    from glue.ligolw.ligolw import LIGOLWContentHandler as h
    lsctables.use_in(h)

    # read segment definer table
    xmldoc, digest = ligolw_utils.load_fileobj(
        xml_file, gz=xml_file.name.endswith(".gz"), contenthandler=h)
    seg_def_table = table.get_table(xmldoc,
                                    lsctables.SegmentDefTable.tableName)

    # put comment column into a dict
    comment_dict = {}
    for seg_def in seg_def_table:
        if include_version:
            full_channel_name = ':'.join(
                [str(seg_def.ifos),
                 str(seg_def.name),
                 str(seg_def.version)])
        else:
            full_channel_name = ':'.join(
                [str(seg_def.ifos), str(seg_def.name)])

        comment_dict[full_channel_name] = seg_def.comment

    return comment_dict
Code Example #7
def from_veto_definer_file(cls, fp, start=None, end=None, ifo=None):
    """Read a `DataQualityDict` from a LIGO_LW XML VetoDefinerTable.
    """
    # open file
    if isinstance(fp, str):
        fobj = open(fp, 'r')
    else:
        fobj = fp
    xmldoc = ligolw_utils.load_fileobj(fobj)[0]
    # read veto definers
    veto_def_table = VetoDefTable.get_table(xmldoc)
    out = cls()
    for row in veto_def_table:
        if ifo and row.ifo != ifo:
            continue
        if start and 0 < row.end_time < start:
            continue
        elif start:
            row.start_time = max(row.start_time, start)
        if end and row.start_time > end:
            continue
        elif end and not row.end_time:
            row.end_time = end
        elif end:
            row.end_time = min(row.end_time, end)
        flag = DataQualityFlag.from_veto_def(row)
        if flag.name in out:
            out[flag.name].valid.extend(flag.valid)
            out[flag.name].valid.coalesce()
        else:
            out[flag.name] = flag
    return out
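A minimal usage sketch, assuming cls is gwpy's DataQualityDict (the veto-definer path, GPS interval, and IFO are placeholders):

# Hypothetical call; the file name and GPS times are placeholders.
vetoes = DataQualityDict.from_veto_definer_file(
    'H1L1-VETO_DEFINER.xml', start=1126051217, end=1126137617, ifo='H1')
for name, flag in vetoes.items():
    print(name, len(flag.valid), 'known segments')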
Code Example #8
def open_xmldoc(fobj, **kwargs):
    """Try and open an existing LIGO_LW-format file, or create a new Document

    Parameters
    ----------
    fobj : `str`, `file`
        file path or open file object to read

    **kwargs
        other keyword arguments to pass to
        :func:`~glue.ligolw.utils.load_filename`, or
        :func:`~glue.ligolw.utils.load_fileobj` as appropriate

    Returns
    --------
    xmldoc : :class:`~glue.ligolw.ligolw.Document`
        either the `Document` as parsed from an existing file, or a new, empty
        `Document`
    """
    from glue.ligolw.lsctables import use_in
    from glue.ligolw.ligolw import (Document, LIGOLWContentHandler)
    from glue.ligolw.utils import load_filename, load_fileobj
    try:  # try and load existing file
        if isinstance(fobj, string_types):
            kwargs.setdefault('contenthandler', use_in(LIGOLWContentHandler))
            return load_filename(fobj, **kwargs)
        if isinstance(fobj, FILE_LIKE):
            kwargs.setdefault('contenthandler', use_in(LIGOLWContentHandler))
            return load_fileobj(fobj, **kwargs)[0]
    except (OSError, IOError):  # or just create a new Document
        return Document()
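A minimal usage sketch showing both paths through the function (the file names are placeholders):

xmldoc = open_xmldoc('events.xml')   # parses the file if it exists ...
empty = open_xmldoc('missing.xml')   # ... or falls back to a new, empty Document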
Code Example #9
File: utils.py Project: smirshekari/lalsuite
def get_LVAdata_from_stdin(std_in, as_dict=False):
  """
  this function takes an LVAlertTable from sys.stdin and it returns:
  host: the machine the payload file was created on
  full_path: the full path to (and including) the payload file
  general_dir: the directory in gracedb that the output of your code should
               be written to
  uid: the gracedb unique id associated with the event in the LVAlertTable
  """
  doc = utils.load_fileobj(std_in)[0]
  lvatable = table.get_table(doc, LVAlertTable.tableName)
  file = lvatable[0].file
  uid = lvatable[0].uid
  data_loc = lvatable[0].temp_data_loc

  if as_dict:
    return {
      "file" : lvatable[0].file,
      "uid" : lvatable[0].uid,
      "data_loc" : lvatable[0].temp_data_loc,
      "description" : lvatable[0].description,
      "alert_type" : lvatable[0].alert_type,
    }

  return file, uid, data_loc
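A minimal usage sketch (reading from sys.stdin as the docstring describes; the file path in the second call is a placeholder):

import sys
file_, uid, data_loc = get_LVAdata_from_stdin(sys.stdin)
info = get_LVAdata_from_stdin(open('lvalert.xml'), as_dict=True)  # placeholder path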
Code Example #10
File: ligolw.py Project: stefco/gwpy
def open_xmldoc(fobj, **kwargs):
    """Try and open an existing LIGO_LW-format file, or create a new Document

    Parameters
    ----------
    fobj : `str`, `file`
        file path or open file object to read

    **kwargs
        other keyword arguments to pass to
        :func:`~glue.ligolw.utils.load_filename`, or
        :func:`~glue.ligolw.utils.load_fileobj` as appropriate

    Returns
    --------
    xmldoc : :class:`~glue.ligolw.ligolw.Document`
        either the `Document` as parsed from an existing file, or a new, empty
        `Document`
    """
    from glue.ligolw.lsctables import use_in
    from glue.ligolw.ligolw import (Document, LIGOLWContentHandler)
    from glue.ligolw.utils import load_filename, load_fileobj
    try:  # try and load existing file
        if isinstance(fobj, string_types):
            kwargs.setdefault('contenthandler', use_in(LIGOLWContentHandler))
            return load_filename(fobj, **kwargs)
        if isinstance(fobj, FILE_LIKE):
            kwargs.setdefault('contenthandler', use_in(LIGOLWContentHandler))
            return load_fileobj(fobj, **kwargs)[0]
    except (OSError, IOError):  # or just create a new Document
        return Document()
Code Example #11
def from_xml(filename,
             length,
             delta_f,
             low_freq_cutoff,
             ifo_string=None,
             root_name='psd'):
    """Read an ASCII file containing one-sided ASD or PSD  data and generate
    a frequency series with the corresponding PSD. The ASD or PSD data is
    interpolated in order to match the desired resolution of the
    generated frequency series.

    Parameters
    ----------
    filename : string
        Path to a LIGO-LW XML file containing one or more PSD frequency
        series, keyed by detector (ifo) string.
    length : int
        Length of the frequency series in samples.
    delta_f : float
        Frequency resolution of the frequency series in Hertz.
    low_freq_cutoff : float
        Frequencies below this value are set to zero.
    ifo_string : string
        Use the PSD in the file's PSD dictionary with this ifo string.
        If not given and only one PSD present in the file return that, if not
        given and multiple (or zero) PSDs present an exception will be raised.
    root_name : string (default='psd')
        If given use this as the root name for the PSD XML file. If this means
        nothing to you, then it is probably safe to ignore this option.

    Returns
    -------
    psd : FrequencySeries
        The generated frequency series.

    """
    import lal.series
    from glue.ligolw import utils as ligolw_utils
    fp = open(filename, 'r')
    ct_handler = lal.series.PSDContentHandler
    fileobj, _ = ligolw_utils.load_fileobj(fp, contenthandler=ct_handler)
    psd_dict = lal.series.read_psd_xmldoc(fileobj, root_name=root_name)

    if ifo_string is not None:
        psd_freq_series = psd_dict[ifo_string]
    else:
        if len(psd_dict.keys()) == 1:
            psd_freq_series = psd_dict[tuple(psd_dict.keys())[0]]
        else:
            err_msg = "No ifo string given and input XML file contains not "
            err_msg += "exactly one PSD. Specify which PSD you want to use."
            raise ValueError(err_msg)

    noise_data = psd_freq_series.data.data[:]
    freq_data = numpy.arange(len(noise_data)) * psd_freq_series.deltaF

    return from_numpy_arrays(freq_data, noise_data, length, delta_f,
                             low_freq_cutoff)
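A minimal usage sketch (the path and IFO key are placeholders; length, delta_f, and low_freq_cutoff follow the documented parameters):

# Hypothetical call; 'psd.xml' and 'H1' are placeholders.
psd = from_xml('psd.xml', length=1025, delta_f=1.0,
               low_freq_cutoff=20.0, ifo_string='H1')
print(len(psd), psd.delta_f)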
Code Example #12
def fromsegmentxml(file, dict=False, id=None):

  """
    Read a glue.segments.segmentlist from the file object file containing an
    xml segment table.

    Arguments:

      file : file object
        file object for segment xml file

    Keyword Arguments:

      dict : [ True | False ]
        returns a glue.segments.segmentlistdict containing coalesced
        glue.segments.segmentlists keyed by seg_def.name for each entry in the
        contained segment_def_table. Default False
      id : int
        returns a glue.segments.segmentlist object containing only those
        segments matching the given segment_def_id integer
  """

  # load xmldocument and SegmentDefTable and SegmentTables
  xmldoc, digest = utils.load_fileobj(file, gz=file.name.endswith(".gz"))
  seg_def_table  = table.get_table(xmldoc, lsctables.SegmentDefTable.tableName)
  seg_table      = table.get_table(xmldoc, lsctables.SegmentTable.tableName)

  if dict:
    segs = segments.segmentlistdict()
  else:
    segs = segments.segmentlist()

  seg_id = {}
  for seg_def in seg_def_table:
    seg_id[int(seg_def.segment_def_id)] = str(seg_def.name)
    if dict:
      segs[str(seg_def.name)] = segments.segmentlist()

  for seg in seg_table:
    if dict:
      segs[seg_id[int(seg.segment_def_id)]]\
          .append(segments.segment(seg.start_time, seg.end_time))
      continue
    if id is not None:
      if int(seg.segment_def_id) == id:
        segs.append(segments.segment(seg.start_time, seg.end_time))
      continue
    segs.append(segments.segment(seg.start_time, seg.end_time))

  if dict:
    for seg_name in seg_id.values():
      segs[seg_name] = segs[seg_name].coalesce()
  else:
    segs = segs.coalesce()

  xmldoc.unlink()

  return segs
Code Example #13
def test_pick_coinc():
    coinc = pick_coinc()
    xmldoc, _ = utils.load_fileobj(io.BytesIO(coinc),
                                   contenthandler=ContentHandler)

    coinc_inspiral_table = lsctables.CoincInspiralTable.get_table(xmldoc)

    assert len(coinc_inspiral_table) == 1
    coinc_inspiral, = coinc_inspiral_table
    assert coinc_inspiral.get_end() <= mock_now()
Code Example #14
File: read.py Project: bhooshan-gadre/pycbc
def from_xml(filename, length, delta_f, low_freq_cutoff, ifo_string=None,
             root_name='psd'):
    """Read an ASCII file containing one-sided ASD or PSD  data and generate
    a frequency series with the corresponding PSD. The ASD or PSD data is
    interpolated in order to match the desired resolution of the
    generated frequency series.

    Parameters
    ----------
    filename : string
        Path to a LIGO-LW XML file containing one or more PSD frequency
        series, keyed by detector (ifo) string.
    length : int
        Length of the frequency series in samples.
    delta_f : float
        Frequency resolution of the frequency series in Hertz.
    low_freq_cutoff : float
        Frequencies below this value are set to zero.
    ifo_string : string
        Use the PSD in the file's PSD dictionary with this ifo string.
        If not given and only one PSD present in the file return that, if not
        given and multiple (or zero) PSDs present an exception will be raised.
    root_name : string (default='psd')
        If given use this as the root name for the PSD XML file. If this means
        nothing to you, then it is probably safe to ignore this option.

    Returns
    -------
    psd : FrequencySeries
        The generated frequency series.

    """
    import lal.series
    from glue.ligolw import utils as ligolw_utils
    fp = open(filename, 'r')
    ct_handler = lal.series.PSDContentHandler
    fileobj, _ = ligolw_utils.load_fileobj(fp, contenthandler=ct_handler)
    psd_dict = lal.series.read_psd_xmldoc(fileobj, root_name=root_name)

    if ifo_string is not None:
        psd_freq_series = psd_dict[ifo_string]
    else:
        if len(psd_dict.keys()) == 1:
            psd_freq_series = psd_dict[list(psd_dict.keys())[0]]
        else:
            err_msg = "No ifo string given and input XML file contains not "
            err_msg += "exactly one PSD. Specify which PSD you want to use."
            raise ValueError(err_msg)

    noise_data = psd_freq_series.data.data[:]
    freq_data = numpy.arange(len(noise_data)) * psd_freq_series.deltaF

    return from_numpy_arrays(freq_data, noise_data, length, delta_f,
                             low_freq_cutoff)
Code Example #15
File: ligolw.py Project: jumbokh/gwpy
def open_xmldoc(f, **kwargs):
    """Try and open an existing LIGO_LW-format file, or create a new Document
    """
    from glue.ligolw.lsctables import use_in
    from glue.ligolw.ligolw import Document
    from glue.ligolw.utils import load_filename, load_fileobj
    use_in(kwargs['contenthandler'])
    try:  # try and load existing file
        if isinstance(f, string_types):
            return load_filename(f, **kwargs)
        if isinstance(f, FILE_LIKE):
            return load_fileobj(f, **kwargs)[0]
    except (OSError, IOError):  # or just create a new Document
        return Document()
Code Example #16
File: connect.py Project: mcoughlin/gwpy
def read_ligolw(filepath, table_name, columns=None):
    from . import utils
    # read table into GLUE LIGO_LW
    if columns:
        TableType = lsctables.TableByName[table_name]
        _oldcols = TableType.loadcolumns
        TableType.loadcolumns = columns
    if isinstance(filepath, basestring):
        xmldoc = ligolw_utils.load_filename(filepath)
    else:
        xmldoc,_ = ligolw_utils.load_fileobj(filepath)
    out = ligolw_table.get_table(xmldoc, table_name)
    if columns:
        TableType.loadcolumns = _oldcols
    return utils.to_table(out, columns=columns)
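A minimal usage sketch (the path, table name, and column names are placeholders; the table-name string must follow whatever convention this gwpy version's lsctables.TableByName uses):

events = read_ligolw('triggers.xml', 'sngl_inspiral',
                     columns=['end_time', 'snr'])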
Code Example #17
File: ligolw.py Project: farr/lalsuite
def _read_xml(f):
    if f is None:
        doc = filename = None
    elif isinstance(f, Element):
        doc = f
        filename = ''
    elif isinstance(f, six.string_types):
        doc = load_filename(f, contenthandler=_ContentHandler)
        filename = f
    else:
        doc, _ = load_fileobj(f, contenthandler=_ContentHandler)
        try:
            filename = f.name
        except AttributeError:
            filename = ''
    return doc, filename
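A minimal usage sketch of the three accepted input types (the path is a placeholder):

doc, filename = _read_xml(None)         # -> (None, None)
doc, filename = _read_xml('coinc.xml')  # path: parsed Document plus the path itself
with open('coinc.xml', 'rb') as f:      # file object: name recovered when available
    doc, filename = _read_xml(f)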
Code Example #18
File: ligolw.py Project: bfarr/lalsuite
def _read_xml(f):
    if f is None:
        doc = filename = None
    elif isinstance(f, Element):
        doc = f
        filename = ''
    elif isinstance(f, six.string_types):
        doc = load_filename(f, contenthandler=_ContentHandler)
        filename = f
    else:
        doc, _ = load_fileobj(f, contenthandler=_ContentHandler)
        try:
            filename = f.name
        except AttributeError:
            filename = ''
    return doc, filename
Code Example #19
File: segments.py Project: mcoughlin/gwpy
def read_ligolw_segments(file, flag=None):
    """Read segments for the given flag from the LIGO_LW XML file
    """
    if isinstance(file, basestring):
        f = open(file, 'r')
    else:
        f = file
    xmldoc,_ = ligolw_utils.load_fileobj(f)
    seg_def_table = ligolw_table.get_table(xmldoc,
                                           lsctables.SegmentDefTable.tableName)
    flags = []
    dqflag = None
    for row in seg_def_table:
        name = ':'.join(["".join(row.get_ifos()), row.name])
        if row.version:
            name += ':%d' % row.version
        if flag is None or name == flag:
            dqflag = DataQualityFlag(name)
            id_ = row.segment_def_id
            break
    if not dqflag:
        raise ValueError("No segment definition found for flag='%s' in "
                         "file '%s'" % (flag, f.name))
    dqflag.valid = SegmentList()
    seg_sum_table = ligolw_table.get_table(xmldoc,
                                           lsctables.SegmentSumTable.tableName)
    for row in seg_sum_table:
        if row.segment_def_id == id_:
            try:
                dqflag.valid.append(row.get())
            except AttributeError:
                dqflag.valid.append(Segment(row.start_time, row.end_time))
    dqflag.active = SegmentList()
    seg_table = ligolw_table.get_table(xmldoc, lsctables.SegmentTable.tableName)
    for row in seg_table:
        if row.segment_def_id == id_:
            try:
                dqflag.active.append(row.get())
            except AttributeError:
                dqflag.active.append(Segment(row.start_time, row.end_time))
    if isinstance(file, basestring):
        f.close()
    return dqflag
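A minimal usage sketch (the path and flag name are placeholders; the flag string follows the "IFO:NAME:VERSION" form built above):

flag = read_ligolw_segments('segments.xml', flag='H1:DMT-ANALYSIS_READY:1')
print(flag.name, len(flag.active), 'active segments')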
Code Example #20
def _read_xml(f, fallbackpath=None):
    if f is None:
        doc = filename = None
    elif isinstance(f, Element):
        doc = f
        filename = ''
    elif isinstance(f, str):
        try:
            doc = load_filename(f, contenthandler=ContentHandler)
        except IOError as e:
            if e.errno == errno.ENOENT and fallbackpath and \
                    not os.path.isabs(f):
                f = os.path.join(fallbackpath, f)
                doc = load_filename(f, contenthandler=ContentHandler)
            else:
                raise
        filename = f
    else:
        doc, _ = load_fileobj(f, contenthandler=ContentHandler)
        try:
            filename = f.name
        except AttributeError:
            filename = ''
    return doc, filename
Code Example #21
        for attr in 'spin1x spin1y spin2x spin2y'.split():
            if getattr(sim_inspiral, attr):
                raise NotImplementedError('sim_inspiral:{} column is nonzero, '
                    'but only aligned-spin templates are supported'.format(attr))
        return super(TaylorF2RedSpinIntrinsicParams, cls).__new__(cls,
            *(getattr(sim_inspiral, field) for field in cls._fields)
        )

    @property
    def chi(self):
        return lalsimulation.SimInspiralTaylorF2ReducedSpinComputeChi(
            self.mass1, self.mass2, self.spin1z, self.spin2z)


# Read injection file.
xmldoc, _ = ligolw_utils.load_fileobj(
    opts.input, contenthandler=ligolw_bayestar.LSCTablesContentHandler)

# Extract simulation table from injection file.
sim_inspiral_table = ligolw_table.get_table(xmldoc,
    lsctables.SimInspiralTable.tableName)

# Get just the intrinsic parameters from the sim_inspiral table.
sim_inspiral_intrinsic_params = set(TaylorF2RedSpinIntrinsicParams(sim_inspiral)
    for sim_inspiral in sim_inspiral_table)

if opts.low_frequency_cutoff is None:
    # Get the low-frequency cutoffs from the sim_inspiral table.
    f_lows = set(sim_inspiral.f_lower for sim_inspiral in sim_inspiral_table)

    # There can be only one!
    try:
Code Example #22
def dump_flags(ifos=None, segment_url=None, match=None, unmatch=None,\
               latest=False):

  """
    Returns the list of all flags defined in the database.

    Keyword rguments:
      ifo : [ str | list ]
        list of ifos to query, or str for single ifo
      segment_url : str 
        url of segment database, defaults to contents of S6_SEGMENT_SERVER
        environment variable
      match : [ str | regular pattern ]
        regular expression to search against returned flag names, e.g, 'UPV'
      unmatch : [ str | regular pattern ]
        regular expression to negatively search against returned flag names
  """

  if isinstance(ifos, str):
    ifos = [ifos]

  # get url
  if not segment_url:
    segment_url = os.getenv('S6_SEGMENT_SERVER')

  # open connection to LDBD(W)Server
  myClient = segmentdb_utils.setup_database(segment_url)

  reply = StringIO(myClient.query(squery))
  xmldoc, digest = utils.load_fileobj(reply)
  seg_def_table = table.get_table(xmldoc, lsctables.SegmentDefTable.tableName)

  # sort table by ifo, name and version
  seg_def_table.sort(key=lambda flag: (flag.ifos[0], flag.name, \
                                       flag.version), reverse=True)

  flags = lsctables.New(type(seg_def_table))

  for row in seg_def_table:

    # test re match
    if match and not re.search(match, row.name):  continue

    # test re unmatch
    if unmatch and re.search(unmatch, row.name):  continue

    # only append latest versions of multiple flags
    flatest=True
    if latest:
      # get all flags with same ifo and name
      vflags = [f for f in flags if row.name==f.name and\
                row.get_ifos()==f.get_ifos()]
      # if later version in list, move on
      for f in vflags:
        if f.version>=row.version:
          flatest=False
          break
    if not flatest:
      continue

    # append those flags matching ifos requirement
    for ifo in ifos:
      if ifo in row.get_ifos():
        flags.append(row)
        break

  return flags
Code Example #23
File: segment.py Project: jsread/pycbc
def fromsegmentxml(xml_file, return_dict=False, select_seg_def_id=None):
    """
    Read a glue.segments.segmentlist from the file object file containing an
    xml segment table.

    Parameters
    -----------
    xml_file : file object
        file object for segment xml file
    return_dict : boolean, optional (default = False)
        returns a glue.segments.segmentlistdict containing coalesced
        glue.segments.segmentlists keyed by seg_def.name for each entry in the
        contained segment_def_table.
    select_seg_def_id : int, optional (default = None)
        returns a glue.segments.segmentlist object containing only those
        segments matching the given segment_def_id integer

    Returns
    --------
    segs : glue.segments.segmentlist instance
        The segment list contained in the file.
    """

    # load xmldocument and SegmentDefTable and SegmentTables
    xmldoc, digest = utils.load_fileobj(xml_file,
                                        gz=xml_file.name.endswith(".gz"),
                                        contenthandler=ContentHandler)
    seg_def_table = table.get_table(xmldoc,
                                    lsctables.SegmentDefTable.tableName)
    seg_table = table.get_table(xmldoc, lsctables.SegmentTable.tableName)

    if return_dict:
        segs = segments.segmentlistdict()
    else:
        segs = segments.segmentlist()

    seg_id = {}
    for seg_def in seg_def_table:
        # Here we want to encode ifo, channel name and version
        full_channel_name = ':'.join([str(seg_def.ifos),
                                      str(seg_def.name),
                                      str(seg_def.version)])
        seg_id[int(seg_def.segment_def_id)] = full_channel_name
        if return_dict:
            segs[full_channel_name] = segments.segmentlist()

    for seg in seg_table:
        seg_obj = segments.segment(
                lal.LIGOTimeGPS(seg.start_time, seg.start_time_ns),
                lal.LIGOTimeGPS(seg.end_time, seg.end_time_ns))
        if return_dict:
            segs[seg_id[int(seg.segment_def_id)]].append(seg_obj)
        elif select_seg_def_id is not None:
            if int(seg.segment_def_id) == select_seg_def_id:
                segs.append(seg_obj)
        else:
            segs.append(seg_obj)

    if return_dict:
        for seg_name in seg_id.values():
            segs[seg_name] = segs[seg_name].coalesce()
    else:
        segs = segs.coalesce()

    xmldoc.unlink()

    return segs
Code Example #24
File: lalinference_pipe.py Project: lscsoft/lalsuite
def setup_roq(cp):
    """
    Generates cp objects with the different ROQs applied
    """
    use_roq=False
    if cp.has_option('paths','roq_b_matrix_directory') or cp.has_option('paths','computeroqweights'):
        if not cp.has_option('analysis','roq'):
            print("Warning: If you are attempting to enable ROQ by specifying roq_b_matrix_directory or computeroqweights,\n\
            please use analysis.roq in your config file in future. Enabling ROQ.")
            cp.set('analysis','roq','True')
    if not cp.getboolean('analysis','roq'):
        yield cp
        return
    from numpy import genfromtxt, array
    path=cp.get('paths','roq_b_matrix_directory')
    if not os.path.isdir(path):
        print("The ROQ directory %s does not seem to exist\n"%path)
        sys.exit(1)
    use_roq=True
    roq_paths=os.listdir(path)
    roq_params={}
    roq_force_flow = None

    if cp.has_option('lalinference','roq_force_flow'):
        roq_force_flow = cp.getfloat('lalinference','roq_force_flow')
        print("WARNING: Forcing the f_low to ", str(roq_force_flow), "Hz")
        print("WARNING: Overwriting user choice of flow, srate, seglen, and (mc_min, mc_max and q-min) or (mass1_min, mass1_max, mass2_min, mass2_max)")

    def key(item): # to order the ROQ bases
        return float(item[1]['seglen'])

    coinc_xml_obj = None
    row=None

    # Get file object of coinc.xml
    if opts.gid is not None:
        from ligo.gracedb.rest import GraceDb
        gid=opts.gid
        cwd=os.getcwd()
        if cp.has_option('analysis', 'service-url'):
            client = GraceDb(cp.get('analysis', 'service-url'))
        else:
            client = GraceDb()
        coinc_xml_obj = ligolw_utils.load_fileobj(client.files(gid, "coinc.xml"), contenthandler = lsctables.use_in(ligolw.LIGOLWContentHandler))[0]
    elif cp.has_option('input', 'coinc-xml'):
        coinc_xml_obj = ligolw_utils.load_fileobj(open(cp.get('input', 'coinc-xml'), "rb"), contenthandler = lsctables.use_in(ligolw.LIGOLWContentHandler))[0]

    # Get sim_inspiral from injection file
    if cp.has_option('input','injection-file'):
        print("Only 0-th event in the XML table will be considered while running with ROQ\n")
        row = lsctables.SimInspiralTable.get_table(
                  ligolw_utils.load_filename(cp.get('input','injection-file'),contenthandler=lsctables.use_in(ligolw.LIGOLWContentHandler))
              )[0]

    roq_bounds = pipe_utils.Query_ROQ_Bounds_Type(path, roq_paths)
    if roq_bounds == 'chirp_mass_q':
        print('ROQ has bounds in chirp mass and mass-ratio')
        mc_priors, trigger_mchirp = pipe_utils.get_roq_mchirp_priors(
            path, roq_paths, roq_params, key, coinc_xml_obj=coinc_xml_obj, sim_inspiral=row
        )
    elif roq_bounds == 'component_mass':
        print('ROQ has bounds in component masses')
        # get component mass bounds, then compute the chirp mass that can be safely covered
        # further below we pass along the component mass bounds to the sampler, not the tighter chirp-mass, q bounds
        m1_priors, m2_priors, trigger_mchirp = pipe_utils.get_roq_component_mass_priors(
            path, roq_paths, roq_params, key, coinc_xml_obj=coinc_xml_obj, sim_inspiral=row
        )
        mc_priors = {}
        for (roq,m1_prior), (roq2,m2_prior) in zip(m1_priors.items(), m2_priors.items()):
            mc_priors[roq] = sorted([pipe_utils.mchirp_from_components(m1_prior[1], m2_prior[0]), pipe_utils.mchirp_from_components(m1_prior[0], m2_prior[1])])

    if cp.has_option('lalinference','trigger_mchirp'):
        trigger_mchirp=float(cp.get('lalinference','trigger_mchirp'))
    roq_mass_freq_scale_factor = pipe_utils.get_roq_mass_freq_scale_factor(mc_priors, trigger_mchirp, roq_force_flow)
    if roq_mass_freq_scale_factor != 1.:
        print('WARNING: Rescaling ROQ basis, please ensure it is allowed with the model used.')

    # If the true chirp mass is unknown, add variations over the mass bins
    if opts.gid is not None or (opts.injections is not None or cp.has_option('input','injection-file')) or cp.has_option('lalinference','trigger_mchirp') or cp.has_option('input', 'coinc-xml'):

        for mc_prior in mc_priors:
            mc_priors[mc_prior] = array(mc_priors[mc_prior])
        # find mass bin containing the trigger
        trigger_bin = None
        for roq in roq_paths:
            if mc_priors[roq][0]*roq_mass_freq_scale_factor <= trigger_mchirp <= mc_priors[roq][1]*roq_mass_freq_scale_factor:
                trigger_bin = roq
                print('Prior in Mchirp will be ['+str(mc_priors[roq][0]*roq_mass_freq_scale_factor)+','+str(mc_priors[roq][1]*roq_mass_freq_scale_factor)+'] to contain the trigger Mchirp '+str(trigger_mchirp))
                break
        roq_paths = [trigger_bin]
    else:
        for mc_prior in mc_priors:
            mc_priors[mc_prior] = array(mc_priors[mc_prior])*roq_mass_freq_scale_factor

    # write the master configparser
    cur_basedir = cp.get('paths','basedir')
    masterpath=os.path.join(cur_basedir,'config.ini')
    with open(masterpath,'w') as cpfile:
        cp.write(cpfile)

    for roq in roq_paths:
        this_cp = configparser.ConfigParser()
        this_cp.optionxform = str
        this_cp.read(masterpath)
        basedir = this_cp.get('paths','basedir')
        for dirs in 'basedir','daglogdir','webdir':
            val = this_cp.get('paths',dirs)
            newval = os.path.join(val,roq)
            mkdirs(newval)
            this_cp.set('paths',dirs,newval)
        this_cp.set('paths','roq_b_matrix_directory',os.path.join(cp.get('paths','roq_b_matrix_directory'),roq))
        flow=roq_params[roq]['flow'] / roq_mass_freq_scale_factor
        srate=2.*roq_params[roq]['fhigh'] / roq_mass_freq_scale_factor
        #if srate > 8192:
        #    srate = 8192

        seglen=roq_params[roq]['seglen'] * roq_mass_freq_scale_factor
        # params.dat uses the convention q>1 so our q_min is the inverse of their qmax
        this_cp.set('engine','srate',str(srate))
        this_cp.set('engine','seglen',str(seglen))
        if this_cp.has_option('lalinference','flow'):
            tmp=this_cp.get('lalinference','flow')
            tmp=eval(tmp)
            ifos=tmp.keys()
        else:
            tmp={}
            ifos=eval(this_cp.get('analysis','ifos'))
        for i in ifos:
            tmp[i]=flow
            this_cp.set('lalinference','flow',str(tmp))
        if roq_bounds == 'chirp_mass_q':
            mc_min=mc_priors[roq][0]*roq_mass_freq_scale_factor
            mc_max=mc_priors[roq][1]*roq_mass_freq_scale_factor
            # params.dat uses the convention q>1 so our q_min is the inverse of their qmax
            q_min=1./float(roq_params[roq]['qmax'])
            this_cp.set('engine','chirpmass-min',str(mc_min))
            this_cp.set('engine','chirpmass-max',str(mc_max))
            this_cp.set('engine','q-min',str(q_min))
            this_cp.set('engine','comp-min', str(max(roq_params[roq]['compmin'] * roq_mass_freq_scale_factor, mc_min * pow(1+q_min, 1./5.) * pow(q_min, 2./5.))))
            this_cp.set('engine','comp-max', str(mc_max * pow(1+q_min, 1./5.) * pow(q_min, -3./5.)))
        elif roq_bounds == 'component_mass':
            m1_min = m1_priors[roq][0]
            m1_max = m1_priors[roq][1]
            m2_min = m2_priors[roq][0]
            m2_max = m2_priors[roq][1]
            this_cp.set('engine','mass1-min',str(m1_min))
            this_cp.set('engine','mass1-max',str(m1_max))
            this_cp.set('engine','mass2-min',str(m2_min))
            this_cp.set('engine','mass2-max',str(m2_max))
        yield this_cp
    return
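Because setup_roq is a generator, it is consumed by iteration; a minimal usage sketch (assuming cp is an already-populated ConfigParser):

for this_cp in setup_roq(cp):
    print(this_cp.get('paths', 'basedir'))  # one configuration per selected ROQ basis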
Code Example #25
def main(args=None):
    from glue.ligolw import lsctables
    from glue.ligolw.utils import process as ligolw_process
    from glue.ligolw import utils as ligolw_utils
    from glue.ligolw import ligolw
    import lal.series
    from scipy import stats

    p = parser()
    args = p.parse_args(args)

    xmldoc = ligolw.Document()
    xmlroot = xmldoc.appendChild(ligolw.LIGO_LW())
    process = register_to_xmldoc(xmldoc, p, args)

    cosmo = cosmology.default_cosmology.get_cosmology_from_string(
        args.cosmology)

    ns_mass_min = 1.0
    ns_mass_max = 2.0
    bh_mass_min = 5.0
    bh_mass_max = 50.0

    ns_astro_spin_min = -0.05
    ns_astro_spin_max = +0.05
    ns_astro_mass_dist = stats.norm(1.33, 0.09)
    ns_astro_spin_dist = stats.uniform(ns_astro_spin_min,
                                       ns_astro_spin_max - ns_astro_spin_min)

    ns_broad_spin_min = -0.4
    ns_broad_spin_max = +0.4
    ns_broad_mass_dist = stats.uniform(ns_mass_min, ns_mass_max - ns_mass_min)
    ns_broad_spin_dist = stats.uniform(ns_broad_spin_min,
                                       ns_broad_spin_max - ns_broad_spin_min)

    bh_astro_spin_min = -0.99
    bh_astro_spin_max = +0.99
    bh_astro_mass_dist = stats.pareto(b=1.3)
    bh_astro_spin_dist = stats.uniform(bh_astro_spin_min,
                                       bh_astro_spin_max - bh_astro_spin_min)

    bh_broad_spin_min = -0.99
    bh_broad_spin_max = +0.99
    bh_broad_mass_dist = stats.reciprocal(bh_mass_min, bh_mass_max)
    bh_broad_spin_dist = stats.uniform(bh_broad_spin_min,
                                       bh_broad_spin_max - bh_broad_spin_min)

    if args.distribution.startswith('bns_'):
        m1_min = m2_min = ns_mass_min
        m1_max = m2_max = ns_mass_max
        if args.distribution.endswith('_astro'):
            x1_min = x2_min = ns_astro_spin_min
            x1_max = x2_max = ns_astro_spin_max
            m1_dist = m2_dist = ns_astro_mass_dist
            x1_dist = x2_dist = ns_astro_spin_dist
        elif args.distribution.endswith('_broad'):
            x1_min = x2_min = ns_broad_spin_min
            x1_max = x2_max = ns_broad_spin_max
            m1_dist = m2_dist = ns_broad_mass_dist
            x1_dist = x2_dist = ns_broad_spin_dist
        else:  # pragma: no cover
            assert_not_reached()
    elif args.distribution.startswith('nsbh_'):
        m1_min = bh_mass_min
        m1_max = bh_mass_max
        m2_min = ns_mass_min
        m2_max = ns_mass_max
        if args.distribution.endswith('_astro'):
            x1_min = bh_astro_spin_min
            x1_max = bh_astro_spin_max
            x2_min = ns_astro_spin_min
            x2_max = ns_astro_spin_max
            m1_dist = bh_astro_mass_dist
            m2_dist = ns_astro_mass_dist
            x1_dist = bh_astro_spin_dist
            x2_dist = ns_astro_spin_dist
        elif args.distribution.endswith('_broad'):
            x1_min = bh_broad_spin_min
            x1_max = bh_broad_spin_max
            x2_min = ns_broad_spin_min
            x2_max = ns_broad_spin_max
            m1_dist = bh_broad_mass_dist
            m2_dist = ns_broad_mass_dist
            x1_dist = bh_broad_spin_dist
            x2_dist = ns_broad_spin_dist
        else:  # pragma: no cover
            assert_not_reached()
    elif args.distribution.startswith('bbh_'):
        m1_min = m2_min = bh_mass_min
        m1_max = m2_max = bh_mass_max
        if args.distribution.endswith('_astro'):
            x1_min = x2_min = bh_astro_spin_min
            x1_max = x2_max = bh_astro_spin_max
            m1_dist = m2_dist = bh_astro_mass_dist
            x1_dist = x2_dist = bh_astro_spin_dist
        elif args.distribution.endswith('_broad'):
            x1_min = x2_min = bh_broad_spin_min
            x1_max = x2_max = bh_broad_spin_max
            m1_dist = m2_dist = bh_broad_mass_dist
            x1_dist = x2_dist = bh_broad_spin_dist
        else:  # pragma: no cover
            assert_not_reached()
    else:  # pragma: no cover
        assert_not_reached()

    dists = (m1_dist, m2_dist, x1_dist, x2_dist)

    # Read PSDs
    psds = list(
        lal.series.read_psd_xmldoc(
            ligolw_utils.load_fileobj(
                args.reference_psd,
                contenthandler=lal.series.PSDContentHandler)[0]).values())

    # Construct mass1, mass2, spin1z, spin2z grid.
    m1 = np.geomspace(m1_min, m1_max, 10)
    m2 = np.geomspace(m2_min, m2_max, 10)
    x1 = np.linspace(x1_min, x1_max, 10)
    x2 = np.linspace(x2_min, x2_max, 10)
    params = m1, m2, x1, x2

    # Calculate the maximum distance on the grid.
    max_z = get_max_z(cosmo,
                      psds,
                      args.waveform,
                      args.f_low,
                      args.min_snr,
                      m1,
                      m2,
                      x1,
                      x2,
                      jobs=args.jobs)
    max_distance = sensitive_distance(cosmo, max_z).to_value(units.Mpc)

    # Find piecewise constant approximate upper bound on distance.
    max_distance = cell_max(max_distance)

    # Calculate V * T in each grid cell
    cdfs = [dist.cdf(param) for param, dist in zip(params, dists)]
    cdf_los = [cdf[:-1] for cdf in cdfs]
    cdfs = [np.diff(cdf) for cdf in cdfs]
    probs = np.prod(np.meshgrid(*cdfs, indexing='ij'), axis=0)
    probs /= probs.sum()
    probs *= 4 / 3 * np.pi * max_distance**3
    volume = probs.sum()
    probs /= volume
    probs = probs.ravel()

    volumetric_rate = args.nsamples / volume * units.year**-1 * units.Mpc**-3

    # Draw random grid cells
    dist = stats.rv_discrete(values=(np.arange(len(probs)), probs))
    indices = np.unravel_index(dist.rvs(size=args.nsamples),
                               max_distance.shape)

    # Draw random intrinsic params from each cell
    cols = {}
    cols['mass1'], cols['mass2'], cols['spin1z'], cols['spin2z'] = [
        dist.ppf(stats.uniform(cdf_lo[i], cdf[i]).rvs(size=args.nsamples))
        for i, dist, cdf_lo, cdf in zip(indices, dists, cdf_los, cdfs)
    ]

    # Draw random extrinsic parameters
    cols['distance'] = stats.powerlaw(
        a=3, scale=max_distance[indices]).rvs(size=args.nsamples)
    cols['longitude'] = stats.uniform(0, 2 * np.pi).rvs(size=args.nsamples)
    cols['latitude'] = np.arcsin(stats.uniform(-1, 2).rvs(size=args.nsamples))
    cols['inclination'] = np.arccos(
        stats.uniform(-1, 2).rvs(size=args.nsamples))
    cols['polarization'] = stats.uniform(0, 2 * np.pi).rvs(size=args.nsamples)
    cols['coa_phase'] = stats.uniform(-np.pi,
                                      2 * np.pi).rvs(size=args.nsamples)
    cols['time_geocent'] = stats.uniform(1e9, units.year.to(
        units.second)).rvs(size=args.nsamples)

    # Convert from sensitive distance to redshift and comoving distance.
    # FIXME: Replace this brute-force lookup table with a solver.
    z = np.linspace(0, max_z.max(), 10000)
    ds = sensitive_distance(cosmo, z).to_value(units.Mpc)
    dc = cosmo.comoving_distance(z).to_value(units.Mpc)
    z_for_ds = interp1d(ds, z, kind='cubic', assume_sorted=True)
    dc_for_ds = interp1d(ds, dc, kind='cubic', assume_sorted=True)
    zp1 = 1 + z_for_ds(cols['distance'])
    cols['distance'] = dc_for_ds(cols['distance'])

    # Apply redshift factor to convert from comoving distance and source frame
    # masses to luminosity distance and observer frame masses.
    for key in ['distance', 'mass1', 'mass2']:
        cols[key] *= zp1

    # Populate sim_inspiral table
    sims = xmlroot.appendChild(lsctables.New(lsctables.SimInspiralTable))
    for row in zip(*cols.values()):
        sims.appendRow(**dict(dict.fromkeys(sims.validcolumns, None),
                              process_id=process.process_id,
                              simulation_id=sims.get_next_id(),
                              waveform=args.waveform,
                              f_lower=args.f_low,
                              **dict(zip(cols.keys(), row))))

    # Record process end time.
    process.comment = str(volumetric_rate)
    ligolw_process.set_process_end_time(process)

    # Write output file.
    write_fileobj(xmldoc, args.output)
Code Example #26
def main(args=None):
    p = parser()
    opts = p.parse_args(args)

    # LIGO-LW XML imports.
    from glue.ligolw import ligolw
    from glue.ligolw.param import Param
    from glue.ligolw.utils import process as ligolw_process
    from glue.ligolw.utils.search_summary import append_search_summary
    from glue.ligolw import utils as ligolw_utils
    from glue.ligolw.lsctables import (New, CoincDefTable, CoincID,
                                       CoincInspiralTable, CoincMapTable,
                                       CoincTable, ProcessParamsTable,
                                       ProcessTable, SimInspiralTable,
                                       SnglInspiralTable, TimeSlideTable)

    # glue, LAL and pylal imports.
    from ligo import segments
    import glue.lal
    import lal.series
    import lalsimulation
    from lalinspiral.inspinjfind import InspiralSCExactCoincDef
    from lalinspiral.thinca import InspiralCoincDef
    from tqdm import tqdm

    # FIXME: disable progress bar monitor thread.
    #
    # I was getting error messages that look like this:
    #
    # Traceback (most recent call last):
    #   File "/tqdm/_tqdm.py", line 885, in __del__
    #     self.close()
    #   File "/tqdm/_tqdm.py", line 1090, in close
    #     self._decr_instances(self)
    #   File "/tqdm/_tqdm.py", line 454, in _decr_instances
    #     cls.monitor.exit()
    #   File "/tqdm/_monitor.py", line 52, in exit
    #     self.join()
    #   File "/usr/lib64/python3.6/threading.py", line 1053, in join
    #     raise RuntimeError("cannot join current thread")
    # RuntimeError: cannot join current thread
    #
    # I don't know what causes this... maybe a race condition in tqdm's cleanup
    # code. Anyway, this should disable the tqdm monitor thread entirely.
    tqdm.monitor_interval = 0

    # BAYESTAR imports.
    from ..io.events.ligolw import ContentHandler
    from ..bayestar import filter

    # Read PSDs.
    xmldoc, _ = ligolw_utils.load_fileobj(
        opts.reference_psd, contenthandler=lal.series.PSDContentHandler)
    psds = lal.series.read_psd_xmldoc(xmldoc, root_name=None)
    psds = {
        key: filter.InterpolatedPSD(filter.abscissa(psd), psd.data.data)
        for key, psd in psds.items() if psd is not None
    }
    psds = [psds[ifo] for ifo in opts.detector]

    # Extract simulation table from injection file.
    inj_xmldoc, _ = ligolw_utils.load_fileobj(opts.input,
                                              contenthandler=ContentHandler)
    orig_sim_inspiral_table = SimInspiralTable.get_table(inj_xmldoc)

    # Prune injections that are outside distance limits.
    orig_sim_inspiral_table[:] = [
        row for row in orig_sim_inspiral_table
        if opts.min_distance <= row.distance <= opts.max_distance
    ]

    # Open output file.
    xmldoc = ligolw.Document()
    xmlroot = xmldoc.appendChild(ligolw.LIGO_LW())

    # Create tables. Process and ProcessParams tables are copied from the
    # injection file.
    coinc_def_table = xmlroot.appendChild(New(CoincDefTable))
    coinc_inspiral_table = xmlroot.appendChild(New(CoincInspiralTable))
    coinc_map_table = xmlroot.appendChild(New(CoincMapTable))
    coinc_table = xmlroot.appendChild(New(CoincTable))
    xmlroot.appendChild(ProcessParamsTable.get_table(inj_xmldoc))
    xmlroot.appendChild(ProcessTable.get_table(inj_xmldoc))
    sim_inspiral_table = xmlroot.appendChild(New(SimInspiralTable))
    sngl_inspiral_table = xmlroot.appendChild(New(SnglInspiralTable))
    time_slide_table = xmlroot.appendChild(New(TimeSlideTable))

    # Write process metadata to output file.
    process = register_to_xmldoc(xmldoc,
                                 p,
                                 opts,
                                 ifos=opts.detector,
                                 comment="Simulated coincidences")

    # Add search summary to output file.
    all_time = segments.segment(
        [glue.lal.LIGOTimeGPS(0),
         glue.lal.LIGOTimeGPS(2e9)])
    append_search_summary(xmldoc, process, inseg=all_time, outseg=all_time)

    # Create a time slide entry.  Needed for coinc_event rows.
    time_slide_id = time_slide_table.get_time_slide_id(
        {ifo: 0
         for ifo in opts.detector}, create_new=process)

    # Populate CoincDef table.
    inspiral_coinc_def = copy.copy(InspiralCoincDef)
    inspiral_coinc_def.coinc_def_id = coinc_def_table.get_next_id()
    coinc_def_table.append(inspiral_coinc_def)
    found_coinc_def = copy.copy(InspiralSCExactCoincDef)
    found_coinc_def.coinc_def_id = coinc_def_table.get_next_id()
    coinc_def_table.append(found_coinc_def)

    # Precompute values that are common to all simulations.
    detectors = [
        lalsimulation.DetectorPrefixToLALDetector(ifo) for ifo in opts.detector
    ]
    responses = [det.response for det in detectors]
    locations = [det.location for det in detectors]

    if opts.jobs == 1:
        pool_map = map
    else:
        from .. import omp
        from multiprocessing import Pool
        omp.num_threads = 1  # disable OpenMP parallelism
        pool_map = Pool(opts.jobs).imap

    func = functools.partial(simulate,
                             psds=psds,
                             responses=responses,
                             locations=locations,
                             measurement_error=opts.measurement_error,
                             f_low=opts.f_low,
                             waveform=opts.waveform)

    # Make sure that each thread gets a different random number state.
    # We start by drawing a random integer s in the main thread, and
    # then the i'th subprocess will seed itself with the integer i + s.
    #
    # The seed must be an unsigned 32-bit integer, so if there are n
    # threads, then s must be drawn from the interval [0, 2**32 - n).
    #
    # Note that *we* are thread 0, so there are a total of
    # n=1+len(orig_sim_inspiral_table) threads.
    seed = np.random.randint(0, 2**32 - len(orig_sim_inspiral_table) - 1)
    np.random.seed(seed)

    count_coincs = 0

    with tqdm(total=len(orig_sim_inspiral_table)) as progress:
        for sim_inspiral, simulation in zip(
                orig_sim_inspiral_table,
                pool_map(
                    func,
                    zip(
                        np.arange(len(orig_sim_inspiral_table)) + seed + 1,
                        orig_sim_inspiral_table))):
            progress.update()

            sngl_inspirals = []
            used_snr_series = []
            net_snr = 0.0
            count_triggers = 0

            # Loop over individual detectors and create SnglInspiral entries.
            for ifo, (horizon, abs_snr, arg_snr, toa, series) \
                    in zip(opts.detector, simulation):

                if np.random.uniform() > opts.duty_cycle:
                    continue
                elif abs_snr >= opts.snr_threshold:
                    count_triggers += 1
                    net_snr += np.square(abs_snr)
                elif not opts.keep_subthreshold:
                    # If SNR < threshold, then the injection is not found.
                    # Skip it.
                    continue

                # Create SnglInspiral entry.
                used_snr_series.append(series)
                sngl_inspirals.append(
                    sngl_inspiral_table.RowType(**dict(
                        dict.fromkeys(sngl_inspiral_table.validcolumns, None),
                        process_id=process.process_id,
                        ifo=ifo,
                        mass1=sim_inspiral.mass1,
                        mass2=sim_inspiral.mass2,
                        spin1x=sim_inspiral.spin1x,
                        spin1y=sim_inspiral.spin1y,
                        spin1z=sim_inspiral.spin1z,
                        spin2x=sim_inspiral.spin2x,
                        spin2y=sim_inspiral.spin2y,
                        spin2z=sim_inspiral.spin2z,
                        end=toa,
                        snr=abs_snr,
                        coa_phase=arg_snr,
                        eff_distance=horizon / abs_snr)))

            net_snr = np.sqrt(net_snr)

            # If too few triggers were found, then skip this event.
            if count_triggers < opts.min_triggers:
                continue

            # If network SNR < threshold, then the injection is not found.
            # Skip it.
            if net_snr < opts.net_snr_threshold:
                continue

            # Add Coinc table entry.
            coinc = coinc_table.appendRow(
                coinc_event_id=coinc_table.get_next_id(),
                process_id=process.process_id,
                coinc_def_id=inspiral_coinc_def.coinc_def_id,
                time_slide_id=time_slide_id,
                insts=opts.detector,
                nevents=len(opts.detector),
                likelihood=None)

            # Add CoincInspiral table entry.
            coinc_inspiral_table.appendRow(
                coinc_event_id=coinc.coinc_event_id,
                instruments=[
                    sngl_inspiral.ifo for sngl_inspiral in sngl_inspirals
                ],
                end=lal.LIGOTimeGPS(1e-9 * np.mean([
                    sngl_inspiral.end.ns() for sngl_inspiral in sngl_inspirals
                    if sngl_inspiral.end is not None
                ])),
                mass=sim_inspiral.mass1 + sim_inspiral.mass2,
                mchirp=sim_inspiral.mchirp,
                combined_far=0.0,  # Not provided
                false_alarm_rate=0.0,  # Not provided
                minimum_duration=None,  # Not provided
                snr=net_snr)

            # Record all sngl_inspiral records and associate them with coincs.
            for sngl_inspiral, series in zip(sngl_inspirals, used_snr_series):
                # Give this sngl_inspiral record an id and add it to the table.
                sngl_inspiral.event_id = sngl_inspiral_table.get_next_id()
                sngl_inspiral_table.append(sngl_inspiral)

                if opts.enable_snr_series:
                    elem = lal.series.build_COMPLEX8TimeSeries(series)
                    elem.appendChild(
                        Param.from_pyvalue(u'event_id',
                                           sngl_inspiral.event_id))
                    xmlroot.appendChild(elem)

                # Add CoincMap entry.
                coinc_map_table.appendRow(
                    coinc_event_id=coinc.coinc_event_id,
                    table_name=sngl_inspiral_table.tableName,
                    event_id=sngl_inspiral.event_id)

            # Record injection
            if not opts.preserve_ids:
                sim_inspiral.simulation_id = sim_inspiral_table.get_next_id()
            sim_inspiral_table.append(sim_inspiral)

            count_coincs += 1
            progress.set_postfix(saved=count_coincs)

    # Record coincidence associating injections with events.
    for i, sim_inspiral in enumerate(sim_inspiral_table):
        coinc = coinc_table.appendRow(
            coinc_event_id=coinc_table.get_next_id(),
            process_id=process.process_id,
            coinc_def_id=found_coinc_def.coinc_def_id,
            time_slide_id=time_slide_id,
            instruments=None,
            nevents=None,
            likelihood=None)
        coinc_map_table.appendRow(coinc_event_id=coinc.coinc_event_id,
                                  table_name=sim_inspiral_table.tableName,
                                  event_id=sim_inspiral.simulation_id)
        coinc_map_table.appendRow(coinc_event_id=coinc.coinc_event_id,
                                  table_name=coinc_table.tableName,
                                  event_id=CoincID(i))

    # Record process end time.
    ligolw_process.set_process_end_time(process)

    # Write output file.
    write_fileobj(xmldoc, opts.output)
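
A note on the seeding scheme above: the parent process draws one base seed s, and the i'th worker task is seeded with s + i, so every process gets a distinct, reproducible random stream. Below is a minimal, self-contained sketch of the same idea; the function and task names are illustrative, not from the source.

import numpy as np
from multiprocessing import Pool

def simulate_one(args):
    seed, x = args
    np.random.seed(seed)           # each task gets its own stream
    return x + np.random.normal()  # stand-in for the real simulation

if __name__ == '__main__':
    n_tasks = 4
    base_seed = np.random.randint(0, 2**32 - n_tasks - 1)
    tasks = zip(np.arange(n_tasks) + base_seed + 1, range(n_tasks))
    with Pool(2) as pool:
        print(list(pool.imap(simulate_one, tasks)))
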
Code example #27
# BAYESTAR imports.
from lalinference.bayestar.decorator import memoized
from lalinference.io import fits
from lalinference.bayestar import ligolw as ligolw_bayestar
from lalinference.bayestar import filter
from lalinference.bayestar import timing
from lalinference.bayestar.sky_map import ligolw_sky_map, rasterize

# Other imports.
import numpy as np

# Read coinc file.
log.info('%s:reading input XML file', opts.input.name)
xmldoc, _ = ligolw_utils.load_fileobj(
    opts.input,
    contenthandler=ligolw_bayestar.LSCTablesAndSeriesContentHandler)

if opts.psd_files:  # read pycbc psds here
    import lal
    from glue.segments import segment, segmentlist
    import h5py

    # FIXME: This should be imported from pycbc.
    DYN_RANGE_FAC = 5.9029581035870565e+20

    class psd_segment(segment):
        def __new__(cls, psd, *args):
            return segment.__new__(cls, *args)

        def __init__(self, psd, *args):
            self.psd = psd  # assumed body; the original example is truncated here
Code example #28
def setup_roq(cp):
    """
    Generates cp objects with the different ROQs applied
    """
    use_roq = False
    if cp.has_option('paths', 'roq_b_matrix_directory') or cp.has_option(
            'paths', 'computeroqweights'):
        if not cp.has_option('analysis', 'roq'):
            print(
                "Warning: If you are attempting to enable ROQ by specifying "
                "roq_b_matrix_directory or computeroqweights, please use "
                "analysis.roq in your config file in future. Enabling ROQ.")
            cp.set('analysis', 'roq', 'True')  # ConfigParser wants string values
    if not cp.getboolean('analysis', 'roq'):
        yield cp
        return  # PEP 479: raising StopIteration inside a generator is an error
    from numpy import genfromtxt, array
    path = cp.get('paths', 'roq_b_matrix_directory')
    if not os.path.isdir(path):
        print("The ROQ directory %s does not seem to exist\n" % path)
        sys.exit(1)
    use_roq = True
    roq_paths = os.listdir(path)
    roq_params = {}
    roq_force_flow = None

    if cp.has_option('lalinference', 'roq_force_flow'):
        roq_force_flow = cp.getfloat('lalinference', 'roq_force_flow')
        print("WARNING: Forcing the f_low to ", str(roq_force_flow), "Hz")
        print(
            "WARNING: Overwriting user choice of flow, srate, seglen, and (mc_min, mc_max and q-min) or (mass1_min, mass1_max, mass2_min, mass2_max)"
        )

    def key(item):  # to order the ROQ bases
        return float(item[1]['seglen'])

    coinc_xml_obj = None
    row = None

    # Get file object of coinc.xml
    if opts.gid is not None:
        from ligo.gracedb.rest import GraceDb
        gid = opts.gid
        cwd = os.getcwd()
        if cp.has_option('analysis', 'service-url'):
            client = GraceDb(cp.get('analysis', 'service-url'))
        else:
            client = GraceDb()
        coinc_xml_obj = ligolw_utils.load_fileobj(
            client.files(gid, "coinc.xml"),
            contenthandler=lsctables.use_in(ligolw.LIGOLWContentHandler))[0]
    elif cp.has_option('input', 'coinc-xml'):
        coinc_xml_obj = ligolw_utils.load_fileobj(
            open(cp.get('input', 'coinc-xml'), "rb"),
            contenthandler=lsctables.use_in(ligolw.LIGOLWContentHandler))[0]

    # Get sim_inspiral from injection file
    if cp.has_option('input', 'injection-file'):
        print(
            "Only 0-th event in the XML table will be considered while running with ROQ\n"
        )
        row = lsctables.SimInspiralTable.get_table(
            ligolw_utils.load_filename(cp.get('input', 'injection-file'),
                                       contenthandler=lsctables.use_in(
                                           ligolw.LIGOLWContentHandler)))[0]

    roq_bounds = pipe_utils.Query_ROQ_Bounds_Type(path, roq_paths)
    if roq_bounds == 'chirp_mass_q':
        print('ROQ has bounds in chirp mass and mass-ratio')
        mc_priors, trigger_mchirp = pipe_utils.get_roq_mchirp_priors(
            path,
            roq_paths,
            roq_params,
            key,
            coinc_xml_obj=coinc_xml_obj,
            sim_inspiral=row)
    elif roq_bounds == 'component_mass':
        print('ROQ has bounds in component masses')
        # get component mass bounds, then compute the chirp mass that can be safely covered
        # further below we pass along the component mass bounds to the sampler, not the tighter chirp-mass, q bounds
        m1_priors, m2_priors, trigger_mchirp = pipe_utils.get_roq_component_mass_priors(
            path,
            roq_paths,
            roq_params,
            key,
            coinc_xml_obj=coinc_xml_obj,
            sim_inspiral=row)
        mc_priors = {}
        for (roq, m1_prior), (roq2, m2_prior) in zip(m1_priors.items(),
                                                     m2_priors.items()):
            mc_priors[roq] = sorted([
                pipe_utils.mchirp_from_components(m1_prior[1], m2_prior[0]),
                pipe_utils.mchirp_from_components(m1_prior[0], m2_prior[1])
            ])

    if cp.has_option('lalinference', 'trigger_mchirp'):
        trigger_mchirp = float(cp.get('lalinference', 'trigger_mchirp'))
    roq_mass_freq_scale_factor = pipe_utils.get_roq_mass_freq_scale_factor(
        mc_priors, trigger_mchirp, roq_force_flow)
    if roq_mass_freq_scale_factor != 1.:
        print(
            'WARNING: Rescaling ROQ basis, please ensure it is allowed with the model used.'
        )

    # If the true chirp mass is unknown, add variations over the mass bins
    if (opts.gid is not None
            or opts.injections is not None
            or cp.has_option('input', 'injection-file')
            or cp.has_option('lalinference', 'trigger_mchirp')
            or cp.has_option('input', 'coinc-xml')):

        for mc_prior in mc_priors:
            mc_priors[mc_prior] = array(mc_priors[mc_prior])
        # find mass bin containing the trigger
        trigger_bin = None
        for roq in roq_paths:
            bin_lo = mc_priors[roq][0] * roq_mass_freq_scale_factor
            bin_hi = mc_priors[roq][1] * roq_mass_freq_scale_factor
            if bin_lo <= trigger_mchirp <= bin_hi:
                trigger_bin = roq
                print('Prior in Mchirp will be [%s,%s] to contain the '
                      'trigger Mchirp %s' % (bin_lo, bin_hi, trigger_mchirp))
                break
        roq_paths = [trigger_bin]
    else:
        for mc_prior in mc_priors:
            mc_priors[mc_prior] = array(
                mc_priors[mc_prior]) * roq_mass_freq_scale_factor

    # write the master configparser
    cur_basedir = cp.get('paths', 'basedir')
    masterpath = os.path.join(cur_basedir, 'config.ini')
    with open(masterpath, 'w') as cpfile:
        cp.write(cpfile)

    for roq in roq_paths:
        this_cp = configparser.ConfigParser()
        this_cp.optionxform = str
        this_cp.read(masterpath)
        basedir = this_cp.get('paths', 'basedir')
        for dirs in 'basedir', 'daglogdir', 'webdir':
            val = this_cp.get('paths', dirs)
            newval = os.path.join(val, roq)
            mkdirs(newval)
            this_cp.set('paths', dirs, newval)
        this_cp.set(
            'paths', 'roq_b_matrix_directory',
            os.path.join(cp.get('paths', 'roq_b_matrix_directory'), roq))
        flow = roq_params[roq]['flow'] / roq_mass_freq_scale_factor
        srate = 2. * roq_params[roq]['fhigh'] / roq_mass_freq_scale_factor
        #if srate > 8192:
        #    srate = 8192

        seglen = roq_params[roq]['seglen'] * roq_mass_freq_scale_factor
        this_cp.set('engine', 'srate', str(srate))
        this_cp.set('engine', 'seglen', str(seglen))
        from ast import literal_eval  # safer than eval for config values
        if this_cp.has_option('lalinference', 'flow'):
            tmp = literal_eval(this_cp.get('lalinference', 'flow'))
            ifos = tmp.keys()
        else:
            tmp = {}
            ifos = literal_eval(this_cp.get('analysis', 'ifos'))
        for i in ifos:
            tmp[i] = flow
        this_cp.set('lalinference', 'flow', str(tmp))
        if roq_bounds == 'chirp_mass_q':
            mc_min = mc_priors[roq][0] * roq_mass_freq_scale_factor
            mc_max = mc_priors[roq][1] * roq_mass_freq_scale_factor
            # params.dat uses the convention q>1 so our q_min is the inverse of their qmax
            q_min = 1. / float(roq_params[roq]['qmax'])
            this_cp.set('engine', 'chirpmass-min', str(mc_min))
            this_cp.set('engine', 'chirpmass-max', str(mc_max))
            this_cp.set('engine', 'q-min', str(q_min))
            this_cp.set(
                'engine', 'comp-min',
                str(
                    max(
                        roq_params[roq]['compmin'] *
                        roq_mass_freq_scale_factor,
                        mc_min * pow(1 + q_min, 1. / 5.) *
                        pow(q_min, 2. / 5.))))
            this_cp.set(
                'engine', 'comp-max',
                str(mc_max * pow(1 + q_min, 1. / 5.) * pow(q_min, -3. / 5.)))
        elif roq_bounds == 'component_mass':
            m1_min = m1_priors[roq][0]
            m1_max = m1_priors[roq][1]
            m2_min = m2_priors[roq][0]
            m2_max = m2_priors[roq][1]
            this_cp.set('engine', 'mass1-min', str(m1_min))
            this_cp.set('engine', 'mass1-max', str(m1_max))
            this_cp.set('engine', 'mass2-min', str(m2_min))
            this_cp.set('engine', 'mass2-max', str(m2_max))
        yield this_cp
    return  # PEP 479: raising StopIteration inside a generator is an error
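
setup_roq is a generator yielding one configured ConfigParser per ROQ basis (or the unmodified cp when ROQ is disabled), so a pipeline would consume it in a loop. A hedged usage sketch; the loop body is illustrative only:

for roq_cp in setup_roq(cp):
    # one sub-analysis per yielded parser, each with its own basedir
    print('setting up analysis in', roq_cp.get('paths', 'basedir'))
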
Code example #29
def gracedb_sky_map(coinc_file,
                    psd_file,
                    waveform,
                    f_low,
                    min_distance=None,
                    max_distance=None,
                    prior_distance_power=None,
                    method="toa_phoa_snr",
                    nside=-1,
                    chain_dump=None,
                    f_high_truncate=1.0,
                    enable_snr_series=False):
    # Read input file.
    log.debug('reading coinc file')
    xmldoc, _ = ligolw_utils.load_fileobj(
        coinc_file, contenthandler=ligolw.LSCTablesAndSeriesContentHandler)

    # Locate the tables that we need.
    coinc_inspiral_table = ligolw_table.get_table(
        xmldoc, lsctables.CoincInspiralTable.tableName)
    coinc_map_table = ligolw_table.get_table(xmldoc,
                                             lsctables.CoincMapTable.tableName)
    sngl_inspiral_table = ligolw_table.get_table(
        xmldoc, lsctables.SnglInspiralTable.tableName)

    # Attempt to determine phase convention from process table.
    try:
        process_table = ligolw_table.get_table(
            xmldoc, lsctables.ProcessTable.tableName)
        process, = process_table
        process_name = process.program.lower()
    except ValueError:
        process_name = ''
    if 'pycbc' in process_name:
        phase_convention = 'findchirp'
    else:
        phase_convention = 'antifindchirp'

    # Locate the sngl_inspiral rows that we need.
    coinc_inspiral = coinc_inspiral_table[0]
    coinc_event_id = coinc_inspiral.coinc_event_id
    event_ids = [
        coinc_map.event_id for coinc_map in coinc_map_table
        if coinc_map.coinc_event_id == coinc_event_id
    ]
    sngl_inspirals = [
        next((sngl_inspiral for sngl_inspiral in sngl_inspiral_table
              if sngl_inspiral.event_id == event_id)) for event_id in event_ids
    ]

    # Try to load complex SNR time series.
    snrs = ligolw.snr_series_by_sngl_inspiral_id_for_xmldoc(xmldoc)
    try:
        snrs = [snrs[sngl.event_id] for sngl in sngl_inspirals]
    except KeyError:
        snrs = None

    # Read PSDs.
    log.debug('reading PSDs time series')
    xmldoc, _ = ligolw_utils.load_fileobj(
        psd_file, contenthandler=lal.series.PSDContentHandler)
    psds = lal.series.read_psd_xmldoc(xmldoc, root_name=None)

    # Rearrange PSDs into the same order as the sngl_inspirals.
    psds = [psds[sngl_inspiral.ifo] for sngl_inspiral in sngl_inspirals]

    # Interpolate PSDs.
    log.debug('constructing PSD interpolants')
    psds = [
        timing.InterpolatedPSD(filter.abscissa(psd),
                               psd.data.data,
                               f_high_truncate=f_high_truncate) for psd in psds
    ]

    # Run sky localization
    return ligolw_sky_map(sngl_inspirals,
                          waveform,
                          f_low,
                          min_distance,
                          max_distance,
                          prior_distance_power,
                          method=method,
                          nside=nside,
                          psds=psds,
                          phase_convention=phase_convention,
                          chain_dump=chain_dump,
                          snr_series=snrs,
                          enable_snr_series=enable_snr_series)
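
A hedged usage sketch for the function above: both inputs are open file objects, and the file names and waveform string here are placeholders rather than values taken from the source.

with open('coinc.xml', 'rb') as coinc, open('psd.xml.gz', 'rb') as psd:
    result = gracedb_sky_map(coinc, psd,
                             'TaylorF2threePointFivePN', f_low=30.0)
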
Code example #30
File: ligolw_sky_map.py  Project: d80b2t/LIGO_O3
def gracedb_sky_map(coinc_file,
                    psd_file,
                    waveform,
                    f_low,
                    min_distance=None,
                    max_distance=None,
                    prior=None,
                    reference_frequency=None,
                    nside=-1):
    # LIGO-LW XML imports.
    from glue.ligolw import table as ligolw_table
    from glue.ligolw import utils as ligolw_utils
    from glue.ligolw import lsctables

    # Determine approximant, amplitude order, and phase order from command line arguments.
    approximant, amplitude_order, phase_order = timing.get_approximant_and_orders_from_string(
        waveform)

    # Read input file.
    xmldoc, _ = ligolw_utils.load_fileobj(coinc_file)

    # Locate the tables that we need.
    coinc_inspiral_table = ligolw_table.get_table(
        xmldoc, lsctables.CoincInspiralTable.tableName)
    coinc_map_table = ligolw_table.get_table(xmldoc,
                                             lsctables.CoincMapTable.tableName)
    sngl_inspiral_table = ligolw_table.get_table(
        xmldoc, lsctables.SnglInspiralTable.tableName)

    # Locate the sngl_inspiral rows that we need.
    coinc_inspiral = coinc_inspiral_table[0]
    coinc_event_id = coinc_inspiral.coinc_event_id
    event_ids = [
        coinc_map.event_id for coinc_map in coinc_map_table
        if coinc_map.coinc_event_id == coinc_event_id
    ]
    sngl_inspirals = [
        next(sngl_inspiral for sngl_inspiral in sngl_inspiral_table
             if sngl_inspiral.event_id == event_id)
        for event_id in event_ids
    ]

    # Read PSDs.
    if psd_file is None:
        psds = None
    else:
        xmldoc, _ = ligolw_utils.load_fileobj(psd_file)
        psds = read_psd_xmldoc(xmldoc)

        # Rearrange PSDs into the same order as the sngl_inspirals.
        psds = [psds[sngl_inspiral.ifo] for sngl_inspiral in sngl_inspirals]

        # Interpolate PSDs.
        psds = [
            timing.InterpolatedPSD(filter.abscissa(psd), psd.data.data)
            for psd in psds
        ]

    # TOA+SNR sky localization
    return ligolw_sky_map(sngl_inspirals,
                          approximant,
                          amplitude_order,
                          phase_order,
                          f_low,
                          min_distance,
                          max_distance,
                          prior,
                          reference_frequency=reference_frequency,
                          nside=nside,
                          psds=psds)
Code example #31
def fromsegmentxml(xml_file, return_dict=False, select_seg_def_id=None):
    """ Read a glue.segments.segmentlist from the file object file
    containing an xml segment table.

    Parameters
    -----------
    xml_file : file object
        file object for segment xml file
    return_dict : boolean, optional (default = False)
        returns a glue.segments.segmentlistdict containing coalesced
        glue.segments.segmentlists keyed by seg_def.name for each entry in the
        contained segment_def_table.
    select_seg_def_id : int, optional (default = None)
        returns a glue.segments.segmentlist object containing only those
        segments matching the given segment_def_id integer

    Returns
    --------
    segs : glue.segments.segmentlist instance
        The segment list contained in the file.
    """

    # load XML with SegmentDefTable and SegmentTable
    xmldoc, digest = utils.load_fileobj(xml_file,
                                        gz=xml_file.name.endswith(".gz"),
                                        contenthandler=ContentHandler)
    seg_def_table = table.get_table(xmldoc,
                                    lsctables.SegmentDefTable.tableName)
    seg_table = table.get_table(xmldoc, lsctables.SegmentTable.tableName)

    if return_dict:
        segs = segments.segmentlistdict()
    else:
        segs = segments.segmentlist()

    seg_id = {}
    for seg_def in seg_def_table:

        # encode ifo, channel name and version
        full_channel_name = ':'.join(
            [str(seg_def.ifos),
             str(seg_def.name),
             str(seg_def.version)])
        seg_id[int(seg_def.segment_def_id)] = full_channel_name
        if return_dict:
            segs[full_channel_name] = segments.segmentlist()

    for seg in seg_table:
        seg_obj = segments.segment(
            lal.LIGOTimeGPS(seg.start_time, seg.start_time_ns),
            lal.LIGOTimeGPS(seg.end_time, seg.end_time_ns))
        if return_dict:
            segs[seg_id[int(seg.segment_def_id)]].append(seg_obj)
        elif select_seg_def_id is not None:
            if int(seg.segment_def_id) == select_seg_def_id:
                segs.append(seg_obj)
        else:
            segs.append(seg_obj)

    if return_dict:
        for seg_name in seg_id.values():
            segs[seg_name] = segs[seg_name].coalesce()
    else:
        segs = segs.coalesce()

    xmldoc.unlink()

    return segs
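
Illustrative call (the file name is a placeholder): with return_dict=True the function returns one coalesced segmentlist per flag, keyed by 'ifos:name:version'.

with open('segments.xml', 'rb') as f:
    segdict = fromsegmentxml(f, return_dict=True)
for name, seglist in segdict.items():
    print(name, abs(seglist), 'seconds of livetime')
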
Code example #32
def main():
    usage ="""%%prog [options] GROUP TYPE EVENTFILE
   where GROUP is one of %(groups)s
         TYPE is one of %(types)s
         EVENTFILE is file containing event data. '-' indicates stdin.

%%prog [options] replace GRACEID EVENTFILE
   where GROUP is one of %(groups)s
         TYPE is one of %(types)s
         EVENTFILE is file containing event data. '-' indicates stdin.

%%prog [options] ping
   Test server connection

%%prog [options] upload GRACEID FILE [COMMENT] 
   where GRACEID is the id of an existing candidate event in GraCEDb
         FILE      is the name of the file to upload. '-' indicates stdin.
         COMMENT   is an optional annotation to enter into the log
   Upload FILE to the private data area for a candidate event. To apply 
   a tag, use the --tag-name option (and --tag-display-name if desired.)

%%prog [options] download GRACEID FILE [DESTINATION]
   where GRACEID      is the id of an existing candidate event in GraCEDb
         FILE         is the name of the file previously uploaded.
         DESTINATION  is the download destination.  '-' indicates stdout.
                      default is same file name as FILE
    Download FILE from private data area of a candidate event

%%prog [options] log GRACEID COMMENT
   where GRACEID  is the id of an existing candidate event in GraCEDb
         COMMENT  is text that will be entered into the event's log
   Enter a comment into the log for a candidate event.  To apply a tag,
   use the --tag-name option (and --tag-display-name if desired).

%%prog [options] label GRACEID LABEL
    Label event with GRACEID with LABEL.  LABEL must already exist.

%%prog [options] tag GRACEID LOG_N TAG_NAME [DISP_NAME]
   where GRACEID   is the id of an existing candidate event in GraCEDb
         LOG_N     is the number of the log message.
         TAG_NAME  is the name of the tag
         DISP_NAME is the tag display name (ignored for existing tags)
    Tag an existing log message. Alternatively, the tag name and 
    display name can be passed in with the --tag-name and 
    --tag-display-name options.

%%prog [options] delete_tag GRACEID LOG_N TAG_NAME
    Remove a tag from a log message. Alternatively, the tag name 
    can be passed in with the --tag-name option.

%%prog [options] search SEARCH PARAMS
    Search parameters are a list of requirements to be satisfied.  They
    may be GPS times, GPS time ranges, graceids and ranges, group(s),
    analysis type(s), labels, etc.  Note that text is case insensitive.
    Example: %%prog search G0100..G0200 mbta LUMIN_GO

Environment Variables:
    GRACEDB_SERVICE_URL   (can be overridden by --service-url)
    HTTP_PROXY            (can be overridden by --proxy)
    X509_USER_PROXY
    X509_USER_CERT
    X509_USER_KEY

Credentials are looked for in this order:
    (1) $(X509_USER_CERT) / $(X509_USER_KEY)
    (2) $(X509_USER_PROXY)
    (3) Default location of grid proxy ( /tmp/x509up_u$(UID) )
    (4) $(HOME)/.globus/usercert.pem / $(HOME)/.globus/userkey.pem

Note that comments can only be 200 characters long.
Longer strings will be truncated.""" % {
        'groups' : 'CBC, Burst, Stochastic, Coherent, Test, External',
        'types'  : ", ".join(validTypes),
    }

    from optparse import OptionParser
    op = OptionParser(usage=usage)
    op.add_option("-p", "--proxy", dest="proxy",
                  help="HTTP Proxy", metavar="PROXY[:PORT]")
    op.add_option("-s", "--service-url", dest="service",
                  help="GraCEDb Service URL", metavar="URL")
    op.add_option("-f", "--filename", dest="filename",
                  help="If data is read from stdin, use this as the filename.", metavar="NAME")

    op.add_option("-a", "--alert", dest="alert",
                  help="Send an LV alert (deprecated; alerts sent by default)",
                  action="store_true", default=None
                 )

    op.add_option("-c", "--columns", dest="columns",
                  help="Comma separated list of event attributes to include in results (only meaningful in search)",
                  default=DEFAULT_COLUMNS
                 )

    op.add_option("-l", "--ligolw", dest="ligolw",
                  help="Download ligolw file of combined search results (not meaningful outside of search). NOTE: Produces an ERROR if any of the events returned by the search do not have coinc.xml files.",
                  action="store_true", default=False
                 )
    op.add_option("-t", "--tag-name", dest="tagName",
                  help="tag name in database (only used for log, upload, tag, and delete_tag)",
                  default=None
                 )
    op.add_option("-d", "--tag-display-name", dest="tagDispName",
                  help="tag display name (ignored for existing tags)",
                  default=None
                 )

    options, args = op.parse_args()

    try:
        from glue.ligolw import ligolw
        from glue.ligolw import lsctables
        from glue.ligolw import utils
        from glue.ligolw.utils import ligolw_add
    except ImportError:
        if options.ligolw:
            error("ligolw modules not found")
            exit(1)
        else:
            pass

    proxy = options.proxy or os.environ.get('HTTP_PROXY', None)
    service = options.service or \
              os.environ.get('GRACEDB_SERVICE_URL', None) or \
              DEFAULT_SERVICE_URL

    if options.alert is not None:
        warning("alert option is deprecated.  Alerts are now sent by default.")

    proxyport = None
    if proxy and proxy.find(':') > 0:
        try:
            proxy, proxyport = proxy.split(':')
            proxyport = int(proxyport)
        except:
            op.error("Malformed proxy: '%s'" % proxy)
    if proxyport:
        client = Client(service,
                        proxy_host=proxy,
                        proxy_port=proxyport)
    else:
        client = Client(service, proxy_host=proxy)

    if len(args) < 1:
        op.error("not enough arguments")
    elif args[0] == 'ping':
        response = client.ping()
        if response.status==200:
            output("%s: 200 OK" % service)
            exit(0)
    elif args[0] == 'upload':
        if len(args) < 3:
            op.error("not enough arguments for upload")
        graceid = args[1]
        filename = args[2]
        comment = " ".join(args[3:])
        tagName = options.tagName
        tagDispName = options.tagDispName
        response = client.writeLog(graceid, comment, filename, None,
            tagName, tagDispName)
    elif args[0] == 'download':
        if len(args) not in [2,3,4]:
            op.error("wrong number of arguments for download")
        graceid = args[1]
        if len(args) == 2:
            # get/print listing.
            response = client.files(graceid)
            if response and response.status == 200:
                for fname in json.loads(response.read()):
                    print(fname)
                exit(0)
            print(response.reason)
            exit(1)
        filename = args[2]
        if len(args) == 4:
            outfile = args[3]
        else:
            outfile = os.path.basename(filename)
        response = client.download(graceid, filename, outfile)
        if response:
            # no response means file saved.  any other response is an error message.
            print(response)
            exit(1)
        exit(0)
    elif args[0] == 'log':
        if len(args) < 3:
            op.error("not enough arguments for log")
        graceid = args[1]
        message = " ".join(args[2:])
        # Pass None for the filename and contents so that the tag arguments
        # land in the right positions (cf. the upload branch above).
        response = client.writeLog(graceid, message, None, None,
                                   options.tagName, options.tagDispName)
    elif args[0] == 'tag':
        if options.tagName:
            if len(args) != 2:
                op.error("wrong number of arguments for tag")
            tagName = options.tagName
            tagDispName = options.tagDispName
        else:
            if len(args) not in [4,5]:
                op.error("wrong number of arguments for tag")
            tagName = args[3]
            tagDispName = None
            if len(args)==5:
                tagDispName = args[4]
        graceid = args[1]
        logN = args[2]
        response = client.createTag(graceid, logN, tagName, tagDispName)
    elif args[0] == 'delete_tag':
        error("len args = %s" % len(args))
        error("args = %s" % args)
        if options.tagName:
            if len(args) != 2:
                op.error("wrong number of arguments for delete_tag")
            tagName = options.tagName
        else:
            if len(args) != 4:
                op.error("wrong number of arguments for delete_tag")
            tagName = args[3]
        graceid = args[1]
        logN = args[2]
        response = client.deleteTag(graceid, logN, tagName)
    elif args[0] == 'label':
        if len(args) != 3:
            op.error("wrong number of arguments for label")
        graceid = args[1]
        label = args[2]
        response = client.writeLabel(graceid, label)
    elif args[0] == 'search':
        query = " ".join(args[1:])
        
        columns = options.columns
        columns = columns.replace('DEFAULTS',DEFAULT_COLUMNS)
        columns = columns.split(',')

        count = None # XXX Let's just get rid of this?
        orderby = None # XXX Should we implement this?

        events = client.events(query, orderby, count, columns)

        if options.ligolw:
            xmldoc = ligolw.Document()
            for e in events:
                graceid = e['graceid']
                try:
                    r = client.files(graceid, "coinc.xml")
                    utils.load_fileobj(r, xmldoc = xmldoc)
                except:
                    error("Missing coinc.xml for %s. Cannot build ligolw output." % graceid)
                    exit(1)
            ligolw_add.reassign_ids(xmldoc)
            ligolw_add.merge_ligolws(xmldoc)
            ligolw_add.merge_compatible_tables(xmldoc)
            xmldoc.write()
        else:
            accessFun = {
                "labels" : lambda e: \
                    ",".join(e['labels'].keys()),
                "dataurl" : lambda e: e['links']['files'],
            }

            header = "#" + "\t".join(columns)
            output(header)
            for e in events:
                row = [ accessFun.get(column, lambda e: defaultAccess(e,column))(e) for column in columns ]
                row = "\t".join(row)
                output(row)
        
        return 0
    elif args[0] == 'replace':
        if len(args) != 3:
            op.error("wrong number of args for replace")
        graceid = args[1]
        filename = args[2]
        response = client.replaceEvent(graceid, filename)
    elif len(args) == 3:
        # Create a new event.
        group = args[0]
        type = args[1]
        filename = args[2]

        # Check that the group and type are known to the API.
        # NB: the dictionary returned by the API has keys and values
        # reversed w.r.t. the typeCodeMap above.
        foundType = False
        for key, value in client.analysis_types.items():
            if type==str(value):
                type = key
                foundType = True
        if not foundType:
            error("Type must be one of: ", ", ".join(client.analysis_types.values()))
            sys.exit(1)

        foundGroup = group in client.groups
        if not foundGroup:
            error("Group must be one of: ", ", ".join(client.groups))
            sys.exit(1)

        response = client.createEvent(group, type, filename)
        if not response:
            error("There was a problem.  Did you do grid-proxy-init -rfc?")
            sys.exit(1)

        # XXX Must output graceid for consistency with earlier client.
        # Therefore, must deal with response here rather than at the end.
        exitCode = 0
        status = response.status
        if status >= 400:
            exitCode=1
        try:
            rv = response.read()
        except:
            rv = response
        try:
            rv = json.loads(rv)
        except:
            pass

        if 'graceid' in rv:
            output(rv['graceid'])
        elif 'error' in rv:
            exitCode = 1
            error(rv['error'])

        return exitCode

    else:
        op.error("")
        sys.exit(1)

    # Output the response.
    exitCode = 0
    try:
        rv = response.read()
        status = response.status
    except Exception:
        rv = response
        status = 0

    try:
        responseBody = json.loads(rv)
    except Exception:
        responseBody = rv

    if status >= 400:
        exitCode = 1
    if isinstance(responseBody, str):
        output("%d: %s" % (status, responseBody))
    else:
        output("Server returned %d" % status)
        if ('error' in responseBody) and responseBody['error']:
            error(responseBody['error'])
            exitCode = 1
        if ('warning' in responseBody) and responseBody['warning']:
            warning(responseBody['warning'])
        if ('output' in responseBody) and responseBody['output']:
            output(responseBody['output'])

    return exitCode
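
Since main() returns a process exit code, the script's entry point would plausibly be wired up as follows (a sketch, not shown in the source):

if __name__ == '__main__':
    import sys
    sys.exit(main())
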
Code example #33
def frominjectionfile(file, type, ifo=None, start=None, end=None):
  
  """
    Read generic injection file object file containing injections of the given
    type string. Returns an 'Sim' lsctable of the corresponding type.

    Arguments:
   
      file : file object
      type : [ "inspiral" | "burst" | "ringdown" ]

    Keyword arguments:

      ifo : [ "G1" | "H1" | "H2" | "L1" | "V1" ]
  """

  # read type
  type = type.lower()

  # read injection xml
  xml = re.compile('(xml$|xml.gz$)')
  if re.search(xml,file.name):
    xmldoc,digest = utils.load_fileobj(file)
    injtable = table.get_table(xmldoc,'sim_%s:table' % (type))

  # read injection txt
  else:
    cchar = re.compile('[#%<!()_\[\]{}:;\'\"]+')

    #== construct new Sim{Burst,Inspiral,Ringdown}Table
    injtable = lsctables.New(lsctables.__dict__['Sim%sTable' % (type.title())])
    if type=='inspiral':
      columns = ['geocent_end_time.geocent_end_time_ns',\
                 'h_end_time.h_end_time_ns',\
                 'l_end_time.l_end_time_ns',\
                 'v_end_time.v_end_time_ns',\
                 'distance'] 
      for line in file.readlines():
        if re.match(cchar,line):
          continue
        # set up siminspiral object
        inj = lsctables.SimInspiral()
        # split data
        sep = re.compile('[\s,=]+')
        data = sep.split(line)
        # set attributes
        inj.geocent_end_time    = int(data[0].split('.')[0])
        inj.geocent_end_time_ns = int(data[0].split('.')[1])
        inj.h_end_time          = int(data[1].split('.')[0])
        inj.h_end_time_ns       = int(data[1].split('.')[1])
        inj.l_end_time          = int(data[2].split('.')[0])
        inj.l_end_time_ns       = int(data[2].split('.')[1])
        inj.v_end_time          = int(data[3].split('.')[0])
        inj.v_end_time_ns       = int(data[3].split('.')[1])
        inj.distance            = float(data[4])

        injtable.append(inj)

    if type=='burst':
      if file.readlines()[0].startswith('filestart'):
        # if given parsed burst file
        file.seek(0)

        snrcol = { 'G1':23, 'H1':19, 'L1':21, 'V1':25 }

        for line in file.readlines():
          inj = lsctables.SimBurst()
          # split data
          sep = re.compile('[\s,=]+')
          data = sep.split(line)
          # set attributes

          # gps time
          if 'burstgps' in data:
            idx = data.index('burstgps')+1
            geocent = LIGOTimeGPS(data[idx])

            inj.time_geocent_gps    = geocent.seconds
            inj.time_geocent_gps_ns = geocent.nanoseconds
          else:
            continue


          #inj.waveform            = data[4]
          #inj.waveform_number     = int(data[5])

          # frequency
          if 'freq' in data:
            idx = data.index('freq')+1
            inj.frequency = float(data[idx])
          else:
            continue

          # SNR a.k.a. amplitude
          if ifo and 'snr%s' % ifo in data:
            idx = data.index('snr%s' % ifo)+1
            inj.amplitude = float(data[idx])
          elif 'rmsSNR' in data:
            idx = data.index('rmsSNR')+1
            inj.amplitude = float(data[idx])
          else:
            continue

          if 'phi' in data:
            idx = data.index('phi')+1
            inj.ra = float(data[idx])*24/(2*math.pi)

          if 'theta' in data:
            idx = data.index('theta')+1
            # the polar angle theta maps to declination, not right ascension
            inj.dec = 90-(float(data[idx])*180/math.pi)

          if ifo and 'hrss%s' % ifo in data:
            idx = data.index('hrss%s' % ifo)+1
            inj.hrss = float(data[idx])
          elif 'hrss' in data:
            idx = data.index('hrss')+1
            inj.hrss = float(data[idx])

          # extra columns to be added when I know how
          #inj.q = 0
          #inj.q                   = float(data[11])
          #h_delay = LIGOTimeGPS(data[41])
          #inj.h_peak_time         = inj.time_geocent_gps+h_delay.seconds
          #inj.h_peak_time_ns      = inj.time_geocent_gps_ns+h_delay.nanoseconds
          #l_delay = LIGOTimeGPS(data[43])
          #inj.l_peak_time         = inj.time_geocent_gps+l_delay.seconds
          #inj.l_peak_time_ns      = inj.time_geocent_gps_ns+l_delay.nanoseconds
          #v_delay = LIGOTimeGPS(data[43])
          #inj.v_peak_time         = inj.time_geocent_gps+v_delay.seconds
          #inj.v_peak_time_ns      = inj.time_geocent_gps_ns+v_delay.nanoseconds

          injtable.append(inj)

      else:
        # if given unparsed burst file
        file.seek(0)
        for line in file.readlines():
          inj = lsctables.SimBurst()
          # split data
          sep = re.compile('[\s,]+')
          data = sep.split(line)
          # set attributes
          geocent = LIGOTimeGPS(data[0])
          inj.time_geocent_gps    = geocent.seconds
          inj.time_geocent_gps_ns = geocent.nanoseconds

          injtable.append(inj)

  injections = table.new_from_template(injtable)
  if not start:  start = 0
  if not end:    end   = 9999999999
  span = segments.segmentlist([ segments.segment(start, end) ])
  get_time = dqTriggerUtils.def_get_time(injections.tableName)
  injections.extend(inj for inj in injtable if get_time(inj) in span)

  return injections
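
Illustrative call (the file name and GPS bounds are placeholders): read inspiral injections and keep only those falling in a day-long segment.

with open('injections.xml', 'r') as f:
    inspirals = frominjectionfile(f, 'inspiral',
                                  start=1000000000, end=1000086400)
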
Code example #34
def get_coinc_xmldoc(gracedb_client, graceid, filename="coinc.xml"):
    return ligolw_utils.load_fileobj(get_filename(gracedb_client,
                                                  graceid,
                                                  filename=filename),
                                     contenthandler=LIGOLWContentHandler)[0]
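
A hedged example of calling this helper; the client construction, the event id, and the module-level lsctables import are assumptions, not from the source.

from ligo.gracedb.rest import GraceDb

client = GraceDb()
xmldoc = get_coinc_xmldoc(client, 'G000000')
sngl_inspiral_table = lsctables.SnglInspiralTable.get_table(xmldoc)
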
Code example #35
    ### load settings for accessing dmt segment files
    dq_name = config.get('get_science_segments', 'include')
    segdb_url = config.get('get_science_segments', 'segdb')

    if opts.verbose:
        print('querying science segments')

    ### this returns a string
    seg_xml_file = idq.segment_query(config,
                                     opts.start,
                                     opts.end,
                                     url=segdb_url)

    ### write seg_xml_file to disk
    lsctables.use_in(ligolw.LIGOLWContentHandler)
    xmldoc = ligolw_utils.load_fileobj(
        seg_xml_file, contenthandler=ligolw.LIGOLWContentHandler)[0]

    ### science segments xml filename
    seg_file = idq.segxml(gdbdir, "%s_%s" % (filetag, dq_name), opts.start,
                          opts.end - opts.start)
    if opts.verbose:
        print('  writing science segments to file : ' + seg_file)
    ligolw_utils.write_filename(xmldoc, seg_file, gz=seg_file.endswith(".gz"))

    (scisegs, coveredseg) = idq.extract_dq_segments(
        seg_file, dq_name)  ### read in segments from xml file

if opts.verbose:
    print('finding idq segments')
idqsegs = idq.get_idq_segments(realtimedir,
                               opts.start,
Code example #36
out_xmldoc.appendChild(ligolw.LIGO_LW())

# Write process metadata to output file.
process = command.register_to_xmldoc(
    out_xmldoc, parser, opts, ifos=opts.detector, comment="Little hope!")

# Add search summary to output file.
all_time = segments.segment([glue.lal.LIGOTimeGPS(0), glue.lal.LIGOTimeGPS(2e9)])
search_summary_table = lsctables.New(lsctables.SearchSummaryTable)
out_xmldoc.childNodes[0].appendChild(search_summary_table)
summary = ligolw_search_summary.append_search_summary(out_xmldoc, process,
    inseg=all_time, outseg=all_time)

# Read template bank file.
progress.update(-1, 'reading ' + opts.template_bank.name)
xmldoc, _ = ligolw_utils.load_fileobj(
    opts.template_bank, contenthandler=ligolw_bayestar.LSCTablesContentHandler)

# Determine the low frequency cutoff from the template bank file.
template_bank_f_low = ligolw_bayestar.get_template_bank_f_low(xmldoc)

template_bank = ligolw_table.get_table(xmldoc,
    lsctables.SnglInspiralTable.tableName)

# Read injection file.
progress.update(-1, 'reading ' + opts.input.name)
xmldoc, _ = ligolw_utils.load_fileobj(
    opts.input, contenthandler=ligolw_bayestar.LSCTablesContentHandler)

# Extract simulation table from injection file.
sim_inspiral_table = ligolw_table.get_table(xmldoc,
    lsctables.SimInspiralTable.tableName)
Code example #37
subparser.add_argument('ifo1', choices=available_ifos)
subparser.add_argument('ifo2', choices=available_ifos)

subparser = add_parser(conj)
subparser.add_argument('ifos', choices=available_ifos, nargs='+')

subparser = add_parser(amplify)
subparser.add_argument('ifos', choices=available_ifos, nargs='+')
subparser.add_argument('gain', type=float)

args = parser.parse_args()
kwargs = dict(args.__dict__)
func = locals()[kwargs.pop('func')]
infile = kwargs.pop('input')
outfile = kwargs.pop('output')


# Read input file.
xmldoc, _ = load_fileobj(
    infile, contenthandler=LSCTablesAndSeriesContentHandler)

# Process it.
process = command.register_to_xmldoc(xmldoc, parser, args)
func(xmldoc, **kwargs)
set_process_end_time(process)

# Write output file.
with SignalsTrap():
    write_fileobj(
        xmldoc, outfile, gz=(os.path.splitext(outfile.name)[-1] == '.gz'))
Code example #38
    index_html_obj.close()

    #=============================================
    # sciseg query
    #=============================================
    if opts.ignore_science_segments:
        logger.info("ignoring science segments")
        scisegs = [[gpsstart, gpsstart+stride]]

    else:
        logger.info("generating science segments")
        try:
            seg_xml_file = idq.segment_query(config, gpsstart, gpsstart+stride, url=config.get("get_science_segments","segdb"))

            lsctables.use_in(ligolw.LIGOLWContentHandler)
            xmldoc = utils.load_fileobj(seg_xml_file, contenthandler=ligolw.LIGOLWContentHandler)[0]

            seg_file = "%s/science_segements-%d-%d.xml.gz"%(this_sumdir, int(gpsstart), int(stride))
            logger.info("writting science segments to file : %s"%seg_file)
            utils.write_filename(xmldoc, seg_file, gz=seg_file.endswith(".gz"))

            (scisegs, coveredseg) = idq.extract_dq_segments(seg_file, config.get('get_science_segments', 'include'))

        except Exception as e:
            traceback.print_exc()
            logger.info("ERROR: segment generation failed. Skipping this summary period.")

            gpsstart += stride
            continue

    #=============================================
Code example #39
                                     comment="Simulated coincidences")

# Add search summary to output file.
all_time = segments.segment(
    [glue.lal.LIGOTimeGPS(0),
     glue.lal.LIGOTimeGPS(2e9)])
search_summary_table = lsctables.New(lsctables.SearchSummaryTable)
out_xmldoc.childNodes[0].appendChild(search_summary_table)
summary = ligolw_search_summary.append_search_summary(out_xmldoc,
                                                      process,
                                                      inseg=all_time,
                                                      outseg=all_time)

# Read PSDs.
progress.update(-1, 'reading ' + opts.reference_psd.name)
xmldoc, _ = ligolw_utils.load_fileobj(
    opts.reference_psd, contenthandler=lal.series.PSDContentHandler)
psds = lal.series.read_psd_xmldoc(xmldoc, root_name=None)
psds = {
    key: timing.InterpolatedPSD(filter.abscissa(psd), psd.data.data)
    for key, psd in psds.items() if psd is not None
}

# Read injection file.
progress.update(-1, 'reading ' + opts.input.name)
xmldoc, _ = ligolw_utils.load_fileobj(
    opts.input, contenthandler=ligolw_bayestar.LSCTablesContentHandler)

# Extract simulation table from injection file.
sim_inspiral_table = ligolw_table.get_table(
    xmldoc, lsctables.SimInspiralTable.tableName)
Code example #40
File: sky_map.py  Project: llondon6/lalsuite-mmrd
def gracedb_sky_map(coinc_file,
                    psd_file,
                    waveform,
                    f_low,
                    min_distance=None,
                    max_distance=None,
                    prior_distance_power=None,
                    method="toa_phoa_snr",
                    nside=-1,
                    chain_dump=None,
                    phase_convention='antifindchirp',
                    f_high_truncate=1.0,
                    enable_snr_series=False):
    # Read input file.
    xmldoc, _ = ligolw_utils.load_fileobj(
        coinc_file, contenthandler=ligolw.LSCTablesAndSeriesContentHandler)

    # Locate the tables that we need.
    coinc_inspiral_table = ligolw_table.get_table(
        xmldoc, lsctables.CoincInspiralTable.tableName)
    coinc_map_table = ligolw_table.get_table(xmldoc,
                                             lsctables.CoincMapTable.tableName)
    sngl_inspiral_table = ligolw_table.get_table(
        xmldoc, lsctables.SnglInspiralTable.tableName)

    # Locate the sngl_inspiral rows that we need.
    coinc_inspiral = coinc_inspiral_table[0]
    coinc_event_id = coinc_inspiral.coinc_event_id
    event_ids = [
        coinc_map.event_id for coinc_map in coinc_map_table
        if coinc_map.coinc_event_id == coinc_event_id
    ]
    sngl_inspirals = [
        next((sngl_inspiral for sngl_inspiral in sngl_inspiral_table
              if sngl_inspiral.event_id == event_id)) for event_id in event_ids
    ]
    instruments = {sngl_inspiral.ifo for sngl_inspiral in sngl_inspirals}

    # Try to load complex SNR time series.
    snrs = ligolw.snr_series_by_sngl_inspiral_id_for_xmldoc(xmldoc)
    try:
        snrs = [snrs[sngl.event_id] for sngl in sngl_inspirals]
    except KeyError:
        snrs = None

    # Read PSDs.
    xmldoc, _ = ligolw_utils.load_fileobj(
        psd_file, contenthandler=lal.series.PSDContentHandler)
    psds = lal.series.read_psd_xmldoc(xmldoc, root_name=None)

    # Rearrange PSDs into the same order as the sngl_inspirals.
    psds = [psds[sngl_inspiral.ifo] for sngl_inspiral in sngl_inspirals]

    # Interpolate PSDs.
    psds = [
        timing.InterpolatedPSD(filter.abscissa(psd),
                               psd.data.data,
                               f_high_truncate=f_high_truncate) for psd in psds
    ]

    # Run sky localization
    prob, epoch, elapsed_time = ligolw_sky_map(
        sngl_inspirals,
        waveform,
        f_low,
        min_distance,
        max_distance,
        prior_distance_power,
        method=method,
        nside=nside,
        psds=psds,
        phase_convention=phase_convention,
        chain_dump=chain_dump,
        snr_series=snrs,
        enable_snr_series=enable_snr_series)

    return prob, epoch, elapsed_time, instruments
Code example #41
def dump_flags(ifos=None, segment_url=None, match=None, unmatch=None,\
               latest=False):
    """
    Returns the list of all flags defined in the database.

    Keyword rguments:
      ifo : [ str | list ]
        list of ifos to query, or str for single ifo
      segment_url : str 
        url of segment database, defaults to contents of S6_SEGMENT_SERVER
        environment variable
      match : [ str | regular pattern ]
        regular expression to search against returned flag names, e.g, 'UPV'
      unmatch : [ str | regular pattern ]
        regular expression to negatively search against returned flag names
  """

    if isinstance(ifos, str):
        ifos = [ifos]

    # get url
    if not segment_url:
        segment_url = os.getenv('S6_SEGMENT_SERVER')

    # open connection to LDBD(W)Server
    myClient = segmentdb_utils.setup_database(segment_url)

    # NB: 'squery' (the LDBD query string) is never defined in this snippet;
    # it is assumed to have been built from the ifos/match arguments upstream.
    reply = StringIO(myClient.query(squery))
    xmldoc, digest = utils.load_fileobj(reply)
    seg_def_table = lsctables.SegmentDefTable.get_table(xmldoc)

    # sort table by ifo, name and version
    seg_def_table.sort(key=lambda flag: (flag.ifos[0], flag.name, \
                                         flag.version), reverse=True)

    flags = lsctables.New(type(seg_def_table))

    for row in seg_def_table:

        # test re match
        if match and not re.search(match, row.name): continue

        # test re unmatch
        if unmatch and re.search(unmatch, row.name): continue

        # only append latest versions of multiple flags
        flatest = True
        if latest:
            # get all flags with same ifo and name
            vflags = [f for f in flags if row.name==f.name and\
                      row.get_ifos()==f.get_ifos()]
            # if later version in list, move on
            for f in vflags:
                if f.version >= row.version:
                    flatest = False
                    break
        if not flatest:
            continue

        # append those flags matching ifos requirement
        for ifo in ifos:
            if ifo in row.get_ifos():
                flags.append(row)
                break

    return flags
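
A hedged usage sketch: list the latest version of every H1 flag whose name matches 'UPV' (the server URL comes from the S6_SEGMENT_SERVER environment variable, as in the code above).

flags = dump_flags(ifos='H1', match='UPV', latest=True)
for flag in flags:
    print(flag.ifos, flag.name, flag.version)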