Example 1
def append_likelihood_result_to_xmldoc(xmldoc, loglikelihood, neff=0, converged=False,**cols):
    try: 
        si_table = table.get_table(xmldoc, lsctables.SnglInspiralTable.tableName)
        new_table = False
        # NOTE: MultiInspiralTable has no spin columns
        #si_table = table.get_table(xmldoc, lsctables.MultiInspiralTable.tableName)
    # Warning: This will also get triggered if there is *more* than one table
    except ValueError:
        si_table = lsctables.New(lsctables.SnglInspiralTable, sngl_valid_cols + cols.keys())
        new_table = True
        # NOTE: MultiInspiralTable has no spin columns
        #si_table = lsctables.New(lsctables.MultiInspiralTable, multi_valid_cols + cols.keys())

    # Get the process
    # FIXME: Assumed that only we have appended information
    procid = table.get_table(xmldoc, lsctables.ProcessTable.tableName)[-1].process_id
    
    # map the likelihood result to a sngl_inspiral row
    si_table.append(likelihood_to_snglinsp_row(si_table, loglikelihood, neff, converged, **cols))
    si_table[-1].process_id = procid

    if new_table:
        xmldoc.childNodes[0].appendChild(si_table)

    return xmldoc
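A minimal call sketch (hedged; the values are hypothetical, xmldoc is assumed to be an already-loaded LIGO-LW document that carries the process table row referenced above, and the module-level helpers sngl_valid_cols and likelihood_to_snglinsp_row are assumed to be in scope):

# Hypothetical usage: record one likelihood result on an existing document.
xmldoc = append_likelihood_result_to_xmldoc(xmldoc, loglikelihood=-12.3,
                                            neff=1500, converged=True)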
Example 2
def append_samples_to_xmldoc(xmldoc, sampdict):
    try: 
        si_table = table.get_table(xmldoc, lsctables.SimInspiralTable.tableName)
        new_table = False
    # Warning: This will also get triggered if there is *more* than one table
    except ValueError:
        si_table = lsctables.New(lsctables.SimInspiralTable, sim_valid_cols)
        new_table = True
    
    keys = sampdict.keys()
    # Just in case the key/value pairs don't come out synchronized
    values = numpy.array([sampdict[k] for k in keys], object)
    
    # Flatten the keys
    keys = reduce(list.__add__, [list(i) if isinstance(i, tuple) else [i] for i in keys])

    # Get the process
    # FIXME: Assumed that only we have appended information
    procid = table.get_table(xmldoc, lsctables.ProcessTable.tableName)[-1].process_id
    
    # map the samples to sim inspiral rows
    # NOTE: The list comprehension is to preserve the grouping of multiple 
    # parameters across the transpose operation. It's probably not necessary,
    # so if speed dictates, it can be reworked by flattening before arriving 
    # here
    for vrow in numpy.array(zip(*[vrow_sub.T for vrow_sub in values]), dtype=numpy.object):
        #si_table.append(samples_to_siminsp_row(si_table, **dict(zip(keys, vrow.flatten()))))
        vrow = reduce(list.__add__, [list(i) if isinstance(i, Iterable) else [i] for i in vrow])
        si_table.append(samples_to_siminsp_row(si_table, **dict(zip(keys, vrow))))
        si_table[-1].process_id = procid

    if new_table:
        xmldoc.childNodes[0].appendChild(si_table)
    return xmldoc
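A hedged usage sketch: plain keys map to single columns, while tuple keys carry grouped parameters whose values are stacked along the first axis; the column names and sample values below are illustrative only, and xmldoc is assumed to be loaded already.

import numpy
# Hypothetical posterior samples: one scalar column plus a grouped (mass1, mass2) pair.
sampdict = {
    "distance": numpy.array([100.0, 150.0, 200.0]),     # one value per sample
    ("mass1", "mass2"): numpy.array([[1.4, 1.5, 1.6],
                                     [1.4, 1.3, 1.2]]),  # shape (2, nsamples)
}
xmldoc = append_samples_to_xmldoc(xmldoc, sampdict)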
Example 3
def coinc_and_sngl_inspirals_for_xmldoc(xmldoc):
    """Retrieve (as a generator) all of the
    (sngl_inspiral, sngl_inspiral, ... sngl_inspiral) tuples from coincidences
    in a LIGO-LW XML document."""

    # Look up necessary tables.
    coinc_table = ligolw_table.get_table(xmldoc, lsctables.CoincTable.tableName)
    coinc_def_table = ligolw_table.get_table(xmldoc, lsctables.CoincDefTable.tableName)
    coinc_map_table = ligolw_table.get_table(xmldoc, lsctables.CoincMapTable.tableName)
    sngl_inspiral_table = ligolw_table.get_table(xmldoc, lsctables.SnglInspiralTable.tableName)

    # Look up coinc_def id.
    sngl_sngl_coinc_def_ids = set(row.coinc_def_id for row in coinc_def_table
        if (row.search, row.search_coinc_type) ==
        (ligolw_thinca.InspiralCoincDef.search,
        ligolw_thinca.InspiralCoincDef.search_coinc_type))

    # Indices to speed up lookups by ID.
    key = operator.attrgetter('coinc_event_id')
    coinc_maps_by_coinc_event_id = dict((coinc_event_id, tuple(coinc_maps))
        for coinc_event_id, coinc_maps
        in itertools.groupby(sorted(coinc_map_table, key=key), key=key))
    sngl_inspirals_by_event_id = dict((sngl_inspiral.event_id, sngl_inspiral)
        for sngl_inspiral in sngl_inspiral_table)

    # Loop over all sngl_inspiral <-> sngl_inspiral coincs.
    for coinc in coinc_table:
        if coinc.coinc_def_id in sngl_sngl_coinc_def_ids:
            coinc_maps = coinc_maps_by_coinc_event_id[coinc.coinc_event_id]
            yield coinc, tuple(sngl_inspirals_by_event_id[coinc_map.event_id]
                for coinc_map in coinc_maps)
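A hedged usage sketch (the file name is illustrative; depending on the glue version, load_filename may also require a contenthandler argument):

from glue.ligolw import utils as ligolw_utils
xmldoc = ligolw_utils.load_filename("coinc.xml")  # illustrative path
for coinc, sngls in coinc_and_sngl_inspirals_for_xmldoc(xmldoc):
    print(coinc.coinc_event_id, [sngl.ifo for sngl in sngls])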
Example 4
def drop_segment_tables(xmldoc, verbose = False):
    """
    Drop the segment, segment_definer & segment_summary tables from the
    xmldoc. In addition, remove the rows in the process & process_params
    tables that have process_ids found in the segment_definer table.
    """
    seg_tbl = table.get_table(xmldoc, lsctables.SegmentTable.tableName)
    seg_sum_tbl = table.get_table(xmldoc, lsctables.SegmentSumTable.tableName)
    seg_def_tbl = table.get_table(xmldoc, lsctables.SegmentDefTable.tableName)
    # determine the unique process_ids for the segment tables
    sd_pids = set(seg_def_tbl.getColumnByName("process_id"))

    if verbose:
        print >> sys.stderr, "Depopulate process tables of segment process_ids"
    remove_process_rows(xmldoc, sd_pids, verbose=verbose)

    # remove segment, segment_definer & segment_summary tables from xmldoc
    xmldoc.childNodes[0].removeChild(seg_tbl)
    seg_tbl.unlink()
    xmldoc.childNodes[0].removeChild(seg_sum_tbl)
    seg_sum_tbl.unlink()
    xmldoc.childNodes[0].removeChild(seg_def_tbl)
    seg_def_tbl.unlink()
    if verbose:
        print >> sys.stderr, "segment, segment-definer & segment-summary tables dropped from xmldoc"
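A hedged round-trip sketch showing where the function sits in a typical workflow (file names are illustrative; the contenthandler requirement depends on the glue version):

from glue.ligolw import utils as ligolw_utils
xmldoc = ligolw_utils.load_filename("with_segments.xml")     # illustrative path
drop_segment_tables(xmldoc, verbose=True)
ligolw_utils.write_filename(xmldoc, "without_segments.xml")  # illustrative path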
Example 5
def find_segments(doc, key, use_segment_table=True):
    key_pieces = key.split(":")
    while len(key_pieces) < 3:
        key_pieces.append("*")

    filter_func = (
        lambda x: str(x.ifos) == key_pieces[0]
        and (str(x.name) == key_pieces[1] or key_pieces[1] == "*")
        and (str(x.version) == key_pieces[2] or key_pieces[2] == "*")
    )

    # Find all segment definers matching the criteria
    seg_def_table = table.get_table(doc, lsctables.SegmentDefTable.tableName)
    seg_defs = filter(filter_func, seg_def_table)
    seg_def_ids = map(lambda x: str(x.segment_def_id), seg_defs)

    # Find all segments belonging to those definers
    if use_segment_table:
        seg_table = table.get_table(doc, lsctables.SegmentTable.tableName)
        seg_entries = filter(lambda x: str(x.segment_def_id) in seg_def_ids, seg_table)
    else:
        seg_sum_table = table.get_table(doc, lsctables.SegmentSumTable.tableName)
        seg_entries = filter(lambda x: str(x.segment_def_id) in seg_def_ids, seg_sum_table)

    # Combine into a segmentlist
    ret = segmentlist(map(lambda x: segment(x.start_time, x.end_time), seg_entries))

    ret.coalesce()

    return ret
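The key is an "IFO:NAME:VERSION" string and missing trailing pieces are treated as wildcards. A hedged sketch, assuming doc is an already-loaded document and using an illustrative flag name:

science = find_segments(doc, "H1:DMT-SCIENCE:1")    # exact version
science_any = find_segments(doc, "H1:DMT-SCIENCE")  # any version (wildcarded)
known = find_segments(doc, "H1:DMT-SCIENCE:*", use_segment_table=False)  # from segment_summary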
Example 6
def coinc_to_triggers(xmldoc, trigger_types):
    """ Function returns list of glitch-trigger(s) coincident events.
        Coincident event in the list is represented by tuple (glitch_object, [trigger1, trigger2, ...]).
        trigger_types is the list of trigger type names corresponding to "search" column of the sngl_burst table
    """
    # get necessary tables from xmldoc
    coinc_def_table = table.get_table(xmldoc, lsctables.CoincDefTable.tableName)
    coinc_table = table.get_table(xmldoc, lsctables.CoincTable.tableName)
    coinc_map_table = table.get_table(xmldoc, lsctables.CoincMapTable.tableName)
    idq_glitch_table = table.get_table(xmldoc, lsctables.IDQGlitchTable.tableName)
    sngl_burst_table = table.get_table(xmldoc, lsctables.SnglBurstTable.tableName)
    
    # get coinc_def_id
    #coinc_def_id = coinc_def_table.get_coinc_def_id(
    #            search = IDQCoincDef['idq_glitch<-->sngl_burst'][0],
    #            search_coinc_type = IDQCoincDef['idq_glitch<-->sngl_burst'][1],
    #            create_new = False,
    #            description = 'idq_glitch<-->sngl_burst')
    coinc_def_ids = [row.coinc_def_id for row in coinc_def_table if row.description == 'idq_glitch<-->sngl_burst']
    
    # use this id to get all coinc_event ids
    trig_coinc_ids = [coinc.coinc_event_id for coinc in coinc_table if coinc.coinc_def_id in coinc_def_ids]

    # convert idq_glitch and sngl_burst tables into dictionaries for a quick lookup
    glitches = dict([ (glitch.event_id, glitch) for glitch in idq_glitch_table])
    triggers = dict([ (row.event_id, row) for row in sngl_burst_table if row.search in trigger_types])

    # create dictionary of connected events using coinc_event_map.
    # We cannot assume any specific order of rows in the table.
    connected_events_dict = {}
    for row in coinc_map_table:
        try: connected_events_dict[row.coinc_event_id].append(row)
        except KeyError: connected_events_dict[row.coinc_event_id] = [row]
    
    glitch_table_name = lsctables.IDQGlitchTable.tableName
    sngl_burst_table_name = lsctables.SnglBurstTable.tableName    
    glitch_trig_tuples = []
    for coinc_id in trig_coinc_ids:
        # get connected events for this id
        connected_events = connected_events_dict[coinc_id]
        connected_trigs = []
        if len(connected_events) >= 2:
            for event in connected_events:
                if event.table_name == glitch_table_name:
                    glitch_event = glitches[event.event_id]
                elif  event.table_name == sngl_burst_table_name:
                    try: connected_trigs.append(triggers[event.event_id])
                    except KeyError: # no trigger with that id; it is probably of a different type.
                        pass
                else:
                    raise ValueError("Event is not the row of either " + \
                                     glitch_table_name + " or " + sngl_burst_table_name
                                     )
        else:
            raise Exception("Glitch-Triggers coincidences must contain at least 2 events. " \
                            + str(len(connected_events))+ " events are found instead."
                            )
        glitch_trig_tuples.append((glitch_event, connected_trigs))
    return glitch_trig_tuples
Example 7
def coinc_to_ovl_data(xmldoc):
    """Function returns list of (idq_glitch_object, ovl_data_object) tuples
       where objects in the tuple are mapped to each other via coinc tables.
	"""
    # get necessary tables from xmldoc
    coinc_def_table = table.get_table(xmldoc, lsctables.CoincDefTable.tableName)
    coinc_table = table.get_table(xmldoc, lsctables.CoincTable.tableName)
    coinc_map_table = table.get_table(xmldoc, lsctables.CoincMapTable.tableName)
    idq_glitch_table = table.get_table(xmldoc, lsctables.IDQGlitchTable.tableName)
    ovl_data_table = table.get_table(xmldoc, lsctables.OVLDataTable.tableName)
    
    # get coinc_def_ids
    #coinc_def_id = coinc_def_table.get_coinc_def_id(
    #            search = IDQCoincDef['idq_glitch<-->ovl_data'][0],
    #            search_coinc_type = IDQCoincDef['idq_glitch<-->ovl_data'][1],
    #            create_new = False,
    #            description = 'idq_glitch<-->ovl_data')
    coinc_def_ids = [row.coinc_def_id for row in coinc_def_table if row.description == 'idq_glitch<-->ovl_data']
    
    # use this id to get all coinc_event ids
    ovl_coinc_ids = [coinc.coinc_event_id for coinc in coinc_table if coinc.coinc_def_id in coinc_def_ids]

    # convert idq_glitch and ovl tables into dictionaries for a quick lookup
    glitches = dict([(glitch.event_id, glitch) for glitch in idq_glitch_table])
    ovl_data = dict([(row.event_id, row) for row in ovl_data_table])
    
    # create dictionary of connected events in coinc_event_map.
    # We cannot assume any specific order of rows in the table.
    connected_events_dict = {}
    for row in coinc_map_table:
        try: connected_events_dict[row.coinc_event_id].append(row)
        except KeyError: connected_events_dict[row.coinc_event_id] = [row]
    
    glitch_table_name = lsctables.IDQGlitchTable.tableName
    ovl_data_table_name = lsctables.OVLDataTable.tableName
     
    glitch_ovl_pairs = []
    for coinc_id in ovl_coinc_ids:
        # get connected events for this id
        connected_events = connected_events_dict[coinc_id]
        if len(connected_events) == 2:
            for event in connected_events:
                if event.table_name == glitch_table_name:
                    glitch_event = glitches[event.event_id]
                elif  event.table_name == ovl_data_table_name:
                    ovl_event = ovl_data[event.event_id]
                else:
                    print event.table_name
                    raise ValueError("Event is not the row of either " + \
                                     glitch_table_name + \
                                     " or " + ovl_data_table_name
                                     )
        else:
            raise Exception("Glitch-OVL coincidence must contain exactly 2 events. "\
                            + str(len(connected_events))+ " events are found instead."
                            )
        glitch_ovl_pairs.append((glitch_event, ovl_event))
    return glitch_ovl_pairs
Example 8
def fromsegmentxml(file, dict=False, id=None):

  """
    Read a glue.segments.segmentlist from the file object file containing an
    xml segment table.

    Arguments:

      file : file object
        file object for segment xml file

    Keyword Arguments:

      dict : [ True | False ]
        returns a glue.segments.segmentlistdict containing coalesced
        glue.segments.segmentlists keyed by seg_def.name for each entry in the
        contained segment_def_table. Default False
      id : int
        returns a glue.segments.segmentlist object containing only those
        segments matching the given segment_def_id integer
        
  """

  # load xmldocument and SegmentDefTable and SegmentTables
  xmldoc, digest = utils.load_fileobj(file, gz=file.name.endswith(".gz"))
  seg_def_table  = table.get_table(xmldoc, lsctables.SegmentDefTable.tableName)
  seg_table      = table.get_table(xmldoc, lsctables.SegmentTable.tableName)

  if dict:
    segs = segments.segmentlistdict()
  else:
    segs = segments.segmentlist()

  seg_id = {}
  for seg_def in seg_def_table:
    seg_id[int(seg_def.segment_def_id)] = str(seg_def.name)
    if dict:
      segs[str(seg_def.name)] = segments.segmentlist()

  for seg in seg_table:
    if dict:
      segs[seg_id[int(seg.segment_def_id)]]\
          .append(segments.segment(seg.start_time, seg.end_time))
      continue
    if id is not None and int(seg.segment_def_id) != id:
      continue
    segs.append(segments.segment(seg.start_time, seg.end_time))

  if dict:
    for seg_name in seg_id.values():
      segs[seg_name] = segs[seg_name].coalesce()
  else:
    segs = segs.coalesce()

  xmldoc.unlink()

  return segs
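A hedged usage sketch; load_fileobj consumes the file object, so it is re-opened between the two calls, and the path is illustrative:

segfile = open("segments.xml", "r")           # illustrative path
all_segs = fromsegmentxml(segfile)            # one coalesced segmentlist
segfile = open("segments.xml", "r")
by_name = fromsegmentxml(segfile, dict=True)  # segmentlistdict keyed by flag name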
Example 9
def gracedb_sky_map(
        coinc_file, psd_file, waveform, f_low, min_distance=None,
        max_distance=None, prior_distance_power=None, nside=-1):
    # LIGO-LW XML imports.
    from . import ligolw
    from glue.ligolw import table as ligolw_table
    from glue.ligolw import utils as ligolw_utils
    from glue.ligolw import lsctables
    import lal.series

    # Determine approximant, amplitude order, and phase order from command line arguments.
    approximant, amplitude_order, phase_order = \
        timing.get_approximant_and_orders_from_string(waveform)

    # Read input file.
    xmldoc, _ = ligolw_utils.load_fileobj(
        coinc_file, contenthandler=ligolw.LSCTablesContentHandler)

    # Locate the tables that we need.
    coinc_inspiral_table = ligolw_table.get_table(xmldoc,
        lsctables.CoincInspiralTable.tableName)
    coinc_map_table = ligolw_table.get_table(xmldoc,
        lsctables.CoincMapTable.tableName)
    sngl_inspiral_table = ligolw_table.get_table(xmldoc,
        lsctables.SnglInspiralTable.tableName)

    # Locate the sngl_inspiral rows that we need.
    coinc_inspiral = coinc_inspiral_table[0]
    coinc_event_id = coinc_inspiral.coinc_event_id
    event_ids = [coinc_map.event_id for coinc_map in coinc_map_table
        if coinc_map.coinc_event_id == coinc_event_id]
    sngl_inspirals = [(sngl_inspiral for sngl_inspiral in sngl_inspiral_table
        if sngl_inspiral.event_id == event_id).next() for event_id in event_ids]
    instruments = set(sngl_inspiral.ifo for sngl_inspiral in sngl_inspirals)

    # Read PSDs.
    if psd_file is None:
        psds = None
    else:
        xmldoc, _ = ligolw_utils.load_fileobj(
            psd_file, contenthandler=lal.series.PSDContentHandler)
        psds = lal.series.read_psd_xmldoc(xmldoc)

        # Rearrange PSDs into the same order as the sngl_inspirals.
        psds = [psds[sngl_inspiral.ifo] for sngl_inspiral in sngl_inspirals]

        # Interpolate PSDs.
        psds = [timing.InterpolatedPSD(filter.abscissa(psd), psd.data.data)
            for psd in psds]

    # TOA+SNR sky localization
    prob, epoch, elapsed_time = ligolw_sky_map(sngl_inspirals, approximant,
        amplitude_order, phase_order, f_low,
        min_distance, max_distance, prior_distance_power,
        nside=nside, psds=psds)

    return prob, epoch, elapsed_time, instruments
Example 10
def gracedb_sky_map(
        coinc_file, psd_file, waveform, f_low, min_distance=None,
        max_distance=None, prior_distance_power=None,
        method="toa_phoa_snr", nside=-1, chain_dump=None,
        phase_convention='antifindchirp', f_high_truncate=1.0,
        enable_snr_series=False):
    # Read input file.
    xmldoc, _ = ligolw_utils.load_fileobj(
        coinc_file, contenthandler=ligolw.LSCTablesAndSeriesContentHandler)

    # Locate the tables that we need.
    coinc_inspiral_table = ligolw_table.get_table(xmldoc,
        lsctables.CoincInspiralTable.tableName)
    coinc_map_table = ligolw_table.get_table(xmldoc,
        lsctables.CoincMapTable.tableName)
    sngl_inspiral_table = ligolw_table.get_table(xmldoc,
        lsctables.SnglInspiralTable.tableName)

    # Locate the sngl_inspiral rows that we need.
    coinc_inspiral = coinc_inspiral_table[0]
    coinc_event_id = coinc_inspiral.coinc_event_id
    event_ids = [coinc_map.event_id for coinc_map in coinc_map_table
        if coinc_map.coinc_event_id == coinc_event_id]
    sngl_inspirals = [next((sngl_inspiral for sngl_inspiral in sngl_inspiral_table
        if sngl_inspiral.event_id == event_id)) for event_id in event_ids]
    instruments = {sngl_inspiral.ifo for sngl_inspiral in sngl_inspirals}

    # Try to load complex SNR time series.
    snrs = ligolw.snr_series_by_sngl_inspiral_id_for_xmldoc(xmldoc)
    try:
        snrs = [snrs[sngl.event_id] for sngl in sngl_inspirals]
    except KeyError:
        snrs = None

    # Read PSDs.
    xmldoc, _ = ligolw_utils.load_fileobj(
        psd_file, contenthandler=lal.series.PSDContentHandler)
    psds = lal.series.read_psd_xmldoc(xmldoc, root_name=None)

    # Rearrange PSDs into the same order as the sngl_inspirals.
    psds = [psds[sngl_inspiral.ifo] for sngl_inspiral in sngl_inspirals]

    # Interpolate PSDs.
    psds = [timing.InterpolatedPSD(filter.abscissa(psd), psd.data.data,
            f_high_truncate=f_high_truncate)
        for psd in psds]

    # Run sky localization
    prob, epoch, elapsed_time = ligolw_sky_map(sngl_inspirals, waveform, f_low,
        min_distance, max_distance, prior_distance_power, method=method,
        nside=nside, psds=psds, phase_convention=phase_convention,
        chain_dump=chain_dump, snr_series=snrs,
        enable_snr_series=enable_snr_series)

    return prob, epoch, elapsed_time, instruments
Example 11
  def get_coincs_from_coinctable(self,files):
    """
    read data from coinc tables (xml format)
    
    FIXME: currently assumes one coinc per file!!!
    """
    for file in files:
      coinc = CoincData()
      xmldoc = utils.load_filename(file)
      sngltab = tab.get_table(xmldoc,lsctables.SnglInspiralTable.tableName)
      coinc.set_snr(dict((row.ifo, row.snr) for row in sngltab))
      coinc.set_gps(dict((row.ifo, LIGOTimeGPS(row.get_end())) for row in sngltab))
      #FIXME: this is put in place to deal with eff_distance = 0
      # needs to be fixed upstream in the pipeline
      effDs = list((row.ifo,row.eff_distance) for row in sngltab)
      for eD in effDs:
        if eD[1] == 0.:
          effDs.append((eD[0],1.))
          effDs.remove(eD)
      coinc.set_effDs(dict(effDs))
#      coinc.set_effDs(dict((row.ifo,row.eff_distance) for row in sngltab))
      coinc.set_masses(dict((row.ifo, row.mass1) for row in sngltab), \
                       dict((row.ifo, row.mass2) for row in sngltab))
      ctab = tab.get_table(xmldoc,lsctables.CoincInspiralTable.tableName)
      #FIXME: ignoring H2 for now, but should be dealt in a better way
      allifos = list(ctab[0].get_ifos())
      try:
        allifos.remove('H2')
      except ValueError:
        pass
      coinc.set_ifos(allifos)
      if ctab[0].false_alarm_rate is not None:
        coinc.set_FAR(ctab[0].false_alarm_rate)

      try:
        simtab = tab.get_table(xmldoc,lsctables.SimInspiralTable.tableName)
        row = simtab[0]
        effDs_inj = {}
        for ifo in coinc.ifo_list:
          if ifo == 'H1':
            effDs_inj[ifo] = row.eff_dist_h
          elif ifo == 'L1':
            effDs_inj[ifo] = row.eff_dist_l
          elif ifo == 'V1':
            effDs_inj[ifo] = row.eff_dist_v
        dist_inj = row.distance
        coinc.set_inj_params(row.latitude,row.longitude,row.mass1,row.mass2, \
                             dist_inj,effDs_inj)
        coinc.is_injection = True
      #FIXME: name the exception!
      except:
        pass

      self.append(coinc)
Example 12
def ligolw_copy_process(xmldoc_src, xmldoc_dest):
    """
    We want to copy over process and process_params tables to eventually
    merge them.
    """
    proc = table.get_table(xmldoc_src, lsctables.ProcessTable.tableName)
    pp = table.get_table(xmldoc_src, lsctables.ProcessParamsTable.tableName)

    xmldoc_dest.appendChild(ligolw.LIGO_LW())
    xmldoc_dest.childNodes[-1].appendChild(proc)
    xmldoc_dest.childNodes[-1].appendChild(pp)
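A hedged sketch of the intended use: copy the bookkeeping tables out of a loaded document into a fresh one before adding new content (the path is illustrative):

from glue.ligolw import ligolw
from glue.ligolw import utils as ligolw_utils
xmldoc_src = ligolw_utils.load_filename("input.xml")  # illustrative path
xmldoc_dest = ligolw.Document()
ligolw_copy_process(xmldoc_src, xmldoc_dest)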
Example 13
def get_segment_summary_times(scienceFile, segmentName):
    """
    This function will find the times for which the segment_summary is set
    for the flag given by segmentName.

    Parameters
    -----------
    scienceFile : SegFile
        The segment file that we want to use to determine this.
    segmentName : string
        The DQ flag to search for times in the segment_summary table.

    Returns
    ---------
    summSegList : glue.segments.segmentlist
        The times that are covered in the segment summary table.
    """
    # Parse the segmentName
    segmentName = segmentName.split(':')
    if not len(segmentName) in [2,3]:
        raise ValueError("Invalid channel name %s." %(segmentName))
    ifo = segmentName[0]
    channel = segmentName[1]
    version = ''
    if len(segmentName) == 3:
        version = int(segmentName[2])

    # Load the filename
    xmldoc = utils.load_filename(scienceFile.cache_entry.path,
                             gz=scienceFile.cache_entry.path.endswith("gz"),
                             contenthandler=ContentHandler)

    # Get the segment_def_id for the segmentName
    segmentDefTable = table.get_table(xmldoc, "segment_definer")
    for entry in segmentDefTable:
        if (entry.ifos == ifo) and (entry.name == channel):
            if len(segmentName) == 2 or (entry.version==version):
                segDefID = entry.segment_def_id
                break
    else:
        raise ValueError("Cannot find channel %s in segment_definer table."\
                         %(segmentName))

    # Get the segmentlist corresponding to this segmentName in segment_summary
    segmentSummTable = table.get_table(xmldoc, "segment_summary")
    summSegList = segments.segmentlist([])
    for entry in segmentSummTable:
        if entry.segment_def_id == segDefID:
            segment = segments.segment(entry.start_time, entry.end_time)
            summSegList.append(segment)
    summSegList.coalesce()

    return summSegList
Example 14
def gracedb_sky_map(
        coinc_file, psd_file, waveform, f_low, min_distance=None,
        max_distance=None, prior_distance_power=None,
        method="toa_phoa_snr", nside=-1, chain_dump=None,
        phase_convention='antifindchirp', f_high_truncate=1.0):
    # LIGO-LW XML imports.
    from . import ligolw
    from glue.ligolw import table as ligolw_table
    from glue.ligolw import utils as ligolw_utils
    from glue.ligolw import lsctables
    import lal.series

    # Read input file.
    xmldoc, _ = ligolw_utils.load_fileobj(
        coinc_file, contenthandler=ligolw.LSCTablesContentHandler)

    # Locate the tables that we need.
    coinc_inspiral_table = ligolw_table.get_table(xmldoc,
        lsctables.CoincInspiralTable.tableName)
    coinc_map_table = ligolw_table.get_table(xmldoc,
        lsctables.CoincMapTable.tableName)
    sngl_inspiral_table = ligolw_table.get_table(xmldoc,
        lsctables.SnglInspiralTable.tableName)

    # Locate the sngl_inspiral rows that we need.
    coinc_inspiral = coinc_inspiral_table[0]
    coinc_event_id = coinc_inspiral.coinc_event_id
    event_ids = [coinc_map.event_id for coinc_map in coinc_map_table
        if coinc_map.coinc_event_id == coinc_event_id]
    sngl_inspirals = [next((sngl_inspiral for sngl_inspiral in sngl_inspiral_table
        if sngl_inspiral.event_id == event_id)) for event_id in event_ids]
    instruments = set(sngl_inspiral.ifo for sngl_inspiral in sngl_inspirals)

    # Read PSDs.
    xmldoc, _ = ligolw_utils.load_fileobj(
        psd_file, contenthandler=lal.series.PSDContentHandler)
    psds = lal.series.read_psd_xmldoc(xmldoc)

    # Rearrange PSDs into the same order as the sngl_inspirals.
    psds = [psds[sngl_inspiral.ifo] for sngl_inspiral in sngl_inspirals]

    # Interpolate PSDs.
    psds = [timing.InterpolatedPSD(filter.abscissa(psd), psd.data.data,
            f_high_truncate=f_high_truncate)
        for psd in psds]

    # Run sky localization
    prob, epoch, elapsed_time = ligolw_sky_map(sngl_inspirals, waveform, f_low,
        min_distance, max_distance, prior_distance_power, method=method,
        nside=nside, psds=psds, phase_convention=phase_convention,
        chain_dump=chain_dump)

    return prob, epoch, elapsed_time, instruments
Example 15
def do_summary_table(xmldoc, sim_tree, liv_tree):
  try: 
    search_summary = table.get_table(xmldoc, lsctables.SearchSummaryTable.tableName)
  except ValueError:
    search_summary = lsctables.New(lsctables.SearchSummaryTable,
    ["process_id", "nevents", "ifos", "comment", "in_start_time",
    "in_start_time_ns", "out_start_time", "out_start_time_ns",
    "in_end_time", "in_end_time_ns", "out_end_time", "out_end_time_ns"])
    xmldoc.childNodes[0].appendChild(search_summary)

  process_id_type = type(table.get_table(xmldoc, lsctables.ProcessTable.tableName).next_id)

  runids = set()
  for i in range(0, sim_tree.GetEntries()) :
    sim_tree.GetEntry(i)

    # Id for the run processed by WaveBurst -> process ID
    if sim_tree.run in runids :
      continue

    row = search_summary.RowType()
    row.process_id = process_id_type(sim_tree.run)
    runids.add(sim_tree.run)

    # Search Summary Table
    # events found in the run -> nevents
    setattr(row, "nevents", sim_tree.GetEntries())

    # Instruments involved in the search
    row.ifos = lsctables.ifos_from_instrument_set( get_ifos_from_index( branch_array_to_list ( sim_tree.ifo, sim_tree.ndim ) ) )
    setattr(row, "comment", "waveburst")

    # Begin and end time of the segment
    # TODO: This is a typical offset on either side of the job for artifacts
    # It can, and probably will, change in the future, and should not be hardcoded
    # TODO: Make this work properly. We need a GPS end from the livetime
    waveoffset = 8
    livetime = 600
    #live_entries = live_tree.GetEntries()
    # This is WAAAAAAAAAAAAAY too slow
    #for l in range(0, live_entries):
      #liv_tree.GetEntry(l)
      #livetime = max(livetime, liv_tree.live)

    #if livetime < 0:
      #sys.exit("Could not find livetime, cannot fill all of summary table.")
    # in -- with waveoffset
    # out -- without waveoffset
    row.set_in(segments.segment(LIGOTimeGPS(sim_tree.gps - waveoffset), LIGOTimeGPS(sim_tree.gps + livetime + waveoffset)))
    row.set_out(segments.segment(LIGOTimeGPS(sim_tree.gps), LIGOTimeGPS(sim_tree.gps + livetime)))

    search_summary.append(row)
Example 16
  def do_summary_table_from_segment(self, xmldoc, segment, sim_tree, jobid=-1):
    """
    Create the search_summary table for a cWB from a segment specified from the command line. The function will try to determine the proper job intervals from the waveoffset, if specified.
    """
  
    try: 
      search_summary = table.get_table(xmldoc, lsctables.SearchSummaryTable.tableName)
    except ValueError:
      search_summary = lsctables.New(lsctables.SearchSummaryTable,
      ["process_id", "nevents", "ifos", "comment", "in_start_time",
      "in_start_time_ns", "out_start_time", "out_start_time_ns",
      "in_end_time", "in_end_time_ns", "out_end_time", "out_end_time_ns"])
      xmldoc.childNodes[0].appendChild(search_summary)
  
    process_id_type = type(table.get_table(xmldoc, lsctables.ProcessTable.tableName).next_id)
  
    sim_tree.GetEntry(0)

    if jobid < 0:
      run = sim_tree.run
    else:
      run = jobid
    seg = segment
  
    # Search Summary Table
    # events found in the run -> nevents
    row = search_summary.RowType()
    row.process_id = process_id_type(run)
    row.nevents = sim_tree.GetEntries()

    ifos = lsctables.ifos_from_instrument_set( get_ifos_from_index( branch_array_to_list ( sim_tree.ifo, sim_tree.ndim ) ) )
    # Instruments involved in the search
    if ifos is None or len(ifos) == 0:
        if( self.instruments ):
            ifos = self.instruments
        else: # Not enough information to completely fill out the table
            sys.exit("Found a job with no IFOs on, or not enough to determine IFOs. Try specifying instruments directly.")

    row.ifos = ifos
    row.comment = "waveburst"
  
    # Begin and end time of the segment
    waveoffset = self.waveoffset
    if waveoffset is None: waveoffset = 0
  
    # in -- with waveoffset
    row.set_in(seg)
    # out -- without waveoffset
    waveoffset = LIGOTimeGPS(waveoffset)
    row.set_out(segments.segment(seg[0]+waveoffset, seg[1]-waveoffset))
    search_summary.append(row)
Example 17
def sim_coinc_and_sngl_inspirals_for_xmldoc(xmldoc):
    """Retrieve (as a generator) all of the
    (sim_inspiral, coinc_event, (sngl_inspiral, sngl_inspiral, ... sngl_inspiral)
    tuples from found coincidences in a LIGO-LW XML document."""

    # Look up necessary tables.
    coinc_table = ligolw_table.get_table(xmldoc, lsctables.CoincTable.tableName)
    coinc_def_table = ligolw_table.get_table(xmldoc, lsctables.CoincDefTable.tableName)
    coinc_map_table = ligolw_table.get_table(xmldoc, lsctables.CoincMapTable.tableName)

    # Look up coinc_def ids.
    sim_coinc_def_id = coinc_def_table.get_coinc_def_id(
        ligolw_inspinjfind.InspiralSCExactCoincDef.search,
        ligolw_inspinjfind.InspiralSCExactCoincDef.search_coinc_type,
        create_new=False)

    def events_for_coinc_event_id(coinc_event_id):
        for coinc_map in coinc_map_table:
            if coinc_map.coinc_event_id == coinc_event_id:
                for row in ligolw_table.get_table(xmldoc, coinc_map.table_name):
                    column_name = coinc_map.event_id.column_name
                    if getattr(row, column_name) == coinc_map.event_id:
                        yield coinc_map.event_id, row

    # Loop over all coinc_event <-> sim_inspiral coincs.
    for sim_coinc in coinc_table:

        # If this is not a coinc_event <-> sim_inspiral coinc, skip it.
        if sim_coinc.coinc_def_id != sim_coinc_def_id:
            continue

        # Locate the sim_inspiral and coinc events.
        sim_inspiral = None
        coinc = None
        for event_id, event in events_for_coinc_event_id(sim_coinc.coinc_event_id):
            if event_id.table_name == ligolw_table.StripTableName(lsctables.SimInspiralTable.tableName):
                if sim_inspiral is not None:
                    raise RuntimeError("Found more than one matching sim_inspiral entry")
                sim_inspiral = event
            elif event_id.table_name == ligolw_table.StripTableName(lsctables.CoincTable.tableName):
                if coinc is not None:
                    raise RuntimeError("Found more than one matching coinc entry")
                coinc = event
            else:
                raise RuntimeError("Did not expect coincidence to contain an event of type '%s'" % event_id.table_name)

        sngl_inspirals = tuple(event
            for event_id, event in events_for_coinc_event_id(coinc.coinc_event_id))

        yield sim_inspiral, coinc, sngl_inspirals
Example 18
def generate_experiment_tables(xmldoc, **cmdline_opts):
    """
    Create or adds entries to the experiment table and experiment_summ
    table using instruments pulled from the search summary table and
    offsets pulled from the time_slide table.
    """

    if cmdline_opts["verbose"]:
        print >> sys.stderr, "Populating the experiment and experiment_summary tables using " + \
            "search_summary and time_slide tables..."

    # Get the instruments that were on
    instruments = get_on_instruments(xmldoc, cmdline_opts["trigger_program"])

    # find the experiment & experiment_summary table or create one if needed
    try:
        table.get_table(xmldoc, lsctables.ExperimentSummaryTable.tableName)
    except ValueError:
        xmldoc.childNodes[0].appendChild(lsctables.New(lsctables.ExperimentSummaryTable))

    try:
        table.get_table(xmldoc, lsctables.ExperimentTable.tableName)
    except ValueError:
        xmldoc.childNodes[0].appendChild(lsctables.New(lsctables.ExperimentTable))

    # Populate the experiment table
    experiment_ids = populate_experiment_table(
        xmldoc,
        cmdline_opts["search_group"],
        cmdline_opts["trigger_program"],
        cmdline_opts["lars_id"],
        instruments,
        comments = cmdline_opts["comment"],
        add_inst_subsets = True,
        verbose = cmdline_opts["verbose"]
    )

    # Get the time_slide table as dict
    time_slide_dict = table.get_table(xmldoc, lsctables.TimeSlideTable.tableName).as_dict()

    # Populate the experiment_summary table
    for instruments in experiment_ids:
        populate_experiment_summ_table(
            xmldoc,
            experiment_ids[instruments],
            time_slide_dict,
            cmdline_opts["vetoes_name"],
            verbose = cmdline_opts["verbose"]
        )
Example 19
def get_tables(doc):
  snglinspiraltable = table.get_table(
    doc, lsctables.SnglInspiralTable.tableName)

  input_times = None
  output_times = None
  try:
    searchsummtable = table.get_table(
      doc, lsctables.SearchSummaryTable.tableName)
    input_times = searchsummtable.get_inlist().extent()
    output_times = searchsummtable.get_outlist().extent()
  except ValueError:
    pass
    
  return input_times, output_times, snglinspiraltable
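A hedged usage sketch (illustrative path; input_times and output_times stay None when the document has no search_summary table):

from glue.ligolw import utils as ligolw_utils
doc = ligolw_utils.load_filename("triggers.xml")  # illustrative path
input_times, output_times, sngls = get_tables(doc)
print("%d triggers, analyzed span %s" % (len(sngls), output_times))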
Example 20
 def events_for_coinc_event_id(coinc_event_id):
     for coinc_map in coinc_map_table:
         if coinc_map.coinc_event_id == coinc_event_id:
             for row in ligolw_table.get_table(xmldoc, coinc_map.table_name):
                 column_name = coinc_map.event_id.column_name
                 if getattr(row, column_name) == coinc_map.event_id:
                     yield coinc_map.event_id, row
Example 21
def write_coinc_tables( vetotrigs, xmldoc, refchannel, twind, time_slide_id=None):
	"""
	Write a set of coinc tables for this round. We only write coincidences for coincs with refchannel. Note: This is probably gonna be slow... aaaand that's why we implemented the real algorithm in C.
	"""
	# Retrieve process information
	process = [ p for p in table.get_table( xmldoc, lsctables.ProcessTable.tableName ) if p.program == "laldetchar-hveto" ][0]
	process_id = process.process_id

	# Insert a time slide ID. It's not yet really necessary
	if time_slide_id is None:
		timeslidetable = lsctables.New(lsctables.TimeSlideTable)
		time_slide = timeslidetable.RowType()
		time_slide.process_id = process_id
		time_slide.time_slide_id = time_slide_id = ilwd.ilwdchar( "time_slide:time_slide_id:0" )
		time_slide.instrument = opt.instrument
		time_slide.offset = 0.0
		timeslidetable.append(time_slide)
		xmldoc.childNodes[0].appendChild( timeslidetable )

	# Set up coinc tables
	coinc_def = HVetoBBCoincDef
	coincdeftable = lsctables.New(lsctables.CoincDefTable)
	coinc_def.coinc_def_id = coinc_def_id = coincdeftable.get_next_id()
	coincdeftable.append( coinc_def )
	xmldoc.childNodes[0].appendChild( coincdeftable )

	coinc_def = HVetoCoincTables( xmldoc )
	reftrigs = [ (segment( sb.get_peak()-twind/2.0, sb.get_peak()+twind/2.0 ), sb) for sb in vetotrigs if sb.channel == refchannel ]
	for vt in vetotrigs:
		if vt.channel == refchannel:
			continue
		for (s, t) in reftrigs:
			if vt.get_peak() in s:
				coinc_def.append_coinc( process_id, time_slide_id, coinc_def_id, (t, vt))
	return xmldoc
Example 22
def add_to_segment_summary_ns(xmldoc, proc_id, seg_def_id, sgmtlist, comment=''):
    try:
        seg_sum_table = table.get_table(xmldoc, lsctables.SegmentSumTable.tableName)
    except ValueError:
        seg_sum_table = lsctables.New(lsctables.SegmentSumTable, columns = ["process_id", "segment_def_id", "segment_sum_id", "start_time", "start_time_ns", "end_time", "end_time_ns", "comment"])
        xmldoc.childNodes[0].appendChild(seg_sum_table)

    for seg in sgmtlist:
        segment_sum                = lsctables.SegmentSum()
        segment_sum.process_id     = proc_id
        segment_sum.segment_def_id = seg_def_id
        segment_sum.segment_sum_id = seg_sum_table.get_next_id()
        seconds,nanoseconds=output_microseconds(seg[0])
        segment_sum.start_time     = seconds
        segment_sum.start_time_ns  = nanoseconds
        seconds,nanoseconds=output_microseconds(seg[1])
        segment_sum.end_time       = seconds
        segment_sum.end_time_ns    = nanoseconds
        #segment_sum.start_time     = seg[0]
        #segment_sum.start_time_ns  = 0
        #segment_sum.end_time       = seg[1]
        #segment_sum.end_time_ns    = 0
        segment_sum.comment        = comment

        seg_sum_table.append(segment_sum)
Example 23
def loadSingleBurst( files, trigs_dict=None):
    """
    loads snglburst tables (produced by Omicron) into trgdict object
    files - is the list of file names
    """
    if type(files) is str:
        files = [files]
    if trigs_dict is None:
        trigs_dict = trigdict()
    for file in files:
        for row in table.get_table( ligolw_utils.load_filename(file, contenthandler=lsctables.use_in(ligolw.LIGOLWContentHandler)), lsctables.SnglBurstTable.tableName ):
            channel = "%s-%s_%s"%(row.ifo, row.channel.replace("-","_"), row.search)
            tcent = row.peak_time + 1e-9*row.peak_time_ns
            tstart = row.start_time + 1e-9*row.start_time_ns
            dur = row.duration
            fpeak = row.peak_frequency
            fcent = row.central_freq
            bndwth = row.bandwidth
            amp = row.amplitude
            snr = row.snr
            conf = row.confidence
            chi2 = row.chisq
            chi2_dof = row.chisq_dof

            trigger = [tstart, dur, tcent, fpeak, fcent, bndwth, amp, snr, conf, chi2, chi2_dof]

            if channel in trigs_dict.channels():
                trigs_dict[channel].append( trigger ) ### SingleBurst trigger structure
            else:
                trigs_dict[channel] = [ trigger ]

    return trigs_dict
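A hedged usage sketch (the file name is illustrative; the returned object maps channel names to lists of trigger rows):

trigs = loadSingleBurst(["H1-Omicron-triggers.xml"])  # illustrative file name
for channel in trigs.channels():
    print("%s: %d triggers" % (channel, len(trigs[channel])))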
Example 24
def get_on_instruments(xmldoc, trigger_program):
    process_tbl = table.get_table(xmldoc, lsctables.ProcessTable.tableName)
    instruments = set([])
    for row in process_tbl:
        if row.program == trigger_program:
            instruments.add(row.ifos)
    return instruments
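A hedged call sketch (the trigger program name is illustrative; it must match the program column written into the process table by the trigger generator, and xmldoc is assumed to be loaded already):

instruments = get_on_instruments(xmldoc, "inspiral")  # e.g. set(["H1", "L1"])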
Example 25
def append_summ_vars(xmldoc, procid, **summvars):
    """
  Append round information in the form of SummVars.
  """

    try:
        summtable = table.get_table(xmldoc, lsctables.SearchSummVarsTable.tableName)
        procid = summtable[0].process_id
    except ValueError:
        summtable = lsctables.New(lsctables.SearchSummVarsTable, lsctables.SearchSummVarsTable.validcolumns.keys())
        xmldoc.childNodes[0].appendChild(summtable)

    for name, value in summvars.iteritems():
        summvar = summtable.RowType()
        summvar.name = name
        if isinstance(value, str):
            summvar.string = str(value)
            summvar.value = -1.0
        else:
            summvar.string = str(value)
            summvar.value = float(value)
        summvar.process_id = procid
        summvar.search_summvar_id = summtable.get_next_id()
        summtable.append(summvar)

Example 26
def add_to_segment(xmldoc, proc_id, seg_def_id, sgmtlist):
    try:
        segtable = table.get_table(xmldoc, lsctables.SegmentTable.tableName)
    except ValueError:
        segtable = lsctables.New(
            lsctables.SegmentTable,
            columns=[
                "process_id",
                "segment_def_id",
                "segment_id",
                "start_time",
                "start_time_ns",
                "end_time",
                "end_time_ns",
            ],
        )
        xmldoc.childNodes[0].appendChild(segtable)

    for seg in sgmtlist:
        segment = lsctables.Segment()
        segment.process_id = proc_id
        segment.segment_def_id = seg_def_id
        segment.segment_id = segtable.get_next_id()
        segment.start_time = seg[0]
        segment.start_time_ns = 0
        segment.end_time = seg[1]
        segment.end_time_ns = 0

        segtable.append(segment)
Example 27
 def getAuxChannels(self, inputList):
     intermediateTable = {
         "type": [],
         "ifo": [],
         "qscan_time": [],
         "qscan_dir": [],
         "channel_name": [],
         "peak_time": [],
         "peak_frequency": [],
         "peak_q": [],
         "peak_significance": [],
         "peak_amplitude": [],
     }
     try:
         doc = utils.load_filename(
             inputList[0] + "/summary.xml", verbose=True, gz=False, xmldoc=None, contenthandler=None
         )
         qscanTable = table.get_table(doc, "qscan:summary:table")
     except:
         print >> sys.stderr, "failed to read" + inputList[0] + "/summary.xml"
         return intermediateTable
     for channel in qscanTable:
         for param in self.paramMaps:
             intermediateTable[param[0]].append(eval("channel." + param[1]))
         intermediateTable["qscan_dir"].append(inputList[0])
         # if len(inputList) == 4:
         intermediateTable["qscan_time"].append(inputList[1])
         intermediateTable["type"].append(inputList[2])
         intermediateTable["ifo"].append(inputList[3])
     return intermediateTable
Example 28
def add_to_segment_summary(xmldoc, proc_id, seg_def_id, sgmtlist, comment=""):
    try:
        seg_sum_table = table.get_table(xmldoc, lsctables.SegmentSumTable.tableName)
    except ValueError:
        seg_sum_table = lsctables.New(
            lsctables.SegmentSumTable,
            columns=[
                "process_id",
                "segment_def_id",
                "segment_sum_id",
                "start_time",
                "start_time_ns",
                "end_time",
                "end_time_ns",
                "comment",
            ],
        )
        xmldoc.childNodes[0].appendChild(seg_sum_table)

    for seg in sgmtlist:
        segment_sum = lsctables.SegmentSum()
        segment_sum.process_id = proc_id
        segment_sum.segment_def_id = seg_def_id
        segment_sum.segment_sum_id = seg_sum_table.get_next_id()
        segment_sum.start_time = seg[0]
        segment_sum.start_time_ns = 0
        segment_sum.end_time = seg[1]
        segment_sum.end_time_ns = 0
        segment_sum.comment = comment

        seg_sum_table.append(segment_sum)
Example 29
def check_segment_availability(grb_name, grb_time, query_start, query_end, offset, ifo, segmentName):
  '''
  Searches +/- offset around the GRB time, downloads the latest segment lists, then extracts the times and writes them to a txt file.
  '''
  args = {'grb_name'    : grb_name,
          'query_start' : query_start,
          'query_end'   : query_end,
          'ifo'         : ifo,
          'segmentName' : segmentName}
  cmd  = "ligolw_segment_query --database --query-segments --include-segments '{segmentName}' --gps-start-time {query_start} --gps-end-time {query_end} > ./segments{ifo}_grb{grb_name}.xml".format(**args)
  print '>>',cmd
  print
  process    = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
  output,err = process.communicate()

  # try to open the file
  try:
    doc = utils.load_filename("segments{ifo}_grb{grb_name}.xml".format(**args), contenthandler = lsctables.use_in(ligolw.LIGOLWContentHandler))
  except:
    raise IOError, "Error reading file: segments{ifo}_grb{grb_name}.xml".format(**args)

  # extract the segment list from segment:table and store in a txt file
  segs = table.get_table(doc, "segment")
  seglist = segments.segmentlist(segments.segment(s.start_time, s.end_time) for s in segs)
  segmentsUtils.tosegwizard(file("{ifo}-science_grb{grb_name}.txt".format(**args),'w'),seglist,header = True)

  print ">> %s segments +/-%ds from %ds found:"%(ifo,offset,grb_time)
  for s in segs:
    print "Start:",s.start_time,"End:",s.end_time,"Duration:",s.end_time-s.start_time
  print

  return
Example 30
def get_segment_definer_comments(xml_file, include_version=True):
    """Returns a dict with the comment column as the value for each segment"""

    from glue.ligolw.ligolw import LIGOLWContentHandler as h
    lsctables.use_in(h)

    # read segment definer table
    xmldoc, _ = ligolw_utils.load_fileobj(xml_file,
                                        gz=xml_file.name.endswith(".gz"),
                                        contenthandler=h)
    seg_def_table = table.get_table(xmldoc,
                                    lsctables.SegmentDefTable.tableName)

    # put comment column into a dict
    comment_dict = {}
    for seg_def in seg_def_table:
        if include_version:
            full_channel_name = ':'.join([str(seg_def.ifos),
                                          str(seg_def.name),
                                          str(seg_def.version)])
        else:
            full_channel_name = ':'.join([str(seg_def.ifos),
                                          str(seg_def.name)])

        comment_dict[full_channel_name] = seg_def.comment

    return comment_dict
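A hedged usage sketch (illustrative path; an open file object is expected because xml_file.name is checked for a .gz suffix):

xml_file = open("segments.xml", "rb")  # illustrative path
comments = get_segment_definer_comments(xml_file)
for flag, comment in comments.items():
    print("%s: %s" % (flag, comment))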
Example 31
    def _make_events(self, doc, psd_file, coinc_def):
        # Look up necessary tables.
        coinc_table = get_table(doc, CoincTable.tableName)
        coinc_map_table = get_table(doc, CoincMapTable.tableName)
        sngl_inspiral_table = get_table(doc, SnglInspiralTable.tableName)
        try:
            time_slide_table = get_table(doc, TimeSlideTable.tableName)
        except ValueError:
            offsets_by_time_slide_id = None
        else:
            offsets_by_time_slide_id = time_slide_table.as_dict()

        # Indices to speed up lookups by ID.
        key = operator.attrgetter('coinc_event_id')
        event_ids_by_coinc_event_id = {
            coinc_event_id:
            tuple(coinc_map.event_id for coinc_map in coinc_maps)
            for coinc_event_id, coinc_maps in groupby(
                sorted(coinc_map_table, key=key), key=key)
        }
        sngl_inspirals_by_event_id = {
            row.event_id: row
            for row in sngl_inspiral_table
        }

        # Filter rows by coinc_def if requested.
        if coinc_def is not None:
            coinc_def_table = get_table(doc, CoincDefTable.tableName)
            coinc_def_ids = {
                row.coinc_def_id
                for row in coinc_def_table
                if (row.search,
                    row.search_coinc_type) == (coinc_def.search,
                                               coinc_def.search_coinc_type)
            }
            coinc_table = (row for row in coinc_table
                           if row.coinc_def_id in coinc_def_ids)

        snr_dict = dict(self._snr_series_by_sngl_inspiral(doc))

        process_table = get_table(doc, ProcessTable.tableName)
        program_for_process_id = {
            row.process_id: row.program
            for row in process_table
        }

        try:
            process_params_table = get_table(doc, ProcessParamsTable.tableName)
        except ValueError:
            psd_filenames_by_process_id = {}
        else:
            psd_filenames_by_process_id = {
                process_param.process_id: process_param.value
                for process_param in process_params_table
                if process_param.param == '--reference-psd'
            }

        for coinc in coinc_table:
            coinc_event_id = coinc.coinc_event_id
            coinc_event_num = int(coinc_event_id)
            sngls = [
                sngl_inspirals_by_event_id[event_id]
                for event_id in event_ids_by_coinc_event_id[coinc_event_id]
            ]
            if (offsets_by_time_slide_id is None
                    and coinc.time_slide_id == TimeSlideID(0)):
                log.warn(
                    'Time slide record is missing for %s, '
                    'guessing that this is zero-lag', coinc.time_slide_id)
                offsets = defaultdict(float)
            else:
                offsets = offsets_by_time_slide_id[coinc.time_slide_id]

            template_args = [{
                key: getattr(sngl, key)
                for key in self._template_keys
            } for sngl in sngls]
            if any(d != template_args[0] for d in template_args[1:]):
                raise ValueError(
                    'Template arguments are not identical for all detectors!')
            template_args = template_args[0]

            invert_phases = self._phase_convention(
                program_for_process_id[coinc.process_id])
            if invert_phases:
                log.warn(PHASE_CONVENTION_WARNING)

            singles = tuple(
                LigoLWSingleEvent(
                    self, sngl.ifo, sngl.snr, sngl.coa_phase,
                    float(sngl.end +
                          offsets[sngl.ifo]), float(sngl.end), psd_file
                    or psd_filenames_by_process_id.get(sngl.process_id),
                    snr_dict.get(sngl.event_id), invert_phases)
                for sngl in sngls)

            event = LigoLWEvent(coinc_event_num, singles, template_args)

            yield coinc_event_num, event
Example 32
    signal_sample_rate = options.signal_sample_rate

if options.psd and options.asd_file:
    parser.error("PSD and asd-file options are mututally exclusive")

if options.use_cuda:
    ctx = CUDAScheme
else:
    ctx = DefaultScheme

print "STARTING THE BANKSIM"

# Load in the template bank file
indoc = ligolw_utils.load_filename(options.bank_file, False)
try :
    template_table = table.get_table(indoc, lsctables.SnglInspiralTable.tableName) 
except ValueError:
    template_table = table.get_table(indoc, lsctables.SimInspiralTable.tableName)

# open the output file where the max overlaps over the bank are stored 
fout = open(options.out_file, "w")
fout2 = open(options.out_file+".found", "w")

print "Writing matches to " + options.out_file
print "Writing recovered template in " + options.out_file+".found"

# Load in the simulation list
indoc = ligolw_utils.load_filename(options.sim_file, False)
try:
    signal_table = table.get_table(indoc, lsctables.SimInspiralTable.tableName) 
except ValueError:
Example 33
    def __init__(self, filename, approximant=None, parameters=None,
            load_compressed=True, load_compressed_now=False,
            **kwds):
        ext = os.path.basename(filename)
        self.compressed_waveforms = None
        if ext.endswith(('.xml', '.xml.gz', '.xmlgz')):
            self.filehandler = None
            self.indoc = ligolw_utils.load_filename(
                filename, False, contenthandler=LIGOLWContentHandler)
            self.table = table.get_table(
                self.indoc, lsctables.SnglInspiralTable.tableName)
            self.table = pycbc.io.WaveformArray.from_ligolw_table(self.table,
                columns=parameters)

            # inclination stored in xml alpha3 column
            names = list(self.table.dtype.names)
            names = tuple([n if n != 'alpha3' else 'inclination' for n in names]) 

            # low frequency cutoff in xml alpha6 column
            names = tuple([n if n != 'alpha6' else 'f_lower' for n in names])
            self.table.dtype.names = names

        elif ext.endswith(('hdf', '.h5')):
            self.indoc = None
            f = h5py.File(filename, 'r')
            self.filehandler = f
            try:
                fileparams = map(str, f.attrs['parameters'])
            except KeyError:
                # just assume all of the top-level groups are the parameters
                fileparams = map(str, f.keys())
                logging.info("WARNING: no parameters attribute found. "
                    "Assuming that %s " %(', '.join(fileparams)) +
                    "are the parameters.")
            # use WaveformArray's syntax parser to figure out what fields
            # need to be loaded
            if parameters is None:
                parameters = fileparams
            common_fields = list(pycbc.io.WaveformArray(1,
                names=parameters).fieldnames)
            add_fields = list(set(parameters) &
                (set(fileparams) - set(common_fields)))
            # load
            dtype = []
            data = {}
            for key in common_fields+add_fields:
                data[str(key)] = f[key][:]
                dtype.append((str(key), data[key].dtype))
            num = f[fileparams[0]].size
            self.table = pycbc.io.WaveformArray(num, dtype=dtype)
            for key in data:
                self.table[key] = data[key]
            # add the compressed waveforms, if they exist
            if load_compressed and 'compressed_waveforms' in f:
                self.compressed_waveforms = {}
                for tmplt_hash in self.table['template_hash']:
                    self.compressed_waveforms[tmplt_hash] = \
                        pycbc.waveform.compress.CompressedWaveform.from_hdf(f,
                            tmplt_hash, load_now=load_compressed_now)
        else:
            raise ValueError("Unsupported template bank file extension %s" %(
                ext))

        # if approximant is specified, override whatever was in the file
        # (if anything was in the file)
        if approximant is not None:
            # get the approximant for each template
            apprxs = self.parse_approximant(approximant)
            if 'approximant' not in self.table.fieldnames:
                self.table = self.table.add_fields(apprxs, 'approximant')
            else:
                self.table['approximant'] = apprxs
        self.extra_args = kwds
        self.ensure_hash()
Example 34
                  action="store_true",
                  help="print extra debugging information",
                  default=False)
parser.add_option("-e",
                  "--named",
                  help="Starting string in the names of final XMLs")

options, argv_frame_files = parser.parse_args()
print(options.named)

print(options.named)
indoc = ligolw_utils.load_filename(options.tmplt_bank,
                                   contenthandler=mycontenthandler)

try:
    template_bank_table = table.get_table(
        indoc, lsctables.SnglInspiralTable.tableName)
    tabletype = lsctables.SnglInspiralTable
except BaseException:
    template_bank_table = table.get_table(indoc,
                                          lsctables.SimInspiralTable.tableName)
    tabletype = lsctables.SimInspiralTable

# print tabletype
length = len(template_bank_table)
num_files = int(round(length / options.num + .5))

for num in range(num_files):

    # create a blank xml document and add the process id
    outdoc = ligolw.Document()
    outdoc.appendChild(ligolw.LIGO_LW())
Example 35
    if cnt == 0:
        new_point = get_new_sample_point()
        new_point.bandpass = cnt
        new_point.process_id = out_proc_id
        new_points_table.append(new_point)
        cnt += 1
        continue

    k = 0
    new_point = get_new_sample_point()
    while reject_new_sample_point(new_point, new_points_table, MM, psd, f_min,
                                  dt, N, options.mchirp_window):
        print("Rejecting sample %d" % k)
        k += 1
        new_point = get_new_sample_point()

    new_point.bandpass = cnt
    new_point.process_id = out_proc_id
    new_points_table.append(new_point)
    cnt += 1

#}}}
############## Write the new sample points to XML #############
print("Writing %d new points to %s" % (len(new_points_table), new_file_name))
sys.stdout.flush()

new_points_proctable = table.get_table(new_points_doc,
                                       lsctables.ProcessTable.tableName)
new_points_proctable[0].end_time = gpstime.GpsSecondsFromPyUTC(time.time())
ligolw_utils.write_filename(new_points_doc, new_file_name)
Esempio n. 36
0
#process = ligolw_process.register_to_xmldoc(fake_xmldoc, "lalapps_cbc_sbank_sim", FIXME
#    opts_dict, version=git_version.tag or git_version.id, FIXME
#    cvs_repository="sbank", cvs_entry_time=git_version.date) FIXME
process = ligolw_process.register_to_xmldoc(
    fake_xmldoc,
    "lalapps_cbc_sbank_sim",
    opts_dict,
    version="no version",
    cvs_repository="sbank",
    cvs_entry_time=strftime('%Y/%m/%d %H:%M:%S'))
h5file = H5File("%s.h5" % usertag, "w")

# load templates
xmldoc = utils.load_filename(tmplt_file, contenthandler=ContentHandler)
ligolw_copy_process(xmldoc, fake_xmldoc)
sngls = table.get_table(xmldoc, lsctables.SnglInspiralTable.tableName)
bank = Bank.from_sngls(sngls,
                       tmplt_approx,
                       noise_model,
                       flow,
                       use_metric=False,
                       cache_waveforms=opts.cache_waveforms,
                       nhood_size=opts.neighborhood_size,
                       nhood_param=opts.neighborhood_param)
# write bank to h5 file, but note that from_sngls() has resorted the
# bank by neighborhood_param
sngls = lsctables.SnglInspiralTable()
for s in bank._templates:
    sngls.append(s.to_sngl())
h5file.create_dataset("/sngl_inspiral",
                      data=ligolw_table_to_array(sngls),
Esempio n. 37
0
def get_columns_to_print(xmldoc, tableName, with_sngl=False):
    """
    Retrieves canonical columns to print for the given tableName.
    Returns the columnList, row_span_columns and rspan_break_columns lists.

    @with_sngl: for the loudest_events table, if turned on, also print the
     sngl_ifo end_times
    """
    if not tableName.endswith(":table"):
        tableName += ":table"
    summTable = table.get_table(xmldoc, tableName)
    # get rankname
    rankname = [
        col.getAttribute("Name").split(":")[-1]
        for col in summTable.getElementsByTagName(u'Column')
        if "rank" in col.getAttribute("Name")
    ][0]

    if tableName == "loudest_events:table" and not with_sngl:
        durname = [
            col.getAttribute("Name").split(":")[-1]
            for col in summTable.getElementsByTagName(u'Column')
            if "_duration__Px_" in col.getAttribute("Name")
            and not col.getAttribute("Name").split(":")[-1].startswith('sngl_')
        ][0]
        columnList = [
            rankname, 'combined_far', 'fap', 'fap_1yr', 'snr', 'end_time',
            'gps_time_utc__Px_click_for_daily_ihope_xP_',
            'ifos__Px_click_for_elog_xP_', 'instruments_on', 'mass', 'mchirp',
            'mini_followup', 'omega_scan', durname
        ]
        row_span_columns = rspan_break_columns = [durname]
    elif tableName == "loudest_events:table" and with_sngl:
        durname = [
            col.getAttribute("Name").split(":")[-1]
            for col in summTable.getElementsByTagName(u'Column')
            if "_duration__Px_" in col.getAttribute("Name") and
            not col.getAttribute("Name").split(":")[-1].startswith(u'sngl_')
        ][0]
        columnList = [
            rankname, 'combined_far', 'fap', 'fap_1yr', 'snr', 'mass',
            'mchirp', 'instruments_on', 'sngl_ifo__Px_click_for_elog_xP_',
            'sngl_end_time',
            'sngl_event_time_utc__Px_click_for_daily_ihope_xP_',
            'mini_followup', 'omega_scan', durname
        ]
        row_span_columns = rspan_break_columns = \
            [col for col in summTable.columnnames if not col.startswith('sngl_')]
    elif tableName == "selected_found_injections:table":
        durname = [
            col.getAttribute("Name").split(":")[-1]
            for col in summTable.getElementsByTagName(u'Column')
            if "_duration__Px_" in col.getAttribute("Name")
        ][0]
        columnList = [
            rankname, 'injected_gps_time',
            'injected_event_time_utc__Px_click_for_daily_ihope_xP_', 'elogs',
            'mini_followup', 'omega_scan', 'sim_tag',
            'injected_decisive_distance', 'injected_mchirp', 'injected_mass1',
            'injected_mass2', 'recovered_match_rank', 'recovered_ifos',
            'recovered_combined_far', 'recovered_fap', 'recovered_fap_1yr',
            'recovered_snr', 'recovered_gps_time', 'recovered_mchirp',
            'recovered_mass'
        ]
        row_span_columns = rspan_break_columns = [
            rankname, 'injected_gps_time',
            'injected_event_time_utc__Px_click_for_daily_ihope_xP_', 'elogs',
            'mini_followup', 'omega_scan', 'sim_tag',
            'injected_decisive_distance', 'injected_mchirp', 'injected_mass1',
            'injected_mass2'
        ]
        if with_sngl:
            row_span_columns.extend([
                col for col in summTable.columnnames
                if col.startswith('recovered_')
            ])
    elif tableName == "close_missed_injections:table":
        columnList = [
            'rank', 'decisive_distance', 'gps_time',
            'injection_time_utc__Px_click_for_daily_ihope_xP_', 'elogs',
            'mchirp', 'mass1', 'mass2', 'eff_dist_h', 'eff_dist_l',
            'eff_dist_v', 'sim_tag', 'mini_followup', 'omega_scan'
        ]
        row_span_columns = rspan_break_columns = []
    else:
        # unrecognized table, just return all the columns in the table
        columnList = [
            col.getAttribute("Name").split(":")[-1]
            for col in summTable.getElementsByTagName(u'Column')
        ]
        row_span_columns = rspan_break_columns = []

    return columnList, row_span_columns, rspan_break_columns
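
# --- Illustrative usage sketch (not from the snippet above): ask which
# columns to display for the loudest_events table of a summary document.
# The file name is a hypothetical placeholder.
from glue.ligolw import ligolw, lsctables
from glue.ligolw import utils as ligolw_utils

xmldoc = ligolw_utils.load_filename(
    'ihope_summary.xml',
    contenthandler=lsctables.use_in(ligolw.LIGOLWContentHandler))
cols, row_span_cols, rspan_break_cols = get_columns_to_print(
    xmldoc, 'loudest_events', with_sngl=False)
print(cols[0])          # the rank column comes first
print(row_span_cols)    # columns whose cells may span several rows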
Esempio n. 38
0
def output_sngl_inspiral_table(outputFile,
                               tempBank,
                               metricParams,
                               ethincaParams,
                               programName="",
                               optDict=None,
                               outdoc=None,
                               **kwargs):
    """
    Function that converts the information produced by the various pyCBC bank
    generation codes into a valid LIGOLW xml file containing a sngl_inspiral
    table and outputs to file.
 
    Parameters
    -----------
    outputFile : string
        Name of the file that the bank will be written to
    tempBank : iterable
        Each entry in the tempBank iterable should be a sequence of
        [mass1,mass2,spin1z,spin2z] in that order.
    metricParams : metricParameters instance
        Structure holding all the options for construction of the metric
        and the eigenvalues, eigenvectors and covariance matrix
        needed to manipulate the space.
    ethincaParams: {ethincaParameters instance, None}
        Structure holding options relevant to the ethinca metric computation
        including the upper frequency cutoff to be used for filtering.
        NOTE: The computation is currently only valid for non-spinning systems
        and uses the TaylorF2 approximant.
    programName (keyword argument) : string
        Name of the executable that has been run
    optDict (keyword argument) : dictionary
        Dictionary of the command line arguments passed to the program
    outdoc (keyword argument) : ligolw xml document
        If given, add the template bank to this representation of an xml
        document and write it to disk. If not given, create a new document.
    kwargs : keyword arguments
        All other keyword arguments will be passed directly to
        ligolw_process.register_to_xmldoc
    """
    if optDict is None:
        optDict = {}
    if outdoc is None:
        outdoc = ligolw.Document()
        outdoc.appendChild(ligolw.LIGO_LW())

    # get IFO to put in search summary table
    ifos = []
    if 'channel_name' in optDict.keys():
        if optDict['channel_name'] is not None:
            ifos = [optDict['channel_name'][0:2]]

    proc_id = ligolw_process.register_to_xmldoc(outdoc,
                                                programName,
                                                optDict,
                                                ifos=ifos,
                                                **kwargs).process_id
    sngl_inspiral_table = convert_to_sngl_inspiral_table(tempBank, proc_id)
    # Calculate Gamma components if needed
    if ethincaParams is not None:
        if ethincaParams.doEthinca:
            for sngl in sngl_inspiral_table:
                # Set tau_0 and tau_3 values needed for the calculation of
                # ethinca metric distances
                (sngl.tau0, sngl.tau3) = pnutils.mass1_mass2_to_tau0_tau3(
                    sngl.mass1, sngl.mass2, metricParams.f0)
                fMax_theor, GammaVals = calculate_ethinca_metric_comps(
                    metricParams,
                    ethincaParams,
                    sngl.mass1,
                    sngl.mass2,
                    spin1z=sngl.spin1z,
                    spin2z=sngl.spin2z,
                    full_ethinca=ethincaParams.full_ethinca)
                # assign the upper frequency cutoff and Gamma0-5 values
                sngl.f_final = fMax_theor
                for i, gamma_val in enumerate(GammaVals):
                    setattr(sngl, "Gamma" + str(i), gamma_val)
        # If Gamma metric components are not wanted, assign f_final from an
        # upper frequency cutoff specified in ethincaParams
        elif ethincaParams.cutoff is not None:
            for sngl in sngl_inspiral_table:
                sngl.f_final = pnutils.frequency_cutoff_from_name(
                    ethincaParams.cutoff, sngl.mass1, sngl.mass2, sngl.spin1z,
                    sngl.spin2z)

    outdoc.childNodes[0].appendChild(sngl_inspiral_table)

    # get times to put in search summary table
    start_time = 0
    end_time = 0
    if 'gps_start_time' in optDict.keys() and 'gps_end_time' in optDict.keys():
        start_time = optDict['gps_start_time']
        end_time = optDict['gps_end_time']

    # make search summary table
    search_summary_table = lsctables.New(lsctables.SearchSummaryTable)
    search_summary = return_search_summary(start_time, end_time,
                                           len(sngl_inspiral_table), ifos,
                                           **kwargs)
    search_summary_table.append(search_summary)
    outdoc.childNodes[0].appendChild(search_summary_table)

    # write the xml doc to disk
    proctable = table.get_table(outdoc, lsctables.ProcessTable.tableName)
    ligolw_utils.write_filename(outdoc,
                                outputFile,
                                gz=outputFile.endswith('.gz'))
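
# --- Illustrative usage sketch (not from the snippet above): write a small
# toy bank with no ethinca metric information.  The output file name, program
# name and channel are hypothetical placeholders.
toy_bank = [
    (1.4, 1.4, 0.0, 0.0),     # mass1, mass2, spin1z, spin2z
    (10.0, 5.0, 0.3, -0.1),
]
output_sngl_inspiral_table('TOY_BANK.xml', toy_bank,
                           metricParams=None, ethincaParams=None,
                           programName='toy_bank_writer',
                           optDict={'channel_name': 'H1:GDS-CALIB_STRAIN'})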
Esempio n. 39
0
    def __init__(self, opts, flist):
        self.segments = segments.segmentlistdict()
        self.non_inj_fnames = []
        self.inj_fnames = []
        self.found = {}
        self.missed = {}
        self.opts = opts
        self.veto_segments = segments.segmentlistdict()
        self.zero_lag_segments = {}
        self.instruments = []
        self.livetime = {}
        self.multi_burst_table = None
        self.coinc_inspiral_table = None

        for f in flist:
            if opts.verbose:
                print >> sys.stderr, "Gathering stats from: %s...." % (f, )
            working_filename = dbtables.get_connection_filename(
                f, tmp_path=opts.tmp_space, verbose=opts.verbose)
            connection = sqlite3.connect(working_filename)
            dbtables.DBTable_set_connection(connection)
            xmldoc = dbtables.get_xml(connection)

            # look for a sim table
            try:
                sim_inspiral_table = table.get_table(
                    xmldoc, dbtables.lsctables.SimInspiralTable.tableName)
                self.inj_fnames.append(f)
                sim = True
            except ValueError:
                self.non_inj_fnames.append(f)
                sim = False

            # FIGURE OUT IF IT IS A BURST OR INSPIRAL RUN
            try:
                self.multi_burst_table = table.get_table(
                    xmldoc, dbtables.lsctables.MultiBurstTable.tableName)
            except ValueError:
                self.multi_burst_table = None
            try:
                self.coinc_inspiral_table = table.get_table(
                    xmldoc, dbtables.lsctables.CoincInspiralTable.tableName)
            except ValueError:
                self.coinc_inspiral_table = None
            if self.multi_burst_table and self.coinc_inspiral_table:
                print >> sys.stderr, "both burst and inspiral tables found.  Aborting"
                raise ValueError

            if not sim:
                self.get_instruments(connection)
                self.segments += self.get_segments(connection, xmldoc)
                #FIXME, don't assume veto segments are the same in every file!
                self.veto_segments = self.get_veto_segments(connection)

            dbtables.discard_connection_filename(f,
                                                 working_filename,
                                                 verbose=opts.verbose)
            dbtables.DBTable_set_connection(None)

        # remove redundant instruments
        self.instruments = list(set(self.instruments))
        # FIXME Do these have to be done by instruments?
        self.segments -= self.veto_segments

        # segments and livetime by instruments
        for i in self.instruments:
            self.zero_lag_segments[i] = self.segments.intersection(
                i) - self.segments.union(set(self.segments.keys()) - i)
            self.livetime[i] = float(abs(self.zero_lag_segments[i]))
Esempio n. 40
0
    L1_eras = [L1_gps_era_start]

# Grab all relevant Omicron trigger files
H1_omicron_times = []
H1_omicron_snr = []
H1_omicron_freq = []

for era in H1_eras:
    # Generate list of all Omicron SnglBurst xml trigger files
    H1_file_list = glob.glob(args.H1_omicron_dir + 
            '/GDS-CALIB_STRAIN_Omicron/%s/H1-GDS_CALIB_STRAIN_Omicron-*.xml.gz' %(era))
    
    # Parse trigger files into SNR, time, and frequency for Omicron triggers
    for file in H1_file_list:
        omicron_xml = utils.load_filename(file, contenthandler=DefaultContentHandler)
        H1snglburst_table = table.get_table(omicron_xml, lsctables.SnglBurstTable.tableName)

        for row in H1snglburst_table:
            if (row.snr > args.omicron_snr_thresh and 
                    H1_omicron_start_time < row.peak_time < H1_omicron_end_time):
                H1_omicron_times.append(row.peak_time + row.peak_time_ns * 10**(-9))
                H1_omicron_snr.append(row.snr)
                H1_omicron_freq.append(row.peak_frequency)


L1_omicron_times = []
L1_omicron_snr = []
L1_omicron_freq = []

for era in L1_eras:
    # Generate list of all Omicron SnglBurst xml trigger files
Esempio n. 41
0
(opts, args) = parser.parse_args()

# generate an injection file
command = "lalapps_bbhinj --seed " + str(opts.seed)
command = command + "  --min-distance " + str(1.0e3 * opts.min_distance)
command = command + "  --max-distance " + str(1.0e3 * opts.max_distance)
command = command + "  --d-distr 2"
os.system(command)

# read the injection file and create a list of galaxies from it
bbhinjFile = "HL-INJECTIONS_123-729273613-5094000.xml"
doc = utils.load_filename(bbhinjFile)
sims = None
try:
    simInspiralTable = \
        table.get_table(doc, lsctables.SimInspiralTable.tableName)
    sims = simInspiralTable
except ValueError:
    simInspiralTable = None
sims.sort(key=lambda a: a.distance)

# make distance, latitute, longitude arrays
distance = sims.getColumnByName('distance').asarray()
latitude = sims.getColumnByName('latitude').asarray()
longitude = sims.getColumnByName('longitude').asarray()
ngalaxies = float(len(longitude))

figure()
plot(longitude, sin(latitude), 'bx')
xlabel(r'Longitude')
ylabel(r'Sin(Latitude)')
Esempio n. 42
0
	def __init__(self, filelist, live_time_program = None, veto_segments_name = None, data_segments_name = "datasegments", tmp_path = None, verbose = False):

		self.segments = segments.segmentlistdict()
		self.instruments = set()
		self.table_name = None
		self.found_injections_by_instrument_set = {}
		self.missed_injections_by_instrument_set = {}
		self.total_injections_by_instrument_set = {}
		self.zerolag_fars_by_instrument_set = {}
		self.ts_fars_by_instrument_set = {}
		self.numslides = set()

		for f in filelist:
			if verbose:
				print >> sys.stderr, "Gathering stats from: %s...." % (f,)
			working_filename = dbtables.get_connection_filename(f, tmp_path = tmp_path, verbose = verbose)
			connection = sqlite3.connect(working_filename)
			xmldoc = dbtables.get_xml(connection)

			sim = False

			# look for a sim inspiral table.  This is IMR work we have to have one of these :)
			try:
				sim_inspiral_table = table.get_table(xmldoc, dbtables.lsctables.SimInspiralTable.tableName)
				sim = True
			except ValueError:
				pass

			# look for the relevant table for analyses
			for table_name in allowed_analysis_table_names():
				try:
					setattr(self, table_name, table.get_table(xmldoc, table_name))
					if self.table_name is None or self.table_name == table_name:
						self.table_name = table_name
					else:
						raise ValueError("detected more than one table type out of " + " ".join(allowed_analysis_table_names()))
				except ValueError:
					setattr(self, table_name, None)

			# the non simulation databases are where we get information about segments
			if not sim:
				self.numslides.add(connection.cursor().execute('SELECT count(DISTINCT(time_slide_id)) FROM time_slide').fetchone()[0])
				[self.instruments.add(ifos) for ifos in get_instruments_from_coinc_event_table(connection)]
				# save a reference to the segments for this file, needed to figure out the missed and found injections
				self.this_segments = get_segments(connection, xmldoc, self.table_name, live_time_program, veto_segments_name, data_segments_name = data_segments_name)
				# FIXME we don't really have any reason to use playground segments, but I put this here as a reminder
				# self.this_playground_segments = segmentsUtils.S2playground(self.this_segments.extent_all())
				self.segments += self.this_segments

				# get the far thresholds for the loudest events in these databases
				for (instruments_set, far, ts) in get_event_fars(connection, self.table_name):
					if not ts:
						self.zerolag_fars_by_instrument_set.setdefault(instruments_set, []).append(far)
					else:
						self.ts_fars_by_instrument_set.setdefault(instruments_set, []).append(far)
			# get the injections
			else:
				# We need to know the segments in this file to determine which injections are found
				self.this_injection_segments = get_segments(connection, xmldoc, self.table_name, live_time_program, veto_segments_name, data_segments_name = data_segments_name)
				self.this_injection_instruments = []
				distinct_instruments = connection.cursor().execute('SELECT DISTINCT(instruments) FROM coinc_event WHERE instruments!=""').fetchall()
				for instruments, in distinct_instruments:
					instruments_set = frozenset(lsctables.instrument_set_from_ifos(instruments))
					self.this_injection_instruments.append(instruments_set)
					segments_to_consider_for_these_injections = self.this_injection_segments.intersection(instruments_set) - self.this_injection_segments.union(set(self.this_injection_segments.keys()) - instruments_set)
					found, total, missed = get_min_far_inspiral_injections(connection, segments = segments_to_consider_for_these_injections, table_name = self.table_name)
					if verbose:
						print >> sys.stderr, "%s total injections: %d; Found injections %d: Missed injections %d" % (instruments, len(total), len(found), len(missed))
					self.found_injections_by_instrument_set.setdefault(instruments_set, []).extend(found)
					self.total_injections_by_instrument_set.setdefault(instruments_set, []).extend(total)
					self.missed_injections_by_instrument_set.setdefault(instruments_set, []).extend(missed)

			# All done
			dbtables.discard_connection_filename(f, working_filename, verbose = verbose)
		if len(self.numslides) > 1:
			raise ValueError('number of slides differs between input files')
		elif self.numslides:
			self.numslides = min(self.numslides)
		else:
			self.numslides = 0
Esempio n. 43
0
def parse_veto_definer(veto_def_filename):
    """ Parse a veto definer file from the filename and return a dictionary
    indexed by ifo and veto definer category level.

    Parameters
    ----------
    veto_def_filename: str
        The path to the veto definer file

    Returns
    -------
    parsed_definition: dict
        A dictionary first indexed by ifo, then category level, and
        finally a list of veto definitions.
    """
    from glue.ligolw import table, lsctables, utils as ligolw_utils
    from glue.ligolw.ligolw import LIGOLWContentHandler as h
    lsctables.use_in(h)

    indoc = ligolw_utils.load_filename(veto_def_filename,
                                       False,
                                       contenthandler=h)
    veto_table = table.get_table(indoc, 'veto_definer')

    ifo = veto_table.getColumnByName('ifo')
    name = veto_table.getColumnByName('name')
    version = numpy.array(veto_table.getColumnByName('version'))
    category = numpy.array(veto_table.getColumnByName('category'))
    start = numpy.array(veto_table.getColumnByName('start_time'))
    end = numpy.array(veto_table.getColumnByName('end_time'))
    start_pad = numpy.array(veto_table.getColumnByName('start_pad'))
    end_pad = numpy.array(veto_table.getColumnByName('end_pad'))

    data = {}
    for i in range(len(veto_table)):
        if ifo[i] not in data:
            data[ifo[i]] = {}

        # The veto-definer categories are weird! Hardware injections are stored
        # in "3" and numbers above that are bumped up by one (although not
        # often used any more). So we remap 3 to H and anything above 3 to
        # N-1. 2 and 1 correspond to 2 and 1 (YAY!)
        if category[i] > 3:
            curr_cat = "CAT_{}".format(category[i] - 1)
        elif category[i] == 3:
            curr_cat = "CAT_H"
        else:
            curr_cat = "CAT_{}".format(category[i])

        if curr_cat not in data[ifo[i]]:
            data[ifo[i]][curr_cat] = []

        veto_info = {
            'name': name[i],
            'version': version[i],
            'full_name': name[i] + ':' + str(version[i]),
            'start': start[i],
            'end': end[i],
            'start_pad': start_pad[i],
            'end_pad': end_pad[i],
        }
        data[ifo[i]][curr_cat].append(veto_info)
    return data
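
# --- Illustrative usage sketch (not from the snippet above): index the parsed
# dictionary by detector and category.  The veto-definer file name below is a
# hypothetical placeholder.
vetoes = parse_veto_definer('H1L1V1-CBC_VETO_DEFINER.xml')
for veto in vetoes.get('H1', {}).get('CAT_1', []):
    print(veto['full_name'], veto['start'], veto['end'])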
Esempio n. 44
0
                    'templates are supported'.format(attr))
        return super(TaylorF2RedSpinIntrinsicParams, cls).__new__(
            cls, *(getattr(sim_inspiral, field) for field in cls._fields))

    @property
    def chi(self):
        return lalsimulation.SimInspiralTaylorF2ReducedSpinComputeChi(
            self.mass1, self.mass2, self.spin1z, self.spin2z)


# Read injection file.
xmldoc, _ = ligolw_utils.load_fileobj(
    opts.input, contenthandler=ligolw_bayestar.LSCTablesContentHandler)

# Extract simulation table from injection file.
sim_inspiral_table = ligolw_table.get_table(
    xmldoc, lsctables.SimInspiralTable.tableName)

# Get just the intrinsic parameters from the sim_inspiral table.
sim_inspiral_intrinsic_params = {
    TaylorF2RedSpinIntrinsicParams(sim_inspiral)
    for sim_inspiral in sim_inspiral_table
}

if opts.low_frequency_cutoff is None:
    # Get the low-frequency cutoffs from the sim_inspiral table.
    f_lows = {sim_inspiral.f_lower for sim_inspiral in sim_inspiral_table}

    # There can be only one!
    try:
        f_low, = f_lows
    except ValueError:
Esempio n. 45
0
    # if .sqlite extension
    if fExtension == "sqlite":
        samples = xmlutils.db_to_samples(
            fname, lsctables.SnglInspiralTable,
            ['mass1', 'mass2', 'snr', 'tau0', 'event_duration'])
        for row in samples:  # print each individual row, don't reweight yet
            if (row.mass1, row.mass2) not in data_at_intrinsic:
                data_at_intrinsic[(row.mass1, row.mass2)] = [[
                    row.snr, row.tau0, row.event_duration
                ]]
            else:
                data_at_intrinsic[(row.mass1, row.mass2)].append(
                    [row.snr, row.tau0, row.event_duration])
    # otherwise xml: could have just used ligolw_print
    elif fExtension == "xml" or fExtension == "gz":
        samples = table.get_table(utils.load_filename(fname),
                                  lsctables.SnglInspiralTable.tableName)
        for row in samples:  # print each individual row, don't reweight yet
            if (row.mass1, row.mass2) not in data_at_intrinsic:
                data_at_intrinsic[(row.mass1, row.mass2)] = [[
                    row.snr, row.tau0, row.event_duration
                ]]
            else:
                data_at_intrinsic[(row.mass1, row.mass2)].append(
                    [row.snr, row.tau0, row.event_duration])

# Loop over each key and print out
for intrinsic in data_at_intrinsic.keys():
    lnL, neff, sigmaOverL = np.transpose(data_at_intrinsic[intrinsic])
    lnLmax = np.max(lnL)
    sigma = sigmaOverL * np.exp(
        lnL - lnLmax
Esempio n. 46
0
    else:
        smplpt.alpha3 = -1
    if options.sample_lambda2:
        smplpt.alpha4 = sample_range(options.lambda2_min, options.lambda2_max)
    else:
        smplpt.alpha4 = -1

    # Get the inclination angle
    if options.inclination is not None:
        smplpt.inclination = options.inclination
    else:
        smplpt.inclination = sample_inc()

    # Get the polarization angle
    if options.polarization is not None:
        smplpt.polarization = options.polarization
    else:
        smplpt.polarization = sample_pol()

    smplpt.latitude = 0.
    smplpt.longitude = 0.
    #
    sim_inspiral_table.append(smplpt)
    #print smplpt.spin1z, smplpt.spin2z

# Store the samples in the output file
proctable = table.get_table(outdoc, lsctables.ProcessTable.tableName)
proctable[0].end_time = gpstime.GpsSecondsFromPyUTC(time.time())
outname = 'TestPoints.xml'
ligolw_utils.write_filename(outdoc, outname)
Esempio n. 47
0
                  default=False)

(options, args) = parser.parse_args()
# }}}

if options.input_bank is None or options.output_bank is None:
    raise IOError("PLease give input AND output bank names")

if (options.strict_region_alignedspin is False and options.mtotal_cut is None
        and options.mchirp_cut is None and options.eta_cut is None
        and options.mass_cut is None and options.extract_eta is None
        and options.extract_q is None and not options.strict_region
        and options.insert_q is None and options.insert_eta is None):
    raise IOError("Please specify one cut to apply")

################### Read in the bank file, and get the sim table ############
bank_file_name = options.input_bank
in_bank_doc = ligolw_utils.load_filename(bank_file_name, options.verbose)
try:
    in_bank_table = table.get_table(in_bank_doc,
                                    lsctables.SimInspiralTable.tableName)
except ValueError:
    in_bank_table = table.get_table(in_bank_doc,
                                    lsctables.SnglInspiralTable.tableName)

################### Cut the bank and write it to disk ############
subfile_name = options.output_bank
print("Writing sub-bank file %s" % subfile_name)
out_subbank_doc = ligolw.Document()
out_subbank_doc.appendChild(ligolw.LIGO_LW())
out_proc_id = ligolw_process.register_to_xmldoc(
    out_subbank_doc,
    PROGRAM_NAME,
    options.__dict__,
    comment=options.comment,
    version=git_version.id,
Esempio n. 48
0
    def __init__(self, sim_file, **kwds):
        self.indoc = ligolw_utils.load_filename(
            sim_file, False, contenthandler=LIGOLWContentHandler)
        self.table = table.get_table(
            self.indoc, lsctables.SimBurstTable.tableName)
        self.extra_args = kwds
Esempio n. 49
0
condor_commands = None
if opts.condor_command is not None:
    condor_commands = dict([c.split("=") for c in opts.condor_command])

#
# Get trigger information from coinc xml file
#
# FIXME: CML should package this for us

# Get end time from coinc inspiral table or command line
xmldoc = None
if opts.coinc_xml is not None:
    xmldoc = utils.load_filename(opts.coinc_xml,
                                 contenthandler=ligolw.LIGOLWContentHandler)
    coinc_table = table.get_table(xmldoc,
                                  lsctables.CoincInspiralTable.tableName)
    assert len(coinc_table) == 1
    coinc_row = coinc_table[0]
    event_time = coinc_row.get_end()
    print "Coinc XML loaded, event time: %s" % str(coinc_row.get_end())
elif opts.event_time is not None:
    event_time = glue.lal.LIGOTimeGPS(opts.event_time)
    print "Event time from command line: %s" % str(event_time)
else:
    raise ValueError(
        "Either --coinc-xml or --event-time must be provided to parse event time."
    )

xmldoc, tmplt_bnk = utils.load_filename(
    opts.template_bank_xml, contenthandler=ligolw.LIGOLWContentHandler), None
try:
Esempio n. 50
0
def createInjectionFile(hipe_dir, cp, cpinj, injrun, injection_segment,
  source_file, ipn_gps=None, usertag=None, verbose=False):
  """
  Creates a master injection file containing all injections for this run.
  Also reads the file and returns its contents.
  """
  cpinj = copy.deepcopy(cpinj)

  # get the number of injections to be made
  for opt in ['exttrig-inj-start','exttrig-inj-stop']:
    value = int(cpinj.get(injrun,opt))
    cpinj.remove_option(injrun,opt)
    if 'start' in opt:
      injStart = value
    else:
      injEnd = value
  seed = hash_n_bits(hipe_dir, 31)
  numberInjections = injEnd - injStart + 1  # e.g., 1 through 5000 inclusive

      
  # get the jitter parameters
  if cpinj.has_option(injrun, "jitter-skyloc"):
    jitter_sigma_deg = cpinj.getfloat(injrun, "jitter-skyloc")
    cpinj.remove_option(injrun, "jitter-skyloc")
  else:
    jitter_sigma_deg = None


  # check if the specific Fermi systematic error needs to 
  # be added to the location jittering
  if cpinj.has_option(injrun, "jitter-skyloc-fermi"):
    jitter_skyloc_fermi = cpinj.getboolean(injrun, "jitter-skyloc-fermi")
    cpinj.remove_option(injrun, "jitter-skyloc-fermi")
  else:
    jitter_skyloc_fermi = False

  # check if we should align the total angular momentum
  if cpinj.has_option(injrun, "align-total-spin"):
    align_total_spin = cpinj.getboolean(injrun, "align-total-spin")
    cpinj.remove_option(injrun, "align-total-spin")
  else:
    align_total_spin = False

  # set all the arguments
  argument = []
  for (opt,value) in cpinj.items(injrun):
    argument.append("--%s %s" % (opt, value) )

  # add arguments on times and time-intervals
  interval = abs(injection_segment)
  injInterval = interval / numberInjections
  argument.append(" --gps-start-time %d" % injection_segment[0] )
  argument.append(" --gps-end-time %d" % injection_segment[1] )
  argument.append(" --time-interval %f" % injInterval )
  argument.append(" --time-step %f" % injInterval )
  argument.append(" --seed %d" % seed )
  argument.append(" --user-tag %s" % usertag)

  # set output file as exttrig-file or IPN file with IPN GPS time
  if ipn_gps:
    argument.append(" --ipn-gps-time %d" % ipn_gps )
  else:
    argument.append(" --exttrig-file %s" % source_file )


  # execute the command
  executable = cp.get("condor", "inspinj")
  arguments = " ".join(argument)
  inspiralutils.make_external_call(executable + " " + arguments,
    show_command=verbose)

  # recreate the output filename
  injFile = "HL-INJECTIONS_" + str(seed)
  if usertag is not None:
    injFile += "_" + usertag
  injFile += "-%d-%d.xml" % (injection_segment[0], abs(injection_segment))

  # move it into the GRB directory to avoid clutter
  new_injFile = hipe_dir + "/" + injFile
  os.rename(injFile, new_injFile)

  # jitter the sky locations of the injections
  if jitter_sigma_deg is not None:
    # rename the original, then have ligolw_cbc_jitter_skyloc create a new one
    os.rename(new_injFile, new_injFile + ".prejitter")
    cmd = ["ligolw_cbc_jitter_skyloc"]
    if jitter_skyloc_fermi:
      cmd.append("--apply-fermi-error")
    cmd.extend(["--jitter-sigma-deg",
      str(jitter_sigma_deg), "--output-file", new_injFile,
      new_injFile + ".prejitter"])
    if verbose:
      print " ".join(cmd)
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    if p.returncode != 0:
      raise subprocess.CalledProcessError(p.returncode, "%s: %s" % (" ".join(cmd), err))

  # rotate the binary so that total angular momentum has the current inclination
  if align_total_spin:
    # rename the original then have ligolw_cbc_align_total_spin create a new one
    os.rename(new_injFile, new_injFile + ".prealign")
    cmd = ["ligolw_cbc_align_total_spin", "--output-file", new_injFile,
      new_injFile + ".prealign"]
    if verbose:
      print " ".join(cmd)
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    if p.returncode != 0:
      raise subprocess.CalledProcessError(p.returncode, "%s: %s" % (" ".join(cmd), err))

  # read in the file and the tables
  doc = utils.load_filename(new_injFile, contenthandler = lsctables.use_in(ligolw.LIGOLWContentHandler))
  sims = table.get_table(doc, lsctables.SimInspiralTable.tableName)

  return sims, injInterval, numberInjections, new_injFile
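
# --- Illustrative usage sketch (not from the snippet above): build a master
# injection file for one GRB run.  The configuration files, run name, GPS
# segment and source file below are hypothetical placeholders.
import ConfigParser
from glue import segments

cp = ConfigParser.ConfigParser()
cp.read('grb_pipeline.ini')
cpinj = ConfigParser.ConfigParser()
cpinj.read('grb_injections.ini')
sims, injInterval, nInj, injFile = createInjectionFile(
    './GRB090510', cp, cpinj, 'injrun001',
    segments.segment(966384015, 966388015),
    source_file='grb090510_source.xml', usertag='GRB090510', verbose=True)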
Esempio n. 51
0
def sim_coinc_and_sngl_inspirals_for_xmldoc(xmldoc):
    """Retrieve (as a generator) all of the
    (sim_inspiral, coinc_event, (sngl_inspiral, sngl_inspiral, ... sngl_inspiral))
    tuples from found coincidences in a LIGO-LW XML document."""

    # Look up necessary tables.
    coinc_table = ligolw_table.get_table(xmldoc,
                                         lsctables.CoincTable.tableName)
    coinc_def_table = ligolw_table.get_table(xmldoc,
                                             lsctables.CoincDefTable.tableName)
    coinc_map_table = ligolw_table.get_table(xmldoc,
                                             lsctables.CoincMapTable.tableName)

    # Look up coinc_def ids.
    sim_coinc_def_id = coinc_def_table.get_coinc_def_id(
        InspiralSCExactCoincDef.search,
        InspiralSCExactCoincDef.search_coinc_type,
        create_new=False)

    def events_for_coinc_event_id(coinc_event_id):
        for coinc_map in coinc_map_table:
            if coinc_map.coinc_event_id == coinc_event_id:
                for row in ligolw_table.get_table(xmldoc,
                                                  coinc_map.table_name):
                    column_name = coinc_map.event_id.column_name
                    if getattr(row, column_name) == coinc_map.event_id:
                        yield coinc_map.event_id, row

    # Loop over all coinc_event <-> sim_inspiral coincs.
    for sim_coinc in coinc_table:

        # If this is not a coinc_event <-> sim_inspiral coinc, skip it.
        if sim_coinc.coinc_def_id != sim_coinc_def_id:
            continue

        # Locate the sim_inspiral and coinc events.
        sim_inspiral = None
        coinc = None
        for event_id, event in events_for_coinc_event_id(
                sim_coinc.coinc_event_id):
            if event_id.table_name == ligolw_table.StripTableName(
                    lsctables.SimInspiralTable.tableName):
                if sim_inspiral is not None:
                    raise RuntimeError(
                        "Found more than one matching sim_inspiral entry")
                sim_inspiral = event
            elif event_id.table_name == ligolw_table.StripTableName(
                    lsctables.CoincTable.tableName):
                if coinc is not None:
                    raise RuntimeError(
                        "Found more than one matching coinc entry")
                coinc = event
            else:
                raise RuntimeError(
                    "Did not expect coincidence to contain an event of type '%s'"
                    % event_id.table_name)

        sngl_inspirals = tuple(event for event_id, event in
                               events_for_coinc_event_id(coinc.coinc_event_id))

        yield sim_inspiral, coinc, sngl_inspirals
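
# --- Illustrative usage sketch (not from the snippet above): iterate over the
# found-injection coincidences in a hypothetical file.
from glue.ligolw import ligolw, lsctables
from glue.ligolw import utils as ligolw_utils

xmldoc = ligolw_utils.load_filename(
    'found_coincs.xml.gz',
    contenthandler=lsctables.use_in(ligolw.LIGOLWContentHandler))
for sim, coinc, sngls in sim_coinc_and_sngl_inspirals_for_xmldoc(xmldoc):
    print(sim.simulation_id, coinc.coinc_event_id, len(sngls))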
Esempio n. 52
0
#deal with the glob
files = []
if opts.glob is not None:
    for gl in opts.glob.split(" "):
        files.extend(glob.glob(gl))
    if len(files) < 1:
        print >> sys.stderr, "The glob for " + opts.glob + " returned no files"
        sys.exit(1)
else:
    print >> sys.stderr, "Need to specify a glob"
    sys.exit(1)

for file in files:
    xmldoc = utils.load_filename(file)
    try:
        sltab = table.get_table(xmldoc, skylocutils.SkyLocInjTable.tableName)
        if sltab[0].grid:
            print "injection"
            inj = True
        else:
            print "no injection"
            sltab = table.get_table(xmldoc, skylocutils.SkyLocTable.tableName)
            inj = False
    except:
        print "no injection"
        sltab = table.get_table(xmldoc, skylocutils.SkyLocTable.tableName)
        inj = False

    print "Plotting " + file
    for row in sltab:
        for fname in [row.grid, row.galaxy_grid]:
Esempio n. 53
0
def create_tables(xmldoc, rootfiles):

    sim_tree = TChain("waveburst")
    liv_tree = TChain("liveTime")
    for rootfile in rootfiles:
        sim_tree.Add(rootfile)
        liv_tree.Add(rootfile)

    # Define tables
    sngl_burst_table = lsctables.New(lsctables.SnglBurstTable, [
        "peak_time_ns", "start_time_ns", "stop_time_ns", "process_id", "ifo",
        "peak_time", "start_time", "stop_time", "duration", "time_lag",
        "peak_frequency", "search", "flow", "fhigh", "bandwidth", "tfvolume",
        "hrss", "event_id"
    ])
    xmldoc.childNodes[0].appendChild(sngl_burst_table)
    sngl_burst_table.sync_next_id()

    coinc_event_table = lsctables.New(lsctables.CoincTable, [
        "process_id", "coinc_event_id", "nevents", "instruments",
        "time_slide_id", "coinc_def_id"
    ])
    xmldoc.childNodes[0].appendChild(coinc_event_table)
    coinc_event_table.sync_next_id()

    multi_burst_table = lsctables.New(
        lsctables.MultiBurstTable,
        ["process_id", "peak_time", "peak_time_ns", "coinc_event_id"])
    xmldoc.childNodes[0].appendChild(multi_burst_table)

    coinc_event_map_table = lsctables.New(lsctables.CoincMapTable)
    xmldoc.childNodes[0].appendChild(coinc_event_map_table)

    do_process_table(xmldoc, sim_tree, liv_tree)
    process_index = dict(
        (int(row.process_id), row)
        for row in table.get_table(xmldoc, lsctables.ProcessTable.tableName))

    do_summary_table(xmldoc, sim_tree, liv_tree)

    # create coinc_definer row
    row = get_coinc_def_row(sim_tree)
    coinc_def_id = llwapp.get_coinc_def_id(xmldoc,
                                           row.search,
                                           row.search_coinc_type,
                                           description=row.description)

    for i in range(0, sim_tree.GetEntries()):
        sim_tree.GetEntry(i)

        offset_vector = dict(
            (get_ifos_from_index(instrument_index), offset)
            for instrument_index, offset in zip(sim_tree.ifo, sim_tree.lag))

        coinc_event = coinc_event_table.RowType()
        coinc_event.process_id = process_index[sim_tree.run].process_id
        coinc_event.coinc_event_id = coinc_event_table.get_next_id()
        coinc_event.coinc_def_id = coinc_def_id
        coinc_event.nevents = sim_tree.ndim
        coinc_event.instruments = get_ifos_from_index(
            branch_array_to_list(sim_tree.ifo, sim_tree.ndim))
        coinc_event.time_slide_id = llwapp.get_time_slide_id(
            xmldoc, offset_vector, process_index[sim_tree.run])
        coinc_event_table.append(coinc_event)

        for d in range(0, sim_tree.ndim):
            sngl_burst = get_sngl_burst_row(sngl_burst_table, sim_tree, d)
            sngl_burst.process_id = coinc_event.process_id
            sngl_burst.event_id = sngl_burst_table.get_next_id()
            sngl_burst_table.append(sngl_burst)

            coinc_event_map = coinc_event_map_table.RowType()
            coinc_event_map.event_id = sngl_burst.event_id
            coinc_event_map.table_name = sngl_burst.event_id.table_name
            coinc_event_map.coinc_event_id = coinc_event.coinc_event_id
            coinc_event_map_table.append(coinc_event_map)

        multi_burst = get_multi_burst_row(multi_burst_table, sim_tree)
        multi_burst.process_id = coinc_event.process_id
        multi_burst.coinc_event_id = coinc_event.coinc_event_id
        multi_burst_table.append(multi_burst)
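
# --- Illustrative usage sketch (not from the snippet above): assemble the
# coincidence tables from a set of cWB ROOT files and write them out.  The
# ROOT file names and output name are hypothetical placeholders.
from glue.ligolw import ligolw
from glue.ligolw import utils as ligolw_utils

xmldoc = ligolw.Document()
xmldoc.appendChild(ligolw.LIGO_LW())
create_tables(xmldoc, ['wave_job1.root', 'wave_job2.root'])
ligolw_utils.write_filename(xmldoc, 'cwb_triggers.xml.gz', gz=True)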
Esempio n. 54
0
def exttrig_dataquery(grb_name,
                      grb_time,
                      grb_ra,
                      grb_dec,
                      offset,
                      config_file,
                      extend=False,
                      useold=False,
                      make_plots=False,
                      make_xml=False):
    '''
    Finds science time of all available IFOs.
    '''
    ##############################################################################
    # get segment duration and minimum amount of science time
    ##############################################################################

    # read the configuration file
    cp = ConfigParser.ConfigParser()
    cp.read(config_file)

    # define hardcoded variables
    basic_ifolist = ifolist = ['H1', 'H2', 'L1', 'V1']
    catlist = [1, 2, 3]
    sensitivity_dict = {"H1": 1, "L1": 2, "H2": 3, "V1": 4, "G1": 5}

    # get segment length from configuration file
    pad_data = int(cp.get('data', 'pad-data'))
    if cp.has_option('data', 'segment-duration'):
        blockDuration = segmentDuration = psdDuration = int(
            cp.get('data', 'segment-duration'))
    elif cp.has_option('data', 'segment-length'):
        blockDuration = segmentDuration = psdDuration = int(
            cp.get('data', 'segment-length')) / int(
                cp.get('data', 'sample-rate'))
    else:
        raise ValueError, "EXIT: Cannot find segment-duration in [data] section of configuration file!"

    # get sample rate
    if cp.has_option('data', 'sample-rate'):
        sampleRate = int(cp.get('data', 'sample-rate'))
        print ">> Sample rate has been set to: %d" % sampleRate
        print
    else:
        print ">> ERROR: Need to specify sample-rate in [data] section of configuration file in order to calculate inputs for downstream processes."
        sys.exit()

    # if not extend option then need to get block duration
    if not extend:
        if cp.has_option('data', 'block-duration'):
            blockDuration = int(cp.get('data', 'block-duration'))
        elif cp.has_option('data', 'segment-length'):
            s_length = int(cp.get('data', 'segment-length'))
            s_num = int(cp.get('data', 'number-of-segments'))
            s_rate = int(cp.get('data', 'sample-rate'))
            s_overlap = int(cp.get('inspiral', 'segment-overlap'))
            # calculate blockDuration
            blockDuration = (s_length * s_num -
                             (s_num - 1) * s_overlap) / s_rate
        else:
            raise ValueError, "EXIT: Cannot find block-duration in [data] section of configuration file! Either set block-duration or use --extend option."

    # calculate the minimum amount of science time need and how the length of quanta to be added on both ends of the analysis time
    minscilength = blockDuration + 2 * pad_data
    quanta = segmentDuration / 2

    # if extend beyond minscilength; add segments of quanta length to each end of segment
    print ">> Minimum science segment length is: %ss" % minscilength
    print
    if extend:
        print ">> Will extend minimum science segment by quanta of: %ss" % quanta
        print

    ##############################################################################
    # get list of segments for each IFO and put in science txt file
    ##############################################################################

    if not useold:
        # external call to ligolw_segment_query
        query_start = int(grb_time - offset)
        query_end = int(grb_time + offset)
        for ifo in ifolist:
            if cp.has_option('segments', '%s-segments' % ifo.lower()):
                segmentName = cp.get('segments', '%s-segments' % ifo.lower())
                check_segment_availability(grb_name, grb_time, query_start,
                                           query_end, offset, ifo, segmentName)

    ##############################################################################
    # get veto files
    ##############################################################################

    if not useold:
        # get and read veto definer file
        veto_file_url = cp.get('exttrig', 'cvs_veto_definer')
        veto_file_path, headers = urllib.urlretrieve(
            veto_file_url, os.path.basename(veto_file_url))

        # do ligolw_segments_from_cats
        deltat = 500
        args = {
            'start_time': int(grb_time - offset - deltat),
            'end_time': int(grb_time + offset + deltat),
            'veto_file_path': veto_file_path
        }
        cmd = "ligolw_segments_from_cats --database --veto-file={veto_file_path} --separate-categories --gps-start-time {start_time}  --gps-end-time {end_time} --output-dir=. --individual-results".format(
            **args)
        print '>>', cmd
        print
        process = subprocess.Popen([cmd],
                                   shell=True,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        output, err = process.communicate()

        # Rename the veto files for easier handling
        veto_files = glob.glob(
            './*VETOTIME_CAT*{start_time}*xml'.format(**args))
        for filename in veto_files:
            p = filename.split('-')
            newname = "%s-%s_grb%s.xml" % (p[0], p[1], grb_name)
            shutil.move(filename, newname)

    ##############################################################################
    # look in txt files and find segment with onsource and minscilength
    ##############################################################################

    # create segment that is +/- offset of GRB time
    onsource        = [grb_time - int(cp.get('exttrig','onsource_left')),\
         grb_time + int(cp.get('exttrig','onsource_right'))]
    onSourceSegment = segments.segment(onsource[0], onsource[1])

    # get segments in science txt files; see if segments length at least minscilength
    # if no then discard them; if yes then put in segdict[ifo] and ifo in ifolist
    basic_segdict = segments.segmentlistdict()
    segdict = segments.segmentlistdict()
    for ifo in ifolist:
        # check configuration file
        if not cp.has_option('segments', '%s-segments' % ifo.lower()):
            continue
        # find segment with onsource and check it is at least minscilength
        ifo_segfile = '%s-science_grb%s.txt' % (ifo, grb_name)
        if os.path.exists(ifo_segfile):
            tmplist = segmentsUtils.fromsegwizard(open(ifo_segfile))
            try:
                s = tmplist.find(onSourceSegment)
            except ValueError:
                # if onsource not in segments then move on to next IFO
                continue
            if abs(tmplist[s]) >= minscilength:
                segdict[ifo] = segments.segmentlist([tmplist[s]])
            basic_segdict[ifo] = segments.segmentlist([s for s in tmplist])
    ifolist = segdict.keys()

    if len(ifolist) < 2:
        print "EXIT: Less than 2 interferometers have available data!"
        sys.exit()

    ##############################################################################
    # apply vetoes
    ##############################################################################

    # apply
    print ">> Vetoes that overlap with science segments:"
    for ifo in ifolist:
        # flag; True if IFO not vetoed
        cat_flag = True
        for cat in catlist:
            # create list and check for overlaps
            xmlsegfile = "./%s-VETOTIME_CAT%s_grb%s.xml" % (ifo, cat, grb_name)
            if os.path.exists(xmlsegfile) and cat_flag:
                testseg = segments.segment(
                    [segdict[ifo][0][0], segdict[ifo][0][1]])
                list_overlaps = []

                # load the content of the veto-file
                xmldoc = utils.load_filename(xmlsegfile,
                                             gz=False,
                                             contenthandler=lsctables.use_in(
                                                 ligolw.LIGOLWContentHandler))
                segs = table.get_table(xmldoc,
                                       lsctables.SegmentTable.tableName)
                segdefs = table.get_table(xmldoc,
                                          lsctables.SegmentDefTable.tableName)

                # create a mapping between the segments and their definitions
                defdict = {}
                for segdef in segdefs:
                    defdict[segdef.segment_def_id] = segdef.name

                # find veto segments that intersect science segment of IFO with onsource
                for seg in segs:
                    s = segments.segment(seg.start_time, seg.end_time)
                    if testseg.intersects(s):
                        id = seg.segment_def_id
                        list_overlaps.append(
                            [defdict[id], seg.start_time, seg.end_time])

                # cut veto CAT1 segments out of science segment; CAT1,2,3 veto IFO if in onsource will veto IFO
                for name, segstart, segend in list_overlaps:
                    print "CAT%s IFO %s, Start: %d End: %d because %s" % (
                        cat, ifo, segstart, segend, name)
                    s = segments.segment(segstart, segend)
                    if onSourceSegment.intersects(s):
                        segdict.pop(ifo, None)
                        cat_flag = False
                        break
                if cat == 1:
                    vetoes = segments.segmentlist(
                        segments.segment(s[1], s[2]) for s in list_overlaps)
                    segdict[ifo] -= vetoes

    # get list of IFOs
    ifolist = segdict.keys()

    print

    if len(ifolist) < 2:
        print "EXIT: After vetoes, less than 2 interferometers have available data!"
        sys.exit()

    ##############################################################################
    # determine segment to be analyzed
    ##############################################################################

    # sort from most sensitive to least sensitive
    def sensitivity_cmp(ifo1, ifo2):
        return cmp(sensitivity_dict[ifo1], sensitivity_dict[ifo2])

    ifolist.sort(sensitivity_cmp)

    # compares IFOs and finds the segment to analyze
    # now try getting off-source segments
    # start trying with all IFOs
    # work our way through subsets; beginning with most sensitive combinations
    test_combos = itertools.chain(*itertools.imap(
        lambda n: iterutils.choices(ifolist, n), xrange(len(ifolist), 1, -1)))
    off_source_segment = None
    the_ifo_combo = []
    for ifo_combo in test_combos:
        # find conincident science time of IFOs
        trial_seglist = segdict.intersection(ifo_combo)
        if abs(trial_seglist) < minscilength:
            print "EXIT: IFOs do not overlap enough for minscilength", abs(
                trial_seglist)
            sys.exit()
        else:
            pass

            # find segment with grb_time inside
            try:
                super_seg = trial_seglist[trial_seglist.find(
                    onSourceSegment)].contract(pad_data)
            except ValueError:
                print "EXIT: ValueError with super_seg"
                sys.exit()
            if onSourceSegment not in super_seg:
                print "EXIT: onsource not in super_seg"
                sys.exit()

            # find int division of onsource time intervals before and after grb
            tplus = (super_seg[1] - onSourceSegment[1])
            tminus = (onSourceSegment[0] - super_seg[0])

            # get minimum number of onsource time intervals in offsource
            tmin = (minscilength - 2 * pad_data - abs(onSourceSegment))

            # cut to get minscilength
            if tplus + tminus > tmin:
                half_max = tmin // 2
                if tplus < half_max:
                    print ">> Left sticks out so cut it."
                    remainder = tmin - tplus
                    tminus = min(remainder, tminus)
                elif tminus < half_max:
                    print ">> Right sticks out so cut it."
                    remainder = tmin - tminus
                    tplus = min(remainder, tplus)
                else:
                    print ">> Both sides stick out so cut as symmetrically as possible."
                    tminus = half_max
                    tplus = tmin - half_max  # odd trial sticks out on right
            if tplus + tminus < tmin:
                offsource = None
            temp_segment = segments.segment(
                (onSourceSegment[0] - tminus - pad_data,
                 onSourceSegment[1] + tplus + pad_data))

            if temp_segment is not None:
                offsource = temp_segment
                ifolist = list(ifo_combo)

                if extend:
                    # extend as many adjacent 128 second blocks as possible
                    begin_time = offsource[0] - quanta * (
                        abs(super_seg[0] - offsource[0]) // quanta)
                    end_time = offsource[1] + quanta * (
                        abs(super_seg[1] - offsource[1]) // quanta)
                    offsource = segments.segment((begin_time, end_time))

                break
    print

    # check length at least minscilength
    if abs(offsource) < minscilength:
        print abs(offsource), minscilength
        print "EXIT: Calculated offsource segment but less than minscilength!"
        sys.exit()

    # check if no detectors can be used then exit
    if len(ifolist) < 2:
        print "EXIT: Calculated offsource segment but less than two IFOs!"
        sys.exit()

    # check edge case
    if abs(offsource[0] -
           onsource[0]) < pad_data or abs(offsource[1] -
                                          onsource[1]) < pad_data:
        print "WARNING: GRB time close to edge of offsource. Its within the padding time."

    # concatenate "H1L1V1", etc.
    ifolist.sort()
    ifotag = "".join(ifolist)
    print ">> Offsource segment for %s GRB is:" % ifotag
    print "Start:", offsource[0], "End:", offsource[1], "Duration:", offsource[
        1] - offsource[0], "Left:", grb_time - offsource[
            0], "Right:", offsource[1] - grb_time
    print

    ##############################################################################
    # output
    ##############################################################################

    # write analyse txt files
    for ifo in basic_ifolist:
        if ifo in ifolist:
            analysisFP = open('%s-analyse_grb%s.txt' % (ifo, grb_name), 'w')
            analysisFP.write('# seg\t start    \t stop    \t duration\n')
            analysisFP.write(
                '0\t %d\t %d\t %d\n' %
                (offsource[0], offsource[1], offsource[1] - offsource[0]))
        else:
            analysisFP = open('%s-analyse_grb%s.txt' % (ifo, grb_name), 'w')
            analysisFP.write('# seg\t start    \t stop    \t duration\n')

    # calculate blockDuration
    blockDuration = int(abs(offsource[0] - offsource[1])) - 2 * pad_data

    # calculate psdDuration
    # take the largest power of two not exceeding blockDuration / psdRatio
    # (a bit-twiddling approach would be faster, but this is more readable)
    min_psdDuration = int(cp.get('exttrig', 'min-psd-length'))
    psdRatio = int(cp.get('exttrig', 'psd-ratio'))
    psdDuration = 2**int(numpy.log2(blockDuration / psdRatio))
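    # e.g. blockDuration = 2190 and psdRatio = 8 give 2190 / 8 = 273, and the
    # largest power of two not exceeding that is 2**8 = 256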
    if psdDuration < min_psdDuration:
        print "EXIT: PSD segment duration is too short. It is %ds but needs to be at least %ds in length." % (
            psdDuration, min_psdDuration)
        sys.exit()

    # some downstream processes (e.g. lalapps_tmpltbank) cannot handle these inputs
    if cp.has_option('data', 'segment-duration'):
        cp.remove_option('data', 'segment-duration')
        cp.remove_option('data', 'block-duration')

    # some downstream processes (e.g. lalapps_tmpltbank) require these options to run
    print ">> Using sample rate of %d to calculate inputs for downstream processes." % sampleRate
    print
    segmentLength = segmentDuration * sampleRate
    # subtract 1 because one segment length is overlapped
    segmentCount = blockDuration / (segmentDuration / 2) - 1
    segmentOverlap = segmentLength / 2
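    # e.g. segmentDuration = 256 s at sampleRate = 4096 Hz gives a
    # segment-length of 1048576 samples and a segment-overlap of 524288
    # samples; a 2176 s block then yields 2176 / 128 - 1 = 16 segments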
    cp.set('data', 'segment-length', segmentLength)
    cp.set('data', 'number-of-segments', segmentCount)
    cp.set('inspiral', 'segment-overlap', segmentOverlap)

    # set values for [coh_PTF_inspiral] section in configuration file
    cp.set('coh_PTF_inspiral', 'block-duration', blockDuration)
    cp.set('coh_PTF_inspiral', 'segment-duration', segmentDuration)
    cp.set('coh_PTF_inspiral', 'psd-segment-duration', psdDuration)
    cp.set('coh_PTF_inspiral', 'pad-data', pad_data)
    f = open('grb%s.ini' % grb_name, 'w')
    cp.write(f)
    f.close()
    print ">> The [data] section of the configuration file has been edited with the following values:"
    print "sample-rate=", sampleRate
    print "segment-length=", segmentLength
    print "number-of-segments=", segmentCount
    print "segment-overlap=", segmentOverlap
    print
    print ">> The [coh_PTF_inspiral] section of the configuration file has been edited with the following values:"
    print "block-duration =", blockDuration
    print "segment-duration =", segmentDuration
    print "psd-segment-duration =", psdDuration
    print "pad-data =", pad_data
    print

    # plot segments
    offSourceSegment = segments.segment(offsource[0], offsource[1])
    plot_window = segments.segment(grb_time - offset, grb_time + offset)
    plot_segments(basic_segdict, onSourceSegment, offSourceSegment, grb_time,
                  plot_window, "segment_plot_%s.png" % grb_name, grb_name)

    # make xml file
    if make_xml:
        # create a new xml document with an ExtTriggers Table
        xmldoc = ligolw.Document()
        xmldoc.appendChild(ligolw.LIGO_LW())
        tbl = lsctables.New(lsctables.ExtTriggersTable)
        xmldoc.childNodes[-1].appendChild(tbl)

        # set the values we need
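        # most columns below are placeholders; the physically meaningful
        # entries for this GRB are event_ra, event_dec, start_time and
        # event_number_grb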
        row = tbl.RowType()
        row.process_id = None
        row.det_alts = None
        row.det_band = None
        row.det_fluence = None
        row.det_fluence_int = None
        row.det_name = None
        row.det_peak = None
        row.det_peak_int = None
        row.det_snr = ''
        row.email_time = 0
        row.event_dec = float(grb_dec)
        row.event_dec_err = 0.0
        row.event_epoch = ''
        row.event_err_type = ''
        row.event_ra = float(grb_ra)
        row.event_ra_err = 0.0
        row.start_time = grb_time
        row.start_time_ns = 0
        row.event_type = ''
        row.event_z = 0.0
        row.event_z_err = 0.0
        row.notice_comments = ''
        row.notice_id = ''
        row.notice_sequence = ''
        row.notice_time = 0
        row.notice_type = ''
        row.notice_url = ''
        row.obs_fov_dec = 0.0
        row.obs_fov_dec_width = 0.0
        row.obs_fov_ra = 0.0
        row.obs_fov_ra_width = 0.0
        row.obs_loc_ele = 0.0
        row.obs_loc_lat = 0.0
        row.obs_loc_long = 0.0
        row.ligo_fave_lho = 0.0
        row.ligo_fave_llo = 0.0
        row.ligo_delay = 0.0
        row.event_number_gcn = 9999
        row.event_number_grb = grb_name
        row.event_status = 0

        # insert into the table and write file
        tbl.extend([row])
        filename = 'grb%s.xml' % grb_name
        utils.write_filename(xmldoc, filename)

    # plot all vetoes
    if make_plots:
        vetodict = segments.segmentlistdict()
        for cat in catlist:
            for ifo in ifolist:
                vetofile = "%s-VETOTIME_CAT%s_grb%s.xml" % (ifo, cat, grb_name)
                xmldoc = utils.load_filename(vetofile,
                                             gz=False,
                                             contenthandler=lsctables.use_in(
                                                 ligolw.LIGOLWContentHandler))
                segs = table.get_table(xmldoc,
                                       lsctables.SegmentTable.tableName)
                segdefs = table.get_table(xmldoc,
                                          lsctables.SegmentDefTable.tableName)
                vetodict[ifo] = segments.segmentlist(
                    segments.segment(s.start_time, s.end_time) for s in segs)

            if vetodict:
                plot_segments(vetodict, onSourceSegment, offSourceSegment,
                              grb_time, plot_window,
                              "veto_plot_CAT%s_%s.png" % (cat, grb_name),
                              "%s CAT%s" % (grb_name, cat))

    # return
    return 'grb%s.ini' % grb_name, ifolist, onSourceSegment, offSourceSegment
Example n. 55
0
def do_summary_table(xmldoc, sim_tree, liv_tree):
    try:
        search_summary = table.get_table(
            xmldoc, lsctables.SearchSummaryTable.tableName)
    except ValueError:
        search_summary = lsctables.New(lsctables.SearchSummaryTable, [
            "process_id", "nevents", "ifos", "comment", "in_start_time",
            "in_start_time_ns", "out_start_time", "out_start_time_ns",
            "in_end_time", "in_end_time_ns", "out_end_time", "out_end_time_ns"
        ])
        xmldoc.childNodes[0].appendChild(search_summary)

    process_id_type = type(
        table.get_table(xmldoc, lsctables.ProcessTable.tableName).next_id)
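    # next_id is an ilwd:char ID instance, so calling its type with an integer
    # run number builds a well-formed process_id for that run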

    runids = set()
    for i in range(0, sim_tree.GetEntries()):
        sim_tree.GetEntry(i)

        # Id for the run processed by WaveBurst -> process ID
        if sim_tree.run in runids:
            continue

        row = search_summary.RowType()
        row.process_id = process_id_type(sim_tree.run)
        runids.add(sim_tree.run)

        # Search Summary Table
        # events found in the run -> nevents
        setattr(row, "nevents", sim_tree.GetEntries())

        # Imstruments involved in the search
        row.ifos = lsctables.ifos_from_instrument_set(
            get_ifos_from_index(
                branch_array_to_list(sim_tree.ifo, sim_tree.ndim)))
        setattr(row, "comment", "waveburst")

        # Begin and end time of the segment
        # TODO: This is a typical offset on either side of the job for artifacts
        # It can, and probably will change in the future, and should not be hardcoded
        # TODO: Make this work properly. We need a gps end from the livetime
        waveoffset = 8
        livetime = 600
        # Scanning the livetime tree for the true livetime is far too slow,
        # so a fixed value is used for now:
        #live_entries = liv_tree.GetEntries()
        #for l in range(0, live_entries):
        #    liv_tree.GetEntry(l)
        #    livetime = max(livetime, liv_tree.live)
        #if livetime < 0:
        #    sys.exit("Could not find livetime, cannot fill all of summary table.")
        # in -- with waveoffset
        # out -- without waveoffset
        row.set_in(
            segments.segment(LIGOTimeGPS(sim_tree.gps - waveoffset),
                             LIGOTimeGPS(sim_tree.gps + livetime +
                                         waveoffset)))
        row.set_out(
            segments.segment(LIGOTimeGPS(sim_tree.gps),
                             LIGOTimeGPS(sim_tree.gps + livetime)))
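        # e.g. gps = 1000000000 with waveoffset = 8 and livetime = 600 gives
        # in = [999999992, 1000000608) and out = [1000000000, 1000000600)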

        search_summary.append(row)
Example n. 56
0


if __name__ == "__main__":
    # get the command line arguments
    opts, args = parse_args()

    # get time, RA, DEC and name of GRB; get offset to search from GRB time
    if opts.grb_file:
        xmldoc = utils.load_filename(opts.grb_file,
                                     gz=opts.grb_file.endswith('.gz'),
                                     contenthandler=lsctables.use_in(
                                         ligolw.LIGOLWContentHandler))
        ext_table = table.get_table(xmldoc,
                                    lsctables.ExtTriggersTable.tableName)
        grb_time = ext_table[0].start_time
        grb_name = os.path.basename(opts.grb_file)[3:-4]
        grb_ra = ext_table[0].event_ra
        grb_dec = ext_table[0].event_dec
    else:
        grb_name = opts.name
        grb_time = int(opts.time)
        grb_ra = float(opts.ra)
        grb_dec = float(opts.dec)

    # run
    exttrig_dataquery(grb_name, grb_time, grb_ra, grb_dec, opts.offset,
                      opts.config_file, opts.extend, opts.useold,
                      opts.make_plots, opts.make_xml)
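
# a possible command line for this script (option names inferred from the
# opts attributes used above; the GRB values are purely illustrative):
#   python <this script> --name 090510 --time 926492245 --ra 333.6 \
#       --dec -26.6 --offset 2500 --config-file example.ini \
#       --make-plots --make-xml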
    print "Sim XML loaded, event time: %s" % str(sim_row.get_end())
elif opts.event_time is not None:
    event_time = glue.lal.LIGOTimeGPS(opts.event_time)
    print "Event time from command line: %s" % str(event_time)
else:
    raise ValueError(
        "Either --coinc-xml or --event-time must be provided to parse event time."
    )

# get masses from the command line, the sim row, or the sngl_inspiral table
if opts.mass1 is not None and opts.mass2 is not None:
    m1, m2 = opts.mass1, opts.mass2
elif sim_row:
    m1, m2 = sim_row.mass1, sim_row.mass2
elif xmldoc is not None:
    sngl_inspiral_table = table.get_table(
        xmldoc, lsctables.SnglInspiralTable.tableName)
    assert len(sngl_inspiral_table) == len(coinc_row.ifos.split(","))
    m1, m2 = None, None
    for sngl_row in sngl_inspiral_table:
        # NOTE: gstlal is exact match, but other pipelines may not be
        assert m1 is None or (sngl_row.mass1 == m1 and sngl_row.mass2 == m2)
        m1, m2 = sngl_row.mass1, sngl_row.mass2
else:
    raise ValueError(
        "Need either --mass1 --mass2 or --coinc-xml to retrieve masses.")

m1_SI = m1 * lal.MSUN_SI
m2_SI = m2 * lal.MSUN_SI
print "Computing marginalized likelihood in a neighborhood about intrinsic parameters mass 1: %f, mass 2 %f" % (
    m1, m2)
Example n. 58
0
def gracedb_sky_map(coinc_file,
                    psd_file,
                    waveform,
                    f_low,
                    min_distance=None,
                    max_distance=None,
                    prior_distance_power=None,
                    nside=-1):
    # LIGO-LW XML imports.
    from . import ligolw
    from glue.ligolw import table as ligolw_table
    from glue.ligolw import utils as ligolw_utils
    from glue.ligolw import lsctables
    import lal.series

    # Determine approximant, amplitude order, and phase order from command line arguments.
    approximant, amplitude_order, phase_order = \
        timing.get_approximant_and_orders_from_string(waveform)

    # Read input file.
    xmldoc, _ = ligolw_utils.load_fileobj(
        coinc_file, contenthandler=ligolw.LSCTablesContentHandler)

    # Locate the tables that we need.
    coinc_inspiral_table = ligolw_table.get_table(
        xmldoc, lsctables.CoincInspiralTable.tableName)
    coinc_map_table = ligolw_table.get_table(xmldoc,
                                             lsctables.CoincMapTable.tableName)
    sngl_inspiral_table = ligolw_table.get_table(
        xmldoc, lsctables.SnglInspiralTable.tableName)

    # Locate the sngl_inspiral rows that we need.
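    # (coinc_event_map rows join this coinc_event_id to the event_ids of the
    # participating sngl_inspiral triggers)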
    coinc_inspiral = coinc_inspiral_table[0]
    coinc_event_id = coinc_inspiral.coinc_event_id
    event_ids = [
        coinc_map.event_id for coinc_map in coinc_map_table
        if coinc_map.coinc_event_id == coinc_event_id
    ]
    sngl_inspirals = [
        next(sngl_inspiral for sngl_inspiral in sngl_inspiral_table
             if sngl_inspiral.event_id == event_id)
        for event_id in event_ids]
    instruments = set(sngl_inspiral.ifo for sngl_inspiral in sngl_inspirals)

    # Read PSDs.
    if psd_file is None:
        psds = None
    else:
        xmldoc, _ = ligolw_utils.load_fileobj(
            psd_file, contenthandler=lal.series.PSDContentHandler)
        psds = lal.series.read_psd_xmldoc(xmldoc)

        # Rearrange PSDs into the same order as the sngl_inspirals.
        psds = [psds[sngl_inspiral.ifo] for sngl_inspiral in sngl_inspirals]

        # Interpolate PSDs.
        psds = [
            timing.InterpolatedPSD(filter.abscissa(psd), psd.data.data)
            for psd in psds
        ]

    # TOA+SNR sky localization
    prob, epoch, elapsed_time = ligolw_sky_map(sngl_inspirals,
                                               approximant,
                                               amplitude_order,
                                               phase_order,
                                               f_low,
                                               min_distance,
                                               max_distance,
                                               prior_distance_power,
                                               nside=nside,
                                               psds=psds)

    return prob, epoch, elapsed_time, instruments
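
# a minimal usage sketch (file names, waveform string and f_low are only
# illustrative):
#   coinc_file = open('coinc.xml', 'rb')
#   psd_file = open('psd.xml', 'rb')
#   prob, epoch, elapsed_time, instruments = gracedb_sky_map(
#       coinc_file, psd_file, 'TaylorF2threePointFivePN', 30.0)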
Example n. 59
0
# imports assumed for this fragment (argparse, glob, tqdm and the glue.ligolw
# modules are all referenced below)
import argparse
import glob

import tqdm

from glue.ligolw import lsctables
from glue.ligolw import table
from glue.ligolw import utils as ligolw_utils
from glue.ligolw.ligolw import LIGOLWContentHandler

lsctables.use_in(LIGOLWContentHandler)

parser = argparse.ArgumentParser()
parser.add_argument('--injection-file', type=str, required=True)
parser.add_argument('--trigger-glob', type=str, required=True)
parser.add_argument('--plot-file', type=str, required=True)
parser.add_argument('--sens-plot-file', type=str)
parser.add_argument('--x-axis', type=str, choices=['time', 'mchirp'],
                    required=True)
args = parser.parse_args()

# read injections

doc = ligolw_utils.load_filename(args.injection_file, False,
                                 contenthandler=LIGOLWContentHandler)
sim_table = table.get_table(doc, lsctables.SimInspiralTable.tableName)
injections = []
for sim in sim_table:
    injections.append((float(sim.get_time_geocent()),
                       sim.mchirp,
                       sim.alpha1,
                       sim.alpha2,
                       sim.alpha3))
del doc
print('{} injections'.format(len(injections)))

# read triggers

triggers = []
for file_path in tqdm.tqdm(glob.glob(args.trigger_glob)):
    doc = ligolw_utils.load_filename(file_path, False, contenthandler=LIGOLWContentHandler)
Example n. 60
0
# This script reads a template bank, sets the f_final to zero for all
# templates and writes it to a new bank. This is needed when banks
# created by Sbank with ROMs are used with TaylorF2 terminating at ISCO.

import sys
from glue.ligolw import ligolw
from glue.ligolw import table
from glue.ligolw import lsctables
from glue.ligolw import utils as ligolw_utils
from glue import git_version


class ContentHandler(ligolw.LIGOLWContentHandler):
    pass


lsctables.use_in(ContentHandler)

indoc = ligolw_utils.load_filename(sys.argv[1],
                                   verbose=True,
                                   contenthandler=ContentHandler)

template_bank_table = table.get_table(indoc,
                                      lsctables.SnglInspiralTable.tableName)

for template in template_bank_table:
    template.f_final = 0

ligolw_utils.write_filename(indoc, sys.argv[2], gz=sys.argv[2].endswith('.gz'))
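
# example invocation (the script and bank file names are only illustrative):
#   python zero_f_final.py SBANK-TEMPLATES.xml.gz SBANK-TEMPLATES-FFINAL0.xml.gz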