def sort_trigs(trial_dict, trigs, slide_dict, seg_dict):
    """Constructs sorted triggers from a trials dictionary"""

    sorted_trigs = {}

    # Begin by sorting the triggers into each slide
    # New seems pretty slow, so run it once and then use deepcopy
    tmp_table = glsctables.New(glsctables.MultiInspiralTable)
    for slide_id in slide_dict:
        sorted_trigs[slide_id] = copy.deepcopy(tmp_table)
    for trig in trigs:
        sorted_trigs[int(trig.time_slide_id)].append(trig)

    for slide_id in slide_dict:
        # These can only *reduce* the analysis time
        curr_seg_list = seg_dict[slide_id]

        # Check the triggers are all in the analysed segment lists
        for trig in sorted_trigs[slide_id]:
            if trig.end_time not in curr_seg_list:
                # This can happen if the trigger is on the segment boundary,
                # so check whether the trigger is within 1/100 of a second of
                # the list
                if trig.get_end() + 0.01 in curr_seg_list:
                    continue
                if trig.get_end() - 0.01 in curr_seg_list:
                    continue
                err_msg = "Triggers found in input files not in the list of "
                err_msg += "analysed segments. This should not happen."
                raise RuntimeError(err_msg)
        # END OF CHECK #

        # The below line works like the inverse of .veto and only returns trigs
        # that are within the segment specified by trial_dict[slide_id]
        sorted_trigs[slide_id] = \
            sorted_trigs[slide_id].vetoed(trial_dict[slide_id])

    return sorted_trigs
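A hedged usage sketch for sort_trigs: every input name below is illustrative and would normally come from the PyGRB post-processing readers (see the multi-inspiral reader in Example #23 further down).

# Sketch only (names assumed, not defined here): trigs is a
# MultiInspiralTable, slide_dict maps slide IDs to offset dictionaries, and
# trial_dict/seg_dict map slide IDs to glue.segments segmentlist objects.
sorted_trigs = sort_trigs(trial_dict, trigs, slide_dict, seg_dict)
for slide_id in slide_dict:
    print("slide %s: %d triggers in trials"
          % (slide_id, len(sorted_trigs[slide_id])))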
Example #2
File: inject.py Project: yuzuri/pycbc
    def write(filename, samples, write_params=None, static_args=None):
        """Writes the injection samples to the given xml.

        Parameters
        ----------
        filename : str
            The name of the file to write to.
        samples : io.FieldArray
            FieldArray of parameters.
        write_params : list, optional
            Only write the given parameter names. All given names must be keys
            in ``samples``. Default is to write all parameters in ``samples``.
        static_args : dict, optional
            Dictionary mapping static parameter names to values. These are
            written to the ``attrs``.
        """
        xmldoc = ligolw.Document()
        xmldoc.appendChild(ligolw.LIGO_LW())
        simtable = lsctables.New(lsctables.SimInspiralTable)
        xmldoc.childNodes[0].appendChild(simtable)
        if static_args is None:
            static_args = {}
        if write_params is None:
            write_params = samples.fieldnames
        for ii in range(samples.size):
            sim = lsctables.SimInspiral()
            # initialize all elements to None
            for col in sim.__slots__:
                setattr(sim, col, None)
            for field in write_params:
                data = samples[ii][field]
                set_sim_data(sim, field, data)
            # set any static args
            for (field, value) in static_args.items():
                set_sim_data(sim, field, value)
            simtable.append(sim)
        ligolw_utils.write_filename(xmldoc, filename,
                                    gz=filename.endswith('gz'))
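A hedged usage sketch for the writer above, assuming pycbc.io.FieldArray and its from_kwargs constructor are available and that write is exposed as a static method (file name and values illustrative):

# Build a two-injection FieldArray and write it to XML.
from pycbc.io import FieldArray
samples = FieldArray.from_kwargs(mass1=[10.0, 1.4], mass2=[5.0, 1.4])
write("injections.xml.gz", samples, static_args={'approximant': 'SEOBNRv4'})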
Example #3
def write_to_xml(cells, intr_prms, pin_prms={}, fvals=None, fname=None, verbose=False):
    """
    Write a set of cells, with dimensions corresponding to intr_prms, to an XML file as sim_inspiral rows.
    """
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    procrow = process.append_process(xmldoc, program=sys.argv[0])
    procid = procrow.process_id
    process.append_process_params(xmldoc, procrow, process.process_params_from_dict(opts.__dict__))

    rows = ["simulation_id", "process_id", "numrel_data"]
    rows += list(intr_prms)
    rows += list(pin_prms)
    if fvals is not None:
        rows.append("alpha1")
    sim_insp_tbl = lsctables.New(lsctables.SimInspiralTable, rows)
    for itr, intr_prm in enumerate(cells):
        sim_insp = sim_insp_tbl.RowType()
        # FIXME: Need better IDs
        sim_insp.numrel_data = "INTR_SET_%d" % itr
        sim_insp.simulation_id = ilwd.ilwdchar("sim_inspiral:sim_inspiral_id:%d" % itr)
        sim_insp.process_id = procid
        if fvals:
            sim_insp.alpha1 = fvals[itr]
        for p, v in zip(intr_prms, intr_prm._center):
            setattr(sim_insp, p, v)
        for p, v in pin_prms.iteritems():
            setattr(sim_insp, p, v)
        sim_insp_tbl.append(sim_insp)

    xmldoc.childNodes[0].appendChild(sim_insp_tbl)
    if fname is None:
        channel_name = ["H=H", "L=L"]
        ifos = "".join([o.split("=")[0][0] for o in channel_name])
        #start = int(event_time)
        start = 0
        fname = "%s-MASS_POINTS-%d-1.xml.gz" % (ifos, start)
    utils.write_filename(xmldoc, fname, gz=True, verbose=verbose)
Example #4
def do_process_table(xmldoc, sim_tree, liv_tree):
    try:
        process_table = table.get_table(xmldoc,
                                        lsctables.ProcessTable.tableName)
    except ValueError:
        process_table = lsctables.New(lsctables.ProcessTable, [
            "process_id", "ifos", "comment", "program", "start_time", "jobid",
            "end_time"
        ])
        xmldoc.childNodes[0].appendChild(process_table)

    runids = set()
    for i in range(0, sim_tree.GetEntries()):
        sim_tree.GetEntry(i)

        # Id for the run processed by WaveBurst -> process ID
        if sim_tree.run in runids:
            continue

        row = process_table.RowType()
        row.process_id = type(process_table.next_id)(sim_tree.run)
        runids.add(sim_tree.run)

        # Instruments involved in the search
        row.ifos = lsctables.ifos_from_instrument_set(
            get_ifos_from_index(
                branch_array_to_list(sim_tree.ifo, sim_tree.ndim)))
        row.comment = "waveburst"
        row.program = "waveburst"

        # Begin and end time of the segment
        # TODO: This is a typical offset on either side of the job for artifacts
        # It can, and probably will change in the future, and should not be hardcoded
        setattr(row, "start_time", None)
        setattr(row, "end_time", None)
        setattr(row, "jobid", sim_tree.run)

        process_table.append(row)
Example #5
def add_to_segment(xmldoc, proc_id, seg_def_id, sgmtlist):
    try:
        segtable = lsctables.SegmentTable.get_table(xmldoc)
    except ValueError:
        segtable = lsctables.New(lsctables.SegmentTable,
                                 columns=[
                                     "process_id", "segment_def_id",
                                     "segment_id", "start_time",
                                     "start_time_ns", "end_time", "end_time_ns"
                                 ])
        xmldoc.childNodes[0].appendChild(segtable)

    for seg in sgmtlist:
        segment = lsctables.Segment()
        segment.process_id = proc_id
        segment.segment_def_id = seg_def_id
        segment.segment_id = segtable.get_next_id()
        segment.start_time = seg[0]
        segment.start_time_ns = 0
        segment.end_time = seg[1]
        segment.end_time_ns = 0

        segtable.append(segment)
Example #6
def add_to_segment_definer(xmldoc, proc_id, ifo, name, version, comment=''):
    try:
        seg_def_table = lsctables.SegmentDefTable.get_table(xmldoc)
    except ValueError:
        seg_def_table = lsctables.New(lsctables.SegmentDefTable,
                                      columns=[
                                          "process_id", "segment_def_id",
                                          "ifos", "name", "version", "comment"
                                      ])
        xmldoc.childNodes[0].appendChild(seg_def_table)

    seg_def_id = seg_def_table.get_next_id()
    segment_definer = lsctables.SegmentDef()
    segment_definer.process_id = proc_id
    segment_definer.segment_def_id = seg_def_id
    segment_definer.ifos = ifo
    segment_definer.name = name
    segment_definer.version = version
    segment_definer.comment = comment

    seg_def_table.append(segment_definer)

    return seg_def_id
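Taken together, the helpers in Examples #5 and #6 build a complete segment document. A minimal sketch under the glue-era API (program name, ifo, and GPS times illustrative):

from glue.ligolw import ligolw, lsctables, utils
from glue.ligolw.utils import process as ligolw_process

xmldoc = ligolw.Document()
xmldoc.appendChild(ligolw.LIGO_LW())
proc_id = ligolw_process.register_to_xmldoc(xmldoc, "segment_demo",
                                            {}).process_id

# One segment_definer row, then the segments that reference it
seg_def_id = add_to_segment_definer(xmldoc, proc_id, "H1", "DEMO_SEGMENTS", 1)
add_to_segment(xmldoc, proc_id, seg_def_id, [(1000000000, 1000000100)])
utils.write_filename(xmldoc, "demo_segments.xml")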
Example #7
def ReadSnglInspiralFromFiles(fileList, verbose=False, filterFunc=None):
  """
  Read the SnglInspiralTables from a list of files.
  If filterFunc is not None, only keep triggers for which filterFunc
  evaluates to True.  Ex.: filterFunc=lambda sng: sng.snr >= 6.0

  @param fileList: list of input files
  @param verbose: print progress
  """
  # NOTE: this function no longer carries out event ID mangling (AKA
  # reassignment). Please adjust calling codes accordingly!
  # This means that identical event IDs produced by lalapps_thinca in
  # non-slide files having the same GPS start time will stay identical,
  # affecting zerolag and injection runs made over the same data.
  #
  # In consequence, if the calling code is going to reconstruct coincs
  # from the sngl event IDs, and if these include multiple injection
  # runs, coinc finding should be done one file at a time - see the
  # readCoincInspiralFromFiles function in CoincInspiralUtils.py

  sngls = lsctables.New(lsctables.SnglInspiralTable, \
      columns=lsctables.SnglInspiralTable.loadcolumns)

  lsctables.use_in(ExtractSnglInspiralTableLIGOLWContentHandler)
  for i,file in enumerate(fileList):
    if verbose: print str(i+1)+"/"+str(len(fileList))+": "
    xmldoc = utils.load_filename(file, verbose=verbose, contenthandler=ExtractSnglInspiralTableLIGOLWContentHandler)
    try:
      sngl_table = table.get_table(xmldoc, lsctables.SnglInspiralTable.tableName)
      if filterFunc is not None:
        iterutils.inplace_filter(filterFunc, sngl_table)
    except ValueError: #some xml files have no sngl table, that's OK
      sngl_table = None
    if sngl_table: sngls.extend(sngl_table)
    xmldoc.unlink()    #free memory

  return sngls
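A hypothetical call (file names illustrative), applying the SNR cut suggested in the docstring above:

sngls = ReadSnglInspiralFromFiles(
    ["H1-INSPIRAL-100-1.xml.gz", "L1-INSPIRAL-100-1.xml.gz"],
    verbose=True, filterFunc=lambda sng: sng.snr >= 6.0)
print("%d triggers survived the cut" % len(sngls))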
Example #8
def add_to_segment_summary_ns(xmldoc,
                              proc_id,
                              seg_def_id,
                              sgmtlist,
                              comment=''):
    try:
        seg_sum_table = table.get_table(xmldoc,
                                        lsctables.SegmentSumTable.tableName)
    except ValueError:
        seg_sum_table = lsctables.New(lsctables.SegmentSumTable,
                                      columns=[
                                          "process_id", "segment_def_id",
                                          "segment_sum_id", "start_time",
                                          "start_time_ns", "end_time",
                                          "end_time_ns", "comment"
                                      ])
        xmldoc.childNodes[0].appendChild(seg_sum_table)

    for seg in sgmtlist:
        segment_sum = lsctables.SegmentSum()
        segment_sum.process_id = proc_id
        segment_sum.segment_def_id = seg_def_id
        segment_sum.segment_sum_id = seg_sum_table.get_next_id()
        seconds, nanoseconds = output_microseconds(seg[0])
        segment_sum.start_time = seconds
        segment_sum.start_time_ns = nanoseconds
        seconds, nanoseconds = output_microseconds(seg[1])
        segment_sum.end_time = seconds
        segment_sum.end_time_ns = nanoseconds
        #segment_sum.start_time     = seg[0]
        #segment_sum.start_time_ns  = 0
        #segment_sum.end_time       = seg[1]
        #segment_sum.end_time_ns    = 0
        segment_sum.comment = comment

        seg_sum_table.append(segment_sum)
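The helper output_microseconds is not shown in this excerpt; a plausible sketch (name kept, behavior assumed from how its return values are used above) that splits a float GPS time into integer seconds and nanoseconds:

def output_microseconds(gps_time):
    # Assumed behavior: integer seconds plus the fractional part in ns,
    # matching the start_time/start_time_ns assignments above.
    seconds = int(gps_time)
    nanoseconds = int(round((gps_time - seconds) * 1e9))
    return seconds, nanoseconds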
Example #9
    def return_sim_inspirals(self, statistic=None, thresh=0):
        """
    Method to return the sim_inspiral table associated to the coincs.
    If thresh is specified, only return sims from those coincs whose stat
    exceeds thresh (or is under thresh if statistic == far).

    @param statistic: the statistic to use
    @param thresh: the threshold on the statistic
    """
        from glue.ligolw import table
        try:
            simInspirals = table.new_from_template(self.sim_table)
        except:
            simInspirals = lsctables.New(lsctables.SimInspiralTable)
        for coinc in self:
            if statistic == 'far':
                if (hasattr(coinc,"sim")) and \
                    (((coinc.stat <= thresh) and (coinc.stat >= 0)) or (thresh < 0)):
                    simInspirals.append(coinc.sim)
            else:
                if (hasattr(coinc, "sim")) and (coinc.stat >= thresh):
                    simInspirals.append(coinc.sim)

        return simInspirals
Example #10
	def __init__(self, xmldoc, vetoes = None, program = u"inspiral", likelihood_func = None, likelihood_params_func = None):
		snglcoinc.CoincTables.__init__(self, xmldoc)

		#
		# configure the likelihood ratio evaluator
		#

		if (likelihood_func is None) != (likelihood_params_func is None):
			raise ValueError("must provide both a likelihood function and a parameter function or neither")
		self.likelihood_func = likelihood_func
		self.likelihood_params_func = likelihood_params_func

		#
		# create a string uniquifier
		#

		self.uniquifier = {}

		#
		# find the coinc_inspiral table or create one if not found
		#

		try:
			self.coinc_inspiral_table = lsctables.CoincInspiralTable.get_table(xmldoc)
		except ValueError:
			self.coinc_inspiral_table = lsctables.New(lsctables.CoincInspiralTable)
			xmldoc.childNodes[0].appendChild(self.coinc_inspiral_table)

		#
		# extract the coalesced out segment lists from the trigger
		# generator
		#

		self.seglists = ligolw_search_summary.segmentlistdict_fromsearchsummary(xmldoc, program = program).coalesce()
		if vetoes is not None:
			self.seglists -= vetoes
Example #11
def convert_to_sngl_inspiral_table(params, proc_id):
    '''
    Convert a list of m1,m2,spin1z,spin2z values into a basic sngl_inspiral
    table with mass and spin parameters populated and event IDs assigned

    Parameters
    ----------
    params : iterable
        Each entry in the params iterable should be a sequence of
        [mass1, mass2, spin1z, spin2z] in that order
    proc_id : ilwd char
        Process ID to add to each row of the sngl_inspiral table

    Returns
    -------
    SnglInspiralTable
        Bank of templates in SnglInspiralTable format
    '''
    sngl_inspiral_table = lsctables.New(lsctables.SnglInspiralTable)
    col_names = ['mass1','mass2','spin1z','spin2z']

    for values in params:
        tmplt = return_empty_sngl()

        tmplt.process_id = proc_id
        for colname, value in zip(col_names, values):
            setattr(tmplt, colname, value)
        tmplt.mtotal, tmplt.eta = pnutils.mass1_mass2_to_mtotal_eta(
            tmplt.mass1, tmplt.mass2)
        tmplt.mchirp, junk = pnutils.mass1_mass2_to_mchirp_eta(
            tmplt.mass1, tmplt.mass2)
        tmplt.template_duration = 0 # FIXME
        tmplt.event_id = sngl_inspiral_table.get_next_id()
        sngl_inspiral_table.append(tmplt)

    return sngl_inspiral_table
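A short usage sketch; the process ID is illustrative and would normally come from ligolw_process.register_to_xmldoc:

from glue.ligolw import ilwd
proc_id = ilwd.ilwdchar("process:process_id:0")
params = [(10.0, 5.0, 0.0, 0.9), (1.4, 1.4, 0.05, -0.05)]
bank = convert_to_sngl_inspiral_table(params, proc_id)
print("bank contains %d templates" % len(bank))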
Example #12
    def __init__(self, xmldoc, bbdef, sbdef, scndef, process, sngl_type,
                 sim_type, get_sngl_time):
        #
        # store the process row
        #

        self.process = process

        #
        # locate the sngl_inspiral and sim_inspiral tables
        #

        self.sngltable = sngl_type.get_table(xmldoc)
        try:
            self.simtable = sim_type.get_table(xmldoc)
        except ValueError:
            self.simtable = lsctables.SimInspiralTable.get_table(xmldoc)
            print >> sys.stderr, "No SimRingdownTable, use SimInspiralTable instead!"

        #
        # construct the zero-lag time slide needed to cover the
        # instruments listed in all the triggers, then determine
        # its ID (or create it if needed)
        #
        # FIXME:  in the future, the sim_inspiral table should
        # indicate time slide at which the injection was done
        #

        self.tisi_id = ligolw_tisi.get_time_slide_id(xmldoc, {}.fromkeys(
            self.sngltable.getColumnByName("ifo"), 0.0),
                                                     create_new=process)

        #
        # get coinc_definer row for sim_type <--> sngl_type
        # coincs; this creates a coinc_definer table if the
        # document doesn't have one
        #

        self.sb_coinc_def_id = ligolw_coincs.get_coinc_def_id(
            xmldoc,
            sbdef.search,
            sbdef.search_coinc_type,
            create_new=True,
            description=sbdef.description)

        #
        # get coinc_def_id's for sngl_type <--> sngl_type, and
        # the sim_type <--> coinc_event coincs.  set all
        # to None if this document does not contain any sngl_type
        # <--> sngl_type coincs.
        #

        try:
            bb_coinc_def_id = ligolw_coincs.get_coinc_def_id(
                xmldoc,
                bbdef.search,
                bbdef.search_coinc_type,
                create_new=False)
        except KeyError:
            bb_coinc_def_id = None
            self.scn_coinc_def_id = None
        else:
            self.scn_coinc_def_id = ligolw_coincs.get_coinc_def_id(
                xmldoc,
                scndef.search,
                scndef.search_coinc_type,
                create_new=True,
                description=scndef.description)

        #
        # get coinc table, create one if needed
        #

        try:
            self.coinctable = lsctables.CoincTable.get_table(xmldoc)
        except ValueError:
            self.coinctable = lsctables.New(lsctables.CoincTable)
            xmldoc.childNodes[0].appendChild(self.coinctable)
        self.coinctable.sync_next_id()

        #
        # get coinc_map table, create one if needed
        #

        try:
            self.coincmaptable = lsctables.CoincMapTable.get_table(xmldoc)
        except ValueError:
            self.coincmaptable = lsctables.New(lsctables.CoincMapTable)
            xmldoc.childNodes[0].appendChild(self.coincmaptable)

        #
        # index the document
        #
        # FIXME:  type<-->type coincs should be organized by time
        # slide ID, but since injections are only done at zero lag
        # for now this is ignored.
        #

        # index sngl_type table
        index = {}
        for row in self.sngltable:
            index[row.event_id] = row
        # find IDs of type<-->type coincs
        self.coincs = {}
        for coinc in self.coinctable:
            if coinc.coinc_def_id == bb_coinc_def_id:
                self.coincs[coinc.coinc_event_id] = []
        # construct event list for each type<-->type coinc
        for row in self.coincmaptable:
            if row.coinc_event_id in self.coincs:
                self.coincs[row.coinc_event_id].append(index[row.event_id])
        del index
        # sort each event list by end/start time and convert to tuples
        # for speed

        for coinc_event_id, events in self.coincs.iteritems():
            events.sort(key=get_sngl_time)
            self.coincs[coinc_event_id] = tuple(events)
        # convert dictionary to a list

        self.coincs = self.coincs.items()

        #
        # FIXME Is this true for inspirals too?
        # sort sngl_type table by end/start time, and sort the coincs
        # list by the end/start time of the first (earliest) event in
        # each coinc (recall that the event tuple for each coinc
        # has been time-ordered)
        #

        self.sngltable.sort(key=get_sngl_time)
        self.coincs.sort(key=lambda (id, a): get_sngl_time(a[0]))

        #
        # set the window for type_near_time().  this window
        # is the amount of time such that if an injection's end
        # time and an inspiral event's end time differ by more than
        # this it is *impossible* for them to match one another.
        #

        # FIXME I'll just make the windows 1.0 s

        self.search_time_window = 1.0
        self.coinc_time_window = 1.0
Example #13
    # reset rng state
    rng_state = np.load(opts.output_filename + "_checkpoint.rng.npz")
    rng1 = rng_state["state1"]
    rng2 = rng_state["state2"]
    rng3 = rng_state["state3"]
    rng4 = rng_state["state4"]
    np.random.mtrand.set_state(("MT19937", rng1, rng2, rng3, rng4))

else:
    if opts.output_filename.endswith(('.xml', '.xml.gz')):
        # prepare a new XML document
        xmldoc = ligolw.Document()
        xmldoc.appendChild(ligolw.LIGO_LW())
        lsctables.SnglInspiralTable.RowType = SnglInspiralTable
        tbl = lsctables.New(lsctables.SnglInspiralTable)
        xmldoc.childNodes[-1].appendChild(tbl)
    elif opts.output_filename.endswith(('.hdf', '.h5', '.hdf5')):
        # No setup is required for HDF files
        tbl = []
    else:
        err_msg = "File extension is unrecognized. Sbank supports xml and "
        err_msg += "HDF5 file formats. {}".format(opts.output_filename)
        raise ValueError(err_msg)

    # initialize random seed
    np.random.mtrand.seed(opts.seed)

#
# prepare process table with information about the current program
#
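The excerpt cuts off after the comment above. The checkpoint restore near the top of this example has a matching save step elsewhere in sbank; a minimal self-contained sketch of that RNG round trip (file name illustrative):

import numpy as np

# Save: unpack the five-element MT19937 state and store the four data fields
name, state1, state2, state3, state4 = np.random.mtrand.get_state()
np.savez("demo_checkpoint.rng.npz",
         state1=state1, state2=state2, state3=state3, state4=state4)

# Restore: mirror of the block at the top of this example
rng_state = np.load("demo_checkpoint.rng.npz")
np.random.mtrand.set_state(("MT19937",
                            rng_state["state1"], int(rng_state["state2"]),
                            int(rng_state["state3"]),
                            float(rng_state["state4"])))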
Example #14
outdoc = ligolw.Document()
outdoc.appendChild(ligolw.LIGO_LW())
proc_id = ligolw_process.register_to_xmldoc(
    outdoc,
    PROGRAM_NAME,
    options.__dict__,
    ifos=["G1"],
    version=git_version.id,
    cvs_repository=git_version.branch,
    cvs_entry_time=git_version.date).process_id

out_table = lsctables.New(
    inputtabletype,
    columns=[
        'mass1', 'mass2', 'mchirp', 'eta', 'spin1x', 'spin1y', 'spin1z',
        'spin2x', 'spin2y', 'spin2z', 'inclination', 'polarization',
        'latitude', 'longitude', 'bandpass', 'alpha', 'alpha1', 'alpha2',
        'process_id', 'waveform', 'numrel_data', 'numrel_mode_min',
        'numrel_mode_max', 't_end_time', 'f_lower'
    ])
outdoc.childNodes[0].appendChild(out_table)

# Copy the INPUT table
for input_catalog, input_table in input_tables:
    if options.verbose:
        print("Reading from %s" % input_catalog, file=sys.stderr)
    #
    for point in input_table:
        # Apply the mass-ratio threshold
        qth = options.upper_q_threshold
        if point.eta < (qth / (1. + qth)**2):
Example #15
    def __init__(self, ifos, coinc_results, **kwargs):
        """Initialize a ligolw xml representation of a zerolag trigger
        for upload from pycbc live to gracedb.

        Parameters
        ----------
        ifos: list of strs
            A list of the ifos participating in this trigger
        coinc_results: dict of values
            A dictionary of values. The format is defined in
            pycbc/events/coinc.py and matches the on disk representation
            in the hdf file for this time.
        psds: dict of FrequencySeries
            Dictionary providing PSD estimates for all involved detectors.
        low_frequency_cutoff: float
            Minimum valid frequency for the PSD estimates.
        followup_data: dict of dicts, optional
            Dictionary providing SNR time series for each detector,
            to be used in sky localization with BAYESTAR. The format should
            be `followup_data['H1']['snr_series']`. More detectors can be
            present than given in `ifos`. If so, the extra detectors will only
            be used for sky localization.
        channel_names: dict of strings, optional
            Strain channel names for each detector.
            Will be recorded in the sngl_inspiral table.
        """
        self.template_id = coinc_results['foreground/%s/template_id' % ifos[0]]
        self.coinc_results = coinc_results
        self.ifos = ifos

        # remember if this should be marked as HWINJ
        self.is_hardware_injection = ('HWINJ' in coinc_results
                                      and coinc_results['HWINJ'])

        if 'followup_data' in kwargs:
            fud = kwargs['followup_data']
            assert len({fud[ifo]['snr_series'].delta_t for ifo in fud}) == 1, \
                    "delta_t for all ifos do not match"
            self.snr_series = {ifo: fud[ifo]['snr_series'] for ifo in fud}
            usable_ifos = fud.keys()
            followup_ifos = list(set(usable_ifos) - set(ifos))
        else:
            self.snr_series = None
            usable_ifos = ifos
            followup_ifos = []

        # Set up the bare structure of the xml document
        outdoc = ligolw.Document()
        outdoc.appendChild(ligolw.LIGO_LW())

        proc_id = ligolw_process.register_to_xmldoc(
            outdoc, 'pycbc', {}, ifos=usable_ifos, comment='',
            version=pycbc_version.git_hash,
            cvs_repository='pycbc/'+pycbc_version.git_branch,
            cvs_entry_time=pycbc_version.date).process_id

        # Set up coinc_definer table
        coinc_def_table = lsctables.New(lsctables.CoincDefTable)
        coinc_def_id = lsctables.CoincDefID(0)
        coinc_def_row = lsctables.CoincDef()
        coinc_def_row.search = "inspiral"
        coinc_def_row.description = "sngl_inspiral<-->sngl_inspiral coincs"
        coinc_def_row.coinc_def_id = coinc_def_id
        coinc_def_row.search_coinc_type = 0
        coinc_def_table.append(coinc_def_row)
        outdoc.childNodes[0].appendChild(coinc_def_table)

        # Set up coinc inspiral and coinc event tables
        coinc_id = lsctables.CoincID(0)
        coinc_event_table = lsctables.New(lsctables.CoincTable)
        coinc_event_row = lsctables.Coinc()
        coinc_event_row.coinc_def_id = coinc_def_id
        coinc_event_row.nevents = len(usable_ifos)
        coinc_event_row.instruments = ','.join(usable_ifos)
        coinc_event_row.time_slide_id = lsctables.TimeSlideID(0)
        coinc_event_row.process_id = proc_id
        coinc_event_row.coinc_event_id = coinc_id
        coinc_event_row.likelihood = 0.
        coinc_event_table.append(coinc_event_row)
        outdoc.childNodes[0].appendChild(coinc_event_table)

        # Set up sngls
        sngl_inspiral_table = lsctables.New(lsctables.SnglInspiralTable)
        coinc_event_map_table = lsctables.New(lsctables.CoincMapTable)

        sngl_populated = None
        network_snrsq = 0
        for sngl_id, ifo in enumerate(usable_ifos):
            sngl = return_empty_sngl(nones=True)
            sngl.event_id = lsctables.SnglInspiralID(sngl_id)
            sngl.process_id = proc_id
            sngl.ifo = ifo
            names = [n.split('/')[-1] for n in coinc_results
                     if 'foreground/%s' % ifo in n]
            for name in names:
                val = coinc_results['foreground/%s/%s' % (ifo, name)]
                if name == 'end_time':
                    sngl.set_end(lal.LIGOTimeGPS(val))
                else:
                    try:
                        setattr(sngl, name, val)
                    except AttributeError:
                        pass
            if sngl.mass1 and sngl.mass2:
                sngl.mtotal, sngl.eta = pnutils.mass1_mass2_to_mtotal_eta(
                        sngl.mass1, sngl.mass2)
                sngl.mchirp, _ = pnutils.mass1_mass2_to_mchirp_eta(
                        sngl.mass1, sngl.mass2)
                sngl_populated = sngl
            if sngl.snr:
                sngl.eff_distance = (sngl.sigmasq)**0.5 / sngl.snr
                network_snrsq += sngl.snr ** 2.0
            if 'channel_names' in kwargs and ifo in kwargs['channel_names']:
                sngl.channel = kwargs['channel_names'][ifo]
            sngl_inspiral_table.append(sngl)

            # Set up coinc_map entry
            coinc_map_row = lsctables.CoincMap()
            coinc_map_row.table_name = 'sngl_inspiral'
            coinc_map_row.coinc_event_id = coinc_id
            coinc_map_row.event_id = sngl.event_id
            coinc_event_map_table.append(coinc_map_row)

            if self.snr_series is not None:
                snr_series_to_xml(self.snr_series[ifo], outdoc, sngl.event_id)

        # for subthreshold detectors, respect BAYESTAR's assumptions and checks
        bayestar_check_fields = ('mass1 mass2 mtotal mchirp eta spin1x '
                                 'spin1y spin1z spin2x spin2y spin2z').split()
        subthreshold_sngl_time = numpy.mean(
                    [coinc_results['foreground/{}/end_time'.format(ifo)]
                     for ifo in ifos])
        for sngl in sngl_inspiral_table:
            if sngl.ifo in followup_ifos:
                for bcf in bayestar_check_fields:
                    setattr(sngl, bcf, getattr(sngl_populated, bcf))
                sngl.set_end(lal.LIGOTimeGPS(subthreshold_sngl_time))

        outdoc.childNodes[0].appendChild(coinc_event_map_table)
        outdoc.childNodes[0].appendChild(sngl_inspiral_table)

        # Set up the coinc inspiral table
        coinc_inspiral_table = lsctables.New(lsctables.CoincInspiralTable)
        coinc_inspiral_row = lsctables.CoincInspiral()
        # This seems to be used as FAP, which should not be in gracedb
        coinc_inspiral_row.false_alarm_rate = 0
        coinc_inspiral_row.minimum_duration = 0.
        coinc_inspiral_row.set_ifos(usable_ifos)
        coinc_inspiral_row.coinc_event_id = coinc_id
        coinc_inspiral_row.mchirp = sngl_populated.mchirp
        coinc_inspiral_row.mass = sngl_populated.mtotal
        coinc_inspiral_row.end_time = sngl_populated.end_time
        coinc_inspiral_row.end_time_ns = sngl_populated.end_time_ns
        coinc_inspiral_row.snr = network_snrsq ** 0.5
        far = 1.0 / (lal.YRJUL_SI * coinc_results['foreground/ifar'])
        coinc_inspiral_row.combined_far = far
        coinc_inspiral_table.append(coinc_inspiral_row)
        outdoc.childNodes[0].appendChild(coinc_inspiral_table)

        # append the PSDs
        self.psds = kwargs['psds']
        psds_lal = {}
        for ifo in self.psds:
            psd = self.psds[ifo]
            kmin = int(kwargs['low_frequency_cutoff'] / psd.delta_f)
            fseries = lal.CreateREAL8FrequencySeries(
                "psd", psd.epoch, kwargs['low_frequency_cutoff'], psd.delta_f,
                lal.StrainUnit**2 / lal.HertzUnit, len(psd) - kmin)
            fseries.data.data = psd.numpy()[kmin:] / pycbc.DYN_RANGE_FAC ** 2.0
            psds_lal[ifo] = fseries
        make_psd_xmldoc(psds_lal, outdoc)

        self.outdoc = outdoc
        self.time = sngl_populated.get_end()
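A hypothetical instantiation; the enclosing class name is not shown in this excerpt (in pycbc it is the live-candidate wrapper, e.g. SingleCoincForGraceDB), and coinc_results, psds, and the cutoff come from the live analysis:

from glue.ligolw import utils as ligolw_utils
event = SingleCoincForGraceDB(['H1', 'L1'], coinc_results,
                              psds=psds, low_frequency_cutoff=20.0)
ligolw_utils.write_filename(event.outdoc, "coinc.xml.gz", gz=True)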
Example #16
def create_tables(xmldoc, rootfiles):

    sim_tree = TChain("waveburst")
    liv_tree = TChain("liveTime")
    for rootfile in rootfiles:
        sim_tree.Add(rootfile)
        liv_tree.Add(rootfile)

    # Define tables
    sngl_burst_table = lsctables.New(lsctables.SnglBurstTable, [
        "peak_time_ns", "start_time_ns", "stop_time_ns", "process_id", "ifo",
        "peak_time", "start_time", "stop_time", "duration", "time_lag",
        "peak_frequency", "search", "flow", "fhigh", "bandwidth", "tfvolume",
        "hrss", "event_id"
    ])
    xmldoc.childNodes[0].appendChild(sngl_burst_table)
    sngl_burst_table.sync_next_id()

    coinc_event_table = lsctables.New(lsctables.CoincTable, [
        "process_id", "coinc_event_id", "nevents", "instruments",
        "time_slide_id", "coinc_def_id"
    ])
    xmldoc.childNodes[0].appendChild(coinc_event_table)
    coinc_event_table.sync_next_id()

    multi_burst_table = lsctables.New(
        lsctables.MultiBurstTable,
        ["process_id", "peak_time", "peak_time_ns", "coinc_event_id"])
    xmldoc.childNodes[0].appendChild(multi_burst_table)

    coinc_event_map_table = lsctables.New(lsctables.CoincMapTable)
    xmldoc.childNodes[0].appendChild(coinc_event_map_table)

    do_process_table(xmldoc, sim_tree, liv_tree)
    process_index = dict((int(row.process_id), row)
                         for row in lsctables.ProcessTable.get_table(xmldoc))

    do_summary_table(xmldoc, sim_tree, liv_tree)

    # create coinc_definer row
    row = get_coinc_def_row(sim_tree)
    coinc_def_id = llwapp.get_coinc_def_id(xmldoc,
                                           row.search,
                                           row.search_coinc_type,
                                           description=row.description)

    for i in range(0, sim_tree.GetEntries()):
        sim_tree.GetEntry(i)

        offset_vector = dict(
            (get_ifos_from_index(instrument_index), offset)
            for instrument_index, offset in zip(sim_tree.ifo, sim_tree.lag))

        coinc_event = coinc_event_table.RowType()
        coinc_event.process_id = process_index[sim_tree.run].process_id
        coinc_event.coinc_event_id = coinc_event_table.get_next_id()
        coinc_event.coinc_def_id = coinc_def_id
        coinc_event.nevents = sim_tree.ndim
        coinc_event.instruments = get_ifos_from_index(
            branch_array_to_list(sim_tree.ifo, sim_tree.ndim))
        coinc_event.time_slide_id = llwapp.get_time_slide_id(
            xmldoc, offset_vector, process_index[sim_tree.run])
        coinc_event_table.append(coinc_event)

        for d in range(0, sim_tree.ndim):
            sngl_burst = get_sngl_burst_row(sngl_burst_table, sim_tree, d)
            sngl_burst.process_id = coinc_event.process_id
            sngl_burst.event_id = sngl_burst_table.get_next_id()
            sngl_burst_table.append(sngl_burst)

            coinc_event_map = coinc_event_map_table.RowType()
            coinc_event_map.event_id = sngl_burst.event_id
            coinc_event_map.table_name = sngl_burst.event_id.table_name
            coinc_event_map.coinc_event_id = coinc_event.coinc_event_id
            coinc_event_map_table.append(coinc_event_map)

        multi_burst = get_multi_burst_row(multi_burst_table, sim_tree)
        multi_burst.process_id = coinc_event.process_id
        multi_burst.coinc_event_id = coinc_event.coinc_event_id
        multi_burst_table.append(multi_burst)
Example #17
    version=git_version.id,
    cvs_repository=git_version.branch,
    cvs_entry_time=git_version.date).process_id

params = loadtxt(options.input_file)

param_names = options.parameter_names.split()
col_names = list(param_names)

print param_names
if options.type == "sngl":
    sngl_inspiral_table = lsctables.New(lsctables.SnglInspiralTable,
                                        columns=col_names)
elif options.type == "sim":
    sngl_inspiral_table = lsctables.New(lsctables.SimInspiralTable,
                                        columns=col_names)

outdoc.childNodes[0].appendChild(sngl_inspiral_table)

for values in params:
    if options.type == "sngl":
        tmplt = lsctables.SnglInspiral()
    elif options.type == "sim":
        tmplt = lsctables.SimInspiral()

    tmplt.process_id = proc_id
    index = 0
    for value in values:
Example #18
        injections['frequency'] = [np.nan for i in samples['ra']]
    injections['duration'] = dur
    injections['q'] = q
    try:
        injections['hrss'] = samples['hrss']
    except:
        injections['hrss'] = np.exp(samples['loghrss'])
    injections['ra'] = samples['ra']
    injections['dec'] = samples['dec']
    injections['psi'] = samples['psi']

    # Create a new XML document
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    #create timeslide table and set offsets to 0
    timeslide_table = lsctables.New(lsctables.TimeSlideTable)
    p = lsctables.Process()
    p.process_id = ilwd.ilwdchar("process:process_id:{0:d}".format(0))
    timeslide_table.append_offsetvector({
        'H1': 0,
        'V1': 0,
        'L1': 0,
        'H2': 0
    }, p)

    sim_table = lsctables.New(lsctables.SimBurstTable)
    xmldoc.childNodes[0].appendChild(timeslide_table)
    xmldoc.childNodes[0].appendChild(sim_table)

    # Add empty rows to the sim_burst table
    for inj in range(N):
Example #19
import sys
from glue.lal import CacheEntry
from glue.ligolw import lsctables, utils
for filename in (CacheEntry(line).path for line in open(sys.argv[1])):
    xmldoc = utils.load_filename(filename,
                                 gz=(filename or "stdin").endswith(".gz"))
    try:
        lsctables.table.get_table(xmldoc,
                                  lsctables.SnglInspiralTable.tableName)
    except ValueError:
        xmldoc.childNodes[-1].appendChild(
            lsctables.New(
                lsctables.SnglInspiralTable,
                columns=("process_id", "ifo", "search", "channel", "end_time",
                         "end_time_ns", "end_time_gmst", "impulse_time",
                         "impulse_time_ns", "template_duration",
                         "event_duration", "amplitude", "eff_distance",
                         "coa_phase", "mass1", "mass2", "mchirp", "mtotal",
                         "eta", "kappa", "chi", "tau0", "tau2", "tau3", "tau4",
                         "tau5", "ttotal", "psi0", "psi3", "alpha", "alpha1",
                         "alpha2", "alpha3", "alpha4", "alpha5", "alpha6",
                         "beta", "f_final", "snr", "chisq", "chisq_dof",
                         "bank_chisq", "bank_chisq_dof", "cont_chisq",
                         "cont_chisq_dof", "sigmasq", "rsqveto_duration",
                         "Gamma0", "Gamma1", "Gamma2", "Gamma3", "Gamma4",
                         "Gamma5", "Gamma6", "Gamma7", "Gamma8", "Gamma9",
                         "event_id")))
        utils.write_filename(xmldoc,
                             filename,
                             gz=(filename or "stdout").endswith(".gz"))
Example #20
progress.update(-1, 'setting up output document')
out_xmldoc = ligolw.Document()
out_xmldoc.appendChild(ligolw.LIGO_LW())

# Write process metadata to output file.
process = command.register_to_xmldoc(out_xmldoc,
                                     parser,
                                     opts,
                                     ifos=opts.detector,
                                     comment="Simulated coincidences")

# Add search summary to output file.
all_time = segments.segment(
    [glue.lal.LIGOTimeGPS(0),
     glue.lal.LIGOTimeGPS(2e9)])
search_summary_table = lsctables.New(lsctables.SearchSummaryTable)
out_xmldoc.childNodes[0].appendChild(search_summary_table)
summary = ligolw_search_summary.append_search_summary(out_xmldoc,
                                                      process,
                                                      inseg=all_time,
                                                      outseg=all_time)

# Read PSDs.
progress.update(-1, 'reading ' + opts.reference_psd.name)
xmldoc, _ = ligolw_utils.load_fileobj(
    opts.reference_psd, contenthandler=lal.series.PSDContentHandler)
psds = lal.series.read_psd_xmldoc(xmldoc, root_name=None)
psds = {
    key: timing.InterpolatedPSD(filter.abscissa(psd), psd.data.data)
    for key, psd in psds.items() if psd is not None
}
Example #21
def do_it_to(xmldoc):
    """
    NOTE:  this performs an in-place transcription of the contents of
    the XML document tree.  This should be assumed to be a destructive
    operation on the contents of the tree.  If you wish to hold
    references to any of the Table elements or other structures in the
    tree and wish them to remain intact so they can be used afterwards,
    make copies first.
    """
    #
    # walk the tree finding Table elements
    #

    for table in list(xmldoc.getElementsByTagName(ligolw.Table.tagName)):
        #
        # this is not the table we're looking for
        #

        if table.Name not in ilwdchar_tables:
            continue

        #
        # make a copy of the table with glue.ligolw's lsctables and
        # replace the old table with the new table in the XML tree
        #

        newtable = table.parentNode.replaceChild(
            lsctables.New(lsctables.TableByName[table.Name],
                          table.columnnames), table)

        #
        # build a row transcription function for this table
        #

        if table.Name != "coinc_event_map":
            ilwdclsmap = ilwdchar_tables[table.Name]
            newrowtype = newtable.RowType

            def newrow(row,
                       nonilwdcharattrs=tuple(colname
                                              for colname in table.columnnames
                                              if colname not in ilwdclsmap),
                       ilwdcharattrs=tuple(colname
                                           for colname in table.columnnames
                                           if colname in ilwdclsmap)):
                kwargs = dict(
                    (attr, getattr(row, attr)) for attr in nonilwdcharattrs)
                kwargs.update((attr, ilwdclsmap[attr](getattr(row, attr)))
                              for attr in ilwdcharattrs)
                return newrowtype(**kwargs)
        else:
            # event_id IDs obtain their table name prefix from
            # the table_name column
            newrowtype = newtable.RowType

            def newrow(row,
                       coinc_id_ilwdcls=ilwdchar_tables["coinc_event"]
                       ["coinc_event_id"]):
                # FIXME this is probably a dumb way to do this,
                # but it shouldn't matter once we have no
                # reason to convert back to ilwdchar
                if "event_id" in ilwdchar_tables[row.table_name]:
                    event_id = ilwdchar_tables[row.table_name]["event_id"](
                        row.event_id)
                elif "simulation_id" in ilwdchar_tables[row.table_name]:
                    event_id = ilwdchar_tables[
                        row.table_name]["simulation_id"](row.event_id)
                elif "coinc_event_id" in ilwdchar_tables[row.table_name]:
                    event_id = ilwdchar_tables[
                        row.table_name]["coinc_event_id"](row.event_id)
                else:
                    raise KeyError(
                        "event_id, simulation_id or coinc_event_id not in " +
                        ilwdchar_tables[row.table_name])
                return newrowtype(table_name=row.table_name,
                                  event_id=event_id,
                                  coinc_event_id=coinc_id_ilwdcls(
                                      row.coinc_event_id))

        #
        # transcribe rows from the old table into the new table
        #

        newtable.extend(newrow(row) for row in table)

        #
        # dispose of the old table
        #

        table.unlink()

    #
    # walk the tree looking for Param elements containing sngl_inspiral
    # IDs and convert to ilwd:char
    #

    ilwdcls = ilwdchar_tables["sngl_inspiral"]["event_id"]
    for param in list(ligo_lw_Param.getParamsByName(xmldoc, "event_id")):
        param.Type = u"ilwd:char"
        param.pcdata = ilwdcls(param.pcdata)

    #
    # done
    #

    return xmldoc
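A hypothetical driver for do_it_to (file names illustrative), assuming a glue-era content handler with lsctables.use_in applied:

from glue.ligolw import ligolw, lsctables, utils as ligolw_utils

class ContentHandler(ligolw.LIGOLWContentHandler):
    pass
lsctables.use_in(ContentHandler)

xmldoc = ligolw_utils.load_filename("in.xml.gz", contenthandler=ContentHandler)
do_it_to(xmldoc)  # destructive, in-place (see the NOTE in the docstring)
ligolw_utils.write_filename(xmldoc, "out.xml.gz", gz=True)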
Example #22
    def __getitem__(self, coinc_event_id):
        """
		Construct and return an XML document containing the
		sngl_inspiral<-->sngl_inspiral coinc carrying the given
		coinc_event_id.
		"""
        newxmldoc = ligolw.Document()
        newxmldoc.appendChild(ligolw.LIGO_LW())

        # when making these, we can't use table.new_from_template()
        # because we need to ensure we have a Table subclass, not a
        # DBTable subclass
        new_process_table = newxmldoc.childNodes[-1].appendChild(
            lsctables.New(lsctables.ProcessTable,
                          self.process_table.columnnames))
        new_process_params_table = newxmldoc.childNodes[-1].appendChild(
            lsctables.New(lsctables.ProcessParamsTable,
                          self.process_params_table.columnnames))
        new_search_summary_table = newxmldoc.childNodes[-1].appendChild(
            lsctables.New(lsctables.SearchSummaryTable,
                          self.search_summary_table.columnnames))
        new_sngl_inspiral_table = newxmldoc.childNodes[-1].appendChild(
            lsctables.New(lsctables.SnglInspiralTable,
                          self.sngl_inspiral_table.columnnames))
        new_coinc_def_table = newxmldoc.childNodes[-1].appendChild(
            lsctables.New(lsctables.CoincDefTable,
                          self.coinc_def_table.columnnames))
        new_coinc_event_table = newxmldoc.childNodes[-1].appendChild(
            lsctables.New(lsctables.CoincTable,
                          self.coinc_event_table.columnnames))
        new_coinc_inspiral_table = newxmldoc.childNodes[-1].appendChild(
            lsctables.New(lsctables.CoincInspiralTable,
                          self.coinc_inspiral_table.columnnames))
        new_coinc_event_map_table = newxmldoc.childNodes[-1].appendChild(
            lsctables.New(lsctables.CoincMapTable,
                          self.coinc_event_map_table.columnnames))
        new_time_slide_table = newxmldoc.childNodes[-1].appendChild(
            lsctables.New(lsctables.TimeSlideTable,
                          self.time_slide_table.columnnames))

        new_coinc_def_table.append(self.coinc_def)
        coinc_event = self.coinc_event_index[coinc_event_id]
        new_coinc_event_table.append(coinc_event)
        new_coinc_inspiral_table.append(
            self.coinc_inspiral_index[coinc_event_id])
        map(new_coinc_event_map_table.append,
            self.coinc_event_map_index[coinc_event_id])
        map(new_time_slide_table.append,
            self.time_slide_index[coinc_event.time_slide_id])
        for row in new_coinc_event_map_table:
            new_sngl_inspiral_table.append(
                self.sngl_inspiral_index[row.event_id])

        for process_id in set(
                new_sngl_inspiral_table.getColumnByName("process_id")) | set(
                    new_coinc_event_table.getColumnByName("process_id")) | set(
                        new_time_slide_table.getColumnByName("process_id")):
            # process row is required
            new_process_table.append(self.process_index[process_id])
            try:
                map(new_process_params_table.append,
                    self.process_params_index[process_id])
            except KeyError:
                # process_params rows are optional
                pass
            try:
                new_search_summary_table.append(
                    self.search_summary_index[process_id])
            except KeyError:
                # search_summary rows are optional
                pass

        return newxmldoc
Example #23
def ReadMultiInspiralTimeSlidesFromFiles(fileList,
                                         generate_output_tables=False):
    """
    Read time-slid multiInspiral tables from a list of files
    @param fileList: list of input files
  """
    if not fileList:
        return multiInspiralTable(), None

    multis = None
    timeSlides = []

    segmentDict = {}
    for thisFile in fileList:

        doc = utils.load_filename(thisFile,
                                  gz=(thisFile or "stdin").endswith(".gz"),
                                  contenthandler=lsctables.use_in(
                                      ligolw.LIGOLWContentHandler))
        # Extract the time slide table
        timeSlideTable = table.get_table(doc,
                                         lsctables.TimeSlideTable.tableName)
        slideMapping = {}
        currSlides = {}
        # NOTE: I think some of this is duplicated in the glue definition of the
        # time slide table. Probably should move over to that
        for slide in timeSlideTable:
            currID = int(slide.time_slide_id)
            if currID not in currSlides.keys():
                currSlides[currID] = {}
                currSlides[currID][slide.instrument] = slide.offset
            elif slide.instrument not in currSlides[currID].keys():
                currSlides[currID][slide.instrument] = slide.offset

        for slideID, offsetDict in currSlides.items():
            try:
                # Is the slide already in the list and where?
                offsetIndex = timeSlides.index(offsetDict)
                slideMapping[slideID] = offsetIndex
            except ValueError:
                # If not then add it
                timeSlides.append(offsetDict)
                slideMapping[slideID] = len(timeSlides) - 1

        # Get the mapping table
        segmentMap = {}
        timeSlideMapTable = table.get_table(
            doc, lsctables.TimeSlideSegmentMapTable.tableName)
        for entry in timeSlideMapTable:
            segmentMap[int(entry.segment_def_id)] = int(entry.time_slide_id)

        # Extract the segment table
        segmentTable = table.get_table(doc, lsctables.SegmentTable.tableName)
        for entry in segmentTable:
            currSlidId = segmentMap[int(entry.segment_def_id)]
            currSeg = entry.get()
            if not segmentDict.has_key(slideMapping[currSlidId]):
                segmentDict[slideMapping[currSlidId]] = segments.segmentlist()
            segmentDict[slideMapping[currSlidId]].append(currSeg)
            segmentDict[slideMapping[currSlidId]].coalesce()

        # extract the multi inspiral table
        multiInspiralTable = table.get_table(
            doc, lsctables.MultiInspiralTable.tableName)
        # Remap the time slide IDs
        for multi in multiInspiralTable:
            newID = slideMapping[int(multi.time_slide_id)]
            multi.time_slide_id = ilwd.ilwdchar(
                "time_slide:time_slide_id:%d" % (newID))
        if multis:
            multis.extend(multiInspiralTable)
        else:
            multis = multiInspiralTable

    if not generate_output_tables:
        return multis, timeSlides, segmentDict
    else:
        # Make a new time slide table
        timeSlideTab = lsctables.New(lsctables.TimeSlideTable)

        for slideID, offsetDict in enumerate(timeSlides):
            for instrument in offsetDict.keys():
                currTimeSlide = lsctables.TimeSlide()
                currTimeSlide.instrument = instrument
                currTimeSlide.offset = offsetDict[instrument]
                currTimeSlide.time_slide_id = ilwd.ilwdchar(\
                                        "time_slide:time_slide_id:%d" % (slideID))
                currTimeSlide.process_id = ilwd.ilwdchar(\
                                        "process:process_id:%d" % (0))
                timeSlideTab.append(currTimeSlide)

        # Make a new mapping table
        timeSlideSegMapTab = lsctables.New(lsctables.TimeSlideSegmentMapTable)

        for i in range(len(timeSlides)):
            currMapEntry = lsctables.TimeSlideSegmentMap()
            currMapEntry.time_slide_id = ilwd.ilwdchar(\
                                      "time_slide:time_slide_id:%d" % (i))
            currMapEntry.segment_def_id = ilwd.ilwdchar(\
                                      "segment_def:segment_def_id:%d" % (i))
            timeSlideSegMapTab.append(currMapEntry)

        # Make a new segment table
        newSegmentTable = lsctables.New(lsctables.SegmentTable)

        segmentIDCount = 0
        for i in range(len(timeSlides)):
            currSegList = segmentDict[i]
            for seg in currSegList:
                currSegment = lsctables.Segment()
                currSegment.segment_id = ilwd.ilwdchar(\
                                      "segment:segment_id:%d" %(segmentIDCount))
                segmentIDCount += 1
                currSegment.segment_def_id = ilwd.ilwdchar(\
                                        "segment_def:segment_def_id:%d" % (i))
                currSegment.process_id = ilwd.ilwdchar(\
                                        "process:process_id:%d" % (0))
                currSegment.set(seg)
                currSegment.creator_db = -1
                currSegment.segment_def_cdb = -1
                newSegmentTable.append(currSegment)
        return multis,timeSlides,segmentDict,timeSlideTab,newSegmentTable,\
               timeSlideSegMapTab
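A hypothetical call (file name illustrative); without output tables the function returns the triggers, the de-duplicated slide list, and the per-slide segment dictionary:

multis, timeSlides, segmentDict = ReadMultiInspiralTimeSlidesFromFiles(
    ["H1L1V1-INSPIRAL_SLIDES-100-1.xml.gz"])
print("%d slides, %d triggers" % (len(timeSlides), len(multis)))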
Example #24
    def __init__(self, xmldoc, bbdef, sbdef, scedef, scndef, process,
                 end_time_bisect_window):
        #
        # store the process row
        #

        self.process = process

        #
        # locate the sngl_inspiral and sim_inspiral tables
        #

        self.snglinspiraltable = lsctables.SnglInspiralTable.get_table(xmldoc)
        self.siminspiraltable = lsctables.SimInspiralTable.get_table(xmldoc)

        #
        # get the offset vectors from the document
        #

        self.offsetvectors = lsctables.TimeSlideTable.get_table(
            xmldoc).as_dict()

        #
        # get out segment lists for programs that generated
        # triggers (currently only used for time_slide vector
        # construction)
        #

        seglists = lsctables.SearchSummaryTable.get_table(
            xmldoc).get_out_segmentlistdict(
                set(self.snglinspiraltable.getColumnByName(
                    "process_id"))).coalesce()

        #
        # construct the zero-lag time slide needed to cover the
        # instruments listed in all the triggers, then determine
        # its ID (or create it if needed)
        #
        # FIXME:  in the future, the sim_inspiral table should
        # indicate time slide at which the injection was done
        #

        self.tisi_id = ligolw_time_slide.get_time_slide_id(xmldoc, {}.fromkeys(
            seglists, 0.0),
                                                           create_new=process)

        #
        # get coinc_definer row for sim_inspiral <--> sngl_inspiral
        # coincs; this creates a coinc_definer table if the
        # document doesn't have one
        #

        self.sb_coinc_def_id = ligolw_coincs.get_coinc_def_id(
            xmldoc,
            sbdef.search,
            sbdef.search_coinc_type,
            create_new=True,
            description=sbdef.description)

        #
        # get coinc_def_id's for sngl_inspiral <--> sngl_inspiral, and
        # both kinds of sim_inspiral <--> coinc_event coincs.  set all
        # to None if this document does not contain any sngl_inspiral
        # <--> sngl_inspiral coincs.
        #

        try:
            ii_coinc_def_id = ligolw_coincs.get_coinc_def_id(
                xmldoc,
                bbdef.search,
                bbdef.search_coinc_type,
                create_new=False)
        except KeyError:
            ii_coinc_def_id = None
            self.sce_coinc_def_id = None
            self.scn_coinc_def_id = None
        else:
            self.sce_coinc_def_id = ligolw_coincs.get_coinc_def_id(
                xmldoc,
                scedef.search,
                scedef.search_coinc_type,
                create_new=True,
                description=scedef.description)
            self.scn_coinc_def_id = ligolw_coincs.get_coinc_def_id(
                xmldoc,
                scndef.search,
                scndef.search_coinc_type,
                create_new=True,
                description=scndef.description)

        #
        # get coinc table, create one if needed
        #

        try:
            self.coinctable = lsctables.CoincTable.get_table(xmldoc)
        except ValueError:
            self.coinctable = lsctables.New(lsctables.CoincTable)
            xmldoc.childNodes[0].appendChild(self.coinctable)
        self.coinctable.sync_next_id()

        #
        # get coinc_map table, create one if needed
        #

        try:
            self.coincmaptable = lsctables.CoincMapTable.get_table(xmldoc)
        except ValueError:
            self.coincmaptable = lsctables.New(lsctables.CoincMapTable)
            xmldoc.childNodes[0].appendChild(self.coincmaptable)

        #
        # index the document
        #
        # FIXME:  inspiral<-->inspiral coincs should be organized by time
        # slide ID, but since injections are only done at zero lag
        # for now this is ignored.
        #

        # index the sngl_inspiral table
        index = dict((row.event_id, row) for row in self.snglinspiraltable)
        # find IDs of inspiral<-->inspiral coincs
        self.sngls = dict((row.coinc_event_id, []) for row in self.coinctable
                          if row.coinc_def_id == ii_coinc_def_id)
        # construct event list for each inspiral<-->inspiral coinc
        for row in self.coincmaptable:
            try:
                self.sngls[row.coinc_event_id].append(index[row.event_id])
            except KeyError:
                pass
        del index
        # construct a sngl-->coincs look-up table
        self.coincs = dict((event.event_id, set())
                           for events in self.sngls.values()
                           for event in events)
        for row in self.coincmaptable:
            if row.event_id in self.coincs and row.coinc_event_id in self.sngls:
                self.coincs[row.event_id].add(row.coinc_event_id)
        # create a coinc_event_id to offset vector look-up table
        self.coincoffsets = dict(
            (row.coinc_event_id, self.offsetvectors[row.time_slide_id])
            for row in self.coinctable if row.coinc_def_id == ii_coinc_def_id)

        #
        # sort sngl_inspiral table by end time, and sort the coincs
        # list by the end time of the first (earliest) event in
        # each coinc (recall that the event tuple for each coinc
        # has been time-ordered)
        #

        self.snglinspiraltable.sort(key=lambda row:
                                    (row.end_time, row.end_time_ns))

        #
        # set the window for inspirals_near_endtime().  this window
        # is the amount of time such that if an injection's end
        # time and an inspiral event's end time differ by more than
        # this it is *impossible* for them to match one another.
        #

        self.end_time_bisect_window = LIGOTimeGPS(end_time_bisect_window)
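
To make the window concrete, here is a minimal sketch, not from the original
class, of how a bisect search over the end-time-sorted trigger list can use
it; the `triggers` argument and its `end_time` attribute are illustrative
assumptions:

import bisect

def inspirals_near_endtime_sketch(triggers, t, window):
    # Assumes `triggers` is already sorted by end time, so bisect can
    # locate both boundaries in O(log n).  Any trigger outside
    # [t - window, t + window] cannot match the injection at time t.
    end_times = [float(trig.end_time) for trig in triggers]
    lo = bisect.bisect_left(end_times, float(t - window))
    hi = bisect.bisect_right(end_times, float(t + window))
    return triggers[lo:hi]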
Example #25
def make_exttrig_file(cp, ifos, sci_seg, out_dir):
    '''
    Make an ExtTrig xml file containing information on the external trigger.

    Parameters
    ----------
    cp : pycbc.workflow.configuration.WorkflowConfigParser object
        The parsed configuration options of a pycbc.workflow.core.Workflow.
    ifos : str
        String containing the analysis interferometer IDs.
    sci_seg : glue.segments.segment
        The science segment for the analysis run.
    out_dir : str
        The output directory, destination for the xml file.

    Returns
    -------
    xml_file : pycbc.workflow.File object
        The xml file with external trigger information.
    '''
    # Initialise objects
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    tbl = lsctables.New(lsctables.ExtTriggersTable)
    cols = tbl.validcolumns
    xmldoc.childNodes[-1].appendChild(tbl)
    row = tbl.appendRow()

    # Add known attributes for this GRB
    setattr(row, "event_ra", float(cp.get("workflow", "ra")))
    setattr(row, "event_dec", float(cp.get("workflow", "dec")))
    setattr(row, "start_time", int(cp.get("workflow", "trigger-time")))
    setattr(row, "event_number_grb", str(cp.get("workflow", "trigger-name")))

    # Fill in all remaining columns with defaults
    for entry in cols.keys():
        if not hasattr(row, entry):
            if cols[entry] in ['real_4', 'real_8']:
                setattr(row, entry, 0.)
            elif cols[entry] == 'int_4s':
                setattr(row, entry, 0)
            elif cols[entry] == 'lstring':
                setattr(row, entry, '')
            elif entry == 'process_id':
                row.process_id = ilwd.ilwdchar("external_trigger:process_id:0")
            elif entry == 'event_id':
                row.event_id = ilwd.ilwdchar("external_trigger:event_id:0")
            else:
                raise ValueError("Column %s not recognized" % entry)

    # Save file
    xml_file_name = "triggerGRB%s.xml" % str(cp.get("workflow",
                                                    "trigger-name"))
    xml_file_path = os.path.join(out_dir, xml_file_name)
    utils.write_filename(xmldoc, xml_file_path)
    xml_file_url = urlparse.urljoin("file:",
                                    urllib.pathname2url(xml_file_path))
    xml_file = File(ifos, xml_file_name, sci_seg, file_url=xml_file_url)
    xml_file.PFN(xml_file_url, site="local")

    return xml_file
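
A minimal usage sketch, assuming `wflow` is an already-constructed
pycbc.workflow.core.Workflow whose [workflow] section defines the ra, dec,
trigger-time and trigger-name options read above; the GPS segment bounds are
placeholders:

from glue import segments

sci_seg = segments.segment(1126259400, 1126263000)  # placeholder GPS times
xml_file = make_exttrig_file(wflow.cp, ''.join(wflow.ifos), sci_seg, 'output')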
Example #26
def output_sngl_inspiral_table(outputFile, tempBank, metricParams,
                               ethincaParams, programName="", optDict = None,
                               outdoc=None, **kwargs):
    """
    Function that converts the information produced by the various pyCBC bank
    generation codes into a valid LIGOLW xml file containing a sngl_inspiral
    table and outputs to file.
 
    Parameters
    -----------
    outputFile : string
        Name of the file that the bank will be written to
    tempBank : iterable
        Each entry in the tempBank iterable should be a sequence of
        [mass1,mass2,spin1z,spin2z] in that order.
    metricParams : metricParameters instance
        Structure holding all the options for construction of the metric
        and the eigenvalues, eigenvectors and covariance matrix
        needed to manipulate the space.
    ethincaParams: {ethincaParameters instance, None}
        Structure holding options relevant to the ethinca metric computation
        including the upper frequency cutoff to be used for filtering.
        NOTE: The computation is currently only valid for non-spinning systems
        and uses the TaylorF2 approximant.
    programName (key-word-argument) : string
        Name of the executable that has been run
    optDict (key-word argument) : dictionary
        Dictionary of the command line arguments passed to the program
    outdoc (key-word argument) : ligolw xml document
        If given add template bank to this representation of a xml document and
        write to disk. If not given create a new document.
    kwargs : key-word arguments
        All other key word arguments will be passed directly to 
        ligolw_process.register_to_xmldoc
    """
    if optDict is None:
        optDict = {}
    if outdoc is None:
        outdoc = ligolw.Document()
        outdoc.appendChild(ligolw.LIGO_LW())

    # get IFO to put in search summary table
    ifos = []
    if 'channel_name' in optDict.keys():
        if optDict['channel_name'] is not None:
            ifos = [optDict['channel_name'][0:2]]

    proc_id = ligolw_process.register_to_xmldoc(outdoc, programName, optDict,
                                                ifos=ifos, **kwargs).process_id
    sngl_inspiral_table = convert_to_sngl_inspiral_table(tempBank, proc_id)
    # Calculate Gamma components if needed
    if ethincaParams is not None:
        if ethincaParams.doEthinca:
            for sngl in sngl_inspiral_table:
                # Set tau_0 and tau_3 values needed for the calculation of
                # ethinca metric distances
                (sngl.tau0,sngl.tau3) = pnutils.mass1_mass2_to_tau0_tau3(
                    sngl.mass1, sngl.mass2, metricParams.f0)
                fMax_theor, GammaVals = calculate_ethinca_metric_comps(
                    metricParams, ethincaParams,
                    sngl.mass1, sngl.mass2, spin1z=sngl.spin1z,
                    spin2z=sngl.spin2z, full_ethinca=ethincaParams.full_ethinca)
                # assign the upper frequency cutoff and Gamma0-5 values
                sngl.f_final = fMax_theor
                for i, gamma_val in enumerate(GammaVals):
                    setattr(sngl, "Gamma" + str(i), gamma_val)
        # If Gamma metric components are not wanted, assign f_final from an
        # upper frequency cutoff specified in ethincaParams
        elif ethincaParams.cutoff is not None:
            for sngl in sngl_inspiral_table:
                sngl.f_final = pnutils.frequency_cutoff_from_name(
                    ethincaParams.cutoff,
                    sngl.mass1, sngl.mass2, sngl.spin1z, sngl.spin2z)

    # set per-template low-frequency cutoff
    if 'f_low_column' in optDict and 'f_low' in optDict and \
            optDict['f_low_column'] is not None:
        for sngl in sngl_inspiral_table:
            setattr(sngl, optDict['f_low_column'], optDict['f_low'])

    outdoc.childNodes[0].appendChild(sngl_inspiral_table)

    # get times to put in search summary table
    start_time = 0
    end_time = 0
    if 'gps_start_time' in optDict.keys() and 'gps_end_time' in optDict.keys():
        start_time = optDict['gps_start_time']
        end_time = optDict['gps_end_time']

    # make search summary table
    search_summary_table = lsctables.New(lsctables.SearchSummaryTable) 
    search_summary = return_search_summary(start_time, end_time,
                               len(sngl_inspiral_table), ifos, **kwargs)
    search_summary_table.append(search_summary)
    outdoc.childNodes[0].appendChild(search_summary_table)

    # write the xml doc to disk
    proctable = table.get_table(outdoc, lsctables.ProcessTable.tableName)
    ligolw_utils.write_filename(outdoc, outputFile,
                                gz=outputFile.endswith('.gz'))
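
A minimal usage sketch with the ethinca computation disabled; the bank
entries are placeholder values, and `metricParams` may be passed as None
because it is only dereferenced when the ethinca branch is taken:

# Hypothetical two-template bank: [mass1, mass2, spin1z, spin2z] per entry.
temp_bank = [(1.4, 1.4, 0.0, 0.0), (10.0, 1.4, 0.9, 0.0)]
output_sngl_inspiral_table('bank.xml.gz', temp_bank,
                           metricParams=None, ethincaParams=None,
                           programName='pycbc_bank_sketch',
                           optDict={'channel_name': 'H1:GDS-CALIB_STRAIN'})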
Example #27
# moving left to right on an IFAN plot)
for ii in range(0, len(FANc)):
    # cycle through background FANs, skipping the first one
    for jj in range(1, len(maxFANs)):
        # find the largest background FAN below this foreground FAN
        if FANc[ii] > maxFANs[jj]:
            # multiply by the number of active categories
            FANc[ii] = FANc[ii] * jj
            # add the background FANs of the inactive categories
            for kk in range(jj, len(maxFANs)):
                FANc[ii] = FANc[ii] + maxFANs[kk]
            # go to the next FAN in FANc
            break
        elif jj == len(maxFANs) - 1:  # all categories active
            FANc[ii] = FANc[ii] * len(maxFANs)

combinedTrigs = lsctables.New(lsctables.SnglInspiralTable, columns=[])
loudestTrig = lsctables.New(lsctables.SnglInspiralTable, columns=[])

if opts.min_rate:
    minIFAN = opts.min_rate / (FrgrndTime / 3.15567360E7)
    maxFAN = 1 / minIFAN

for column in columnList():
    combinedTrigs.appendColumn(column)
    loudestTrig.appendColumn(column)

loudestTrigTemp = []
loudestTrigFAR = 99999999999.
for thisfile in corsefiles:
    insptrigs = SnglInspiralUtils.ReadSnglInspiralFromFiles([thisfile])
    if insptrigs:
Example #28
    def __init__(self, xmldoc, sbdef, process):
        #
        # store the process row
        #

        self.process = process

        #
        # locate the multi_inspiral and sim_inspiral tables
        #

        self.multiinspiraltable = lsctables.MultiInspiralTable.get_table(
            xmldoc)
        self.siminspiraltable = lsctables.SimInspiralTable.get_table(xmldoc)

        #
        # get out segment lists for programs that generated
        # triggers (currently only used for time_slide vector
        # construction)
        #

        search_summary = lsctables.SearchSummaryTable.get_table(xmldoc)
        pids = set(self.multiinspiraltable.getColumnByName("process_id"))
        seglists = search_summary.get_out_segmentlistdict(pids)\
                                 .coalesce()

        #
        # construct the zero-lag time slide needed to cover the
        # instruments listed in all the triggers, then determine
        # its ID (or create it if needed)
        #
        # FIXME:  in the future, the sim_inspiral table should
        # indicate time slide at which the injection was done
        #

        self.tisi_id = ligolw_tisi.get_time_slide_id(xmldoc,
                                                     dict.fromkeys(
                                                         seglists, 0.0),
                                                     create_new=process)

        #
        # get coinc_definer row for sim_inspiral <--> multi_inspiral
        # coincs; this creates a coinc_definer table if the
        # document doesn't have one
        #

        self.sb_coinc_def_id = llwapp.get_coinc_def_id(
            xmldoc,
            sbdef.search,
            sbdef.search_coinc_type,
            create_new=True,
            description=sbdef.description)

        #
        # get coinc table, create one if needed
        #

        try:
            self.coinctable = lsctables.CoincTable.get_table(xmldoc)
        except ValueError:
            self.coinctable = lsctables.New(lsctables.CoincTable)
            xmldoc.childNodes[0].appendChild(self.coinctable)
        self.coinctable.sync_next_id()

        #
        # get coinc_map table, create one if needed
        #

        try:
            self.coincmaptable = lsctables.CoincMapTable.get_table(xmldoc)
        except ValueError:
            self.coincmaptable = lsctables.New(lsctables.CoincMapTable)
            xmldoc.childNodes[0].appendChild(self.coincmaptable)

        #
        # sort multi_inspiral table by end time
        #

        self.multiinspiraltable.sort(key=lambda row:
                                     (row.end_time, row.end_time_ns))
Example #29
File: hdf.py Project: veronica-villa/pycbc
    def to_coinc_xml_object(self, file_name):
        outdoc = ligolw.Document()
        outdoc.appendChild(ligolw.LIGO_LW())

        ifos = list(self.sngl_files.keys())
        proc_id = ligolw_process.register_to_xmldoc(
            outdoc,
            'pycbc', {},
            ifos=ifos,
            comment='',
            version=pycbc_version.git_hash,
            cvs_repository='pycbc/' + pycbc_version.git_branch,
            cvs_entry_time=pycbc_version.date).process_id

        search_summ_table = lsctables.New(lsctables.SearchSummaryTable)
        coinc_h5file = self.coinc_file.h5file
        try:
            start_time = coinc_h5file['segments']['coinc']['start'][:].min()
            end_time = coinc_h5file['segments']['coinc']['end'][:].max()
        except KeyError:
            start_times = []
            end_times = []
            for ifo_comb in coinc_h5file['segments']:
                if ifo_comb == 'foreground_veto':
                    continue
                seg_group = coinc_h5file['segments'][ifo_comb]
                start_times.append(seg_group['start'][:].min())
                end_times.append(seg_group['end'][:].max())
            start_time = min(start_times)
            end_time = max(end_times)
        num_trigs = len(self.sort_arr)
        search_summary = return_search_summary(start_time, end_time, num_trigs,
                                               ifos)
        search_summ_table.append(search_summary)
        outdoc.childNodes[0].appendChild(search_summ_table)

        sngl_inspiral_table = lsctables.New(lsctables.SnglInspiralTable)
        coinc_def_table = lsctables.New(lsctables.CoincDefTable)
        coinc_event_table = lsctables.New(lsctables.CoincTable)
        coinc_inspiral_table = lsctables.New(lsctables.CoincInspiralTable)
        coinc_event_map_table = lsctables.New(lsctables.CoincMapTable)
        time_slide_table = lsctables.New(lsctables.TimeSlideTable)

        # Set up time_slide table
        time_slide_id = lsctables.TimeSlideID(0)
        for ifo in ifos:
            time_slide_row = lsctables.TimeSlide()
            time_slide_row.instrument = ifo
            time_slide_row.time_slide_id = time_slide_id
            time_slide_row.offset = 0
            time_slide_row.process_id = proc_id
            time_slide_table.append(time_slide_row)

        # Set up coinc_definer table
        coinc_def_id = lsctables.CoincDefID(0)
        coinc_def_row = lsctables.CoincDef()
        coinc_def_row.search = "inspiral"
        coinc_def_row.description = \
            "sngl_inspiral<-->sngl_inspiral coincidences"
        coinc_def_row.coinc_def_id = coinc_def_id
        coinc_def_row.search_coinc_type = 0
        coinc_def_table.append(coinc_def_row)

        bank_col_names = ['mass1', 'mass2', 'spin1z', 'spin2z']
        bank_col_vals = {}
        for name in bank_col_names:
            bank_col_vals[name] = self.get_bankfile_array(name)

        coinc_event_names = ['ifar', 'time', 'fap', 'stat']
        coinc_event_vals = {}
        for name in coinc_event_names:
            if name == 'time':
                coinc_event_vals[name] = self.get_end_time()
            else:
                coinc_event_vals[name] = self.get_coincfile_array(name)

        sngl_col_names = [
            'snr', 'chisq', 'chisq_dof', 'bank_chisq', 'bank_chisq_dof',
            'cont_chisq', 'cont_chisq_dof', 'end_time', 'template_duration',
            'coa_phase', 'sigmasq'
        ]
        sngl_col_vals = {}
        for name in sngl_col_names:
            sngl_col_vals[name] = self.get_snglfile_array_dict(name)

        sngl_event_count = 0
        for idx in range(len(self.sort_arr)):
            # Set up IDs and mapping values
            coinc_id = lsctables.CoincID(idx)

            # Set up sngls
            # FIXME: as two-ifo coincs are hardcoded, loop over all ifos
            sngl_combined_mchirp = 0
            sngl_combined_mtot = 0
            net_snrsq = 0
            for ifo in ifos:
                # If this ifo is not participating in this coincidence then
                # ignore it and move on.
                if not sngl_col_vals['snr'][ifo][1][idx]:
                    continue
                event_id = lsctables.SnglInspiralID(sngl_event_count)
                sngl_event_count += 1
                sngl = return_empty_sngl()
                sngl.event_id = event_id
                sngl.ifo = ifo
                net_snrsq += sngl_col_vals['snr'][ifo][0][idx]**2
                for name in sngl_col_names:
                    val = sngl_col_vals[name][ifo][0][idx]
                    if name == 'end_time':
                        sngl.set_end(LIGOTimeGPS(val))
                    else:
                        setattr(sngl, name, val)
                for name in bank_col_names:
                    val = bank_col_vals[name][idx]
                    setattr(sngl, name, val)
                sngl.mtotal, sngl.eta = pnutils.mass1_mass2_to_mtotal_eta(
                    sngl.mass1, sngl.mass2)
                sngl.mchirp, _ = pnutils.mass1_mass2_to_mchirp_eta(
                    sngl.mass1, sngl.mass2)
                sngl.eff_distance = (sngl.sigmasq)**0.5 / sngl.snr
                sngl_combined_mchirp += sngl.mchirp
                sngl_combined_mtot += sngl.mtotal

                sngl_inspiral_table.append(sngl)

                # Set up coinc_map entry
                coinc_map_row = lsctables.CoincMap()
                coinc_map_row.table_name = 'sngl_inspiral'
                coinc_map_row.coinc_event_id = coinc_id
                coinc_map_row.event_id = event_id
                coinc_event_map_table.append(coinc_map_row)

            sngl_combined_mchirp = sngl_combined_mchirp / len(ifos)
            sngl_combined_mtot = sngl_combined_mtot / len(ifos)

            # Set up coinc inspiral and coinc event tables
            coinc_event_row = lsctables.Coinc()
            coinc_inspiral_row = lsctables.CoincInspiral()
            coinc_event_row.coinc_def_id = coinc_def_id
            coinc_event_row.nevents = len(ifos)
            coinc_event_row.instruments = ','.join(ifos)
            coinc_inspiral_row.set_ifos(ifos)
            coinc_event_row.time_slide_id = time_slide_id
            coinc_event_row.process_id = proc_id
            coinc_event_row.coinc_event_id = coinc_id
            coinc_inspiral_row.coinc_event_id = coinc_id
            coinc_inspiral_row.mchirp = sngl_combined_mchirp
            coinc_inspiral_row.mass = sngl_combined_mtot
            coinc_inspiral_row.set_end(
                LIGOTimeGPS(coinc_event_vals['time'][idx]))
            coinc_inspiral_row.snr = net_snrsq**0.5
            coinc_inspiral_row.false_alarm_rate = coinc_event_vals['fap'][idx]
            # IFAR is stored in years; convert the combined FAR to Hz
            coinc_inspiral_row.combined_far = \
                1. / (coinc_event_vals['ifar'][idx] * YRJUL_SI)
            coinc_event_row.likelihood = coinc_event_vals['stat'][idx]
            coinc_inspiral_row.minimum_duration = 0.
            coinc_event_table.append(coinc_event_row)
            coinc_inspiral_table.append(coinc_inspiral_row)

        outdoc.childNodes[0].appendChild(coinc_def_table)
        outdoc.childNodes[0].appendChild(coinc_event_table)
        outdoc.childNodes[0].appendChild(coinc_event_map_table)
        outdoc.childNodes[0].appendChild(time_slide_table)
        outdoc.childNodes[0].appendChild(coinc_inspiral_table)
        outdoc.childNodes[0].appendChild(sngl_inspiral_table)

        ligolw_utils.write_filename(outdoc, file_name)
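
A minimal driver sketch; `ForegroundTriggers` is the pycbc.io.hdf class this
method appears to belong to, and both the constructor arguments shown and the
file names are assumptions for illustration:

from pycbc.io.hdf import ForegroundTriggers

# Hypothetical inputs: a statmap (coinc) file, the template bank, and
# per-detector trigger files from a two-detector search.
trigs = ForegroundTriggers('H1L1-STATMAP.hdf', 'H1L1-BANK.hdf',
                           sngl_files=['H1-TRIGGERS.hdf', 'L1-TRIGGERS.hdf'],
                           n_loudest=10)
trigs.to_coinc_xml_object('H1L1-FOREGROUND.xml')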
Example #30
def excess_power2(
    ts_data,  # Time series from magnetic field data
    psd_segment_length,  # Length of each segment in seconds
    psd_segment_stride,  # Separation between 2 consecutive segments in seconds
    psd_estimation,  # Average method
    window_fraction,  # Whitening window fraction
    tile_fap,  # Tile false alarm probability threshold in Gaussian noise.
    station,  # Station
    nchans=None,  # Total number of channels
    band=None,  # Channel bandwidth
    fmin=0,  # Lowest frequency of the filter bank.
    fmax=None,  # Highest frequency of the filter bank.
    max_duration=None,  # Maximum duration of the tile
    wtype='tukey'):  # Whitening type, can be 'tukey' or 'hann'
    """
    Perform excess-power search analysis on magnetic field data.
    This method produces a time-frequency plot for every tile duration and
    bandwidth analysed, as well as an XML file identifying all the triggers
    found in the selected data within the user-defined time range.

    Parameters
    ----------
    ts_data : TimeSeries
      Time Series from magnetic field data
    psd_segment_length : float
      Length of each segment in seconds
    psd_segment_stride : float
      Separation between 2 consecutive segments in seconds
    psd_estimation : string
      Average method
    window_fraction : float
      Whitening window fraction
    tile_fap : float
      Tile false alarm probability threshold in Gaussian noise.
    station : str
      Station name
    nchans : int
      Total number of channels
    band : float
      Channel bandwidth
    fmin : float
      Lowest frequency of the filter bank.
    fmax : float
      Highest frequency of the filter bank.
    max_duration : float
      Maximum duration of a tile
    wtype : str
      Whitening window type, either 'tukey' or 'hann'
    """
    # Determine sampling rate based on extracted time series
    sample_rate = ts_data.sample_rate
    # Check if tile maximum frequency is not defined
    if fmax is None or fmax > sample_rate / 2.:
        # Set the tile maximum frequency equal to the Nyquist frequency
        # (i.e. half the sampling rate)
        fmax = sample_rate / 2.0
    # Check whether or not tile bandwidth and channel are defined
    if band is None and nchans is None:
        # Exit program with error message
        exit("Either bandwidth or number of channels must be specified...")
    else:
        # Check if tile maximum frequency larger than its minimum frequency
        assert fmax >= fmin
        # Define spectral band of data
        data_band = fmax - fmin
        # Check whether tile bandwidth or channel is defined
        if band is not None:
            # Define number of possible filter bands
            nchans = int(data_band / band) - 1
        elif nchans is not None:
            # Define filter bandwidth
            band = data_band / nchans
            nchans = nchans - 1
        # Check that the number of channels is greater than one
        assert nchans > 1
    # Print segment information
    print '|- Estimating PSD from segments of time',
    print '%.2f s in length, with %.2f s stride...' % (psd_segment_length,
                                                       psd_segment_stride)
    # Convert time series as array of float
    data = ts_data.astype(numpy.float64)
    # Define segment length for PSD estimation in sample unit
    seg_len = int(psd_segment_length * sample_rate)
    # Define separation between consecutive segments in sample unit
    seg_stride = int(psd_segment_stride * sample_rate)
    # Calculate the overall PSD from individual PSD segments
    fd_psd = psd.welch(data,
                       avg_method=psd_estimation,
                       seg_len=seg_len,
                       seg_stride=seg_stride)
    # We need this for the SWIG functions...
    lal_psd = fd_psd.lal()
    # Plot the power spectral density
    plot_spectrum(fd_psd)
    # Create whitening window
    print "|- Whitening window and spectral correlation..."
    if wtype == 'hann':
        window = lal.CreateHannREAL8Window(seg_len)
    elif wtype == 'tukey':
        window = lal.CreateTukeyREAL8Window(seg_len, window_fraction)
    else:
        raise ValueError("Can't handle window type %s" % wtype)
    # Create FFT plan
    fft_plan = lal.CreateForwardREAL8FFTPlan(len(window.data.data), 1)
    # Perform two point spectral correlation
    spec_corr = lal.REAL8WindowTwoPointSpectralCorrelation(window, fft_plan)
    # Initialise filter bank
    print "|- Create filter..."
    filter_bank, fdb = [], []
    # Loop over each channel
    for i in range(nchans):
        channel_flow = fmin + band / 2 + i * band
        channel_width = band
        # Create excess power filter
        lal_filter = lalburst.CreateExcessPowerFilter(channel_flow,
                                                      channel_width, lal_psd,
                                                      spec_corr)
        filter_bank.append(lal_filter)
        fdb.append(Spectrum.from_lal(lal_filter))
    # Calculate the minimum bandwidth
    min_band = (len(filter_bank[0].data.data) - 1) * filter_bank[0].deltaF / 2
    # Plot filter bank
    plot_bank(fdb)
    # Convert filter bank from frequency to time domain
    print "|- Convert all the frequency domain to the time domain..."
    tdb = []
    # Loop over each filter's spectrum
    for fdt in fdb:
        zero_padded = numpy.zeros(int((fdt.f0 / fdt.df).value) + len(fdt))
        st = int((fdt.f0 / fdt.df).value)
        zero_padded[st:st + len(fdt)] = numpy.real_if_close(fdt.value)
        n_freq = int(sample_rate / 2 / fdt.df.value) * 2
        tdt = numpy.fft.irfft(zero_padded, n_freq) * math.sqrt(sample_rate)
        tdt = numpy.roll(tdt, len(tdt) / 2)
        tdt = TimeSeries(tdt,
                         name="",
                         epoch=fdt.epoch,
                         sample_rate=sample_rate)
        tdb.append(tdt)
    # Plot time series filter
    plot_filters(tdb, fmin, band)
    # Compute the renormalization for the base filters up to a given bandwidth.
    mu_sq_dict = {}
    # Loop through powers of 2 up to number of channels
    for nc_sum in range(0, int(math.log(nchans, 2))):
        nc_sum = 2**nc_sum - 1
        print "|- Calculating renormalization for resolution level containing %d %fHz channels" % (
            nc_sum + 1, min_band)
        mu_sq = (nc_sum + 1) * numpy.array([
            lalburst.ExcessPowerFilterInnerProduct(f, f, spec_corr, None)
            for f in filter_bank
        ])
        # Uncomment to get all possible frequency renormalizations
        #for n in xrange(nc_sum, nchans): # channel position index
        for n in xrange(nc_sum, nchans, nc_sum + 1):  # channel position index
            for k in xrange(0, nc_sum):  # channel sum index
                # FIXME: We've precomputed this, so use it instead
                mu_sq[n] += 2 * lalburst.ExcessPowerFilterInnerProduct(
                    filter_bank[n - k], filter_bank[n - 1 - k], spec_corr,
                    None)
        #print mu_sq[nc_sum::nc_sum+1]
        mu_sq_dict[nc_sum] = mu_sq
    # Create an event list where all the triggers will be stored
    event_list = lsctables.New(lsctables.SnglBurstTable, [
        'start_time', 'start_time_ns', 'peak_time', 'peak_time_ns', 'duration',
        'bandwidth', 'central_freq', 'chisq_dof', 'confidence', 'snr',
        'amplitude', 'channel', 'ifo', 'process_id', 'event_id', 'search',
        'stop_time', 'stop_time_ns'
    ])
    # Create repositories to save TF and time series plots
    os.system('mkdir -p segments/time-frequency')
    os.system('mkdir -p segments/time-series')
    # Define time edges
    t_idx_min, t_idx_max = 0, seg_len
    while t_idx_max <= len(ts_data):
        # Define starting and ending time of the segment in seconds
        start_time = ts_data.start_time + t_idx_min / float(
            ts_data.sample_rate)
        end_time = ts_data.start_time + t_idx_max / float(ts_data.sample_rate)
        print "\n|-- Analyzing block %i to %i (%.2f percent)" % (
            start_time, end_time, 100 * float(t_idx_max) / len(ts_data))
        # Model a whitened time series for the block
        tmp_ts_data = types.TimeSeries(ts_data[t_idx_min:t_idx_max] *
                                       window.data.data,
                                       delta_t=1. / ts_data.sample_rate,
                                       epoch=start_time)
        # Save time series in relevant repository
        segfolder = 'segments/%i-%i' % (start_time, end_time)
        os.system('mkdir -p ' + segfolder)
        plot_ts(tmp_ts_data,
                fname='segments/time-series/%i-%i.png' %
                (start_time, end_time))
        # Convert times series to frequency series
        fs_data = tmp_ts_data.to_frequencyseries()
        print "|-- Frequency series data has variance: %s" % fs_data.data.std(
        )**2
        # Whitening (FIXME: Whiten the filters, not the data)
        fs_data.data /= numpy.sqrt(fd_psd) / numpy.sqrt(2 * fd_psd.delta_f)
        print "|-- Whitened frequency series data has variance: %s" % fs_data.data.std(
        )**2
        print "|-- Create time-frequency plane for current block"
        # Return the complex snr, along with its associated normalization of the template,
        # matched filtered against the data
        #filter.matched_filter_core(types.FrequencySeries(tmp_filter_bank,delta_f=fd_psd.delta_f),
        #                           fs_data,h_norm=1,psd=fd_psd,low_frequency_cutoff=filter_bank[0].f0,
        #                           high_frequency_cutoff=filter_bank[0].f0+2*band)
        print "|-- Filtering all %d channels..." % nchans
        # Initialise 2D zero array
        tmp_filter_bank = numpy.zeros(len(fd_psd), dtype=numpy.complex128)
        # Initialise 2D zero array for time-frequency map
        tf_map = numpy.zeros((nchans, seg_len), dtype=numpy.complex128)
        # Loop over all the channels
        for i in range(nchans):
            # Reset filter bank series
            tmp_filter_bank *= 0.0
            # Index of starting frequency
            f1 = int(filter_bank[i].f0 / fd_psd.delta_f)
            # Index of ending frequency
            f2 = int((filter_bank[i].f0 + 2 * band) / fd_psd.delta_f) + 1
            # (FIXME: Why is there a factor of 2 here?)
            tmp_filter_bank[f1:f2] = filter_bank[i].data.data * 2
            # Define the template to filter the frequency series with
            template = types.FrequencySeries(tmp_filter_bank,
                                             delta_f=fd_psd.delta_f,
                                             copy=False)
            # Create filtered series
            filtered_series = filter.matched_filter_core(
                template,
                fs_data,
                h_norm=None,
                psd=None,
                low_frequency_cutoff=filter_bank[i].f0,
                high_frequency_cutoff=filter_bank[i].f0 + 2 * band)
            # Include filtered series in the map
            tf_map[i, :] = filtered_series[0].numpy()
        # Plot spectrogram
        plot_spectrogram(numpy.abs(tf_map).T,
                         tmp_ts_data.delta_t,
                         band,
                         ts_data.sample_rate,
                         start_time,
                         end_time,
                         fname='segments/time-frequency/%i-%i.png' %
                         (start_time, end_time))
        # Loop through all summed channels
        for nc_sum in range(0, int(math.log(nchans, 2)))[::-1]:
            nc_sum = 2**nc_sum - 1
            mu_sq = mu_sq_dict[nc_sum]
            # Clip the boundaries to remove window corruption
            clip_samples = int(psd_segment_length * window_fraction *
                               ts_data.sample_rate / 2)
            # Construct the tiles and calculate their energies
            print "\n|--- Constructing tile with %d summed channels..." % (
                nc_sum + 1)
            # Current bandwidth of the time-frequency map tiles
            df = band * (nc_sum + 1)
            dt = 1.0 / (2 * df)
            # Number of raw samples each "step" spans -- the undersampling rate
            us_rate = int(round(dt / ts_data.delta_t))
            print "|--- Undersampling rate for this level: %f" % (
                ts_data.sample_rate / us_rate)
            print "|--- Calculating tiles..."
            # Making independent tiles
            # because [0:-0] does not give the full array
            tf_map_temp = tf_map[:,clip_samples:-clip_samples:us_rate] \
                          if clip_samples > 0 else tf_map[:,::us_rate]
            tiles = tf_map_temp.copy()
            # Here's the deal: we're going to keep only the valid output and
            # it's *always* going to exist in the lowest available indices
            stride = nc_sum + 1
            for i in xrange(tiles.shape[0] / stride):
                numpy.absolute(tiles[stride * i:stride * (i + 1)].sum(axis=0),
                               tiles[stride * (i + 1) - 1])
            tiles = tiles[nc_sum::nc_sum + 1].real**2 \
                / mu_sq[nc_sum::nc_sum + 1].reshape(-1, 1)
            print "|--- TF-plane is %dx%s samples" % tiles.shape
            print "|--- Tile energy mean %f, var %f" % (numpy.mean(tiles),
                                                        numpy.var(tiles))
            # Define the maximum number of degrees of freedom and check that
            # it is at least 2
            max_dof = 32 if max_duration is None else 2 * max_duration * df
            assert max_dof >= 2
            # Loop through multiple degrees of freedom
            for j in [2**l for l in xrange(0, int(math.log(max_dof, 2)))]:
                # Duration is fixed by the NDOF and bandwidth
                duration = j * dt
                print "\n|----- Explore signal duration of %f s..." % duration
                print "|----- Summing DOF = %d ..." % (2 * j)
                tlen = tiles.shape[1] - 2 * j + 1 + 1
                dof_tiles = numpy.zeros((tiles.shape[0], tlen))
                sum_filter = numpy.array([1, 0] * (j - 1) + [1])
                for f in range(tiles.shape[0]):
                    # Sum and drop correlated tiles
                    dof_tiles[f] = fftconvolve(tiles[f], sum_filter, 'valid')
                print "|----- Summed tile energy mean: %f, var %f" % (
                    numpy.mean(dof_tiles), numpy.var(dof_tiles))
                plot_spectrogram(
                    dof_tiles.T,
                    dt,
                    df,
                    ts_data.sample_rate,
                    start_time,
                    end_time,
                    fname='segments/%i-%i/tf_%02ichans_%02idof.png' %
                    (start_time, end_time, nc_sum + 1, 2 * j))
                threshold = scipy.stats.chi2.isf(tile_fap, j)
                print "|------ Threshold for this level: %f" % threshold
                spant, spanf = dof_tiles.shape[1] * dt, dof_tiles.shape[0] * df
                print "|------ Processing %.2fx%.2f time-frequency map." % (
                    spant, spanf)
                # Since we clip the data, the start time needs to be adjusted accordingly
                window_offset_epoch = fs_data.epoch + psd_segment_length * window_fraction / 2
                window_offset_epoch = LIGOTimeGPS(float(window_offset_epoch))
                for i, j in zip(*numpy.where(dof_tiles > threshold)):
                    event = event_list.RowType()
                    # The points are summed forward in time and thus a `summed point' is the
                    # sum of the previous N points. If this point is above threshold, it
                    # corresponds to a tile which spans the previous N points. However, the
                    # 0th point (due to the convolution specifier 'valid') is actually
                    # already a duration from the start time. All of this means, the +
                    # duration and the - duration cancels, and the tile 'start' is, by
                    # definition, the start of the time frequency map if j = 0
                    # FIXME: I think this needs a + dt/2 to center the tile properly
                    event.set_start(window_offset_epoch + float(j * dt))
                    event.set_stop(window_offset_epoch + float(j * dt) +
                                   duration)
                    event.set_peak(event.get_start() + duration / 2)
                    event.central_freq = (filter_bank[0].f0 + band / 2
                                          + i * df + 0.5 * df)
                    event.duration = duration
                    event.bandwidth = df
                    event.chisq_dof = 2 * duration * df
                    event.snr = math.sqrt(
                        dof_tiles[i, j] / event.chisq_dof - 1)
                    # FIXME: Magic number 0.62 should be determined empirically
                    event.confidence = -lal.LogChisqCCDF(
                        event.snr * 0.62, event.chisq_dof * 0.62)
                    event.amplitude = None
                    event.process_id = None
                    event.event_id = event_list.get_next_id()
                    event_list.append(event)
                for event in event_list[::-1]:
                    if event.amplitude is not None:
                        continue
                    etime_min = float(event.get_start()) - float(fs_data.epoch)
                    etime_min_idx = int(etime_min / tmp_ts_data.delta_t)
                    etime_max = etime_min + event.duration
                    etime_max_idx = int(etime_max / tmp_ts_data.delta_t)
                    # (band / 2) to account for sin^2 wings from finest filters
                    flow_idx = int((event.central_freq - event.bandwidth / 2 -
                                    (df / 2) - fmin) / df)
                    fhigh_idx = int((event.central_freq + event.bandwidth / 2 +
                                     (df / 2) - fmin) / df)
                    # TODO: Check that the undersampling rate is always commensurate
                    # with the indexing: that is to say that
                    # mod(etime_min_idx, us_rate) == 0 always
                    z_j_b = tf_map[flow_idx:fhigh_idx,
                                   etime_min_idx:etime_max_idx:us_rate]
                    event.amplitude = 0
                print "|------ Total number of events: %d" % len(event_list)
        t_idx_min += int(seg_len * (1 - window_fraction))
        t_idx_max += int(seg_len * (1 - window_fraction))
    setname = "MagneticFields"
    __program__ = 'pyburst_excesspower'
    start_time = LIGOTimeGPS(int(ts_data.start_time))
    end_time = LIGOTimeGPS(int(ts_data.end_time))
    inseg = segment(start_time, end_time)
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    ifo = 'H1'  #channel_name.split(":")[0]
    straindict = psd.insert_psd_option_group.__dict__
    proc_row = register_to_xmldoc(xmldoc,
                                  __program__,
                                  straindict,
                                  ifos=[ifo],
                                  version=git_version.id,
                                  cvs_repository=git_version.branch,
                                  cvs_entry_time=git_version.date)
    dt_stride = psd_segment_length
    sample_rate = ts_data.sample_rate
    # Amount to overlap successive blocks so as not to lose data
    window_overlap_samples = window_fraction * sample_rate
    outseg = inseg.contract(window_fraction * dt_stride / 2)
    # With a given dt_stride, we cannot process the remainder of this data
    remainder = math.fmod(abs(outseg), dt_stride * (1 - window_fraction))
    # ...so make an accounting of it
    outseg = segment(outseg[0], outseg[1] - remainder)
    ss = append_search_summary(xmldoc,
                               proc_row,
                               ifos=(station, ),
                               inseg=inseg,
                               outseg=outseg)
    for sb in event_list:
        sb.process_id = proc_row.process_id
        sb.search = proc_row.program
        sb.ifo, sb.channel = station, setname
    xmldoc.childNodes[0].appendChild(event_list)
    fname = 'excesspower.xml.gz'
    utils.write_filename(xmldoc, fname, gz=fname.endswith("gz"))
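
A minimal driver sketch; the station name and tuning values below are
placeholders, and `ts_data` is assumed to be a TimeSeries of magnetometer
data loaded elsewhere:

# Hypothetical call on pre-loaded magnetic field data.
excess_power2(ts_data,
              psd_segment_length=64,   # 64 s PSD segments
              psd_segment_stride=32,   # 50% overlap between segments
              psd_estimation='median-mean',
              window_fraction=0.25,
              tile_fap=1e-7,
              station='MyStation01',
              band=1.0,                # 1 Hz base channels
              wtype='tukey')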