Code example #1
def coinc_without_inj(coinc, tmpdir):
    """Produce a coinc.xml file with the found coincs stripped out."""
    filename = str(tmpdir / 'coinc_without_inj.xml')
    xmldoc = ligolw_utils.load_filename(coinc, contenthandler=ContentHandler)

    # Prune coinc_def table
    coinc_def_table = lsctables.CoincDefTable.get_table(xmldoc)
    included = [row for row in coinc_def_table
                if row.search_coinc_type == InspiralCoincDef.search_coinc_type
                and row.search == InspiralCoincDef.search]
    included_coinc_def_ids = {row.coinc_def_id for row in included}
    coinc_def_table[:] = included

    # Prune coinc table
    coinc_table = lsctables.CoincTable.get_table(xmldoc)
    included = [row for row in coinc_table
                if row.coinc_def_id in included_coinc_def_ids]
    included_coinc_ids = {row.coinc_event_id for row in included}
    coinc_table[:] = included

    # Prune coinc_map table
    coinc_map_table = lsctables.CoincMapTable.get_table(xmldoc)
    coinc_map_table[:] = [row for row in coinc_map_table
                          if row.coinc_event_id in included_coinc_ids]

    ligolw_utils.write_filename(xmldoc, filename)
    return filename
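A hedged usage sketch for the helper above: it reads like a pytest helper, so `tmpdir` is assumed to be pytest's tmpdir fixture object and 'coinc.xml' an existing coinc document on disk (both names are assumptions for illustration).

# Hypothetical call, e.g. from inside a pytest fixture or test:
stripped = coinc_without_inj('coinc.xml', tmpdir)
# `stripped` now points at coinc_without_inj.xml containing only the
# inspiral coinc rows selected above.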
Code example #2
    def save(self, path, group=None, ifo='P1'):
        """
        Save frequency series to a Numpy .npy, hdf, xml, or text file. The
        first column contains the sample frequencies, the second contains
        the values. In the case of a complex frequency series saved as
        text, the imaginary part is written as a third column. When using
        hdf format, the data is stored as a single vector, along with
        relevant attributes.

        Parameters
        ----------
        path: string
            Destination file path. Must end with .hdf, .npy, .txt, .xml,
            or .xml.gz.

        group: string
            Additional name for internal storage use. Ex. hdf storage uses
            this as the key value.

        ifo: string
            Detector name used as the dictionary key when writing a PSD
            to XML. Default is 'P1'.

        Raises
        ------
        ValueError
            If path does not end in one of the recognized extensions.
        """

        ext = _os.path.splitext(path)[1]
        if ext == '.npy':
            output = _numpy.vstack((self.sample_frequencies.numpy(),
                                    self.numpy())).T
            _numpy.save(path, output)
        elif ext == '.txt':
            if self.kind == 'real':
                output = _numpy.vstack((self.sample_frequencies.numpy(),
                                        self.numpy())).T
            elif self.kind == 'complex':
                output = _numpy.vstack((self.sample_frequencies.numpy(),
                                        self.numpy().real,
                                        self.numpy().imag)).T
            _numpy.savetxt(path, output)
        elif ext == '.xml' or path.endswith('.xml.gz'):
            from pylal import series as lalseries
            from glue.ligolw import utils
            assert(self.kind == 'real')
            output = self.lal()
            # When writing in this format we must *not* have zero values at
            # frequencies below flow. To resolve this we fill everything
            # below the first non-zero sample with that sample's value.
            data_lal = output.data.data
            first_idx = _numpy.argmax(data_lal > 0)
            if first_idx != 0:
                data_lal[:first_idx] = data_lal[first_idx]
            psddict = {ifo: output}
            utils.write_filename(lalseries.make_psd_xmldoc(psddict), path,
                                 gz=path.endswith(".gz"))
        elif ext == '.hdf':
            key = 'data' if group is None else group
            # Open in append mode and close the file when done writing.
            with h5py.File(path, 'a') as d:
                d[key] = self.numpy()
                d[key].attrs['epoch'] = float(self.epoch)
                d[key].attrs['delta_f'] = float(self.delta_f)
        else:
            raise ValueError('Path must end with .npy, .txt, .xml(.gz) '
                             'or .hdf')
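A short, hedged usage sketch of the save method above, assuming `psd` is an instance of the pycbc.types.FrequencySeries class this method appears to belong to; the file names are illustrative only:

# Hypothetical usage of the method above.
psd.save('psd.txt')                   # two columns: frequency, value
psd.save('psd.hdf', group='H1_psd')   # stored under the given hdf key
psd.save('psd.xml.gz', ifo='H1')      # real PSDs only, keyed by detector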
Code example #3
def write_to_xml(cells, intr_prms, fvals=None, fname=None, verbose=False):
    """
    Write a set of cells, with dimensions corresponding to intr_prms to an XML file as sim_inspiral rows.
    """
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    procrow = process.append_process(xmldoc, program=sys.argv[0])
    procid = procrow.process_id
    process.append_process_params(xmldoc, procrow, process.process_params_from_dict(opts.__dict__))

    rows = ["simulation_id", "process_id", "numrel_data"] + list(intr_prms)
    if fvals is not None:
        rows.append("alpha1")
    sim_insp_tbl = lsctables.New(lsctables.SimInspiralTable, rows)
    for itr, intr_prm in enumerate(cells):
        sim_insp = sim_insp_tbl.RowType()
        # FIXME: Need better IDs
        sim_insp.numrel_data = "INTR_SET_%d" % itr
        sim_insp.simulation_id = ilwd.ilwdchar("sim_inspiral:sim_inspiral_id:%d" % itr)
        sim_insp.process_id = procid
        if fvals:
            sim_insp.alpha1 = fvals[itr]
        for p, v in zip(intr_prms, intr_prm._center):
            setattr(sim_insp, p, v)
        sim_insp_tbl.append(sim_insp)

    xmldoc.childNodes[0].appendChild(sim_insp_tbl)
    if fname is None:
        channel_name = ["H=H", "L=L"]
        ifos = "".join([o.split("=")[0][0] for o in channel_name])
        #start = int(event_time)
        start = 0
        fname = "%s-MASS_POINTS-%d-1.xml.gz" % (ifos, start)
    utils.write_filename(xmldoc, fname, gz=True, verbose=verbose)
Code example #4
def write_xml(trig_times,freqs,snrs,channel,start_time,length,thresh,outdir):
    if len(trig_times):
        print 'number of triggers is ' + str(len(trig_times))
        print 'trigger rate is ' + str(float(len(trig_times))/length)
        if (float(len(trig_times))/length > 16):
            print 'Trigger rate too high, skipping channel'
            return
        sngl_burst_table = lsctables.New(lsctables.SnglBurstTable, ["peak_time", "peak_time_ns","peak_frequency","snr"])

        for t,f,s in zip(trig_times, freqs, snrs):
            row = sngl_burst_table.RowType()
            row.set_peak(t)
            row.peak_frequency = f
            row.snr = s
            sngl_burst_table.append(row)
    
        xmldoc = ligolw.Document()
        xmldoc.appendChild(ligolw.LIGO_LW())
        xmldoc.childNodes[0].appendChild(sngl_burst_table)
        # define trigger directory
        trig_dir = (outdir + channel[:2] + '/' + channel[3:] + '_' +
                    str(thresh) + '_DAC/' + str(start_time)[:5] + '/')

        if not os.path.exists(trig_dir):
            os.makedirs(trig_dir)

        # create filename string
        utils.write_filename(xmldoc, trig_dir + channel[:2] + "-" +
                             channel[3:6] + "_" + channel[7:] + "_" +
                             str(thresh) + "_DAC-" + str(start_time) + "-" +
                             str(length) + ".xml.gz", gz=True)

        print 'wrote XML for channel: ' + str(channel)
        print 'number of triggers: ' + str(np.size(trig_times))
    else:
        print 'No triggers found for channel: ' + str(channel)  
Code example #5
    def save(self, filename):
        """Write this trigger to gracedb compatible xml format

        Parameters
        ----------
        filename: str
            Name of file to write to disk.
        """
        ligolw_utils.write_filename(self.outdoc, filename)
Code example #6
File: live.py Project: prayush/pycbc
    def save(self, filename):
        """Write this trigger to gracedb compatible xml format

        Parameters
        ----------
        filename: str
            Name of file to write to disk.
        """
        ligolw_utils.write_filename(self.outdoc, filename)
Code example #7
def write_likelihood_data(filename,
                          coincparamsdistributions,
                          seglists,
                          verbose=False):
    utils.write_filename(ligolw_burca_tailor.gen_likelihood_control(
        coincparamsdistributions, seglists, name=u"string_cusp_likelihood"),
                         filename,
                         verbose=verbose,
                         gz=(filename or "stdout").endswith(".gz"))
Code example #8
    def upload(self, fname, psds, low_frequency_cutoff, testing=True):
        """Upload this trigger to gracedb

        Parameters
        ----------
        fname: str
            The name to give the xml file associated with this trigger
        psds: dict of pycbc.types.FrequencySeries
            An ifo-keyed dictionary of psds to be uploaded in association
            with this trigger.
        low_frequency_cutoff: float
            The low frequency cutoff of the psds.
        testing: bool
            Switch to determine if the upload should be sent to gracedb as a
            test trigger (True) or a production trigger (False)
        """
        from ligo.gracedb.rest import GraceDb
        import lal
        import lal.series

        self.save(fname)
        if testing:
            group = 'Test'
        else:
            group = 'CBC'

        gracedb = GraceDb()
        r = gracedb.createEvent(group, "pycbc", fname, "AllSky").json()
        logging.info("Uploaded event %s.", r["graceid"])

        if self.is_hardware_injection:
            gracedb.writeLabel(r['graceid'], 'INJ')
            logging.info("Tagging event %s as an injection", r["graceid"])

        # Convert our psds to the xml psd format.
        # FIXME: we should not use lal.series!!!
        psds_lal = {}
        for ifo in psds:
            psd = psds[ifo]
            kmin = int(low_frequency_cutoff / psd.delta_f)
            fseries = lal.CreateREAL8FrequencySeries(
                "psd", psd.epoch, low_frequency_cutoff, psd.delta_f,
                lal.StrainUnit**2 / lal.HertzUnit,
                len(psd) - kmin)
            fseries.data.data = psd.numpy()[kmin:] / pycbc.DYN_RANGE_FAC**2.0
            psds_lal[ifo] = fseries

        psd_xmldoc = lal.series.make_psd_xmldoc(psds_lal)
        ligolw_utils.write_filename(psd_xmldoc, "tmp_psd.xml.gz", gz=True)
        gracedb.writeLog(r["graceid"],
                         "PyCBC PSD estimate from the time of event",
                         "psd.xml.gz",
                         open("tmp_psd.xml.gz", "rb").read(), "psd").json()
        logging.info("Uploaded file psd.xml.gz to event %s.", r["graceid"])
Code example #9
File: live.py Project: prayush/pycbc
    def upload(self, fname, psds, low_frequency_cutoff, testing=True):
        """Upload this trigger to gracedb

        Parameters
        ----------
        fname: str
            The name to give the xml file associated with this trigger
        psds: dict of pycbc.types.FrequencySeries
            An ifo-keyed dictionary of psds to be uploaded in association
            with this trigger.
        low_frequency_cutoff: float
            The low frequency cutoff of the psds.
        testing: bool
            Switch to determine if the upload should be sent to gracedb as a
            test trigger (True) or a production trigger (False)
        """
        from ligo.gracedb.rest import GraceDb
        import lal
        import lal.series

        self.save(fname)
        if testing:
            group = 'Test'
        else:
            group = 'CBC'

        gracedb = GraceDb()
        r = gracedb.createEvent(group, "pycbc", fname, "AllSky").json()
        logging.info("Uploaded event %s.", r["graceid"])

        if self.is_hardware_injection:
            gracedb.writeLabel(r['graceid'], 'INJ')
            logging.info("Tagging event %s as an injection", r["graceid"])

        # Convert our psds to the xml psd format.
        # FIXME: we should not use lal.series!!!
        psds_lal = {}
        for ifo in psds:
            psd = psds[ifo]
            kmin = int(low_frequency_cutoff / psd.delta_f)
            fseries = lal.CreateREAL8FrequencySeries(
                "psd", psd.epoch, low_frequency_cutoff, psd.delta_f,
                lal.StrainUnit**2 / lal.HertzUnit, len(psd) - kmin)
            fseries.data.data = psd.numpy()[kmin:] / pycbc.DYN_RANGE_FAC ** 2.0
            psds_lal[ifo] = fseries

        psd_xmldoc = lal.series.make_psd_xmldoc(psds_lal)
        ligolw_utils.write_filename(psd_xmldoc, "tmp_psd.xml.gz", gz=True)
        gracedb.writeLog(r["graceid"],
                         "PyCBC PSD estimate from the time of event",
                         "psd.xml.gz", open("tmp_psd.xml.gz", "rb").read(),
                         "psd").json()
        logging.info("Uploaded file psd.xml.gz to event %s.", r["graceid"])
Code example #10
File: ligolw.py Project: stefco/gwpy
def write_tables(target, tables, append=False, overwrite=False, **kwargs):
    """Write an LIGO_LW table to file

    Parameters
    ----------
    target : `str`, `file`, :class:`~glue.ligolw.ligolw.Document`
        the file or document to write into

    tables : `list`, `tuple` of :class:`~glue.ligolw.table.Table`
        the tables to write

    append : `bool`, optional, default: `False`
        if `True`, append to an existing file/table, otherwise `overwrite`

    overwrite : `bool`, optional, default: `False`
        if `True`, delete an existing instance of the table type, otherwise
        append new rows

    **kwargs
        other keyword arguments to pass to
        :func:`~glue.ligolw.utils.write_filename`, or
        :func:`~glue.ligolw.utils.write_fileobj` as appropriate
    """
    from glue.ligolw.ligolw import (Document, LIGO_LW, LIGOLWContentHandler)
    from glue.ligolw import utils as ligolw_utils

    # allow writing directly to XML
    if isinstance(target, (Document, LIGO_LW)):
        xmldoc = target
    # open existing document, if possible
    elif append and not overwrite:
        xmldoc = open_xmldoc(
            target, contenthandler=kwargs.pop('contenthandler',
                                              LIGOLWContentHandler))
    # fail on existing document and not overwriting
    elif (not overwrite and isinstance(target, string_types) and
          os.path.isfile(target)):
        raise IOError("File exists: {}".format(target))
    else:  # or create a new document
        xmldoc = Document()

    # convert table to format
    write_tables_to_document(xmldoc, tables, overwrite=overwrite)

    # write file
    if isinstance(target, string_types):
        kwargs.setdefault('gz', target.endswith('.gz'))
        ligolw_utils.write_filename(xmldoc, target, **kwargs)
    elif isinstance(target, FILE_LIKE):
        kwargs.setdefault('gz', target.name.endswith('.gz'))
        ligolw_utils.write_fileobj(xmldoc, target, **kwargs)
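A minimal usage sketch of write_tables, assuming glue is installed; the table is left empty just to keep the example self-contained:

from glue.ligolw import lsctables

# Hypothetical usage: write one (empty) sngl_inspiral table; the gz
# keyword is inferred from the '.gz' suffix by the function above.
tbl = lsctables.New(lsctables.SnglInspiralTable)
write_tables('triggers.xml.gz', [tbl])
write_tables('triggers.xml.gz', [tbl], append=True)  # reopen and append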
Code example #11
def write_tables(target, tables, append=False, overwrite=False, **kwargs):
    """Write an LIGO_LW table to file

    Parameters
    ----------
    target : `str`, `file`, :class:`~glue.ligolw.ligolw.Document`
        the file or document to write into

    tables : `list`, `tuple` of :class:`~glue.ligolw.table.Table`
        the tables to write

    append : `bool`, optional, default: `False`
        if `True`, append to an existing file/table, otherwise `overwrite`

    overwrite : `bool`, optional, default: `False`
        if `True`, delete an existing instance of the table type, otherwise
        append new rows

    **kwargs
        other keyword arguments to pass to
        :func:`~glue.ligolw.utils.write_filename`, or
        :func:`~glue.ligolw.utils.write_fileobj` as appropriate
    """
    from glue.ligolw.ligolw import (Document, LIGO_LW, LIGOLWContentHandler)
    from glue.ligolw import utils as ligolw_utils

    # allow writing directly to XML
    if isinstance(target, (Document, LIGO_LW)):
        xmldoc = target
    # open existing document, if possible
    elif append and not overwrite:
        xmldoc = open_xmldoc(target,
                             contenthandler=kwargs.pop('contenthandler',
                                                       LIGOLWContentHandler))
    # fail on existing document and not overwriting
    elif (not overwrite and isinstance(target, string_types)
          and os.path.isfile(target)):
        raise IOError("File exists: {}".format(target))
    else:  # or create a new document
        xmldoc = Document()

    # convert table to format
    write_tables_to_document(xmldoc, tables, overwrite=overwrite)

    # write file
    if isinstance(target, string_types):
        kwargs.setdefault('gz', target.endswith('.gz'))
        ligolw_utils.write_filename(xmldoc, target, **kwargs)
    elif isinstance(target, FILE_LIKE):
        kwargs.setdefault('gz', target.name.endswith('.gz'))
        ligolw_utils.write_fileobj(xmldoc, target, **kwargs)
Code example #12
def write_to_xml(cells,
                 intr_prms,
                 pin_prms={},
                 fvals=None,
                 fname=None,
                 verbose=False):
    """
    Write a set of cells, with dimensions corresponding to intr_prms to an XML file as sim_inspiral rows.
    """
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    procrow = process.append_process(xmldoc, program=sys.argv[0])
    procid = procrow.process_id
    process.append_process_params(
        xmldoc, procrow, process.process_params_from_dict(opts.__dict__))

    rows = ["simulation_id", "process_id", "numrel_data"]
    # Override eff_lambda with psi0, its shoehorn column
    if "eff_lambda" in intr_prms:
        intr_prms[intr_prms.index("eff_lambda")] = "psi0"
    if "deff_lambda" in intr_prms:
        intr_prms[intr_prms.index("deff_lambda")] = "psi3"
    rows += list(intr_prms)
    rows += list(pin_prms)
    if fvals is not None:
        rows.append("alpha1")
    sim_insp_tbl = lsctables.New(lsctables.SimInspiralTable, rows)
    for itr, intr_prm in enumerate(cells):
        sim_insp = sim_insp_tbl.RowType()
        # FIXME: Need better IDs
        sim_insp.numrel_data = "INTR_SET_%d" % itr
        sim_insp.simulation_id = ilwd.ilwdchar(
            "sim_inspiral:sim_inspiral_id:%d" % itr)
        sim_insp.process_id = procid
        if fvals:
            sim_insp.alpha1 = fvals[itr]
        for p, v in zip(intr_prms, intr_prm._center):
            setattr(sim_insp, p, v)
        for p, v in pin_prms.iteritems():
            setattr(sim_insp, p, v)
        sim_insp_tbl.append(sim_insp)

    xmldoc.childNodes[0].appendChild(sim_insp_tbl)
    if fname is None:
        channel_name = ["H=H", "L=L"]
        ifos = "".join([o.split("=")[0][0] for o in channel_name])
        #start = int(event_time)
        start = 0
        fname = "%s-MASS_POINTS-%d-1.xml.gz" % (ifos, start)
    utils.write_filename(xmldoc, fname, gz=True, verbose=verbose)
Code example #13
File: mdctools.py Project: transientlunatic/minke
    def save_xml(self, filename):
        """
        Save the MDC set as an XML SimBurstTable.

        Parameters
        ----------
        filename : str
           The location to save the xml file. The output is gzipped, so
           ending the filename with ".gz" follows convention.
        """
        xmldoc = ligolw.Document()
        lw = xmldoc.appendChild(ligolw.LIGO_LW())
        sim = lsctables.New(self.table_type)
        lw.appendChild(sim)
        # This needs to be given the proper metadata once the package has the maturity to
        # write something sensible.
        for waveform in self.waveforms:
            procrow = process.register_to_xmldoc(xmldoc, "minke_burst_mdc+{}".format(minke.__version__), {}) # waveform.params)
            try:
                waveform_row = waveform._row(sim)
                waveform_row.process_id = procrow.process_id
            except Exception:
                # Fall back to building the row by hand if _row() is
                # unavailable or fails for this waveform type.
                row = sim.RowType()
                for a in self.table_type.validcolumns.keys():
                    if a in waveform.params.keys():
                        setattr(row, a, waveform.params[a])
                    else:
                        if not hasattr(waveform, a):
                            setattr(row, a, 0)
                        else:
                            setattr(row, a, getattr(waveform, a))

                row.waveform = waveform.waveform
                if self.table_type == lsctables.SimBurstTable:
                    # Fill in the time
                    row.set_time_geocent(GPS(float(waveform.time)))
                    # Get the sky locations
                    row.ra, row.dec, row.psi = waveform.ra, waveform.dec, waveform.psi
                row.simulation_id = waveform.simulation_id
                row.waveform_number = random.randint(0,int(2**32)-1)
                ### !! This needs to be updated.
                row.process_id = "process:process_id:0" #procrow.process_id

                waveform_row = row
            
            sim.append(waveform_row)
            #del waveform_row
        # Write out the xml and gzip it.
        utils.write_filename(xmldoc, filename, gz=True)
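A hedged usage sketch, assuming `mdcset` is an instance of the minke MDC-set class this method belongs to:

# Hypothetical usage; `mdcset` is assumed to be a populated MDC set.
mdcset.save_xml('mdc_set.xml.gz')  # output is gzipped regardless of suffix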
Code example #14
def checkpoint_save(xmldoc, fout, process):

    print >>sys.stderr, "\t[Checkpointing ...]"

    # save rng state
    rng_state = np.random.get_state()
    np.savez(fout + "_checkpoint.rng.npz",
             state1=rng_state[1],
             state2=np.array(rng_state[2]),
             state3=np.array(rng_state[3]),
             state4=np.array(rng_state[4]))

    # write out the document
    ligolw_process.set_process_end_time(process)
    utils.write_filename(xmldoc, fout + "_checkpoint.gz",  gz=True)
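The .npz file written above stores the four variable components of numpy's Mersenne Twister state. A matching restore step is not shown in the original; a minimal sketch, assuming the same `fout` prefix, could look like this:

import numpy as np

def checkpoint_restore(fout):
    # Rebuild the tuple expected by np.random.set_state; 'MT19937' is the
    # fixed first element that np.random.get_state() returns.
    d = np.load(fout + "_checkpoint.rng.npz")
    np.random.set_state(('MT19937', d['state1'], int(d['state2']),
                         int(d['state3']), float(d['state4'])))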
Code example #15
def checkpoint_save(xmldoc, fout, process):

    print >> sys.stderr, "\t[Checkpointing ...]"

    # save rng state
    rng_state = np.random.get_state()
    np.savez(fout + "_checkpoint.rng.npz",
             state1=rng_state[1],
             state2=np.array(rng_state[2]),
             state3=np.array(rng_state[3]),
             state4=np.array(rng_state[4]))

    # write out the document
    ligolw_process.set_process_end_time(process)
    utils.write_filename(xmldoc, fout + "_checkpoint.gz", gz=True)
Code example #16
File: grbsummary.py Project: Solaro/lalsuite
def write_rows(rows, table_type, filename):
    """
    Create an empty LIGO_LW XML document, add a table of table_type,
    insert the given rows, then write the document to a file.
    """
    # prepare a new XML document
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    tbl = lsctables.New(table_type)
    xmldoc.childNodes[-1].appendChild(tbl)
    
    # insert our rows
    tbl.extend(rows)
    
    # write out the document
    utils.write_filename(xmldoc, filename)
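A minimal usage sketch of write_rows; an empty row list is used so the example stays self-contained (it still produces a valid document containing one empty table):

from glue.ligolw import lsctables

# Hypothetical usage: write an empty sngl_inspiral table to disk.
write_rows([], lsctables.SnglInspiralTable, 'empty_sngl.xml')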
Code example #17
def write_rows(rows, table_type, filename):
    """
    Create an empty LIGO_LW XML document, add a table of table_type,
    insert the given rows, then write the document to a file.
    """
    # prepare a new XML document
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    tbl = lsctables.New(table_type)
    xmldoc.childNodes[-1].appendChild(tbl)

    # insert our rows
    tbl.extend(rows)

    # write out the document
    utils.write_filename(xmldoc, filename)
Code example #18
    def save(self, filename):
        """Write this trigger to gracedb compatible xml format

        Parameters
        ----------
        filename: str
            Name of file to write to disk.
        """
        gz = filename.endswith('.gz')
        ligolw_utils.write_filename(self.outdoc, filename, gz=gz)

        # save source probabilities in a json file
        if self.probabilities is not None:
            prob_fname = filename.replace('.xml.gz', '_probs.json')
            with open(prob_fname, 'w') as prob_outfile:
                json.dump(self.probabilities, prob_outfile)
            logging.info('Source probabilities file saved as %s', prob_fname)
Code example #19
File: ligolw.py Project: jumbokh/gwpy
def write_tables(f, tables, append=False, overwrite=False, **kwargs):
    """Write an LIGO_LW table to file

    Parameters
    ----------
    f : `str`, `file`, :class:`~glue.ligolw.ligolw.Document`
        the file or document to write into

    tables : `list` of :class:`~glue.ligolw.table.Table`
        the tables to write

    append : `bool`, optional, default: `False`
        if `True`, append to an existing file/table, otherwise `overwrite`

    overwrite : `bool`, optional, default: `False`
        if `True`, delete an existing instance of the table type, otherwise
        append new rows
    """
    from glue.ligolw.ligolw import (Document, LIGO_LW, LIGOLWContentHandler)
    from glue.ligolw import utils as ligolw_utils

    # allow writing directly to XML
    if isinstance(f, (Document, LIGO_LW)):
        xmldoc = f
    # open existing document, if possible
    elif append and not overwrite:
        xmldoc = open_xmldoc(f,
                             contenthandler=kwargs.pop('contenthandler',
                                                       LIGOLWContentHandler))
    # fail on existing document and not overwriting
    elif not overwrite and isinstance(f, string_types) and os.path.isfile(f):
        raise IOError("File exists: %s" % f)
    else:  # or create a new document
        xmldoc = Document()

    # convert table to format
    write_tables_to_document(xmldoc, tables, overwrite=overwrite)

    # write file
    if isinstance(f, string_types):
        kwargs.setdefault('gz', f.endswith('.gz'))
        ligolw_utils.write_filename(xmldoc, f, **kwargs)
    elif not isinstance(f, Document):
        kwargs.setdefault('gz', f.name.endswith('.gz'))
        ligolw_utils.write_fileobj(xmldoc, f, **kwargs)
Code example #20
File: epower.py Project: zoran-grujic/gdas
def create_xml(ts_data, psd_segment_length, window_fraction, event_list,
               station, setname="MagneticFields"):
    __program__ = 'pyburst_excesspower'
    start_time = LIGOTimeGPS(int(ts_data.start_time))
    end_time = LIGOTimeGPS(int(ts_data.end_time))
    inseg = segment(start_time, end_time)
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    ifo = 'H1'  # channel_name.split(":")[0]
    straindict = psd.insert_psd_option_group.__dict__
    proc_row = register_to_xmldoc(xmldoc, __program__, straindict,
                                  ifos=[ifo], version=git_version.id,
                                  cvs_repository=git_version.branch,
                                  cvs_entry_time=git_version.date)
    outseg = determine_output_segment(inseg, psd_segment_length,
                                      ts_data.sample_rate, window_fraction)
    ss = append_search_summary(xmldoc, proc_row, ifos=(station,),
                               inseg=inseg, outseg=outseg)
    for sb in event_list:
        sb.process_id = proc_row.process_id
        sb.search = proc_row.program
        sb.ifo, sb.channel = station, setname
    xmldoc.childNodes[0].appendChild(event_list)
    fname = make_filename(station, inseg)
    utils.write_filename(xmldoc, fname, gz=fname.endswith("gz"))
Code example #21
def multi_segments_to_file(seg_list, filename, names, ifos):
    """ Save segments to an xml file
    
    Parameters
    ----------
    seg_list: glue.segments.segmentlist
        List of segment lists to write to disk
    filename : str
        name of the output file
    names : list of str
        name of each segment list
    ifos : list of str
        list of ifos

    Returns
    -------
    File : Return a pycbc.workflow.core.File reference to the file
    """
    from pycbc.workflow.core import File

    # create XML doc and add process table
    outdoc = ligolw.Document()
    outdoc.appendChild(ligolw.LIGO_LW())
    process = ligolw_utils.process.register_to_xmldoc(outdoc, argv[0], {})

    for segs, ifo, name in zip(seg_list, ifos, names):
        fsegs = [(lal.LIGOTimeGPS(seg[0]), lal.LIGOTimeGPS(seg[1]))
                 for seg in segs]

        # add segments, segments summary, and segment definer tables using glue library
        with ligolw_segments.LigolwSegments(outdoc, process) as xmlsegs:
            xmlsegs.insert_from_segmentlistdict({ifo: fsegs}, name)

    # write file
    ligolw_utils.write_filename(outdoc, filename)

    # return a File instance
    url = urlparse.urlunparse(
        ['file', 'localhost', filename, None, None, None])
    f = File(ifo, name, segs, file_url=url, tags=[name])
    f.PFN(os.path.abspath(filename), site='local')
    return f
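A hedged usage sketch, assuming the glue segments package for the input lists; the GPS times and names are illustrative only:

from glue import segments

# Hypothetical usage: one science-segment list for one detector.
sci = segments.segmentlist([segments.segment(1126051217, 1126055317)])
outfile = multi_segments_to_file([sci], 'science_segs.xml',
                                 ['SCIENCE'], ['H1'])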
Code example #22
File: veto.py Project: vitale82/pycbc
def multi_segments_to_file(seg_list, filename, names, ifos):
    """ Save segments to an xml file
    
    Parameters
    ----------
    seg_list: glue.segments.segmentlist
        List of segment lists to write to disk
    filename : str
        name of the output file
    names : list of str
        name of each segment list
    ifos : list of str
        list of ifos

    Returns
    -------
    File : Return a pycbc.workflow.core.File reference to the file
    """
    from pycbc.workflow.core import File

    # create XML doc and add process table
    outdoc = ligolw.Document()
    outdoc.appendChild(ligolw.LIGO_LW())
    process = ligolw_utils.process.register_to_xmldoc(outdoc, argv[0], {})

    for segs, ifo, name in zip(seg_list, ifos, names):
        fsegs = [(lal.LIGOTimeGPS(seg[0]), lal.LIGOTimeGPS(seg[1]))
                 for seg in segs]

        # add segments, segments summary, and segment definer tables using glue library
        with ligolw_segments.LigolwSegments(outdoc, process) as xmlsegs:
            xmlsegs.insert_from_segmentlistdict({ifo : fsegs}, name)

    # write file
    ligolw_utils.write_filename(outdoc, filename)

    # return a File instance
    url = urlparse.urlunparse(['file', 'localhost', filename, None, None, None])
    f = File(ifo, name, segs, file_url=url, tags=[name])
    f.PFN(os.path.abspath(filename), site='local')
    return f
Code example #23
    def write(filename, samples, write_params=None, static_args=None):
        """Writes the injection samples to the given xml.

        Parameters
        ----------
        filename : str
            The name of the file to write to.
        samples : io.FieldArray
            FieldArray of parameters.
        write_params : list, optional
            Only write the given parameter names. All given names must be keys
            in ``samples``. Default is to write all parameters in ``samples``.
        static_args : dict, optional
            Dictionary mapping static parameter names to values. These are
            written to the ``attrs``.
        """
        xmldoc = ligolw.Document()
        xmldoc.appendChild(ligolw.LIGO_LW())
        simtable = lsctables.New(lsctables.SimInspiralTable)
        xmldoc.childNodes[0].appendChild(simtable)
        if static_args is None:
            static_args = {}
        if write_params is None:
            write_params = samples.fieldnames
        for ii in range(samples.size):
            sim = lsctables.SimInspiral()
            # initialize all elements to None
            for col in sim.__slots__:
                setattr(sim, col, None)
            for field in write_params:
                data = samples[ii][field]
                set_sim_data(sim, field, data)
            # set any static args
            for (field, value) in static_args.items():
                set_sim_data(sim, field, value)
            simtable.append(sim)
        ligolw_utils.write_filename(xmldoc,
                                    filename,
                                    gz=filename.endswith('gz'))
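A hedged usage sketch, assuming pycbc's FieldArray as the sample container and that this write is exposed as a staticmethod (it is called directly here for brevity); the field names must be parameters that set_sim_data knows how to map:

from pycbc.io import FieldArray

# Hypothetical usage: two injections with component masses only.
samples = FieldArray(2, dtype=[('mass1', float), ('mass2', float)])
samples['mass1'] = [1.4, 1.6]
samples['mass2'] = [1.3, 1.5]
write('injections.xml', samples)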
Code example #24
File: inject.py Project: bhooshan-gadre/pycbc
    def write(filename, samples, write_params=None, static_args=None):
        """Writes the injection samples to the given xml.

        Parameters
        ----------
        filename : str
            The name of the file to write to.
        samples : io.FieldArray
            FieldArray of parameters.
        write_params : list, optional
            Only write the given parameter names. All given names must be keys
            in ``samples``. Default is to write all parameters in ``samples``.
        static_args : dict, optional
            Dictionary mapping static parameter names to values. These are
            written to the ``attrs``.
        """
        xmldoc = ligolw.Document()
        xmldoc.appendChild(ligolw.LIGO_LW())
        simtable = lsctables.New(lsctables.SimInspiralTable)
        xmldoc.childNodes[0].appendChild(simtable)
        if static_args is None:
            static_args = {}
        if write_params is None:
            write_params = samples.fieldnames
        for ii in range(samples.size):
            sim = lsctables.SimInspiral()
            # initialize all elements to None
            for col in sim.__slots__:
                setattr(sim, col, None)
            for field in write_params:
                data = samples[ii][field]
                set_sim_data(sim, field, data)
            # set any static args
            for (field, value) in static_args.items():
                set_sim_data(sim, field, value)
            simtable.append(sim)
        ligolw_utils.write_filename(xmldoc, filename,
                                    gz=filename.endswith('gz'))
Code example #25
File: mdctools.py Project: princess-supernova/minke
    def save_xml(self, filename):
        """
        Save the MDC set as an XML SimBurstTable.

        Parameters
        ----------
        filename : str
           The location to save the xml file. The output is gzipped, so
           ending the filename with ".gz" follows convention.
        """
        xmldoc = ligolw.Document()
        lw = xmldoc.appendChild(ligolw.LIGO_LW())
        sim = lsctables.New(lsctables.SimBurstTable)
        lw.appendChild(sim)
        # This needs to be given the proper metadata once the package has the maturity to
        # write something sensible.
        for waveform in self.waveforms:
            procrow = process.register_to_xmldoc(xmldoc, "minke_burst_mdc", {}) # waveform.params)
            waveform_row = waveform._row(sim)
            waveform_row.process_id = procrow.process_id
            sim.append(waveform_row)
            #del waveform_row
        # Write out the xml and gzip it.
        utils.write_filename(xmldoc, filename, gz=True)
Code example #26
File: ligolw.py Project: yangnk42/gwpy
def write_ligolw(flag, fobj, **kwargs):
    """Write this `DataQualityFlag` to XML in LIGO_LW format
    """
    # if given a Document, just add data
    if isinstance(fobj, Document):
        return write_to_xmldoc(flag, fobj, **kwargs)
    # otherwise build a new Document
    xmldoc = Document()
    xmldoc.appendChild(LIGO_LW())
    # TODO: add process information
    write_to_xmldoc(flag, xmldoc)
    # and write
    if isinstance(fobj, string_types):
        return write_filename(xmldoc, fobj, gz=fobj.endswith('.gz'))
    else:
        return write_fileobj(xmldoc, fobj, gz=fobj.name.endswith('.gz'))
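A hedged usage sketch, assuming gwpy's DataQualityFlag (the type named in the docstring); the flag name and GPS times are illustrative only:

from gwpy.segments import DataQualityFlag

# Hypothetical usage: write a flag with one known/active segment.
flag = DataQualityFlag('H1:DMT-ANALYSIS_READY:1',
                       active=[(1126051217, 1126055317)],
                       known=[(1126051217, 1126055317)])
write_ligolw(flag, 'flag.xml.gz')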
Code example #27
File: hdf.py Project: veronica-villa/pycbc
    def to_coinc_xml_object(self, file_name):
        outdoc = ligolw.Document()
        outdoc.appendChild(ligolw.LIGO_LW())

        ifos = list(self.sngl_files.keys())
        proc_id = ligolw_process.register_to_xmldoc(
            outdoc,
            'pycbc', {},
            ifos=ifos,
            comment='',
            version=pycbc_version.git_hash,
            cvs_repository='pycbc/' + pycbc_version.git_branch,
            cvs_entry_time=pycbc_version.date).process_id

        search_summ_table = lsctables.New(lsctables.SearchSummaryTable)
        coinc_h5file = self.coinc_file.h5file
        try:
            start_time = coinc_h5file['segments']['coinc']['start'][:].min()
            end_time = coinc_h5file['segments']['coinc']['end'][:].max()
        except KeyError:
            start_times = []
            end_times = []
            for ifo_comb in coinc_h5file['segments']:
                if ifo_comb == 'foreground_veto':
                    continue
                seg_group = coinc_h5file['segments'][ifo_comb]
                start_times.append(seg_group['start'][:].min())
                end_times.append(seg_group['end'][:].max())
            start_time = min(start_times)
            end_time = max(end_times)
        num_trigs = len(self.sort_arr)
        search_summary = return_search_summary(start_time, end_time, num_trigs,
                                               ifos)
        search_summ_table.append(search_summary)
        outdoc.childNodes[0].appendChild(search_summ_table)

        sngl_inspiral_table = lsctables.New(lsctables.SnglInspiralTable)
        coinc_def_table = lsctables.New(lsctables.CoincDefTable)
        coinc_event_table = lsctables.New(lsctables.CoincTable)
        coinc_inspiral_table = lsctables.New(lsctables.CoincInspiralTable)
        coinc_event_map_table = lsctables.New(lsctables.CoincMapTable)
        time_slide_table = lsctables.New(lsctables.TimeSlideTable)

        # Set up time_slide table
        time_slide_id = lsctables.TimeSlideID(0)
        for ifo in ifos:
            time_slide_row = lsctables.TimeSlide()
            time_slide_row.instrument = ifo
            time_slide_row.time_slide_id = time_slide_id
            time_slide_row.offset = 0
            time_slide_row.process_id = proc_id
            time_slide_table.append(time_slide_row)

        # Set up coinc_definer table
        coinc_def_id = lsctables.CoincDefID(0)
        coinc_def_row = lsctables.CoincDef()
        coinc_def_row.search = "inspiral"
        coinc_def_row.description = \
            "sngl_inspiral<-->sngl_inspiral coincidences"
        coinc_def_row.coinc_def_id = coinc_def_id
        coinc_def_row.search_coinc_type = 0
        coinc_def_table.append(coinc_def_row)

        bank_col_names = ['mass1', 'mass2', 'spin1z', 'spin2z']
        bank_col_vals = {}
        for name in bank_col_names:
            bank_col_vals[name] = self.get_bankfile_array(name)

        coinc_event_names = ['ifar', 'time', 'fap', 'stat']
        coinc_event_vals = {}
        for name in coinc_event_names:
            if name == 'time':
                coinc_event_vals[name] = self.get_end_time()
            else:
                coinc_event_vals[name] = self.get_coincfile_array(name)

        sngl_col_names = [
            'snr', 'chisq', 'chisq_dof', 'bank_chisq', 'bank_chisq_dof',
            'cont_chisq', 'cont_chisq_dof', 'end_time', 'template_duration',
            'coa_phase', 'sigmasq'
        ]
        sngl_col_vals = {}
        for name in sngl_col_names:
            sngl_col_vals[name] = self.get_snglfile_array_dict(name)

        sngl_event_count = 0
        for idx in range(len(self.sort_arr)):
            # Set up IDs and mapping values
            coinc_id = lsctables.CoincID(idx)

            # Set up sngls
            # FIXME: two-ifo coincs are hardcoded; loop over all ifos
            sngl_combined_mchirp = 0
            sngl_combined_mtot = 0
            net_snrsq = 0
            for ifo in ifos:
                # If this ifo is not participating in this coincidence then
                # ignore it and move on.
                if not sngl_col_vals['snr'][ifo][1][idx]:
                    continue
                event_id = lsctables.SnglInspiralID(sngl_event_count)
                sngl_event_count += 1
                sngl = return_empty_sngl()
                sngl.event_id = event_id
                sngl.ifo = ifo
                net_snrsq += sngl_col_vals['snr'][ifo][0][idx]**2
                for name in sngl_col_names:
                    val = sngl_col_vals[name][ifo][0][idx]
                    if name == 'end_time':
                        sngl.set_end(LIGOTimeGPS(val))
                    else:
                        setattr(sngl, name, val)
                for name in bank_col_names:
                    val = bank_col_vals[name][idx]
                    setattr(sngl, name, val)
                sngl.mtotal, sngl.eta = pnutils.mass1_mass2_to_mtotal_eta(
                    sngl.mass1, sngl.mass2)
                sngl.mchirp, _ = pnutils.mass1_mass2_to_mchirp_eta(
                    sngl.mass1, sngl.mass2)
                sngl.eff_distance = (sngl.sigmasq)**0.5 / sngl.snr
                sngl_combined_mchirp += sngl.mchirp
                sngl_combined_mtot += sngl.mtotal

                sngl_inspiral_table.append(sngl)

                # Set up coinc_map entry
                coinc_map_row = lsctables.CoincMap()
                coinc_map_row.table_name = 'sngl_inspiral'
                coinc_map_row.coinc_event_id = coinc_id
                coinc_map_row.event_id = event_id
                coinc_event_map_table.append(coinc_map_row)

            sngl_combined_mchirp = sngl_combined_mchirp / len(ifos)
            sngl_combined_mtot = sngl_combined_mtot / len(ifos)

            # Set up coinc inspiral and coinc event tables
            coinc_event_row = lsctables.Coinc()
            coinc_inspiral_row = lsctables.CoincInspiral()
            coinc_event_row.coinc_def_id = coinc_def_id
            coinc_event_row.nevents = len(ifos)
            coinc_event_row.instruments = ','.join(ifos)
            coinc_inspiral_row.set_ifos(ifos)
            coinc_event_row.time_slide_id = time_slide_id
            coinc_event_row.process_id = proc_id
            coinc_event_row.coinc_event_id = coinc_id
            coinc_inspiral_row.coinc_event_id = coinc_id
            coinc_inspiral_row.mchirp = sngl_combined_mchirp
            coinc_inspiral_row.mass = sngl_combined_mtot
            coinc_inspiral_row.set_end(
                LIGOTimeGPS(coinc_event_vals['time'][idx]))
            coinc_inspiral_row.snr = net_snrsq**0.5
            coinc_inspiral_row.false_alarm_rate = coinc_event_vals['fap'][idx]
            coinc_inspiral_row.combined_far = 1. / coinc_event_vals['ifar'][idx]
            # Transform to Hz
            coinc_inspiral_row.combined_far /= YRJUL_SI
            coinc_event_row.likelihood = coinc_event_vals['stat'][idx]
            coinc_inspiral_row.minimum_duration = 0.
            coinc_event_table.append(coinc_event_row)
            coinc_inspiral_table.append(coinc_inspiral_row)

        outdoc.childNodes[0].appendChild(coinc_def_table)
        outdoc.childNodes[0].appendChild(coinc_event_table)
        outdoc.childNodes[0].appendChild(coinc_event_map_table)
        outdoc.childNodes[0].appendChild(time_slide_table)
        outdoc.childNodes[0].appendChild(coinc_inspiral_table)
        outdoc.childNodes[0].appendChild(sngl_inspiral_table)

        ligolw_utils.write_filename(outdoc, file_name)
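A hedged usage sketch; to_coinc_xml_object is a method, so `fg` below is assumed to be an instance of the containing pycbc results class (one exposing sngl_files, coinc_file and the get_* helpers used above):

# Hypothetical usage; `fg` is assumed to be already constructed.
fg.to_coinc_xml_object('foreground_coincs.xml')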
Code example #28
    del lsctables.ProcessTable.validcolumns['jobid']
    del lsctables.ProcessTable.validcolumns['is_online']

    segment_map_map = {}
    segment_sum_map_map = {}

    # Create two random files, each with 2 procs, 4 segment definers each with 8 segs and 8 seg summaries
    tmp_dir = tempfile.mkdtemp()

    for filename in ['file1.xml', 'file2.xml']:
        doc, segment_map, segment_sum_map = make_random_document(
            2, 4, 8, 8, 800000000, 800000200, 10)
        segment_map_map[filename] = segment_map
        segment_sum_map_map[filename] = segment_sum_map
        utils.write_filename(doc, tmp_dir + "/" + filename)

    # find the intersection between two segment definers
    do_test(
        'cat %s/file1.xml | ligolw_segment_intersect -i H1:TEST_SEG_1,H1:TEST_SEG_2 --segment | ligolw_print -t segment -c start_time -c end_time'
        % tmp_dir, segment_map_map['file1.xml']['TEST_SEG_1']
        & segment_map_map['file1.xml']['TEST_SEG_2'],
        'intersect between segments')

    # union
    do_test(
        'cat %s/file1.xml | ligolw_segment_union -i H1:TEST_SEG_1,H1:TEST_SEG_2 --segment | ligolw_print -t segment -c start_time -c end_time'
        % tmp_dir, segment_map_map['file1.xml']['TEST_SEG_1']
        | segment_map_map['file1.xml']['TEST_SEG_2'], 'union between segments')

    # difference
Code example #29
# Convert to m1, m2
m1m2_grid = np.array([lsu.m1m2(cart_grid[i][0], cart_grid[i][1])
                      for i in xrange(len(cart_grid))])
m1m2_grid /= lal.MSUN_SI

if opts.mass_points_xml:
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    procrow = process.append_process(xmldoc, program=sys.argv[0])
    procid = procrow.process_id
    process.append_process_params(xmldoc, procrow, process.process_params_from_dict(opts.__dict__))
    
    sim_insp_tbl = lsctables.New(lsctables.SimInspiralTable, ["simulation_id", "process_id", "numrel_data", "mass1", "mass2", "psi0", "psi3"])
    for itr, (m1, m2) in enumerate(m1m2_grid):
        for l1 in np.linspace(common_cl.param_limits["lam_tilde"][0], common_cl.param_limits["lam_tilde"][1], Nlam):
            sim_insp = sim_insp_tbl.RowType()
            sim_insp.numrel_data = "MASS_SET_%d" % itr
            sim_insp.simulation_id = ilwd.ilwdchar("sim_inspiral:sim_inspiral_id:%d" % itr)
            sim_insp.process_id = procid
            sim_insp.mass1, sim_insp.mass2 = m1, m2
            sim_insp.psi0, sim_insp.psi3 = opts.eff_lambda or l1, opts.delta_eff_lambda or 0
            sim_insp_tbl.append(sim_insp)
    xmldoc.childNodes[0].appendChild(sim_insp_tbl)
    if opts.channel_name:
        ifos = "".join([o.split("=")[0][0] for o in opts.channel_name])
    else:
        ifos = "HLV"
    start = int(event_time)
    fname = "%s-MASS_POINTS-%d-1.xml.gz" % (ifos, start)
    utils.write_filename(xmldoc, fname, gz=True)
Code example #30
File: grb_utils.py Project: torreycullen/pycbc
def make_exttrig_file(cp, ifos, sci_seg, out_dir):
    '''
    Make an ExtTrig xml file containing information on the external trigger

    Parameters
    ----------
    cp : pycbc.workflow.configuration.WorkflowConfigParser object
        The parsed configuration options of a pycbc.workflow.core.Workflow.

    ifos : str
        String containing the analysis interferometer IDs.

    sci_seg : glue.segments.segment
        The science segment for the analysis run.

    out_dir : str
        The output directory, destination for xml file.

    Returns
    -------
    xml_file : pycbc.workflow.File object
        The xml file with external trigger information.
    '''
    # Initialise objects
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    tbl = lsctables.New(lsctables.ExtTriggersTable)
    cols = tbl.validcolumns
    xmldoc.childNodes[-1].appendChild(tbl)
    row = tbl.appendRow()

    # Add known attributes for this GRB
    setattr(row, "event_ra", float(cp.get("workflow", "ra")))
    setattr(row, "event_dec", float(cp.get("workflow", "dec")))
    setattr(row, "start_time", int(cp.get("workflow", "trigger-time")))
    setattr(row, "event_number_grb", str(cp.get("workflow", "trigger-name")))

    # Fill in all remaining columns with type-appropriate defaults
    for entry in cols.keys():
        if not hasattr(row, entry):
            if cols[entry] in ['real_4', 'real_8']:
                setattr(row, entry, 0.)
            elif cols[entry] == 'int_4s':
                setattr(row, entry, 0)
            elif cols[entry] == 'lstring':
                setattr(row, entry, '')
            elif entry == 'process_id':
                row.process_id = ilwd.ilwdchar("external_trigger:process_id:0")
            elif entry == 'event_id':
                row.event_id = ilwd.ilwdchar("external_trigger:event_id:0")
            else:
                print >> sys.stderr, "Column %s not recognized" % (entry)
                raise ValueError

    # Save file
    xml_file_name = "triggerGRB%s.xml" % str(cp.get("workflow",
                                                    "trigger-name"))
    xml_file_path = os.path.join(out_dir, xml_file_name)
    utils.write_filename(xmldoc, xml_file_path)
    xml_file_url = urlparse.urljoin("file:",
                                    urllib.pathname2url(xml_file_path))
    xml_file = File(ifos, xml_file_name, sci_seg, file_url=xml_file_url)
    xml_file.PFN(xml_file_url, site="local")

    return xml_file
Code example #31
	#
	# Call clustering library
	#

	xmldoc, changed = bucluster.bucluster(
		xmldoc,
		program = options.program,
		process = process,
		prefunc = prefunc,
		postfunc = postfunc,
		testfunc = testfunc,
		clusterfunc = clusterfunc,
		sortfunc = sortfunc,
		bailoutfunc = bailoutfunc,
		verbose = options.verbose
	)

	#
	# Finish process information
	#

	ligolw_process.set_process_end_time(process)

	#
	# Write document
	#

	if changed:
		utils.write_filename(xmldoc, filename, gz = (filename or "stdout").endswith(".gz"), verbose = options.verbose)
	xmldoc.unlink()
Code example #32
	del lsctables.ProcessTable.validcolumns['jobid']
	del lsctables.ProcessTable.validcolumns['is_online']


	segment_map_map = {}
	segment_sum_map_map = {}

	# Create two random files, each with 2 procs, 4 segment definers each with 8 segs and 8 seg summaries
	tmp_dir = tempfile.mkdtemp()

	for filename in ['file1.xml', 'file2.xml']:
		doc, segment_map, segment_sum_map = make_random_document(2, 4, 8, 8, 800000000, 800000200, 10)
		segment_map_map[filename] = segment_map
		segment_sum_map_map[filename] = segment_sum_map
		utils.write_filename(doc, tmp_dir + "/" + filename)

	# find the intersection between two segment definers
	do_test('cat %s/file1.xml | ligolw_segment_intersect -i H1:TEST_SEG_1,H1:TEST_SEG_2 --segment | ligolw_print -t segment -c start_time -c end_time' % tmp_dir,
			segment_map_map['file1.xml']['TEST_SEG_1'] & segment_map_map['file1.xml']['TEST_SEG_2'],
			'intersect between segments')


	# union
	do_test('cat %s/file1.xml | ligolw_segment_union -i H1:TEST_SEG_1,H1:TEST_SEG_2 --segment | ligolw_print -t segment -c start_time -c end_time' % tmp_dir,
			segment_map_map['file1.xml']['TEST_SEG_1'] | segment_map_map['file1.xml']['TEST_SEG_2'],
			'union between segments')


	# difference
	do_test('cat %s/file1.xml | ligolw_segment_diff -i H1:TEST_SEG_1,H1:TEST_SEG_2 --segment | ligolw_print -t segment -c start_time -c end_time' % tmp_dir,
Code example #33
import sys
from glue.lal import CacheEntry
from glue.ligolw import lsctables, utils
for filename in (CacheEntry(line).path for line in file(sys.argv[1])):
    xmldoc = utils.load_filename(filename,
                                 gz=(filename or "stdin").endswith(".gz"))
    try:
        lsctables.table.get_table(xmldoc,
                                  lsctables.SnglInspiralTable.tableName)
    except ValueError:
        xmldoc.childNodes[-1].appendChild(
            lsctables.New(
                lsctables.SnglInspiralTable,
                columns=("process_id", "ifo", "search", "channel", "end_time",
                         "end_time_ns", "end_time_gmst", "impulse_time",
                         "impulse_time_ns", "template_duration",
                         "event_duration", "amplitude", "eff_distance",
                         "coa_phase", "mass1", "mass2", "mchirp", "mtotal",
                         "eta", "kappa", "chi", "tau0", "tau2", "tau3", "tau4",
                         "tau5", "ttotal", "psi0", "psi3", "alpha", "alpha1",
                         "alpha2", "alpha3", "alpha4", "alpha5", "alpha6",
                         "beta", "f_final", "snr", "chisq", "chisq_dof",
                         "bank_chisq", "bank_chisq_dof", "cont_chisq",
                         "cont_chisq_dof", "sigmasq", "rsqveto_duration",
                         "Gamma0", "Gamma1", "Gamma2", "Gamma3", "Gamma4",
                         "Gamma5", "Gamma6", "Gamma7", "Gamma8", "Gamma9",
                         "event_id")))
        utils.write_filename(xmldoc, filename,
                             gz=(filename or "stdout").endswith(".gz"))
Code example #34
    coinc.coinc_event_id = coinc_table.get_next_id()
    coinc.process_id = process.process_id
    coinc.coinc_def_id = coinc_def_id
    coinc.time_slide_id = time_slide_id
    coinc.set_instruments(opts.detector)
    coinc.nevents = len(opts.detector)
    coinc.likelihood = None
    coinc_table.append(coinc)

    # Record all sngl_inspiral records and associate them with coincidences.
    for sngl_inspiral in sngl_inspirals:
        # Give this sngl_inspiral record an id and add it to the table.
        sngl_inspiral.event_id = sngl_inspiral_table.get_next_id()
        sngl_inspiral_table.append(sngl_inspiral)

        # Add CoincMap entry.
        coinc_map = lsctables.CoincMap()
        coinc_map.coinc_event_id = coinc.coinc_event_id
        coinc_map.table_name = sngl_inspiral_table.tableName
        coinc_map.event_id = sngl_inspiral.event_id
        coinc_map_table.append(coinc_map)


# Record process end time.
progress.update(-1, 'writing ' + opts.output)
ligolw_process.set_process_end_time(process)

# Write output file.
ligolw_utils.write_filename(out_xmldoc, opts.output,
                            gz=(os.path.splitext(opts.output)[-1] == ".gz"))
Code example #35
File: epower2.py Project: zoran-grujic/gdas
def excess_power2(
    ts_data,  # Time series from magnetic field data
    psd_segment_length,  # Length of each segment in seconds
    psd_segment_stride,  # Separation between 2 consecutive segments in seconds
    psd_estimation,  # Average method
    window_fraction,  # Whitening window fraction
    tile_fap,  # Tile false alarm probability threshold in Gaussian noise.
    station,  # Station
    nchans=None,  # Total number of channels
    band=None,  # Channel bandwidth
    fmin=0,  # Lowest frequency of the filter bank.
    fmax=None,  # Highest frequency of the filter bank.
    max_duration=None,  # Maximum duration of the tile
    wtype='tukey'):  # Whitening type, can be 'tukey' or 'hann'
    """
    Perform excess-power search analysis on magnetic field data.
    This method will produce time-frequency plots for every tile
    duration and bandwidth analysed, as well as an XML file identifying
    all the triggers found in the selected data within the user-defined
    time range.

    Parameters
    ----------
    ts_data : TimeSeries
      Time Series from magnetic field data
    psd_segment_length : float
      Length of each segment in seconds
    psd_segment_stride : float
      Separation between 2 consecutive segments in seconds
    psd_estimation : string
      Average method
    window_fraction : float
      Whitening window fraction
    tile_fap : float
      Tile false alarm probability threshold in Gaussian noise.
    nchans : int
      Total number of channels
    band : float
      Channel bandwidth
    fmin : float
      Lowest frequency of the filter bank.
    fmax : float
      Highest frequency of the filter bank
    """
    # Determine sampling rate based on extracted time series
    sample_rate = ts_data.sample_rate
    # Check if tile maximum frequency is not defined
    if fmax is None or fmax > sample_rate / 2.:
        # Set the tile maximum frequency equal to the Nyquist frequency
        # (i.e. half the sampling rate)
        fmax = sample_rate / 2.0
    # Check whether or not tile bandwidth and channel are defined
    if band is None and nchans is None:
        # Exit program with error message
        exit("Either bandwidth or number of channels must be specified...")
    else:
        # Check that the tile maximum frequency is larger than its minimum frequency
        assert fmax >= fmin
        # Define spectral band of data
        data_band = fmax - fmin
        # Check whether tile bandwidth or channel is defined
        if band is not None:
            # Define number of possible filter bands
            nchans = int(data_band / band) - 1
        elif nchans is not None:
            # Define filter bandwidth
            band = data_band / nchans
            nchans = nchans - 1
        # Check that the number of channels is greater than one
        assert nchans > 1
    # Print segment information
    print '|- Estimating PSD from segments of time',
    print '%.2f s in length, with %.2f s stride...' % (psd_segment_length,
                                                       psd_segment_stride)
    # Convert time series as array of float
    data = ts_data.astype(numpy.float64)
    # Define segment length for PSD estimation in sample unit
    seg_len = int(psd_segment_length * sample_rate)
    # Define separation between consecutive segments in sample unit
    seg_stride = int(psd_segment_stride * sample_rate)
    # Calculate the overall PSD from individual PSD segments
    fd_psd = psd.welch(data,
                       avg_method=psd_estimation,
                       seg_len=seg_len,
                       seg_stride=seg_stride)
    # We need this for the SWIG functions...
    lal_psd = fd_psd.lal()
    # Plot the power spectral density
    plot_spectrum(fd_psd)
    # Create whitening window
    print "|- Whitening window and spectral correlation..."
    if wtype == 'hann':
        window = lal.CreateHannREAL8Window(seg_len)
    elif wtype == 'tukey':
        window = lal.CreateTukeyREAL8Window(seg_len, window_fraction)
    else:
        raise ValueError("Can't handle window type %s" % wtype)
    # Create FFT plan
    fft_plan = lal.CreateForwardREAL8FFTPlan(len(window.data.data), 1)
    # Perform two point spectral correlation
    spec_corr = lal.REAL8WindowTwoPointSpectralCorrelation(window, fft_plan)
    # Initialise filter bank
    print "|- Create filter..."
    filter_bank, fdb = [], []
    # Loop for each channels
    for i in range(nchans):
        channel_flow = fmin + band / 2 + i * band
        channel_width = band
        # Create excess power filter
        lal_filter = lalburst.CreateExcessPowerFilter(channel_flow,
                                                      channel_width, lal_psd,
                                                      spec_corr)
        filter_bank.append(lal_filter)
        fdb.append(Spectrum.from_lal(lal_filter))
    # Calculate the minimum bandwidth
    min_band = (len(filter_bank[0].data.data) - 1) * filter_bank[0].deltaF / 2
    # Plot filter bank
    plot_bank(fdb)
    # Convert filter bank from frequency to time domain
    print "|- Convert all the frequency domain to the time domain..."
    tdb = []
    # Loop over each filter's spectrum
    for fdt in fdb:
        zero_padded = numpy.zeros(int((fdt.f0 / fdt.df).value) + len(fdt))
        st = int((fdt.f0 / fdt.df).value)
        zero_padded[st:st + len(fdt)] = numpy.real_if_close(fdt.value)
        n_freq = int(sample_rate / 2 / fdt.df.value) * 2
        tdt = numpy.fft.irfft(zero_padded, n_freq) * math.sqrt(sample_rate)
        tdt = numpy.roll(tdt, len(tdt) / 2)
        tdt = TimeSeries(tdt,
                         name="",
                         epoch=fdt.epoch,
                         sample_rate=sample_rate)
        tdb.append(tdt)
    # Plot time series filter
    plot_filters(tdb, fmin, band)
    # Compute the renormalization for the base filters up to a given bandwidth.
    mu_sq_dict = {}
    # Loop through powers of 2 up to number of channels
    for nc_sum in range(0, int(math.log(nchans, 2))):
        nc_sum = 2**nc_sum - 1
        print "|- Calculating renormalization for resolution level containing %d %fHz channels" % (
            nc_sum + 1, min_band)
        mu_sq = (nc_sum + 1) * numpy.array([
            lalburst.ExcessPowerFilterInnerProduct(f, f, spec_corr, None)
            for f in filter_bank
        ])
        # Uncomment to get all possible frequency renormalizations
        #for n in xrange(nc_sum, nchans): # channel position index
        for n in xrange(nc_sum, nchans, nc_sum + 1):  # channel position index
            for k in xrange(0, nc_sum):  # channel sum index
                # FIXME: We've precomputed this, so use it instead
                mu_sq[n] += 2 * lalburst.ExcessPowerFilterInnerProduct(
                    filter_bank[n - k], filter_bank[n - 1 - k], spec_corr,
                    None)
        #print mu_sq[nc_sum::nc_sum+1]
        mu_sq_dict[nc_sum] = mu_sq
    # Create an event list where all the triggers will be stored
    event_list = lsctables.New(lsctables.SnglBurstTable, [
        'start_time', 'start_time_ns', 'peak_time', 'peak_time_ns', 'duration',
        'bandwidth', 'central_freq', 'chisq_dof', 'confidence', 'snr',
        'amplitude', 'channel', 'ifo', 'process_id', 'event_id', 'search',
        'stop_time', 'stop_time_ns'
    ])
    # Create directories to save TF and time series plots
    os.system('mkdir -p segments/time-frequency')
    os.system('mkdir -p segments/time-series')
    # Define time edges
    t_idx_min, t_idx_max = 0, seg_len
    while t_idx_max <= len(ts_data):
        # Define starting and ending time of the segment in seconds
        start_time = ts_data.start_time + t_idx_min / float(
            ts_data.sample_rate)
        end_time = ts_data.start_time + t_idx_max / float(ts_data.sample_rate)
        print "\n|-- Analyzing block %i to %i (%.2f percent)" % (
            start_time, end_time, 100 * float(t_idx_max) / len(ts_data))
        # Model a whitened time series for the block
        tmp_ts_data = types.TimeSeries(ts_data[t_idx_min:t_idx_max] *
                                       window.data.data,
                                       delta_t=1. / ts_data.sample_rate,
                                       epoch=start_time)
        # Save time series in the relevant directory
        segfolder = 'segments/%i-%i' % (start_time, end_time)
        os.system('mkdir -p ' + segfolder)
        plot_ts(tmp_ts_data,
                fname='segments/time-series/%i-%i.png' %
                (start_time, end_time))
        # Convert the time series to a frequency series
        fs_data = tmp_ts_data.to_frequencyseries()
        print "|-- Frequency series data has variance: %s" % fs_data.data.std(
        )**2
        # Whitening (FIXME: Whiten the filters, not the data)
        fs_data.data /= numpy.sqrt(fd_psd) / numpy.sqrt(2 * fd_psd.delta_f)
        print "|-- Whitened frequency series data has variance: %s" % fs_data.data.std(
        )**2
        print "|-- Create time-frequency plane for current block"
        # Return the complex snr, along with its associated normalization of the template,
        # matched filtered against the data
        #filter.matched_filter_core(types.FrequencySeries(tmp_filter_bank,delta_f=fd_psd.delta_f),
        #                           fs_data,h_norm=1,psd=fd_psd,low_frequency_cutoff=filter_bank[0].f0,
        #                           high_frequency_cutoff=filter_bank[0].f0+2*band)
        print "|-- Filtering all %d channels..." % nchans
        # Initialise 1D zero array for the filter template
        tmp_filter_bank = numpy.zeros(len(fd_psd), dtype=numpy.complex128)
        # Initialise 2D zero array for time-frequency map
        tf_map = numpy.zeros((nchans, seg_len), dtype=numpy.complex128)
        # Loop over all the channels
        for i in range(nchans):
            # Reset filter bank series
            tmp_filter_bank *= 0.0
            # Index of starting frequency
            f1 = int(filter_bank[i].f0 / fd_psd.delta_f)
            # Index of ending frequency
            f2 = int((filter_bank[i].f0 + 2 * band) / fd_psd.delta_f) + 1
            # (FIXME: Why is there a factor of 2 here?)
            tmp_filter_bank[f1:f2] = filter_bank[i].data.data * 2
            # Define the template to filter the frequency series with
            template = types.FrequencySeries(tmp_filter_bank,
                                             delta_f=fd_psd.delta_f,
                                             copy=False)
            # Create filtered series
            filtered_series = filter.matched_filter_core(
                template,
                fs_data,
                h_norm=None,
                psd=None,
                low_frequency_cutoff=filter_bank[i].f0,
                high_frequency_cutoff=filter_bank[i].f0 + 2 * band)
            # Include filtered series in the map
            tf_map[i, :] = filtered_series[0].numpy()
        # Plot spectrogram
        plot_spectrogram(numpy.abs(tf_map).T,
                         tmp_ts_data.delta_t,
                         band,
                         ts_data.sample_rate,
                         start_time,
                         end_time,
                         fname='segments/time-frequency/%i-%i.png' %
                         (start_time, end_time))
        # Loop through all summed channels
        for nc_sum in range(0, int(math.log(nchans, 2)))[::-1]:
            nc_sum = 2**nc_sum - 1
            mu_sq = mu_sq_dict[nc_sum]
            # Clip the boundaries to remove window corruption
            clip_samples = int(psd_segment_length * window_fraction *
                               ts_data.sample_rate / 2)
            # Construct tiles and calculate their energies
            print "\n|--- Constructing tile with %d summed channels..." % (
                nc_sum + 1)
            # Current bandwidth of the time-frequency map tiles
            df = band * (nc_sum + 1)
            dt = 1.0 / (2 * df)
            # Size of each "step" in the time domain, i.e. the undersampling rate
            us_rate = int(round(dt / ts_data.delta_t))
            print "|--- Undersampling rate for this level: %f" % (
                ts_data.sample_rate / us_rate)
            print "|--- Calculating tiles..."
            # Making independent tiles
            # because [0:-0] does not give the full array
            tf_map_temp = tf_map[:,clip_samples:-clip_samples:us_rate] \
                          if clip_samples > 0 else tf_map[:,::us_rate]
            tiles = tf_map_temp.copy()
            # Here's the deal: we're going to keep only the valid output and
            # it's *always* going to exist in the lowest available indices
            stride = nc_sum + 1
            for i in xrange(tiles.shape[0] / stride):
                numpy.absolute(tiles[stride * i:stride * (i + 1)].sum(axis=0),
                               tiles[stride * (i + 1) - 1])
            tiles = tiles[nc_sum::nc_sum + 1].real**2 / mu_sq[nc_sum::nc_sum +
                                                              1].reshape(
                                                                  -1, 1)
            print "|--- TF-plane is %dx%s samples" % tiles.shape
            print "|--- Tile energy mean %f, var %f" % (numpy.mean(tiles),
                                                        numpy.var(tiles))
            # Define maximum number of degrees of freedom and check that it is
            # greater than or equal to 2
            max_dof = 32 if max_duration is None else 2 * max_duration * df
            assert max_dof >= 2
            # Loop through multiple degrees of freedom
            for j in [2**l for l in xrange(0, int(math.log(max_dof, 2)))]:
                # Duration is fixed by the NDOF and bandwidth
                duration = j * dt
                print "\n|----- Explore signal duration of %f s..." % duration
                print "|----- Summing DOF = %d ..." % (2 * j)
                tlen = tiles.shape[1] - 2 * j + 1 + 1
                dof_tiles = numpy.zeros((tiles.shape[0], tlen))
                sum_filter = numpy.array([1, 0] * (j - 1) + [1])
                for f in range(tiles.shape[0]):
                    # Sum and drop correlated tiles
                    dof_tiles[f] = fftconvolve(tiles[f], sum_filter, 'valid')
                print "|----- Summed tile energy mean: %f, var %f" % (
                    numpy.mean(dof_tiles), numpy.var(dof_tiles))
                plot_spectrogram(
                    dof_tiles.T,
                    dt,
                    df,
                    ts_data.sample_rate,
                    start_time,
                    end_time,
                    fname='segments/%i-%i/tf_%02ichans_%02idof.png' %
                    (start_time, end_time, nc_sum + 1, 2 * j))
                threshold = scipy.stats.chi2.isf(tile_fap, j)
                print "|------ Threshold for this level: %f" % threshold
                spant, spanf = dof_tiles.shape[1] * dt, dof_tiles.shape[0] * df
                print "|------ Processing %.2fx%.2f time-frequency map." % (
                    spant, spanf)
                # Since we clip the data, the start time needs to be adjusted accordingly
                window_offset_epoch = fs_data.epoch + psd_segment_length * window_fraction / 2
                window_offset_epoch = LIGOTimeGPS(float(window_offset_epoch))
                for i, j in zip(*numpy.where(dof_tiles > threshold)):
                    event = event_list.RowType()
                    # The points are summed forward in time and thus a `summed point' is the
                    # sum of the previous N points. If this point is above threshold, it
                    # corresponds to a tile which spans the previous N points. However, the
                    # 0th point (due to the convolution specifier 'valid') is actually
                    # already a duration from the start time. All of this means, the +
                    # duration and the - duration cancels, and the tile 'start' is, by
                    # definition, the start of the time frequency map if j = 0
                    # FIXME: I think this needs a + dt/2 to center the tile properly
                    event.set_start(window_offset_epoch + float(j * dt))
                    event.set_stop(window_offset_epoch + float(j * dt) +
                                   duration)
                    event.set_peak(event.get_start() + duration / 2)
                    event.central_freq = filter_bank[
                        0].f0 + band / 2 + i * df + 0.5 * df
                    event.duration = duration
                    event.bandwidth = df
                    event.chisq_dof = 2 * duration * df
                    event.snr = math.sqrt(dof_tiles[i, j] / event.chisq_dof -
                                          1)
                    # FIXME: Magic number 0.62 should be determined empirically
                    event.confidence = -lal.LogChisqCCDF(
                        event.snr * 0.62, event.chisq_dof * 0.62)
                    event.amplitude = None
                    event.process_id = None
                    event.event_id = event_list.get_next_id()
                    event_list.append(event)
                for event in event_list[::-1]:
                    if event.amplitude is not None:
                        continue
                    etime_min_idx = float(event.get_start()) - float(
                        fs_data.epoch)
                    etime_min_idx = int(etime_min_idx / tmp_ts_data.delta_t)
                    etime_max_idx = float(event.get_start()) - float(
                        fs_data.epoch) + event.duration
                    etime_max_idx = int(etime_max_idx / tmp_ts_data.delta_t)
                    # (band / 2) to account for sin^2 wings from finest filters
                    flow_idx = int((event.central_freq - event.bandwidth / 2 -
                                    (df / 2) - fmin) / df)
                    fhigh_idx = int((event.central_freq + event.bandwidth / 2 +
                                     (df / 2) - fmin) / df)
                    # TODO: Check that the undersampling rate is always commensurate
                    # with the indexing: that is to say that
                    # mod(etime_min_idx, us_rate) == 0 always
                    z_j_b = tf_map[flow_idx:fhigh_idx,
                                   etime_min_idx:etime_max_idx:us_rate]
                    event.amplitude = 0
                print "|------ Total number of events: %d" % len(event_list)
        t_idx_min += int(seg_len * (1 - window_fraction))
        t_idx_max += int(seg_len * (1 - window_fraction))
    setname = "MagneticFields"
    __program__ = 'pyburst_excesspower'
    start_time = LIGOTimeGPS(int(ts_data.start_time))
    end_time = LIGOTimeGPS(int(ts_data.end_time))
    inseg = segment(start_time, end_time)
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    ifo = 'H1'  #channel_name.split(":")[0]
    straindict = psd.insert_psd_option_group.__dict__
    proc_row = register_to_xmldoc(xmldoc,
                                  __program__,
                                  straindict,
                                  ifos=[ifo],
                                  version=git_version.id,
                                  cvs_repository=git_version.branch,
                                  cvs_entry_time=git_version.date)
    dt_stride = psd_segment_length
    sample_rate = ts_data.sample_rate
    # Amount to overlap successive blocks so as not to lose data
    window_overlap_samples = window_fraction * sample_rate
    outseg = inseg.contract(window_fraction * dt_stride / 2)
    # With a given dt_stride, we cannot process the remainder of this data
    remainder = math.fmod(abs(outseg), dt_stride * (1 - window_fraction))
    # ...so make an accounting of it
    outseg = segment(outseg[0], outseg[1] - remainder)
    ss = append_search_summary(xmldoc,
                               proc_row,
                               ifos=(station, ),
                               inseg=inseg,
                               outseg=outseg)
    for sb in event_list:
        sb.process_id = proc_row.process_id
        sb.search = proc_row.program
        sb.ifo, sb.channel = station, setname
    xmldoc.childNodes[0].appendChild(event_list)
    fname = 'excesspower.xml.gz'
    utils.write_filename(xmldoc, fname, gz=fname.endswith("gz"))
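For context, a minimal stand-alone sketch of the tile-thresholding step used above, assuming Gaussian noise so that a tile summed to a given number of degrees of freedom follows a chi-squared distribution; the function and variable names below are illustrative, not part of the pipeline.

import numpy
import scipy.stats

def threshold_tiles(dof_tiles, dof, tile_fap):
    # Inverse survival function: the energy a chi-squared variate with
    # `dof` degrees of freedom exceeds with probability tile_fap
    threshold = scipy.stats.chi2.isf(tile_fap, dof)
    # (channel, time) indices of tiles above threshold
    return list(zip(*numpy.where(dof_tiles > threshold)))

# 16 channels x 128 time steps of fake chi-squared tile energies
rng = numpy.random.RandomState(0)
fake_tiles = rng.chisquare(4, size=(16, 128))
print("%d tiles above threshold" % len(threshold_tiles(fake_tiles, 4, 1e-4)))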
Code example #36
	#
	# Fill in some metadata about the flags
	#
	name = "detchar %s threshold flags" % channel
	comment = " ".join(["%s %s %s" % (channel, stringify(op), str(v)) for op, v in opthresholds])
	lwsegs.insert_from_segmentlistdict(segmentlistdict({channel[:2]: seglist}), name=name, comment=comment)

#
# After recording segments, one can take the intersection (all must be on) or
# union (any can be on)
#
# Possible enhancement: instead of giving all keys, give a user selection
# corresponding to their demands
if opts.intersection:
	intersection = segmentlistdict(all_segs).intersection(all_segs.keys())
	# FIXME: ifos
	lwsegs.insert_from_segmentlistdict(segmentlistdict({channel[:2]: seglist}), name=opts.name_result or "INTERSECTION", comment="%s intersection" % " ".join(all_segs.keys()))
elif opts.union:
	union = segmentlistdict(all_segs).union(all_segs.keys())
	# FIXME: ifos
	lwsegs.insert_from_segmentlistdict(segmentlistdict({channel[:2]: seglist}), name=opts.name_result or "UNION", comment="%s union" % " ".join(all_segs.keys()))

#
# Finish up
#
lwsegs.finalize(procrow)

# FIXME: Determine the true extent of the cache
seg = cache.to_segmentlistdict()[ifos[0][0]][0]
utils.write_filename(xmldoc, "%s-%s-%d-%d.xml.gz" % ("".join(ifos), opts.output_tag, seg[0], abs(seg)), gz = True, verbose=opts.verbose)
Code example #37
File: ligo_lw_test_01.py Project: Cyberface/lalsuite
from matplotlib import figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import numpy
import sys

from glue.ligolw import ligolw
from glue.ligolw import array as ligolw_array
from glue.ligolw import param as ligolw_param
from glue.ligolw import utils as ligolw_utils

class ContentHandler(ligolw.LIGOLWContentHandler):
	pass
ligolw_array.use_in(ContentHandler)
ligolw_param.use_in(ContentHandler)

xmldoc = ligolw_utils.load_filename("ligo_lw_test_01.xml", contenthandler = ContentHandler, verbose = True)
ligolw_utils.write_filename(xmldoc, "/dev/null")

t, = xmldoc.getElementsByTagName(ligolw.Time.tagName)
print("%s: %s" % (t.Name, t.pcdata), file=sys.stderr)

for n, a in enumerate(xmldoc.getElementsByTagName(ligolw.Array.tagName)):
	print("found %s array '%s'" % ("x".join(map(str, a.array.shape)), a.Name), file=sys.stderr)
	fig = figure.Figure()
	FigureCanvas(fig)
	axes = fig.gca()
	axes.loglog()
	axes.grid(True)
	for i in range(1, a.array.shape[0]):
		axes.plot(numpy.fabs(a.array[0]), numpy.fabs(a.array[i]))
	axes.set_title(a.Name)
	print("saving as 'ligo_lw_test_01_%d.png' ..." % n, file=sys.stderr)
Code example #38
# Blockwise separation of Fisher matrix. Parameters are in the following order:
# theta0, theta3, theta3S, t0, phi0
IA = I[0:3, 0:3] # intrinsic block
IB = I[0:3, 3:5] # cross block
ID = I[3:5, 3:5] # extrinsic block
metric = IA - np.dot(IB, linalg.solve(ID, IB.T, sym_pos=True))


def predicate(sngl):
    """Return True if a template is within a 1-sigma radius of the central
    template, False otherwise"""
    thetas = lalsimulation.SimInspiralTaylorF2RedSpinChirpTimesFromMchirpEtaChi(
        sngl.mchirp, sngl.eta, sngl.chi, f_low)
    dtheta = np.asarray(thetas) - thetas_0
    distance = np.dot(dtheta, np.dot(metric, dtheta))
    return distance <= 1

# Grab the templates that are at most 1 sigma from the central (mass1, mass2).
rows_to_keep = filter(predicate, sngl_inspiral_table)
del sngl_inspiral_table[:]
sngl_inspiral_table.extend(rows_to_keep)


# Record process end time.
ligolw_process.set_process_end_time(process)

# Write output.
ligolw_utils.write_filename(xmldoc, opts.output,
    gz=(os.path.splitext(opts.output)[-1] == '.gz'))
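The metric above is the Schur complement of the extrinsic block: the Fisher matrix projected onto (theta0, theta3, theta3S) after maximizing over t0 and phi0. A toy numpy check of that identity (the 5x5 matrix is invented):

import numpy as np
from scipy import linalg

rng = np.random.RandomState(1)
M = rng.randn(5, 5)
I = np.dot(M, M.T) + 5 * np.eye(5)  # symmetric positive-definite stand-in

IA = I[0:3, 0:3]  # intrinsic block
IB = I[0:3, 3:5]  # cross block
ID = I[3:5, 3:5]  # extrinsic block
metric = IA - np.dot(IB, linalg.solve(ID, IB.T))

# The Schur complement equals the inverse of the intrinsic block of I^-1
print(np.allclose(metric, linalg.inv(linalg.inv(I)[0:3, 0:3])))  # True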
Code example #39
    #=============================================
    if opts.ignore_science_segments:
        logger.info("ignoring science segments")
        scisegs = [[gpsstart, gpsstart+stride]]

    else:
        logger.info("generating science segments")
        try:
            seg_xml_file = idq.segment_query(config, gpsstart, gpsstart+stride, url=config.get("get_science_segments","segdb"))

            lsctables.use_in(ligolw.LIGOLWContentHandler)
            xmldoc = utils.load_fileobj(seg_xml_file, contenthandler=ligolw.LIGOLWContentHandler)[0]

            seg_file = "%s/science_segments-%d-%d.xml.gz"%(this_sumdir, int(gpsstart), int(stride))
            logger.info("writing science segments to file : %s"%seg_file)
            utils.write_filename(xmldoc, seg_file, gz=seg_file.endswith(".gz"))

            (scisegs, coveredseg) = idq.extract_dq_segments(seg_file, config.get('get_science_segments', 'include'))

        except Exception as e:
            traceback.print_exc()
            logger.info("ERROR: segment generation failed. Skipping this summary period.")

            gpsstart += stride
            continue

    #=============================================
    # generating summary datfiles filtered by segments
    #=============================================
    ### get all relevant datfiles
    datfiles = idq.get_all_files_in_range(realtimedir, gpsstart, gpsstart+stride, suffix=".dat")
Code example #40
File: grb_utils.py Project: RorySmith/pycbc
def make_exttrig_file(cp, ifos, sci_seg, out_dir):
    '''
    Make an ExtTrig xml file containing information on the external trigger

    Parameters
    ----------
    cp : pycbc.workflow.configuration.WorkflowConfigParser object
    The parsed configuration options of a pycbc.workflow.core.Workflow.

    ifos : str
    String containing the analysis interferometer IDs.

    sci_seg : glue.segments.segment
    The science segment for the analysis run.
    
    out_dir : str
    The output directory, destination for xml file.

    Returns
    -------
    xml_file : pycbc.workflow.File object
    The xml file with external trigger information.

    '''
    # Initialise objects
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    tbl = lsctables.New(lsctables.ExtTriggersTable)
    cols = tbl.validcolumns
    xmldoc.childNodes[-1].appendChild(tbl)    
    row = tbl.appendRow()
    
    # Add known attributes for this GRB
    setattr(row, "event_ra", float(cp.get("workflow", "ra")))
    setattr(row, "event_dec", float(cp.get("workflow", "dec")))
    setattr(row, "start_time", int(cp.get("workflow", "trigger-time")))
    setattr(row, "event_number_grb", str(cp.get("workflow", "trigger-name")))

    # Fill in all empty rows
    for entry in cols.keys():
        if not hasattr(row, entry):
            if cols[entry] in ['real_4','real_8']:
                setattr(row,entry,0.)
            elif cols[entry] == 'int_4s':
                setattr(row,entry,0)
            elif cols[entry] == 'lstring':
                setattr(row,entry,'')
            elif entry == 'process_id':
                row.process_id = ilwd.ilwdchar("external_trigger:process_id:0")
            elif entry == 'event_id':
                row.event_id = ilwd.ilwdchar("external_trigger:event_id:0")
            else:
                print >> sys.stderr, "Column %s not recognized" %(entry)
                raise ValueError

    # Save file
    xml_file_name = "triggerGRB%s.xml" % str(cp.get("workflow",
                                                    "trigger-name"))
    xml_file_path = os.path.join(out_dir, xml_file_name)
    utils.write_filename(xmldoc, xml_file_path)
    xml_file_url = urlparse.urljoin("file:", urllib.pathname2url(xml_file_path))
    xml_file = File(ifos, xml_file_name, sci_seg, file_url=xml_file_url)
    xml_file.PFN(xml_file_url, site="local")
    
    return xml_file
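The default-filling loop above generalizes to any LIGO_LW table row; a small sketch of that pattern as a reusable helper (the function name is hypothetical):

def fill_default_columns(row, validcolumns):
    # Pad unset attributes with type-appropriate defaults, following the
    # validcolumns type names used in the example above
    for name, coltype in validcolumns.items():
        if hasattr(row, name):
            continue
        if coltype in ('real_4', 'real_8'):
            setattr(row, name, 0.)
        elif coltype in ('int_4s', 'int_8s'):
            setattr(row, name, 0)
        elif coltype == 'lstring':
            setattr(row, name, '')
        else:
            raise ValueError("column %s of type %s not handled" % (name, coltype))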
Code example #41
parser.add_option("-o",
                  "--output-name",
                  action="store",
                  dest="outputname",
                  help="Name of XML file to output.")
#parser.add_option("-s", "--segment-file", action="store", dest="segfile",
#help="Name of segment file to integrate.")

opts, rootfiles = parser.parse_args()

if not rootfiles:
    sys.exit("Must specify input Coherent WaveBurst ROOT file with -r option")

xmldoc = ligolw.Document()
xmldoc.appendChild(ligolw.LIGO_LW())

create_tables(xmldoc, rootfiles)

if opts.outputname is None:
    print("Assigning name to output xml")
    output = "convertROOT.xml.gz"
else:
    output = "%s.xml.gz" % opts.outputname

utils.write_filename(xmldoc,
                     output,
                     verbose=True,
                     gz=(output or "stdout").endswith(".gz"))

xmldoc.unlink()
Code example #42
def write_likelihood_data(filename, coincparamsdistributions, seglists, verbose = False):
	utils.write_filename(ligolw_burca_tailor.gen_likelihood_control(coincparamsdistributions, seglists, name = u"string_cusp_likelihood"), filename, verbose = verbose, gz = (filename or "stdout").endswith(".gz"))
Code example #43
File: plot_likelihood.py Project: white105/lalsuite
            math.sqrt((coincs.H1.get_effective_snr())**2 +
                      (coincs.L1.get_effective_snr())**2))

    elif opts.statistic == 'effective_snr' and opts.coincs == "H2L1" and hasattr(
            coincs, "H2") and hasattr(coincs, "L1"):
        distributions.add_injection(double_params_func(coincs, timeslide))
        x_back_param.append(
            math.sqrt((coincs.H2.get_effective_snr())**2 +
                      (coincs.L1.get_effective_snr())**2))

X_back_param = asarray(x_back_param)
#############################################################################
# Finish Smoothing of the Data using Gaussian Filter
#############################################################################
xmldoc = ligolw_burca_tailor.gen_likelihood_control(distributions)
utils.write_filename(xmldoc, "distributions.xml")

distributions.finish()
#############################################################################
# Construction of Histogram
#############################################################################

p = arange(0, 50.0, 1.0)
X_Inj_norm = hist(X_inj_param, p)[0] * 1.0 / max(hist(X_back_param, p)[0])
clf()

X_Back_norm = hist(X_back_param, p)[0] * 1.0 / max(hist(X_back_param, p)[0])
clf()

##########################################################################
# Reload X nd Y parameters
Code example #44
    psd_dict_raw[ifo] = lalsimutils.get_psd_series_from_xmldoc(
        opts.psd_file, ifo)
    npts_orig = len(psd_dict_raw[ifo].data.data)
    df = psd_dict_raw[ifo].deltaF
    f0 = psd_dict_raw[ifo].f0
    nyquist = int(len(psd_dict_raw[ifo].data.data) * df + f0)
    epoch = psd_dict_raw[ifo].epoch
    print(ifo, len(psd_dict_raw[ifo].data.data), 1. / psd_dict_raw[ifo].deltaF,
          nyquist)

npts_desired = int(nyquist / df + 0.5)
indx_start = int(f0 / df + 0.5)

# Loop
for ifo in ifos:
    print(" Writing  for ", ifo)
    dat_here = psd_dict_raw[ifo].data.data
    psddict = {}
    psd_s = lal.CreateREAL8FrequencySeries(name=ifo,
                                           epoch=epoch,
                                           f0=0,
                                           deltaF=df,
                                           sampleUnits="s",
                                           length=npts_desired)
    psd_s.data.data[indx_start:indx_start + npts_orig] = dat_here[:npts_orig]
    psddict[ifo] = psd_s

    xmldoc = make_psd_xmldoc(psddict)
    utils.write_filename(xmldoc, ifo + "-psd.xml.gz", gz=True)
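A condensed sketch of the write path above: wrap a numpy PSD in a REAL8FrequencySeries starting at f0 = 0 and serialize one series per detector. The module paths follow modern lalsuite (lal.series); adjust to whatever this script imports, and the PSD values are placeholders.

import numpy
import lal
from lal.series import make_psd_xmldoc
from glue.ligolw import utils

df = 0.25
psd_vals = numpy.ones(8193)  # flat placeholder PSD up to 2048 Hz
psd_s = lal.CreateREAL8FrequencySeries("H1", lal.LIGOTimeGPS(0), 0.0, df,
                                       "s", len(psd_vals))
psd_s.data.data = psd_vals
utils.write_filename(make_psd_xmldoc({"H1": psd_s}), "H1-psd.xml.gz", gz=True)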
Code example #45
File: dayhopecheck.py Project: AbhayMK/pycbc
    # And add these to the output file
    # Start with the segment summary
    summSegs = segments.segmentlist([workflow.analysis_time])
    sci_def_id = segmentdb_utils.add_to_segment_definer(outdoc, proc_id, ifo,
                                                      "CBC_DAYHOPE_SCIENCE", 0)
    sciok_def_id = segmentdb_utils.add_to_segment_definer(outdoc, proc_id, ifo,
                                                   "CBC_DAYHOPE_SCIENCE_OK", 0)
    sciavailable_def_id = segmentdb_utils.add_to_segment_definer(outdoc,
                              proc_id, ifo, "CBC_DAYHOPE_SCIENCE_AVAILABLE", 0)
    analysable_def_id = segmentdb_utils.add_to_segment_definer(outdoc, proc_id,
                                              ifo, "CBC_DAYHOPE_ANALYSABLE", 0)
    
    segmentdb_utils.add_to_segment(outdoc, proc_id, sci_def_id, sciSegs)
    segmentdb_utils.add_to_segment(outdoc, proc_id, sciok_def_id, sciokSegs)
    segmentdb_utils.add_to_segment(outdoc, proc_id, sciavailable_def_id,
                                   sciavailableSegs)
    segmentdb_utils.add_to_segment(outdoc, proc_id, analysable_def_id,
                                   analysableSegs)

    segmentdb_utils.add_to_segment_summary(outdoc, proc_id, sci_def_id,
                                           summSegs, comment='')
    segmentdb_utils.add_to_segment_summary(outdoc, proc_id, sciok_def_id,
                                           summSegs, comment='')
    segmentdb_utils.add_to_segment_summary(outdoc, proc_id, sciavailable_def_id,
                                           summSegs, comment='')
    segmentdb_utils.add_to_segment_summary(outdoc, proc_id, analysable_def_id,
                                           summSegs, comment='')

ligolw_utils.write_filename(outdoc, "SUMMARY.xml")

Code example #46
#!/usr/bin/python
import sys
from glue.lal import CacheEntry
from glue.ligolw import lsctables, utils
for filename in (CacheEntry(line).path for line in file(sys.argv[1])):
	xmldoc = utils.load_filename(filename, gz = (filename or "stdin").endswith(".gz"))
	try:
		lsctables.table.get_table(xmldoc, lsctables.SnglInspiralTable.tableName)
	except ValueError:
		xmldoc.childNodes[-1].appendChild(lsctables.New(lsctables.SnglInspiralTable, columns = ("process_id", "ifo", "search", "channel", "end_time", "end_time_ns", "end_time_gmst", "impulse_time", "impulse_time_ns", "template_duration", "event_duration", "amplitude", "eff_distance", "coa_phase", "mass1", "mass2", "mchirp", "mtotal", "eta", "kappa", "chi", "tau0", "tau2", "tau3", "tau4", "tau5", "ttotal", "psi0", "psi3", "alpha", "alpha1", "alpha2", "alpha3", "alpha4", "alpha5", "alpha6", "beta", "f_final", "snr", "chisq", "chisq_dof", "bank_chisq", "bank_chisq_dof", "cont_chisq", "cont_chisq_dof", "sigmasq", "rsqveto_duration", "Gamma0", "Gamma1", "Gamma2", "Gamma3", "Gamma4", "Gamma5", "Gamma6", "Gamma7", "Gamma8", "Gamma9", "event_id")))
		utils.write_filename(xmldoc, filename, gz = (filename or "stdout").endswith(".gz"))
Code example #47
  # FIXME convert to years (use some lal or pylal thing in the future)
  vA.array /= secs_in_year
  vA2.array /= secs_in_year * secs_in_year #two powers for this squared quantity

  #Trim the array to have sane values outside the total mass area of interest
  try: minvol = scipy.unique(vA.array)[1]/10.0
  except: minvol = 0
  UL.trim_mass_space(dvA, instruments, minthresh=0.0, minM=UL.mintotal, maxM=UL.maxtotal)
  UL.trim_mass_space(vA, instruments, minthresh=minvol, minM=UL.mintotal, maxM=UL.maxtotal)
  UL.trim_mass_space(vA2, instruments, minthresh=0.0, minM=UL.mintotal, maxM=UL.maxtotal)

  #output an XML file with the result
  xmldoc = ligolw.Document()
  xmldoc.appendChild(ligolw.LIGO_LW())
  xmldoc.childNodes[-1].appendChild(rate.binned_array_to_xml(vA, "2DsearchvolumeFirstMoment"))
  xmldoc.childNodes[-1].appendChild(rate.binned_array_to_xml(vA2, "2DsearchvolumeSecondMoment"))
  xmldoc.childNodes[-1].appendChild(rate.binned_array_to_xml(dvA, "2DsearchvolumeDerivative"))

  # DONE with vA, so it is okay to mess it up...
  # Compute range 
  vA.array = (vA.array * secs_in_year / UL.livetime[instruments] / (4.0/3.0 * pi)) **(1.0/3.0)
  UL.trim_mass_space(vA, instruments, minthresh=0.0, minM=UL.mintotal, maxM=UL.maxtotal)
  xmldoc.childNodes[-1].appendChild(rate.binned_array_to_xml(vA, "2DsearchvolumeDistance"))

  # make a live time 
  UL.trim_mass_space(ltA, instruments, minthresh=0.0, minM=UL.mintotal, maxM=UL.maxtotal)
  xmldoc.childNodes[-1].appendChild(rate.binned_array_to_xml(ltA, "2DsearchvolumeLiveTime"))

  utils.write_filename(xmldoc, "2Dsearchvolume-%s-%s.xml" % (opts.output_name_tag, "".join(sorted(list(instruments)))))
Code example #48
    print('Accepting triggers with FAN <', maxFAN)
    print('Accepted', len(loudestTrig), ' SINGLE triggers.')

flag = False
integ = 0
while not flag:
    try:
        outputFile=utils.load_filename(corsefiles[integ],\
           gz=corsefiles[integ].endswith('.gz'))
        origtbl = lsctables.SnglInspiralTable.get_table(outputFile)
        flag = True
    except:
        integ = integ + 1
parent = origtbl.parentNode
parent.replaceChild(combinedTrigs, origtbl)
utils.write_filename(outputFile,opts.output_file, \
    gz = opts.output_file.endswith('gz'))
if opts.min_rate:
    for trig in loudestTrig:
        trig.alpha = trig.alpha / (FrgrndTime / 3.15567360E7)
    newtbl = lsctables.SnglInspiralTable.get_table(outputFile)
    parent = newtbl.parentNode
    parent.replaceChild(loudestTrig, newtbl)
    utils.write_filename(outputFile,opts.output_file_loudest, \
        gz = opts.output_file.endswith('gz'))

#searchSumm = lsctables.New(lsctables.SearchSummaryTable)
#summVal = lsctables.New(lsctables.SummValueTable)
#
#outputFile = open(opts.output_file,"w")
#outputFile.write('''<?xml version='1.0' encoding='utf-8' ?> \n''')
#outputFile.write('''<!DOCTYPE LIGO_LW SYSTEM "http://ldas-sw.ligo.caltech.edu/doc/ligolwAPI/html/ligolw_dtd.txt"><LIGO_LW> \n''')
Code example #49
        # If the check passed, proceed to append it to the final table
        npoint = lsctables.SimInspiral()
        # Initialize columns
        for nn in out_table.columnnames:
            if 'process_id' in nn:
                npoint.process_id = proc_id
            elif 'waveform' in nn:
                npoint.waveform = 'NR'
            else:
                npoint.__setattr__(nn, 0)
        # Copy over columns
        for nn in point.__slots__:
            if hasattr(point, nn):
                npoint.__setattr__(nn, point.__getattribute__(nn))
        #
        out_table.append(npoint)
    #
    if options.verbose:
        print(len(out_table), " points copied", file=sys.stderr)

if options.verbose:
    print("Total %d points in final table" % len(out_table), file=sys.stderr)
# write the xml doc to disk
proctable = lsctables.ProcessTable.get_table(outdoc)
proctable[0].end_time = gpstime.GpsSecondsFromPyUTC(time.time())

outname = options.output_catalog + '.xml'
ligolw_utils.write_filename(outdoc, outname)

print(len(out_table))
Code example #50
    segmentdb_utils.add_to_segment(outdoc, proc_id, sci_def_id, sciSegs)
    segmentdb_utils.add_to_segment(outdoc, proc_id, sciok_def_id, sciokSegs)
    segmentdb_utils.add_to_segment(outdoc, proc_id, sciavailable_def_id,
                                   sciavailableSegs)
    segmentdb_utils.add_to_segment(outdoc, proc_id, analysable_def_id,
                                   analysableSegs)

    segmentdb_utils.add_to_segment_summary(outdoc,
                                           proc_id,
                                           sci_def_id,
                                           summSegs,
                                           comment='')
    segmentdb_utils.add_to_segment_summary(outdoc,
                                           proc_id,
                                           sciok_def_id,
                                           summSegs,
                                           comment='')
    segmentdb_utils.add_to_segment_summary(outdoc,
                                           proc_id,
                                           sciavailable_def_id,
                                           summSegs,
                                           comment='')
    segmentdb_utils.add_to_segment_summary(outdoc,
                                           proc_id,
                                           analysable_def_id,
                                           summSegs,
                                           comment='')

ligolw_utils.write_filename(outdoc, "SUMMARY.xml")
Code example #51
        if opts.checkpoint and not len(bank) % opts.checkpoint:
            checkpoint_save(xmldoc, opts.output_filename, process)

    # clear the proposal template if caching is not enabled
    if not opts.cache_waveforms:
        tmplt.clear()

if opts.verbose:
    print "\ntotal number of proposed templates: %d" % nprop
    print "total number of match calculations: %d" % bank._nmatch
    print "final bank size: %d" % len(bank)

bank.clear()  # clear caches

# write out the document
if opts.output_filename.endswith(('.xml', '.xml.gz')):
    ligolw_process.set_process_end_time(process)
    utils.write_filename(xmldoc,
                         opts.output_filename,
                         gz=opts.output_filename.endswith("gz"))
elif opts.output_filename.endswith(('.hdf', '.h5', '.hdf5')):
    hdf_fp = h5py.File(opts.output_filename, 'w')
    if len(tbl) == 0:
        hdf_fp.attrs['empty_file'] = True
    else:
        params = tbl.dtype.names
        hdf_fp.attrs['parameters'] = params
        for param in params:
            hdf_fp[param] = tbl[param]
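Reading back a bank written by the hdf branch above is symmetric; a sketch assuming the same attribute and dataset layout (the file name is a placeholder):

import h5py

with h5py.File("bank.hdf", "r") as hdf_fp:
    if hdf_fp.attrs.get("empty_file", False):
        bank = {}
    else:
        params = list(hdf_fp.attrs["parameters"])
        bank = dict((param, hdf_fp[param][:]) for param in params)
print("read %d parameters" % len(bank))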
Code example #52
    fname = "m1_m2_pts_%i.txt" % i
    np.savetxt(fname, cart_grid3[i])

elapsed = elapsed_time() - elapsed
print("Time to distribute points, split and write to file:", elapsed)

#dag_utils.write_integrate_likelihood_extrinsic_sub('test')
#dag_utils.write_extrinsic_marginalization_dag(cart_grid2, 'test.sub')

xmldoc = ligolw.Document()
xmldoc.childNodes.append(ligolw.LIGO_LW())
#proc_id = process.register_to_xmldoc(xmldoc, sys.argv[0], opts.__dict__)
proc_id = process.register_to_xmldoc(xmldoc, sys.argv[0], {})
proc_id = proc_id.process_id
xmldoc.childNodes[0].appendChild(write_sngl_params(cart_grid3, proc_id))
utils.write_filename(xmldoc, "m1m2_grid.xml.gz", gz=True)

#
# N.B. Below here, the real code will divvy up cart_grid into blocks of intrinsic
# parameters and compute the marginalized likelihood on each point
# in intrinsic parameter space
#
# For testing purposes, simply evaluate the overlap on cart_grid and plot
# to confirm it is placing points in the expected ellipsoid with the
# proper distribution, and the overlap behaves properly.
#

# Evaluate IP on the grid inside the ellipsoid
rhos2 = eff.evaluate_ip_on_grid(hfSIG, PTMPLT, IP, param_names, cart_grid)

# Plot the ambiguity function, effective Fisher and ellipsoid points
Code example #53
File: bank_output_utils.py Project: prayush/pycbc
def output_sngl_inspiral_table(outputFile, tempBank, metricParams,
                               ethincaParams, programName="", optDict = None,
                               outdoc=None, **kwargs):
    """
    Function that converts the information produced by the various pyCBC bank
    generation codes into a valid LIGOLW xml file containing a sngl_inspiral
    table and outputs to file.
 
    Parameters
    -----------
    outputFile : string
        Name of the file that the bank will be written to
    tempBank : iterable
        Each entry in the tempBank iterable should be a sequence of
        [mass1,mass2,spin1z,spin2z] in that order.
    metricParams : metricParameters instance
        Structure holding all the options for construction of the metric
        and the eigenvalues, eigenvectors and covariance matrix
        needed to manipulate the space.
    ethincaParams: {ethincaParameters instance, None}
        Structure holding options relevant to the ethinca metric computation
        including the upper frequency cutoff to be used for filtering.
        NOTE: The computation is currently only valid for non-spinning systems
        and uses the TaylorF2 approximant.
    programName (key-word-argument) : string
        Name of the executable that has been run
    optDict (key-word argument) : dictionary
        Dictionary of the command line arguments passed to the program
    outdoc (key-word argument) : ligolw xml document
        If given add template bank to this representation of a xml document and
        write to disk. If not given create a new document.
    kwargs : key-word arguments
        All other key word arguments will be passed directly to 
        ligolw_process.register_to_xmldoc
    """
    if optDict is None:
        optDict = {}
    if outdoc is None:
        outdoc = ligolw.Document()
        outdoc.appendChild(ligolw.LIGO_LW())

    # get IFO to put in search summary table
    ifos = []
    if 'channel_name' in optDict.keys():
        if optDict['channel_name'] is not None:
            ifos = [optDict['channel_name'][0:2]]

    proc_id = ligolw_process.register_to_xmldoc(outdoc, programName, optDict,
                                                ifos=ifos, **kwargs).process_id
    sngl_inspiral_table = convert_to_sngl_inspiral_table(tempBank, proc_id)
    # Calculate Gamma components if needed
    if ethincaParams is not None:
        if ethincaParams.doEthinca:
            for sngl in sngl_inspiral_table:
                # Set tau_0 and tau_3 values needed for the calculation of
                # ethinca metric distances
                (sngl.tau0,sngl.tau3) = pnutils.mass1_mass2_to_tau0_tau3(
                    sngl.mass1, sngl.mass2, metricParams.f0)
                fMax_theor, GammaVals = calculate_ethinca_metric_comps(
                    metricParams, ethincaParams,
                    sngl.mass1, sngl.mass2, spin1z=sngl.spin1z,
                    spin2z=sngl.spin2z, full_ethinca=ethincaParams.full_ethinca)
                # assign the upper frequency cutoff and Gamma0-5 values
                sngl.f_final = fMax_theor
                for i in xrange(len(GammaVals)):
                    setattr(sngl, "Gamma"+str(i), GammaVals[i])
        # If Gamma metric components are not wanted, assign f_final from an
        # upper frequency cutoff specified in ethincaParams
        elif ethincaParams.cutoff is not None:
            for sngl in sngl_inspiral_table:
                sngl.f_final = pnutils.frequency_cutoff_from_name(
                    ethincaParams.cutoff,
                    sngl.mass1, sngl.mass2, sngl.spin1z, sngl.spin2z)

    # set per-template low-frequency cutoff
    if 'f_low_column' in optDict and 'f_low' in optDict and \
            optDict['f_low_column'] is not None:
        for sngl in sngl_inspiral_table:
            setattr(sngl, optDict['f_low_column'], optDict['f_low'])

    outdoc.childNodes[0].appendChild(sngl_inspiral_table)

    # get times to put in search summary table
    start_time = 0
    end_time = 0
    if 'gps_start_time' in optDict.keys() and 'gps_end_time' in optDict.keys():
        start_time = optDict['gps_start_time']
        end_time = optDict['gps_end_time']

    # make search summary table
    search_summary_table = lsctables.New(lsctables.SearchSummaryTable) 
    search_summary = return_search_summary(start_time, end_time,
                               len(sngl_inspiral_table), ifos, **kwargs)
    search_summary_table.append(search_summary)
    outdoc.childNodes[0].appendChild(search_summary_table)

    # write the xml doc to disk
    proctable = table.get_table(outdoc, lsctables.ProcessTable.tableName)
    ligolw_utils.write_filename(outdoc, outputFile,
                                gz=outputFile.endswith('.gz'))
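The tau0/tau3 values assigned above are the standard chirp-time coordinates at the reference frequency f0; a quick stand-alone check via pycbc.pnutils (the masses and frequency are made up):

from pycbc import pnutils

tau0, tau3 = pnutils.mass1_mass2_to_tau0_tau3(10.0, 5.0, 40.0)
print("tau0 = %.3f s, tau3 = %.3f s" % (tau0, tau3))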
Code example #54
	timeslidetable = lsctables.TimeSlideTable.get_table(xmldoc)

	#
	# How many slides will go into this file?
	#

	N = int(round(float(len(time_slides)) / len(filenames)))

	#
	# Put them in.
	#

	for offsetvect in time_slides[:N]:
		timeslidetable.append_offsetvector(offsetvect, process)
	del time_slides[:N]

	#
	# Finish off the document.
	#

	ligolw_process.set_process_end_time(process)

	#
	# Write.
	#

	filename = filenames.pop(0)
	ligolw_utils.write_filename(xmldoc, filename, verbose = options.verbose, gz = (filename or "stdout").endswith(".gz"))

assert not time_slides
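The per-file count N above re-rounds against the number of remaining files on each pass, so the offset vectors drain evenly; a toy trace of that bookkeeping (integers stand in for offset vectors):

time_slides = list(range(10))  # stand-ins for offset vectors
filenames = ["a.xml", "b.xml", "c.xml"]

while filenames:
    N = int(round(float(len(time_slides)) / len(filenames)))
    chunk, time_slides = time_slides[:N], time_slides[N:]
    print("%s gets %d slides" % (filenames.pop(0), len(chunk)))
assert not time_slides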
Code example #55
File: hdf.py Project: aburony1970/pycbc
    def to_coinc_xml_object(self, file_name):
        # FIXME: This function will only work with two ifos!!

        outdoc = ligolw.Document()
        outdoc.appendChild(ligolw.LIGO_LW())

        ifos = [ifo for ifo in self.sngl_files.keys()]
        proc_id = ligolw_process.register_to_xmldoc(outdoc, 'pycbc',
                     {}, ifos=ifos, comment='', version=pycbc_version.git_hash,
                     cvs_repository='pycbc/'+pycbc_version.git_branch,
                     cvs_entry_time=pycbc_version.date).process_id

        search_summ_table = lsctables.New(lsctables.SearchSummaryTable)
        coinc_h5file = self.coinc_file.h5file
        start_time = coinc_h5file['segments']['coinc']['start'][:].min()
        end_time = coinc_h5file['segments']['coinc']['end'][:].max()
        num_trigs = len(self.sort_arr)
        search_summary = return_search_summary(start_time, end_time,
                                                 num_trigs, ifos)
        search_summ_table.append(search_summary)
        outdoc.childNodes[0].appendChild(search_summ_table)

        sngl_inspiral_table = lsctables.New(lsctables.SnglInspiralTable)
        coinc_def_table = lsctables.New(lsctables.CoincDefTable)
        coinc_event_table = lsctables.New(lsctables.CoincTable)
        coinc_inspiral_table = lsctables.New(lsctables.CoincInspiralTable)
        coinc_event_map_table = lsctables.New(lsctables.CoincMapTable)
        time_slide_table = lsctables.New(lsctables.TimeSlideTable)

        # Set up time_slide table
        time_slide_id = lsctables.TimeSlideID(0)
        for ifo in ifos:
            time_slide_row = lsctables.TimeSlide()
            time_slide_row.instrument = ifo
            time_slide_row.time_slide_id = time_slide_id
            time_slide_row.offset = 0
            time_slide_row.process_id = proc_id
            time_slide_table.append(time_slide_row)

        # Set up coinc_definer table
        coinc_def_id = lsctables.CoincDefID(0)
        coinc_def_row = lsctables.CoincDef()
        coinc_def_row.search = "inspiral"
        coinc_def_row.description = "sngl_inspiral<-->sngl_inspiral coincidences"
        coinc_def_row.coinc_def_id = coinc_def_id
        coinc_def_row.search_coinc_type = 0
        coinc_def_table.append(coinc_def_row)

        bank_col_names = ['mass1', 'mass2', 'spin1z', 'spin2z']
        bank_col_vals = {}
        for name in bank_col_names:
            bank_col_vals[name] = self.get_bankfile_array(name)

        coinc_event_names = ['ifar', 'time1', 'fap', 'stat']
        coinc_event_vals = {}
        for name in coinc_event_names:
            coinc_event_vals[name] = self.get_coincfile_array(name)

        sngl_col_names = ['snr', 'chisq', 'chisq_dof', 'bank_chisq',
                          'bank_chisq_dof', 'cont_chisq', 'cont_chisq_dof',
                          'end_time', 'template_duration', 'coa_phase',
                          'sigmasq']
        sngl_col_vals = {}
        for name in sngl_col_names:
            sngl_col_vals[name] = self.get_snglfile_array_dict(name)

        for idx in xrange(len(self.sort_arr)):
            # Set up IDs and mapping values
            coinc_id = lsctables.CoincID(idx)

            # Set up sngls
            # FIXME: As two-ifo is hardcoded loop over all ifos
            sngl_combined_mchirp = 0
            sngl_combined_mtot = 0
            for ifo in ifos:
                sngl_id = self.trig_id[ifo][idx]
                event_id = lsctables.SnglInspiralID(sngl_id)
                sngl = return_empty_sngl()
                sngl.event_id = event_id
                sngl.ifo = ifo
                for name in sngl_col_names:
                    val = sngl_col_vals[name][ifo][idx]
                    if name == 'end_time':
                        sngl.set_end(LIGOTimeGPS(val))
                    else:
                        setattr(sngl, name, val)
                for name in bank_col_names:
                    val = bank_col_vals[name][idx]
                    setattr(sngl, name, val)
                sngl.mtotal, sngl.eta = pnutils.mass1_mass2_to_mtotal_eta(
                        sngl.mass1, sngl.mass2)
                sngl.mchirp, _ = pnutils.mass1_mass2_to_mchirp_eta(
                        sngl.mass1, sngl.mass2)
                sngl.eff_distance = (sngl.sigmasq)**0.5 / sngl.snr
                sngl_combined_mchirp += sngl.mchirp
                sngl_combined_mtot += sngl.mtotal

                sngl_inspiral_table.append(sngl)

                # Set up coinc_map entry
                coinc_map_row = lsctables.CoincMap()
                coinc_map_row.table_name = 'sngl_inspiral'
                coinc_map_row.coinc_event_id = coinc_id
                coinc_map_row.event_id = event_id
                coinc_event_map_table.append(coinc_map_row)

            sngl_combined_mchirp = sngl_combined_mchirp / len(ifos)
            sngl_combined_mtot = sngl_combined_mtot / len(ifos)

            # Set up coinc inspiral and coinc event tables
            coinc_event_row = lsctables.Coinc()
            coinc_inspiral_row = lsctables.CoincInspiral()
            coinc_event_row.coinc_def_id = coinc_def_id
            coinc_event_row.nevents = len(ifos)
            coinc_event_row.instruments = ','.join(ifos)
            coinc_inspiral_row.set_ifos(ifos)
            coinc_event_row.time_slide_id = time_slide_id
            coinc_event_row.process_id = proc_id
            coinc_event_row.coinc_event_id = coinc_id
            coinc_inspiral_row.coinc_event_id = coinc_id
            coinc_inspiral_row.mchirp = sngl_combined_mchirp
            coinc_inspiral_row.mass = sngl_combined_mtot
            coinc_inspiral_row.set_end(\
                                   LIGOTimeGPS(coinc_event_vals['time1'][idx]))
            coinc_inspiral_row.snr = coinc_event_vals['stat'][idx]
            coinc_inspiral_row.false_alarm_rate = coinc_event_vals['fap'][idx]
            coinc_inspiral_row.combined_far = 1./coinc_event_vals['ifar'][idx]
            # Transform to Hz
            coinc_inspiral_row.combined_far = \
                                    coinc_inspiral_row.combined_far / YRJUL_SI
            coinc_event_row.likelihood = 0.
            coinc_inspiral_row.minimum_duration = 0.
            coinc_event_table.append(coinc_event_row)
            coinc_inspiral_table.append(coinc_inspiral_row)

        outdoc.childNodes[0].appendChild(coinc_def_table)
        outdoc.childNodes[0].appendChild(coinc_event_table)
        outdoc.childNodes[0].appendChild(coinc_event_map_table)
        outdoc.childNodes[0].appendChild(time_slide_table)
        outdoc.childNodes[0].appendChild(coinc_inspiral_table)
        outdoc.childNodes[0].appendChild(sngl_inspiral_table)

        ligolw_utils.write_filename(outdoc, file_name)
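The combined_far conversion above takes a rate per Julian year to Hz by dividing by the year in seconds; a one-line check (lal.YRJUL_SI is the Julian year, 31557600 s):

import lal

ifar_years = 100.0                       # inverse false-alarm rate in years
far_hz = (1.0 / ifar_years) / lal.YRJUL_SI
print("FAR = %.3e Hz" % far_hz)          # ~3.2e-10 Hz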
Code example #56
    if len(sngl_inspiral_table) < options.nbanks:
        raise ValueError("Not enough templates to create the requested number of subbanks.")

    # override/read instrument column
    if options.instrument:
        for row in sngl_inspiral_table:
            row.ifo = options.instrument
    else:
        for row in process_params_table:
            if row.param == '--ifos':
                options.instrument = row.value

    # split into disjoint sub-banks
    if min([row.f_final for row in sngl_inspiral_table]) > 0:
        # check that this column is actually populated...
        weights = [row.tau0*row.f_final for row in sngl_inspiral_table]
    else:
        weights = [1 for row in sngl_inspiral_table]
    weights_cum = numpy.array(weights).cumsum()

    first_row = 0
    for bank in range(options.nbanks):

        last_row = numpy.searchsorted(weights_cum, (bank+1)*weights_cum[-1]/options.nbanks)
        sngl_inspiral_table_split[:] = sngl_inspiral_table[first_row:last_row]
        first_row = last_row

        ligolw_process.set_process_end_time(process)
        utils.write_filename(xmldoc, "%s-SBANK_SPLIT_%04d-%s.xml" % (options.instrument, bank + 1, options.user_tag), gz=False, verbose=options.verbose)
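The split above balances cumulative weight rather than row count; a toy version of the boundary search (weights invented; side='right' is chosen here so the final boundary lands past the last row when the target hits the total exactly):

import numpy

weights = numpy.array([1., 1., 8., 1., 1., 8., 1., 1.])  # e.g. tau0 * f_final
weights_cum = weights.cumsum()
nbanks = 2

first_row = 0
for bank in range(nbanks):
    target = (bank + 1) * weights_cum[-1] / nbanks
    last_row = numpy.searchsorted(weights_cum, target, side='right')
    print("bank %d: rows %d to %d" % (bank, first_row, last_row))
    first_row = last_row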
Code example #57
if opts.output_file:
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    process.register_to_xmldoc(xmldoc, sys.argv[0], opts.__dict__)
    result_dict = {"mass1": opts.mass1, "mass2": opts.mass2, "event_duration": numpy.sqrt(var)/res, "ttotal": sampler.ntotal}
    if opts.spin1z is not None or sngl_inspiral_table:
        result_dict["spin1z"] = opts.spin1z or 0.0
    if opts.spin2z is not None or sngl_inspiral_table:
        result_dict["spin2z"] = opts.spin2z or 0.0
    if opts.eff_lambda is not None:
        result_dict["psi0"] = opts.eff_lambda
    if opts.deff_lambda is not None:
        result_dict["psi3"] = opts.deff_lambda

    xmlutils.append_likelihood_result_to_xmldoc(xmldoc, numpy.log(res), neff=neff, **result_dict)
    utils.write_filename(xmldoc, opts.output_file, gz=opts.output_file.endswith(".gz"))
    if opts.save_samples:
        samples = sampler._rvs
        samples["distance"] = samples["distance"]
        if not opts.time_marginalization:
            samples["t_ref"] += float(fiducial_epoch)
        else:
            samples["t_ref"] = float(fiducial_epoch)*numpy.ones(len(sampler._rvs["psi"]))
        samples["polarization"] = samples["psi"]
        samples["coa_phase"] = samples["phi_orb"]
        if ("declination", "right_ascension") in sampler.params:
            samples["latitude"], samples["longitude"] = samples[("declination", "right_ascension")]
        else:
            samples["latitude"] = samples["declination"]
            samples["longitude"] = samples["right_ascension"]
        samples["loglikelihood"] = numpy.log(samples["integrand"])