def parse_command_line():
    """
    Parse the command line.  Returns (options, filenames), where
    filenames is the list of non-option arguments (possibly empty).

    As a side effect, replaces options.distribution_segments with the
    parsed segwizard segment list, options.input_cache with a set of
    CacheEntry objects, and options.round_robin_cache with a list of
    sets of CacheEntry objects (one set per --round-robin-cache file).

    Raises ValueError if --distribution-segments is not given.
    """
    parser = OptionParser(
        version="%prog CVS $Id$",
        usage="%prog [options]",
        description=
        "Constructs the likelihood-ratio based coincidence stage for an excess power analysis.  The input consists of one or more LAL caches listing the sqlite database trigger files, and a list of segments giving the time intervals that should be considered to be independent.  The LAL caches list all trigger files together, that is injections, time slides, and zero-lag.  The individual trigger files are self-describing, so the analysis codes can autodetect their type.  Each segment will be analyzed using the files that intersect it:  the likelihood ratios will be constructed from the injections and time-lag triggers contained in files that intersect the segment, and that data used to assign likelihoods to the injections, time-lag, and zero-lag coincs in all files that intersect the same segment."
    )
    parser.add_option(
        "--input-cache",
        metavar="filename",
        action="append",
        default=[],
        help=
        "Add the contents of this cache file to the list of files from which to draw statistics."
    )
    parser.add_option(
        "--round-robin-cache",
        metavar="filename",
        action="append",
        default=[],
        help=
        "Add the contents of this cache file to the list of files from which to draw injection statistics in a round-robin way."
    )
    parser.add_option(
        "--condor-log-dir",
        metavar="path",
        default=".",
        help="Set the directory for Condor log files (default = \".\").")
    parser.add_option(
        "--config-file",
        metavar="filename",
        default="power.ini",
        help="Set .ini configuration file name (default = \"power.ini\").")
    parser.add_option(
        "--distribution-segments",
        metavar="filename",
        help=
        "Read boundaries for distribution data intervals from this segwizard format segments file (required)."
    )
    parser.add_option("-v",
                      "--verbose",
                      action="store_true",
                      help="Be verbose.")
    options, filenames = parser.parse_args()

    # --distribution-segments is required.  The call form of raise works
    # on both Python 2 and 3 (the "raise X, msg" form is Python-2 only).
    if options.distribution_segments is None:
        raise ValueError("missing required argument --distribution-segments")
    # open() instead of the Python-2-only file() builtin, and close the
    # handle explicitly instead of leaking it
    seg_file = open(options.distribution_segments)
    try:
        options.distribution_segments = segmentsUtils.fromsegwizard(
            seg_file, coltype=LIGOTimeGPS)
    finally:
        seg_file.close()

    # load the cache entries, closing each cache file when done
    input_cache = set()
    for filename in options.input_cache:
        cache_file = open(filename)
        try:
            input_cache.update(CacheEntry(line) for line in cache_file)
        finally:
            cache_file.close()
    options.input_cache = input_cache

    round_robin_cache = []
    for filename in options.round_robin_cache:
        cache_file = open(filename)
        try:
            round_robin_cache.append(set(map(CacheEntry, cache_file)))
        finally:
            cache_file.close()
    options.round_robin_cache = round_robin_cache

    return options, (filenames or [])
예제 #2
0
def parse_command_line():
	"""
	Parse the command line.  Returns (options, filenames); filenames is
	never empty:  it is [None] when no file arguments and no
	--input-cache were given, so downstream loops see a single "no
	file" entry.

	Raises ValueError on a missing or unrecognized --categories,
	--rank-by, --frequency-bins or --live-time-program value.
	"""
	parser = OptionParser(
		version = "Name: %%prog\n%s" % git_version.verbose_msg,
		usage = "%prog [options] [file ...]",
		description = "%prog does blah blah blah."
	)
	parser.add_option("-p", "--live-time-program", metavar = "name", default="lalapps_ring", help = "Set the name of the program whose entries in the search_summary table will set the search live time.  Required.")
	parser.add_option("--veto-segments-name", help = "Set the name of the segments to extract from the segment tables and use as the veto list.")
	parser.add_option("--categories", metavar = "{\"frequency-ifos-oninstruments\",\"oninstruments\"}", default="oninstruments", help = "Select the event categorization algorithm.  Default oninstruments")
	parser.add_option("-b", "--frequency-bins", metavar = "frequency,frequency[,frequency,...]", help = "Set the boundaries of the frequency bins in Hz.  The lowest and highest bounds must be explicitly listed.  Example \"0,5,inf\".  Required if frequency-based categorization algorithm has been selected.")
	parser.add_option("--rank-by", metavar = "{\"snr\",\"uncombined-ifar\",\"likelihood\"}", default="snr", help = "Select the event ranking method.  Default is snr")
	parser.add_option("-t", "--tmp-space", metavar = "path", help = "Path to a directory suitable for use as a work area while manipulating the database file.  The database file will be worked on in this directory, and then moved to the final location when complete.  This option is intended to improve performance when running in a networked environment, where there might be a local disk with higher bandwidth than is available to the filesystem on which the final output will reside.")
	parser.add_option("-n","--extrapolation-num",action="store",type="int",default=0, metavar="num",help="number of time-slide points to use in FAR extrapolation" )
	parser.add_option("-g", "--input-cache", help="cache of sqlite files")
	parser.add_option("-v", "--verbose", action = "store_true", help = "Be verbose.")
	options, filenames = parser.parse_args()

	# expand --input-cache into the list of file names, closing the
	# cache file instead of leaking the handle
	if options.input_cache is not None:
		cache_file = open(options.input_cache)
		try:
			filenames.extend(CacheEntry(line).path() for line in cache_file)
		finally:
			cache_file.close()

	#
	# categories and ranking
	#

	# the call form of raise works on both Python 2 and 3 (the
	# "raise X, msg" form is Python-2 only)
	if options.categories not in ("frequency-ifos-oninstruments", "oninstruments"):
		raise ValueError("missing or unrecognized --categories option")
	if options.rank_by not in ("snr", "uncombined-ifar", "likelihood"):
		raise ValueError("missing or unrecognized --rank-by option")


	options.populate_column = "false_alarm_rate"

	#
	# parse frequency bins
	#

	if options.categories in ("frequency-ifos-oninstruments",):
		if options.frequency_bins is None:
			raise ValueError("--frequency-bins required with category algorithm \"%s\"" % options.categories)
		options.frequency_bins = sorted(map(float, options.frequency_bins.split(",")))
		if len(options.frequency_bins) < 2:
			raise ValueError("must set at least two frequency bin boundaries (i.e., define at least one frequency bin)")
		options.frequency_bins = rate.IrregularBins(options.frequency_bins)

	#
	# other
	#

	if options.live_time_program is None:
		raise ValueError("missing required option -p or --live-time-program")

	#
	# done
	#

	return options, (filenames or [None])
예제 #3
0
 def get_output_cache(self):
     """
     Return a LAL cache of the output file name:  a list containing a
     single CacheEntry describing this node's output file.  Calling
     this method also induces the output name to get set, so it must
     be called at least once before the output name is needed.
     """
     # build the one-entry cache lazily, the first time it is requested
     if not self.output_cache:
         self.output_cache = [
             CacheEntry(
                 self.get_ifo(), self.__usertag,
                 segments.segment(LIGOTimeGPS(self.get_start()),
                                  LIGOTimeGPS(self.get_end())),
                 "file://localhost" + os.path.abspath(self.get_output()))
         ]
     return self.output_cache
def fromlalcache(cachefile, coltype=int):
    """
	Construct a segmentlist of the time intervals spanned by the files
	named in the LAL cache read from the open file object cachefile.
	Each segment's boundaries are built with coltype, which should
	raise ValueError if it cannot convert its string argument.

	Example:

	>>> from lal import LIGOTimeGPS
	>>> cache_seglists = fromlalcache(open(filename), coltype = LIGOTimeGPS).coalesce()

	See also:

	glue.lal.CacheEntry
	"""
    # parse one CacheEntry per cache line, then collect their segments
    entries = [CacheEntry(line, coltype=coltype) for line in cachefile]
    return segments.segmentlist(entry.segment for entry in entries)
예제 #5
0
#!/usr/bin/python
#
# For each file named in the LAL cache given on the command line, make
# sure the document contains a sngl_inspiral table, appending an empty
# one (carrying the full column set) and re-writing the file if not.
import sys
from glue.lal import CacheEntry
from glue.ligolw import lsctables, utils

# collect the paths up front so the cache file can be closed (open()
# instead of the Python-2-only file() builtin, and no leaked handle)
cache_file = open(sys.argv[1])
try:
    paths = [CacheEntry(line).path for line in cache_file]
finally:
    cache_file.close()

for filename in paths:
    xmldoc = utils.load_filename(filename,
                                 gz=(filename or "stdin").endswith(".gz"))
    try:
        # get_table() raises ValueError if no sngl_inspiral table is
        # present;  if one is found there is nothing to do
        lsctables.table.get_table(xmldoc,
                                  lsctables.SnglInspiralTable.tableName)
    except ValueError:
        # append an empty table with the complete column set, then
        # re-write the document in place
        xmldoc.childNodes[-1].appendChild(
            lsctables.New(
                lsctables.SnglInspiralTable,
                columns=("process_id", "ifo", "search", "channel", "end_time",
                         "end_time_ns", "end_time_gmst", "impulse_time",
                         "impulse_time_ns", "template_duration",
                         "event_duration", "amplitude", "eff_distance",
                         "coa_phase", "mass1", "mass2", "mchirp", "mtotal",
                         "eta", "kappa", "chi", "tau0", "tau2", "tau3", "tau4",
                         "tau5", "ttotal", "psi0", "psi3", "alpha", "alpha1",
                         "alpha2", "alpha3", "alpha4", "alpha5", "alpha6",
                         "beta", "f_final", "snr", "chisq", "chisq_dof",
                         "bank_chisq", "bank_chisq_dof", "cont_chisq",
                         "cont_chisq_dof", "sigmasq", "rsqveto_duration",
                         "Gamma0", "Gamma1", "Gamma2", "Gamma3", "Gamma4",
                         "Gamma5", "Gamma6", "Gamma7", "Gamma8", "Gamma9",
                         "event_id")))
        # NOTE: write_filename() takes (xmldoc, filename), mirroring
        # load_filename(filename) above;  the original passed the two
        # arguments in the reverse order
        utils.write_filename(xmldoc,
                             filename,
                             gz=(filename or "stdout").endswith(".gz"))
예제 #6
0
    # NOTE(review): fragment of a larger loop — `options`, `seglists`,
    # `searchsumm`, `process_ids`, `filename` and `xmldoc` are bound by
    # enclosing code not shown here;  confirm against the full script.

    # extract observatory:  the --observatory override if given and
    # non-blank, otherwise the instrument names joined with "+"
    observatory = (options.observatory
                   and options.observatory.strip()) or "+".join(
                       sorted(seglists))

    # extract description:  prefer the --description override, otherwise
    # require a single unique comment among the matching search summary rows
    if options.description:
        description = options.description
    else:
        if process_ids is None:
            description = set(searchsumm.getColumnByName("comment"))
        else:
            description = set(row.comment for row in searchsumm
                              if row.process_id in process_ids)
        if len(description) < 1:
            raise ValueError, "%s: no matching rows found in search summary table" % filename
        if len(description) > 1:
            raise ValueError, "%s: comments in matching rows of search summary table are not identical" % filename
        # a blank comment is mapped to None
        description = description.pop().strip() or None

    # set URL
    url = "file://localhost" + os.path.abspath(filename)

    # write cache entry
    print >> options.output, str(
        CacheEntry(observatory, description, seglists.extent_all(), url))

    # allow garbage collection
    xmldoc.unlink()
예제 #7
0

#
# Using time slide information, construct segment lists describing times
# requiring trigger construction.
#
# NOTE(review): top-level script fragment — `options`, `seglistdict` and
# compute_segment_lists() are defined by code not shown here;  confirm
# against the full script.
#


if options.verbose:
	print >>sys.stderr, "Computing segments for which lalapps_power jobs are required ..."

# map each background time slide file's cache entry to its list of time
# slide vectors, and accumulate the union of the segments each requires
background_time_slides = {}
background_seglistdict = segments.segmentlistdict()
if options.do_noninjections:
	for filename in options.background_time_slides:
		cache_entry = CacheEntry(None, None, None, "file://localhost" + os.path.abspath(filename))
		background_time_slides[cache_entry] = ligolw_tisi.load_time_slides(filename, verbose = options.verbose, gz = filename.endswith(".gz")).values()
		background_seglistdict |= compute_segment_lists(seglistdict, background_time_slides[cache_entry], options.minimum_gap, options.timing_params, full_segments = options.full_segments, verbose = options.verbose)


# same again, for the injection branch of the pipeline
injection_time_slides = {}
injection_seglistdict = segments.segmentlistdict()
if options.do_injections:
	for filename in options.injection_time_slides:
		cache_entry = CacheEntry(None, None, None, "file://localhost" + os.path.abspath(filename))
		injection_time_slides[cache_entry] = ligolw_tisi.load_time_slides(filename, verbose = options.verbose, gz = filename.endswith(".gz")).values()
		injection_seglistdict |= compute_segment_lists(seglistdict, injection_time_slides[cache_entry], options.minimum_gap, options.timing_params, full_segments = options.full_segments, verbose = options.verbose)

# apply time shifts to segment lists to shift tiling phases, but take
# intersection with original segments to stay within allowed times.  Note:
def parse_command_line():
    """
    Parse the command line.  Returns (options, filenames).

    Raises ValueError if any required option is missing.  As a side
    effect, sets options.vetoes_cache to a set of CacheEntry objects
    (empty if --vetoes-file was not given) and wraps
    options.injection_time_slides in a one-element list so downstream
    code can iterate over it.
    """
    parser = OptionParser(usage="%prog [options] ...", description="FIXME")
    parser.add_option("-f",
                      "--config-file",
                      metavar="filename",
                      help="Use this configuration file (required).")
    parser.add_option(
        "-l",
        "--log-path",
        metavar="path",
        help="Make condor put log files in this directory (required).")
    parser.add_option(
        "--background-time-slides",
        metavar="filename",
        action="append",
        help=
        "Set the name of the file from which to obtain the time slide table for use in the background branch of the pipeline (required).  This option can be given multiple times to parallelize the background analysis across time slides.  You will want to make sure the time slide files have distinct vectors to not repeat the same analysis multiple times, and in particular you'll want to make sure only one of them has a zero-lag vector in it."
    )
    parser.add_option(
        "--injection-time-slides",
        metavar="filename",
        help=
        "Set the name of the file from which to obtain the time slide table for use in the injection branch of the pipeline (required)."
    )
    parser.add_option(
        "--segments-file",
        metavar="filename",
        help=
        "Set the name of the LIGO Light-Weight XML file from which to obtain segment lists (required).  See ligolw_segments and ligolw_segment_query for more information on constructing an XML-format segments file.  See also --segments-name."
    )
    parser.add_option(
        "--segments-name",
        metavar="name",
        default="segments",
        help=
        "Set the name of the segment lists to retrieve from the segments file (default = \"segments\").  See also --segments-file."
    )
    parser.add_option(
        "--vetoes-file",
        metavar="filename",
        help=
        "Set the name of the LIGO Light-Weight XML file from which to obtain veto segment lists (optional).  See ligolw_segments and ligolw_segment_query for more information on constructing an XML-format segments file.  See also --vetos-name."
    )
    parser.add_option(
        "--vetoes-name",
        metavar="name",
        default="vetoes",
        help=
        "Set the name of the segment lists to retrieve from the veto segments file (default = \"vetoes\").  See also --vetoes-file."
    )
    parser.add_option("-v",
                      "--verbose",
                      action="store_true",
                      help="Be verbose.")

    options, filenames = parser.parse_args()

    # confirm all required options were supplied.  The call form of
    # raise works on both Python 2 and 3 (the "raise X, msg" form is
    # Python-2 only).
    required_options = [
        "log_path", "config_file", "background_time_slides",
        "injection_time_slides", "segments_file"
    ]
    missing_options = [
        option for option in required_options
        if getattr(options, option) is None
    ]
    if missing_options:
        raise ValueError("missing required options %s" % ", ".join(
            sorted("--%s" % option.replace("_", "-")
                   for option in missing_options)))

    # represent the (single, optional) veto file as a one-entry cache
    if options.vetoes_file is not None:
        options.vetoes_cache = set([
            CacheEntry(
                None, None, None,
                "file://localhost" + os.path.abspath(options.vetoes_file))
        ])
    else:
        options.vetoes_cache = set()

    # downstream code iterates over injection time slide file names
    options.injection_time_slides = [options.injection_time_slides]

    return options, filenames