コード例 #1
0
def parse_command_line():
	"""
	Parse the command line.

	Returns (options, filenames) where filenames is the list of
	candidate database files collected from the command line and, if
	given, from the --input-cache LAL cache file.  The likelihood data
	file names read from --likelihood-cache are attached to options as
	options.likelihood_filenames.

	Raises ValueError if --cal-uncertainty is unset, or if no candidate
	databases or no likelihood data files are provided.
	"""
	parser = OptionParser()
	parser.add_option("--cal-uncertainty", metavar = "fraction", type = "float", help = "Set the fractional uncertainty in amplitude due to calibration uncertainty (eg. 0.08).  This option is required, use 0 to disable calibration uncertainty.")
	parser.add_option("--detection-threshold", metavar = "Hz", type = "float", help = "Override the false alarm rate threshold.  Only injection files will be processed, and the efficiency curve measured.")
	parser.add_option("--rankingstatpdf-file", metavar = "filename", help = "Set the name of the xml file containing the marginalized likelihood.")
	parser.add_option("-c", "--input-cache", metavar = "filename", help = "Process the files named in this LAL cache. See lalapps_path2cache for information on how to produce a LAL cache file. The input (& output) of the FAPFAR jobs should be OK.")
	parser.add_option("--likelihood-cache", metavar = "filename", help = "Also load the likelihood ratio data files listed in this LAL cache. This is used to obtain the segments that were actually analyzed.")
	parser.add_option("--tmp-space", metavar = "dir", help = "Set the name of the tmp space if working with sqlite.")
	parser.add_option("--vetoes-name", metavar = "name", help = "Set the name of the segment lists to use as vetoes (default = do not apply vetoes).")
	parser.add_option("--verbose", "-v", action = "store_true", help = "Be verbose.")

	options, filenames = parser.parse_args()

	# --cal-uncertainty is mandatory;  an explicit 0 disables it
	if options.cal_uncertainty is None:
		raise ValueError("must set --cal-uncertainty (use 0 to ignore calibration uncertainty)")

	# collect candidate database file names from the cache, if given
	if options.input_cache:
		filenames += [CacheEntry(line).path for line in open(options.input_cache)]
	if not filenames:
		raise ValueError("no candidate databases specified")

	# collect ranking statistic likelihood data file names
	options.likelihood_filenames = []
	if options.likelihood_cache is not None:
		options.likelihood_filenames += [CacheEntry(line).path for line in open(options.likelihood_cache)]
	if not options.likelihood_filenames:
		raise ValueError("no ranking statistic likelihood data files specified")

	return options, filenames
コード例 #2
0
def parse_command_line():
	"""
	Parse the command line.

	Returns (options, filenames) where filenames is the list of
	candidate database files collected from the command line and, if
	given, from the --input-cache LAL cache file.  Likelihood data file
	names collected from --likelihood-file and --likelihood-cache are
	attached to options as options.likelihood_filenames.

	Raises ValueError if no likelihood data files or no candidate
	databases are provided.
	"""
	parser = OptionParser(
		version = "Name: %%prog\n%s" % git_version.verbose_msg
	)
	parser.add_option("-c", "--input-cache", metavar = "filename", help = "Also process the files named in this LAL cache.  See lalapps_path2cache for information on how to produce a LAL cache file.")
	parser.add_option("-l", "--likelihood-file", metavar = "filename", action = "append", help = "Set the name of the likelihood ratio data file to use.  Can be given more than once.")
	parser.add_option("--likelihood-cache", metavar = "filename", help = "Also load the likelihood ratio data files listed in this LAL cache.  See lalapps_path2cache for information on how to produce a LAL cache file.")
	parser.add_option("-t", "--tmp-space", metavar = "path", help = "Path to a directory suitable for use as a work area while manipulating the database file.  The database file will be worked on in this directory, and then moved to the final location when complete.  This option is intended to improve performance when running in a networked environment, where there might be a local disk with higher bandwidth than is available to the filesystem on which the final output will reside.")
	parser.add_option("--vetoes-name", metavar = "name", help = "Set the name of the segment lists to use as vetoes (default = do not apply vetoes).")
	parser.add_option("-v", "--verbose", action = "store_true", help = "Be verbose.")
	options, filenames = parser.parse_args()

	# collect likelihood data files from the repeatable option and/or
	# the cache file;  at least one source must yield a file name
	options.likelihood_filenames = []
	if options.likelihood_file is not None:
		options.likelihood_filenames += options.likelihood_file
	if options.likelihood_cache is not None:
		options.likelihood_filenames += [CacheEntry(line).path for line in open(options.likelihood_cache)]
	if not options.likelihood_filenames:
		raise ValueError("no ranking statistic likelihood data files specified")

	# collect candidate database file names from the cache, if given
	if options.input_cache:
		filenames += [CacheEntry(line).path for line in open(options.input_cache)]
	if not filenames:
		raise ValueError("no candidate databases specified")

	return options, filenames
コード例 #3
0
def parse_command_line():
    """
    Parse the command line.

    Returns (options, filenames).  options.distribution_segments is
    replaced by the parsed segwizard segment list,
    options.input_cache becomes a set of CacheEntry objects, and
    options.round_robin_cache becomes a list of sets of CacheEntry
    objects (one set per cache file).

    Raises ValueError if --distribution-segments is not given.
    """
    parser = OptionParser(
        version="%prog CVS $Id$",
        usage="%prog [options]",
        description=
        "Constructs the likelihood-ratio based coincidence stage for an excess power analysis.  The input consists of one or more LAL caches listing the sqlite database trigger files, and a list of segments giving the time intervals that should be considered to be independent.  The LAL caches list all trigger files together, that is injections, time slides, and zero-lag.  The individual trigger files are self-describing, so the analysis codes can autodetect their type.  Each segment will be analyzed using the files that intersect it:  the likelihood ratios will be constructed from the injections and time-lag triggers contained in files that intersect the segment, and that data used to assign likelihoods to the injections, time-lag, and zero-lag coincs in all files that intersect the same segment."
    )
    parser.add_option(
        "--input-cache",
        metavar="filename",
        action="append",
        default=[],
        help=
        "Add the contents of this cache file to the list of files from which to draw statistics."
    )
    parser.add_option(
        "--round-robin-cache",
        metavar="filename",
        action="append",
        default=[],
        help=
        "Add the contents of this cache file to the list of files from which to draw injection statistics in a round-robin way."
    )
    parser.add_option(
        "--condor-log-dir",
        metavar="path",
        default=".",
        help="Set the directory for Condor log files (default = \".\").")
    parser.add_option(
        "--config-file",
        metavar="filename",
        default="power.ini",
        help="Set .ini configuration file name (default = \"power.ini\").")
    parser.add_option(
        "--distribution-segments",
        metavar="filename",
        help=
        "Read boundaries for distribution data intervals from this segwizard format segments file (required)."
    )
    parser.add_option("-v",
                      "--verbose",
                      action="store_true",
                      help="Be verbose.")
    options, filenames = parser.parse_args()

    # NOTE:  the py2-only "raise E, msg" and file() builtins have been
    # replaced with their py2/py3-compatible forms
    if options.distribution_segments is None:
        raise ValueError("missing required argument --distribution-segments")
    options.distribution_segments = segmentsUtils.fromsegwizard(
        open(options.distribution_segments), coltype=lal.LIGOTimeGPS)

    # flatten all --input-cache files into one set of cache entries
    options.input_cache = set([
        CacheEntry(line) for filename in options.input_cache
        for line in open(filename)
    ])
    # keep one set of cache entries per --round-robin-cache file
    options.round_robin_cache = [
        set(map(CacheEntry, open(filename)))
        for filename in options.round_robin_cache
    ]

    return options, (filenames or [])
コード例 #4
0
def parse_command_line():
	"""
	Parse the command line.

	Returns (options, paths) where paths is the sorted list of trigger
	file paths, drawn from the cache files named on the command line,
	whose segments intersect the (protracted) plot segment.

	Raises ValueError if a required option is missing or if no cache
	files are named.
	"""
	parser = OptionParser(
		version = "Name: %%prog\n%s" % git_version.verbose_msg,
		usage = """usage: %prog [options] cachename ...

Generate long time scale trigger rate plot, getting trigger file names from LAL
cache files."""
	)
	parser.add_option("-s", "--gps-start-time", metavar = "seconds", help = "Set start time of plot in GPS seconds (required).")
	parser.add_option("-e", "--gps-end-time", metavar = "seconds", help = "Set end time of plot in GPS seconds (required).")
	parser.add_option("-w", "--window", metavar = "seconds", type = "float", default = 3600.0, help = "Set width of averaging window in seconds (default = 3600.0).")
	parser.add_option("-i", "--instrument", metavar = "name", help = "Set instrument name (required).")
	parser.add_option("-o", "--output-base", metavar = "base", help = "Set base (no extension) of output file name (required).")
	parser.add_option("-v", "--verbose", action = "store_true", help = "Be verbose.")
	options, cache_names = parser.parse_args()

	# check for required options
	required_options = ["gps_start_time", "gps_end_time", "output_base", "instrument"]
	missing_options = [option for option in required_options if getattr(options, option) is None]
	if missing_options:
		raise ValueError("missing required option(s) %s" % ", ".join("--%s" % option.replace("_", "-") for option in missing_options))

	# parse trigger cache files.  NOTE:  open() replaces the py2-only
	# file() builtin
	if not cache_names:
		raise ValueError("no cache files named on command line")
	cache = [CacheEntry(l) for name in cache_names for l in open(name)]

	# set segment;  the read segment is padded by 5 windows so the
	# averaging window is fully populated at the plot boundaries
	options.gps_start_time = int(lal.LIGOTimeGPS(options.gps_start_time))
	options.gps_end_time = int(lal.LIGOTimeGPS(options.gps_end_time))
	options.segment = segments.segment(options.gps_start_time, options.gps_end_time)
	options.read_segment = options.segment.protract(5.0 * options.window)

	# filter cache entries and sort
	return options, [c.path for c in sorted(c for c in cache if options.read_segment.intersects(c.segment))]
コード例 #5
0
def parse_command_line():
	"""
	Parse the command line.

	Returns (options, filenames) where filenames is the list of sqlite
	database files collected from the command line and from any
	--input-cache LAL cache files.  options.image_formats is split into
	a list of extensions.

	Raises ValueError if --cal-uncertainty is unset or --threads < 1.
	"""
	parser = OptionParser(
		version = "Name: %%prog\n%s" % git_version.verbose_msg,
		usage = "%prog [options] [file ...]",
		description = "%prog performs the final, summary, stages of the upper-limit string cusp search.  Input consists of a list of all sqlite format database files produced by all injection and non-injection runs of the analysis pipeline.  The file names can be given on the command line and/or provided in a LAL cache file."
	)
	parser.add_option("--cal-uncertainty", metavar = "fraction", type = "float", help = "Set the fractional uncertainty in amplitude due to calibration uncertainty (eg. 0.08).  This option is required, use 0 to disable calibration uncertainty.")
	parser.add_option("--injections-bin-size", metavar = "bins", type = "float", default = 16.7, help = "Set bin width for injection efficiency curves (default = 16.7).")
	parser.add_option("-c", "--input-cache", metavar = "filename", action = "append", help = "Also process the files named in this LAL cache.  See lalapps_path2cache for information on how to produce a LAL cache file.  This option can be given multiple times.")
	parser.add_option("--import-dump", metavar = "filename", action = "append", help = "Import additional rate vs. threshold or efficiency data from this dump file.  Dump files are one of the data products produced by this program.  Whether the file provides rate vs. threshold data or efficiency data will be determined automatically.  This option can be given multiple times")
	parser.add_option("--image-formats", metavar = "ext[,ext,...]", default = "png,pdf", help = "Set list of graphics formats to produce by providing a comma-delimited list of the filename extensions (default = \"png,pdf\").")
	parser.add_option("-p", "--live-time-program", metavar = "program", default = "StringSearch", help = "Set the name, as it appears in the process table, of the program whose search summary entries define the search live time (default = StringSearch).")
	parser.add_option("-o", "--open-box", action = "store_true", help = "Perform open-box analysis.  In a closed-box analysis (the default), information about the events seen at zero-lag is concealed:  the rate vs. threshold plot only shows the rate of events seen in the background, the detection threshold used to measure the efficiency curves is obtained from n-th loudest background event where n is (the integer closest to) the ratio of background livetime to zero-lag livetime, and messages to stdout and stderr that contain information about event counts at zero-lag are silenced.")
	parser.add_option("-t", "--tmp-space", metavar = "path", help = "Path to a directory suitable for use as a work area while manipulating the database file.  The database file will be worked on in this directory, and then moved to the final location when complete.  This option is intended to improve performance when running in a networked environment, where there might be a local disk with higher bandwidth than is available to the filesystem on which the final output will reside.")
	parser.add_option("--vetoes-name", metavar = "name", help = "Set the name of the segment lists to use as vetoes (default = do not apply vetoes).")
	parser.add_option("--detection-threshold", metavar = "likelihood", type = "float", help = "Override the detection threshold.  Only injection files will be processed, and the efficiency curve measured.")
	parser.add_option("--record-background", metavar = "N", type = "int", default = 10000000, help = "Set the number of background likelihood ratios to hold in memory for producing the rate vs. threshold plot (default = 10000000).")
	parser.add_option("--record-candidates", metavar = "N", type = "int", default = 100, help = "Set the number of highest-ranked zero-lag candidates to dump to the candidate file (default = 100).")
	parser.add_option("--threads", metavar = "N", type = "int", default = 1, help = "Set the maximum number of parallel threads to use for processing files (default = 1).  Contention for the global Python interpreter lock will throttle the true number that can run.  The number of threads will be automatically adjusted downwards if the number requested exceeds the number of input files.")
	parser.add_option("-v", "--verbose", action = "store_true", help = "Be verbose.")
	options, filenames = parser.parse_args()

	# --cal-uncertainty is mandatory;  an explicit 0 disables it
	if options.cal_uncertainty is None:
		raise ValueError("must set --cal-uncertainty (use 0 to ignore calibration uncertainty)")

	options.image_formats = options.image_formats.split(",")

	# collect file names from each --input-cache.  NOTE:  open()
	# replaces the py2-only file() builtin
	if options.input_cache:
		filenames += [CacheEntry(line).path for input_cache in options.input_cache for line in open(input_cache)]

	if options.threads < 1:
		raise ValueError("--threads must be >= 1")

	return options, filenames
コード例 #6
0
def parse_command_line():
	"""
	Parse the command line.

	Returns (options, filenames).  Likelihood data file names from
	--add-from-cache are appended to options.add_from.

	Raises ValueError if both --T010150 and --output are given, or if
	the --T010150 description contains invalid characters.
	"""
	parser = OptionParser(
		version = "Name: %%prog\n%s" % git_version.verbose_msg,
		usage = "%prog [options] [filename ...]",
		description = "%prog analyzes a collection of SQLite3 database files containing lalapps_burca outputs, and measures probability distributions for a variety of parameters computed from the coincidences therein.  The distributions are written to a likelihood data file in XML format, which can be used by lalapps_burca for the excesspower2 algorithm in which a second pass assigns likelihoods to each coincidence.  The command line arguments are used to provide shell patterns for the files from which to obtain injection and backgroun coincidences.  If file names are given on the command line following the arguments, then likelihood data is loaded from those files and added to the output."
	)
	parser.add_option("--add-from", metavar = "filename", default = [], action = "append", help = "Also add likelihood data from this XML file.")
	parser.add_option("--add-from-cache", metavar = "filename", help = "Also add likelihood data from all XML files listed in this LAL cache.")
	parser.add_option("-o", "--output", metavar = "filename", default = None, help = "Set the name of the likelihood control file to write (default = stdout).")
	parser.add_option("-t", "--tmp-space", metavar = "path", help = "Path to a directory suitable for use as a work area while manipulating the database file.  The database file will be worked on in this directory, and then moved to the final location when complete.  This option is intended to improve performance when running in a networked environment, where there might be a local disk with higher bandwidth than is available to the filesystem on which the final output will reside.")
	parser.add_option("--T010150", metavar = "description", default = None, help = "Write the output to a file whose name is compatible with the file name format described in LIGO-T010150-00-E, \"Naming Convention for Frame Files which are to be Processed by LDAS\".  The description string will be used to form the second field in the file name.")
	parser.add_option("-p", "--live-time-program", metavar = "program", default = "lalapps_power", help = "Program from which to draw the livetime segments. (Necessary in case of giving --T010150.")
	parser.add_option("-v", "--verbose", action = "store_true", help = "Be verbose.")
	options, filenames = parser.parse_args()

	# validate the --T010150 description:  mutually exclusive with
	# --output, empty string selects the default description
	if options.T010150 is not None:
		if options.output is not None:
			raise ValueError("cannot set both --T010150 and --output")
		if options.T010150 == "":
			options.T010150 = "EXCESSPOWER_LIKELIHOOD"
		elif set(options.T010150) - T010150_letters:
			raise ValueError("invalid characters in description \"%s\"" % options.T010150)

	# append cache contents to --add-from.  NOTE:  open() replaces the
	# py2-only file() builtin
	if options.add_from_cache:
		options.add_from += [CacheEntry(line).path for line in open(options.add_from_cache)]

	return options, filenames
コード例 #7
0
def parse_command_line():
	"""
	Parse the command line.

	Returns (options, filenames) where filenames is the list of input
	database files collected from the command line and, if given, from
	the --input-cache LAL cache file.

	Raises ValueError on invalid option combinations, if no input files
	are provided, or if --injection-reweight is unrecognized.
	"""
	parser = OptionParser(
		version = "Name: %%prog\n%s" % git_version.verbose_msg,
		usage = "%prog [options] [filename ...]",
		description = "%prog analyzes a collection of sqlite3 database files containing lalapps_burca outputs of string-cusp coincidence events, and measures probability distributions for a variety of parameters computed from those coincidences.  The distributions are written to a likelihood data file in XML format, which can later be used by to assign likelihoods to the coincidences.  The files to be processed can be named on the command line and/or provided by a LAL cache file."
	)
	parser.add_option("-o", "--output", metavar = "filename", default = None, help = "Set the name of the likelihood data file to write (default = stdout).")
	parser.add_option("-c", "--input-cache", metavar = "filename", help = "Also process the files named in this LAL cache.  See lalapps_path2cache for information on how to produce a LAL cache file.")
	parser.add_option("-t", "--tmp-space", metavar = "path", help = "Path to a directory suitable for use as a work area while manipulating the database file.  The database file will be worked on in this directory, and then moved to the final location when complete.  This option is intended to improve performance when running in a networked environment, where there might be a local disk with higher bandwidth than is available to the filesystem on which the final output will reside.")
	parser.add_option("--T010150", metavar = "description", default = None, help = "Write the output to a file whose name is compatible with the file name format described in LIGO-T010150-00-E, \"Naming Convention for Frame Files which are to be Processed by LDAS\".  The description string will be used to form the second field in the file name.")
	parser.add_option("--injection-reweight", metavar = "off|astrophysical", default = "off", help = "Set the weight function to be applied to the injections (default = \"off\").  When \"off\", the injections are all given equal weight and so the injection population is whatever was injected.  When set to \"astrophysical\", the injections are reweighted to simulate an amplitude^{-4} distribution.")
	parser.add_option("--injection-reweight-cutoff", metavar = "amplitude", default = 1e-20, type = "float", help = "When using the astrophysical injection reweighting, do not allow the weight assigned to arbitrarily low-amplitude injections to grow without bound, instead clip the weight assigned to injections to the weight given to injections with this amplitude (default = 1e-20, 0 = disabled).  This option is ignored when astrophysical reweighting is not being performed.")
	parser.add_option("--vetoes-name", metavar = "name", help = "Set the name of the segment lists to use as vetoes (default = do not apply vetoes).")
	parser.add_option("-v", "--verbose", action = "store_true", help = "Be verbose.")
	options, filenames = parser.parse_args()

	# validate the --T010150 description:  mutually exclusive with
	# --output, empty string selects the default description
	if options.T010150 is not None:
		if options.output is not None:
			raise ValueError("cannot set both --T010150 and --output")
		if options.T010150 == "":
			options.T010150 = "STRING_LIKELIHOOD"
		elif set(options.T010150) - T010150_letters:
			raise ValueError("invalid characters in description \"%s\"" % options.T010150)

	# collect file names from the cache.  NOTE:  open() replaces the
	# py2-only file() builtin
	if options.input_cache:
		filenames += [CacheEntry(line).path for line in open(options.input_cache)]

	if not filenames:
		raise ValueError("no input files!")

	# NOTE:  bug fix:  this previously read options.injections_reweight
	# (a non-existent attribute), turning the error report itself into
	# an AttributeError
	if options.injection_reweight not in ("off", "astrophysical"):
		raise ValueError("--injection-reweight \"%s\" not recognized" % options.injection_reweight)

	return options, filenames
コード例 #8
0
ファイル: cache.py プロジェクト: mangesh-v/gwpy
def read_cache(lcf, coltype=LIGOTimeGPS):
    """Read a LAL-format cache file

    Parameters
    ----------
    lcf : `str`, `file`
        Input file or file path to read

    coltype : `LIGOTimeGPS`, `int`, optional
        Type for GPS times

    Returns
    -------
    cache : `list` of :class:`lal.utils.CacheEntry`

    Notes
    -----
    This method requires |lal|_.
    """
    from lal.utils import CacheEntry  # pylint: disable=redefined-outer-name

    # if given a path rather than an open file, open it and recurse
    if not isinstance(lcf, FILE_LIKE):
        with open(lcf, 'r') as fobj:
            return read_cache(fobj, coltype=coltype)

    # parse each line of the open file into a CacheEntry, decoding
    # bytes lines first
    entries = []
    for raw in lcf:
        text = raw.decode('utf-8') if isinstance(raw, bytes) else raw
        entries.append(CacheEntry(text, coltype=coltype))
    return entries
コード例 #9
0
def make_cache_entry(input_cache, description, path):
	"""
	Construct a single CacheEntry summarizing the instruments and the
	time interval spanned by the entries in input_cache.  path, if
	non-empty, supplies the file for the entry's URL;  otherwise a
	placeholder URL is used.
	"""
	# summarize segment information
	seglists = segments.segmentlistdict()
	for c in input_cache:
		seglists |= c.segmentlistdict

	# obtain sorted instrument list, skipping the unknown-instrument
	# (None) key.  NOTE:  computed before pruning empty lists so that
	# instruments with empty segment lists still appear in the name.
	# NOTE:  py3 fix:  dict views don't support .remove()/.sort(), so
	# build the list with sorted() instead
	instruments = sorted(instrument for instrument in seglists if instrument is not None)

	# remove empty segment lists to allow extent_all() to work.
	# iterate over a snapshot of the keys because the dict is mutated
	# in the loop (a py3 RuntimeError otherwise)
	for instrument in list(seglists):
		if not seglists[instrument]:
			del seglists[instrument]

	# make the URL
	if path:
		url = "file://localhost%s" % os.path.abspath(path)
	else:
		# FIXME:  old version of CacheEntry allowed None for URL,
		# new version doesn't.  correct fix is to modify calling
		# code to not try to initialize the output cache until
		# after the input is known, but for now we'll just do this
		# stupid hack.
		url = "file://localhost/dev/null"

	# construct a cache entry from the instruments and
	# segments that remain
	return CacheEntry("+".join(instruments) or None, description, seglists.extent_all(), url)
コード例 #10
0
	def get_output_cache(self):
		"""
		Return a LAL cache (one-element list of CacheEntry) describing
		the output file.  Calling this method also induces the output
		name to get set, so it must be called at least once.
		"""
		if self.output_cache:
			return self.output_cache
		span = segments.segment(lal.LIGOTimeGPS(self.get_start()), lal.LIGOTimeGPS(self.get_end()))
		url = "file://localhost" + os.path.abspath(self.get_output())
		self.output_cache = [CacheEntry(self.get_ifo(), self.__usertag, span, url)]
		return self.output_cache
コード例 #11
0
	def get_output_cache(self):
		"""
		Return a LAL cache (one-element list of CacheEntry) describing
		the output file.  Calling this method also induces the output
		name to get set, so it must be called at least once.
		"""
		if not self.output_cache:
			# FIXME:  instruments hardcoded to "everything"
			span = segments.segment(lal.LIGOTimeGPS(self.get_start()), lal.LIGOTimeGPS(self.get_end()))
			url = "file://localhost" + os.path.abspath(self.get_output())
			self.output_cache = [CacheEntry(u"G1+H1+H2+L1+T1+V1", self.__usertag, span, url)]
		return self.output_cache
コード例 #12
0
def path2cache(rootdir, pathname):
    """
    Return a Cache (list of CacheEntry objects) for every file under
    rootdir matching the glob-compatible, possibly wildcarded, pattern
    pathname.
    NOTE: this will only work with files that comply with the T050017
    file convention.
    """
    matches = glob.iglob(os.path.join(rootdir, pathname))
    return [CacheEntry.from_T050017(match) for match in matches]
コード例 #13
0
ファイル: test_data.py プロジェクト: gwpy/gwsumm
 def setup_class(cls):
     """Download each LOSC frame file into a temp dir and index it per channel."""
     cls.FRAMES = {}
     cls._tempdir = tempfile.mkdtemp(prefix='gwsumm-test-data-')
     # fetch the data, building one Cache of local copies per channel
     for channel in LOSC_DATA:
         frames = Cache()
         for gwf in LOSC_DATA[channel]:
             local = os.path.join(cls._tempdir, os.path.basename(gwf))
             download(gwf, local)
             frames.append(CacheEntry.from_T050017(local))
         cls.FRAMES[channel] = frames
コード例 #14
0
ファイル: test_data.py プロジェクト: eagoetz/gwsumm
 def setup_class(cls):
     """Fetch the LOSC test frames into a scratch directory and cache them."""
     cls.FRAMES = {}
     cls._tempdir = tempfile.mkdtemp(prefix='gwsumm-test-data-')
     # download every frame for every channel, recording local copies
     for channel, remotes in ((chan, LOSC_DATA[chan]) for chan in LOSC_DATA):
         cache = Cache()
         cls.FRAMES[channel] = cache
         for remote in remotes:
             local = os.path.join(cls._tempdir, os.path.basename(remote))
             download(remote, local)
             cache.append(CacheEntry.from_T050017(local))
コード例 #15
0
def parse_command_line():
    """
    Parse the command line.

    Returns (options, filenames, paramdict) where filenames is the list
    of likelihood data files drawn from the command line, the
    --input-cache LAL cache and any --SNRPDF-file arguments, and
    paramdict is a snapshot of the parsed options suitable for a
    process params table.

    Raises ValueError if no input files are found or --output is unset.
    """
    parser = OptionParser(version="Name: %%prog\n%s" % git_version.verbose_msg)
    parser.add_option(
        "-c",
        "--input-cache",
        metavar="filename",
        help=
        "Also process the files named in this LAL cache.  See lalapps_path2cache for information on how to produce a LAL cache file."
    )
    parser.add_option(
        "-n",
        "--ranking-stat-samples",
        metavar="N",
        default=2**24,
        type="int",
        help=
        "Construct ranking statistic histograms by drawing this many samples from the ranking statistic generator (default = 2^24)."
    )
    parser.add_option(
        "-o",
        "--output",
        metavar="filename",
        help=
        "Write merged likelihood ratio histograms to this LIGO Light-Weight XML file."
    )
    parser.add_option(
        "-s",
        "--SNRPDF-file",
        metavar="filename",
        action="append",
        help=
        "Read pre-computed SNR PDF from this LIGO Light-Weight XML file (optional)."
    )
    parser.add_option("-v",
                      "--verbose",
                      action="store_true",
                      help="Be verbose.")
    options, filenames = parser.parse_args()

    # snapshot the raw options before they are post-processed
    paramdict = options.__dict__.copy()

    # NOTE:  bug fix:  the help text says "Also process", and the other
    # parsers in this suite append, so append the cache contents to the
    # command-line file names instead of replacing them
    if options.input_cache is not None:
        filenames += [
            CacheEntry(line).path for line in open(options.input_cache)
        ]
    if options.SNRPDF_file is not None:
        filenames += options.SNRPDF_file
    if not filenames:
        raise ValueError(
            "no ranking statistic likelihood data files specified")

    if options.output is None:
        raise ValueError("must set --output")

    return options, filenames, paramdict
コード例 #16
0
ファイル: test_cache.py プロジェクト: mangesh-v/gwpy
def cache():
    """Build a list of CacheEntry objects, one per segment in SEGMENTS."""
    try:
        from lal.utils import CacheEntry
    except ImportError as e:
        pytest.skip(str(e))

    # file names follow the T050017 convention: <obs>-<tag>-<start>-<duration>
    return [
        CacheEntry.from_T050017(
            'A-B-%d-%d.tmp' % (seg[0], seg[1] - seg[0]), coltype=int)
        for seg in SEGMENTS
    ]
コード例 #17
0
ファイル: cafe.py プロジェクト: phyytang/lalsuite
def load_cache(filename, verbose=False):
    """
	Parse a LAL cache file named filename into a list of
	lal.utils.CacheEntry objects.  If filename is None then input is
	taken from stdin.
	"""
    if verbose:
        print("reading %s ..." % (filename or "stdin"), file=sys.stderr)
    if filename is None:
        return [CacheEntry(line) for line in sys.stdin]
    # NOTE:  bug fix:  the file object was previously never closed;
    # use a context manager so the descriptor is released promptly
    with open(filename) as f:
        return [CacheEntry(line) for line in f]
コード例 #18
0
ファイル: test_io.py プロジェクト: lewton1644/gwpy
    def make_cache():
        """Return (cache, segs): T050017-named CacheEntry objects and their segments."""
        try:
            from lal.utils import CacheEntry
        except ImportError as e:
            pytest.skip(str(e))

        segs = SegmentList()
        cache = []
        for start, end in [(0, 1), (1, 2), (4, 5)]:
            name = 'A-B-%d-%d.tmp' % (start, end - start)
            cache.append(CacheEntry.from_T050017(name, coltype=int))
            segs.append(Segment(start, end))
        return cache, segs
コード例 #19
0
ファイル: test_io.py プロジェクト: stefco/gwpy
    def make_cache():
        """Return (cache, segs): a Cache of T050017-named entries and their segments."""
        try:
            from lal.utils import CacheEntry
        except ImportError as e:
            pytest.skip(str(e))

        segs = SegmentList()
        cache = Cache()
        for start, end in [(0, 1), (1, 2), (4, 5)]:
            name = 'A-B-%d-%d.tmp' % (start, end - start)
            cache.append(CacheEntry.from_T050017(name, coltype=int))
            segs.append(Segment(start, end))
        return cache, segs
コード例 #20
0
ファイル: utils.py プロジェクト: lpsinger/segments
def fromlalcache(cachefile, coltype=int):
    """
    Construct a segmentlist representing the times spanned by the files
    identified in the LAL cache contained in the file object cachefile.
    The segmentlist will be created with segments whose boundaries are
    of type coltype, which should raise ValueError if it cannot convert
    its string argument.

    See also:

    lal.utils.CacheEntry
    """
    from lal.utils import CacheEntry
    spans = (CacheEntry(line, coltype=coltype).segment for line in cachefile)
    return segments.segmentlist(spans)
コード例 #21
0
ファイル: mocks.py プロジェクト: jumbokh/gwpy
def mock_datafind_connection(framefile):
    """Return a mock GWDataFindHTTPConnection that serves *framefile* only."""
    try:
        from lal.utils import CacheEntry
    except ImportError as e:
        pytest.skip(str(e))
    from glue import datafind

    entry = CacheEntry.from_T050017(framefile)
    # create mock up of connection object whose every query resolves to
    # the single frame file's cache entry
    connection = mock.create_autospec(datafind.GWDataFindHTTPConnection)
    connection.find_types.return_value = [entry.description]
    connection.find_latest.return_value = [entry]
    connection.find_frame_urls.return_value = [entry]
    return connection
コード例 #22
0
def parse_command_line():
    """Parse the plotbinj command line.

    Returns (options, filenames), where filenames is the list of trigger
    files gathered from positional arguments plus any --input-cache files.
    Raises ValueError for an unrecognized --amplitude-func.
    """
    parser = OptionParser(
        version = "Name: %%prog\n%s" % git_version.verbose_msg
    )
    parser.add_option("--made-only", action = "store_true", default = False, help = "Plot only injections that were made.")
    parser.add_option("-b", "--base", metavar = "base", default = "plotbinj_", help = "Set the prefix for output filenames (default = \"plotbinj_\")")
    parser.add_option("-f", "--format", metavar = "format", default = "png", help = "Set the output image format (default = \"png\")")
    parser.add_option("--amplitude-func", metavar = "hrsswave|hrssdet|E", default = "hrsswave", help = "Select the amplitude to show on the plots.  \"hrsswave\" = the h_rss of the wave, \"hrssdet\" = the h_rss in the detector, \"E\" = the radiated energy over r^2.")
    parser.add_option("--input-cache", metavar = "filename", action = "append", default = [], help = "Get list of trigger files from this LAL cache file.")
    parser.add_option("-l", "--live-time-program", metavar = "program", default = "lalapps_power", help = "Set the name, as it appears in the process table, of the program whose search summary entries define the search live time (default = \"lalapps_power\").")
    parser.add_option("--plot", metavar = "number", action = "append", default = None, help = "Generate the given plot number (default = make all plots).  Use \"none\" to disable plots.")
    parser.add_option("--coinc-plot", metavar = "number", action = "append", default = None, help = "Generate the given coinc plot number (default = make all coinc plots).  Use \"none\" to disable coinc plots.")
    parser.add_option("-t", "--tmp-space", metavar = "path", help = "Path to a directory suitable for use as a work area while manipulating the database file.  The database file will be worked on in this directory, and then moved to the final location when complete.  This option is intended to improve performance when running in a networked environment, where there might be a local disk with higher bandwidth than is available to the filesystem on which the final output will reside.")
    parser.add_option("-v", "--verbose", action = "store_true", help = "Be verbose.")
    options, filenames = parser.parse_args()

    # map the symbolic amplitude name to a callable and a matching axis label
    if options.amplitude_func == "hrsswave":
        options.amplitude_func = lambda sim, instrument, offsetvector: sim.hrss
        options.amplitude_lbl = r"$h_{\mathrm{rss}}$"
    elif options.amplitude_func == "hrssdet":
        options.amplitude_func = SimBurstUtils.hrss_in_instrument
        options.amplitude_lbl = r"$h_{\mathrm{rss}}^{\mathrm{det}}$"
    elif options.amplitude_func == "E":
        options.amplitude_func = lambda sim, instrument, offsetvector: sim.egw_over_rsquared
        options.amplitude_lbl = r"$M_{\odot} / \mathrm{pc}^{2}$"
    else:
        raise ValueError("unrecognized --amplitude-func %s" % options.amplitude_func)

    # normalize plot selections to concrete lists of ints;  under Python 3
    # range() and map() are lazy, so materialize them explicitly
    if options.plot is None:
        options.plot = list(range(10))
    elif "none" in options.plot:
        options.plot = []
    else:
        options.plot = [int(x) for x in options.plot]
    if options.coinc_plot is None:
        options.coinc_plot = list(range(2))
    elif "none" in options.coinc_plot:
        options.coinc_plot = []
    else:
        options.coinc_plot = [int(x) for x in options.coinc_plot]

    filenames = filenames or []
    for cache in options.input_cache:
        if options.verbose:
            # Python 3 spelling of the old "print >>sys.stderr" statement
            print("reading '%s' ..." % cache, file=sys.stderr)
        # file() was removed in Python 3;  open() is the portable spelling
        filenames += [CacheEntry(line).path for line in open(cache)]

    return options, filenames
コード例 #23
0
def parse_command_line():
    """Parse the command line for the burst clustering tool.

    Returns (options, filenames); filenames falls back to [None] so the
    caller's file loop reads from stdin when no inputs are given.  Raises
    ValueError when --cluster-algorithm is missing or unrecognized.
    """
    parser = OptionParser(
        version="Name: %%prog\n%s" % git_version.verbose_msg,
        usage="%prog [options] [file ...]",
        description=
        "Run a single-instrument burst clustering algorithm on the sngl_burst events contained in LIGO Light Weight XML files.  Files can be listed on the command line and/or in one or more LAL cache files.  If no files are named, then input is read from stdin and written to stdout."
    )
    parser.add_option(
        "--comment",
        metavar="text",
        help=
        "Set the comment string to be recorded in the process table (default = None)."
    )
    parser.add_option("-c",
                      "--cluster-algorithm",
                      metavar="[excesspower]",
                      help="Set clustering method (required).")
    parser.add_option("-i",
                      "--input-cache",
                      metavar="filename",
                      action="append",
                      default=[],
                      help="Process the files listed in this LAL cache.")
    parser.add_option(
        "-p",
        "--program",
        metavar="name",
        default="lalapps_power",
        help=
        "Set the name of the program that generated the events as it appears in the process table (default = \"lalapps_power\")."
    )
    parser.add_option("-v",
                      "--verbose",
                      action="store_true",
                      help="Be verbose.")
    options, filenames = parser.parse_args()

    if options.cluster_algorithm is None:
        raise ValueError("missing required argument --cluster-algorithm")
    if options.cluster_algorithm not in ("excesspower", ):
        raise ValueError("unrecognized --cluster-algorithm %s" %
                         options.cluster_algorithm)

    # file() was removed in Python 3;  open() is the portable spelling
    for cache in options.input_cache:
        filenames += [CacheEntry(line).path for line in open(cache)]

    return options, (filenames or [None])
コード例 #24
0
ファイル: mocks.py プロジェクト: stefco/gwpy
def mock_datafind_connection(framefile):
    """Return a mock GWDataFindHTTPConnection whose queries resolve to framefile.

    Skips the calling test when :mod:`lal` is not importable.
    """
    try:
        from lal.utils import CacheEntry
    except ImportError as e:
        pytest.skip(str(e))
    from glue import datafind
    entry = CacheEntry.from_T050017(framefile)
    # create mock up of connection object; every query answers with the
    # single entry derived from framefile
    connection = mock.create_autospec(datafind.GWDataFindHTTPConnection)
    connection.find_types.return_value = [entry.description]
    connection.find_latest.return_value = [entry]
    connection.find_frame_urls.return_value = [entry]
    connection.host = 'mockhost'
    connection.port = 80
    return connection
コード例 #25
0
def parse_command_line():
    """Parse the command line.

    Returns (options, process_params, filenames), where process_params is a
    snapshot of the raw option values and filenames collects positional
    arguments plus any files listed in --input-cache.  Raises ValueError
    when no candidate databases are given.
    """
    parser = OptionParser()
    parser.add_option(
        "--rankingstatpdf-file",
        metavar="filename",
        help=
        "Set the name of the xml file containing the marginalized likelihood.")
    parser.add_option(
        "-c",
        "--input-cache",
        metavar="filename",
        help=
        "Also process the files named in this LAL cache.  See lalapps_path2cache for information on how to produce a LAL cache file."
    )
    parser.add_option(
        "--non-injection-db",
        metavar="filename",
        default=[],
        action="append",
        help=
        "Provide the name of a database from a non-injection run.  Can be given multiple times."
    )
    parser.add_option(
        "--tmp-space",
        metavar="dir",
        help="Set the name of the tmp space if working with sqlite.")
    parser.add_option("--verbose",
                      "-v",
                      action="store_true",
                      help="Be verbose.")
    options, filenames = parser.parse_args()

    # snapshot the raw option values for the process_params table
    process_params = dict(options.__dict__)

    if options.input_cache:
        filenames.extend(
            CacheEntry(line).path for line in open(options.input_cache))
    if not filenames:
        raise ValueError("no candidate databases specified")

    return options, process_params, filenames
コード例 #26
0
def parse_command_line():
    """Parse the string_plotbinj command line.

    Returns (options, filenames).  Raises ValueError for an unrecognized
    --amplitude-func or when no input files are supplied.
    """
    parser = OptionParser(
        version = "Name: %%prog\n%s" % git_version.verbose_msg
    )
    parser.add_option("-b", "--base", metavar = "base", default = "string_plotbinj_", help = "Set the prefix for output filenames (default = \"string_plotbinj_\").")
    parser.add_option("-f", "--format", metavar = "format", action = "append", default = [], help = "Set the output image format (default = \"png\").  Option can be given multiple times to generate plots in multiple formats.")
    parser.add_option("--amplitude-func", metavar = "det|wave", default = "det", help = "Select the amplitude to show on the plots.  \"det\" = the amplitude expected in the detector, \"wave\" = the amplitude of the wave (default = \"det\").")
    parser.add_option("--input-cache", metavar = "filename", action = "append", default = [], help = "Get list of files from this LAL cache.")
    parser.add_option("-l", "--live-time-program", metavar = "program", default = "StringSearch", help = "Set the name of the program, as it appears in the process table, whose search summary entries define the search live time (default = \"StringSearch\").")
    parser.add_option("--plot", metavar = "number", action = "append", default = None, help = "Generate the given plot number (default = make all plots).")
    parser.add_option("-t", "--tmp-space", metavar = "path", help = "Path to a directory suitable for use as a work area while manipulating the database file.  The database file will be worked on in this directory, and then moved to the final location when complete.  This option is intended to improve performance when running in a networked environment, where there might be a local disk with higher bandwidth than is available to the filesystem on which the final output will reside.")
    parser.add_option("-v", "--verbose", action = "store_true", help = "Be verbose.")
    options, filenames = parser.parse_args()

    # map the symbolic amplitude name to a callable and a matching axis label
    if options.amplitude_func == "wave":
        options.amplitude_func = lambda sim, instrument, offsetvector: sim.amplitude
        options.amplitude_lbl = r"$A^{\mathrm{wave}}$"
    elif options.amplitude_func == "det":
        options.amplitude_func = SimBurstUtils.string_amplitude_in_instrument
        options.amplitude_lbl = r"$A^{\mathrm{det}}$"
    else:
        raise ValueError("unrecognized --amplitude-func %s" % options.amplitude_func)

    if not options.format:
        options.format = ["png"]

    if options.plot is not None:
        # use sorted(set(...)) to ensure the indexes are ordered
        # and unique
        options.plot = sorted(set(map(int, options.plot)))

    filenames = filenames or []
    for cache in options.input_cache:
        if options.verbose:
            # Python 3 spelling of the old "print >>sys.stderr" statement
            print("reading '%s' ..." % cache, file=sys.stderr)
        # file() was removed in Python 3;  open() is the portable spelling
        filenames += [CacheEntry(line).path for line in open(cache)]
    if not filenames:
        raise ValueError("Nothing to do!")

    return options, filenames
コード例 #27
0
ファイル: test_timeseries_io.py プロジェクト: stefco/gwpy
def test_get_mp_cache_segments():
    """Test `gwpy.timeseries.io.cache.get_mp_cache_segments`
    """
    from lal.utils import CacheEntry
    from glue.lal import Cache
    from glue.segmentsUtils import segmentlist_range
    Cache.entry_class = CacheEntry

    # make cache of 2-second files spanning [0, 10) and [20, 30)
    # (the former "cache = Cache()" initializer was dead code -- the name
    # was immediately rebound below -- and has been removed)
    segments = SegmentList([Segment(0, 10), Segment(20, 30)])
    fsegs = SegmentList([s for seg in segments for
                         s in segmentlist_range(seg[0], seg[1], 2)])
    cache = Cache([CacheEntry.from_T050017(
                       'A-B-{0}-{1}.ext'.format(s[0], abs(s)))
                   for s in fsegs])

    # assert that no multiprocessing just returns the segment
    assert_segmentlist_equal(
        tio_cache.get_mp_cache_segments(cache, 1, Segment(0, 30)),
        SegmentList([Segment(0, 30)]))

    # simple test that segments get divided as expected
    mpsegs = tio_cache.get_mp_cache_segments(cache, 2, Segment(0, 30))
    assert_segmentlist_equal(mpsegs, segments)

    # test that mismatch with files edges is fine
    mpsegs = tio_cache.get_mp_cache_segments(cache, 2, Segment(0, 21))
    assert not mpsegs - SegmentList([Segment(0, 21)])

    # test segment divisions
    mpsegs = tio_cache.get_mp_cache_segments(cache, 4, Segment(0, 30))
    assert_segmentlist_equal(
        mpsegs,
        SegmentList(map(Segment, [(0, 6), (6, 10), (20, 26), (26, 30)]))
    )
コード例 #28
0
power.init_job_types(config_parser)

#
# Using time slide information, construct segment lists describing times
# requiring trigger construction.
#

if options.verbose:
    # Python 3 spelling of the old "print >>sys.stderr" statement
    print("Computing segments for which lalapps_power jobs are required ...",
          file=sys.stderr)

# map: cache entry for each time slide file --> offset vectors in that file
background_time_slides = {}
background_seglistdict = segments.segmentlistdict()
if options.do_noninjections:
    for filename in options.background_time_slides:
        cache_entry = CacheEntry(
            None, None, None, "file://localhost" + os.path.abspath(filename))
        background_time_slides[cache_entry] = timeslides.load_time_slides(
            filename, verbose=options.verbose,
            gz=filename.endswith(".gz")).values()
        # accumulate the segments needing analysis for these offset vectors
        background_seglistdict |= compute_segment_lists(
            seglistdict,
            background_time_slides[cache_entry],
            options.minimum_gap,
            options.timing_params,
            full_segments=options.full_segments,
            verbose=options.verbose)

injection_time_slides = {}
injection_seglistdict = segments.segmentlistdict()
if options.do_injections:
    for filename in options.injection_time_slides:
コード例 #29
0
# timing parameters for the StringSearch segmentation of the data
short_segment_duration = config_parser.getint('lalapps_StringSearch',
                                              'short-segment-duration')
pad = config_parser.getint('lalapps_StringSearch', 'pad')
min_segment_length = config_parser.getint(
    'pipeline', 'segment-length')  # not including pad at each end
trig_overlap = config_parser.getint('pipeline', 'trig_overlap')
# floor division keeps overlap an integer number of seconds, matching the
# Python 2 "/" semantics these getint() values were written for
overlap = short_segment_duration // 2 + 2 * pad  # FIXME:  correct?

#
# get the instruments and raw segments
#

instruments = lsctables.instrumentsproperty.get(
    config_parser.get('pipeline', 'ifos'))
# single-entry cache pointing at the segments file, for DAG bookkeeping
segments_cache = set([
    CacheEntry(None, "SEG", None,
               "file://localhost" + os.path.abspath(options.segments_file))
])
seglists = ligolw_segments.segmenttable_get_by_name(
    ligolw_utils.load_filename(
        options.segments_file,
        contenthandler=ligolw_segments.LIGOLWContentHandler,
        verbose=options.verbose), options.segments_name).coalesce()
# remove extra instruments
for instrument in set(seglists) - instruments:
    if options.verbose:
        print("warning: ignoring segments for '%s' found in '%s'" %
              (instrument, options.segments_file),
              file=sys.stderr)
    del seglists[instrument]
# check for missing instruments
if not instruments.issubset(set(seglists)):
コード例 #30
0
    # NOTE(review): fragment of a per-file loop whose header is not visible
    # here; relies on `options`, `process_ids`, `searchsumm`, `seglists`,
    # `observatory`, `filename` and `xmldoc` bound by the enclosing scope.
    # extract description
    if options.description:
        description = options.description
    else:
        # derive the description from the search summary comments, optionally
        # restricted to the rows belonging to process_ids
        if process_ids is None:
            description = set(searchsumm.getColumnByName("comment"))
        else:
            description = set(row.comment for row in searchsumm
                              if row.process_id in process_ids)
        # exactly one distinct comment is required to name this file
        if len(description) < 1:
            raise ValueError(
                "%s: no matching rows found in search summary table" %
                filename)
        if len(description) > 1:
            raise ValueError(
                "%s: comments in matching rows of search summary table are not identical"
                % filename)
        # an empty comment is treated as "no description"
        description = description.pop().strip() or None

    # set URL
    url = "file://localhost" + os.path.abspath(filename)

    # write cache entry
    print(str(CacheEntry(observatory, description, seglists.extent_all(),
                         url)),
          file=options.output)

    # allow garbage collection
    xmldoc.unlink()
コード例 #31
0
def parse_command_line():
    """Parse the burca likelihood command line.

    Returns (options, filenames); filenames falls back to [None] so the
    caller's loop reads from stdin when no inputs are given.  Raises
    ValueError for missing/conflicting likelihood-data options or a missing
    --program.
    """
    parser = OptionParser(
        version="Name: %%prog\n%s" % git_version.verbose_msg,
        usage="%prog [options] [file ...]",
        description=
        "%prog uses likelihood ratio data stored in LIGO Light-Weight XML files to compute likelihood ratio values for excess power coincs in SQLite databases."
    )
    parser.add_option(
        "-c",
        "--comment",
        metavar="text",
        help="Set comment string in process table (default = None).")
    parser.add_option(
        "-p",
        "--program",
        metavar="name",
        help=
        "Set the name of the program that generated the events as it appears in the process table (required).  The program name is used to extract live time information from the search summary tables in the input files."
    )
    parser.add_option(
        "--likelihood-data",
        metavar="filename",
        default=[],
        action="append",
        help=
        "Read likelihood data from this XML file.  (use lalapps_burca_tailor to generate these files)"
    )
    parser.add_option(
        "--likelihood-data-cache",
        metavar="filename",
        help=
        "Read likelihood data from the XML files described by this LAL cache.  For each trigger file, the live time of the trigger file is established and all likelihood data files whose segments intersect the trigger file's live time are loaded and merged into a single distribution data set.  (use lalapps_burca_tailor to generate these files)"
    )
    parser.add_option(
        "--tmp-space",
        metavar="path",
        help=
        "Path to a directory suitable for use as a work area while manipulating the database file.  The database file will be worked on in this directory, and then moved to the final location when complete.  This option is intended to improve performance when running in a networked environment, where there might be a local disk with higher bandwidth than is available to the filesystem on which the final output will reside."
    )
    parser.add_option("-v",
                      "--verbose",
                      action="store_true",
                      help="Be verbose.")
    options, filenames = parser.parse_args()

    #
    # check and convert and bunch of arguments
    #

    options.likelihood_data = set(options.likelihood_data)
    if (not options.likelihood_data) and (options.likelihood_data_cache is
                                          None):
        raise ValueError(
            "must set one of --likelihood-data or --likelihood-data-cache")
    if options.likelihood_data and (options.likelihood_data_cache is not None):
        raise ValueError(
            "cannot set both --likelihood-data and --likelihood-data-cache")
    if options.likelihood_data_cache:
        # file() was removed in Python 3;  open() is the portable spelling
        options.likelihood_data_cache = set(
            CacheEntry(line) for line in open(options.likelihood_data_cache))
    else:
        options.likelihood_data_cache = set()
    if options.program is None:
        raise ValueError("missing required argument --program")

    #
    # done
    #

    return options, (filenames or [None])
コード例 #32
0
def test_file_path_cacheentry():
    """file_path() should unwrap a CacheEntry back to its filesystem path."""
    from lal.utils import CacheEntry
    expected = "/path/to/A-B-0-1.txt"
    entry = CacheEntry.from_T050017(expected)
    assert io_utils.file_path(entry) == expected
コード例 #33
0
def parse_command_line():
    """Parse the pipeline-construction command line.

    Returns (options, filenames).  Raises ValueError when any of the
    required options (--log-path, --config-file, --background-time-slides,
    --injection-time-slides, --segments-file) is missing.  On return,
    options.vetoes_cache is a set of CacheEntry objects (empty when no
    --vetoes-file was given) and options.injection_time_slides has been
    wrapped in a one-element list.
    """
    parser = OptionParser(usage="%prog [options] ...", description="FIXME")
    parser.add_option("-f",
                      "--config-file",
                      metavar="filename",
                      help="Use this configuration file (required).")
    parser.add_option(
        "-l",
        "--log-path",
        metavar="path",
        help="Make condor put log files in this directory (required).")
    parser.add_option(
        "--background-time-slides",
        metavar="filename",
        action="append",
        help=
        "Set the name of the file from which to obtain the time slide table for use in the background branch of the pipeline (required).  This option can be given multiple times to parallelize the background analysis across time slides.  You will want to make sure the time slide files have distinct vectors to not repeat the same analysis multiple times, and in particular you'll want to make sure only one of them has a zero-lag vector in it."
    )
    parser.add_option(
        "--injection-time-slides",
        metavar="filename",
        help=
        "Set the name of the file from which to obtain the time slide table for use in the injection branch of the pipeline (required)."
    )
    parser.add_option(
        "--segments-file",
        metavar="filename",
        help=
        "Set the name of the LIGO Light-Weight XML file from which to obtain segment lists (required).  See ligolw_segments and ligolw_segment_query for more information on constructing an XML-format segments file.  See also --segments-name."
    )
    parser.add_option(
        "--segments-name",
        metavar="name",
        default="segments",
        help=
        "Set the name of the segment lists to retrieve from the segments file (default = \"segments\").  See also --segments-file."
    )
    parser.add_option(
        "--vetoes-file",
        metavar="filename",
        help=
        "Set the name of the LIGO Light-Weight XML file from which to obtain veto segment lists (optional).  See ligolw_segments and ligolw_segment_query for more information on constructing an XML-format segments file.  See also --vetos-name."
    )
    parser.add_option(
        "--vetoes-name",
        metavar="name",
        default="vetoes",
        help=
        "Set the name of the segment lists to retrieve from the veto segments file (default = \"vetoes\").  See also --vetoes-file."
    )
    parser.add_option("-v",
                      "--verbose",
                      action="store_true",
                      help="Be verbose.")

    options, filenames = parser.parse_args()

    # fail early with a single message listing every missing required option
    required_options = [
        "log_path", "config_file", "background_time_slides",
        "injection_time_slides", "segments_file"
    ]
    missing_options = [
        option for option in required_options
        if getattr(options, option) is None
    ]
    if missing_options:
        raise ValueError("missing required options %s" % ", ".join(
            sorted("--%s" % option.replace("_", "-")
                   for option in missing_options)))

    # represent the (optional) vetoes file as a cache for DAG bookkeeping
    if options.vetoes_file is not None:
        options.vetoes_cache = set([
            CacheEntry(
                None, "VETO", None,
                "file://localhost" + os.path.abspath(options.vetoes_file))
        ])
    else:
        options.vetoes_cache = set()

    # downstream code iterates over a list of injection time slide files
    options.injection_time_slides = [options.injection_time_slides]

    return options, filenames
コード例 #34
0
#
# Other initializations
#

path_count = 0  # number of cache lines written to dst
seglists = segments.segmentlistdict()  # union of segments seen in the cache

#
# Filter input one line at a time
#

for line in src:
    path, filename = os.path.split(line.strip())
    url = "file://localhost%s" % os.path.abspath(os.path.join(path, filename))
    try:
        cache_entry = CacheEntry.from_T050017(url)
    except ValueError:
        if options.include_all:
            # fall back to a metadata-free cache entry
            cache_entry = CacheEntry(None, None, None, url)
        elif options.force:
            # skip names that don't follow the T050017 convention
            continue
        else:
            # bare raise preserves the original traceback ("raise e" did not)
            raise
    print(str(cache_entry), file=dst)
    path_count += 1
    if cache_entry.segment is not None:
        seglists |= cache_entry.segmentlistdict.coalesce()

#
# Summary
#