Example #1
import logging


def add_container_args(parser):
    container_args = parser.add_argument_group(
        "optional arguments for run container")
    try:
        from benchexec import containerexecutor
    except Exception:
        # This fails e.g. on MacOS X because of missing libc.
        # We want to keep BenchExec usable for cases where the
        # localexecutor is replaced by something else.
        logging.debug("Could not import container feature:", exc_info=1)
        container_args.add_argument(
            "--no-container",
            action="store_false",
            dest="container",
            required=True,
            help="disable use of containers for isolation of runs "
            "(REQUIRED because this system does not support container mode)",
        )
    else:
        container_on_args = container_args.add_mutually_exclusive_group()
        container_on_args.add_argument(
            "--container",
            action="store_true",
            dest="_ignored_container",
            help="force isolation of run in container (default)",
        )
        container_on_args.add_argument(
            "--no-container",
            action="store_false",
            dest="container",
            help="disable use of containers for isolation of runs",
        )
        containerexecutor.add_basic_container_args(container_args)
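
A minimal usage sketch (not part of the original example): on a system where the container feature imports successfully, attach the options to a fresh parser and read the resulting flag. The parser name and the arguments shown are illustrative only.

import argparse

parser = argparse.ArgumentParser(prog="demo")   # hypothetical stand-alone parser
add_container_args(parser)

args = parser.parse_args([])   # no flags given: container isolation stays enabled
print(args.container)          # True; passing --no-container would set it to False
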
Example #2
    def create_argument_parser(self):
        """
        Create a parser for the command-line options.
        May be overwritten for adding more configuration options.
        @return: an argparse.ArgumentParser instance
        """
        parser = argparse.ArgumentParser(
            fromfile_prefix_chars='@',
            description=
            """Execute benchmarks for a given tool with a set of input files.
               Benchmarks are defined in an XML file given as input.
               Command-line parameters can additionally be read from a file if a file name prefixed with '@' is given as an argument.
               The tool table-generator can be used to create tables for the results.
               Part of BenchExec: https://github.com/sosy-lab/benchexec/""")

        parser.add_argument("files",
                            nargs='+',
                            metavar="FILE",
                            help="XML file with benchmark definition")
        parser.add_argument(
            "-d",
            "--debug",
            action="store_true",
            help="Enable debug output and a debugging helper on signal USR1")

        parser.add_argument(
            "-r",
            "--rundefinition",
            dest="selected_run_definitions",
            action="append",
            help=
            "Run only the specified RUN_DEFINITION from the benchmark definition file. "
            +
            "This option can be specified several times and can contain wildcards.",
            metavar="RUN_DEFINITION")

        parser.add_argument(
            "-t",
            "--tasks",
            dest="selected_sourcefile_sets",
            action="append",
            help="Run only the tasks from the tasks tag with TASKS as name. " +
            "This option can be specified several times and can contain wildcards.",
            metavar="TASKS")

        parser.add_argument("-n",
                            "--name",
                            dest="name",
                            default=None,
                            help="Set name of benchmark execution to NAME",
                            metavar="NAME")

        parser.add_argument(
            "-o",
            "--outputpath",
            dest="output_path",
            type=str,
            default=self.DEFAULT_OUTPUT_PATH,
            help="Output prefix for the generated results. " +
            "If the path is a folder files are put into it," +
            "otherwise it is used as a prefix for the resulting files.")

        parser.add_argument(
            "-T",
            "--timelimit",
            dest="timelimit",
            default=None,
            help='Time limit for each run, e.g. "90s" '
            '(overwrites time limit and hard time limit from XML file, '
            'use "-1" to disable time limits completely)',
            metavar="SECONDS")

        parser.add_argument(
            "-M",
            "--memorylimit",
            dest="memorylimit",
            default=None,
            help=
            "Memory limit, if no unit is given MB are assumed (-1 to disable)",
            metavar="BYTES")

        parser.add_argument("-N",
                            "--numOfThreads",
                            dest="num_of_threads",
                            default=None,
                            type=int,
                            help="Run n benchmarks in parallel",
                            metavar="n")

        parser.add_argument(
            "-c",
            "--limitCores",
            dest="corelimit",
            default=None,
            metavar="N",
            help="Limit each run of the tool to N CPU cores (-1 to disable).")

        parser.add_argument(
            "--user",
            dest="users",
            action="append",
            metavar="USER",
            help=
            "Execute benchmarks under given user account(s) (needs password-less sudo setup)."
        )

        parser.add_argument("--no-compress-results",
                            dest="compress_results",
                            action="store_false",
                            help="Do not compress result files.")

        def parse_filesize_value(value):
            try:
                value = int(value)
                if value == -1:
                    return None
                logging.warning(
                    'Value "%s" for logfile size interpreted as MB for backwards compatibility, '
                    'specify a unit to make this unambiguous.', value)
                value = value * _BYTE_FACTOR * _BYTE_FACTOR
            except ValueError:
                value = util.parse_memory_value(value)
            return value

        parser.add_argument(
            "--maxLogfileSize",
            dest="maxLogfileSize",
            type=parse_filesize_value,
            default=20 * _BYTE_FACTOR * _BYTE_FACTOR,
            metavar="SIZE",
            help="Shrink logfiles to given size if they are too big. "
            "(-1 to disable, default value: 20 MB).")

        parser.add_argument(
            "--filesCountLimit",
            type=int,
            metavar="COUNT",
            help=
            "maximum number of files the tool may write to (checked periodically, counts only files written in container mode or to temporary directories)"
        )
        parser.add_argument(
            "--filesSizeLimit",
            type=util.parse_memory_value,
            metavar="BYTES",
            help=
            "maximum size of files the tool may write (checked periodically, counts only files written in container mode or to temporary directories)"
        )

        parser.add_argument(
            "--commit",
            dest="commit",
            action="store_true",
            help=
            "If the output path is a git repository without local changes, " +
            "add and commit the result files.")

        parser.add_argument("--message",
                            dest="commit_message",
                            type=str,
                            default="Results for benchmark run",
                            help="Commit message if --commit is used.")

        parser.add_argument(
            "--startTime",
            dest="start_time",
            type=parse_time_arg,
            default=None,
            metavar="'YYYY-MM-DD hh:mm'",
            help=
            'Set the given date and time as the start time of the benchmark.')

        parser.add_argument("--version",
                            action="version",
                            version="%(prog)s " + __version__)

        container_args = parser.add_argument_group(
            "optional arguments for run container")
        container_on_args = container_args.add_mutually_exclusive_group()
        container_on_args.add_argument(
            "--container",
            action='store_true',
            help=
            "force isolation of run in container (future default starting with BenchExec 2.0)"
        )
        container_on_args.add_argument(
            "--no-container",
            action='store_true',
            help=
            "disable use of containers for isolation of runs (current default)"
        )
        containerexecutor.add_basic_container_args(container_args)

        return parser
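
A rough usage sketch of the returned parser; the BenchExec class name, its no-argument construction, and the sample command line are assumptions.

instance = BenchExec()   # hypothetical instantiation of the enclosing class
parser = instance.create_argument_parser()
config = parser.parse_args(["benchmark.xml", "--timelimit", "90s", "-N", "2"])
print(config.files, config.timelimit, config.num_of_threads, config.output_path)
# Because of fromfile_prefix_chars='@', the same options can also be collected
# in a text file and passed as a single "@options.txt" argument.
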
Example #3
    def create_argument_parser(self):
        """
        Create a parser for the command-line options.
        May be overwritten for adding more configuration options.
        @return: an argparse.ArgumentParser instance
        """
        parser = argparse.ArgumentParser(
            fromfile_prefix_chars='@',
            description=
            """Execute benchmarks for a given tool with a set of input files.
               Benchmarks are defined in an XML file given as input.
               Command-line parameters can additionally be read from a file if a file name prefixed with '@' is given as an argument.
               The tool table-generator can be used to create tables for the results.
               Part of BenchExec: https://github.com/sosy-lab/benchexec/""")

        parser.add_argument("files", nargs='+', metavar="FILE",
                          help="XML file with benchmark definition")
        parser.add_argument("-d", "--debug",
                          action="store_true",
                          help="Enable debug output and a debugging helper on signal USR1")

        parser.add_argument("-r", "--rundefinition", dest="selected_run_definitions",
                          action="append",
                          help="Run only the specified RUN_DEFINITION from the benchmark definition file. "
                                + "This option can be specified several times and can contain wildcards.",
                          metavar="RUN_DEFINITION")

        parser.add_argument("-t", "--tasks", dest="selected_sourcefile_sets",
                          action="append",
                          help="Run only the tasks from the tasks tag with TASKS as name. "
                                + "This option can be specified several times and can contain wildcards.",
                          metavar="TASKS")

        parser.add_argument("-n", "--name",
                          dest="name", default=None,
                          help="Set name of benchmark execution to NAME",
                          metavar="NAME")

        parser.add_argument("-o", "--outputpath",
                          dest="output_path", type=str,
                          default=self.DEFAULT_OUTPUT_PATH,
                          help="Output prefix for the generated results. "
                                + "If the path is a folder files are put into it,"
                                + "otherwise it is used as a prefix for the resulting files.")

        parser.add_argument("-T", "--timelimit",
                          dest="timelimit", default=None,
                          help='Time limit for each run, e.g. "90s" '
                               '(overwrites time limit and hard time limit from XML file, '
                               'use "-1" to disable time limits completely)',
                          metavar="SECONDS")

        parser.add_argument("-M", "--memorylimit",
                          dest="memorylimit", default=None,
                          help="Memory limit, if no unit is given MB are assumed (-1 to disable)",
                          metavar="BYTES")

        parser.add_argument("-N", "--numOfThreads",
                          dest="num_of_threads", default=None, type=int,
                          help="Run n benchmarks in parallel",
                          metavar="n")

        parser.add_argument("-c", "--limitCores", dest="corelimit",
                          default=None,
                          metavar="N",
                          help="Limit each run of the tool to N CPU cores (-1 to disable).")

        parser.add_argument("--user",
                            dest="users",
                            action="append",
                            metavar="USER",
                            help="Execute benchmarks under given user account(s) (needs password-less sudo setup).")

        parser.add_argument("--no-compress-results",
                            dest="compress_results", action="store_false",
                            help="Do not compress result files.")

        def parse_filesize_value(value):
            try:
                value = int(value)
                if value == -1:
                    return None
                logging.warning(
                    'Value "%s" for logfile size interpreted as MB for backwards compatibility, '
                    'specify a unit to make this unambiguous.',
                    value)
                value = value * _BYTE_FACTOR * _BYTE_FACTOR
            except ValueError:
                value = util.parse_memory_value(value)
            return value

        parser.add_argument("--maxLogfileSize", dest="maxLogfileSize",
                            type=parse_filesize_value, default=20*_BYTE_FACTOR*_BYTE_FACTOR,
                            metavar="SIZE",
                            help="Shrink logfiles to given size if they are too big. "
                                 "(-1 to disable, default value: 20 MB).")

        parser.add_argument("--commit", dest="commit",
                          action="store_true",
                          help="If the output path is a git repository without local changes, "
                                + "add and commit the result files.")

        parser.add_argument("--message",
                          dest="commit_message", type=str,
                          default="Results for benchmark run",
                          help="Commit message if --commit is used.")

        parser.add_argument("--startTime",
                          dest="start_time",
                          type=parse_time_arg,
                          default=None,
                          metavar="'YYYY-MM-DD hh:mm'",
                          help='Set the given date and time as the start time of the benchmark.')

        parser.add_argument("--version",
                            action="version",
                            version="%(prog)s " + __version__)

        container_args = parser.add_argument_group("optional arguments for run container")
        container_on_args = container_args.add_mutually_exclusive_group()
        container_on_args.add_argument("--container", action='store_true',
            help="force isolation of run in container (future default starting with BenchExec 2.0)")
        container_on_args.add_argument("--no-container", action='store_true',
            help="disable use of containers for isolation of runs (current default)")
        containerexecutor.add_basic_container_args(container_args)

        return parser
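
A note on the nested parse_filesize_value helper used for --maxLogfileSize: a bare integer is interpreted as megabytes for backwards compatibility, "-1" disables the limit, and values with a unit are delegated to util.parse_memory_value. The sketch below assumes _BYTE_FACTOR == 1000 (BenchExec uses SI units); the helper is local to create_argument_parser and not importable on its own.

parse_filesize_value("-1")    # None, i.e. no log-file size limit
parse_filesize_value("20")    # 20 * 1000 * 1000 bytes, plus a backwards-compatibility warning
parse_filesize_value("20MB")  # parsed by util.parse_memory_value
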