Example 1
def _facts():
    disks = []
    for disk in sysstats.disks():
        dev, _, fstype, _ = disk
        disks.append({
            "device": dev,
            "type": "TODO: Provide one of 'ssd', 'spinning'",
            "file-system": fstype,
        })
    return {
        "environment": {
            "type":
            "TODO: Provide one of 'cloud', 'bare-metal' + details about the environment (EC2, instance type)",
        },
        "hardware": {
            "cpu_model": sysstats.cpu_model(),
            "disk": disks,
            "memory": "%dgb" % convert.bytes_to_gb(sysstats.total_memory())
        },
        "software": {
            "jvm_vendor":
            _jvm_property("java.vm.vendor"),
            "jvm_version":
            _jvm_property("java.runtime.version"),
            "os_name":
            sysstats.os_name(),
            "os_version":
            sysstats.os_version(),
            "rally_version":
            version.version(),
            "distribution_version":
            "TODO: Provide Elasticsearch distribution version"
        }
    }
Example 2
def main():
    check_python_version()
    log.install_default_log_config()
    log.configure_logging()
    logger = logging.getLogger(__name__)
    start = time.time()

    # Early init of console output so we start to show everything consistently.
    console.init(quiet=False)

    arg_parser = create_arg_parser()
    args = arg_parser.parse_args()

    console.init(quiet=args.quiet)
    console.println(BANNER)

    cfg = config.Config(config_name=args.configuration_name)
    if not cfg.config_present():
        cfg.install_default_config()
    cfg.load_config(auto_upgrade=True)
    cfg.add(config.Scope.application, "system", "time.start", datetime.datetime.utcnow())
    # Local config per node
    cfg.add(config.Scope.application, "node", "rally.root", paths.rally_root())
    cfg.add(config.Scope.application, "node", "rally.cwd", os.getcwd())

    logger.info("OS [%s]", str(platform.uname()))
    logger.info("Python [%s]", str(sys.implementation))
    logger.info("Rally version [%s]", version.version())
    logger.debug("Command line arguments: %s", args)
    # Configure networking
    net.init()
    if not args.offline:
        probing_url = cfg.opts("system", "probing.url", default_value="https://github.com", mandatory=False)
        if not net.has_internet_connection(probing_url):
            console.warn("No Internet connection detected. Automatic download of track data sets etc. is disabled.", logger=logger)
            cfg.add(config.Scope.applicationOverride, "system", "offline.mode", True)
        else:
            logger.info("Detected a working Internet connection.")

    result = dispatch_sub_command(arg_parser, args, cfg)

    end = time.time()
    if result == ExitStatus.SUCCESSFUL:
        console.println("")
        console.info("SUCCESS (took %d seconds)" % (end - start), overline="-", underline="-")
    elif result == ExitStatus.INTERRUPTED:
        console.println("")
        console.info("ABORTED (took %d seconds)" % (end - start), overline="-", underline="-")
        sys.exit(130)
    elif result == ExitStatus.ERROR:
        console.println("")
        console.info("FAILURE (took %d seconds)" % (end - start), overline="-", underline="-")
        sys.exit(64)
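
main() above compares the result of dispatch_sub_command() against ExitStatus members. The real enum is defined elsewhere in Rally, so the following is only a minimal sketch whose member names are inferred from the comparisons in this example:

from enum import Enum

class ExitStatus(Enum):
    # Member names taken from the checks in main(); the values here are
    # placeholders and carry no meaning of their own.
    SUCCESSFUL = 0
    INTERRUPTED = 1
    ERROR = 2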
Example 3
def main():
    check_python_version()
    log.remove_obsolete_default_log_config()
    log.install_default_log_config()
    log.configure_logging()
    console.init()

    parser = argparse.ArgumentParser(
        prog=PROGRAM_NAME,
        description=BANNER + "\n\n Rally daemon to support remote benchmarks",
        epilog="Find out more about Rally at %s" % console.format.link(DOC_LINK),
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--version',
                        action='version',
                        version="%(prog)s " + version.version())

    subparsers = parser.add_subparsers(title="subcommands",
                                       dest="subcommand",
                                       help="")
    subparsers.required = True

    start_command = subparsers.add_parser("start",
                                          help="Starts the Rally daemon")
    restart_command = subparsers.add_parser("restart",
                                            help="Restarts the Rally daemon")
    for p in [start_command, restart_command]:
        p.add_argument("--node-ip", required=True, help="The IP of this node.")
        p.add_argument("--coordinator-ip",
                       required=True,
                       help="The IP of the coordinator node.")
    subparsers.add_parser("stop", help="Stops the Rally daemon")
    subparsers.add_parser(
        "status", help="Shows the current status of the local Rally daemon")

    args = parser.parse_args()

    if args.subcommand == "start":
        start(args)
    elif args.subcommand == "stop":
        stop()
    elif args.subcommand == "status":
        status()
    elif args.subcommand == "restart":
        stop(raise_errors=False)
        start(args)
    else:
        raise exceptions.RallyError("Unknown subcommand [%s]" %
                                    args.subcommand)
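
If the parser construction above were factored out of main(), the subcommand wiring could be smoke-tested without touching any daemon state. The argument values below are made up for illustration:

# Hypothetical check: parsing a 'start' invocation yields the expected
# subcommand and the two required IP options.
args = parser.parse_args(["start", "--node-ip", "10.0.0.2",
                          "--coordinator-ip", "10.0.0.1"])
assert args.subcommand == "start"
assert args.coordinator_ip == "10.0.0.1"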
Example 4
def main():
    check_python_version()
    console.init()

    parser = argparse.ArgumentParser(prog=PROGRAM_NAME,
                                     description=BANNER + "\n\n Rally daemon to support remote benchmarks",
                                     epilog="Find out more about Rally at %s" % console.format.link(DOC_LINK),
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version="%(prog)s " + version.version())

    subparsers = parser.add_subparsers(
        title="subcommands",
        dest="subcommand",
        help="")
    subparsers.required = True

    start_command = subparsers.add_parser("start", help="Starts the Rally daemon")
    restart_command = subparsers.add_parser("restart", help="Restarts the Rally daemon")
    for p in [start_command, restart_command]:
        p.add_argument(
            "--node-ip",
            help="The IP of this node.")
        p.add_argument(
            "--coordinator-ip",
            help="The IP of the coordinator node."
        )
    subparsers.add_parser("stop", help="Stops the Rally daemon")

    args = parser.parse_args()

    if args.subcommand == "start":
        start(args)
    elif args.subcommand == "stop":
        stop()
    elif args.subcommand == "restart":
        stop(raise_errors=False)
        start(args)
    else:
        raise exceptions.RallyError("Unknown subcommand [%s]" % args.subcommand)
Example 5
def main():
    check_python_version()
    log.install_default_log_config()
    log.configure_logging()
    logger = logging.getLogger(__name__)
    start = time.time()

    # Early init of console output so we start to show everything consistently.
    console.init(quiet=False)

    arg_parser = create_arg_parser()
    args = arg_parser.parse_args()

    console.init(quiet=args.quiet)
    console.println(BANNER)

    cfg = config.Config(config_name=args.configuration_name)
    sub_command = derive_sub_command(args, cfg)
    ensure_configuration_present(cfg, args, sub_command)

    if args.effective_start_date:
        cfg.add(config.Scope.application, "system", "time.start",
                args.effective_start_date)
        cfg.add(config.Scope.application, "system", "time.start.user_provided",
                True)
    else:
        cfg.add(config.Scope.application, "system", "time.start",
                datetime.datetime.utcnow())
        cfg.add(config.Scope.application, "system", "time.start.user_provided",
                False)

    cfg.add(config.Scope.applicationOverride, "system", "trial.id",
            str(uuid.uuid4()))
    cfg.add(config.Scope.applicationOverride, "system", "quiet.mode",
            args.quiet)
    cfg.add(config.Scope.applicationOverride, "system", "offline.mode",
            args.offline)

    # Local config per node
    cfg.add(config.Scope.application, "node", "rally.root", paths.rally_root())
    cfg.add(config.Scope.application, "node", "rally.cwd", os.getcwd())

    cfg.add(config.Scope.applicationOverride, "mechanic", "source.revision",
            args.revision)
    if args.distribution_version:
        cfg.add(config.Scope.applicationOverride, "mechanic",
                "distribution.version", args.distribution_version)
    cfg.add(config.Scope.applicationOverride, "mechanic",
            "distribution.repository", args.distribution_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.names",
            opts.csv_to_list(args.car))
    if args.team_path:
        cfg.add(config.Scope.applicationOverride, "mechanic", "team.path",
                os.path.abspath(io.normalize_path(args.team_path)))
        cfg.add(config.Scope.applicationOverride, "mechanic",
                "repository.name", None)
    else:
        cfg.add(config.Scope.applicationOverride, "mechanic",
                "repository.name", args.team_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.plugins",
            opts.csv_to_list(args.elasticsearch_plugins))
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.params",
            opts.to_dict(args.car_params))
    cfg.add(config.Scope.applicationOverride, "mechanic", "plugin.params",
            opts.to_dict(args.plugin_params))
    if args.keep_cluster_running:
        cfg.add(config.Scope.applicationOverride, "mechanic", "keep.running",
                True)
        # force-preserve the cluster nodes.
        cfg.add(config.Scope.applicationOverride, "mechanic",
                "preserve.install", True)
    else:
        cfg.add(config.Scope.applicationOverride, "mechanic", "keep.running",
                False)
        cfg.add(config.Scope.applicationOverride, "mechanic",
                "preserve.install", convert.to_bool(args.preserve_install))
    cfg.add(config.Scope.applicationOverride, "mechanic", "runtime.jdk",
            args.runtime_jdk)
    cfg.add(config.Scope.applicationOverride, "mechanic", "telemetry.devices",
            opts.csv_to_list(args.telemetry))
    cfg.add(config.Scope.applicationOverride, "mechanic", "telemetry.params",
            opts.to_dict(args.telemetry_params))

    cfg.add(config.Scope.applicationOverride, "race", "pipeline",
            args.pipeline)
    cfg.add(config.Scope.applicationOverride, "race", "laps", args.laps)
    cfg.add(config.Scope.applicationOverride, "race", "user.tag",
            args.user_tag)

    # We can assume here that if a track-path is given, the user did not specify a repository either (although argparse sets it to
    # its default value)
    if args.track_path:
        cfg.add(config.Scope.applicationOverride, "track", "track.path",
                os.path.abspath(io.normalize_path(args.track_path)))
        cfg.add(config.Scope.applicationOverride, "track", "repository.name",
                None)
        if args.track:
            # stay as close as possible to argparse errors although we have a custom validation.
            arg_parser.error(
                "argument --track not allowed with argument --track-path")
        # cfg.add(config.Scope.applicationOverride, "track", "track.name", None)
    else:
        # cfg.add(config.Scope.applicationOverride, "track", "track.path", None)
        cfg.add(config.Scope.applicationOverride, "track", "repository.name",
                args.track_repository)
        # set the default programmatically because we need to determine whether the user has provided a value
        chosen_track = args.track if args.track else "geonames"
        cfg.add(config.Scope.applicationOverride, "track", "track.name",
                chosen_track)

    cfg.add(config.Scope.applicationOverride, "track", "params",
            opts.to_dict(args.track_params))
    cfg.add(config.Scope.applicationOverride, "track", "challenge.name",
            args.challenge)
    cfg.add(config.Scope.applicationOverride, "track", "include.tasks",
            opts.csv_to_list(args.include_tasks))
    cfg.add(config.Scope.applicationOverride, "track", "test.mode.enabled",
            args.test_mode)

    cfg.add(config.Scope.applicationOverride, "reporting", "format",
            args.report_format)
    cfg.add(config.Scope.applicationOverride, "reporting", "values",
            args.show_in_report)
    cfg.add(config.Scope.applicationOverride, "reporting", "output.path",
            args.report_file)
    if sub_command == "compare":
        cfg.add(config.Scope.applicationOverride, "reporting",
                "baseline.timestamp", args.baseline)
        cfg.add(config.Scope.applicationOverride, "reporting",
                "contender.timestamp", args.contender)
    if sub_command == "generate":
        cfg.add(config.Scope.applicationOverride, "generator", "chart.type",
                args.chart_type)
        cfg.add(config.Scope.applicationOverride, "generator", "output.path",
                args.output_path)

        if args.chart_spec_path and (args.track or args.challenge or args.car
                                     or args.node_count):
            console.println(
                "You need to specify either --chart-spec-path or --track, --challenge, --car and "
                "--node-count but not both.")
            exit(1)
        if args.chart_spec_path:
            cfg.add(config.Scope.applicationOverride, "generator",
                    "chart.spec.path", args.chart_spec_path)
        else:
            # other options are stored elsewhere already
            cfg.add(config.Scope.applicationOverride, "generator",
                    "node.count", args.node_count)

    cfg.add(config.Scope.applicationOverride, "driver", "profiling",
            args.enable_driver_profiling)
    cfg.add(config.Scope.applicationOverride, "driver", "on.error",
            args.on_error)
    cfg.add(config.Scope.applicationOverride, "driver", "load_driver_hosts",
            opts.csv_to_list(args.load_driver_hosts))
    if sub_command != "list":
        # Also needed by mechanic (-> telemetry) - duplicate by module?
        target_hosts = opts.TargetHosts(args.target_hosts)
        cfg.add(config.Scope.applicationOverride, "client", "hosts",
                target_hosts)
        client_options = opts.ClientOptions(args.client_options,
                                            target_hosts=target_hosts)
        cfg.add(config.Scope.applicationOverride, "client", "options",
                client_options)
        if "timeout" not in client_options.default:
            console.info(
                "You did not provide an explicit timeout in the client options. Assuming default of 10 seconds."
            )
        if list(target_hosts.all_hosts) != list(client_options.all_client_options):
            console.println(
                "--target-hosts and --client-options must define the same keys for multi cluster setups."
            )
            exit(1)
    # split by component?
    if sub_command == "list":
        cfg.add(config.Scope.applicationOverride, "system",
                "list.config.option", args.configuration)
        cfg.add(config.Scope.applicationOverride, "system",
                "list.races.max_results", args.limit)

    logger.info("OS [%s]", str(os.uname()))
    logger.info("Python [%s]", str(sys.implementation))
    logger.info("Rally version [%s]", version.version())
    logger.info("Command line arguments: %s", args)
    # Configure networking
    net.init()
    if not args.offline:
        if not net.has_internet_connection():
            console.warn(
                "No Internet connection detected. Automatic download of track data sets etc. is disabled.",
                logger=logger)
            cfg.add(config.Scope.applicationOverride, "system", "offline.mode",
                    True)
        else:
            logger.info("Detected a working Internet connection.")

    success = dispatch_sub_command(cfg, sub_command)

    end = time.time()
    if success:
        console.println("")
        console.info("SUCCESS (took %d seconds)" % (end - start),
                     overline="-",
                     underline="-")
    else:
        console.println("")
        console.info("FAILURE (took %d seconds)" % (end - start),
                     overline="-",
                     underline="-")
        sys.exit(64)
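
The wiring above leans heavily on two small conversion helpers from Rally's opts module. The stand-ins below are simplified sketches with the semantics implied by their call sites, not the real implementations:

def csv_to_list(csv):
    # "a, b ,c" -> ["a", "b", "c"]; empty or None input -> []
    return [s.strip() for s in csv.split(",")] if csv else []

def to_dict(arg):
    # "k1:v1,k2:v2" -> {"k1": "v1", "k2": "v2"}
    return dict(kv.split(":", 1) for kv in csv_to_list(arg))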
Example 6
def create_arg_parser():
    def positive_number(v):
        value = int(v)
        if value <= 0:
            raise argparse.ArgumentTypeError("must be positive but was %s" %
                                             value)
        return value

    # try to preload configurable defaults, but this does not work together with `--configuration-name` (which is undocumented anyway)
    cfg = config.Config()
    if cfg.config_present():
        cfg.load_config()
        preserve_install = cfg.opts("defaults",
                                    "preserve_benchmark_candidate",
                                    default_value=False,
                                    mandatory=False)
    else:
        preserve_install = False

    parser = argparse.ArgumentParser(
        prog=PROGRAM_NAME,
        description=BANNER + "\n\n You know, for benchmarking Elasticsearch.",
        epilog="Find out more about Rally at %s" % console.format.link(DOC_LINK),
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--version',
                        action='version',
                        version="%(prog)s " + version.version())

    subparsers = parser.add_subparsers(title="subcommands",
                                       dest="subcommand",
                                       help="")

    race_parser = subparsers.add_parser(
        "race",
        help="Run the benchmarking pipeline. This sub-command should typically be used.")
    # change in favor of "list telemetry", "list tracks", "list pipelines"
    list_parser = subparsers.add_parser("list",
                                        help="List configuration options")
    list_parser.add_argument(
        "configuration",
        metavar="configuration",
        help="The configuration for which Rally should show the available options. "
             "Possible values are: telemetry, tracks, pipelines, races, cars, elasticsearch-plugins",
        choices=["telemetry", "tracks", "pipelines", "races", "cars", "elasticsearch-plugins"])
    list_parser.add_argument(
        "--limit",
        help="Limit the number of search results for recent races (default: 10).",
        default=10,
    )

    generate_parser = subparsers.add_parser("generate",
                                            help="Generate artifacts")
    generate_parser.add_argument(
        "artifact",
        metavar="artifact",
        help="The artifact to create. Possible values are: charts",
        choices=["charts"])
    # We allow to either have a chart-spec-path *or* define a chart-spec on the fly with track, challenge and car. Convincing
    # argparse to validate that everything is correct *might* be doable but it is simpler to just do this manually.
    generate_parser.add_argument(
        "--chart-spec-path",
        help="path to a JSON file containing all combinations of charts to generate")
    generate_parser.add_argument(
        "--track",
        help="define the track to use. List possible tracks with `%s list tracks` (default: geonames)." % PROGRAM_NAME
        # we set the default value later on because we need to determine whether the user has provided this value.
        # default="geonames"
    )
    generate_parser.add_argument(
        "--challenge",
        help="define the challenge to use. List possible challenges for tracks with `%s list tracks`" % PROGRAM_NAME)
    generate_parser.add_argument(
        "--car",
        help="define the car to use. List possible cars with `%s list cars` (default: defaults)." % PROGRAM_NAME)
    generate_parser.add_argument(
        "--node-count",
        type=positive_number,
        help="The number of Elasticsearch nodes to use in charts.")
    generate_parser.add_argument(
        "--chart-type",
        help="Chart type to generate (default: time-series).",
        choices=["time-series", "bar"],
        default="time-series")
    generate_parser.add_argument(
        "--quiet",
        help="suppress as much output as possible (default: false).",
        default=False,
        action="store_true")
    generate_parser.add_argument("--output-path",
                                 help="Output file name (default: stdout).",
                                 default=None)

    compare_parser = subparsers.add_parser("compare", help="Compare two races")
    compare_parser.add_argument(
        "--baseline",
        required=True,
        help="Race timestamp of the baseline (see %s list races)" %
        PROGRAM_NAME)
    compare_parser.add_argument(
        "--contender",
        required=True,
        help="Race timestamp of the contender (see %s list races)" %
        PROGRAM_NAME)
    compare_parser.add_argument(
        "--report-format",
        help="define the output format for the command line report (default: markdown).",
        choices=["markdown", "csv"],
        default="markdown")
    compare_parser.add_argument(
        "--report-file",
        help="write the command line report also to the provided file",
        default="")

    config_parser = subparsers.add_parser(
        "configure", help="Write the configuration file or reconfigure Rally")
    for p in [parser, config_parser]:
        p.add_argument(
            "--advanced-config",
            help="show additional configuration options (default: false)",
            default=False,
            action="store_true")
        p.add_argument(
            "--assume-defaults",
            help="Automatically accept all options with default values (default: false)",
            default=False,
            action="store_true")

    for p in [parser, list_parser, race_parser, generate_parser]:
        p.add_argument(
            "--distribution-version",
            help="define the version of the Elasticsearch distribution to download. "
                 "Check https://www.elastic.co/downloads/elasticsearch for released versions.",
            default="")
        p.add_argument("--runtime-jdk",
                       type=positive_number,
                       help="The major version of the runtime JDK to use.",
                       default=None)

        track_source_group = p.add_mutually_exclusive_group()
        track_source_group.add_argument(
            "--track-repository",
            help="define the repository from where Rally will load tracks (default: default).",
            # argparse is smart enough to use this default only if the user did not use --track-path and also did not specify anything
            default="default")
        track_source_group.add_argument("--track-path",
                                        help="define the path to a track")
        p.add_argument(
            "--team-repository",
            help="define the repository from where Rally will load teams and cars (default: default).",
            default="default")
        p.add_argument(
            "--offline",
            help="assume that Rally has no connection to the Internet (default: false)",
            default=False,
            action="store_true")

    for p in [parser, race_parser]:
        p.add_argument(
            "--pipeline",
            help="select the pipeline to run.",
            # the default will be dynamically derived by racecontrol based on the presence / absence of other command line options
            default="")
        p.add_argument(
            "--revision",
            help="define the source code revision for building the benchmark candidate. 'current' uses the source tree as is,"
                 " 'latest' fetches the latest version on master. It is also possible to specify a commit id or a timestamp."
                 " The timestamp must be specified as: \"@ts\" where \"ts\" must be a valid ISO 8601 timestamp, "
                 "e.g. \"@2013-07-27T10:37:00Z\" (default: current).",
            default="current")  # optimized for local usage, don't fetch sources
        p.add_argument(
            "--track",
            help="define the track to use. List possible tracks with `%s list tracks` (default: geonames)." % PROGRAM_NAME
            # we set the default value later on because we need to determine whether the user has provided this value.
            # default="geonames"
        )
        p.add_argument(
            "--track-params",
            help="define a comma-separated list of key:value pairs that are injected verbatim to the track as variables",
            default="")
        p.add_argument(
            "--challenge",
            help="define the challenge to use. List possible challenges for tracks with `%s list tracks`" % PROGRAM_NAME)
        p.add_argument(
            "--team-path",
            help="define the path to the car and plugin configurations to use.")
        p.add_argument(
            "--car",
            help="define the car to use. List possible cars with `%s list cars` (default: defaults)." % PROGRAM_NAME,
            default="defaults")  # optimized for local usage
        p.add_argument(
            "--car-params",
            help="define a comma-separated list of key:value pairs that are injected verbatim as variables for the car",
            default="")
        p.add_argument(
            "--elasticsearch-plugins",
            help="define the Elasticsearch plugins to install. (default: install no plugins).",
            default="")
        p.add_argument(
            "--plugin-params",
            help="define a comma-separated list of key:value pairs that are injected verbatim to all plugins as variables",
            default="")
        p.add_argument(
            "--target-hosts",
            help="define a comma-separated list of host:port pairs which should be targeted if using the pipeline 'benchmark-only' "
                 "(default: localhost:9200).",
            default="")  # actually the default is pipeline specific and it is set later
        p.add_argument(
            "--load-driver-hosts",
            help="define a comma-separated list of hosts which should generate load (default: localhost).",
            default="localhost")
        p.add_argument(
            "--laps",
            type=positive_number,
            help="number of laps that the benchmark should run (default: 1).",
            default=1)
        p.add_argument(
            "--client-options",
            help="define a comma-separated list of client options to use. The options will be passed to the Elasticsearch Python client "
                 "(default: {}).".format(opts.ClientOptions.DEFAULT_CLIENT_OPTIONS),
            default=opts.ClientOptions.DEFAULT_CLIENT_OPTIONS)
        p.add_argument(
            "--on-error",
            choices=["continue", "abort"],
            help="Either 'continue' or 'abort' when Rally gets an error response (default: continue)",
            default="continue")
        p.add_argument(
            "--telemetry",
            help="enable the provided telemetry devices, provided as a comma-separated list. List possible telemetry devices "
                 "with `%s list telemetry`" % PROGRAM_NAME,
            default="")
        p.add_argument(
            "--telemetry-params",
            help="define a comma-separated list of key:value pairs that are injected verbatim to the telemetry devices as parameters",
            default="")
        p.add_argument(
            "--distribution-repository",
            help="define the repository from where the Elasticsearch distribution should be downloaded (default: release).",
            default="release")
        p.add_argument(
            "--include-tasks",
            help="defines a comma-separated list of tasks to run. By default all tasks of a challenge are run.")
        p.add_argument(
            "--user-tag",
            help="define a user-specific key-value pair (separated by ':'). It is added to each metric record as meta info. "
                 "Example: intention:baseline-ticket-12345",
            default="")
        p.add_argument(
            "--report-format",
            help="define the output format for the command line report (default: markdown).",
            choices=["markdown", "csv"],
            default="markdown")
        p.add_argument(
            "--show-in-report",
            help="define which values are shown in the summary report (default: available).",
            choices=["available", "all-percentiles", "all"],
            default="available")
        p.add_argument(
            "--report-file",
            help="write the command line report also to the provided file",
            default="")
        p.add_argument(
            "--quiet",
            help="suppress as much output as possible (default: false).",
            default=False,
            action="store_true")
        p.add_argument(
            "--preserve-install",
            help="keep the benchmark candidate and its index. (default: %s)" % str(preserve_install).lower(),
            default=preserve_install)
        p.add_argument(
            "--test-mode",
            help="runs the given track in 'test mode'. Meant to check a track for errors but not for real benchmarks (default: false).",
            default=False,
            action="store_true")
        p.add_argument(
            "--enable-driver-profiling",
            help="Enables a profiler for analyzing the performance of calls in Rally's driver (default: false)",
            default=False,
            action="store_true")

    ###############################################################################
    #
    # The options below are undocumented and can be removed or changed at any time.
    #
    ###############################################################################
    for p in [parser, race_parser]:
        # This option is intended to tell Rally to assume a different start date than 'now'. This is effectively just useful for things like
        # backtesting or a benchmark run across environments (think: comparison of EC2 and bare metal) but never for the typical user.
        p.add_argument(
            "--effective-start-date",
            help=argparse.SUPPRESS,
            type=lambda s: datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S"),
            default=None)
        # keeps the cluster running after the benchmark, only relevant if Rally provisions the cluster
        p.add_argument("--keep-cluster-running",
                       help=argparse.SUPPRESS,
                       action="store_true",
                       default=False)

    for p in [parser, config_parser, list_parser, race_parser, compare_parser]:
        # This option is needed to support a separate configuration for the integration tests on the same machine
        p.add_argument("--configuration-name",
                       help=argparse.SUPPRESS,
                       default=None)

    return parser
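
The positive_number helper defined at the top of create_arg_parser() doubles as an argparse type: argparse calls it with the raw option string and turns an ArgumentTypeError into a usage error. A small demonstration, assuming the helper were lifted out of the nested scope:

import argparse

demo_parser = argparse.ArgumentParser()
demo_parser.add_argument("--laps", type=positive_number, default=1)
print(demo_parser.parse_args(["--laps", "3"]).laps)  # -> 3
# demo_parser.parse_args(["--laps", "0"]) exits with:
# "argument --laps: must be positive but was 0"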
Example 7
def create_arg_parser():
    def positive_number(v):
        value = int(v)
        if value <= 0:
            raise argparse.ArgumentTypeError("must be positive but was {}".format(value))
        return value

    def non_empty_list(arg):
        lst = opts.csv_to_list(arg)
        if len(lst) < 1:
            raise argparse.ArgumentError(argument=None, message="At least one argument required!")
        return lst

    def runtime_jdk(v):
        if v == "bundled":
            return v
        else:
            try:
                return positive_number(v)
            except argparse.ArgumentTypeError:
                raise argparse.ArgumentTypeError("must be a positive number or 'bundled' but was {}".format(v))

    # try to preload configurable defaults, but this does not work together with `--configuration-name` (which is undocumented anyway)
    cfg = config.Config()
    if cfg.config_present():
        cfg.load_config()
        preserve_install = cfg.opts("defaults", "preserve_benchmark_candidate", default_value=False, mandatory=False)
    else:
        preserve_install = False

    parser = argparse.ArgumentParser(prog=PROGRAM_NAME,
                                     description=BANNER + "\n\n You Know, for Benchmarking Elasticsearch.",
                                     epilog="Find out more about Rally at {}".format(console.format.link(doc_link())),
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version="%(prog)s " + version.version())

    subparsers = parser.add_subparsers(
        title="subcommands",
        dest="subcommand",
        help="")

    race_parser = subparsers.add_parser("race", help="Run the benchmarking pipeline. This sub-command should typically be used.")
    # change in favor of "list telemetry", "list tracks", "list pipelines"
    list_parser = subparsers.add_parser("list", help="List configuration options")
    list_parser.add_argument(
        "configuration",
        metavar="configuration",
        help="The configuration for which Rally should show the available options. "
             "Possible values are: telemetry, tracks, pipelines, races, cars, elasticsearch-plugins",
        choices=["telemetry", "tracks", "pipelines", "races", "cars", "elasticsearch-plugins"])
    list_parser.add_argument(
        "--limit",
        help="Limit the number of search results for recent races (default: 10).",
        default=10,
    )

    info_parser = subparsers.add_parser("info", help="Show info about a track")
    info_track_source_group = info_parser.add_mutually_exclusive_group()
    info_track_source_group.add_argument(
        "--track-repository",
        help="Define the repository from where Rally will load tracks (default: default).",
        # argparse is smart enough to use this default only if the user did not use --track-path and also did not specify anything
        default="default"
    )
    info_track_source_group.add_argument(
        "--track-path",
        help="Define the path to a track.")

    info_parser.add_argument(
        "--track",
        help="Define the track to use. List possible tracks with `{} list tracks` (default: geonames).".format(PROGRAM_NAME)
        # we set the default value later on because we need to determine whether the user has provided this value.
        # default="geonames"
    )
    info_parser.add_argument(
        "--track-params",
        help="Define a comma-separated list of key:value pairs that are injected verbatim to the track as variables.",
        default=""
    )
    info_parser.add_argument(
        "--challenge",
        help="Define the challenge to use. List possible challenges for tracks with `{} list tracks`.".format(PROGRAM_NAME)
    )
    info_task_filter_group = info_parser.add_mutually_exclusive_group()
    info_task_filter_group.add_argument(
        "--include-tasks",
        help="Defines a comma-separated list of tasks to run. By default all tasks of a challenge are run.")
    info_task_filter_group.add_argument(
        "--exclude-tasks",
        help="Defines a comma-separated list of tasks not to run. By default all tasks of a challenge are run.")

    create_track_parser = subparsers.add_parser("create-track", help="Create a Rally track from existing data")
    create_track_parser.add_argument(
        "--track",
        required=True,
        help="Name of the generated track")
    create_track_parser.add_argument(
        "--indices",
        type=non_empty_list,
        required=True,
        help="Comma-separated list of indices to include in the track")
    create_track_parser.add_argument(
        "--target-hosts",
        default="",
        required=True,
        help="Comma-separated list of host:port pairs which should be targeted")
    create_track_parser.add_argument(
        "--client-options",
        default=opts.ClientOptions.DEFAULT_CLIENT_OPTIONS,
        help=f"Comma-separated list of client options to use. (default: {opts.ClientOptions.DEFAULT_CLIENT_OPTIONS})")
    create_track_parser.add_argument(
        "--output-path",
        default=os.path.join(os.getcwd(), "tracks"),
        help="Track output directory (default: tracks/)")

    generate_parser = subparsers.add_parser("generate", help="Generate artifacts")
    generate_parser.add_argument(
        "artifact",
        metavar="artifact",
        help="The artifact to create. Possible values are: charts",
        choices=["charts"])
    # We allow to either have a chart-spec-path *or* define a chart-spec on the fly with track, challenge and car. Convincing
    # argparse to validate that everything is correct *might* be doable but it is simpler to just do this manually.
    generate_parser.add_argument(
        "--chart-spec-path",
        help="Path to a JSON file(s) containing all combinations of charts to generate. Wildcard patterns can be used to specify "
             "multiple files.")
    generate_parser.add_argument(
        "--track",
        help="Define the track to use. List possible tracks with `%s list tracks` (default: geonames)." % PROGRAM_NAME
        # we set the default value later on because we need to determine whether the user has provided this value.
        # default="geonames"
    )
    generate_parser.add_argument(
        "--challenge",
        help="Define the challenge to use. List possible challenges for tracks with `%s list tracks`." % PROGRAM_NAME)
    generate_parser.add_argument(
        "--car",
        help="Define the car to use. List possible cars with `%s list cars` (default: defaults)." % PROGRAM_NAME)
    generate_parser.add_argument(
        "--node-count",
        type=positive_number,
        help="The number of Elasticsearch nodes to use in charts.")
    generate_parser.add_argument(
        "--chart-type",
        help="Chart type to generate (default: time-series).",
        choices=["time-series", "bar"],
        default="time-series")
    generate_parser.add_argument(
        "--output-path",
        help="Output file name (default: stdout).",
        default=None)

    compare_parser = subparsers.add_parser("compare", help="Compare two races")
    compare_parser.add_argument(
        "--baseline",
        required=True,
        help="Race ID of the baseline (see %s list races)." % PROGRAM_NAME)
    compare_parser.add_argument(
        "--contender",
        required=True,
        help="Race ID of the contender (see %s list races)." % PROGRAM_NAME)
    compare_parser.add_argument(
        "--report-format",
        help="Define the output format for the command line report (default: markdown).",
        choices=["markdown", "csv"],
        default="markdown")
    compare_parser.add_argument(
        "--report-file",
        help="Write the command line report also to the provided file.",
        default="")

    config_parser = subparsers.add_parser("configure", help="Write the configuration file or reconfigure Rally")
    for p in [parser, config_parser]:
        p.add_argument(
            "--advanced-config",
            help="Show additional configuration options (default: false).",
            default=False,
            action="store_true")
        p.add_argument(
            "--assume-defaults",
            help="Automatically accept all options with default values (default: false).",
            default=False,
            action="store_true")

    download_parser = subparsers.add_parser("download", help="Downloads an artifact")
    download_parser.add_argument(
        "--team-repository",
        help="Define the repository from where Rally will load teams and cars (default: default).",
        default="default")
    download_parser.add_argument(
        "--team-path",
        help="Define the path to the car and plugin configurations to use.")
    download_parser.add_argument(
        "--distribution-version",
        help="Define the version of the Elasticsearch distribution to download. "
             "Check https://www.elastic.co/downloads/elasticsearch for released versions.",
        default="")
    download_parser.add_argument(
        "--distribution-repository",
        help="Define the repository from where the Elasticsearch distribution should be downloaded (default: release).",
        default="release")
    download_parser.add_argument(
        "--car",
        help="Define the car to use. List possible cars with `%s list cars` (default: defaults)." % PROGRAM_NAME,
        default="defaults")  # optimized for local usage
    download_parser.add_argument(
        "--car-params",
        help="Define a comma-separated list of key:value pairs that are injected verbatim as variables for the car.",
        default=""
    )
    download_parser.add_argument(
        "--target-os",
        help="The name of the target operating system for which an artifact should be downloaded (default: current OS)",
    )
    download_parser.add_argument(
        "--target-arch",
        help="The name of the CPU architecture for which an artifact should be downloaded (default: current architecture)",
    )

    install_parser = subparsers.add_parser("install", help="Installs an Elasticsearch node locally")
    install_parser.add_argument(
        "--revision",
        help="Define the source code revision for building the benchmark candidate. 'current' uses the source tree as is,"
             " 'latest' fetches the latest version on master. It is also possible to specify a commit id or a timestamp."
             " The timestamp must be specified as: \"@ts\" where \"ts\" must be a valid ISO 8601 timestamp, "
             "e.g. \"@2013-07-27T10:37:00Z\" (default: current).",
        default="current")  # optimized for local usage, don't fetch sources
    install_parser.add_argument(
        "--skip-build",
        help="Whether Rally should skip rebuilding Elasticsearch (default: false).",
        default=False,
        action="store_true")
    # Intentionally undocumented as we do not consider Docker a fully supported option.
    install_parser.add_argument(
        "--build-type",
        help=argparse.SUPPRESS,
        choices=["tar", "docker"],
        default="tar")
    install_parser.add_argument(
        "--team-repository",
        help="Define the repository from where Rally will load teams and cars (default: default).",
        default="default")
    install_parser.add_argument(
        "--team-revision",
        help="Define a specific revision in the team repository that Rally should use.",
        default=None)
    install_parser.add_argument(
        "--team-path",
        help="Define the path to the car and plugin configurations to use.")
    install_parser.add_argument(
        "--runtime-jdk",
        type=runtime_jdk,
        help="The major version of the runtime JDK to use during installation.",
        default=None)
    install_parser.add_argument(
        "--distribution-repository",
        help="Define the repository from where the Elasticsearch distribution should be downloaded (default: release).",
        default="release")
    install_parser.add_argument(
        "--distribution-version",
        help="Define the version of the Elasticsearch distribution to download. "
             "Check https://www.elastic.co/downloads/elasticsearch for released versions.",
        default="")
    install_parser.add_argument(
        "--car",
        help="Define the car to use. List possible cars with `%s list cars` (default: defaults)." % PROGRAM_NAME,
        default="defaults")  # optimized for local usage
    install_parser.add_argument(
        "--car-params",
        help="Define a comma-separated list of key:value pairs that are injected verbatim as variables for the car.",
        default=""
    )
    install_parser.add_argument(
        "--elasticsearch-plugins",
        help="Define the Elasticsearch plugins to install. (default: install no plugins).",
        default="")
    install_parser.add_argument(
        "--plugin-params",
        help="Define a comma-separated list of key:value pairs that are injected verbatim to all plugins as variables.",
        default=""
    )
    install_parser.add_argument(
        "--network-host",
        help="The IP address to bind to and publish",
        default="127.0.0.1"
    )
    install_parser.add_argument(
        "--http-port",
        help="The port to expose for HTTP traffic",
        default="39200"
    )
    install_parser.add_argument(
        "--node-name",
        help="The name of this Elasticsearch node",
        default="rally-node-0"
    )
    install_parser.add_argument(
        "--master-nodes",
        help="A comma-separated list of the initial master node names",
        default=""
    )
    install_parser.add_argument(
        "--seed-hosts",
        help="A comma-separated list of the initial seed host IPs",
        default=""
    )

    start_parser = subparsers.add_parser("start", help="Starts an Elasticsearch node locally")
    start_parser.add_argument(
        "--installation-id",
        required=True,
        help="The id of the installation to start",
        # the default will be dynamically derived by racecontrol based on the presence / absence of other command line options
        default="")
    start_parser.add_argument(
        "--race-id",
        required=True,
        help="Define a unique id for this race.",
        default="")
    start_parser.add_argument(
        "--runtime-jdk",
        type=runtime_jdk,
        help="The major version of the runtime JDK to use.",
        default=None)
    start_parser.add_argument(
        "--telemetry",
        help="Enable the provided telemetry devices, provided as a comma-separated list. List possible telemetry devices "
             "with `%s list telemetry`." % PROGRAM_NAME,
        default="")
    start_parser.add_argument(
        "--telemetry-params",
        help="Define a comma-separated list of key:value pairs that are injected verbatim to the telemetry devices as parameters.",
        default=""
    )

    stop_parser = subparsers.add_parser("stop", help="Stops an Elasticsearch node locally")
    stop_parser.add_argument(
        "--installation-id",
        required=True,
        help="The id of the installation to stop",
        # the default will be dynamically derived by racecontrol based on the presence / absence of other command line options
        default="")
    stop_parser.add_argument(
        "--preserve-install",
        help="Keep the benchmark candidate and its index. (default: %s)." % str(preserve_install).lower(),
        default=preserve_install,
        action="store_true")

    for p in [parser, list_parser, race_parser, generate_parser]:
        p.add_argument(
            "--distribution-version",
            help="Define the version of the Elasticsearch distribution to download. "
                 "Check https://www.elastic.co/downloads/elasticsearch for released versions.",
            default="")
        p.add_argument(
            "--runtime-jdk",
            type=runtime_jdk,
            help="The major version of the runtime JDK to use.",
            default=None)

        track_source_group = p.add_mutually_exclusive_group()
        track_source_group.add_argument(
            "--track-repository",
            help="Define the repository from where Rally will load tracks (default: default).",
            # argparse is smart enough to use this default only if the user did not use --track-path and also did not specify anything
            default="default"
        )
        track_source_group.add_argument(
            "--track-path",
            help="Define the path to a track.")
        p.add_argument(
            "--track-revision",
            help="Define a specific revision in the track repository that Rally should use.",
            default=None)
        p.add_argument(
            "--team-repository",
            help="Define the repository from where Rally will load teams and cars (default: default).",
            default="default")
        p.add_argument(
            "--team-revision",
            help="Define a specific revision in the team repository that Rally should use.",
            default=None)
        p.add_argument(
            "--offline",
            help="Assume that Rally has no connection to the Internet (default: false).",
            default=False,
            action="store_true")

    for p in [parser, race_parser]:
        p.add_argument(
            "--race-id",
            help="Define a unique id for this race.",
            default=str(uuid.uuid4()))
        p.add_argument(
            "--pipeline",
            help="Select the pipeline to run.",
            # the default will be dynamically derived by racecontrol based on the presence / absence of other command line options
            default="")
        p.add_argument(
            "--revision",
            help="Define the source code revision for building the benchmark candidate. 'current' uses the source tree as is,"
                 " 'latest' fetches the latest version on master. It is also possible to specify a commit id or a timestamp."
                 " The timestamp must be specified as: \"@ts\" where \"ts\" must be a valid ISO 8601 timestamp, "
                 "e.g. \"@2013-07-27T10:37:00Z\" (default: current).",
            default="current")  # optimized for local usage, don't fetch sources
        p.add_argument(
            "--track",
            help="Define the track to use. List possible tracks with `%s list tracks` (default: geonames)." % PROGRAM_NAME
            # we set the default value later on because we need to determine whether the user has provided this value.
            # default="geonames"
        )
        p.add_argument(
            "--track-params",
            help="Define a comma-separated list of key:value pairs that are injected verbatim to the track as variables.",
            default=""
        )
        p.add_argument(
            "--challenge",
            help="Define the challenge to use. List possible challenges for tracks with `%s list tracks`." % PROGRAM_NAME)
        p.add_argument(
            "--team-path",
            help="Define the path to the car and plugin configurations to use.")
        p.add_argument(
            "--car",
            help="Define the car to use. List possible cars with `%s list cars` (default: defaults)." % PROGRAM_NAME,
            default="defaults")  # optimized for local usage
        p.add_argument(
            "--car-params",
            help="Define a comma-separated list of key:value pairs that are injected verbatim as variables for the car.",
            default=""
        )
        p.add_argument(
            "--elasticsearch-plugins",
            help="Define the Elasticsearch plugins to install. (default: install no plugins).",
            default="")
        p.add_argument(
            "--plugin-params",
            help="Define a comma-separated list of key:value pairs that are injected verbatim to all plugins as variables.",
            default=""
        )
        p.add_argument(
            "--target-hosts",
            help="Define a comma-separated list of host:port pairs which should be targeted if using the pipeline 'benchmark-only' "
                 "(default: localhost:9200).",
            default="")  # actually the default is pipeline specific and it is set later
        p.add_argument(
            "--load-driver-hosts",
            help="Define a comma-separated list of hosts which should generate load (default: localhost).",
            default="localhost")
        p.add_argument(
            "--client-options",
            help="Define a comma-separated list of client options to use. The options will be passed to the Elasticsearch Python client "
                 "(default: {}).".format(opts.ClientOptions.DEFAULT_CLIENT_OPTIONS),
            default=opts.ClientOptions.DEFAULT_CLIENT_OPTIONS)
        p.add_argument("--on-error",
                       choices=["continue", "continue-on-non-fatal", "abort"],
                       help="Controls how Rally behaves on response errors (default: continue-on-non-fatal).",
                       default="continue-on-non-fatal")
        p.add_argument(
            "--telemetry",
            help="Enable the provided telemetry devices, provided as a comma-separated list. List possible telemetry devices "
                 "with `%s list telemetry`." % PROGRAM_NAME,
            default="")
        p.add_argument(
            "--telemetry-params",
            help="Define a comma-separated list of key:value pairs that are injected verbatim to the telemetry devices as parameters.",
            default=""
        )
        p.add_argument(
            "--distribution-repository",
            help="Define the repository from where the Elasticsearch distribution should be downloaded (default: release).",
            default="release")

        task_filter_group = p.add_mutually_exclusive_group()
        task_filter_group.add_argument(
            "--include-tasks",
            help="Defines a comma-separated list of tasks to run. By default all tasks of a challenge are run.")
        task_filter_group.add_argument(
            "--exclude-tasks",
            help="Defines a comma-separated list of tasks not to run. By default all tasks of a challenge are run.")
        p.add_argument(
            "--user-tag",
            help="Define a user-specific key-value pair (separated by ':'). It is added to each metric record as meta info. "
                 "Example: intention:baseline-ticket-12345",
            default="")
        p.add_argument(
            "--report-format",
            help="Define the output format for the command line report (default: markdown).",
            choices=["markdown", "csv"],
            default="markdown")
        p.add_argument(
            "--show-in-report",
            help="Define which values are shown in the summary report (default: available).",
            choices=["available", "all-percentiles", "all"],
            default="available")
        p.add_argument(
            "--report-file",
            help="Write the command line report also to the provided file.",
            default="")
        p.add_argument(
            "--preserve-install",
            help="Keep the benchmark candidate and its index. (default: %s)." % str(preserve_install).lower(),
            default=preserve_install,
            action="store_true")
        p.add_argument(
            "--test-mode",
            help="Runs the given track in 'test mode'. Meant to check a track for errors but not for real benchmarks (default: false).",
            default=False,
            action="store_true")
        p.add_argument(
            "--enable-driver-profiling",
            help="Enables a profiler for analyzing the performance of calls in Rally's driver (default: false).",
            default=False,
            action="store_true")

    ###############################################################################
    #
    # The options below are undocumented and can be removed or changed at any time.
    #
    ###############################################################################
    for p in [parser, race_parser]:
        # This option is intended to tell Rally to assume a different start date than 'now'. This is effectively just useful for things like
        # backtesting or a benchmark run across environments (think: comparison of EC2 and bare metal) but never for the typical user.
        p.add_argument(
            "--effective-start-date",
            help=argparse.SUPPRESS,
            type=lambda s: datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S"),
            default=None)
        # keeps the cluster running after the benchmark, only relevant if Rally provisions the cluster
        p.add_argument(
            "--keep-cluster-running",
            help=argparse.SUPPRESS,
            action="store_true",
            default=False)
        # skips checking that the REST API is available before proceeding with the benchmark
        p.add_argument(
            "--skip-rest-api-check",
            help=argparse.SUPPRESS,
            action="store_true",
            default=False)

    for p in [parser, config_parser, list_parser, race_parser, compare_parser, download_parser, install_parser,
              start_parser, stop_parser, info_parser, generate_parser, create_track_parser]:
        # This option is needed to support a separate configuration for the integration tests on the same machine
        p.add_argument(
            "--configuration-name",
            help=argparse.SUPPRESS,
            default=None)
        p.add_argument(
            "--quiet",
            help="Suppress as much as output as possible (default: false).",
            default=False,
            action="store_true")
        p.add_argument(
            "--kill-running-processes",
            action="store_true",
            default=False,
            help="If any processes is running, it is going to kill them and allow Rally to continue to run."
        )

    return parser
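
The --include-tasks/--exclude-tasks pair above is registered in a mutually exclusive group, so argparse itself rejects a command line that sets both. A minimal standalone sketch of that behavior (the prog name "demo" is made up for illustration):

import argparse

parser = argparse.ArgumentParser(prog="demo")
task_filter_group = parser.add_mutually_exclusive_group()
task_filter_group.add_argument("--include-tasks")
task_filter_group.add_argument("--exclude-tasks")

# One filter at a time is fine:
print(parser.parse_args(["--include-tasks", "index,force-merge"]))
# -> Namespace(exclude_tasks=None, include_tasks='index,force-merge')

# Passing both makes argparse exit with an error along the lines of:
# demo: error: argument --exclude-tasks: not allowed with argument --include-tasks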
Example #8
def parse_args():
    def positive_number(v):
        value = int(v)
        if value <= 0:
            raise argparse.ArgumentTypeError("must be positive but was %s" %
                                             value)
        return value

    # try to preload configurable defaults, but this does not work together with `--configuration-name` (which is undocumented anyway)
    cfg = config.Config()
    if cfg.config_present():
        cfg.load_config()
        preserve_install = cfg.opts("defaults",
                                    "preserve_benchmark_candidate",
                                    default_value=False,
                                    mandatory=False)
    else:
        preserve_install = False

    # workaround for http://bugs.python.org/issue13041
    #
    # Set a proper width (see argparse.HelpFormatter)
    try:
        int(os.environ["COLUMNS"])
    except (KeyError, ValueError):
        # noinspection PyBroadException
        try:
            os.environ['COLUMNS'] = str(shutil.get_terminal_size().columns)
        except BaseException:
            # don't fail if anything goes wrong here
            pass

    parser = argparse.ArgumentParser(
        prog=PROGRAM_NAME,
        description=BANNER + "\n\n You know for benchmarking Elasticsearch.",
        epilog="Find out more about Rally at %s" %
        console.format.link(DOC_LINK),
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--version',
                        action='version',
                        version="%(prog)s " + version.version())

    subparsers = parser.add_subparsers(title="subcommands",
                                       dest="subcommand",
                                       help="")

    race_parser = subparsers.add_parser(
        "race",
        help="Run the benchmarking pipeline. This is the sub-command you typically want to use.")
    # change in favor of "list telemetry", "list tracks", "list pipelines"
    list_parser = subparsers.add_parser("list",
                                        help="List configuration options")
    list_parser.add_argument(
        "configuration",
        metavar="configuration",
        help=
        "The configuration for which Rally should show the available options. "
        "Possible values are: telemetry, tracks, pipelines, races, cars, elasticsearch-plugins",
        choices=[
            "telemetry", "tracks", "pipelines", "races", "cars",
            "elasticsearch-plugins"
        ])
    list_parser.add_argument(
        "--limit",
        help=
        "Limit the number of search results for recent races (default: 10).",
        default=10,
    )

    compare_parser = subparsers.add_parser("compare", help="Compare two races")
    compare_parser.add_argument(
        "--baseline",
        help="Race timestamp of the baseline (see %s list races)" %
        PROGRAM_NAME,
        default="")
    compare_parser.add_argument(
        "--contender",
        help="Race timestamp of the contender (see %s list races)" %
        PROGRAM_NAME,
        default="")
    compare_parser.add_argument(
        "--report-format",
        help=
        "define the output format for the command line report (default: markdown).",
        choices=["markdown", "csv"],
        default="markdown")
    compare_parser.add_argument(
        "--report-file",
        help="write the command line report also to the provided file",
        default="")

    config_parser = subparsers.add_parser(
        "configure", help="Write the configuration file or reconfigure Rally")
    for p in [parser, config_parser]:
        p.add_argument(
            "--advanced-config",
            help="show additional configuration options (default: false)",
            default=False,
            action="store_true")
        p.add_argument(
            "--assume-defaults",
            help=
            "Automatically accept all options with default values (default: false)",
            default=False,
            action="store_true")

    for p in [parser, race_parser]:
        p.add_argument(
            "--pipeline",
            help="select the pipeline to run.",
            # the default will be dynamically derived by racecontrol based on the presence / absence of other command line options
            default="")
        p.add_argument(
            "--preserve-install",
            help="keep the benchmark candidate and its index. (default: %s)" %
            str(preserve_install).lower(),
            default=preserve_install)
        p.add_argument(
            "--telemetry",
            help=
            "enable the provided telemetry devices, provided as a comma-separated list. List possible telemetry devices "
            "with `%s list telemetry`" % PROGRAM_NAME,
            default="")
        p.add_argument(
            "--revision",
            help=
            "define the source code revision for building the benchmark candidate. 'current' uses the source tree as is,"
            " 'latest' fetches the latest version on master. It is also possible to specify a commit id or a timestamp."
            " The timestamp must be specified as: \"@ts\" where \"ts\" must be a valid ISO 8601 timestamp, "
            "e.g. \"@2013-07-27T10:37:00Z\" (default: current).",
            default="current"
        )  # optimized for local usage, don't fetch sources
        p.add_argument(
            "--track",
            help=
            "define the track to use. List possible tracks with `%s list tracks` (default: geonames)."
            % PROGRAM_NAME,
            default="geonames")
        p.add_argument(
            "--challenge",
            help=
            "define the challenge to use. List possible challenges for tracks with `%s list tracks`"
            % PROGRAM_NAME)
        p.add_argument(
            "--car",
            help=
            "define the car to use. List possible cars with `%s list cars` (default: defaults)."
            % PROGRAM_NAME,
            default="defaults")  # optimized for local usage
        p.add_argument(
            "--elasticsearch-plugins",
            help=
            "define the Elasticsearch plugins to install. (default: install no plugins).",
            default="")
        p.add_argument(
            "--target-hosts",
            help=
            "define a comma-separated list of host:port pairs which should be targeted iff using the pipeline 'benchmark-only' "
            "(default: localhost:9200).",
            default=""
        )  # actually the default is pipeline specific and it is set later
        p.add_argument(
            "--client-options",
            help=
            "define a comma-separated list of client options to use. The options will be passed to the Elasticsearch Python client "
            "(default: %s)." % DEFAULT_CLIENT_OPTIONS,
            default=DEFAULT_CLIENT_OPTIONS)
        p.add_argument(
            "--cluster-health",
            choices=["red", "yellow", "green"],
            help=
            "Expected cluster health at the beginning of the benchmark (default: green)",
            default="green")
        p.add_argument(
            "--user-tag",
            help=
            "define a user-specific key-value pair (separated by ':'). It is added to each metric record as meta info. "
            "Example: intention:baseline-ticket-12345",
            default="")
        p.add_argument(
            "--report-format",
            help=
            "define the output format for the command line report (default: markdown).",
            choices=["markdown", "csv"],
            default="markdown")
        p.add_argument(
            "--report-file",
            help="write the command line report also to the provided file",
            default="")
        p.add_argument(
            "--quiet",
            help="suppress as much as output as possible (default: false).",
            default=False,
            action="store_true")
        p.add_argument(
            "--laps",
            type=positive_number,
            help="number of laps that the benchmark should run (default: 1).",
            default=1)
        p.add_argument(
            "--test-mode",
            help=
            "runs the given track in 'test mode'. Meant to check a track for errors but not for real benchmarks (default: false).",
            default=False,
            action="store_true")

    for p in [parser, list_parser, race_parser]:
        p.add_argument(
            "--distribution-version",
            help=
            "define the version of the Elasticsearch distribution to download. "
            "Check https://www.elastic.co/downloads/elasticsearch for released versions.",
            default="")
        p.add_argument(
            "--distribution-repository",
            help=
            "define the repository from where the Elasticsearch distribution should be downloaded (default: release).",
            default="release")
        p.add_argument(
            "--track-repository",
            help=
            "define the repository from where Rally will load tracks (default: default).",
            default="default")
        p.add_argument(
            "--team-repository",
            help=
            "define the repository from where Rally will load teams and cars (default: default).",
            default="default")
        p.add_argument(
            "--offline",
            help=
            "assume that Rally has no connection to the Internet (default: false)",
            default=False,
            action="store_true")
        p.add_argument(
            "--enable-driver-profiling",
            help=
            "Enables a profiler for analyzing the performance of calls in Rally's driver (default: false)",
            default=False,
            action="store_true")

    ###############################################################################
    #
    # The options below are undocumented and can be removed or changed at any time.
    #
    ###############################################################################
    for p in [parser, race_parser]:
        # This option is intended to tell Rally to assume a different start date than 'now'. This is effectively just useful for things like
        # backtesting or a benchmark run across environments (think: comparison of EC2 and bare metal) but never for the typical user.
        p.add_argument(
            "--effective-start-date",
            help=argparse.SUPPRESS,
            type=lambda s: datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S"),
            default=None)
        # This is a highly experimental option and will likely be removed
        p.add_argument("--data-paths", help=argparse.SUPPRESS, default=None)
        p.add_argument("--override-src-dir",
                       help=argparse.SUPPRESS,
                       default=None)
        p.add_argument("--auto-manage-indices",
                       choices=["true", "false"],
                       help=argparse.SUPPRESS,
                       default=None)
        p.add_argument("--ignore-unknown-return-values",
                       help=argparse.SUPPRESS,
                       default=False,
                       action="store_true")

    for p in [parser, config_parser, list_parser, race_parser, compare_parser]:
        # This option is needed to support a separate configuration for the integration tests on the same machine
        p.add_argument("--configuration-name",
                       help=argparse.SUPPRESS,
                       default=None)
        p.add_argument("--logging",
                       choices=["file", "console"],
                       help=argparse.SUPPRESS,
                       default="file")

    return parser.parse_args()
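
The positive_number validator defined at the top of parse_args plugs into argparse's type machinery: argparse calls it with the raw string and turns any ArgumentTypeError into a standard usage error. A minimal standalone sketch (prog name "demo" is made up):

import argparse

def positive_number(v):
    value = int(v)
    if value <= 0:
        raise argparse.ArgumentTypeError("must be positive but was %s" % value)
    return value

ap = argparse.ArgumentParser(prog="demo")
ap.add_argument("--laps", type=positive_number, default=1)

print(ap.parse_args(["--laps", "3"]).laps)  # -> 3
# ap.parse_args(["--laps", "0"]) exits with:
# demo: error: argument --laps: must be positive but was 0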
Example #9
def main():
    check_python_version()

    start = time.time()

    # Early init of console output so we start to show everything consistently.
    console.init(quiet=False)
    # allow seeing a thread dump on SIGQUIT
    faulthandler.register(signal.SIGQUIT, file=sys.stderr)

    pre_configure_logging()
    args = parse_args()

    console.init(quiet=args.quiet)
    console.println(BANNER)

    cfg = config.Config(config_name=args.configuration_name)
    sub_command = derive_sub_command(args, cfg)
    ensure_configuration_present(cfg, args, sub_command)

    if args.effective_start_date:
        cfg.add(config.Scope.application, "system", "time.start",
                args.effective_start_date)
        cfg.add(config.Scope.application, "system", "time.start.user_provided",
                True)
    else:
        cfg.add(config.Scope.application, "system", "time.start",
                datetime.datetime.utcnow())
        cfg.add(config.Scope.application, "system", "time.start.user_provided",
                False)

    cfg.add(config.Scope.applicationOverride, "system", "quiet.mode",
            args.quiet)

    # per node?
    cfg.add(config.Scope.applicationOverride, "system", "offline.mode",
            args.offline)
    cfg.add(config.Scope.applicationOverride, "system", "logging.output",
            args.logging)
    # only temporary to ignore unknown actor messages
    cfg.add(config.Scope.applicationOverride, "system",
            "ignore.unknown.return", args.ignore_unknown_return_values)

    # Local config per node
    cfg.add(config.Scope.application, "node", "rally.root", paths.rally_root())
    cfg.add(config.Scope.application, "node", "rally.cwd", os.getcwd())

    cfg.add(config.Scope.applicationOverride, "mechanic", "source.revision",
            args.revision)
    #TODO dm: Consider renaming this one. It's used by different modules
    if args.distribution_version:
        cfg.add(config.Scope.applicationOverride, "mechanic",
                "distribution.version", args.distribution_version)
    cfg.add(config.Scope.applicationOverride, "mechanic",
            "distribution.repository", args.distribution_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "repository.name",
            args.team_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.name", args.car)
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.plugins",
            csv_to_list(args.elasticsearch_plugins))
    cfg.add(config.Scope.applicationOverride, "mechanic", "node.datapaths",
            csv_to_list(args.data_paths))
    cfg.add(config.Scope.applicationOverride, "mechanic", "preserve.install",
            convert.to_bool(args.preserve_install))
    cfg.add(config.Scope.applicationOverride, "mechanic", "telemetry.devices",
            csv_to_list(args.telemetry))
    if args.override_src_dir is not None:
        cfg.add(config.Scope.applicationOverride, "source", "local.src.dir",
                args.override_src_dir)

    cfg.add(config.Scope.applicationOverride, "race", "pipeline",
            args.pipeline)
    cfg.add(config.Scope.applicationOverride, "race", "laps", args.laps)
    cfg.add(config.Scope.applicationOverride, "race", "user.tag",
            args.user_tag)

    cfg.add(config.Scope.applicationOverride, "track", "repository.name",
            args.track_repository)
    cfg.add(config.Scope.applicationOverride, "track", "track.name",
            args.track)
    cfg.add(config.Scope.applicationOverride, "track", "challenge.name",
            args.challenge)
    cfg.add(config.Scope.applicationOverride, "track", "test.mode.enabled",
            args.test_mode)
    cfg.add(config.Scope.applicationOverride, "track", "auto_manage_indices",
            to_bool(args.auto_manage_indices))

    cfg.add(config.Scope.applicationOverride, "reporting", "format",
            args.report_format)
    cfg.add(config.Scope.applicationOverride, "reporting", "output.path",
            args.report_file)
    if sub_command == "compare":
        cfg.add(config.Scope.applicationOverride, "reporting",
                "baseline.timestamp", args.baseline)
        cfg.add(config.Scope.applicationOverride, "reporting",
                "contender.timestamp", args.contender)

    ################################
    # new section name: driver
    ################################
    cfg.add(config.Scope.applicationOverride, "benchmarks", "cluster.health",
            args.cluster_health)
    cfg.add(config.Scope.applicationOverride, "driver", "profiling",
            args.enable_driver_profiling)
    if sub_command != "list":
        # Also needed by mechanic (-> telemetry) - duplicate by module?
        cfg.add(config.Scope.applicationOverride, "client", "hosts",
                _normalize_hosts(csv_to_list(args.target_hosts)))
        client_options = kv_to_map(csv_to_list(args.client_options))
        cfg.add(config.Scope.applicationOverride, "client", "options",
                client_options)
        if "timeout" not in client_options:
            console.info(
                "You did not provide an explicit timeout in the client options. Assuming default of 10 seconds."
            )

    # split by component?
    if sub_command == "list":
        cfg.add(config.Scope.applicationOverride, "system",
                "list.config.option", args.configuration)
        cfg.add(config.Scope.applicationOverride, "system",
                "list.races.max_results", args.limit)

    configure_logging(cfg)
    logger.info("OS [%s]" % str(os.uname()))
    logger.info("Python [%s]" % str(sys.implementation))
    logger.info("Rally version [%s]" % version.version())
    logger.info("Command line arguments: %s" % args)
    # Configure networking
    net.init()
    if not args.offline:
        if not net.has_internet_connection():
            console.warn(
                "No Internet connection detected. Automatic download of track data sets etc. is disabled.",
                logger=logger)
            cfg.add(config.Scope.applicationOverride, "system", "offline.mode",
                    True)
        else:
            logger.info("Detected a working Internet connection.")

    # Kill any lingering Rally processes before attempting to continue - the actor system needs to be a singleton on this machine
    # noinspection PyBroadException
    try:
        process.kill_running_rally_instances()
    except BaseException:
        logger.exception(
            "Could not terminate potentially running Rally instances correctly. Attempting to go on anyway."
        )

    success = dispatch_sub_command(cfg, sub_command)

    end = time.time()
    if success:
        console.println("")
        console.info("SUCCESS (took %d seconds)" % (end - start),
                     overline="-",
                     underline="-")
    else:
        console.println("")
        console.info("FAILURE (took %d seconds)" % (end - start),
                     overline="-",
                     underline="-")
        sys.exit(64)
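
main() above funnels several comma-separated options through csv_to_list and kv_to_map (e.g. for --client-options), whose implementations are not part of this listing. A minimal sketch that matches the call sites; note it keeps values as plain strings, whereas Rally's real helpers also coerce types such as booleans and numbers:

def csv_to_list(csv):
    # "a, b ,c" -> ["a", "b", "c"]; treat None or blank input as empty.
    if csv is None or csv.strip() == "":
        return []
    return [v.strip() for v in csv.split(",")]

def kv_to_map(kvs):
    # ["timeout:60", "use_ssl:true"] -> {"timeout": "60", "use_ssl": "true"}
    result = {}
    for kv in kvs:
        k, v = kv.split(":", 1)
        result[k.strip()] = v.strip()
    return result

print(kv_to_map(csv_to_list("timeout:60,use_ssl:true")))
# -> {'timeout': '60', 'use_ssl': 'true'}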
Example #10
def main():
    check_python_version()

    start = time.time()

    # Early init of console output so we start to show everything consistently.
    console.init(quiet=False)
    # allow to see a thread-dump on SIGQUIT
    faulthandler.register(signal.SIGQUIT, file=sys.stderr)

    pre_configure_logging()
    arg_parser = create_arg_parser()
    args = arg_parser.parse_args()

    console.init(quiet=args.quiet)
    console.println(BANNER)

    cfg = config.Config(config_name=args.configuration_name)
    sub_command = derive_sub_command(args, cfg)
    ensure_configuration_present(cfg, args, sub_command)

    if args.effective_start_date:
        cfg.add(config.Scope.application, "system", "time.start",
                args.effective_start_date)
        cfg.add(config.Scope.application, "system", "time.start.user_provided",
                True)
    else:
        cfg.add(config.Scope.application, "system", "time.start",
                datetime.datetime.utcnow())
        cfg.add(config.Scope.application, "system", "time.start.user_provided",
                False)

    cfg.add(config.Scope.applicationOverride, "system", "quiet.mode",
            args.quiet)

    # per node?
    cfg.add(config.Scope.applicationOverride, "system", "offline.mode",
            args.offline)
    cfg.add(config.Scope.applicationOverride, "system", "logging.output",
            args.logging)

    # Local config per node
    cfg.add(config.Scope.application, "node", "rally.root", paths.rally_root())
    cfg.add(config.Scope.application, "node", "rally.cwd", os.getcwd())

    cfg.add(config.Scope.applicationOverride, "mechanic", "source.revision",
            args.revision)
    if args.distribution_version:
        cfg.add(config.Scope.applicationOverride, "mechanic",
                "distribution.version", args.distribution_version)
    cfg.add(config.Scope.applicationOverride, "mechanic",
            "distribution.repository", args.distribution_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "repository.name",
            args.team_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.names",
            csv_to_list(args.car))
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.plugins",
            csv_to_list(args.elasticsearch_plugins))
    cfg.add(config.Scope.applicationOverride, "mechanic", "node.datapaths",
            csv_to_list(args.data_paths))
    if args.keep_cluster_running:
        cfg.add(config.Scope.applicationOverride, "mechanic", "keep.running",
                True)
        # force-preserve the cluster nodes.
        cfg.add(config.Scope.applicationOverride, "mechanic",
                "preserve.install", True)
    else:
        cfg.add(config.Scope.applicationOverride, "mechanic", "keep.running",
                False)
        cfg.add(config.Scope.applicationOverride, "mechanic",
                "preserve.install", convert.to_bool(args.preserve_install))
    cfg.add(config.Scope.applicationOverride, "mechanic", "telemetry.devices",
            csv_to_list(args.telemetry))

    cfg.add(config.Scope.applicationOverride, "race", "pipeline",
            args.pipeline)
    cfg.add(config.Scope.applicationOverride, "race", "laps", args.laps)
    cfg.add(config.Scope.applicationOverride, "race", "user.tag",
            args.user_tag)

    # We can assume here that if a track-path is given, the user did not specify a repository either (although argparse sets it to
    # its default value)
    if args.track_path:
        cfg.add(config.Scope.applicationOverride, "track", "track.path",
                os.path.abspath(io.normalize_path(args.track_path)))
        cfg.add(config.Scope.applicationOverride, "track", "repository.name",
                None)
        if args.track:
            # stay as close as possible to argparse's own errors although we do custom validation here.
            arg_parser.error(
                "argument --track not allowed with argument --track-path")
        # cfg.add(config.Scope.applicationOverride, "track", "track.name", None)
    else:
        # cfg.add(config.Scope.applicationOverride, "track", "track.path", None)
        cfg.add(config.Scope.applicationOverride, "track", "repository.name",
                args.track_repository)
        # set the default programmatically because we need to determine whether the user has provided a value
        chosen_track = args.track if args.track else "geonames"
        cfg.add(config.Scope.applicationOverride, "track", "track.name",
                chosen_track)

    cfg.add(config.Scope.applicationOverride, "track", "params",
            kv_to_map(csv_to_list(args.track_params)))
    cfg.add(config.Scope.applicationOverride, "track", "challenge.name",
            args.challenge)
    cfg.add(config.Scope.applicationOverride, "track", "include.tasks",
            csv_to_list(args.include_tasks))
    cfg.add(config.Scope.applicationOverride, "track", "test.mode.enabled",
            args.test_mode)
    cfg.add(config.Scope.applicationOverride, "track", "auto_manage_indices",
            to_bool(args.auto_manage_indices))

    cfg.add(config.Scope.applicationOverride, "reporting", "format",
            args.report_format)
    cfg.add(config.Scope.applicationOverride, "reporting", "values",
            args.show_in_report)
    cfg.add(config.Scope.applicationOverride, "reporting", "output.path",
            args.report_file)
    if sub_command == "compare":
        cfg.add(config.Scope.applicationOverride, "reporting",
                "baseline.timestamp", args.baseline)
        cfg.add(config.Scope.applicationOverride, "reporting",
                "contender.timestamp", args.contender)

    cfg.add(config.Scope.applicationOverride, "driver", "cluster.health",
            args.cluster_health)
    if args.cluster_health != "green":
        console.warn(
            "--cluster-health is deprecated and will be removed in a future version of Rally."
        )
    cfg.add(config.Scope.applicationOverride, "driver", "profiling",
            args.enable_driver_profiling)
    cfg.add(config.Scope.applicationOverride, "driver", "on.error",
            args.on_error)
    cfg.add(config.Scope.applicationOverride, "driver", "load_driver_hosts",
            csv_to_list(args.load_driver_hosts))
    if sub_command != "list":
        # Also needed by mechanic (-> telemetry) - duplicate by module?
        cfg.add(config.Scope.applicationOverride, "client", "hosts",
                _normalize_hosts(csv_to_list(args.target_hosts)))
        client_options = kv_to_map(csv_to_list(args.client_options))
        cfg.add(config.Scope.applicationOverride, "client", "options",
                client_options)
        if "timeout" not in client_options:
            console.info(
                "You did not provide an explicit timeout in the client options. Assuming default of 10 seconds."
            )

    # split by component?
    if sub_command == "list":
        cfg.add(config.Scope.applicationOverride, "system",
                "list.config.option", args.configuration)
        cfg.add(config.Scope.applicationOverride, "system",
                "list.races.max_results", args.limit)

    configure_logging(cfg)
    logger.info("OS [%s]" % str(os.uname()))
    logger.info("Python [%s]" % str(sys.implementation))
    logger.info("Rally version [%s]" % version.version())
    logger.info("Command line arguments: %s" % args)
    # Configure networking
    net.init()
    if not args.offline:
        if not net.has_internet_connection():
            console.warn(
                "No Internet connection detected. Automatic download of track data sets etc. is disabled.",
                logger=logger)
            cfg.add(config.Scope.applicationOverride, "system", "offline.mode",
                    True)
        else:
            logger.info("Detected a working Internet connection.")

    # Kill any lingering Rally processes before attempting to continue - the actor system needs to be a singleton on this machine
    # noinspection PyBroadException
    try:
        process.kill_running_rally_instances()
    except BaseException:
        logger.exception(
            "Could not terminate potentially running Rally instances correctly. Attempting to go on anyway."
        )

    success = dispatch_sub_command(cfg, sub_command)

    end = time.time()
    if success:
        console.println("")
        console.info("SUCCESS (took %d seconds)" % (end - start),
                     overline="-",
                     underline="-")
    else:
        console.println("")
        console.info("FAILURE (took %d seconds)" % (end - start),
                     overline="-",
                     underline="-")
        sys.exit(64)
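
Both main() variants above call a bare to_bool (distinct from convert.to_bool) on --auto-manage-indices, which argparse restricts to "true", "false" or None. Its implementation is not shown here; a plausible sketch under exactly that assumption:

def to_bool(v):
    # Hypothetical sketch: --auto-manage-indices is a tri-state option,
    # so None ("not specified") must pass through unchanged.
    if v is None:
        return None
    if v == "true":
        return True
    if v == "false":
        return False
    raise ValueError("Cannot convert [%s] to a boolean." % v)

print(to_bool(None), to_bool("true"), to_bool("false"))  # -> None True False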
Example #11
def parse_args():
    def positive_number(v):
        value = int(v)
        if value <= 0:
            raise argparse.ArgumentTypeError("must be positive but was %s" % value)
        return value

    # try to preload configurable defaults, but this does not work together with `--configuration-name` (which is undocumented anyway)
    cfg = config.Config()
    if cfg.config_present():
        cfg.load_config()
        preserve_install = cfg.opts("defaults", "preserve_benchmark_candidate", default_value=False, mandatory=False)
    else:
        preserve_install = False

    # workaround for http://bugs.python.org/issue13041
    #
    # Set a proper width (see argparse.HelpFormatter)
    try:
        int(os.environ["COLUMNS"])
    except (KeyError, ValueError):
        # noinspection PyBroadException
        try:
            os.environ['COLUMNS'] = str(shutil.get_terminal_size().columns)
        except BaseException:
            # don't fail if anything goes wrong here
            pass

    parser = argparse.ArgumentParser(prog=PROGRAM_NAME,
                                     description=BANNER + "\n\n You know for benchmarking Elasticsearch.",
                                     epilog="Find out more about Rally at %s" % console.format.link(DOC_LINK),
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version="%(prog)s " + version.version())

    subparsers = parser.add_subparsers(
        title="subcommands",
        dest="subcommand",
        help="")

    race_parser = subparsers.add_parser("race", help="Run the benchmarking pipeline. This sub-command should typically be used.")
    # change in favor of "list telemetry", "list tracks", "list pipelines"
    list_parser = subparsers.add_parser("list", help="List configuration options")
    list_parser.add_argument(
        "configuration",
        metavar="configuration",
        help="The configuration for which Rally should show the available options. "
             "Possible values are: telemetry, tracks, pipelines, races, cars, elasticsearch-plugins",
        choices=["telemetry", "tracks", "pipelines", "races", "cars", "elasticsearch-plugins"])
    list_parser.add_argument(
        "--limit",
        help="Limit the number of search results for recent races (default: 10).",
        default=10,
    )

    compare_parser = subparsers.add_parser("compare", help="Compare two races")
    compare_parser.add_argument(
        "--baseline",
        help="Race timestamp of the baseline (see %s list races)" % PROGRAM_NAME,
        default="")
    compare_parser.add_argument(
        "--contender",
        help="Race timestamp of the contender (see %s list races)" % PROGRAM_NAME,
        default="")
    compare_parser.add_argument(
        "--report-format",
        help="define the output format for the command line report (default: markdown).",
        choices=["markdown", "csv"],
        default="markdown")
    compare_parser.add_argument(
        "--report-file",
        help="write the command line report also to the provided file",
        default="")

    config_parser = subparsers.add_parser("configure", help="Write the configuration file or reconfigure Rally")
    for p in [parser, config_parser]:
        p.add_argument(
            "--advanced-config",
            help="show additional configuration options (default: false)",
            default=False,
            action="store_true")
        p.add_argument(
            "--assume-defaults",
            help="Automatically accept all options with default values (default: false)",
            default=False,
            action="store_true")

    for p in [parser, race_parser]:
        p.add_argument(
            "--pipeline",
            help="select the pipeline to run.",
            # the default will be dynamically derived by racecontrol based on the presence / absence of other command line options
            default="")
        p.add_argument(
            "--preserve-install",
            help="keep the benchmark candidate and its index. (default: %s)" % str(preserve_install).lower(),
            default=preserve_install)
        p.add_argument(
            "--telemetry",
            help="enable the provided telemetry devices, provided as a comma-separated list. List possible telemetry devices "
                 "with `%s list telemetry`" % PROGRAM_NAME,
            default="")
        p.add_argument(
            "--revision",
            help="define the source code revision for building the benchmark candidate. 'current' uses the source tree as is,"
                 " 'latest' fetches the latest version on master. It is also possible to specify a commit id or a timestamp."
                 " The timestamp must be specified as: \"@ts\" where \"ts\" must be a valid ISO 8601 timestamp, "
                 "e.g. \"@2013-07-27T10:37:00Z\" (default: current).",
            default="current")  # optimized for local usage, don't fetch sources
        p.add_argument(
            "--track",
            help="define the track to use. List possible tracks with `%s list tracks` (default: geonames)." % PROGRAM_NAME,
            default="geonames")
        p.add_argument(
            "--challenge",
            help="define the challenge to use. List possible challenges for tracks with `%s list tracks`" % PROGRAM_NAME)
        p.add_argument(
            "--include-tasks",
            help="defines a comma-separated list of tasks to run. By default all tasks of a challenge are run.")
        p.add_argument(
            "--car",
            help="define the car to use. List possible cars with `%s list cars` (default: defaults)." % PROGRAM_NAME,
            default="defaults")  # optimized for local usage
        p.add_argument(
            "--elasticsearch-plugins",
            help="define the Elasticsearch plugins to install. (default: install no plugins).",
            default="")
        p.add_argument(
            "--target-hosts",
            help="define a comma-separated list of host:port pairs which should be targeted iff using the pipeline 'benchmark-only' "
                 "(default: localhost:9200).",
            default="")  # actually the default is pipeline specific and it is set later
        p.add_argument(
            "--load-driver-hosts",
            help="define a comma-separated list of hosts which should generate load (default: localhost).",
            default="localhost")
        p.add_argument(
            "--client-options",
            help="define a comma-separated list of client options to use. The options will be passed to the Elasticsearch Python client "
                 "(default: %s)." % DEFAULT_CLIENT_OPTIONS,
            default=DEFAULT_CLIENT_OPTIONS)
        p.add_argument(
            "--cluster-health",
            choices=["red", "yellow", "green", "skip"],
            help="Expected cluster health at the beginning of the benchmark (default: green)",
            default="green")
        p.add_argument(
            "--user-tag",
            help="define a user-specific key-value pair (separated by ':'). It is added to each metric record as meta info. "
                 "Example: intention:baseline-ticket-12345",
            default="")
        p.add_argument(
            "--report-format",
            help="define the output format for the command line report (default: markdown).",
            choices=["markdown", "csv"],
            default="markdown")
        p.add_argument(
            "--report-file",
            help="write the command line report also to the provided file",
            default="")
        p.add_argument(
            "--quiet",
            help="suppress as much as output as possible (default: false).",
            default=False,
            action="store_true")
        p.add_argument(
            "--laps",
            type=positive_number,
            help="number of laps that the benchmark should run (default: 1).",
            default=1)
        p.add_argument(
            "--test-mode",
            help="runs the given track in 'test mode'. Meant to check a track for errors but not for real benchmarks (default: false).",
            default=False,
            action="store_true")

    for p in [parser, list_parser, race_parser]:
        p.add_argument(
            "--distribution-version",
            help="define the version of the Elasticsearch distribution to download. "
                 "Check https://www.elastic.co/downloads/elasticsearch for released versions.",
            default="")
        p.add_argument(
            "--distribution-repository",
            help="define the repository from where the Elasticsearch distribution should be downloaded (default: release).",
            default="release")
        p.add_argument(
            "--track-repository",
            help="define the repository from where Rally will load tracks (default: default).",
            default="default")
        p.add_argument(
            "--team-repository",
            help="define the repository from where Rally will load teams and cars (default: default).",
            default="default")
        p.add_argument(
            "--offline",
            help="assume that Rally has no connection to the Internet (default: false)",
            default=False,
            action="store_true")
        p.add_argument(
            "--enable-driver-profiling",
            help="Enables a profiler for analyzing the performance of calls in Rally's driver (default: false)",
            default=False,
            action="store_true")

    ###############################################################################
    #
    # The options below are undocumented and can be removed or changed at any time.
    #
    ###############################################################################
    for p in [parser, race_parser]:
        # This option is intended to tell Rally to assume a different start date than 'now'. This is effectively just useful for things like
        # backtesting or a benchmark run across environments (think: comparison of EC2 and bare metal) but never for the typical user.
        p.add_argument(
            "--effective-start-date",
            help=argparse.SUPPRESS,
            type=lambda s: datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S"),
            default=None)
        # This is a highly experimental option and will likely be removed
        p.add_argument(
            "--data-paths",
            help=argparse.SUPPRESS,
            default=None)
        p.add_argument(
            "--auto-manage-indices",
            choices=["true", "false"],
            help=argparse.SUPPRESS,
            default=None)
        # keeps the cluster running after the benchmark, only relevant if Rally provisions the cluster
        p.add_argument(
            "--keep-cluster-running",
            help=argparse.SUPPRESS,
            action="store_true",
            default=False)

    for p in [parser, config_parser, list_parser, race_parser, compare_parser]:
        # This option is needed to support a separate configuration for the integration tests on the same machine
        p.add_argument(
            "--configuration-name",
            help=argparse.SUPPRESS,
            default=None)
        p.add_argument(
            "--logging",
            choices=["file", "console"],
            help=argparse.SUPPRESS,
            default="file"
        )

    return parser.parse_args()
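
The hidden --effective-start-date option parses its value with the strptime lambda shown above, so only the exact format "%Y-%m-%d %H:%M:%S" is accepted. For illustration (the name parse_start_date is made up for this sketch):

import datetime

parse_start_date = lambda s: datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S")

print(parse_start_date("2016-10-01 11:18:00"))
# -> 2016-10-01 11:18:00
# Any other shape, e.g. "2016-10-01T11:18:00Z", raises ValueError, which
# argparse reports as an "invalid <lambda> value" usage error.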
Example #12
def main():
    check_python_version()

    start = time.time()

    # Early init of console output so we start to show everything consistently.
    console.init(quiet=False)
    # allow to see a thread-dump on SIGQUIT
    faulthandler.register(signal.SIGQUIT, file=sys.stderr)

    pre_configure_logging()
    args = parse_args()

    console.init(quiet=args.quiet)
    console.println(BANNER)

    cfg = config.Config(config_name=args.configuration_name)
    sub_command = derive_sub_command(args, cfg)
    ensure_configuration_present(cfg, args, sub_command)

    if args.effective_start_date:
        cfg.add(config.Scope.application, "system", "time.start", args.effective_start_date)
        cfg.add(config.Scope.application, "system", "time.start.user_provided", True)
    else:
        cfg.add(config.Scope.application, "system", "time.start", datetime.datetime.utcnow())
        cfg.add(config.Scope.application, "system", "time.start.user_provided", False)

    cfg.add(config.Scope.applicationOverride, "system", "quiet.mode", args.quiet)

    # per node?
    cfg.add(config.Scope.applicationOverride, "system", "offline.mode", args.offline)
    cfg.add(config.Scope.applicationOverride, "system", "logging.output", args.logging)

    # Local config per node
    cfg.add(config.Scope.application, "node", "rally.root", paths.rally_root())
    cfg.add(config.Scope.application, "node", "rally.cwd", os.getcwd())

    cfg.add(config.Scope.applicationOverride, "mechanic", "source.revision", args.revision)
    #TODO dm: Consider renaming this one. It's used by different modules
    if args.distribution_version:
        cfg.add(config.Scope.applicationOverride, "mechanic", "distribution.version", args.distribution_version)
    cfg.add(config.Scope.applicationOverride, "mechanic", "distribution.repository", args.distribution_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "repository.name", args.team_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.names", csv_to_list(args.car))
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.plugins", csv_to_list(args.elasticsearch_plugins))
    cfg.add(config.Scope.applicationOverride, "mechanic", "node.datapaths", csv_to_list(args.data_paths))
    if args.keep_cluster_running:
        cfg.add(config.Scope.applicationOverride, "mechanic", "keep.running", True)
        # force-preserve the cluster nodes.
        cfg.add(config.Scope.applicationOverride, "mechanic", "preserve.install", True)
    else:
        cfg.add(config.Scope.applicationOverride, "mechanic", "keep.running", False)
        cfg.add(config.Scope.applicationOverride, "mechanic", "preserve.install", convert.to_bool(args.preserve_install))
    cfg.add(config.Scope.applicationOverride, "mechanic", "telemetry.devices", csv_to_list(args.telemetry))

    cfg.add(config.Scope.applicationOverride, "race", "pipeline", args.pipeline)
    cfg.add(config.Scope.applicationOverride, "race", "laps", args.laps)
    cfg.add(config.Scope.applicationOverride, "race", "user.tag", args.user_tag)

    cfg.add(config.Scope.applicationOverride, "track", "repository.name", args.track_repository)
    cfg.add(config.Scope.applicationOverride, "track", "track.name", args.track)
    cfg.add(config.Scope.applicationOverride, "track", "challenge.name", args.challenge)
    cfg.add(config.Scope.applicationOverride, "track", "include.tasks", csv_to_list(args.include_tasks))
    cfg.add(config.Scope.applicationOverride, "track", "test.mode.enabled", args.test_mode)
    cfg.add(config.Scope.applicationOverride, "track", "auto_manage_indices", to_bool(args.auto_manage_indices))

    cfg.add(config.Scope.applicationOverride, "reporting", "format", args.report_format)
    cfg.add(config.Scope.applicationOverride, "reporting", "output.path", args.report_file)
    if sub_command == "compare":
        cfg.add(config.Scope.applicationOverride, "reporting", "baseline.timestamp", args.baseline)
        cfg.add(config.Scope.applicationOverride, "reporting", "contender.timestamp", args.contender)

    ################################
    # new section name: driver
    ################################
    cfg.add(config.Scope.applicationOverride, "driver", "cluster.health", args.cluster_health)
    cfg.add(config.Scope.applicationOverride, "driver", "profiling", args.enable_driver_profiling)
    cfg.add(config.Scope.applicationOverride, "driver", "load_driver_hosts", csv_to_list(args.load_driver_hosts))
    if sub_command != "list":
        # Also needed by mechanic (-> telemetry) - duplicate by module?
        cfg.add(config.Scope.applicationOverride, "client", "hosts", _normalize_hosts(csv_to_list(args.target_hosts)))
        client_options = kv_to_map(csv_to_list(args.client_options))
        cfg.add(config.Scope.applicationOverride, "client", "options", client_options)
        if "timeout" not in client_options:
            console.info("You did not provide an explicit timeout in the client options. Assuming default of 10 seconds.")

    # split by component?
    if sub_command == "list":
        cfg.add(config.Scope.applicationOverride, "system", "list.config.option", args.configuration)
        cfg.add(config.Scope.applicationOverride, "system", "list.races.max_results", args.limit)

    configure_logging(cfg)
    logger.info("OS [%s]" % str(os.uname()))
    logger.info("Python [%s]" % str(sys.implementation))
    logger.info("Rally version [%s]" % version.version())
    logger.info("Command line arguments: %s" % args)
    # Configure networking
    net.init()
    if not args.offline:
        if not net.has_internet_connection():
            console.warn("No Internet connection detected. Automatic download of track data sets etc. is disabled.",
                         logger=logger)
            cfg.add(config.Scope.applicationOverride, "system", "offline.mode", True)
        else:
            logger.info("Detected a working Internet connection.")

    # Kill any lingering Rally processes before attempting to continue - the actor system needs to be a singleton on this machine
    # noinspection PyBroadException
    try:
        process.kill_running_rally_instances()
    except BaseException:
        logger.exception("Could not terminate potentially running Rally instances correctly. Attempting to go on anyway.")

    success = dispatch_sub_command(cfg, sub_command)

    end = time.time()
    if success:
        console.println("")
        console.info("SUCCESS (took %d seconds)" % (end - start), overline="-", underline="-")
    else:
        console.println("")
        console.info("FAILURE (took %d seconds)" % (end - start), overline="-", underline="-")
        sys.exit(64)
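
All of the main() variants pass --target-hosts through _normalize_hosts before handing them to the Elasticsearch client. The implementation is not included in these listings; a plausible sketch, assuming "host:port" strings are turned into the dicts the Python client accepts and that a bare host name falls back to port 9200:

def _normalize_hosts(hosts):
    # Hypothetical sketch: "localhost:9200" -> {"host": "localhost", "port": 9200}
    normalized = []
    for host in hosts:
        if ":" in host:
            name, port = host.rsplit(":", 1)
            normalized.append({"host": name, "port": int(port)})
        else:
            normalized.append({"host": host, "port": 9200})
    return normalized

print(_normalize_hosts(["localhost:9200", "10.0.0.1"]))
# -> [{'host': 'localhost', 'port': 9200}, {'host': '10.0.0.1', 'port': 9200}]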