Example 1
    def test_raises_error_when_only_one_of_client_cert_and_client_key_defined(self):
        hosts = [{"host": "127.0.0.1", "port": 9200}]
        client_options = {
            "use_ssl": True,
            "verify_certs": True,
            "http_auth": ("user", "password"),
            "ca_certs": os.path.join(self.cwd, "utils/resources/certs/ca.crt"),
        }

        client_ssl_options = {"client_cert": "utils/resources/certs/client.crt", "client_key": "utils/resources/certs/client.key"}

        random_client_ssl_option = random.choice(list(client_ssl_options.keys()))
        missing_client_ssl_option = list(set(client_ssl_options) - set([random_client_ssl_option]))[0]
        client_options.update({random_client_ssl_option: client_ssl_options[random_client_ssl_option]})

        with pytest.raises(exceptions.SystemSetupError) as ctx:
            with mock.patch.object(console, "println") as mocked_console_println:
                client.EsClientFactory(hosts, client_options)
        mocked_console_println.assert_called_once_with(
            "'{}' is missing from client-options but '{}' has been specified.\n"
            "If your Elasticsearch setup requires client certificate verification both need to be supplied.\n"
            "Read the documentation at {}\n".format(
                missing_client_ssl_option,
                random_client_ssl_option,
                console.format.link(doc_link("command_line_reference.html#client-options")),
            )
        )
        assert ctx.value.args[0] == (
            "Cannot specify '{}' without also specifying '{}' in client-options.".format(
                random_client_ssl_option, missing_client_ssl_option
            )
        )
Example 2
def print_help_on_errors():
    heading = "Getting further help:"
    console.println(console.format.bold(heading))
    console.println(console.format.underline_for(heading))
    console.println("* Check the log files in {} for errors.".format(paths.logs()))
    console.println("* Read the documentation at {}".format(console.format.link(doc_link())))
    console.println("* Ask a question on the forum at {}".format(console.format.link("https://discuss.elastic.co/c/elasticsearch/rally")))
    console.println("* Raise an issue at {} and include the log files in {}."
                    .format(console.format.link("https://github.com/elastic/rally/issues"), paths.logs()))
Example 3
def main():
    check_python_version()
    log.install_default_log_config()
    log.configure_logging()
    console.init(assume_tty=False)

    parser = argparse.ArgumentParser(
        prog=PROGRAM_NAME,
        description=BANNER + "\n\n Rally daemon to support remote benchmarks",
        epilog="Find out more about Rally at {}".format(
            console.format.link(doc_link())),
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument("--version",
                        action="version",
                        version="%(prog)s " + version.version())

    subparsers = parser.add_subparsers(title="subcommands",
                                       dest="subcommand",
                                       help="")
    subparsers.required = True

    start_command = subparsers.add_parser("start",
                                          help="Starts the Rally daemon")
    restart_command = subparsers.add_parser("restart",
                                            help="Restarts the Rally daemon")
    for p in [start_command, restart_command]:
        p.add_argument("--node-ip", required=True, help="The IP of this node.")
        p.add_argument("--coordinator-ip",
                       required=True,
                       help="The IP of the coordinator node.")
    subparsers.add_parser("stop", help="Stops the Rally daemon")
    subparsers.add_parser(
        "status", help="Shows the current status of the local Rally daemon")

    args = parser.parse_args()

    if args.subcommand == "start":
        start(args)
    elif args.subcommand == "stop":
        stop()
    elif args.subcommand == "status":
        status()
    elif args.subcommand == "restart":
        stop(raise_errors=False)
        start(args)
    else:
        raise exceptions.RallyError("Unknown subcommand [%s]" %
                                    args.subcommand)
Example 4
def warn_if_plugin_build_task_is_in_use(config):
    if "source" not in config:
        return
    for k, v in config["source"].items():
        plugin_match = re.match(r"^plugin\.([^.]+)\.build\.task$", k)
        if plugin_match is not None and len(plugin_match.groups()) > 0:
            plugin_name = plugin_match.group(1)
            new_key = "plugin.{}.build.command".format(plugin_name)
            out("\n"
                "WARNING:\n"
                "  The build.task property for plugins has been obsoleted in favor of the full build.command.\n"
                "  You will need to edit the plugin [{}] section in {} and change from:\n"
                "  [{} = {}] to [{} = <the full command>].\n"
                "  Please refer to the documentation for more details:\n"
                "  {}.\n".format(plugin_name, config_file.location, k, v, new_key,
                                 console.format.link(doc_link("elasticsearch_plugins.html#running-a-benchmark-with-plugins"))))
Example 5
def run(cfg):
    logger = logging.getLogger(__name__)
    name = cfg.opts("race", "pipeline")
    race_id = cfg.opts("system", "race.id")
    console.info(f"Race id is [{race_id}]", logger=logger)
    if len(name) == 0:
        # assume from-distribution pipeline if distribution.version has been specified and --pipeline cli arg not set
        if cfg.exists("mechanic", "distribution.version"):
            name = "from-distribution"
        else:
            name = "from-sources"
        logger.info(
            "User specified no pipeline. Automatically derived pipeline [%s].",
            name)
        cfg.add(config.Scope.applicationOverride, "race", "pipeline", name)
    else:
        logger.info("User specified pipeline [%s].", name)

    if os.environ.get("RALLY_RUNNING_IN_DOCKER", "").upper() == "TRUE":
        # in this case only benchmarking remote Elasticsearch clusters makes sense
        if name != "benchmark-only":
            raise exceptions.SystemSetupError(
                "Only the [benchmark-only] pipeline is supported by the Rally Docker image.\n"
                "Add --pipeline=benchmark-only in your Rally arguments and try again.\n"
                "For more details read the docs for the benchmark-only pipeline in {}\n"
                .format(doc_link("pipelines.html#benchmark-only")))

    try:
        pipeline = pipelines[name]
    except KeyError:
        raise exceptions.SystemSetupError(
            "Unknown pipeline [%s]. List the available pipelines with %s list pipelines."
            % (name, PROGRAM_NAME))
    try:
        pipeline(cfg)
    except exceptions.RallyError:
        # pass on our own errors; they are handled specially at the top level
        raise
    except KeyboardInterrupt:
        logger.info("User has cancelled the benchmark.")
        raise exceptions.UserInterrupted(
            "User has cancelled the benchmark (detected by race control)."
        ) from None
    except BaseException:
        tb = sys.exc_info()[2]
        raise exceptions.RallyError(
            "This race ended with a fatal crash.").with_traceback(tb)
Example 6
def create_arg_parser():
    def positive_number(v):
        value = int(v)
        if value <= 0:
            raise argparse.ArgumentTypeError("must be positive but was %s" %
                                             value)
        return value

    # try to preload configurable defaults, but this does not work together with `--configuration-name` (which is undocumented anyway)
    cfg = config.Config()
    if cfg.config_present():
        cfg.load_config()
        preserve_install = cfg.opts("defaults",
                                    "preserve_benchmark_candidate",
                                    default_value=False,
                                    mandatory=False)
    else:
        preserve_install = False

    parser = argparse.ArgumentParser(
        prog=PROGRAM_NAME,
        description=BANNER + "\n\n You Know, for Benchmarking Elasticsearch.",
        epilog="Find out more about Rally at {}".format(
            console.format.link(doc_link())),
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--version',
                        action='version',
                        version="%(prog)s " + version.version())

    subparsers = parser.add_subparsers(title="subcommands",
                                       dest="subcommand",
                                       help="")

    race_parser = subparsers.add_parser(
        "race",
        help="Run the benchmarking pipeline. This is the sub-command you will typically use.")
    # change in favor of "list telemetry", "list tracks", "list pipelines"
    list_parser = subparsers.add_parser("list",
                                        help="List configuration options")
    list_parser.add_argument(
        "configuration",
        metavar="configuration",
        help=
        "The configuration for which Rally should show the available options. "
        "Possible values are: telemetry, tracks, pipelines, races, cars, elasticsearch-plugins",
        choices=[
            "telemetry", "tracks", "pipelines", "races", "cars",
            "elasticsearch-plugins"
        ])
    list_parser.add_argument(
        "--limit",
        help=
        "Limit the number of search results for recent races (default: 10).",
        default=10,
    )

    generate_parser = subparsers.add_parser("generate",
                                            help="Generate artifacts")
    generate_parser.add_argument(
        "artifact",
        metavar="artifact",
        help="The artifact to create. Possible values are: charts",
        choices=["charts"])
    # We allow either a chart-spec-path *or* a chart-spec defined on the fly with track, challenge and car. Convincing
    # argparse to validate that everything is correct *might* be doable but it is simpler to just do this manually.
    generate_parser.add_argument(
        "--chart-spec-path",
        help=
        "Path to a JSON file(s) containing all combinations of charts to generate. Wildcard patterns can be used to specify "
        "multiple files.")
    generate_parser.add_argument(
        "--track",
        help=
        "Define the track to use. List possible tracks with `%s list tracks` (default: geonames)."
        % PROGRAM_NAME
        # we set the default value later on because we need to determine whether the user has provided this value.
        # default="geonames"
    )
    generate_parser.add_argument(
        "--challenge",
        help=
        "Define the challenge to use. List possible challenges for tracks with `%s list tracks`."
        % PROGRAM_NAME)
    generate_parser.add_argument(
        "--car",
        help=
        "Define the car to use. List possible cars with `%s list cars` (default: defaults)."
        % PROGRAM_NAME)
    generate_parser.add_argument(
        "--node-count",
        type=positive_number,
        help="The number of Elasticsearch nodes to use in charts.")
    generate_parser.add_argument(
        "--chart-type",
        help="Chart type to generate (default: time-series).",
        choices=["time-series", "bar"],
        default="time-series")
    generate_parser.add_argument(
        "--quiet",
        help="Suppress as much as output as possible (default: false).",
        default=False,
        action="store_true")
    generate_parser.add_argument("--output-path",
                                 help="Output file name (default: stdout).",
                                 default=None)

    compare_parser = subparsers.add_parser("compare", help="Compare two races")
    compare_parser.add_argument(
        "--baseline",
        required=True,
        help="Race ID of the baseline (see %s list races)." % PROGRAM_NAME)
    compare_parser.add_argument(
        "--contender",
        required=True,
        help="Race ID of the contender (see %s list races)." % PROGRAM_NAME)
    compare_parser.add_argument(
        "--report-format",
        help=
        "Define the output format for the command line report (default: markdown).",
        choices=["markdown", "csv"],
        default="markdown")
    compare_parser.add_argument(
        "--report-file",
        help="Write the command line report also to the provided file.",
        default="")

    config_parser = subparsers.add_parser(
        "configure", help="Write the configuration file or reconfigure Rally")
    for p in [parser, config_parser]:
        p.add_argument(
            "--advanced-config",
            help="Show additional configuration options (default: false).",
            default=False,
            action="store_true")
        p.add_argument(
            "--assume-defaults",
            help=
            "Automatically accept all options with default values (default: false).",
            default=False,
            action="store_true")

    download_parser = subparsers.add_parser("download",
                                            help="Downloads an artifact")
    download_parser.add_argument(
        "--team-repository",
        help=
        "Define the repository from where Rally will load teams and cars (default: default).",
        default="default")
    download_parser.add_argument(
        "--distribution-version",
        help="Define the version of the Elasticsearch distribution to download. "
        "Check https://www.elastic.co/downloads/elasticsearch for released versions.",
        default="")
    download_parser.add_argument(
        "--distribution-repository",
        help=
        "Define the repository from where the Elasticsearch distribution should be downloaded (default: release).",
        default="release")
    download_parser.add_argument(
        "--car",
        help=
        "Define the car to use. List possible cars with `%s list cars` (default: defaults)."
        % PROGRAM_NAME,
        default="defaults")  # optimized for local usage
    download_parser.add_argument(
        "--car-params",
        help=
        "Define a comma-separated list of key:value pairs that are injected verbatim as variables for the car.",
        default="")

    for p in [parser, list_parser, race_parser, generate_parser]:
        p.add_argument(
            "--distribution-version",
            help=
            "Define the version of the Elasticsearch distribution to download. "
            "Check https://www.elastic.co/downloads/elasticsearch for released versions.",
            default="")
        p.add_argument("--runtime-jdk",
                       type=positive_number,
                       help="The major version of the runtime JDK to use.",
                       default=None)

        track_source_group = p.add_mutually_exclusive_group()
        track_source_group.add_argument(
            "--track-repository",
            help=
            "Define the repository from where Rally will load tracks (default: default).",
            # argparse is smart enough to use this default only if the user did not use --track-path and also did not specify anything
            default="default")
        track_source_group.add_argument("--track-path",
                                        help="Define the path to a track.")
        track_source_group.add_argument(
            "--track-revision",
            help=
            "Define a specific revision in the track repository that Rally should use.",
            default=None)
        p.add_argument(
            "--team-repository",
            help=
            "Define the repository from where Rally will load teams and cars (default: default).",
            default="default")
        p.add_argument(
            "--team-revision",
            help=
            "Define a specific revision in the team repository that Rally should use.",
            default=None)
        p.add_argument(
            "--offline",
            help=
            "Assume that Rally has no connection to the Internet (default: false).",
            default=False,
            action="store_true")

    for p in [parser, race_parser]:
        p.add_argument(
            "--pipeline",
            help="Select the pipeline to run.",
            # the default will be dynamically derived by racecontrol based on the presence / absence of other command line options
            default="")
        p.add_argument(
            "--revision",
            help=
            "Define the source code revision for building the benchmark candidate. 'current' uses the source tree as is,"
            " 'latest' fetches the latest version on master. It is also possible to specify a commit id or a timestamp."
            " The timestamp must be specified as: \"@ts\" where \"ts\" must be a valid ISO 8601 timestamp, "
            "e.g. \"@2013-07-27T10:37:00Z\" (default: current).",
            default="current"
        )  # optimized for local usage, don't fetch sources
        p.add_argument(
            "--track",
            help=
            "Define the track to use. List possible tracks with `%s list tracks` (default: geonames)."
            % PROGRAM_NAME
            # we set the default value later on because we need to determine whether the user has provided this value.
            # default="geonames"
        )
        p.add_argument(
            "--track-params",
            help=
            "Define a comma-separated list of key:value pairs that are injected verbatim to the track as variables.",
            default="")
        p.add_argument(
            "--challenge",
            help=
            "Define the challenge to use. List possible challenges for tracks with `%s list tracks`."
            % PROGRAM_NAME)
        p.add_argument(
            "--team-path",
            help="Define the path to the car and plugin configurations to use."
        )
        p.add_argument(
            "--car",
            help=
            "Define the car to use. List possible cars with `%s list cars` (default: defaults)."
            % PROGRAM_NAME,
            default="defaults")  # optimized for local usage
        p.add_argument(
            "--car-params",
            help=
            "Define a comma-separated list of key:value pairs that are injected verbatim as variables for the car.",
            default="")
        p.add_argument(
            "--elasticsearch-plugins",
            help=
            "Define the Elasticsearch plugins to install. (default: install no plugins).",
            default="")
        p.add_argument(
            "--plugin-params",
            help=
            "Define a comma-separated list of key:value pairs that are injected verbatim to all plugins as variables.",
            default="")
        p.add_argument(
            "--target-hosts",
            help=
            "Define a comma-separated list of host:port pairs which should be targeted if using the pipeline 'benchmark-only' "
            "(default: localhost:9200).",
            default=""
        )  # actually the default is pipeline specific and it is set later
        p.add_argument(
            "--load-driver-hosts",
            help=
            "Define a comma-separated list of hosts which should generate load (default: localhost).",
            default="localhost")
        p.add_argument(
            "--client-options",
            help=
            "Define a comma-separated list of client options to use. The options will be passed to the Elasticsearch Python client "
            "(default: {}).".format(opts.ClientOptions.DEFAULT_CLIENT_OPTIONS),
            default=opts.ClientOptions.DEFAULT_CLIENT_OPTIONS)
        p.add_argument(
            "--on-error",
            choices=["continue", "abort"],
            help=
            "Either 'continue' or 'abort' when Rally gets an error response (default: continue).",
            default="continue")
        p.add_argument(
            "--telemetry",
            help=
            "Enable the provided telemetry devices, provided as a comma-separated list. List possible telemetry devices "
            "with `%s list telemetry`." % PROGRAM_NAME,
            default="")
        p.add_argument(
            "--telemetry-params",
            help=
            "Define a comma-separated list of key:value pairs that are injected verbatim to the telemetry devices as parameters.",
            default="")
        p.add_argument(
            "--distribution-repository",
            help=
            "Define the repository from where the Elasticsearch distribution should be downloaded (default: release).",
            default="release")
        p.add_argument(
            "--include-tasks",
            help=
            "Defines a comma-separated list of tasks to run. By default all tasks of a challenge are run."
        )
        p.add_argument(
            "--user-tag",
            help=
            "Define a user-specific key-value pair (separated by ':'). It is added to each metric record as meta info. "
            "Example: intention:baseline-ticket-12345",
            default="")
        p.add_argument(
            "--report-format",
            help=
            "Define the output format for the command line report (default: markdown).",
            choices=["markdown", "csv"],
            default="markdown")
        p.add_argument(
            "--show-in-report",
            help=
            "Define which values are shown in the summary report (default: available).",
            choices=["available", "all-percentiles", "all"],
            default="available")
        p.add_argument(
            "--report-file",
            help="Write the command line report also to the provided file.",
            default="")
        p.add_argument(
            "--preserve-install",
            help="Keep the benchmark candidate and its index. (default: %s)." %
            str(preserve_install).lower(),
            default=preserve_install)
        p.add_argument(
            "--test-mode",
            help=
            "Runs the given track in 'test mode'. Meant to check a track for errors but not for real benchmarks (default: false).",
            default=False,
            action="store_true")
        p.add_argument(
            "--enable-driver-profiling",
            help=
            "Enables a profiler for analyzing the performance of calls in Rally's driver (default: false).",
            default=False,
            action="store_true")

    ###############################################################################
    #
    # The options below are undocumented and can be removed or changed at any time.
    #
    ###############################################################################
    for p in [parser, race_parser]:
        # This option is intended to tell Rally to assume a different start date than 'now'. This is effectively just useful for things like
        # backtesting or a benchmark run across environments (think: comparison of EC2 and bare metal) but never for the typical user.
        p.add_argument(
            "--effective-start-date",
            help=argparse.SUPPRESS,
            type=lambda s: datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S"),
            default=None)
        # keeps the cluster running after the benchmark, only relevant if Rally provisions the cluster
        p.add_argument("--keep-cluster-running",
                       help=argparse.SUPPRESS,
                       action="store_true",
                       default=False)
        # skips checking that the REST API is available before proceeding with the benchmark
        p.add_argument("--skip-rest-api-check",
                       help=argparse.SUPPRESS,
                       action="store_true",
                       default=False)

    for p in [
            parser, config_parser, list_parser, race_parser, compare_parser,
            download_parser
    ]:
        # This option is needed to support a separate configuration for the integration tests on the same machine
        p.add_argument("--configuration-name",
                       help=argparse.SUPPRESS,
                       default=None)
        p.add_argument(
            "--quiet",
            help="Suppress as much as output as possible (default: false).",
            default=False,
            action="store_true")

    return parser
Example 7
    def create_config(self, config_file, advanced_config=False, assume_defaults=False):
        """
        Either creates a new configuration file or overwrites an existing one. Asks the user for input on configurable properties
        and writes them to the configuration file in ~/.rally/rally.ini.

        :param config_file:
        :param advanced_config: Whether to ask for properties that are not necessary for everyday use (on a dev machine). Default: False.
        :param assume_defaults: If True, assume the user accepted all values for which defaults are provided. Mainly intended for automatic
        configuration in CI runs. Default: False.
        """
        self.prompter = Prompter(self.i, self.sec_i, self.o, assume_defaults)

        if advanced_config:
            self.o("Running advanced configuration. You can get additional help at:")
            self.o("")
            self.o("  %s" % console.format.link(doc_link("configuration.html")))
            self.o("")
        else:
            self.o("Running simple configuration. Run the advanced configuration with:")
            self.o("")
            self.o("  %s configure --advanced-config" % PROGRAM_NAME)
            self.o("")

        if config_file.present:
            self.o("\nWARNING: Will overwrite existing config file at [%s]\n" % config_file.location)
            self.logger.debug("Detected an existing configuration file at [%s]", config_file.location)
        else:
            self.logger.debug("Did not detect a configuration file at [%s]. Running initial configuration routine.", config_file.location)

        root_dir = io.normalize_path(os.path.abspath(os.path.join(config_file.config_dir, "benchmarks")))
        if advanced_config:
            root_dir = io.normalize_path(self._ask_property("Enter the benchmark root directory", default_value=root_dir))
        else:
            self.o("* Setting up benchmark root directory in %s" % root_dir)

        # We try to autodetect an existing ES source directory
        guess = self._guess_es_src_dir()
        if guess:
            source_dir = guess
            self.logger.debug("Autodetected Elasticsearch project directory at [%s].", source_dir)
        else:
            default_src_dir = os.path.join(root_dir, "src", "elasticsearch")
            self.logger.debug("Could not autodetect Elasticsearch project directory. Providing [%s] as default.", default_src_dir)
            source_dir = default_src_dir

        if advanced_config:
            source_dir = io.normalize_path(self._ask_property("Enter your Elasticsearch project directory:",
                                                              default_value=source_dir))
        if not advanced_config:
            self.o("* Setting up benchmark source directory in %s" % source_dir)
            self.o("")

        # Not everybody might have SSH access. Play safe with the default. It may be slower but this will work for everybody.
        repo_url = "https://github.com/elastic/elasticsearch.git"

        if advanced_config:
            data_store_choice = self._ask_property("Where should metrics be kept?"
                                                   "\n\n"
                                                   "(1) In memory (simpler but less options for analysis)\n"
                                                   "(2) Elasticsearch (requires a separate ES instance, keeps all raw samples for analysis)"
                                                   "\n\n", default_value="1", choices=["1", "2"])
            if data_store_choice == "1":
                env_name = "local"
                data_store_type = "in-memory"
                data_store_host, data_store_port, data_store_secure, data_store_user, data_store_password = "", "", "False", "", ""
            else:
                data_store_type = "elasticsearch"
                data_store_host, data_store_port, data_store_secure, data_store_user, data_store_password = self._ask_data_store()

                env_name = self._ask_env_name()

            preserve_install = convert.to_bool(self._ask_property("Do you want Rally to keep the Elasticsearch benchmark candidate "
                                                                  "installation including the index (will use several GB per race)?",
                                                                  default_value=False))
        else:
            # Does not matter for an in-memory store
            env_name = "local"
            data_store_type = "in-memory"
            data_store_host, data_store_port, data_store_secure, data_store_user, data_store_password = "", "", "False", "", ""
            preserve_install = False

        config = configparser.ConfigParser()
        config["meta"] = {}
        config["meta"]["config.version"] = str(Config.CURRENT_CONFIG_VERSION)

        config["system"] = {}
        config["system"]["env.name"] = env_name

        config["node"] = {}
        config["node"]["root.dir"] = root_dir

        final_source_dir = io.normalize_path(os.path.abspath(os.path.join(source_dir, os.pardir)))
        config["node"]["src.root.dir"] = final_source_dir

        config["source"] = {}
        config["source"]["remote.repo.url"] = repo_url
        # the Elasticsearch directory is just the last path component (relative to the source root directory)
        config["source"]["elasticsearch.src.subdir"] = io.basename(source_dir)

        config["benchmarks"] = {}
        config["benchmarks"]["local.dataset.cache"] = os.path.join(root_dir, "data")

        config["reporting"] = {}
        config["reporting"]["datastore.type"] = data_store_type
        config["reporting"]["datastore.host"] = data_store_host
        config["reporting"]["datastore.port"] = data_store_port
        config["reporting"]["datastore.secure"] = data_store_secure
        config["reporting"]["datastore.user"] = data_store_user
        config["reporting"]["datastore.password"] = data_store_password

        config["tracks"] = {}
        config["tracks"]["default.url"] = "https://github.com/elastic/rally-tracks"

        config["teams"] = {}
        config["teams"]["default.url"] = "https://github.com/elastic/rally-teams"

        config["defaults"] = {}
        config["defaults"]["preserve_benchmark_candidate"] = str(preserve_install)

        config["distributions"] = {}
        config["distributions"]["release.cache"] = "true"

        config_file.store(config)

        self.o("Configuration successfully written to %s. Happy benchmarking!" % config_file.location)
        self.o("")
        self.o("More info about Rally:")
        self.o("")
        self.o("* Type %s --help" % PROGRAM_NAME)
        self.o("* Read the documentation at %s" % console.format.link(doc_link()))
        self.o("* Ask a question on the forum at %s" % console.format.link("https://discuss.elastic.co/c/elasticsearch/rally"))
Example 8
def create_arg_parser():
    def positive_number(v):
        value = int(v)
        if value <= 0:
            raise argparse.ArgumentTypeError(f"must be positive but was {value}")
        return value

    def non_empty_list(arg):
        lst = opts.csv_to_list(arg)
        if len(lst) < 1:
            raise argparse.ArgumentError(argument=None, message="At least one argument required!")
        return lst

    def runtime_jdk(v):
        if v == "bundled":
            return v
        else:
            try:
                return positive_number(v)
            except (argparse.ArgumentTypeError, ValueError):
                # int() raises ValueError for non-numeric input, so catch it here as well
                raise argparse.ArgumentTypeError(f"must be a positive number or 'bundled' but was {v}")

    def add_track_source(subparser):
        track_source_group = subparser.add_mutually_exclusive_group()
        track_source_group.add_argument(
            "--track-repository",
            help="Define the repository from where Rally will load tracks (default: default).",
            # argparse is smart enough to use this default only if the user did not use --track-path and also did not specify anything
            default="default"
        )
        track_source_group.add_argument(
            "--track-path",
            help="Define the path to a track.")
        subparser.add_argument(
            "--track-revision",
            help="Define a specific revision in the track repository that Rally should use.",
            default=None)

    # try to preload configurable defaults, but this does not work together with `--configuration-name` (which is undocumented anyway)
    cfg = config.Config()
    if cfg.config_present():
        cfg.load_config()
        preserve_install = cfg.opts("defaults", "preserve_benchmark_candidate", default_value=False, mandatory=False)
    else:
        preserve_install = False

    parser = argparse.ArgumentParser(prog=PROGRAM_NAME,
                                     description=BANNER + "\n\n You Know, for Benchmarking Elasticsearch.",
                                     epilog="Find out more about Rally at {}".format(console.format.link(doc_link())),
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version="%(prog)s " + version.version())

    subparsers = parser.add_subparsers(
        title="subcommands",
        dest="subcommand",
        help="")

    race_parser = subparsers.add_parser("race", help="Run a benchmark")
    # change in favor of "list telemetry", "list tracks", "list pipelines"
    list_parser = subparsers.add_parser("list", help="List configuration options")
    list_parser.add_argument(
        "configuration",
        metavar="configuration",
        help="The configuration for which Rally should show the available options. "
             "Possible values are: telemetry, tracks, pipelines, races, cars, elasticsearch-plugins",
        choices=["telemetry", "tracks", "pipelines", "races", "cars", "elasticsearch-plugins"])
    list_parser.add_argument(
        "--limit",
        help="Limit the number of search results for recent races (default: 10).",
        default=10,
    )
    add_track_source(list_parser)

    info_parser = subparsers.add_parser("info", help="Show info about a track")
    add_track_source(info_parser)
    info_parser.add_argument(
        "--track",
        help=f"Define the track to use. List possible tracks with `{PROGRAM_NAME} list tracks`."
        # we set the default value later on because we need to determine whether the user has provided this value.
        # default="geonames"
    )

    info_parser.add_argument(
        "--track-params",
        help="Define a comma-separated list of key:value pairs that are injected verbatim to the track as variables.",
        default=""
    )
    info_parser.add_argument(
        "--challenge",
        help=f"Define the challenge to use. List possible challenges for tracks with `{PROGRAM_NAME} list tracks`."
    )
    info_task_filter_group = info_parser.add_mutually_exclusive_group()
    info_task_filter_group.add_argument(
        "--include-tasks",
        help="Defines a comma-separated list of tasks to run. By default all tasks of a challenge are run.")
    info_task_filter_group.add_argument(
        "--exclude-tasks",
        help="Defines a comma-separated list of tasks not to run. By default all tasks of a challenge are run.")

    create_track_parser = subparsers.add_parser("create-track", help="Create a Rally track from existing data")
    create_track_parser.add_argument(
        "--track",
        required=True,
        help="Name of the generated track")
    create_track_parser.add_argument(
        "--indices",
        type=non_empty_list,
        required=True,
        help="Comma-separated list of indices to include in the track")
    create_track_parser.add_argument(
        "--target-hosts",
        default="",
        required=True,
        help="Comma-separated list of host:port pairs which should be targeted")
    create_track_parser.add_argument(
        "--client-options",
        default=opts.ClientOptions.DEFAULT_CLIENT_OPTIONS,
        help=f"Comma-separated list of client options to use. (default: {opts.ClientOptions.DEFAULT_CLIENT_OPTIONS})")
    create_track_parser.add_argument(
        "--output-path",
        default=os.path.join(os.getcwd(), "tracks"),
        help="Track output directory (default: tracks/)")

    generate_parser = subparsers.add_parser("generate", help="Generate artifacts")
    generate_parser.add_argument(
        "artifact",
        metavar="artifact",
        help="The artifact to create. Possible values are: charts",
        choices=["charts"])
    # We allow either a chart-spec-path *or* a chart-spec defined on the fly with track, challenge and car. Convincing
    # argparse to validate that everything is correct *might* be doable but it is simpler to just do this manually.
    generate_parser.add_argument(
        "--chart-spec-path",
        required=True,
        help="Path to a JSON file(s) containing all combinations of charts to generate. Wildcard patterns can be used to specify "
             "multiple files.")
    generate_parser.add_argument(
        "--chart-type",
        help="Chart type to generate (default: time-series).",
        choices=["time-series", "bar"],
        default="time-series")
    generate_parser.add_argument(
        "--output-path",
        help="Output file name (default: stdout).",
        default=None)

    compare_parser = subparsers.add_parser("compare", help="Compare two races")
    compare_parser.add_argument(
        "--baseline",
        required=True,
        help=f"Race ID of the baseline (see {PROGRAM_NAME} list races).")
    compare_parser.add_argument(
        "--contender",
        required=True,
        help=f"Race ID of the contender (see {PROGRAM_NAME} list races).")
    compare_parser.add_argument(
        "--report-format",
        help="Define the output format for the command line report (default: markdown).",
        choices=["markdown", "csv"],
        default="markdown")
    compare_parser.add_argument(
        "--report-file",
        help="Write the command line report also to the provided file.",
        default="")

    download_parser = subparsers.add_parser("download", help="Downloads an artifact")
    download_parser.add_argument(
        "--team-repository",
        help="Define the repository from where Rally will load teams and cars (default: default).",
        default="default")
    download_parser.add_argument(
        "--team-revision",
        help="Define a specific revision in the team repository that Rally should use.",
        default=None)
    download_parser.add_argument(
        "--team-path",
        help="Define the path to the car and plugin configurations to use.")
    download_parser.add_argument(
        "--distribution-version",
        help="Define the version of the Elasticsearch distribution to download. "
             "Check https://www.elastic.co/downloads/elasticsearch for released versions.",
        default="")
    download_parser.add_argument(
        "--distribution-repository",
        help="Define the repository from where the Elasticsearch distribution should be downloaded (default: release).",
        default="release")
    download_parser.add_argument(
        "--car",
        help=f"Define the car to use. List possible cars with `{PROGRAM_NAME} list cars` (default: defaults).",
        default="defaults")  # optimized for local usage
    download_parser.add_argument(
        "--car-params",
        help="Define a comma-separated list of key:value pairs that are injected verbatim as variables for the car.",
        default=""
    )
    download_parser.add_argument(
        "--target-os",
        help="The name of the target operating system for which an artifact should be downloaded (default: current OS)",
    )
    download_parser.add_argument(
        "--target-arch",
        help="The name of the CPU architecture for which an artifact should be downloaded (default: current architecture)",
    )

    install_parser = subparsers.add_parser("install", help="Installs an Elasticsearch node locally")
    install_parser.add_argument(
        "--revision",
        help="Define the source code revision for building the benchmark candidate. 'current' uses the source tree as is,"
             " 'latest' fetches the latest version on master. It is also possible to specify a commit id or a timestamp."
             " The timestamp must be specified as: \"@ts\" where \"ts\" must be a valid ISO 8601 timestamp, "
             "e.g. \"@2013-07-27T10:37:00Z\" (default: current).",
        default="current")  # optimized for local usage, don't fetch sources
    install_parser.add_argument(
        "--skip-build",
        help="Whether Rally should skip rebuilding Elasticsearch (default: false).",
        default=False,
        action="store_true")
    # Intentionally undocumented as we do not consider Docker a fully supported option.
    install_parser.add_argument(
        "--build-type",
        help=argparse.SUPPRESS,
        choices=["tar", "docker"],
        default="tar")
    install_parser.add_argument(
        "--team-repository",
        help="Define the repository from where Rally will load teams and cars (default: default).",
        default="default")
    install_parser.add_argument(
        "--team-revision",
        help="Define a specific revision in the team repository that Rally should use.",
        default=None)
    install_parser.add_argument(
        "--team-path",
        help="Define the path to the car and plugin configurations to use.")
    install_parser.add_argument(
        "--runtime-jdk",
        type=runtime_jdk,
        help="The major version of the runtime JDK to use during installation.",
        default=None)
    install_parser.add_argument(
        "--distribution-repository",
        help="Define the repository from where the Elasticsearch distribution should be downloaded (default: release).",
        default="release")
    install_parser.add_argument(
        "--distribution-version",
        help="Define the version of the Elasticsearch distribution to download. "
             "Check https://www.elastic.co/downloads/elasticsearch for released versions.",
        default="")
    install_parser.add_argument(
        "--car",
        help=f"Define the car to use. List possible cars with `{PROGRAM_NAME} list cars` (default: defaults).",
        default="defaults")  # optimized for local usage
    install_parser.add_argument(
        "--car-params",
        help="Define a comma-separated list of key:value pairs that are injected verbatim as variables for the car.",
        default=""
    )
    install_parser.add_argument(
        "--elasticsearch-plugins",
        help="Define the Elasticsearch plugins to install. (default: install no plugins).",
        default="")
    install_parser.add_argument(
        "--plugin-params",
        help="Define a comma-separated list of key:value pairs that are injected verbatim to all plugins as variables.",
        default=""
    )
    install_parser.add_argument(
        "--network-host",
        help="The IP address to bind to and publish",
        default="127.0.0.1"
    )
    install_parser.add_argument(
        "--http-port",
        help="The port to expose for HTTP traffic",
        default="39200"
    )
    install_parser.add_argument(
        "--node-name",
        help="The name of this Elasticsearch node",
        default="rally-node-0"
    )
    install_parser.add_argument(
        "--master-nodes",
        help="A comma-separated list of the initial master node names",
        default=""
    )
    install_parser.add_argument(
        "--seed-hosts",
        help="A comma-separated list of the initial seed host IPs",
        default=""
    )

    start_parser = subparsers.add_parser("start", help="Starts an Elasticsearch node locally")
    start_parser.add_argument(
        "--installation-id",
        required=True,
        help="The id of the installation to start",
        # the default will be dynamically derived by racecontrol based on the presence / absence of other command line options
        default="")
    start_parser.add_argument(
        "--race-id",
        required=True,
        help="Define a unique id for this race.",
        default="")
    start_parser.add_argument(
        "--runtime-jdk",
        type=runtime_jdk,
        help="The major version of the runtime JDK to use.",
        default=None)
    start_parser.add_argument(
        "--telemetry",
        help=f"Enable the provided telemetry devices, provided as a comma-separated list. List possible telemetry "
             f"devices with `{PROGRAM_NAME} list telemetry`.",
        default="")
    start_parser.add_argument(
        "--telemetry-params",
        help="Define a comma-separated list of key:value pairs that are injected verbatim to the telemetry devices as parameters.",
        default=""
    )

    stop_parser = subparsers.add_parser("stop", help="Stops an Elasticsearch node locally")
    stop_parser.add_argument(
        "--installation-id",
        required=True,
        help="The id of the installation to stop",
        # the default will be dynamically derived by racecontrol based on the presence / absence of other command line options
        default="")
    stop_parser.add_argument(
        "--preserve-install",
        help=f"Keep the benchmark candidate and its index. (default: {str(preserve_install).lower()}).",
        default=preserve_install,
        action="store_true")

    for p in [list_parser, race_parser]:
        p.add_argument(
            "--distribution-version",
            help="Define the version of the Elasticsearch distribution to download. "
                 "Check https://www.elastic.co/downloads/elasticsearch for released versions.",
            default="")
        p.add_argument(
            "--team-path",
            help="Define the path to the car and plugin configurations to use.")
        p.add_argument(
            "--team-repository",
            help="Define the repository from where Rally will load teams and cars (default: default).",
            default="default")
        p.add_argument(
            "--team-revision",
            help="Define a specific revision in the team repository that Rally should use.",
            default=None)

    race_parser.add_argument(
        "--race-id",
        help="Define a unique id for this race.",
        default=str(uuid.uuid4()))
    race_parser.add_argument(
        "--pipeline",
        help="Select the pipeline to run.",
        # the default will be dynamically derived by racecontrol based on the presence / absence of other command line options
        default="")
    race_parser.add_argument(
        "--revision",
        help="Define the source code revision for building the benchmark candidate. 'current' uses the source tree as is,"
             " 'latest' fetches the latest version on master. It is also possible to specify a commit id or a timestamp."
             " The timestamp must be specified as: \"@ts\" where \"ts\" must be a valid ISO 8601 timestamp, "
             "e.g. \"@2013-07-27T10:37:00Z\" (default: current).",
        default="current")  # optimized for local usage, don't fetch sources
    add_track_source(race_parser)
    race_parser.add_argument(
        "--track",
        help=f"Define the track to use. List possible tracks with `{PROGRAM_NAME} list tracks`."
    )
    race_parser.add_argument(
        "--track-params",
        help="Define a comma-separated list of key:value pairs that are injected verbatim to the track as variables.",
        default=""
    )
    race_parser.add_argument(
        "--challenge",
        help=f"Define the challenge to use. List possible challenges for tracks with `{PROGRAM_NAME} list tracks`.")
    race_parser.add_argument(
        "--car",
        help=f"Define the car to use. List possible cars with `{PROGRAM_NAME} list cars` (default: defaults).",
        default="defaults")  # optimized for local usage
    race_parser.add_argument(
        "--car-params",
        help="Define a comma-separated list of key:value pairs that are injected verbatim as variables for the car.",
        default=""
    )
    race_parser.add_argument(
        "--runtime-jdk",
        type=runtime_jdk,
        help="The major version of the runtime JDK to use.",
        default=None)
    race_parser.add_argument(
        "--elasticsearch-plugins",
        help="Define the Elasticsearch plugins to install. (default: install no plugins).",
        default="")
    race_parser.add_argument(
        "--plugin-params",
        help="Define a comma-separated list of key:value pairs that are injected verbatim to all plugins as variables.",
        default=""
    )
    race_parser.add_argument(
        "--target-hosts",
        help="Define a comma-separated list of host:port pairs which should be targeted if using the pipeline 'benchmark-only' "
             "(default: localhost:9200).",
        default="")  # actually the default is pipeline specific and it is set later
    race_parser.add_argument(
        "--load-driver-hosts",
        help="Define a comma-separated list of hosts which should generate load (default: localhost).",
        default="localhost")
    race_parser.add_argument(
        "--client-options",
        help=f"Define a comma-separated list of client options to use. The options will be passed to the Elasticsearch "
             f"Python client (default: {opts.ClientOptions.DEFAULT_CLIENT_OPTIONS}).",
        default=opts.ClientOptions.DEFAULT_CLIENT_OPTIONS)
    race_parser.add_argument("--on-error",
                             choices=["continue", "continue-on-non-fatal", "abort"],
                             help="Controls how Rally behaves on response errors (default: continue-on-non-fatal).",
                             default="continue-on-non-fatal")
    race_parser.add_argument(
        "--telemetry",
        help=f"Enable the provided telemetry devices, provided as a comma-separated list. List possible telemetry "
             f"devices with `{PROGRAM_NAME} list telemetry`.",
        default="")
    race_parser.add_argument(
        "--telemetry-params",
        help="Define a comma-separated list of key:value pairs that are injected verbatim to the telemetry devices as parameters.",
        default=""
    )
    race_parser.add_argument(
        "--distribution-repository",
        help="Define the repository from where the Elasticsearch distribution should be downloaded (default: release).",
        default="release")

    task_filter_group = race_parser.add_mutually_exclusive_group()
    task_filter_group.add_argument(
        "--include-tasks",
        help="Defines a comma-separated list of tasks to run. By default all tasks of a challenge are run.")
    task_filter_group.add_argument(
        "--exclude-tasks",
        help="Defines a comma-separated list of tasks not to run. By default all tasks of a challenge are run.")
    race_parser.add_argument(
        "--user-tag",
        help="Define a user-specific key-value pair (separated by ':'). It is added to each metric record as meta info. "
             "Example: intention:baseline-ticket-12345",
        default="")
    race_parser.add_argument(
        "--report-format",
        help="Define the output format for the command line report (default: markdown).",
        choices=["markdown", "csv"],
        default="markdown")
    race_parser.add_argument(
        "--show-in-report",
        help="Define which values are shown in the summary report (default: available).",
        choices=["available", "all-percentiles", "all"],
        default="available")
    race_parser.add_argument(
        "--report-file",
        help="Write the command line report also to the provided file.",
        default="")
    race_parser.add_argument(
        "--preserve-install",
        help=f"Keep the benchmark candidate and its index. (default: {str(preserve_install).lower()}).",
        default=preserve_install,
        action="store_true")
    race_parser.add_argument(
        "--test-mode",
        help="Runs the given track in 'test mode'. Meant to check a track for errors but not for real benchmarks (default: false).",
        default=False,
        action="store_true")
    race_parser.add_argument(
        "--enable-driver-profiling",
        help="Enables a profiler for analyzing the performance of calls in Rally's driver (default: false).",
        default=False,
        action="store_true")
    race_parser.add_argument(
        "--enable-assertions",
        help="Enables assertion checks for tasks (default: false).",
        default=False,
        action="store_true")
    race_parser.add_argument(
        "--kill-running-processes",
        action="store_true",
        default=False,
        help="If any processes is running, it is going to kill them and allow Rally to continue to run."
    )

    ###############################################################################
    #
    # The options below are undocumented and can be removed or changed at any time.
    #
    ###############################################################################
    # This option is intended to tell Rally to assume a different start date than 'now'. This is effectively just useful for things like
    # backtesting or a benchmark run across environments (think: comparison of EC2 and bare metal) but never for the typical user.
    race_parser.add_argument(
        "--effective-start-date",
        help=argparse.SUPPRESS,
        type=lambda s: datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S"),
        default=None)
    # skips checking that the REST API is available before proceeding with the benchmark
    race_parser.add_argument(
        "--skip-rest-api-check",
        help=argparse.SUPPRESS,
        action="store_true",
        default=False)

    for p in [list_parser, race_parser, compare_parser, download_parser, install_parser,
              start_parser, stop_parser, info_parser, generate_parser, create_track_parser]:
        # This option is needed to support a separate configuration for the integration tests on the same machine
        p.add_argument(
            "--configuration-name",
            help=argparse.SUPPRESS,
            default=None)
        p.add_argument(
            "--quiet",
            help="Suppress as much as output as possible (default: false).",
            default=False,
            action="store_true")
        p.add_argument(
            "--offline",
            help="Assume that Rally has no connection to the Internet (default: false).",
            default=False,
            action="store_true")

    return parser
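
For context, a minimal usage sketch of the parser built above (assumptions: run inside Rally's module context where create_arg_parser() is importable; the track and version values are illustrative):

parser = create_arg_parser()
args = parser.parse_args(["race", "--track=geonames", "--distribution-version=7.10.2"])
print(args.subcommand, args.track, args.distribution_version)  # -> race geonames 7.10.2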
Example 9
    def __init__(self, hosts, client_options):
        self.hosts = hosts
        self.client_options = dict(client_options)
        self.ssl_context = None
        self.logger = logging.getLogger(__name__)

        masked_client_options = dict(client_options)
        if "basic_auth_password" in masked_client_options:
            masked_client_options["basic_auth_password"] = "******"
        if "http_auth" in masked_client_options:
            masked_client_options["http_auth"] = (masked_client_options["http_auth"][0], "*****")
        self.logger.info("Creating ES client connected to %s with options [%s]", hosts, masked_client_options)

        # we now use an SSL context; use_ssl must not be present in the client options anymore
        if self.client_options.pop("use_ssl", False):
            import ssl
            self.logger.info("SSL support: on")
            self.client_options["scheme"] = "https"

            # ssl.Purpose.CLIENT_AUTH allows presenting client certs and can only be enabled during instantiation
            # but can be disabled via the verify_mode property later on.
            self.ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH,
                                                          cafile=self.client_options.pop("ca_certs", certifi.where()))

            if not self.client_options.pop("verify_certs", True):
                self.logger.info("SSL certificate verification: off")
                # order matters to avoid ValueError: check_hostname needs a SSL context with either CERT_OPTIONAL or CERT_REQUIRED
                self.ssl_context.verify_mode = ssl.CERT_NONE
                self.ssl_context.check_hostname = False

                self.logger.warning("User has enabled SSL but disabled certificate verification. This is dangerous but may be ok for a "
                                    "benchmark. Disabling urllib warnings now to avoid a logging storm. "
                                    "See https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings for details.")
                # disable:  "InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly \
                # advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings"
                urllib3.disable_warnings()
            else:
                self.ssl_context.verify_mode = ssl.CERT_REQUIRED
                self.ssl_context.check_hostname = True
                self.logger.info("SSL certificate verification: on")

            # When using an SSL context, all SSL-related kwargs in the client options are ignored
            client_cert = self.client_options.pop("client_cert", False)
            client_key = self.client_options.pop("client_key", False)

            if not client_cert and not client_key:
                self.logger.info("SSL client authentication: off")
            elif bool(client_cert) != bool(client_key):
                self.logger.error("Supplied client-options contain only one of client_cert/client_key.")
                defined_client_ssl_option = "client_key" if client_key else "client_cert"
                missing_client_ssl_option = "client_cert" if client_key else "client_key"
                console.println(
                    "'{}' is missing from client-options but '{}' has been specified.\n"
                    "If your Elasticsearch setup requires client certificate verification both need to be supplied.\n"
                    "Read the documentation at {}\n".format(
                        missing_client_ssl_option,
                        defined_client_ssl_option,
                        console.format.link(doc_link("command_line_reference.html#client-options")))
                )
                raise exceptions.SystemSetupError(
                    "Cannot specify '{}' without also specifying '{}' in client-options.".format(
                        defined_client_ssl_option,
                        missing_client_ssl_option))
            elif client_cert and client_key:
                self.logger.info("SSL client authentication: on")
                self.ssl_context.load_cert_chain(certfile=client_cert,
                                                 keyfile=client_key)
        else:
            self.logger.info("SSL support: off")
            self.client_options["scheme"] = "http"

        if self._is_set(self.client_options, "basic_auth_user") and self._is_set(self.client_options, "basic_auth_password"):
            self.logger.info("HTTP basic authentication: on")
            self.client_options["http_auth"] = (self.client_options.pop("basic_auth_user"), self.client_options.pop("basic_auth_password"))
        else:
            self.logger.info("HTTP basic authentication: off")

        if self._is_set(self.client_options, "compressed"):
            console.warn("You set the deprecated client option 'compressed‘. Please use 'http_compress' instead.", logger=self.logger)
            self.client_options["http_compress"] = self.client_options.pop("compressed")

        if self._is_set(self.client_options, "http_compress"):
            self.logger.info("HTTP compression: on")
        else:
            self.logger.info("HTTP compression: off")

        if self._is_set(self.client_options, "enable_cleanup_closed"):
            self.client_options["enable_cleanup_closed"] = convert.to_bool(self.client_options.pop("enable_cleanup_closed"))