Example #1
    def cache(self):
        k = "%s.cache" % self.name
        try:
            raw_value = self.cfg[k]
        except KeyError:
            raise exceptions.SystemSetupError("Mandatory config key [%s] is undefined." % k)
        try:
            return convert.to_bool(raw_value)
        except ValueError:
            raise exceptions.SystemSetupError("Value [%s] for config key [%s] is not a valid boolean value." % (raw_value, k))
Example #2
    def _ask_data_store(self):
        data_store_host = self._ask_property("Enter the host name of the ES metrics store", default_value="localhost")
        data_store_port = self._ask_property("Enter the port of the ES metrics store", check_pattern=ConfigFactory.PORT_RANGE_PATTERN)
        data_store_secure = self._ask_property("Use secure connection (True, False)", default_value=False,
                                               check_pattern=ConfigFactory.BOOLEAN_PATTERN)
        data_store_user = self._ask_property("Username for basic authentication (empty if not needed)", mandatory=False, default_value="")
        data_store_password = self._ask_property("Password for basic authentication (empty if not needed)", mandatory=False,
                                                 default_value="", sensitive=True)
        # do an intermediate conversion to bool in order to normalize input
        return data_store_host, data_store_port, str(convert.to_bool(data_store_secure)), data_store_user, data_store_password
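The str(convert.to_bool(...)) round-trip in the return statement is what the comment means by normalizing input: every accepted spelling collapses to a canonical "True" or "False" string before the value is persisted. A small illustration (assuming convert is esrally.utils.convert; the accepted spellings are pinned down by the test examples further below):

from esrally.utils import convert  # assumes esrally is installed

# every accepted spelling normalizes to exactly "True" or "False"
for raw in ["Yes", "y", "1", "true", "no", "f", "0", "False"]:
    print("%s -> %s" % (raw, str(convert.to_bool(raw))))
# Yes -> True, y -> True, ..., no -> False, False -> False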
Example #3
    def __init__(self, config):
        self.report_file = config.opts("reporting", "output.path")
        self.report_format = config.opts("reporting", "format")
        self.numbers_align = config.opts("reporting",
                                         "numbers.align",
                                         mandatory=False,
                                         default_value="decimal")
        self.cwd = config.opts("node", "rally.cwd")
        self.show_processing_time = convert.to_bool(
            config.opts("reporting",
                        "output.processingtime",
                        mandatory=False,
                        default_value=False))
        self.plain = False
Example #4
def main():
    pre_configure_logging()
    args = parse_args()
    print(BANNER)

    cfg = config.Config(config_name=args.configuration_name)
    sub_command = derive_sub_command(args, cfg)
    ensure_configuration_present(cfg, args, sub_command)
    # Add global meta info derived by rally itself
    cfg.add(config.Scope.application, "meta", "time.start", args.effective_start_date)
    cfg.add(config.Scope.application, "system", "rally.root", rally_root_path())
    cfg.add(config.Scope.application, "system", "invocation.root.dir", paths.Paths(cfg).invocation_root())
    # Add command line config
    cfg.add(config.Scope.applicationOverride, "source", "revision", args.revision)
    cfg.add(config.Scope.applicationOverride, "source", "distribution.version", args.distribution_version)
    cfg.add(config.Scope.applicationOverride, "source", "distribution.repository", args.distribution_repository)
    cfg.add(config.Scope.applicationOverride, "system", "pipeline", args.pipeline)
    cfg.add(config.Scope.applicationOverride, "system", "track.repository", args.track_repository)
    cfg.add(config.Scope.applicationOverride, "system", "track", args.track)
    cfg.add(config.Scope.applicationOverride, "system", "quiet.mode", args.quiet)
    cfg.add(config.Scope.applicationOverride, "system", "offline.mode", args.offline)
    cfg.add(config.Scope.applicationOverride, "system", "user.tag", args.user_tag)
    cfg.add(config.Scope.applicationOverride, "telemetry", "devices", csv_to_list(args.telemetry))
    cfg.add(config.Scope.applicationOverride, "benchmarks", "challenge", args.challenge)
    cfg.add(config.Scope.applicationOverride, "benchmarks", "car", args.car)
    cfg.add(config.Scope.applicationOverride, "benchmarks", "rounds", args.rounds)
    cfg.add(config.Scope.applicationOverride, "provisioning", "datapaths", csv_to_list(args.data_paths))
    cfg.add(config.Scope.applicationOverride, "provisioning", "install.preserve", convert.to_bool(args.preserve_install))
    cfg.add(config.Scope.applicationOverride, "launcher", "external.target.hosts", csv_to_list(args.target_hosts))
    cfg.add(config.Scope.applicationOverride, "launcher", "client.options", kv_to_map(csv_to_list(args.client_options)))
    cfg.add(config.Scope.applicationOverride, "report", "reportformat", args.report_format)
    cfg.add(config.Scope.applicationOverride, "report", "reportfile", args.report_file)
    if sub_command == "list":
        cfg.add(config.Scope.applicationOverride, "system", "list.config.option", args.configuration)
        cfg.add(config.Scope.applicationOverride, "system", "list.races.max_results", args.limit)
    if sub_command == "compare":
        cfg.add(config.Scope.applicationOverride, "report", "comparison.baseline.timestamp", args.baseline)
        cfg.add(config.Scope.applicationOverride, "report", "comparison.contender.timestamp", args.contender)

    configure_logging(cfg)

    logger.info("Rally version [%s]" % version())
    logger.info("Command line arguments: %s" % args)

    success = dispatch_sub_command(cfg, sub_command)
    if not success:
        sys.exit(1)
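main() above leans on csv_to_list and kv_to_map to turn comma-separated command line values into lists and maps. A plausible sketch of such helpers for readers without the Rally source at hand — an illustrative guess, not Rally's actual implementation (in particular, the colon as key-value separator is an assumption):

def csv_to_list(csv):
    # "a, b ,c" -> ["a", "b", "c"]; None or blank input -> []
    if csv is None or not csv.strip():
        return []
    return [e.strip() for e in csv.split(",")]

def kv_to_map(kvs):
    # ["timeout:60", "use_ssl:true"] -> {"timeout": "60", "use_ssl": "true"}
    result = {}
    for kv in kvs:
        key, _, value = kv.partition(":")
        result[key.strip()] = value.strip()
    return result

print(kv_to_map(csv_to_list("timeout:60,use_ssl:true")))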
Example #5
    def __init__(self, results, config):
        self.results = results
        self.report_file = config.opts("reporting", "output.path")
        self.report_format = config.opts("reporting", "format")
        self.numbers_align = config.opts("reporting",
                                         "numbers.align",
                                         mandatory=False,
                                         default_value="decimal")
        reporting_values = config.opts("reporting", "values")
        self.report_all_values = reporting_values == "all"
        self.report_all_percentile_values = reporting_values == "all-percentiles"
        self.show_processing_time = convert.to_bool(
            config.opts("reporting",
                        "output.processingtime",
                        mandatory=False,
                        default_value=False))
        self.cwd = config.opts("node", "rally.cwd")
Example #6
    def _ask_data_store(self):
        data_store_host = self._ask_property(
            "Enter the host name of the ES metrics store",
            default_value="localhost")
        data_store_port = self._ask_property(
            "Enter the port of the ES metrics store",
            check_pattern=ConfigFactory.PORT_RANGE_PATTERN)
        data_store_secure = self._ask_property(
            "Use secure connection (True, False)",
            default_value=False,
            check_pattern=ConfigFactory.BOOLEAN_PATTERN)
        data_store_user = self._ask_property(
            "Username for basic authentication (empty if not needed)",
            mandatory=False,
            default_value="")
        data_store_password = self._ask_property(
            "Password for basic authentication (empty if not needed)",
            mandatory=False,
            default_value="",
            sensitive=True)
        # do an intermediate conversion to bool in order to normalize input
        return data_store_host, data_store_port, str(
            convert.to_bool(
                data_store_secure)), data_store_user, data_store_password
Example #7
    def prepare(self, binary):
        self.es_installer.install(binary["elasticsearch"])
        # we need to immediately delete it as plugins may copy their configuration during installation.
        self.es_installer.delete_pre_bundled_configuration()

        # determine after installation because some variables will depend on the install directory
        target_root_path = self.es_installer.es_home_path
        provisioner_vars = self._provisioner_variables()
        for p in self.es_installer.config_source_paths:
            self.apply_config(p, target_root_path, provisioner_vars)

        for installer in self.plugin_installers:
            installer.install(target_root_path,
                              binary.get(installer.plugin_name))
            for plugin_config_path in installer.config_source_paths:
                self.apply_config(plugin_config_path, target_root_path,
                                  provisioner_vars)

        # Never let install hooks modify our original provisioner variables and just provide a copy!
        self.es_installer.invoke_install_hook(team.BootstrapPhase.post_install,
                                              provisioner_vars.copy())
        for installer in self.plugin_installers:
            installer.invoke_install_hook(team.BootstrapPhase.post_install,
                                          provisioner_vars.copy())

        return NodeConfiguration(
            "tar",
            self.es_installer.car.mandatory_var("runtime.jdk"),
            convert.to_bool(
                self.es_installer.car.mandatory_var("runtime.jdk.bundled")),
            self.es_installer.node_ip,
            self.es_installer.node_name,
            self.es_installer.node_root_dir,
            self.es_installer.es_home_path,
            self.es_installer.data_paths,
        )
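The provisioner_vars.copy() handed to each install hook is defensive copying, as the comment says: a hook that mutates its argument cannot corrupt the variables used by later hooks. A tiny illustration with a hypothetical hook (not Rally API):

provisioner_vars = {"data_paths": ["/var/lib/es"]}

def careless_hook(variables):
    variables.clear()  # a hook that mutates its input

careless_hook(provisioner_vars.copy())  # only the copy is cleared
assert provisioner_vars == {"data_paths": ["/var/lib/es"]}

Note that dict.copy() is shallow, so a hook mutating a nested list in place would still affect the original.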
Example #8
def main():
    check_python_version()

    start = time.time()

    # Early init of console output so we start to show everything consistently.
    console.init(quiet=False)
    # allow a thread dump to be shown on SIGQUIT
    faulthandler.register(signal.SIGQUIT, file=sys.stderr)

    pre_configure_logging()
    arg_parser = create_arg_parser()
    args = arg_parser.parse_args()

    console.init(quiet=args.quiet)
    console.println(BANNER)

    cfg = config.Config(config_name=args.configuration_name)
    sub_command = derive_sub_command(args, cfg)
    ensure_configuration_present(cfg, args, sub_command)

    if args.effective_start_date:
        cfg.add(config.Scope.application, "system", "time.start",
                args.effective_start_date)
        cfg.add(config.Scope.application, "system", "time.start.user_provided",
                True)
    else:
        cfg.add(config.Scope.application, "system", "time.start",
                datetime.datetime.utcnow())
        cfg.add(config.Scope.application, "system", "time.start.user_provided",
                False)

    cfg.add(config.Scope.applicationOverride, "system", "quiet.mode",
            args.quiet)

    # per node?
    cfg.add(config.Scope.applicationOverride, "system", "offline.mode",
            args.offline)
    cfg.add(config.Scope.applicationOverride, "system", "logging.output",
            args.logging)

    # Local config per node
    cfg.add(config.Scope.application, "node", "rally.root", paths.rally_root())
    cfg.add(config.Scope.application, "node", "rally.cwd", os.getcwd())

    cfg.add(config.Scope.applicationOverride, "mechanic", "source.revision",
            args.revision)
    if args.distribution_version:
        cfg.add(config.Scope.applicationOverride, "mechanic",
                "distribution.version", args.distribution_version)
    cfg.add(config.Scope.applicationOverride, "mechanic",
            "distribution.repository", args.distribution_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "repository.name",
            args.team_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.names",
            csv_to_list(args.car))
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.plugins",
            csv_to_list(args.elasticsearch_plugins))
    cfg.add(config.Scope.applicationOverride, "mechanic", "node.datapaths",
            csv_to_list(args.data_paths))
    if args.keep_cluster_running:
        cfg.add(config.Scope.applicationOverride, "mechanic", "keep.running",
                True)
        # force-preserve the cluster nodes.
        cfg.add(config.Scope.applicationOverride, "mechanic",
                "preserve.install", True)
    else:
        cfg.add(config.Scope.applicationOverride, "mechanic", "keep.running",
                False)
        cfg.add(config.Scope.applicationOverride, "mechanic",
                "preserve.install", convert.to_bool(args.preserve_install))
    cfg.add(config.Scope.applicationOverride, "mechanic", "telemetry.devices",
            csv_to_list(args.telemetry))

    cfg.add(config.Scope.applicationOverride, "race", "pipeline",
            args.pipeline)
    cfg.add(config.Scope.applicationOverride, "race", "laps", args.laps)
    cfg.add(config.Scope.applicationOverride, "race", "user.tag",
            args.user_tag)

    # We can assume here that if a track-path is given, the user did not specify a repository either (although argparse sets it to
    # its default value)
    if args.track_path:
        cfg.add(config.Scope.applicationOverride, "track", "track.path",
                os.path.abspath(io.normalize_path(args.track_path)))
        cfg.add(config.Scope.applicationOverride, "track", "repository.name",
                None)
        if args.track:
            # stay as close as possible to argparse errors although we have a custom validation.
            arg_parser.error(
                "argument --track not allowed with argument --track-path")
        # cfg.add(config.Scope.applicationOverride, "track", "track.name", None)
    else:
        # cfg.add(config.Scope.applicationOverride, "track", "track.path", None)
        cfg.add(config.Scope.applicationOverride, "track", "repository.name",
                args.track_repository)
        # set the default programmatically because we need to determine whether the user has provided a value
        chosen_track = args.track if args.track else "geonames"
        cfg.add(config.Scope.applicationOverride, "track", "track.name",
                chosen_track)

    cfg.add(config.Scope.applicationOverride, "track", "params",
            kv_to_map(csv_to_list(args.track_params)))
    cfg.add(config.Scope.applicationOverride, "track", "challenge.name",
            args.challenge)
    cfg.add(config.Scope.applicationOverride, "track", "include.tasks",
            csv_to_list(args.include_tasks))
    cfg.add(config.Scope.applicationOverride, "track", "test.mode.enabled",
            args.test_mode)
    cfg.add(config.Scope.applicationOverride, "track", "auto_manage_indices",
            to_bool(args.auto_manage_indices))

    cfg.add(config.Scope.applicationOverride, "reporting", "format",
            args.report_format)
    cfg.add(config.Scope.applicationOverride, "reporting", "values",
            args.show_in_report)
    cfg.add(config.Scope.applicationOverride, "reporting", "output.path",
            args.report_file)
    if sub_command == "compare":
        cfg.add(config.Scope.applicationOverride, "reporting",
                "baseline.timestamp", args.baseline)
        cfg.add(config.Scope.applicationOverride, "reporting",
                "contender.timestamp", args.contender)

    cfg.add(config.Scope.applicationOverride, "driver", "cluster.health",
            args.cluster_health)
    if args.cluster_health != "green":
        console.warn(
            "--cluster-health is deprecated and will be removed in a future version of Rally."
        )
    cfg.add(config.Scope.applicationOverride, "driver", "profiling",
            args.enable_driver_profiling)
    cfg.add(config.Scope.applicationOverride, "driver", "on.error",
            args.on_error)
    cfg.add(config.Scope.applicationOverride, "driver", "load_driver_hosts",
            csv_to_list(args.load_driver_hosts))
    if sub_command != "list":
        # Also needed by mechanic (-> telemetry) - duplicate by module?
        cfg.add(config.Scope.applicationOverride, "client", "hosts",
                _normalize_hosts(csv_to_list(args.target_hosts)))
        client_options = kv_to_map(csv_to_list(args.client_options))
        cfg.add(config.Scope.applicationOverride, "client", "options",
                client_options)
        if "timeout" not in client_options:
            console.info(
                "You did not provide an explicit timeout in the client options. Assuming default of 10 seconds."
            )

    # split by component?
    if sub_command == "list":
        cfg.add(config.Scope.applicationOverride, "system",
                "list.config.option", args.configuration)
        cfg.add(config.Scope.applicationOverride, "system",
                "list.races.max_results", args.limit)

    configure_logging(cfg)
    logger.info("OS [%s]" % str(os.uname()))
    logger.info("Python [%s]" % str(sys.implementation))
    logger.info("Rally version [%s]" % version.version())
    logger.info("Command line arguments: %s" % args)
    # Configure networking
    net.init()
    if not args.offline:
        if not net.has_internet_connection():
            console.warn(
                "No Internet connection detected. Automatic download of track data sets etc. is disabled.",
                logger=logger)
            cfg.add(config.Scope.applicationOverride, "system", "offline.mode",
                    True)
        else:
            logger.info("Detected a working Internet connection.")

    # Kill any lingering Rally processes before attempting to continue - the actor system needs to be a singleton on this machine
    # noinspection PyBroadException
    try:
        process.kill_running_rally_instances()
    except BaseException:
        logger.exception(
            "Could not terminate potentially running Rally instances correctly. Attempting to go on anyway."
        )

    success = dispatch_sub_command(cfg, sub_command)

    end = time.time()
    if success:
        console.println("")
        console.info("SUCCESS (took %d seconds)" % (end - start),
                     overline="-",
                     underline="-")
    else:
        console.println("")
        console.info("FAILURE (took %d seconds)" % (end - start),
                     overline="-",
                     underline="-")
        sys.exit(64)
Example #9
    def create_config(self, config_file, advanced_config=False, assume_defaults=False):
        """
        Either creates a new configuration file or overwrites an existing one. Will ask the user for input on configurable properties
        and writes them to the configuration file in ~/.rally/rally.ini.

        :param config_file:
        :param advanced_config: Whether to ask for properties that are not necessary for everyday use (on a dev machine). Default: False.
        :param assume_defaults: If True, assume the user accepted all values for which defaults are provided. Mainly intended for automatic
        configuration in CI runs. Default: False.
        """
        self.assume_defaults = assume_defaults
        if advanced_config:
            self.o("Running advanced configuration. You can get additional help at:")
            self.o("")
            self.o("  %s" % console.format.link("%sconfiguration.html" % DOC_LINK))
            self.o("")

            logger.info("Running advanced configuration routine.")
            self.o("")
        else:
            self.o("Running simple configuration. Run the advanced configuration with:")
            self.o("")
            self.o("  %s configure --advanced-config" % PROGRAM_NAME)
            self.o("")
            logger.info("Running simple configuration routine.")

        if config_file.present:
            self.o("\nWARNING: Will overwrite existing config file at [%s]\n" % config_file.location)
            logger.debug("Detected an existing configuration file at [%s]" % config_file.location)
        else:
            logger.debug("Did not detect a configuration file at [%s]. Running initial configuration routine." % config_file.location)

        # Autodetect settings
        self.o("[✓] Autodetecting available third-party software")
        git_path = io.guess_install_location("git")
        gradle_bin = io.guess_install_location("gradle")
        # default_jdk_7 = io.guess_java_home(major_version=7)
        default_jdk_8 = io.guess_java_home(major_version=8)

        self.print_detection_result("git    ", git_path)
        self.print_detection_result("gradle ", gradle_bin)
        self.print_detection_result("JDK 8  ", default_jdk_8,
                                    warn_if_missing=True,
                                    additional_message="You cannot benchmark Elasticsearch 5.x without a JDK 8 installation")
        # self.print_detection_result("JDK 9 ", default_jdk_9, warn_if_missing=True)
        self.o("")

        # users that don't have Gradle available cannot benchmark from sources
        benchmark_from_sources = gradle_bin

        if not benchmark_from_sources:
            self.o("**********************************************************************************")
            self.o("You don't have the necessary software to benchmark source builds of Elasticsearch.")
            self.o("")
            self.o("You can still benchmark binary distributions with e.g.:")
            self.o("")
            self.o("  %s --distribution-version=5.0.0" % PROGRAM_NAME)
            self.o("**********************************************************************************")
            self.o("")

        root_dir = "%s/benchmarks" % config_file.config_dir
        self.o("[✓] Setting up benchmark data directory in [%s] (needs several GB)." % root_dir)

        if benchmark_from_sources:
            # We try to autodetect an existing ES source directory
            guess = self._guess_es_src_dir()
            if guess:
                source_dir = guess
                self.o("[✓] Autodetected Elasticsearch project directory at [%s]." % source_dir)
                logger.debug("Autodetected Elasticsearch project directory at [%s]." % source_dir)
            else:
                default_src_dir = "%s/src" % root_dir
                logger.debug("Could not autodetect Elasticsearch project directory. Providing [%s] as default." % default_src_dir)
                source_dir = io.normalize_path(self._ask_property("Enter your Elasticsearch project directory:",
                                                                  default_value=default_src_dir))
            # Not everybody might have SSH access. Play safe with the default. It may be slower but this will work for everybody.
            repo_url = "https://github.com/elastic/elasticsearch.git"

        if default_jdk_8:
            jdk8_home = default_jdk_8
        else:
            self.o("")
            jdk8_home = io.normalize_path(self._ask_property("Enter the JDK 8 root directory:", check_path_exists=True))

        if advanced_config:
            env_name = self._ask_env_name()
            data_store_type = "elasticsearch"
            data_store_host, data_store_port, data_store_secure, data_store_user, data_store_password = self._ask_data_store()

            preserve_install = convert.to_bool(self._ask_property("Do you want Rally to keep the Elasticsearch benchmark candidate "
                                                                  "installation including the index (will use lots of disk space)?",
                                                                  default_value=False))
        else:
            # Does not matter too much for an in-memory store
            env_name = "local"
            data_store_type = "in-memory"
            data_store_host, data_store_port, data_store_secure, data_store_user, data_store_password = "", "", "", "", ""
            preserve_install = False

        config = configparser.ConfigParser()
        config["meta"] = {}
        config["meta"]["config.version"] = str(Config.CURRENT_CONFIG_VERSION)

        config["system"] = {}
        config["system"]["root.dir"] = root_dir
        config["system"]["log.root.dir"] = "logs"
        config["system"]["env.name"] = env_name

        if benchmark_from_sources:
            config["source"] = {}
            config["source"]["local.src.dir"] = source_dir
            config["source"]["remote.repo.url"] = repo_url

            config["build"] = {}
            config["build"]["gradle.bin"] = gradle_bin

        config["provisioning"] = {}
        config["provisioning"]["local.install.dir"] = "install"

        config["runtime"] = {}
        config["runtime"]["java8.home"] = jdk8_home

        config["benchmarks"] = {}
        config["benchmarks"]["local.dataset.cache"] = "${system:root.dir}/data"

        config["reporting"] = {}
        config["reporting"]["datastore.type"] = data_store_type
        config["reporting"]["datastore.host"] = data_store_host
        config["reporting"]["datastore.port"] = data_store_port
        config["reporting"]["datastore.secure"] = data_store_secure
        config["reporting"]["datastore.user"] = data_store_user
        config["reporting"]["datastore.password"] = data_store_password

        config["tracks"] = {}
        config["tracks"]["default.url"] = "https://github.com/elastic/rally-tracks"

        config["defaults"] = {}
        config["defaults"]["preserve_benchmark_candidate"] = str(preserve_install)

        config_file.store(config)

        self.o("[✓] Configuration successfully written to [%s]. Happy benchmarking!" % config_file.location)
        self.o("")
        if benchmark_from_sources:
            self.o("To benchmark Elasticsearch with the default benchmark run:")
            self.o("")
            self.o("  %s" % PROGRAM_NAME)
        else:
            self.o("To benchmark Elasticsearch 5.0.0 with the default benchmark run:")
            self.o("")
            self.o("  %s --distribution-version=5.0.0" % PROGRAM_NAME)

        self.o("")
        self.o("For help, type %s --help or see the user documentation at %s"
               % (PROGRAM_NAME, console.format.link(DOC_LINK)))
Example #10
def main():
    check_python_version()

    start = time.time()

    # Early init of console output so we start to show everything consistently.
    console.init(quiet=False)
    # allow a thread dump to be shown on SIGQUIT
    faulthandler.register(signal.SIGQUIT, file=sys.stderr)

    pre_configure_logging()
    args = parse_args()

    console.init(quiet=args.quiet)
    console.println(BANNER)

    cfg = config.Config(config_name=args.configuration_name)
    sub_command = derive_sub_command(args, cfg)
    ensure_configuration_present(cfg, args, sub_command)

    if args.effective_start_date:
        cfg.add(config.Scope.application, "system", "time.start", args.effective_start_date)
        cfg.add(config.Scope.application, "system", "time.start.user_provided", True)
    else:
        cfg.add(config.Scope.application, "system", "time.start", datetime.datetime.utcnow())
        cfg.add(config.Scope.application, "system", "time.start.user_provided", False)

    cfg.add(config.Scope.applicationOverride, "system", "quiet.mode", args.quiet)

    # per node?
    cfg.add(config.Scope.applicationOverride, "system", "offline.mode", args.offline)
    cfg.add(config.Scope.applicationOverride, "system", "logging.output", args.logging)

    # Local config per node
    cfg.add(config.Scope.application, "node", "rally.root", paths.rally_root())
    cfg.add(config.Scope.application, "node", "rally.cwd", os.getcwd())

    cfg.add(config.Scope.applicationOverride, "mechanic", "source.revision", args.revision)
    #TODO dm: Consider renaming this one. It's used by different modules
    if args.distribution_version:
        cfg.add(config.Scope.applicationOverride, "mechanic", "distribution.version", args.distribution_version)
    cfg.add(config.Scope.applicationOverride, "mechanic", "distribution.repository", args.distribution_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "repository.name", args.team_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.names", csv_to_list(args.car))
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.plugins", csv_to_list(args.elasticsearch_plugins))
    cfg.add(config.Scope.applicationOverride, "mechanic", "node.datapaths", csv_to_list(args.data_paths))
    if args.keep_cluster_running:
        cfg.add(config.Scope.applicationOverride, "mechanic", "keep.running", True)
        # force-preserve the cluster nodes.
        cfg.add(config.Scope.applicationOverride, "mechanic", "preserve.install", True)
    else:
        cfg.add(config.Scope.applicationOverride, "mechanic", "keep.running", False)
        cfg.add(config.Scope.applicationOverride, "mechanic", "preserve.install", convert.to_bool(args.preserve_install))
    cfg.add(config.Scope.applicationOverride, "mechanic", "telemetry.devices", csv_to_list(args.telemetry))

    cfg.add(config.Scope.applicationOverride, "race", "pipeline", args.pipeline)
    cfg.add(config.Scope.applicationOverride, "race", "laps", args.laps)
    cfg.add(config.Scope.applicationOverride, "race", "user.tag", args.user_tag)

    cfg.add(config.Scope.applicationOverride, "track", "repository.name", args.track_repository)
    cfg.add(config.Scope.applicationOverride, "track", "track.name", args.track)
    cfg.add(config.Scope.applicationOverride, "track", "challenge.name", args.challenge)
    cfg.add(config.Scope.applicationOverride, "track", "include.tasks", csv_to_list(args.include_tasks))
    cfg.add(config.Scope.applicationOverride, "track", "test.mode.enabled", args.test_mode)
    cfg.add(config.Scope.applicationOverride, "track", "auto_manage_indices", to_bool(args.auto_manage_indices))

    cfg.add(config.Scope.applicationOverride, "reporting", "format", args.report_format)
    cfg.add(config.Scope.applicationOverride, "reporting", "output.path", args.report_file)
    if sub_command == "compare":
        cfg.add(config.Scope.applicationOverride, "reporting", "baseline.timestamp", args.baseline)
        cfg.add(config.Scope.applicationOverride, "reporting", "contender.timestamp", args.contender)

    ################################
    # new section name: driver
    ################################
    cfg.add(config.Scope.applicationOverride, "driver", "cluster.health", args.cluster_health)
    cfg.add(config.Scope.applicationOverride, "driver", "profiling", args.enable_driver_profiling)
    cfg.add(config.Scope.applicationOverride, "driver", "load_driver_hosts", csv_to_list(args.load_driver_hosts))
    if sub_command != "list":
        # Also needed by mechanic (-> telemetry) - duplicate by module?
        cfg.add(config.Scope.applicationOverride, "client", "hosts", _normalize_hosts(csv_to_list(args.target_hosts)))
        client_options = kv_to_map(csv_to_list(args.client_options))
        cfg.add(config.Scope.applicationOverride, "client", "options", client_options)
        if "timeout" not in client_options:
            console.info("You did not provide an explicit timeout in the client options. Assuming default of 10 seconds.")

    # split by component?
    if sub_command == "list":
        cfg.add(config.Scope.applicationOverride, "system", "list.config.option", args.configuration)
        cfg.add(config.Scope.applicationOverride, "system", "list.races.max_results", args.limit)

    configure_logging(cfg)
    logger.info("OS [%s]" % str(os.uname()))
    logger.info("Python [%s]" % str(sys.implementation))
    logger.info("Rally version [%s]" % version.version())
    logger.info("Command line arguments: %s" % args)
    # Configure networking
    net.init()
    if not args.offline:
        if not net.has_internet_connection():
            console.warn("No Internet connection detected. Automatic download of track data sets etc. is disabled.",
                         logger=logger)
            cfg.add(config.Scope.applicationOverride, "system", "offline.mode", True)
        else:
            logger.info("Detected a working Internet connection.")

    # Kill any lingering Rally processes before attempting to continue - the actor system needs to be a singleton on this machine
    # noinspection PyBroadException
    try:
        process.kill_running_rally_instances()
    except BaseException:
        logger.exception("Could not terminate potentially running Rally instances correctly. Attempting to go on anyway.")

    success = dispatch_sub_command(cfg, sub_command)

    end = time.time()
    if success:
        console.println("")
        console.info("SUCCESS (took %d seconds)" % (end - start), overline="-", underline="-")
    else:
        console.println("")
        console.info("FAILURE (took %d seconds)" % (end - start), overline="-", underline="-")
        sys.exit(64)
Example #11
    def create_config(self,
                      config_file,
                      advanced_config=False,
                      assume_defaults=False):
        """
        Either creates a new configuration file or overwrites an existing one. Will ask the user for input on configurable properties
        and writes them to the configuration file in ~/.rally/rally.ini.

        :param config_file:
        :param advanced_config: Whether to ask for properties that are not necessary for everyday use (on a dev machine). Default: False.
        :param assume_defaults: If True, assume the user accepted all values for which defaults are provided. Mainly intended for automatic
        configuration in CI runs. Default: False.
        """
        self.assume_defaults = assume_defaults
        if advanced_config:
            self.o(
                "Running advanced configuration. You can get additional help at:"
            )
            self.o("")
            self.o("  %s" %
                   console.format.link("%sconfiguration.html" % DOC_LINK))
            self.o("")

            logger.info("Running advanced configuration routine.")
            self.o("")
        else:
            self.o(
                "Running simple configuration. Run the advanced configuration with:"
            )
            self.o("")
            self.o("  %s configure --advanced-config" % PROGRAM_NAME)
            self.o("")
            logger.info("Running simple configuration routine.")

        if config_file.present:
            self.o("\nWARNING: Will overwrite existing config file at [%s]\n" %
                   config_file.location)
            logger.debug("Detected an existing configuration file at [%s]" %
                         config_file.location)
        else:
            logger.debug(
                "Did not detect a configuration file at [%s]. Running initial configuration routine."
                % config_file.location)

        # Autodetect settings
        self.o("* Autodetecting available third-party software")
        git_path = io.guess_install_location("git")
        gradle_bin = io.guess_install_location("gradle")
        java_9_home = io.guess_java_home(major_version=9)
        from esrally.utils import jvm
        # Don't auto-detect an EA release and bring trouble to the user later on. They can still configure it manually if they want to.
        if java_9_home and not jvm.is_early_access_release(java_9_home):
            auto_detected_java_home = java_9_home
        else:
            auto_detected_java_home = io.guess_java_home(major_version=8)

        self.print_detection_result("git    ", git_path)
        self.print_detection_result("gradle ", gradle_bin)
        self.print_detection_result(
            "JDK    ",
            auto_detected_java_home,
            warn_if_missing=True,
            additional_message=
            "You cannot benchmark Elasticsearch without a JDK installation")
        self.o("")

        # users that don't have Gradle available cannot benchmark from sources
        benchmark_from_sources = gradle_bin

        if not benchmark_from_sources:
            self.o(
                "**********************************************************************************"
            )
            self.o(
                "You don't have the necessary software to benchmark source builds of Elasticsearch."
            )
            self.o("")
            self.o("You can still benchmark binary distributions with e.g.:")
            self.o("")
            self.o("  %s --distribution-version=5.0.0" % PROGRAM_NAME)
            self.o(
                "**********************************************************************************"
            )
            self.o("")

        root_dir = "%s/benchmarks" % config_file.config_dir
        if advanced_config:
            root_dir = io.normalize_path(
                self._ask_property("Enter the benchmark data directory:",
                                   default_value=root_dir))
        else:
            self.o(
                "* Setting up benchmark data directory in [%s] (needs several GB)."
                % root_dir)

        if benchmark_from_sources:
            # We try to autodetect an existing ES source directory
            guess = self._guess_es_src_dir()
            if guess:
                source_dir = guess
                self.o(
                    "Autodetected Elasticsearch project directory at [%s]." %
                    source_dir)
                logger.debug(
                    "Autodetected Elasticsearch project directory at [%s]." %
                    source_dir)
            else:
                default_src_dir = "%s/src" % root_dir
                logger.debug(
                    "Could not autodetect Elasticsearch project directory. Providing [%s] as default."
                    % default_src_dir)
                source_dir = io.normalize_path(
                    self._ask_property(
                        "Enter your Elasticsearch project directory:",
                        default_value=default_src_dir))
            # Not everybody might have SSH access. Play safe with the default. It may be slower but this will work for everybody.
            repo_url = "https://github.com/elastic/elasticsearch.git"

        if auto_detected_java_home:
            java_home = auto_detected_java_home
        else:
            self.o("")
            java_home = io.normalize_path(
                self._ask_property("Enter the JDK root directory:",
                                   check_path_exists=True))

        if advanced_config:
            env_name = self._ask_env_name()
            data_store_type = "elasticsearch"
            data_store_host, data_store_port, data_store_secure, data_store_user, data_store_password = self._ask_data_store(
            )

            preserve_install = convert.to_bool(
                self._ask_property(
                    "Do you want Rally to keep the Elasticsearch benchmark candidate "
                    "installation including the index (will use lots of disk space)?",
                    default_value=False))
        else:
            # Does not matter too much for an in-memory store
            env_name = "local"
            data_store_type = "in-memory"
            data_store_host, data_store_port, data_store_secure, data_store_user, data_store_password = "", "", "", "", ""
            preserve_install = False

        config = configparser.ConfigParser()
        config["meta"] = {}
        config["meta"]["config.version"] = str(Config.CURRENT_CONFIG_VERSION)

        config["system"] = {}
        config["system"]["env.name"] = env_name

        config["node"] = {}
        config["node"]["root.dir"] = root_dir

        if benchmark_from_sources:
            config["source"] = {}
            config["source"]["local.src.dir"] = source_dir
            config["source"]["remote.repo.url"] = repo_url

            config["build"] = {}
            config["build"]["gradle.bin"] = gradle_bin

        config["runtime"] = {}
        config["runtime"]["java.home"] = java_home

        config["benchmarks"] = {}
        config["benchmarks"]["local.dataset.cache"] = "${node:root.dir}/data"

        config["reporting"] = {}
        config["reporting"]["datastore.type"] = data_store_type
        config["reporting"]["datastore.host"] = data_store_host
        config["reporting"]["datastore.port"] = data_store_port
        config["reporting"]["datastore.secure"] = data_store_secure
        config["reporting"]["datastore.user"] = data_store_user
        config["reporting"]["datastore.password"] = data_store_password

        config["tracks"] = {}
        config["tracks"][
            "default.url"] = "https://github.com/elastic/rally-tracks"

        config["teams"] = {}
        config["teams"][
            "default.url"] = "https://github.com/elastic/rally-teams"

        config["defaults"] = {}
        config["defaults"]["preserve_benchmark_candidate"] = str(
            preserve_install)

        config["distributions"] = {}
        config["distributions"]["release.1.url"] = "https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-" \
                                                   "{{VERSION}}.tar.gz"
        config["distributions"]["release.2.url"] = "https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/" \
                                                   "distribution/tar/elasticsearch/{{VERSION}}/elasticsearch-{{VERSION}}.tar.gz"
        config["distributions"][
            "release.url"] = "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{VERSION}}.tar.gz"
        config["distributions"]["release.cache"] = "true"

        config_file.store(config)

        self.o(
            "Configuration successfully written to [%s]. Happy benchmarking!" %
            config_file.location)
        self.o("")
        if benchmark_from_sources:
            self.o(
                "To benchmark Elasticsearch with the default benchmark run:")
            self.o("")
            self.o("  %s" % PROGRAM_NAME)
        else:
            self.o(
                "To benchmark Elasticsearch 5.0.0 with the default benchmark run:"
            )
            self.o("")
            self.o("  %s --distribution-version=5.0.0" % PROGRAM_NAME)

        self.o("")
        self.o("For help, type %s --help or see the user documentation at %s" %
               (PROGRAM_NAME, console.format.link(DOC_LINK)))
Example #12
    def create_config(self, config_file, advanced_config=False, assume_defaults=False):
        """
        Either creates a new configuration file or overwrites an existing one. Will ask the user for input on configurable properties
        and writes them to the configuration file in ~/.rally/rally.ini.

        :param config_file:
        :param advanced_config: Whether to ask for properties that are not necessary for everyday use (on a dev machine). Default: False.
        :param assume_defaults: If True, assume the user accepted all values for which defaults are provided. Mainly intended for automatic
        configuration in CI runs. Default: False.
        """
        self.prompter = Prompter(self.i, self.sec_i, self.o, assume_defaults)

        if advanced_config:
            self.o("Running advanced configuration. You can get additional help at:")
            self.o("")
            self.o("  %s" % console.format.link(doc_link("configuration.html")))
            self.o("")
        else:
            self.o("Running simple configuration. Run the advanced configuration with:")
            self.o("")
            self.o("  %s configure --advanced-config" % PROGRAM_NAME)
            self.o("")

        if config_file.present:
            self.o("\nWARNING: Will overwrite existing config file at [%s]\n" % config_file.location)
            self.logger.debug("Detected an existing configuration file at [%s]", config_file.location)
        else:
            self.logger.debug("Did not detect a configuration file at [%s]. Running initial configuration routine.", config_file.location)

        root_dir = io.normalize_path(os.path.abspath(os.path.join(config_file.config_dir, "benchmarks")))
        if advanced_config:
            root_dir = io.normalize_path(self._ask_property("Enter the benchmark root directory", default_value=root_dir))
        else:
            self.o("* Setting up benchmark root directory in %s" % root_dir)

        # We try to autodetect an existing ES source directory
        guess = self._guess_es_src_dir()
        if guess:
            source_dir = guess
            self.logger.debug("Autodetected Elasticsearch project directory at [%s].", source_dir)
        else:
            default_src_dir = os.path.join(root_dir, "src", "elasticsearch")
            self.logger.debug("Could not autodetect Elasticsearch project directory. Providing [%s] as default.", default_src_dir)
            source_dir = default_src_dir

        if advanced_config:
            source_dir = io.normalize_path(self._ask_property("Enter your Elasticsearch project directory:",
                                                              default_value=source_dir))
        if not advanced_config:
            self.o("* Setting up benchmark source directory in %s" % source_dir)
            self.o("")

        # Not everybody might have SSH access. Play safe with the default. It may be slower but this will work for everybody.
        repo_url = "https://github.com/elastic/elasticsearch.git"

        if advanced_config:
            data_store_choice = self._ask_property("Where should metrics be kept?"
                                                   "\n\n"
                                                   "(1) In memory (simpler but less options for analysis)\n"
                                                   "(2) Elasticsearch (requires a separate ES instance, keeps all raw samples for analysis)"
                                                   "\n\n", default_value="1", choices=["1", "2"])
            if data_store_choice == "1":
                env_name = "local"
                data_store_type = "in-memory"
                data_store_host, data_store_port, data_store_secure, data_store_user, data_store_password = "", "", "False", "", ""
            else:
                data_store_type = "elasticsearch"
                data_store_host, data_store_port, data_store_secure, data_store_user, data_store_password = self._ask_data_store()

                env_name = self._ask_env_name()

            preserve_install = convert.to_bool(self._ask_property("Do you want Rally to keep the Elasticsearch benchmark candidate "
                                                                  "installation including the index (will use several GB per race)?",
                                                                  default_value=False))
        else:
            # Does not matter for an in-memory store
            env_name = "local"
            data_store_type = "in-memory"
            data_store_host, data_store_port, data_store_secure, data_store_user, data_store_password = "", "", "False", "", ""
            preserve_install = False

        config = configparser.ConfigParser()
        config["meta"] = {}
        config["meta"]["config.version"] = str(Config.CURRENT_CONFIG_VERSION)

        config["system"] = {}
        config["system"]["env.name"] = env_name

        config["node"] = {}
        config["node"]["root.dir"] = root_dir

        final_source_dir = io.normalize_path(os.path.abspath(os.path.join(source_dir, os.pardir)))
        config["node"]["src.root.dir"] = final_source_dir

        config["source"] = {}
        config["source"]["remote.repo.url"] = repo_url
        # the Elasticsearch directory is just the last path component (relative to the source root directory)
        config["source"]["elasticsearch.src.subdir"] = io.basename(source_dir)

        config["benchmarks"] = {}
        config["benchmarks"]["local.dataset.cache"] = os.path.join(root_dir, "data")

        config["reporting"] = {}
        config["reporting"]["datastore.type"] = data_store_type
        config["reporting"]["datastore.host"] = data_store_host
        config["reporting"]["datastore.port"] = data_store_port
        config["reporting"]["datastore.secure"] = data_store_secure
        config["reporting"]["datastore.user"] = data_store_user
        config["reporting"]["datastore.password"] = data_store_password

        config["tracks"] = {}
        config["tracks"]["default.url"] = "https://github.com/elastic/rally-tracks"

        config["teams"] = {}
        config["teams"]["default.url"] = "https://github.com/elastic/rally-teams"

        config["defaults"] = {}
        config["defaults"]["preserve_benchmark_candidate"] = str(preserve_install)

        config["distributions"] = {}
        config["distributions"]["release.cache"] = "true"

        config_file.store(config)

        self.o("Configuration successfully written to %s. Happy benchmarking!" % config_file.location)
        self.o("")
        self.o("More info about Rally:")
        self.o("")
        self.o("* Type %s --help" % PROGRAM_NAME)
        self.o("* Read the documentation at %s" % console.format.link(doc_link()))
        self.o("* Ask a question on the forum at %s" % console.format.link("https://discuss.elastic.co/c/elasticsearch/rally"))
Example #13
def main():
    start = time.time()
    # Early init of console output so we start to show everything consistently.
    console.init(quiet=False)

    pre_configure_logging()
    args = parse_args()

    console.init(quiet=args.quiet)
    console.println(BANNER)

    cfg = config.Config(config_name=args.configuration_name)
    sub_command = derive_sub_command(args, cfg)
    ensure_configuration_present(cfg, args, sub_command)
    # Add global meta info derived by rally itself
    cfg.add(config.Scope.application, "meta", "time.start",
            args.effective_start_date)
    cfg.add(config.Scope.application, "system", "rally.root",
            rally_root_path())
    cfg.add(config.Scope.application, "system", "invocation.root.dir",
            paths.Paths(cfg).invocation_root())
    # Add command line config
    cfg.add(config.Scope.applicationOverride, "source", "revision",
            args.revision)
    cfg.add(config.Scope.applicationOverride, "source", "distribution.version",
            args.distribution_version)
    cfg.add(config.Scope.applicationOverride, "source",
            "distribution.repository", args.distribution_repository)
    cfg.add(config.Scope.applicationOverride, "system", "pipeline",
            args.pipeline)
    cfg.add(config.Scope.applicationOverride, "system", "track.repository",
            args.track_repository)
    cfg.add(config.Scope.applicationOverride, "system", "quiet.mode",
            args.quiet)
    cfg.add(config.Scope.applicationOverride, "system", "offline.mode",
            args.offline)
    cfg.add(config.Scope.applicationOverride, "system", "user.tag",
            args.user_tag)
    cfg.add(config.Scope.applicationOverride, "system", "logging.output",
            args.logging)
    cfg.add(config.Scope.applicationOverride, "telemetry", "devices",
            csv_to_list(args.telemetry))
    cfg.add(config.Scope.applicationOverride, "benchmarks", "track",
            args.track)
    cfg.add(config.Scope.applicationOverride, "benchmarks", "challenge",
            args.challenge)
    cfg.add(config.Scope.applicationOverride, "benchmarks", "car", args.car)
    cfg.add(config.Scope.applicationOverride, "benchmarks", "cluster.health",
            args.cluster_health)
    cfg.add(config.Scope.applicationOverride, "benchmarks", "laps", args.laps)
    cfg.add(config.Scope.applicationOverride, "provisioning", "datapaths",
            csv_to_list(args.data_paths))
    cfg.add(config.Scope.applicationOverride, "provisioning",
            "install.preserve", convert.to_bool(args.preserve_install))
    cfg.add(config.Scope.applicationOverride, "launcher",
            "external.target.hosts",
            convert_hosts(csv_to_list(args.target_hosts)))
    cfg.add(config.Scope.applicationOverride, "launcher", "client.options",
            kv_to_map(csv_to_list(args.client_options)))
    cfg.add(config.Scope.applicationOverride, "report", "reportformat",
            args.report_format)
    cfg.add(config.Scope.applicationOverride, "report", "reportfile",
            args.report_file)
    if args.override_src_dir is not None:
        cfg.add(config.Scope.applicationOverride, "source", "local.src.dir",
                args.override_src_dir)

    if sub_command == "list":
        cfg.add(config.Scope.applicationOverride, "system",
                "list.config.option", args.configuration)
        cfg.add(config.Scope.applicationOverride, "system",
                "list.races.max_results", args.limit)
    if sub_command == "compare":
        cfg.add(config.Scope.applicationOverride, "report",
                "comparison.baseline.timestamp", args.baseline)
        cfg.add(config.Scope.applicationOverride, "report",
                "comparison.contender.timestamp", args.contender)

    configure_logging(cfg)
    logger.info("Rally version [%s]" % version())
    logger.info("Command line arguments: %s" % args)
    # Configure networking
    net.init()
    if not args.offline:
        if not net.has_internet_connection():
            console.warn(
                "No Internet connection detected. Automatic download of track data sets etc. is disabled.",
                logger=logger)
            cfg.add(config.Scope.applicationOverride, "system", "offline.mode",
                    True)
        else:
            logger.info("Detected a working Internet connection.")

    # Kill any lingering Rally processes before attempting to continue - the actor system needs to be a singleton on this machine
    # noinspection PyBroadException
    try:
        process.kill_running_rally_instances()
    except BaseException:
        logger.exception(
            "Could not terminate potentially running Rally instances correctly. Attempting to go on anyway."
        )

    try:
        actors = bootstrap_actor_system(cfg)
    except RuntimeError as e:
        logger.exception("Could not bootstrap actor system.")
        if str(e) == "Unable to determine valid external socket address.":
            console.warn(
                "Could not determine a socket address. Are you running without any network?",
                logger=logger)
            actors = bootstrap_actor_system(cfg,
                                            system_base="multiprocQueueBase")
        else:
            raise

    success = False
    try:
        success = dispatch_sub_command(cfg, sub_command)
    finally:
        shutdown_complete = False
        times_interrupted = 0
        while not shutdown_complete and times_interrupted < 2:
            try:
                logger.info("Attempting to shutdown internal actor system.")
                actors.shutdown()
                shutdown_complete = True
                logger.info("Shutdown completed.")
            except KeyboardInterrupt:
                times_interrupted += 1
                logger.warn(
                    "User interrupted shutdown of internal actor system.")
                console.info(
                    "Please wait a moment for Rally's internal components to shutdown."
                )
        if not shutdown_complete and times_interrupted > 0:
            logger.warn(
                "Terminating after user has interrupted actor system shutdown explicitly for [%d] times."
                % times_interrupted)
            console.println("")
            console.warn(
                "Terminating now at the risk of leaving child processes behind."
            )
            console.println("")
            console.warn("The next race may fail due to an unclean shutdown.")
            console.println("")
            console.println(SKULL)
            console.println("")

    end = time.time()
    if success:
        console.println("")
        console.info("SUCCESS (took %d seconds)" % (end - start),
                     overline="-",
                     underline="-")
    else:
        console.println("")
        console.info("FAILURE (took %d seconds)" % (end - start),
                     overline="-",
                     underline="-")
        sys.exit(64)
Example #14
    def test_cannot_convert_invalid_value(self):
        values = ["Invalid", None, []]
        for value in values:
            with pytest.raises(ValueError) as exc:
                convert.to_bool(value)
            assert exc.value.args[0] == f"Cannot convert [{value}] to bool."
Example #15
    def test_convert_to_false(self):
        values = ["False", "false", "No", "no", "f", "n", "0", False]
        for value in values:
            assert convert.to_bool(value) is False
Example #16
    def test_convert_to_true(self):
        values = ["True", "true", "Yes", "yes", "t", "y", "1", True]
        for value in values:
            assert convert.to_bool(value) is True
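Taken together, the three tests above pin down the converter's full contract: a fixed set of accepted spellings for each boolean and a ValueError with a precise message for everything else. A minimal sketch of an implementation consistent with these tests (the actual esrally.utils.convert.to_bool may differ in detail):

def to_bool(value):
    # accepted spellings taken verbatim from the tests above
    if value in ["True", "true", "Yes", "yes", "t", "y", "1", True]:
        return True
    if value in ["False", "false", "No", "no", "f", "n", "0", False]:
        return False
    raise ValueError(f"Cannot convert [{value}] to bool.")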
Example #17
    def prepare(self, binary):
        # we need to allow other users to write to these directories due to Docker.
        #
        # Although os.mkdir passes 0o777 by default, mkdir(2) uses `mode & ~umask & 0777` to determine the final flags and
        # hence we need to modify the process' umask here. For details see https://linux.die.net/man/2/mkdir.
        previous_umask = os.umask(0)
        try:
            io.ensure_dir(self.binary_path)
            io.ensure_dir(self.node_log_dir)
            io.ensure_dir(self.heap_dump_dir)
            io.ensure_dir(self.data_paths[0])
        finally:
            os.umask(previous_umask)

        mounts = {}

        for car_config_path in self.car.config_paths:
            for root, _, files in os.walk(car_config_path):
                env = jinja2.Environment(loader=jinja2.FileSystemLoader(root))

                relative_root = root[len(car_config_path) + 1:]
                absolute_target_root = os.path.join(self.binary_path,
                                                    relative_root)
                io.ensure_dir(absolute_target_root)

                for name in files:
                    source_file = os.path.join(root, name)
                    target_file = os.path.join(absolute_target_root, name)
                    mounts[target_file] = os.path.join(
                        "/usr/share/elasticsearch", relative_root, name)
                    if plain_text(source_file):
                        self.logger.info(
                            "Reading config template file [%s] and writing to [%s].",
                            source_file, target_file)
                        with open(target_file, mode="a",
                                  encoding="utf-8") as f:
                            f.write(
                                _render_template(env, self.config_vars,
                                                 source_file))
                    else:
                        self.logger.info(
                            "Treating [%s] as binary and copying as is to [%s].",
                            source_file, target_file)
                        shutil.copy(source_file, target_file)

        docker_cfg = self._render_template_from_file(self.docker_vars(mounts))
        self.logger.info("Starting Docker container with configuration:\n%s",
                         docker_cfg)

        with open(os.path.join(self.binary_path, "docker-compose.yml"),
                  mode="wt",
                  encoding="utf-8") as f:
            f.write(docker_cfg)

        return NodeConfiguration(
            "docker",
            self.car.mandatory_var("runtime.jdk"),
            convert.to_bool(self.car.mandatory_var("runtime.jdk.bundled")),
            self.node_ip,
            self.node_name,
            self.node_root_dir,
            self.binary_path,
            self.data_paths,
        )
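
The umask handling at the top of prepare() is easy to get wrong. A standalone sketch (not Rally code) of the interaction the comment describes: os.mkdir asks for 0o777, but mkdir(2) applies `mode & ~umask`, so the umask has to be cleared first.

import os
import stat
import tempfile

base = tempfile.mkdtemp()

default_dir = os.path.join(base, "with-umask")
os.mkdir(default_dir)  # final mode is 0o777 & ~umask, e.g. 0o755 with a umask of 0o022

previous_umask = os.umask(0)  # clear the umask and remember the previous value
try:
    open_dir = os.path.join(base, "world-writable")
    os.mkdir(open_dir)  # actually created with mode 0o777
finally:
    os.umask(previous_umask)  # always restore the process-wide umask

print(oct(stat.S_IMODE(os.stat(default_dir).st_mode)))  # typically 0o755
print(oct(stat.S_IMODE(os.stat(open_dir).st_mode)))     # 0o777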
Example #18
    def __init__(self, hosts, client_options):
        self.hosts = hosts
        self.client_options = dict(client_options)
        self.ssl_context = None
        self.logger = logging.getLogger(__name__)

        masked_client_options = dict(client_options)
        if "basic_auth_password" in masked_client_options:
            masked_client_options["basic_auth_password"] = "******"
        if "http_auth" in masked_client_options:
            masked_client_options["http_auth"] = (masked_client_options["http_auth"][0], "*****")
        self.logger.info("Creating ES client connected to %s with options [%s]", hosts, masked_client_options)

        # we now use an SSL context, so use_ssl must no longer be present in the client options
        if self.client_options.pop("use_ssl", False):
            import ssl
            self.logger.info("SSL support: on")
            self.client_options["scheme"] = "https"

            # ssl.Purpose.CLIENT_AUTH allows presenting client certs and can only be enabled during instantiation
            # but can be disabled via the verify_mode property later on.
            self.ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH,
                                                          cafile=self.client_options.pop("ca_certs", certifi.where()))

            if not self.client_options.pop("verify_certs", True):
                self.logger.info("SSL certificate verification: off")
                # order matters to avoid ValueError: check_hostname needs a SSL context with either CERT_OPTIONAL or CERT_REQUIRED
                self.ssl_context.verify_mode = ssl.CERT_NONE
                self.ssl_context.check_hostname = False

                self.logger.warning("User has enabled SSL but disabled certificate verification. This is dangerous but may be ok for a "
                                    "benchmark. Disabling urllib warnings now to avoid a logging storm. "
                                    "See https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings for details.")
                # disable:  "InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly \
                # advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings"
                urllib3.disable_warnings()
            else:
                self.ssl_context.verify_mode = ssl.CERT_REQUIRED
                self.ssl_context.check_hostname = True
                self.logger.info("SSL certificate verification: on")

            # When using an SSL context, all SSL-related kwargs in client options get ignored
            client_cert = self.client_options.pop("client_cert", False)
            client_key = self.client_options.pop("client_key", False)

            if not client_cert and not client_key:
                self.logger.info("SSL client authentication: off")
            elif bool(client_cert) != bool(client_key):
                self.logger.error(
                    "Supplied client-options contain only one of client_cert/client_key. "
                )
                defined_client_ssl_option = "client_key" if client_key else "client_cert"
                missing_client_ssl_option = "client_cert" if client_key else "client_key"
                console.println(
                    "'{}' is missing from client-options but '{}' has been specified.\n"
                    "If your Elasticsearch setup requires client certificate verification both need to be supplied.\n"
                    "Read the documentation at {}\n".format(
                        missing_client_ssl_option,
                        defined_client_ssl_option,
                        console.format.link(doc_link("command_line_reference.html#client-options")))
                )
                raise exceptions.SystemSetupError(
                    "Cannot specify '{}' without also specifying '{}' in client-options.".format(
                        defined_client_ssl_option,
                        missing_client_ssl_option))
            elif client_cert and client_key:
                self.logger.info("SSL client authentication: on")
                self.ssl_context.load_cert_chain(certfile=client_cert,
                                                 keyfile=client_key)
        else:
            self.logger.info("SSL support: off")
            self.client_options["scheme"] = "http"

        if self._is_set(self.client_options, "basic_auth_user") and self._is_set(self.client_options, "basic_auth_password"):
            self.logger.info("HTTP basic authentication: on")
            self.client_options["http_auth"] = (self.client_options.pop("basic_auth_user"), self.client_options.pop("basic_auth_password"))
        else:
            self.logger.info("HTTP basic authentication: off")

        if self._is_set(self.client_options, "compressed"):
            console.warn("You set the deprecated client option 'compressed‘. Please use 'http_compress' instead.", logger=self.logger)
            self.client_options["http_compress"] = self.client_options.pop("compressed")

        if self._is_set(self.client_options, "http_compress"):
            self.logger.info("HTTP compression: on")
        else:
            self.logger.info("HTTP compression: off")

        if self._is_set(self.client_options, "enable_cleanup_closed"):
            self.client_options["enable_cleanup_closed"] = convert.to_bool(self.client_options.pop("enable_cleanup_closed"))
Example #19
def main():
    pre_configure_logging()
    args = parse_args()
    if not args.quiet:
        print(BANNER)

    cfg = config.Config(config_name=args.configuration_name)
    sub_command = derive_sub_command(args, cfg)
    ensure_configuration_present(cfg, args, sub_command)
    # Add global meta info derived by rally itself
    cfg.add(config.Scope.application, "meta", "time.start",
            args.effective_start_date)
    cfg.add(config.Scope.application, "system", "rally.root",
            rally_root_path())
    cfg.add(config.Scope.application, "system", "invocation.root.dir",
            paths.Paths(cfg).invocation_root())
    # Add command line config
    cfg.add(config.Scope.applicationOverride, "source", "revision",
            args.revision)
    cfg.add(config.Scope.applicationOverride, "source", "distribution.version",
            args.distribution_version)
    cfg.add(config.Scope.applicationOverride, "source",
            "distribution.repository", args.distribution_repository)
    cfg.add(config.Scope.applicationOverride, "system", "pipeline",
            args.pipeline)
    cfg.add(config.Scope.applicationOverride, "system", "track.repository",
            args.track_repository)
    cfg.add(config.Scope.applicationOverride, "system", "track", args.track)
    cfg.add(config.Scope.applicationOverride, "system", "quiet.mode",
            args.quiet)
    cfg.add(config.Scope.applicationOverride, "system", "offline.mode",
            args.offline)
    cfg.add(config.Scope.applicationOverride, "system", "user.tag",
            args.user_tag)
    cfg.add(config.Scope.applicationOverride, "telemetry", "devices",
            csv_to_list(args.telemetry))
    cfg.add(config.Scope.applicationOverride, "benchmarks", "challenge",
            args.challenge)
    cfg.add(config.Scope.applicationOverride, "benchmarks", "car", args.car)
    cfg.add(config.Scope.applicationOverride, "provisioning", "datapaths",
            csv_to_list(args.data_paths))
    cfg.add(config.Scope.applicationOverride, "provisioning",
            "install.preserve", convert.to_bool(args.preserve_install))
    cfg.add(config.Scope.applicationOverride, "launcher",
            "external.target.hosts",
            convert_hosts(csv_to_list(args.target_hosts)))
    cfg.add(config.Scope.applicationOverride, "launcher", "client.options",
            kv_to_map(csv_to_list(args.client_options)))
    cfg.add(config.Scope.applicationOverride, "report", "reportformat",
            args.report_format)
    cfg.add(config.Scope.applicationOverride, "report", "reportfile",
            args.report_file)
    if args.override_src_dir is not None:
        cfg.add(config.Scope.applicationOverride, "source", "local.src.dir",
                args.override_src_dir)

    if sub_command == "list":
        cfg.add(config.Scope.applicationOverride, "system",
                "list.config.option", args.configuration)
        cfg.add(config.Scope.applicationOverride, "system",
                "list.races.max_results", args.limit)
    if sub_command == "compare":
        cfg.add(config.Scope.applicationOverride, "report",
                "comparison.baseline.timestamp", args.baseline)
        cfg.add(config.Scope.applicationOverride, "report",
                "comparison.contender.timestamp", args.contender)

    configure_logging(cfg)
    logger.info("Rally version [%s]" % version())
    logger.info("Command line arguments: %s" % args)

    # Kill any lingering Rally processes before attempting to continue - the actor system needs to be a singleton on this machine
    try:
        process.kill_running_rally_instances()
    except BaseException:
        logger.exception(
            "Could not terminate potentially running Rally instances correctly. Attempting to go on anyway."
        )

    # bootstrap Rally's Actor system
    try:
        actors = thespian.actors.ActorSystem(
            "multiprocTCPBase", logDefs=configure_actor_logging(cfg))
    except thespian.actors.ActorSystemException:
        logger.exception(
            "Could not initialize internal actor system. Terminating.")
        print("ERROR: Could not initialize successfully.")
        print("")
        print(
            "The most likely cause is that there are still processes running from a previous race."
        )
        print(
            "Please check for running Python processes and terminate them before running Rally again."
        )
        print("")
        print_help_on_errors(cfg)
        sys.exit(70)

    success = False
    try:
        success = dispatch_sub_command(cfg, sub_command)
    finally:
        shutdown_complete = False
        times_interrupted = 0
        while not shutdown_complete and times_interrupted < 2:
            try:
                actors.shutdown()
                shutdown_complete = True
            except KeyboardInterrupt:
                times_interrupted += 1
                logger.warning(
                    "User interrupted shutdown of internal actor system.")
                print(
                    "Please wait a moment for Rally's internal components to shutdown."
                )
        if not shutdown_complete and times_interrupted > 0:
            logger.warning(
                "Terminating after user has interrupted actor system shutdown explicitly for [%d] times."
                % times_interrupted)
            print(
                "**********************************************************************"
            )
            print("")
            print(
                "WARN: Terminating now at the risk of leaving child processes behind."
            )
            print("")
            print("The next race may fail due to an unclean shutdown.")
            print("")
            print(SKULL)
            print("")
            print(
                "**********************************************************************"
            )

    if not success:
        sys.exit(64)
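
This variant of main() leans on csv_to_list and kv_to_map to parse command line values. Hedged sketches of plausible implementations; the real helpers may do extra normalization (e.g. type coercion of values):

def csv_to_list(csv):
    if csv is None or len(csv.strip()) == 0:
        return []
    return [e.strip() for e in csv.split(",")]

def kv_to_map(kvs):
    # ["timeout:60", "use_ssl:true"] -> {"timeout": "60", "use_ssl": "true"}
    result = {}
    for kv in kvs:
        k, v = kv.split(":", 1)
        result[k.strip()] = v.strip()
    return result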
Example #20
def dispatch_sub_command(arg_parser, args, cfg):
    sub_command = args.subcommand

    cfg.add(config.Scope.application, "system", "quiet.mode", args.quiet)
    cfg.add(config.Scope.application, "system", "offline.mode", args.offline)

    try:
        if sub_command == "compare":
            configure_reporting_params(args, cfg)
            reporter.compare(cfg, args.baseline, args.contender)
        elif sub_command == "list":
            cfg.add(config.Scope.applicationOverride, "system", "list.config.option", args.configuration)
            cfg.add(config.Scope.applicationOverride, "system", "list.races.max_results", args.limit)
            configure_mechanic_params(args, cfg, command_requires_car=False)
            configure_track_params(arg_parser, args, cfg, command_requires_track=False)
            dispatch_list(cfg)
        elif sub_command == "download":
            cfg.add(config.Scope.applicationOverride, "mechanic", "target.os", args.target_os)
            cfg.add(config.Scope.applicationOverride, "mechanic", "target.arch", args.target_arch)
            configure_mechanic_params(args, cfg)
            mechanic.download(cfg)
        elif sub_command == "install":
            cfg.add(config.Scope.applicationOverride, "system", "install.id", str(uuid.uuid4()))
            cfg.add(config.Scope.applicationOverride, "mechanic", "network.host", args.network_host)
            cfg.add(config.Scope.applicationOverride, "mechanic", "network.http.port", args.http_port)
            cfg.add(config.Scope.applicationOverride, "mechanic", "source.revision", args.revision)
            # TODO: Remove this special treatment and rely on artifact caching (follow-up PR)
            cfg.add(config.Scope.applicationOverride, "mechanic", "skip.build", args.skip_build)
            cfg.add(config.Scope.applicationOverride, "mechanic", "build.type", args.build_type)
            cfg.add(config.Scope.applicationOverride, "mechanic", "runtime.jdk", args.runtime_jdk)
            cfg.add(config.Scope.applicationOverride, "mechanic", "node.name", args.node_name)
            cfg.add(config.Scope.applicationOverride, "mechanic", "master.nodes", opts.csv_to_list(args.master_nodes))
            cfg.add(config.Scope.applicationOverride, "mechanic", "seed.hosts", opts.csv_to_list(args.seed_hosts))
            cfg.add(config.Scope.applicationOverride, "mechanic", "car.plugins", opts.csv_to_list(args.elasticsearch_plugins))
            cfg.add(config.Scope.applicationOverride, "mechanic", "plugin.params", opts.to_dict(args.plugin_params))
            configure_mechanic_params(args, cfg)
            mechanic.install(cfg)
        elif sub_command == "start":
            cfg.add(config.Scope.applicationOverride, "system", "race.id", args.race_id)
            cfg.add(config.Scope.applicationOverride, "system", "install.id", args.installation_id)
            cfg.add(config.Scope.applicationOverride, "mechanic", "runtime.jdk", args.runtime_jdk)
            configure_telemetry_params(args, cfg)
            mechanic.start(cfg)
        elif sub_command == "stop":
            cfg.add(config.Scope.applicationOverride, "mechanic", "preserve.install", convert.to_bool(args.preserve_install))
            cfg.add(config.Scope.applicationOverride, "system", "install.id", args.installation_id)
            mechanic.stop(cfg)
        elif sub_command == "race":
            # As the race command is doing more work than necessary at the moment, we duplicate several parameters
            # in this section that actually belong to dedicated subcommands (like install, start or stop). Over time
            # these duplicated parameters will vanish as we move towards dedicated subcommands and use "race" only
            # to run the actual benchmark (i.e. generating load).
            if args.effective_start_date:
                cfg.add(config.Scope.applicationOverride, "system", "time.start", args.effective_start_date)
            cfg.add(config.Scope.applicationOverride, "system", "race.id", args.race_id)
            # use the race id implicitly also as the install id.
            cfg.add(config.Scope.applicationOverride, "system", "install.id", args.race_id)
            cfg.add(config.Scope.applicationOverride, "race", "pipeline", args.pipeline)
            cfg.add(config.Scope.applicationOverride, "race", "user.tag", args.user_tag)
            cfg.add(config.Scope.applicationOverride, "driver", "profiling", args.enable_driver_profiling)
            cfg.add(config.Scope.applicationOverride, "driver", "assertions", args.enable_assertions)
            cfg.add(config.Scope.applicationOverride, "driver", "on.error", args.on_error)
            cfg.add(config.Scope.applicationOverride, "driver", "load_driver_hosts", opts.csv_to_list(args.load_driver_hosts))
            cfg.add(config.Scope.applicationOverride, "track", "test.mode.enabled", args.test_mode)
            configure_track_params(arg_parser, args, cfg)
            configure_connection_params(arg_parser, args, cfg)
            configure_telemetry_params(args, cfg)
            configure_mechanic_params(args, cfg)
            cfg.add(config.Scope.applicationOverride, "mechanic", "runtime.jdk", args.runtime_jdk)
            cfg.add(config.Scope.applicationOverride, "mechanic", "source.revision", args.revision)
            cfg.add(config.Scope.applicationOverride, "mechanic", "car.plugins", opts.csv_to_list(args.elasticsearch_plugins))
            cfg.add(config.Scope.applicationOverride, "mechanic", "plugin.params", opts.to_dict(args.plugin_params))
            cfg.add(config.Scope.applicationOverride, "mechanic", "preserve.install", convert.to_bool(args.preserve_install))
            cfg.add(config.Scope.applicationOverride, "mechanic", "skip.rest.api.check", convert.to_bool(args.skip_rest_api_check))

            configure_reporting_params(args, cfg)

            race(cfg, args.kill_running_processes)
        elif sub_command == "generate":
            cfg.add(config.Scope.applicationOverride, "generator", "chart.spec.path", args.chart_spec_path)
            cfg.add(config.Scope.applicationOverride, "generator", "chart.type", args.chart_type)
            cfg.add(config.Scope.applicationOverride, "generator", "output.path", args.output_path)
            generate(cfg)
        elif sub_command == "create-track":
            cfg.add(config.Scope.applicationOverride, "generator", "indices", args.indices)
            cfg.add(config.Scope.applicationOverride, "generator", "output.path", args.output_path)
            cfg.add(config.Scope.applicationOverride, "track", "track.name", args.track)
            configure_connection_params(arg_parser, args, cfg)

            tracker.create_track(cfg)
        elif sub_command == "info":
            configure_track_params(arg_parser, args, cfg)
            track.track_info(cfg)
        else:
            raise exceptions.SystemSetupError(f"Unknown subcommand [{sub_command}]")
        return True
    except exceptions.RallyError as e:
        logging.getLogger(__name__).exception("Cannot run subcommand [%s].", sub_command)
        msg = str(e.message)
        nesting = 0
        while hasattr(e, "cause") and e.cause:
            nesting += 1
            e = e.cause
            if hasattr(e, "message"):
                msg += "\n%s%s" % ("\t" * nesting, e.message)
            else:
                msg += "\n%s%s" % ("\t" * nesting, str(e))

        console.error("Cannot %s. %s" % (sub_command, msg))
        console.println("")
        print_help_on_errors()
        return False
    except BaseException as e:
        logging.getLogger(__name__).exception("A fatal error occurred while running subcommand [%s].", sub_command)
        console.error("Cannot %s. %s." % (sub_command, e))
        console.println("")
        print_help_on_errors()
        return False
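
The RallyError branch above flattens a chain of `cause` attributes into an indented multi-line message. The same unwinding, isolated into a helper for readability (a sketch, not part of the original module):

def flatten_error_chain(e):
    # mirrors the except-branch: each nested cause is indented one tab deeper
    msg = str(e.message)
    nesting = 0
    while hasattr(e, "cause") and e.cause:
        nesting += 1
        e = e.cause
        msg += "\n%s%s" % ("\t" * nesting, e.message if hasattr(e, "message") else str(e))
    return msg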
Example #21
 def test_convert_to_false(self):
     values = ["False", "false", "No", "no", "f", "n", "0", False]
     for value in values:
         self.assertFalse(convert.to_bool(value), msg="Expect [%s] of type [%s] to be converted to False." % (str(value), type(value)))
Example #22
def main():
    check_python_version()

    start = time.time()

    # Early init of console output so we start to show everything consistently.
    console.init(quiet=False)
    # allow to see a thread-dump on SIGQUIT
    faulthandler.register(signal.SIGQUIT, file=sys.stderr)

    pre_configure_logging()
    args = parse_args()

    console.init(quiet=args.quiet)
    console.println(BANNER)

    cfg = config.Config(config_name=args.configuration_name)
    sub_command = derive_sub_command(args, cfg)
    ensure_configuration_present(cfg, args, sub_command)

    if args.effective_start_date:
        cfg.add(config.Scope.application, "system", "time.start",
                args.effective_start_date)
        cfg.add(config.Scope.application, "system", "time.start.user_provided",
                True)
    else:
        cfg.add(config.Scope.application, "system", "time.start",
                datetime.datetime.utcnow())
        cfg.add(config.Scope.application, "system", "time.start.user_provided",
                False)

    cfg.add(config.Scope.applicationOverride, "system", "quiet.mode",
            args.quiet)

    # per node?
    cfg.add(config.Scope.applicationOverride, "system", "offline.mode",
            args.offline)
    cfg.add(config.Scope.applicationOverride, "system", "logging.output",
            args.logging)
    # only temporary to ignore unknown actor messages
    cfg.add(config.Scope.applicationOverride, "system",
            "ignore.unknown.return", args.ignore_unknown_return_values)

    # Local config per node
    cfg.add(config.Scope.application, "node", "rally.root", paths.rally_root())
    cfg.add(config.Scope.application, "node", "rally.cwd", os.getcwd())

    cfg.add(config.Scope.applicationOverride, "mechanic", "source.revision",
            args.revision)
    # TODO dm: Consider renaming this one. It's used by different modules
    if args.distribution_version:
        cfg.add(config.Scope.applicationOverride, "mechanic",
                "distribution.version", args.distribution_version)
    cfg.add(config.Scope.applicationOverride, "mechanic",
            "distribution.repository", args.distribution_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "repository.name",
            args.team_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.name", args.car)
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.plugins",
            csv_to_list(args.elasticsearch_plugins))
    cfg.add(config.Scope.applicationOverride, "mechanic", "node.datapaths",
            csv_to_list(args.data_paths))
    cfg.add(config.Scope.applicationOverride, "mechanic", "preserve.install",
            convert.to_bool(args.preserve_install))
    cfg.add(config.Scope.applicationOverride, "mechanic", "telemetry.devices",
            csv_to_list(args.telemetry))
    if args.override_src_dir is not None:
        cfg.add(config.Scope.applicationOverride, "source", "local.src.dir",
                args.override_src_dir)

    cfg.add(config.Scope.applicationOverride, "race", "pipeline",
            args.pipeline)
    cfg.add(config.Scope.applicationOverride, "race", "laps", args.laps)
    cfg.add(config.Scope.applicationOverride, "race", "user.tag",
            args.user_tag)

    cfg.add(config.Scope.applicationOverride, "track", "repository.name",
            args.track_repository)
    cfg.add(config.Scope.applicationOverride, "track", "track.name",
            args.track)
    cfg.add(config.Scope.applicationOverride, "track", "challenge.name",
            args.challenge)
    cfg.add(config.Scope.applicationOverride, "track", "test.mode.enabled",
            args.test_mode)
    cfg.add(config.Scope.applicationOverride, "track", "auto_manage_indices",
            to_bool(args.auto_manage_indices))

    cfg.add(config.Scope.applicationOverride, "reporting", "format",
            args.report_format)
    cfg.add(config.Scope.applicationOverride, "reporting", "output.path",
            args.report_file)
    if sub_command == "compare":
        cfg.add(config.Scope.applicationOverride, "reporting",
                "baseline.timestamp", args.baseline)
        cfg.add(config.Scope.applicationOverride, "reporting",
                "contender.timestamp", args.contender)

    ################################
    # new section name: driver
    ################################
    cfg.add(config.Scope.applicationOverride, "benchmarks", "cluster.health",
            args.cluster_health)
    cfg.add(config.Scope.applicationOverride, "driver", "profiling",
            args.enable_driver_profiling)
    if sub_command != "list":
        # Also needed by mechanic (-> telemetry) - duplicate by module?
        cfg.add(config.Scope.applicationOverride, "client", "hosts",
                _normalize_hosts(csv_to_list(args.target_hosts)))
        client_options = kv_to_map(csv_to_list(args.client_options))
        cfg.add(config.Scope.applicationOverride, "client", "options",
                client_options)
        if "timeout" not in client_options:
            console.info(
                "You did not provide an explicit timeout in the client options. Assuming default of 10 seconds."
            )

    # split by component?
    if sub_command == "list":
        cfg.add(config.Scope.applicationOverride, "system",
                "list.config.option", args.configuration)
        cfg.add(config.Scope.applicationOverride, "system",
                "list.races.max_results", args.limit)

    configure_logging(cfg)
    logger.info("OS [%s]" % str(os.uname()))
    logger.info("Python [%s]" % str(sys.implementation))
    logger.info("Rally version [%s]" % version.version())
    logger.info("Command line arguments: %s" % args)
    # Configure networking
    net.init()
    if not args.offline:
        if not net.has_internet_connection():
            console.warn(
                "No Internet connection detected. Automatic download of track data sets etc. is disabled.",
                logger=logger)
            cfg.add(config.Scope.applicationOverride, "system", "offline.mode",
                    True)
        else:
            logger.info("Detected a working Internet connection.")

    # Kill any lingering Rally processes before attempting to continue - the actor system needs to be a singleton on this machine
    # noinspection PyBroadException
    try:
        process.kill_running_rally_instances()
    except BaseException:
        logger.exception(
            "Could not terminate potentially running Rally instances correctly. Attempting to go on anyway."
        )

    success = dispatch_sub_command(cfg, sub_command)

    end = time.time()
    if success:
        console.println("")
        console.info("SUCCESS (took %d seconds)" % (end - start),
                     overline="-",
                     underline="-")
    else:
        console.println("")
        console.info("FAILURE (took %d seconds)" % (end - start),
                     overline="-",
                     underline="-")
        sys.exit(64)
Example #23
 def test_cannot_convert_invalid_value(self):
     values = ["Invalid", None, []]
     for value in values:
         with self.assertRaises(ValueError, msg="Expect [%s] of type [%s] to fail to be converted." % (str(value), type(value))) as ctx:
             convert.to_bool(value)
         self.assertEqual("Cannot convert [%s] to bool." % value, ctx.exception.args[0])
Example #24
 def __init__(self, name, distribution_config, template_renderer):
     self.name = name
     self.cfg = distribution_config
     self.runtime_jdk_bundled = convert.to_bool(
         self.cfg.get("runtime.jdk.bundled", False))
     self.template_renderer = template_renderer
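
Hypothetical usage of the constructor above; the enclosing class name (DistributionRepository) and the renderer stub are assumptions. Config values typically arrive as strings, which is why the flag is normalized via convert.to_bool:

repo = DistributionRepository(
    name="release",
    distribution_config={"runtime.jdk.bundled": "true"},  # a string, as read from an ini file
    template_renderer=None,  # placeholder; a real template renderer would go here
)
assert repo.runtime_jdk_bundled is True

# an absent key falls back to the default False, which to_bool passes through unchanged
assert DistributionRepository("release", {}, None).runtime_jdk_bundled is False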
Example #25
 def test_convert_to_true(self):
     values = ["True", "true", "Yes", "yes", "t", "y", "1", True]
     for value in values:
         self.assertTrue(convert.to_bool(value), msg="Expect [%s] of type [%s] to be converted to True." % (str(value), type(value)))
Example #26
    def create_config(self,
                      config_file,
                      advanced_config=False,
                      assume_defaults=False,
                      use_gradle_wrapper=False,
                      java_home=None,
                      runtime_java_home=None):
        """
        Either creates a new configuration file or overwrites an existing one. Will ask the user for input on configurable properties
        and writes them to the configuration file in ~/.rally/rally.ini.

        :param config_file: The config file object that determines where the configuration is stored.
        :param advanced_config: Whether to ask for properties that are not necessary for everyday use (on a dev machine). Default: False.
        :param assume_defaults: If True, assume the user accepted all values for which defaults are provided. Mainly intended for automatic
        configuration in CI runs. Default: False.
        :param use_gradle_wrapper: If True, use the Gradle wrapper, otherwise use the system's Gradle version. Default: False.
        :param java_home: Explicit JDK root directory to use for building from sources (autodetected if not provided). Default: None.
        :param runtime_java_home: Explicit JDK root directory to use at runtime (autodetected if not provided). Default: None.
        """
        self.prompter = Prompter(self.i, self.sec_i, self.o, assume_defaults)
        if advanced_config:
            self.o(
                "Running advanced configuration. You can get additional help at:"
            )
            self.o("")
            self.o("  %s" %
                   console.format.link("%sconfiguration.html" % DOC_LINK))
            self.o("")
        else:
            self.o(
                "Running simple configuration. Run the advanced configuration with:"
            )
            self.o("")
            self.o("  %s configure --advanced-config" % PROGRAM_NAME)
            self.o("")

        if config_file.present:
            self.o("\nWARNING: Will overwrite existing config file at [%s]\n" %
                   config_file.location)
            logger.debug("Detected an existing configuration file at [%s]" %
                         config_file.location)
        else:
            logger.debug(
                "Did not detect a configuration file at [%s]. Running initial configuration routine."
                % config_file.location)

        # Autodetect settings
        self.o("* Autodetecting available third-party software")
        git_path = io.guess_install_location("git")
        gradle_bin = "./gradlew" if use_gradle_wrapper else io.guess_install_location(
            "gradle")

        java_8_home = runtime_java_home if runtime_java_home else io.guess_java_home(
            major_version=8)
        java_9_home = java_home if java_home else io.guess_java_home(
            major_version=9)
        from esrally.utils import jvm
        if java_8_home:
            auto_detected_java_home = java_8_home
        # Don't auto-detect an EA release and bring trouble to the user later on. They can still configure it manually if they want to.
        elif java_9_home and not jvm.is_early_access_release(java_9_home):
            auto_detected_java_home = java_9_home
        else:
            auto_detected_java_home = None

        self.print_detection_result("git    ", git_path)
        self.print_detection_result("gradle ", gradle_bin)
        self.print_detection_result(
            "JDK    ",
            auto_detected_java_home,
            warn_if_missing=True,
            additional_message=
            "You cannot benchmark Elasticsearch on this machine without a JDK."
        )
        self.o("")

        # users that don't have Gradle available cannot benchmark from sources
        benchmark_from_sources = gradle_bin

        if not benchmark_from_sources:
            self.o(
                "********************************************************************************"
            )
            self.o(
                "You don't have the required software to benchmark Elasticsearch source builds."
            )
            self.o("")
            self.o("You can still benchmark binary distributions with e.g.:")
            self.o("")
            self.o("  %s --distribution-version=6.0.0" % PROGRAM_NAME)
            self.o(
                "********************************************************************************"
            )
            self.o("")

        root_dir = io.normalize_path(
            os.path.abspath(os.path.join(config_file.config_dir,
                                         "benchmarks")))
        if advanced_config:
            root_dir = io.normalize_path(
                self._ask_property("Enter the benchmark data directory",
                                   default_value=root_dir))
        else:
            self.o("* Setting up benchmark data directory in %s" % root_dir)

        if benchmark_from_sources:
            if not java_9_home or jvm.is_early_access_release(java_9_home):
                raw_java_9_home = self._ask_property(
                    "Enter the JDK 9 root directory",
                    check_path_exists=True,
                    mandatory=False)
                if raw_java_9_home and jvm.major_version(
                        raw_java_9_home
                ) == 9 and not jvm.is_early_access_release(raw_java_9_home):
                    java_9_home = io.normalize_path(
                        raw_java_9_home) if raw_java_9_home else None
                else:
                    benchmark_from_sources = False
                    self.o(
                        "********************************************************************************"
                    )
                    self.o(
                        "You don't have a valid JDK 9 installation and cannot benchmark source builds."
                    )
                    self.o("")
                    self.o(
                        "You can still benchmark binary distributions with e.g.:"
                    )
                    self.o("")
                    self.o("  %s --distribution-version=6.0.0" % PROGRAM_NAME)
                    self.o(
                        "********************************************************************************"
                    )
                    self.o("")

        if benchmark_from_sources:
            # We try to autodetect an existing ES source directory
            guess = self._guess_es_src_dir()
            if guess:
                source_dir = guess
                logger.debug(
                    "Autodetected Elasticsearch project directory at [%s]." %
                    source_dir)
            else:
                default_src_dir = os.path.join(root_dir, "src",
                                               "elasticsearch")
                logger.debug(
                    "Could not autodetect Elasticsearch project directory. Providing [%s] as default."
                    % default_src_dir)
                source_dir = default_src_dir

            if advanced_config:
                source_dir = io.normalize_path(
                    self._ask_property(
                        "Enter your Elasticsearch project directory:",
                        default_value=source_dir))
            if not advanced_config:
                self.o("* Setting up benchmark source directory in %s" %
                       source_dir)
                self.o("")

            # Not everybody might have SSH access. Play safe with the default. It may be slower but this will work for everybody.
            repo_url = "https://github.com/elastic/elasticsearch.git"

        if auto_detected_java_home:
            java_home = auto_detected_java_home
            local_benchmarks = True
        else:
            raw_java_home = self._ask_property(
                "Enter the JDK root directory (version 8 or later)",
                check_path_exists=True,
                mandatory=False)
            java_home = io.normalize_path(
                raw_java_home) if raw_java_home else None
            if not java_home:
                local_benchmarks = False
                self.o("")
                self.o(
                    "********************************************************************************"
                )
                self.o(
                    "You don't have a JDK installed but Elasticsearch requires one to run. This means"
                )
                self.o(
                    "that you cannot benchmark Elasticsearch on this machine.")
                self.o("")
                self.o("You can still benchmark against remote machines e.g.:")
                self.o("")
                self.o(
                    "  %s --pipeline=benchmark-only --target-host=\"NODE_IP:9200\""
                    % PROGRAM_NAME)
                self.o("")
                self.o("See %s for further info." %
                       console.format.link("%srecipes.html" % DOC_LINK))
                self.o(
                    "********************************************************************************"
                )
                self.o("")
            else:
                local_benchmarks = True

        if advanced_config:
            data_store_choice = self._ask_property(
                "Where should metrics be kept?"
                "\n\n"
                "(1) In memory (simpler but less options for analysis)\n"
                "(2) Elasticsearch (requires a separate ES instance, keeps all raw samples for analysis)"
                "\n\n",
                default_value="1",
                choices=["1", "2"])
            if data_store_choice == "1":
                env_name = "local"
                data_store_type = "in-memory"
                data_store_host, data_store_port, data_store_secure, data_store_user, data_store_password = "", "", "", "", ""
            else:
                data_store_type = "elasticsearch"
                data_store_host, data_store_port, data_store_secure, data_store_user, data_store_password = self._ask_data_store()

                env_name = self._ask_env_name()

            preserve_install = convert.to_bool(
                self._ask_property(
                    "Do you want Rally to keep the Elasticsearch benchmark candidate "
                    "installation including the index (will use several GB per trial run)?",
                    default_value=False))
        else:
            # Does not matter for an in-memory store
            env_name = "local"
            data_store_type = "in-memory"
            data_store_host, data_store_port, data_store_secure, data_store_user, data_store_password = "", "", "", "", ""
            preserve_install = False

        config = configparser.ConfigParser()
        config["meta"] = {}
        config["meta"]["config.version"] = str(Config.CURRENT_CONFIG_VERSION)

        config["system"] = {}
        config["system"]["env.name"] = env_name

        config["node"] = {}
        config["node"]["root.dir"] = root_dir

        if benchmark_from_sources:
            # user has provided the Elasticsearch directory but the root for Elasticsearch and related plugins will be one level above
            final_source_dir = io.normalize_path(
                os.path.abspath(os.path.join(source_dir, os.pardir)))
            config["node"]["src.root.dir"] = final_source_dir

            config["source"] = {}
            config["source"]["remote.repo.url"] = repo_url
            # the Elasticsearch directory is just the last path component (relative to the source root directory)
            config["source"]["elasticsearch.src.subdir"] = io.basename(
                source_dir)

        if gradle_bin:
            config["build"] = {}
            config["build"]["gradle.bin"] = gradle_bin

        config["runtime"] = {}
        if java_home:
            config["runtime"]["java.home"] = java_home
        if java_9_home:
            config["runtime"]["java9.home"] = java_9_home

        config["benchmarks"] = {}
        config["benchmarks"]["local.dataset.cache"] = "${node:root.dir}/data"

        config["reporting"] = {}
        config["reporting"]["datastore.type"] = data_store_type
        config["reporting"]["datastore.host"] = data_store_host
        config["reporting"]["datastore.port"] = data_store_port
        config["reporting"]["datastore.secure"] = data_store_secure
        config["reporting"]["datastore.user"] = data_store_user
        config["reporting"]["datastore.password"] = data_store_password

        config["tracks"] = {}
        config["tracks"][
            "default.url"] = "https://github.com/elastic/rally-tracks"

        config["teams"] = {}
        config["teams"][
            "default.url"] = "https://github.com/elastic/rally-teams"

        config["defaults"] = {}
        config["defaults"]["preserve_benchmark_candidate"] = str(
            preserve_install)

        config["distributions"] = {}
        config["distributions"]["release.1.url"] = "https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-" \
                                                   "{{VERSION}}.tar.gz"
        config["distributions"]["release.2.url"] = "https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/" \
                                                   "distribution/tar/elasticsearch/{{VERSION}}/elasticsearch-{{VERSION}}.tar.gz"
        config["distributions"][
            "release.url"] = "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{VERSION}}.tar.gz"
        config["distributions"]["release.cache"] = "true"

        config_file.store(config)

        self.o(
            "Configuration successfully written to %s. Happy benchmarking!" %
            config_file.location)
        self.o("")
        if local_benchmarks and benchmark_from_sources:
            self.o(
                "To benchmark Elasticsearch with the default benchmark, run:")
            self.o("")
            self.o("  %s" % PROGRAM_NAME)
            self.o("")
        elif local_benchmarks:
            self.o(
                "To benchmark Elasticsearch 6.0.0 with the default benchmark, run:"
            )
            self.o("")
            self.o("  %s --distribution-version=6.0.0" % PROGRAM_NAME)
            self.o("")
        else:
            # we've already printed an info for the user. No need to repeat that.
            pass

        self.o("More info about Rally:")
        self.o("")
        self.o("* Type %s --help" % PROGRAM_NAME)
        self.o("* Read the documentation at %s" %
               console.format.link(DOC_LINK))
        self.o("* Ask a question on the forum at %s" % console.format.link(
            "https://discuss.elastic.co/c/elasticsearch/rally"))
Example #27
    def create_config(self, config_file, advanced_config=False):
        """
        Either creates a new configuration file or overwrites an existing one. Will ask the user for input on configurable properties
        and writes them to the configuration file in ~/.rally/rally.ini.

        :param config_file: The config file object that determines where the configuration is stored.
        :param advanced_config: Whether to ask for properties that are not necessary for everyday use (on a dev machine). Default: False.
        """
        if advanced_config:
            self.o(
                "Running advanced configuration. You can get additional help at:"
            )
            self.o("")
            self.o("  %s" % format.link(
                "https://esrally.readthedocs.io/en/latest/configuration.html"))
            self.o("")

            logger.debug("Running advanced configuration routine.")
            self.o("")
        else:
            self.o(
                "Running simple configuration. You can run the advanced configuration with:"
            )
            self.o("")
            self.o("  %s configure --advanced-config" % PROGRAM_NAME)
            self.o("")
            logger.debug("Running simple configuration routine.")

        if config_file.present:
            self.o("\nWARNING: Will overwrite existing config file at [%s]\n" %
                   config_file.location)
            logger.debug("Detected an existing configuration file at [%s]" %
                         config_file.location)
        else:
            logger.debug(
                "Did not detect a configuration file at [%s]. Running initial configuration routine."
                % config_file.location)

        # Autodetect settings
        self.o("[✓] Autodetecting available third-party software")
        git_path = io.guess_install_location("git")
        gradle_bin = io.guess_install_location("gradle")
        # default_jdk_7 = io.guess_java_home(major_version=7)
        default_jdk_8 = io.guess_java_home(major_version=8)

        self.print_detection_result("git    ", git_path)
        self.print_detection_result("gradle ", gradle_bin)
        # self.print_detection_result("JDK 7  ", default_jdk_7, warn_if_missing=True, additional_message="Cannot benchmark Elasticsearch 2.x")
        self.print_detection_result(
            "JDK 8  ",
            default_jdk_8,
            warn_if_missing=True,
            additional_message=
            "You cannot benchmark Elasticsearch 5.x without a JDK 8 installation"
        )
        # self.print_detection_result("JDK 9 ", default_jdk_9, warn_if_missing=True)
        self.o("")

        # users that don't have Gradle available cannot benchmark from sources
        benchmark_from_sources = gradle_bin

        if not benchmark_from_sources:
            self.o(
                "**********************************************************************************"
            )
            self.o(
                "You don't have the necessary software to benchmark source builds of Elasticsearch."
            )
            self.o("")
            self.o("You can still benchmark binary distributions with e.g.:")
            self.o("")
            self.o(
                "  %s --pipeline=from-distribution --distribution-version=5.0.0-alpha2"
                % PROGRAM_NAME)
            self.o("")
            self.o("See %s" % format.link(
                "https://esrally.readthedocs.io/en/latest/pipelines.html#from-distribution"
            ))
            self.o(
                "**********************************************************************************"
            )
            self.o("")

        root_dir = "%s/benchmarks" % config_file.config_dir
        self.o(
            "[✓] Setting up benchmark data directory in [%s] (needs several GB)."
            % root_dir)

        if benchmark_from_sources:
            # We try to autodetect an existing ES source directory
            guess = self._guess_es_src_dir()
            if guess:
                source_dir = guess
                self.o(
                    "[✓] Autodetected Elasticsearch project directory at [%s]."
                    % source_dir)
                logger.debug(
                    "Autodetected Elasticsearch project directory at [%s]." %
                    source_dir)
            else:
                default_src_dir = "%s/src" % root_dir
                logger.debug(
                    "Could not autodetect Elasticsearch project directory. Providing [%s] as default."
                    % default_src_dir)
                source_dir = io.normalize_path(
                    self._ask_property(
                        "Enter your Elasticsearch project directory:",
                        default_value=default_src_dir))
            # Not everybody might have SSH access. Play safe with the default. It may be slower but this will work for everybody.
            repo_url = "https://github.com/elastic/elasticsearch.git"

        if default_jdk_8:
            jdk8_home = default_jdk_8
        else:
            self.o("")
            jdk8_home = io.normalize_path(
                self._ask_property("Enter the JDK 8 root directory:",
                                   check_path_exists=True))

        if advanced_config:
            env_name = self._ask_env_name()
            data_store_type = "elasticsearch"
            data_store_host, data_store_port, data_store_secure, data_store_user, data_store_password = self._ask_data_store()

            preserve_install = convert.to_bool(
                self._ask_property(
                    "Do you want Rally to keep the Elasticsearch benchmark candidate "
                    "installation including all data (will use lots of disk space)",
                    default_value=False))
        else:
            # Does not matter too much for an in-memory store
            env_name = "local"
            data_store_type = "in-memory"
            data_store_host, data_store_port, data_store_secure, data_store_user, data_store_password = "", "", "", "", ""
            preserve_install = False

        config = configparser.ConfigParser()
        config["meta"] = {}
        config["meta"]["config.version"] = str(Config.CURRENT_CONFIG_VERSION)

        config["system"] = {}
        config["system"]["root.dir"] = root_dir
        config["system"]["log.root.dir"] = "logs"
        config["system"]["env.name"] = env_name

        if benchmark_from_sources:
            config["source"] = {}
            config["source"]["local.src.dir"] = source_dir
            config["source"]["remote.repo.url"] = repo_url

            config["build"] = {}
            config["build"]["gradle.bin"] = gradle_bin

        config["provisioning"] = {}
        config["provisioning"]["local.install.dir"] = "install"

        config["runtime"] = {}
        config["runtime"]["java8.home"] = jdk8_home

        config["benchmarks"] = {}
        config["benchmarks"]["local.dataset.cache"] = "${system:root.dir}/data"

        config["reporting"] = {}
        config["reporting"]["datastore.type"] = data_store_type
        config["reporting"]["datastore.host"] = data_store_host
        config["reporting"]["datastore.port"] = data_store_port
        config["reporting"]["datastore.secure"] = data_store_secure
        config["reporting"]["datastore.user"] = data_store_user
        config["reporting"]["datastore.password"] = data_store_password

        config["tracks"] = {}
        config["tracks"][
            "default.url"] = "https://github.com/elastic/rally-tracks"

        config["defaults"] = {}
        config["defaults"]["preserve_benchmark_candidate"] = str(
            preserve_install)

        config_file.store(config)

        self.o(
            "[✓] Configuration successfully written to [%s]. Happy benchmarking!"
            % config_file.location)
        self.o("")
        if benchmark_from_sources:
            self.o(
                "To benchmark the currently checked out version of Elasticsearch with the default benchmark run:"
            )
            self.o("")
            self.o("  %s" % PROGRAM_NAME)
        else:
            self.o(
                "To benchmark Elasticsearch 5.0.0-alpha2 with the default benchmark run:"
            )
            self.o("")
            self.o(
                "  %s --pipeline=from-distribution --distribution-version=5.0.0-alpha2"
                % PROGRAM_NAME)

        self.o()
        self.o("For help, type %s --help or see the user documentation at %s" %
               (PROGRAM_NAME, format.link("https://esrally.readthedocs.io")))
Example #28
def main():
    check_python_version()
    log.install_default_log_config()
    log.configure_logging()
    logger = logging.getLogger(__name__)
    start = time.time()

    # Early init of console output so we start to show everything consistently.
    console.init(quiet=False)

    arg_parser = create_arg_parser()
    args = arg_parser.parse_args()

    console.init(quiet=args.quiet)
    console.println(BANNER)

    cfg = config.Config(config_name=args.configuration_name)
    sub_command = derive_sub_command(args, cfg)
    ensure_configuration_present(cfg, args, sub_command)

    if args.effective_start_date:
        cfg.add(config.Scope.application, "system", "time.start",
                args.effective_start_date)
        cfg.add(config.Scope.application, "system", "time.start.user_provided",
                True)
    else:
        cfg.add(config.Scope.application, "system", "time.start",
                datetime.datetime.utcnow())
        cfg.add(config.Scope.application, "system", "time.start.user_provided",
                False)

    cfg.add(config.Scope.applicationOverride, "system", "trial.id",
            str(uuid.uuid4()))
    cfg.add(config.Scope.applicationOverride, "system", "quiet.mode",
            args.quiet)
    cfg.add(config.Scope.applicationOverride, "system", "offline.mode",
            args.offline)

    # Local config per node
    cfg.add(config.Scope.application, "node", "rally.root", paths.rally_root())
    cfg.add(config.Scope.application, "node", "rally.cwd", os.getcwd())

    cfg.add(config.Scope.applicationOverride, "mechanic", "source.revision",
            args.revision)
    if args.distribution_version:
        cfg.add(config.Scope.applicationOverride, "mechanic",
                "distribution.version", args.distribution_version)
    cfg.add(config.Scope.applicationOverride, "mechanic",
            "distribution.repository", args.distribution_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.names",
            opts.csv_to_list(args.car))
    if args.team_path:
        cfg.add(config.Scope.applicationOverride, "mechanic", "team.path",
                os.path.abspath(io.normalize_path(args.team_path)))
        cfg.add(config.Scope.applicationOverride, "mechanic",
                "repository.name", None)
    else:
        cfg.add(config.Scope.applicationOverride, "mechanic",
                "repository.name", args.team_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.plugins",
            opts.csv_to_list(args.elasticsearch_plugins))
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.params",
            opts.to_dict(args.car_params))
    cfg.add(config.Scope.applicationOverride, "mechanic", "plugin.params",
            opts.to_dict(args.plugin_params))
    if args.keep_cluster_running:
        cfg.add(config.Scope.applicationOverride, "mechanic", "keep.running",
                True)
        # force-preserve the cluster nodes.
        cfg.add(config.Scope.applicationOverride, "mechanic",
                "preserve.install", True)
    else:
        cfg.add(config.Scope.applicationOverride, "mechanic", "keep.running",
                False)
        cfg.add(config.Scope.applicationOverride, "mechanic",
                "preserve.install", convert.to_bool(args.preserve_install))
    cfg.add(config.Scope.applicationOverride, "mechanic", "runtime.jdk",
            args.runtime_jdk)
    cfg.add(config.Scope.applicationOverride, "mechanic", "telemetry.devices",
            opts.csv_to_list(args.telemetry))
    cfg.add(config.Scope.applicationOverride, "mechanic", "telemetry.params",
            opts.to_dict(args.telemetry_params))

    cfg.add(config.Scope.applicationOverride, "race", "pipeline",
            args.pipeline)
    cfg.add(config.Scope.applicationOverride, "race", "laps", args.laps)
    cfg.add(config.Scope.applicationOverride, "race", "user.tag",
            args.user_tag)

    # We can assume here that if a track path is given, the user did not specify a track repository
    # (although argparse still sets the repository argument to its default value).
    if args.track_path:
        cfg.add(config.Scope.applicationOverride, "track", "track.path",
                os.path.abspath(io.normalize_path(args.track_path)))
        cfg.add(config.Scope.applicationOverride, "track", "repository.name",
                None)
        if args.track:
            # stay as close as possible to argparse's own error style even though this is custom validation.
            arg_parser.error(
                "argument --track not allowed with argument --track-path")
        # cfg.add(config.Scope.applicationOverride, "track", "track.name", None)
    else:
        # cfg.add(config.Scope.applicationOverride, "track", "track.path", None)
        cfg.add(config.Scope.applicationOverride, "track", "repository.name",
                args.track_repository)
        # set the default programmatically because we need to determine whether the user has provided a value
        chosen_track = args.track if args.track else "geonames"
        cfg.add(config.Scope.applicationOverride, "track", "track.name",
                chosen_track)

    cfg.add(config.Scope.applicationOverride, "track", "params",
            opts.to_dict(args.track_params))
    cfg.add(config.Scope.applicationOverride, "track", "challenge.name",
            args.challenge)
    cfg.add(config.Scope.applicationOverride, "track", "include.tasks",
            opts.csv_to_list(args.include_tasks))
    cfg.add(config.Scope.applicationOverride, "track", "test.mode.enabled",
            args.test_mode)

    cfg.add(config.Scope.applicationOverride, "reporting", "format",
            args.report_format)
    cfg.add(config.Scope.applicationOverride, "reporting", "values",
            args.show_in_report)
    cfg.add(config.Scope.applicationOverride, "reporting", "output.path",
            args.report_file)
    if sub_command == "compare":
        cfg.add(config.Scope.applicationOverride, "reporting",
                "baseline.timestamp", args.baseline)
        cfg.add(config.Scope.applicationOverride, "reporting",
                "contender.timestamp", args.contender)
    if sub_command == "generate":
        cfg.add(config.Scope.applicationOverride, "generator", "chart.type",
                args.chart_type)
        cfg.add(config.Scope.applicationOverride, "generator", "output.path",
                args.output_path)

        if args.chart_spec_path and (args.track or args.challenge or args.car
                                     or args.node_count):
            console.println(
                "You need to specify either --chart-spec-path or --track, --challenge, --car and "
                "--node-count but not both.")
            sys.exit(1)
        if args.chart_spec_path:
            cfg.add(config.Scope.applicationOverride, "generator",
                    "chart.spec.path", args.chart_spec_path)
        else:
            # other options are stored elsewhere already
            cfg.add(config.Scope.applicationOverride, "generator",
                    "node.count", args.node_count)

    cfg.add(config.Scope.applicationOverride, "driver", "profiling",
            args.enable_driver_profiling)
    cfg.add(config.Scope.applicationOverride, "driver", "on.error",
            args.on_error)
    cfg.add(config.Scope.applicationOverride, "driver", "load_driver_hosts",
            opts.csv_to_list(args.load_driver_hosts))
    if sub_command != "list":
        # Also needed by mechanic (-> telemetry) - duplicate by module?
        target_hosts = opts.TargetHosts(args.target_hosts)
        cfg.add(config.Scope.applicationOverride, "client", "hosts",
                target_hosts)
        client_options = opts.ClientOptions(args.client_options,
                                            target_hosts=target_hosts)
        cfg.add(config.Scope.applicationOverride, "client", "options",
                client_options)
        if "timeout" not in client_options.default:
            console.info(
                "You did not provide an explicit timeout in the client options. Assuming default of 10 seconds."
            )
        if list(target_hosts.all_hosts) != list(
                client_options.all_client_options):
            console.println(
                "--target-hosts and --client-options must define the same keys for multi cluster setups."
            )
            sys.exit(1)
    # split by component?
    if sub_command == "list":
        cfg.add(config.Scope.applicationOverride, "system",
                "list.config.option", args.configuration)
        cfg.add(config.Scope.applicationOverride, "system",
                "list.races.max_results", args.limit)

    logger.info("OS [%s]", str(os.uname()))
    logger.info("Python [%s]", str(sys.implementation))
    logger.info("Rally version [%s]", version.version())
    logger.info("Command line arguments: %s", args)
    # Configure networking
    net.init()
    if not args.offline:
        if not net.has_internet_connection():
            console.warn(
                "No Internet connection detected. Automatic download of track data sets etc. is disabled.",
                logger=logger)
            cfg.add(config.Scope.applicationOverride, "system", "offline.mode",
                    True)
        else:
            logger.info("Detected a working Internet connection.")

    success = dispatch_sub_command(cfg, sub_command)

    end = time.time()
    if success:
        console.println("")
        console.info("SUCCESS (took %d seconds)" % (end - start),
                     overline="-",
                     underline="-")
    else:
        console.println("")
        console.info("FAILURE (took %d seconds)" % (end - start),
                     overline="-",
                     underline="-")
        sys.exit(64)
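
Both versions of main() in this example rely on the same layering idea: values that Rally derives itself are registered with config.Scope.application, while command-line values are registered with config.Scope.applicationOverride so that they win on lookup. Rally's actual config.Config is not shown in these snippets; the following is a minimal, hypothetical sketch of that precedence rule (the Scope ordering and the LayeredConfig class are assumptions for illustration, not Rally's implementation):

from enum import IntEnum


class Scope(IntEnum):
    # Assumption for this sketch: higher values take precedence on lookup.
    application = 1
    applicationOverride = 2


class LayeredConfig:
    """Toy stand-in for Rally's config.Config, illustrating scope precedence."""

    def __init__(self):
        # (section, key) -> {scope: value}
        self._store = {}

    def add(self, scope, section, key, value):
        self._store.setdefault((section, key), {})[scope] = value

    def opts(self, section, key):
        candidates = self._store.get((section, key), {})
        if not candidates:
            raise KeyError("No config value for [%s.%s]" % (section, key))
        # Return the value registered at the highest scope.
        return candidates[max(candidates)]


cfg = LayeredConfig()
cfg.add(Scope.application, "system", "offline.mode", False)
# A later override (e.g. when no Internet connection is detected) wins:
cfg.add(Scope.applicationOverride, "system", "offline.mode", True)
assert cfg.opts("system", "offline.mode") is True
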
Example #29
0
def main():
    start = time.time()
    # Early init of console output so we start to show everything consistently.
    console.init(quiet=False)

    pre_configure_logging()
    args = parse_args()

    console.init(quiet=args.quiet)
    console.println(BANNER)

    cfg = config.Config(config_name=args.configuration_name)
    sub_command = derive_sub_command(args, cfg)
    ensure_configuration_present(cfg, args, sub_command)
    # Add global meta info derived by rally itself
    cfg.add(config.Scope.application, "meta", "time.start", args.effective_start_date)
    cfg.add(config.Scope.application, "system", "rally.root", rally_root_path())
    cfg.add(config.Scope.application, "system", "rally.cwd", os.getcwd())
    cfg.add(config.Scope.application, "system", "invocation.root.dir", paths.Paths(cfg).invocation_root())
    # Add command line config
    cfg.add(config.Scope.applicationOverride, "source", "revision", args.revision)
    cfg.add(config.Scope.applicationOverride, "source", "distribution.version", args.distribution_version)
    cfg.add(config.Scope.applicationOverride, "source", "distribution.repository", args.distribution_repository)
    cfg.add(config.Scope.applicationOverride, "system", "pipeline", args.pipeline)
    cfg.add(config.Scope.applicationOverride, "system", "track.repository", args.track_repository)
    cfg.add(config.Scope.applicationOverride, "system", "quiet.mode", args.quiet)
    cfg.add(config.Scope.applicationOverride, "system", "offline.mode", args.offline)
    cfg.add(config.Scope.applicationOverride, "system", "user.tag", args.user_tag)
    cfg.add(config.Scope.applicationOverride, "system", "logging.output", args.logging)
    cfg.add(config.Scope.applicationOverride, "telemetry", "devices", csv_to_list(args.telemetry))
    cfg.add(config.Scope.applicationOverride, "benchmarks", "track", args.track)
    cfg.add(config.Scope.applicationOverride, "benchmarks", "challenge", args.challenge)
    cfg.add(config.Scope.applicationOverride, "benchmarks", "car", args.car)
    cfg.add(config.Scope.applicationOverride, "benchmarks", "cluster.health", args.cluster_health)
    cfg.add(config.Scope.applicationOverride, "benchmarks", "laps", args.laps)
    cfg.add(config.Scope.applicationOverride, "benchmarks", "test.mode", args.test_mode)
    cfg.add(config.Scope.applicationOverride, "provisioning", "datapaths", csv_to_list(args.data_paths))
    cfg.add(config.Scope.applicationOverride, "provisioning", "install.preserve", convert.to_bool(args.preserve_install))
    cfg.add(config.Scope.applicationOverride, "launcher", "external.target.hosts", convert_hosts(csv_to_list(args.target_hosts)))
    cfg.add(config.Scope.applicationOverride, "launcher", "client.options", kv_to_map(csv_to_list(args.client_options)))
    cfg.add(config.Scope.applicationOverride, "report", "reportformat", args.report_format)
    cfg.add(config.Scope.applicationOverride, "report", "reportfile", args.report_file)
    if args.override_src_dir is not None:
        cfg.add(config.Scope.applicationOverride, "source", "local.src.dir", args.override_src_dir)

    if sub_command == "list":
        cfg.add(config.Scope.applicationOverride, "system", "list.config.option", args.configuration)
        cfg.add(config.Scope.applicationOverride, "system", "list.races.max_results", args.limit)
    if sub_command == "compare":
        cfg.add(config.Scope.applicationOverride, "report", "comparison.baseline.timestamp", args.baseline)
        cfg.add(config.Scope.applicationOverride, "report", "comparison.contender.timestamp", args.contender)

    configure_logging(cfg)
    logger.info("Rally version [%s]" % version())
    logger.info("Command line arguments: %s" % args)
    # Configure networking
    net.init()
    if not args.offline:
        if not net.has_internet_connection():
            console.warn("No Internet connection detected. Automatic download of track data sets etc. is disabled.",
                         logger=logger)
            cfg.add(config.Scope.applicationOverride, "system", "offline.mode", True)
        else:
            logger.info("Detected a working Internet connection.")

    # Kill any lingering Rally processes before attempting to continue - the actor system needs to be a singleton on this machine.
    # noinspection PyBroadException
    try:
        process.kill_running_rally_instances()
    except BaseException:
        logger.exception("Could not terminate potentially running Rally instances correctly. Attempting to go on anyway.")

    try:
        actors = bootstrap_actor_system(cfg)
    except RuntimeError as e:
        logger.exception("Could not bootstrap actor system.")
        if str(e) == "Unable to determine valid external socket address.":
            console.warn("Could not determine a socket address. Are you running without any network?", logger=logger)
            actors = bootstrap_actor_system(cfg, system_base="multiprocQueueBase")
        else:
            raise

    success = False
    try:
        success = dispatch_sub_command(cfg, sub_command)
    finally:
        shutdown_complete = False
        times_interrupted = 0
        while not shutdown_complete and times_interrupted < 2:
            try:
                logger.info("Attempting to shutdown internal actor system.")
                actors.shutdown()
                shutdown_complete = True
                logger.info("Shutdown completed.")
            except KeyboardInterrupt:
                times_interrupted += 1
                logger.warn("User interrupted shutdown of internal actor system.")
                console.info("Please wait a moment for Rally's internal components to shutdown.")
        if not shutdown_complete and times_interrupted > 0:
            logger.warn("Terminating after user has interrupted actor system shutdown explicitly for [%d] times." % times_interrupted)
            console.println("")
            console.warn("Terminating now at the risk of leaving child processes behind.")
            console.println("")
            console.warn("The next race may fail due to an unclean shutdown.")
            console.println("")
            console.println(SKULL)
            console.println("")

    end = time.time()
    if success:
        console.println("")
        console.info("SUCCESS (took %d seconds)" % (end - start), overline="-", underline="-")
    else:
        console.println("")
        console.info("FAILURE (took %d seconds)" % (end - start), overline="-", underline="-")
        sys.exit(64)
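
The finally block above implements a small, reusable pattern: retry a blocking shutdown call when the user presses Ctrl-C, and give up after a bounded number of interrupts instead of looping forever. Below is a self-contained sketch of just that pattern; the shutdown_with_interrupt_tolerance helper and slow_shutdown callable are hypothetical stand-ins for actors.shutdown(), not part of Rally:

import logging
import time

logger = logging.getLogger(__name__)


def shutdown_with_interrupt_tolerance(shutdown, max_interrupts=2):
    """Retry shutdown() when interrupted; give up after max_interrupts
    KeyboardInterrupts. Returns True if shutdown completed."""
    times_interrupted = 0
    while times_interrupted < max_interrupts:
        try:
            shutdown()
            return True
        except KeyboardInterrupt:
            times_interrupted += 1
            logger.warning("User interrupted shutdown (%d/%d).",
                           times_interrupted, max_interrupts)
    return False


# Usage sketch: slow_shutdown stands in for actors.shutdown().
def slow_shutdown():
    time.sleep(5)


if not shutdown_with_interrupt_tolerance(slow_shutdown):
    logger.warning("Giving up; child processes may be left behind.")
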