def stop(raise_errors=True):
    """Shut down a running actor system and block until it has terminated.

    :param raise_errors: If ``True``, re-raise any shutdown failure and exit
        with an error when no actor system is running at all.
    """
    if not actor.actor_system_already_running():
        if raise_errors:
            console.error("Could not shut down actor system: Actor system is not running.")
            sys.exit(1)
        return
    # noinspection PyBroadException
    try:
        # TheSpian writes the following warning upon start (at least) on Mac OS X:
        #
        # WARNING:root:Unable to get address info for address 103.1.168.192.in-addr.arpa (AddressFamily.AF_INET,\
        # SocketKind.SOCK_DGRAM, 17, 0): <class 'socket.gaierror'> [Errno 8] nodename nor servname provided, or not known
        #
        # Therefore, we will not show warnings but only errors.
        logging.basicConfig(level=logging.ERROR)
        system = actor.bootstrap_actor_system(try_join=True)
        system.shutdown()
        # poll until the actor system has actually terminated
        console.info("Shutting down actor system.", end="", flush=True)
        while actor.actor_system_already_running():
            console.println(".", end="", flush=True)
            time.sleep(1)
        console.println(" [OK]")
    except BaseException:
        console.error("Could not shut down actor system.")
        if raise_errors:
            # raise again so user can see the error
            raise
def dispatch_sub_command(cfg, sub_command):
    """Dispatch ``sub_command`` to its implementation.

    :param cfg: The config object.
    :param sub_command: Name of the subcommand to run.
    :return: ``True`` on success, ``False`` on any error (which is logged
        and printed to the console).
    """
    try:
        if sub_command == "compare":
            reporter.compare(cfg)
        elif sub_command == "list":
            list(cfg)
        elif sub_command == "race":
            race(cfg)
        elif sub_command == "generate":
            generate(cfg)
        else:
            raise exceptions.SystemSetupError("Unknown subcommand [%s]" % sub_command)
        return True
    except exceptions.RallyError as e:
        # lazy %-args: format only when the record is actually emitted
        # (consistent with the logging style used elsewhere in this module)
        logging.exception("Cannot run subcommand [%s].", sub_command)
        msg = str(e.message)
        nesting = 0
        # walk the chain of causes, indenting each nested message one level deeper
        while hasattr(e, "cause") and e.cause:
            nesting += 1
            e = e.cause
            if hasattr(e, "message"):
                msg += "\n%s%s" % ("\t" * nesting, e.message)
            else:
                msg += "\n%s%s" % ("\t" * nesting, str(e))
        console.error("Cannot %s. %s" % (sub_command, msg))
        console.println("")
        print_help_on_errors()
        return False
    except BaseException as e:
        logging.exception("A fatal error occurred while running subcommand [%s].", sub_command)
        console.error("Cannot %s. %s." % (sub_command, e))
        console.println("")
        print_help_on_errors()
        return False
def stop(raise_errors=True):
    """Shut down the actor system if one is running, waiting for termination.

    :param raise_errors: If ``True``, propagate shutdown failures and exit
        with status 1 when no actor system is running.
    """
    if actor.actor_system_already_running():
        try:
            # TheSpian logs a spurious getaddrinfo warning on startup (seen on
            # Mac OS X), so restrict console logging to errors only.
            logging.basicConfig(level=logging.ERROR)
            actor.bootstrap_actor_system(try_join=True).shutdown()
            # busy-wait (with one second pauses) until termination is visible
            console.info("Shutting down actor system.", end="", flush=True)
            while actor.actor_system_already_running():
                console.println(".", end="", flush=True)
                time.sleep(1)
            console.println(" [OK]")
        except BaseException:
            console.error("Could not shut down actor system.")
            if raise_errors:
                # surface the original error to the user
                raise
    elif raise_errors:
        console.error("Could not shut down actor system: Actor system is not running.")
        sys.exit(1)
def dispatch_sub_command(cfg, sub_command):
    """Run the given subcommand against ``cfg``.

    :return: ``True`` if it ran successfully, ``False`` otherwise.
    """
    try:
        # table-based dispatch; every handler takes the config object
        handlers = {
            "compare": reporter.compare,
            "list": list,
            "race": race,
        }
        handler = handlers.get(sub_command)
        if handler is None:
            raise exceptions.SystemSetupError("Unknown subcommand [%s]" % sub_command)
        handler(cfg)
        return True
    except exceptions.RallyError as e:
        logging.exception("Cannot run subcommand [%s]." % sub_command)
        console.error("Cannot %s. %s" % (sub_command, e))
        console.println("")
        print_help_on_errors()
        return False
    except BaseException as e:
        logging.exception("A fatal error occurred while running subcommand [%s]." % sub_command)
        console.error("Cannot %s. %s." % (sub_command, e))
        console.println("")
        print_help_on_errors()
        return False
def ensure_configuration_present(cfg, args, sub_command):
    """Ensure a valid configuration exists before running a subcommand.

    For "configure" a new config is (re)created and Rally exits with status 0.
    For all other subcommands an existing config is loaded (auto-upgrading it
    if necessary); if none is present, Rally exits with status 64.

    :param cfg: The config object.
    :param args: Parsed command line arguments.
    :param sub_command: Name of the subcommand that is about to run.
    """
    if sub_command == "configure":
        config.ConfigFactory().create_config(cfg.config_file,
                                             advanced_config=args.advanced_config,
                                             assume_defaults=args.assume_defaults)
        # use sys.exit: the site-provided exit() builtin is meant for the
        # interactive interpreter and may be absent (e.g. under python -S)
        sys.exit(0)
    else:
        if cfg.config_present():
            cfg.load_config(auto_upgrade=True)
        else:
            console.error("No config present. Please run '%s configure' first." % PROGRAM_NAME)
            sys.exit(64)
def ensure_configuration_present(cfg, args, sub_command):
    """Make sure Rally has a usable configuration, creating one for "configure".

    Exits with status 0 after (re)creating the config, or with status 64 when
    a subcommand is run without any configuration present.
    """
    if sub_command == "configure":
        config.ConfigFactory().create_config(
            cfg.config_file,
            advanced_config=args.advanced_config,
            assume_defaults=args.assume_defaults)
        sys.exit(0)
    # any other subcommand needs an existing configuration
    if not cfg.config_present():
        console.error("No config present. Please run '%s configure' first." % PROGRAM_NAME)
        sys.exit(64)
    cfg.load_config(auto_upgrade=True)
def dispatch_sub_command(cfg, sub_command):
    """Dispatch ``sub_command`` to the matching Rally operation.

    :param cfg: The config object, passed to every handler.
    :param sub_command: Name of the subcommand to run.
    :return: ``True`` on success, ``False`` on any error (logged and shown).
    """
    try:
        # table-based dispatch; every handler takes only the config object
        dispatch = {
            "compare": reporter.compare,
            "list": dispatch_list,
            "download": mechanic.download,
            "install": mechanic.install,
            "start": mechanic.start,
            "stop": mechanic.stop,
            "race": race,
            "generate": generate,
            "create-track": tracker.create_track,
            "info": track.track_info,
        }
        if sub_command not in dispatch:
            raise exceptions.SystemSetupError("Unknown subcommand [%s]" % sub_command)
        dispatch[sub_command](cfg)
        return True
    except exceptions.RallyError as e:
        logging.getLogger(__name__).exception("Cannot run subcommand [%s].", sub_command)
        msg = str(e.message)
        nesting = 0
        # unwrap the chain of causes, indenting each nested message deeper
        while hasattr(e, "cause") and e.cause:
            nesting += 1
            e = e.cause
            if hasattr(e, "message"):
                msg += "\n%s%s" % ("\t" * nesting, e.message)
            else:
                msg += "\n%s%s" % ("\t" * nesting, str(e))
        console.error("Cannot %s. %s" % (sub_command, msg))
        console.println("")
        print_help_on_errors()
        return False
    except BaseException as e:
        logging.getLogger(__name__).exception("A fatal error occurred while running subcommand [%s].", sub_command)
        console.error("Cannot %s. %s." % (sub_command, e))
        console.println("")
        print_help_on_errors()
        return False
def ensure_configuration_present(cfg, args, sub_command):
    """Ensure a configuration exists, migrating an incompatible one in place.

    For "configure" a new config is (re)created and Rally exits with status 0.
    Otherwise an existing config is loaded and, if it is not compatible with
    this Rally version, migrated and reloaded. Exits with status 64 when no
    configuration is present at all.

    :param cfg: The config object.
    :param args: Parsed command line arguments.
    :param sub_command: Name of the subcommand that is about to run.
    """
    if sub_command == "configure":
        config.ConfigFactory().create_config(cfg.config_file,
                                             advanced_config=args.advanced_config,
                                             assume_defaults=args.assume_defaults)
        # use sys.exit: the site-provided exit() builtin is meant for the
        # interactive interpreter and may be absent (e.g. under python -S)
        sys.exit(0)
    else:
        if cfg.config_present():
            cfg.load_config()
            if not cfg.config_compatible():
                cfg.migrate_config()
                # Reload config after upgrading
                cfg.load_config()
        else:
            console.error("No config present. Please run '%s configure' first." % PROGRAM_NAME)
            sys.exit(64)
def _download_from_s3_bucket(bucket_name, bucket_path, local_path, expected_size_in_bytes=None, progress_indicator=None):
    """Download one object from an S3 bucket to ``local_path``.

    :param bucket_name: Name of the S3 bucket.
    :param bucket_path: Key of the object within the bucket.
    :param local_path: Destination path on the local file system.
    :param expected_size_in_bytes: Total size; looked up from S3 when ``None``.
    :param progress_indicator: Optional callable ``(bytes_read, total)``.
    """
    # pylint: disable=import-outside-toplevel
    # lazily initialize S3 support - it might not be available
    try:
        _fake_import_boto3()
        import boto3.s3.transfer
    except ImportError:
        console.error("S3 support is optional. Install it with `python -m pip install esrally[s3]`")
        raise

    class S3ProgressAdapter:
        # adapts boto3's per-chunk byte-count callback to Rally's
        # (bytes_read, total) progress interface
        def __init__(self, size, progress):
            self._expected_size_in_bytes = size
            self._progress = progress
            self._bytes_read = 0

        def __call__(self, bytes_amount):
            self._bytes_read += bytes_amount
            self._progress(self._bytes_read, self._expected_size_in_bytes)

    bucket = boto3.resource("s3").Bucket(bucket_name)
    if expected_size_in_bytes is None:
        # ask S3 for the object's size so progress can be reported accurately
        expected_size_in_bytes = bucket.Object(bucket_path).content_length
    callback = None
    if progress_indicator:
        callback = S3ProgressAdapter(expected_size_in_bytes, progress_indicator)
    bucket.download_file(bucket_path,
                         local_path,
                         Callback=callback,
                         Config=boto3.s3.transfer.TransferConfig(use_threads=False))
def dispatch_sub_command(cfg, sub_command):
    """Execute the requested subcommand.

    :return: ``True`` on success, ``False`` when an error occurred.
    """
    try:
        if sub_command == "compare":
            reporter.compare(cfg)
        elif sub_command == "list":
            list(cfg)
        elif sub_command == "race":
            race(cfg)
        else:
            raise exceptions.SystemSetupError("Unknown subcommand [%s]" % sub_command)
    except exceptions.RallyError as e:
        logging.exception("Cannot run subcommand [%s]." % sub_command)
        console.error("Cannot %s. %s" % (sub_command, e))
        console.println("")
        print_help_on_errors()
        return False
    except BaseException as e:
        logging.exception("A fatal error occurred while running subcommand [%s]." % sub_command)
        console.error("Cannot %s. %s." % (sub_command, e))
        console.println("")
        print_help_on_errors()
        return False
    else:
        # success path: only reached when the handler completed normally
        return True
def bootstrap_actor_system(cfg, system_base="multiprocTCPBase"):
    """Start Thespian's actor system; exit with status 70 if it cannot start."""
    try:
        log_defs = configure_actor_logging(cfg)
        return thespian.actors.ActorSystem(system_base, logDefs=log_defs)
    except thespian.actors.ActorSystemException:
        logger.exception("Could not initialize internal actor system. Terminating.")
        console.error("Could not initialize successfully.\n")
        console.error("Are there are still processes from a previous race?")
        console.error("Please check and terminate related Python processes before running Rally again.\n")
        print_help_on_errors(cfg)
        sys.exit(70)
def bootstrap_actor_system(cfg, system_base="multiprocTCPBase"):
    """Create the Thespian actor system used for Rally's internal communication.

    Exits the process with status 70 when the actor system cannot be
    initialized (e.g. due to leftover processes from a previous race).
    """
    try:
        return thespian.actors.ActorSystem(system_base,
                                           logDefs=configure_actor_logging(cfg))
    except thespian.actors.ActorSystemException:
        logger.exception("Could not initialize internal actor system. Terminating.")
        # show the operator what likely went wrong and how to recover
        for message in ("Could not initialize successfully.\n",
                        "Are there are still processes from a previous race?",
                        "Please check and terminate related Python processes before running Rally again.\n"):
            console.error(message)
        print_help_on_errors(cfg)
        sys.exit(70)
def bootstrap_actor_system(try_join=False, prefer_local_only=False, local_ip=None, coordinator_ip=None):
    """Bootstrap (or join) Thespian's actor system.

    :param try_join: Join an already running actor system if possible;
        otherwise create a coordinator-only system.
    :param prefer_local_only: Restrict the actor system to this machine.
    :param local_ip: IP of this node (required for distributed runs).
    :param coordinator_ip: IP of the coordinator node (required for
        distributed runs).
    :return: The (possibly joined) ``thespian.actors.ActorSystem``.
    """
    logger = logging.getLogger(__name__)
    system_base = __SYSTEM_BASE
    try:
        if try_join:
            if actor_system_already_running():
                logger.info("Joining already running actor system with system base [%s].", system_base)
                return thespian.actors.ActorSystem(system_base)
            logger.info("Creating new actor system with system base [%s] on coordinator node.", system_base)
            # if we try to join we can only run on the coordinator...
            return thespian.actors.ActorSystem(system_base,
                                               logDefs=log.load_configuration(),
                                               capabilities={"coordinator": True})
        if prefer_local_only:
            coordinator = True
            if system_base == "multiprocQueueBase":
                coordinator_ip = None
                local_ip = None
            else:
                coordinator_ip = "127.0.0.1"
                local_ip = "127.0.0.1"
        else:
            if system_base not in ("multiprocTCPBase", "multiprocUDPBase"):
                raise exceptions.SystemSetupError("Rally requires a network-capable system base but got [%s]." % system_base)
            if not coordinator_ip:
                raise exceptions.SystemSetupError("coordinator IP is required")
            if not local_ip:
                raise exceptions.SystemSetupError("local IP is required")
            # always resolve the public IP here, even if a DNS name is given. Otherwise Thespian will be unhappy
            local_ip = net.resolve(local_ip)
            coordinator_ip = net.resolve(coordinator_ip)
            coordinator = local_ip == coordinator_ip
        capabilities = {"coordinator": coordinator}
        if local_ip:
            # just needed to determine whether to run benchmarks locally
            capabilities["ip"] = local_ip
        if coordinator_ip:
            # Make the coordinator node the convention leader
            capabilities["Convention Address.IPv4"] = "%s:1900" % coordinator_ip
        logger.info("Starting actor system with system base [%s] and capabilities [%s].", system_base, capabilities)
        return thespian.actors.ActorSystem(system_base,
                                           logDefs=log.load_configuration(),
                                           capabilities=capabilities)
    except thespian.actors.ActorSystemException:
        logger.exception("Could not initialize internal actor system. Terminating.")
        console.error("Could not initialize successfully.\n")
        console.error("Are there are still processes from a previous race?")
        console.error("Please check and terminate related Python processes before running Rally again.\n")
        raise
def bootstrap_actor_system(try_join=False, prefer_local_only=False, local_ip=None, coordinator_ip=None):
    """Bootstrap (or join) Thespian's actor system.

    :param try_join: Join an already running actor system if possible;
        otherwise create a coordinator-only system.
    :param prefer_local_only: Restrict the actor system to this machine.
    :param local_ip: IP of this node (required for distributed runs).
    :param coordinator_ip: IP of the coordinator node (required for
        distributed runs).
    :return: The (possibly joined) ``thespian.actors.ActorSystem``.
    """
    system_base = __SYSTEM_BASE
    try:
        if try_join:
            if actor_system_already_running():
                logger.info("Joining already running actor system with system base [%s]." % system_base)
                return thespian.actors.ActorSystem(system_base)
            logger.info("Creating new actor system with system base [%s] on coordinator node." % system_base)
            # if we try to join we can only run on the coordinator...
            return thespian.actors.ActorSystem(system_base,
                                               logDefs=configure_actor_logging(),
                                               capabilities={"coordinator": True})
        if prefer_local_only:
            coordinator = True
            if system_base == "multiprocQueueBase":
                coordinator_ip = None
                local_ip = None
            else:
                coordinator_ip = "127.0.0.1"
                local_ip = "127.0.0.1"
        else:
            if system_base not in ("multiprocTCPBase", "multiprocUDPBase"):
                raise exceptions.SystemSetupError("Rally requires a network-capable system base but got [%s]." % system_base)
            if not coordinator_ip:
                raise exceptions.SystemSetupError("coordinator IP is required")
            if not local_ip:
                raise exceptions.SystemSetupError("local IP is required")
            # always resolve the public IP here, even if a DNS name is given. Otherwise Thespian will be unhappy
            local_ip = net.resolve(local_ip)
            coordinator_ip = net.resolve(coordinator_ip)
            coordinator = local_ip == coordinator_ip
        capabilities = {"coordinator": coordinator}
        if local_ip:
            # just needed to determine whether to run benchmarks locally
            capabilities["ip"] = local_ip
        if coordinator_ip:
            # Make the coordinator node the convention leader
            capabilities["Convention Address.IPv4"] = "%s:1900" % coordinator_ip
        logger.info("Starting actor system with system base [%s] and capabilities [%s]." % (system_base, capabilities))
        return thespian.actors.ActorSystem(system_base,
                                           logDefs=configure_actor_logging(),
                                           capabilities=capabilities)
    except thespian.actors.ActorSystemException:
        logger.exception("Could not initialize internal actor system. Terminating.")
        console.error("Could not initialize successfully.\n")
        console.error("Are there are still processes from a previous race?")
        console.error("Please check and terminate related Python processes before running Rally again.\n")
        raise
def bootstrap_actor_system(try_join=False, prefer_local_only=False, local_ip=None, coordinator_ip=None, system_base="multiprocTCPBase"):
    """Bootstrap (or join) Thespian's actor system.

    :param try_join: Join an already running actor system if possible;
        otherwise create a coordinator-only system.
    :param prefer_local_only: Restrict the actor system to this machine.
    :param local_ip: IP of this node (required for distributed runs).
    :param coordinator_ip: IP of the coordinator node (required for
        distributed runs).
    :param system_base: The Thespian system base to use.
    :return: The (possibly joined) ``thespian.actors.ActorSystem``.
    """
    try:
        if try_join:
            if actor_system_already_running():
                return thespian.actors.ActorSystem(system_base)
            # if we try to join we can only run on the coordinator...
            return thespian.actors.ActorSystem(system_base,
                                               logDefs=configure_actor_logging(),
                                               capabilities={"coordinator": True})
        if prefer_local_only:
            coordinator_ip = "127.0.0.1"
            local_ip = "127.0.0.1"
            coordinator = True
        else:
            if system_base not in ("multiprocTCPBase", "multiprocUDPBase"):
                raise exceptions.SystemSetupError("Rally requires a network-capable system base but got [%s]." % system_base)
            if not coordinator_ip:
                raise exceptions.SystemSetupError("coordinator IP is required")
            if not local_ip:
                raise exceptions.SystemSetupError("local IP is required")
            coordinator = local_ip == coordinator_ip
        capabilities = {
            "coordinator": coordinator,
            # just needed to determine whether to run benchmarks locally
            "ip": local_ip,
            # Make the coordinator node the convention leader
            "Convention Address.IPv4": "%s:1900" % coordinator_ip,
        }
        return thespian.actors.ActorSystem(system_base,
                                           logDefs=configure_actor_logging(),
                                           capabilities=capabilities)
    except thespian.actors.ActorSystemException:
        logger.exception("Could not initialize internal actor system. Terminating.")
        console.error("Could not initialize successfully.\n")
        console.error("Are there are still processes from a previous race?")
        console.error("Please check and terminate related Python processes before running Rally again.\n")
        raise
def dispatch_sub_command(arg_parser, args, cfg):
    """Translate parsed CLI arguments into config entries and run the subcommand.

    Each branch copies the arguments relevant to its subcommand into ``cfg``
    (the order of ``cfg.add`` calls is preserved deliberately) and then invokes
    the corresponding operation.

    :param arg_parser: The argument parser (used by the configure_* helpers).
    :param args: Parsed command line arguments.
    :param cfg: The config object that receives the derived settings.
    :return: ``True`` on success, ``False`` on any error (logged and printed).
    """
    sub_command = args.subcommand

    # settings that apply to every subcommand
    cfg.add(config.Scope.application, "system", "quiet.mode", args.quiet)
    cfg.add(config.Scope.application, "system", "offline.mode", args.offline)

    try:
        if sub_command == "compare":
            configure_reporting_params(args, cfg)
            reporter.compare(cfg, args.baseline, args.contender)
        elif sub_command == "list":
            cfg.add(config.Scope.applicationOverride, "system", "list.config.option", args.configuration)
            cfg.add(config.Scope.applicationOverride, "system", "list.races.max_results", args.limit)
            configure_mechanic_params(args, cfg, command_requires_car=False)
            configure_track_params(arg_parser, args, cfg, command_requires_track=False)
            dispatch_list(cfg)
        elif sub_command == "download":
            cfg.add(config.Scope.applicationOverride, "mechanic", "target.os", args.target_os)
            cfg.add(config.Scope.applicationOverride, "mechanic", "target.arch", args.target_arch)
            configure_mechanic_params(args, cfg)
            mechanic.download(cfg)
        elif sub_command == "install":
            # each install gets a fresh id so multiple installs can coexist
            cfg.add(config.Scope.applicationOverride, "system", "install.id", str(uuid.uuid4()))
            cfg.add(config.Scope.applicationOverride, "mechanic", "network.host", args.network_host)
            cfg.add(config.Scope.applicationOverride, "mechanic", "network.http.port", args.http_port)
            cfg.add(config.Scope.applicationOverride, "mechanic", "source.revision", args.revision)
            # TODO: Remove this special treatment and rely on artifact caching (follow-up PR)
            cfg.add(config.Scope.applicationOverride, "mechanic", "skip.build", args.skip_build)
            cfg.add(config.Scope.applicationOverride, "mechanic", "build.type", args.build_type)
            cfg.add(config.Scope.applicationOverride, "mechanic", "runtime.jdk", args.runtime_jdk)
            cfg.add(config.Scope.applicationOverride, "mechanic", "node.name", args.node_name)
            cfg.add(config.Scope.applicationOverride, "mechanic", "master.nodes", opts.csv_to_list(args.master_nodes))
            cfg.add(config.Scope.applicationOverride, "mechanic", "seed.hosts", opts.csv_to_list(args.seed_hosts))
            cfg.add(config.Scope.applicationOverride, "mechanic", "car.plugins", opts.csv_to_list(args.elasticsearch_plugins))
            cfg.add(config.Scope.applicationOverride, "mechanic", "plugin.params", opts.to_dict(args.plugin_params))
            configure_mechanic_params(args, cfg)
            mechanic.install(cfg)
        elif sub_command == "start":
            cfg.add(config.Scope.applicationOverride, "system", "race.id", args.race_id)
            cfg.add(config.Scope.applicationOverride, "system", "install.id", args.installation_id)
            cfg.add(config.Scope.applicationOverride, "mechanic", "runtime.jdk", args.runtime_jdk)
            configure_telemetry_params(args, cfg)
            mechanic.start(cfg)
        elif sub_command == "stop":
            cfg.add(config.Scope.applicationOverride, "mechanic", "preserve.install", convert.to_bool(args.preserve_install))
            cfg.add(config.Scope.applicationOverride, "system", "install.id", args.installation_id)
            mechanic.stop(cfg)
        elif sub_command == "race":
            # As the race command is doing more work than necessary at the moment, we duplicate several parameters
            # in this section that actually belong to dedicated subcommands (like install, start or stop). Over time
            # these duplicated parameters will vanish as we move towards dedicated subcommands and use "race" only
            # to run the actual benchmark (i.e. generating load).
            if args.effective_start_date:
                cfg.add(config.Scope.applicationOverride, "system", "time.start", args.effective_start_date)
            cfg.add(config.Scope.applicationOverride, "system", "race.id", args.race_id)
            # use the race id implicitly also as the install id.
            cfg.add(config.Scope.applicationOverride, "system", "install.id", args.race_id)
            cfg.add(config.Scope.applicationOverride, "race", "pipeline", args.pipeline)
            cfg.add(config.Scope.applicationOverride, "race", "user.tag", args.user_tag)
            cfg.add(config.Scope.applicationOverride, "driver", "profiling", args.enable_driver_profiling)
            cfg.add(config.Scope.applicationOverride, "driver", "assertions", args.enable_assertions)
            cfg.add(config.Scope.applicationOverride, "driver", "on.error", args.on_error)
            cfg.add(config.Scope.applicationOverride, "driver", "load_driver_hosts", opts.csv_to_list(args.load_driver_hosts))
            cfg.add(config.Scope.applicationOverride, "track", "test.mode.enabled", args.test_mode)
            configure_track_params(arg_parser, args, cfg)
            configure_connection_params(arg_parser, args, cfg)
            configure_telemetry_params(args, cfg)
            configure_mechanic_params(args, cfg)
            cfg.add(config.Scope.applicationOverride, "mechanic", "runtime.jdk", args.runtime_jdk)
            cfg.add(config.Scope.applicationOverride, "mechanic", "source.revision", args.revision)
            cfg.add(config.Scope.applicationOverride, "mechanic", "car.plugins", opts.csv_to_list(args.elasticsearch_plugins))
            cfg.add(config.Scope.applicationOverride, "mechanic", "plugin.params", opts.to_dict(args.plugin_params))
            cfg.add(config.Scope.applicationOverride, "mechanic", "preserve.install", convert.to_bool(args.preserve_install))
            cfg.add(config.Scope.applicationOverride, "mechanic", "skip.rest.api.check", convert.to_bool(args.skip_rest_api_check))
            configure_reporting_params(args, cfg)
            race(cfg, args.kill_running_processes)
        elif sub_command == "generate":
            cfg.add(config.Scope.applicationOverride, "generator", "chart.spec.path", args.chart_spec_path)
            cfg.add(config.Scope.applicationOverride, "generator", "chart.type", args.chart_type)
            cfg.add(config.Scope.applicationOverride, "generator", "output.path", args.output_path)
            generate(cfg)
        elif sub_command == "create-track":
            cfg.add(config.Scope.applicationOverride, "generator", "indices", args.indices)
            cfg.add(config.Scope.applicationOverride, "generator", "output.path", args.output_path)
            cfg.add(config.Scope.applicationOverride, "track", "track.name", args.track)
            configure_connection_params(arg_parser, args, cfg)
            tracker.create_track(cfg)
        elif sub_command == "info":
            configure_track_params(arg_parser, args, cfg)
            track.track_info(cfg)
        else:
            raise exceptions.SystemSetupError(f"Unknown subcommand [{sub_command}]")
        return True
    except exceptions.RallyError as e:
        logging.getLogger(__name__).exception("Cannot run subcommand [%s].", sub_command)
        msg = str(e.message)
        nesting = 0
        # unwrap the chain of causes, indenting each nested message deeper
        while hasattr(e, "cause") and e.cause:
            nesting += 1
            e = e.cause
            if hasattr(e, "message"):
                msg += "\n%s%s" % ("\t" * nesting, e.message)
            else:
                msg += "\n%s%s" % ("\t" * nesting, str(e))
        console.error("Cannot %s. %s" % (sub_command, msg))
        console.println("")
        print_help_on_errors()
        return False
    except BaseException as e:
        logging.getLogger(__name__).exception("A fatal error occurred while running subcommand [%s].", sub_command)
        console.error("Cannot %s. %s." % (sub_command, e))
        console.println("")
        print_help_on_errors()
        return False