def main():
    """CLI entry point: set up logging/console, parse arguments, translate them into
    the layered ``config.Config``, and dispatch to the chosen sub-command.

    Exits the process with status 64 if the sub-command reports failure and with
    status 1 on invalid argument combinations.
    """
    check_python_version()
    log.remove_obsolete_default_log_config()
    log.install_default_log_config()
    log.configure_logging()
    logger = logging.getLogger(__name__)
    start = time.time()

    # Early init of console output so we start to show everything consistently.
    console.init(quiet=False)

    arg_parser = create_arg_parser()
    args = arg_parser.parse_args()

    # Re-init once we know whether the user requested quiet mode.
    console.init(quiet=args.quiet)
    console.println(BANNER)

    cfg = config.Config(config_name=args.configuration_name)
    sub_command = derive_sub_command(args, cfg)
    ensure_configuration_present(cfg, args, sub_command)

    if args.effective_start_date:
        cfg.add(config.Scope.application, "system", "time.start", args.effective_start_date)
        cfg.add(config.Scope.application, "system", "time.start.user_provided", True)
    else:
        # NOTE(review): naive UTC timestamp; datetime.utcnow() is deprecated in newer
        # Pythons — switching to an aware datetime may affect downstream formatting, so
        # it is left unchanged here. TODO: confirm consumers before migrating.
        cfg.add(config.Scope.application, "system", "time.start", datetime.datetime.utcnow())
        cfg.add(config.Scope.application, "system", "time.start.user_provided", False)

    cfg.add(config.Scope.applicationOverride, "system", "trial.id", str(uuid.uuid4()))
    cfg.add(config.Scope.applicationOverride, "system", "quiet.mode", args.quiet)
    cfg.add(config.Scope.applicationOverride, "system", "offline.mode", args.offline)

    # Local config per node
    cfg.add(config.Scope.application, "node", "rally.root", paths.rally_root())
    cfg.add(config.Scope.application, "node", "rally.cwd", os.getcwd())

    cfg.add(config.Scope.applicationOverride, "mechanic", "source.revision", args.revision)
    if args.distribution_version:
        cfg.add(config.Scope.applicationOverride, "mechanic", "distribution.version", args.distribution_version)
    cfg.add(config.Scope.applicationOverride, "mechanic", "distribution.repository", args.distribution_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.names", opts.csv_to_list(args.car))
    if args.team_path:
        # An explicit team path takes precedence over (and disables) the team repository.
        cfg.add(config.Scope.applicationOverride, "mechanic", "team.path", os.path.abspath(io.normalize_path(args.team_path)))
        cfg.add(config.Scope.applicationOverride, "mechanic", "repository.name", None)
    else:
        cfg.add(config.Scope.applicationOverride, "mechanic", "repository.name", args.team_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.plugins", opts.csv_to_list(args.elasticsearch_plugins))
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.params", opts.to_dict(args.car_params))
    cfg.add(config.Scope.applicationOverride, "mechanic", "plugin.params", opts.to_dict(args.plugin_params))
    if args.keep_cluster_running:
        cfg.add(config.Scope.applicationOverride, "mechanic", "keep.running", True)
        # force-preserve the cluster nodes.
        cfg.add(config.Scope.applicationOverride, "mechanic", "preserve.install", True)
    else:
        cfg.add(config.Scope.applicationOverride, "mechanic", "keep.running", False)
        cfg.add(config.Scope.applicationOverride, "mechanic", "preserve.install", convert.to_bool(args.preserve_install))
    cfg.add(config.Scope.applicationOverride, "mechanic", "runtime.jdk", args.runtime_jdk)
    cfg.add(config.Scope.applicationOverride, "mechanic", "telemetry.devices", opts.csv_to_list(args.telemetry))
    cfg.add(config.Scope.applicationOverride, "mechanic", "telemetry.params", opts.to_dict(args.telemetry_params))

    cfg.add(config.Scope.applicationOverride, "race", "pipeline", args.pipeline)
    cfg.add(config.Scope.applicationOverride, "race", "laps", args.laps)
    cfg.add(config.Scope.applicationOverride, "race", "user.tag", args.user_tag)

    # We can assume here that if a track-path is given, the user did not specify a repository either (although argparse sets it to
    # its default value)
    if args.track_path:
        cfg.add(config.Scope.applicationOverride, "track", "track.path", os.path.abspath(io.normalize_path(args.track_path)))
        cfg.add(config.Scope.applicationOverride, "track", "repository.name", None)
        if args.track:
            # stay as close as possible to argparse errors although we have a custom validation.
            arg_parser.error("argument --track not allowed with argument --track-path")
        # cfg.add(config.Scope.applicationOverride, "track", "track.name", None)
    else:
        # cfg.add(config.Scope.applicationOverride, "track", "track.path", None)
        cfg.add(config.Scope.applicationOverride, "track", "repository.name", args.track_repository)
        # set the default programmatically because we need to determine whether the user has provided a value
        chosen_track = args.track if args.track else "geonames"
        cfg.add(config.Scope.applicationOverride, "track", "track.name", chosen_track)
    cfg.add(config.Scope.applicationOverride, "track", "params", opts.to_dict(args.track_params))
    cfg.add(config.Scope.applicationOverride, "track", "challenge.name", args.challenge)
    cfg.add(config.Scope.applicationOverride, "track", "include.tasks", opts.csv_to_list(args.include_tasks))
    cfg.add(config.Scope.applicationOverride, "track", "test.mode.enabled", args.test_mode)

    cfg.add(config.Scope.applicationOverride, "reporting", "format", args.report_format)
    cfg.add(config.Scope.applicationOverride, "reporting", "values", args.show_in_report)
    cfg.add(config.Scope.applicationOverride, "reporting", "output.path", args.report_file)
    if sub_command == "compare":
        cfg.add(config.Scope.applicationOverride, "reporting", "baseline.timestamp", args.baseline)
        cfg.add(config.Scope.applicationOverride, "reporting", "contender.timestamp", args.contender)
    if sub_command == "generate":
        cfg.add(config.Scope.applicationOverride, "generator", "chart.type", args.chart_type)
        cfg.add(config.Scope.applicationOverride, "generator", "output.path", args.output_path)
        if args.chart_spec_path and (args.track or args.challenge or args.car or args.node_count):
            console.println("You need to specify either --chart-spec-path or --track, --challenge, --car and "
                            "--node-count but not both.")
            # use sys.exit (not the site-injected builtin exit) so this works under "python -S"
            sys.exit(1)
        if args.chart_spec_path:
            cfg.add(config.Scope.applicationOverride, "generator", "chart.spec.path", args.chart_spec_path)
        else:
            # other options are stored elsewhere already
            cfg.add(config.Scope.applicationOverride, "generator", "node.count", args.node_count)

    cfg.add(config.Scope.applicationOverride, "driver", "profiling", args.enable_driver_profiling)
    cfg.add(config.Scope.applicationOverride, "driver", "on.error", args.on_error)
    cfg.add(config.Scope.applicationOverride, "driver", "load_driver_hosts", opts.csv_to_list(args.load_driver_hosts))
    if sub_command != "list":
        # Also needed by mechanic (-> telemetry) - duplicate by module?
        target_hosts = opts.TargetHosts(args.target_hosts)
        cfg.add(config.Scope.applicationOverride, "client", "hosts", target_hosts)
        client_options = opts.ClientOptions(args.client_options, target_hosts=target_hosts)
        cfg.add(config.Scope.applicationOverride, "client", "options", client_options)
        if "timeout" not in client_options.default:
            console.info("You did not provide an explicit timeout in the client options. Assuming default of 10 seconds.")
        if list(target_hosts.all_hosts) != list(client_options.all_client_options):
            console.println("--target-hosts and --client-options must define the same keys for multi cluster setups.")
            sys.exit(1)
    # split by component?
    if sub_command == "list":
        cfg.add(config.Scope.applicationOverride, "system", "list.config.option", args.configuration)
        cfg.add(config.Scope.applicationOverride, "system", "list.races.max_results", args.limit)

    logger.info("OS [%s]", str(os.uname()))
    logger.info("Python [%s]", str(sys.implementation))
    logger.info("Rally version [%s]", version.version())
    logger.debug("Command line arguments: %s", args)
    # Configure networking
    net.init()
    if not args.offline:
        if not net.has_internet_connection():
            console.warn("No Internet connection detected. Automatic download of track data sets etc. is disabled.", logger=logger)
            cfg.add(config.Scope.applicationOverride, "system", "offline.mode", True)
        else:
            logger.info("Detected a working Internet connection.")

    success = dispatch_sub_command(cfg, sub_command)

    end = time.time()
    # Blank separator line before the final status banner, in both outcomes.
    console.println("")
    if success:
        console.info("SUCCESS (took %d seconds)" % (end - start), overline="-", underline="-")
    else:
        console.info("FAILURE (took %d seconds)" % (end - start), overline="-", underline="-")
        sys.exit(64)
def test_keeps_already_specified_max_connections(self):
    """An explicitly configured max_connections must survive with_max_connections()."""
    raw_options = '{"default": {"timeout":60,"max_connections":5}, "remote": {"timeout":60}}'
    hosts = opts.TargetHosts('{"default": ["10.17.0.5:9200"], "remote": ["88.33.22.15:9200"]}')
    amended = opts.ClientOptions(raw_options, target_hosts=hosts).with_max_connections(32)
    # "default" keeps its explicit 5; only "remote" is amended to the requested 32.
    expected = {"default": {"timeout": 60, "max_connections": 5}, "remote": {"timeout": 60, "max_connections": 32}}
    self.assertEqual(expected, amended)
class ClusterLauncherTests(TestCase):
    """Tests for launcher.ClusterLauncher using a mocked client factory."""
    # Shared fixtures: two target nodes and a 60s client timeout.
    test_host = opts.TargetHosts("10.0.0.10:9200,10.0.0.11:9200")
    client_options = opts.ClientOptions('timeout:60')

    def _launcher_cfg(self, client_options=None):
        # Builds the configuration shared by every test here; the client options can be
        # overridden per test (defaults to the class-level fixture). Extracted to avoid
        # triplicating the same seven cfg.add calls.
        cfg = config.Config()
        cfg.add(config.Scope.application, "client", "hosts", self.test_host)
        cfg.add(config.Scope.application, "client", "options",
                self.client_options if client_options is None else client_options)
        cfg.add(config.Scope.application, "mechanic", "telemetry.devices", [])
        cfg.add(config.Scope.application, "mechanic", "telemetry.params", {})
        cfg.add(config.Scope.application, "mechanic", "preserve.install", False)
        cfg.add(config.Scope.application, "mechanic", "skip.rest.api.check", False)
        cfg.add(config.Scope.application, "system", "env.name", "test")
        return cfg

    def test_launches_cluster(self):
        cfg = self._launcher_cfg()
        ms = get_metrics_store(cfg)
        cluster_launcher = launcher.ClusterLauncher(cfg, ms, client_factory_class=MockClientFactory)

        cluster = cluster_launcher.start()

        self.assertEqual([{"host": "10.0.0.10", "port": 9200}, {"host": "10.0.0.11", "port": 9200}], cluster.hosts)
        self.assertIsNotNone(cluster.telemetry)

    def test_launches_cluster_with_telemetry_client_timeout_enabled(self):
        cfg = self._launcher_cfg()
        ms = get_metrics_store(cfg)
        cluster_launcher = launcher.ClusterLauncher(cfg, ms, client_factory_class=MockClientFactory)

        cluster = cluster_launcher.start()

        # Every telemetry device must get the configured timeout plus retry-on-timeout.
        for telemetry_device in cluster.telemetry.devices:
            if hasattr(telemetry_device, "clients"):
                # Process all clients options for multi cluster aware telemetry devices, like CcrStats
                for _, client in telemetry_device.clients.items():
                    self.assertDictEqual({"retry-on-timeout": True, "timeout": 60}, client.client_options)
            else:
                self.assertDictEqual({"retry-on-timeout": True, "timeout": 60}, telemetry_device.client.client_options)

    @mock.patch("time.sleep")
    def test_error_on_cluster_launch(self, sleep):
        # Simulate that the client will raise an error upon startup
        cfg = self._launcher_cfg(client_options=opts.ClientOptions("raise-error-on-info:true"))
        ms = get_metrics_store(cfg)
        cluster_launcher = launcher.ClusterLauncher(cfg, ms, client_factory_class=MockClientFactory)

        with self.assertRaisesRegex(exceptions.LaunchError,
                                    "Elasticsearch REST API layer is not available. Forcefully terminated cluster."):
            cluster_launcher.start()
def test_amends_with_max_connections(self):
    """When no max_connections is configured, with_max_connections() adds it to every cluster."""
    hosts = opts.TargetHosts('{"default": ["10.17.0.5:9200"], "remote": ["88.33.22.15:9200"]}')
    options = opts.ClientOptions(opts.ClientOptions.DEFAULT_CLIENT_OPTIONS, target_hosts=hosts)
    # Both clusters receive the requested value since neither declares one explicitly.
    expected = {"default": {"timeout": 60, "max_connections": 128}, "remote": {"timeout": 60, "max_connections": 128}}
    self.assertEqual(expected, options.with_max_connections(128))
class ClusterLauncherTests(TestCase):
    """Tests for launcher.ClusterLauncher, including the optional post-launch handler."""
    # Shared fixtures: two target nodes and a 60s client timeout.
    test_host = opts.TargetHosts("10.0.0.10:9200,10.0.0.11:9200")
    client_options = opts.ClientOptions('timeout:60')

    def _launcher_cfg(self, client_options=None):
        # Builds the configuration shared by every test here; the client options can be
        # overridden per test (defaults to the class-level fixture). Extracted to avoid
        # triplicating the same four cfg.add calls.
        cfg = config.Config()
        cfg.add(config.Scope.application, "client", "hosts", self.test_host)
        cfg.add(config.Scope.application, "client", "options",
                self.client_options if client_options is None else client_options)
        cfg.add(config.Scope.application, "mechanic", "telemetry.devices", [])
        cfg.add(config.Scope.application, "mechanic", "telemetry.params", {})
        return cfg

    def test_launches_cluster_with_post_launch_handler(self):
        on_post_launch = mock.Mock()
        cfg = self._launcher_cfg()
        cluster_launcher = launcher.ClusterLauncher(
            cfg, MockMetricsStore(), on_post_launch=on_post_launch, client_factory_class=MockClientFactory)

        cluster = cluster_launcher.start()

        self.assertEqual([{"host": "10.0.0.10", "port": 9200}, {"host": "10.0.0.11", "port": 9200}], cluster.hosts)
        self.assertIsNotNone(cluster.telemetry)
        # equivalent to assert_called_once(), which requires at least Python 3.6
        self.assertEqual(1, on_post_launch.call_count)

    def test_launches_cluster_without_post_launch_handler(self):
        cfg = self._launcher_cfg()
        cluster_launcher = launcher.ClusterLauncher(cfg, MockMetricsStore(), client_factory_class=MockClientFactory)

        cluster = cluster_launcher.start()

        self.assertEqual([{"host": "10.0.0.10", "port": 9200}, {"host": "10.0.0.11", "port": 9200}], cluster.hosts)
        self.assertIsNotNone(cluster.telemetry)

    @mock.patch("time.sleep")
    def test_error_on_cluster_launch(self, sleep):
        on_post_launch = mock.Mock()
        # Simulate that the client will raise an error upon startup
        cfg = self._launcher_cfg(client_options=opts.ClientOptions("raise-error-on-info:true"))
        cluster_launcher = launcher.ClusterLauncher(
            cfg, MockMetricsStore(), on_post_launch=on_post_launch, client_factory_class=MockClientFactory)

        with self.assertRaisesRegex(
                exceptions.LaunchError,
                "Elasticsearch REST API layer is not available. Forcefully terminated cluster."):
            cluster_launcher.start()
        # The handler must not run when the launch fails.
        self.assertEqual(0, on_post_launch.call_count)