def test_json_file_parameter_parses(self):
    """Target hosts supplied as a path to a JSON file are loaded and parsed."""
    def resource(name):
        # Resolve a fixture file that sits next to this test module.
        return os.path.join(os.path.dirname(__file__), "resources", name)

    single_cluster = opts.TargetHosts(resource("target_hosts_1.json"))
    self.assertEqual({"default": ["127.0.0.1:9200", "10.127.0.3:19200"]}, single_cluster.all_hosts)

    multi_cluster = opts.TargetHosts(resource("target_hosts_2.json"))
    expected = {
        "default": [{"host": "127.0.0.1", "port": 9200}, {"host": "127.0.0.1", "port": 19200}],
        "remote_1": [{"host": "10.127.0.3", "port": 9200}, {"host": "10.127.0.8", "port": 9201}],
        "remote_2": [{"host": "88.33.27.15", "port": 39200}],
    }
    self.assertEqual(expected, multi_cluster.all_hosts)
def test_csv_hosts_parses(self):
    """A comma-separated host:port string parses into the implicit "default" cluster."""
    target_hosts = '127.0.0.1:9200,10.17.0.5:19200'
    expected_hosts = [{'host': '127.0.0.1', 'port': 9200}, {'host': '10.17.0.5', 'port': 19200}]
    self.assertEqual({'default': expected_hosts}, opts.TargetHosts(target_hosts).all_hosts)
    self.assertEqual(expected_hosts, opts.TargetHosts(target_hosts).default)
    # NOTE(review): repeated assertion kept from the original — presumably guards
    # against ``default`` mutating state on access; confirm before removing.
    self.assertEqual(expected_hosts, opts.TargetHosts(target_hosts).default)
def test_csv_hosts_parses(self):
    """A comma-separated host:port string parses into the implicit "default" cluster."""
    target_hosts = "127.0.0.1:9200,10.17.0.5:19200"
    expected_default = [{"host": "127.0.0.1", "port": 9200}, {"host": "10.17.0.5", "port": 19200}]
    assert opts.TargetHosts(target_hosts).all_hosts == {"default": expected_default}
    assert opts.TargetHosts(target_hosts).default == expected_default
    # NOTE(review): repeated assertion kept from the original — presumably guards
    # against ``default`` mutating state on access; confirm before removing.
    assert opts.TargetHosts(target_hosts).default == expected_default
def test_default_client_ops_with_max_connections(self):
    """Default client options plus a max_connections override apply to every cluster."""
    default_options = opts.ClientOptions.DEFAULT_CLIENT_OPTIONS
    hosts = opts.TargetHosts('{"default": ["10.17.0.5:9200"], "remote": ["88.33.22.15:9200"]}')
    amended = opts.ClientOptions(default_options, target_hosts=hosts).with_max_connections(256)
    expected = {
        "default": {"timeout": 60, "max_connections": 256},
        "remote": {"timeout": 60, "max_connections": 256},
    }
    assert amended == expected
def test_sets_minimum_max_connections(self):
    """Requested max_connections below the floor are raised to the minimum for every cluster."""
    options_string = '{"default": {"timeout":60,"max_connections":5}, "remote": {"timeout":60}}'
    hosts = opts.TargetHosts('{"default": ["10.17.0.5:9200"], "remote": ["88.33.22.15:9200"]}')
    amended = opts.ClientOptions(options_string, target_hosts=hosts).with_max_connections(5)
    # Both the explicitly configured 5 and the requested 5 are below the
    # enforced minimum, so every cluster ends up at the floor value.
    expected = {
        "default": {"timeout": 60, "max_connections": 256},
        "remote": {"timeout": 60, "max_connections": 256},
    }
    assert amended == expected
def test_no_client_option_parses_to_default_with_multicluster(self):
    """Default client options apply to every cluster in a multi-cluster setup."""
    client_options_string = opts.ClientOptions.DEFAULT_CLIENT_OPTIONS
    target_hosts = opts.TargetHosts(
        '{"default": ["127.0.0.1:9200,10.17.0.5:19200"], "remote": ["88.33.22.15:19200"]}'
    )
    expected_default = {"timeout": 60}
    self.assertEqual(expected_default, opts.ClientOptions(client_options_string, target_hosts=target_hosts).default)
    self.assertEqual(
        {"default": {"timeout": 60}, "remote": {"timeout": 60}},
        opts.ClientOptions(client_options_string, target_hosts=target_hosts).all_client_options)
    # NOTE(review): duplicate of the first assertion in the original; kept to preserve behavior.
    self.assertEqual(expected_default, opts.ClientOptions(client_options_string, target_hosts=target_hosts).default)
class ExternalLauncherTests(TestCase):
    """Tests attaching Rally to an externally provisioned cluster."""

    test_host = opts.TargetHosts("127.0.0.1:9200,10.17.0.5:19200")
    client_options = opts.ClientOptions("timeout:60")

    def _base_cfg(self):
        # Configuration shared by every test in this class.
        cfg = config.Config()
        cfg.add(config.Scope.application, "mechanic", "telemetry.devices", [])
        cfg.add(config.Scope.application, "client", "hosts", self.test_host)
        cfg.add(config.Scope.application, "client", "options", self.client_options)
        return cfg

    def test_setup_external_cluster_single_node(self):
        cfg = self._base_cfg()
        ext_launcher = launcher.ExternalLauncher(cfg, MockMetricsStore(), client_factory_class=MockClientFactory)
        ext_launcher.start()
        # automatically determined by launcher on attach
        self.assertEqual(cfg.opts("mechanic", "distribution.version"), "5.0.0")

    def test_setup_external_cluster_multiple_nodes(self):
        cfg = self._base_cfg()
        cfg.add(config.Scope.application, "mechanic", "distribution.version", "2.3.3")
        ext_launcher = launcher.ExternalLauncher(cfg, MockMetricsStore(), client_factory_class=MockClientFactory)
        ext_launcher.start()
        # did not change user defined value
        self.assertEqual(cfg.opts("mechanic", "distribution.version"), "2.3.3")
def test_keeps_greater_than_minimum_max_connections(self):
    """Explicitly configured max_connections above the requested value are preserved."""
    options_string = '{"default": {"timeout":60,"max_connections":512}, "remote": {"timeout":60,"max_connections":1024}}'
    hosts = opts.TargetHosts('{"default": ["10.17.0.5:9200"], "remote": ["88.33.22.15:9200"]}')
    amended = opts.ClientOptions(options_string, target_hosts=hosts).with_max_connections(32)
    expected = {
        "default": {"timeout": 60, "max_connections": 512},
        "remote": {"timeout": 60, "max_connections": 1024},
    }
    assert amended == expected
def test_jsonstring_parses_as_dict_of_clusters(self):
    """A JSON string with several clusters parses into a dict keyed by cluster name."""
    target_hosts = '{"default": ["127.0.0.1:9200","10.17.0.5:19200"], "remote_1": ["88.33.22.15:19200"], "remote_2": ["10.18.0.6:19200","10.18.0.7:19201"]}'
    expected = {
        'default': ['127.0.0.1:9200', '10.17.0.5:19200'],
        'remote_1': ['88.33.22.15:19200'],
        'remote_2': ['10.18.0.6:19200', '10.18.0.7:19201'],
    }
    self.assertEqual(expected, opts.TargetHosts(target_hosts).all_hosts)
def configure_connection_params(arg_parser, args, cfg):
    """Store target hosts and client options in the config; reject mismatched cluster keys."""
    # Also needed by mechanic (-> telemetry) - duplicate by module?
    target_hosts = opts.TargetHosts(args.target_hosts)
    client_options = opts.ClientOptions(args.client_options, target_hosts=target_hosts)
    cfg.add(config.Scope.applicationOverride, "client", "hosts", target_hosts)
    cfg.add(config.Scope.applicationOverride, "client", "options", client_options)
    # In a multi-cluster setup both parameters must name exactly the same clusters.
    if list(target_hosts.all_hosts) != list(client_options.all_client_options):
        arg_parser.error("--target-hosts and --client-options must define the same keys for multi cluster setups.")
def set_default_hosts(cfg, host="127.0.0.1", port=9200):
    """Ensure the "client.hosts" config key is populated.

    Keeps hosts the user has already configured; otherwise falls back to a
    single default host.

    :param cfg: The config object to read and (possibly) update.
    :param host: Fallback host name used when no hosts are configured.
    :param port: Fallback port used when no hosts are configured.
    """
    configured_hosts = cfg.opts("client", "hosts")
    # Idiomatic truthiness check instead of ``len(...) != 0``.
    if configured_hosts.default:
        logger.info("Using configured hosts %s", configured_hosts.default)
    else:
        logger.info("Setting default host to [%s:%d]", host, port)
        default_host_object = opts.TargetHosts("{}:{}".format(host, port))
        cfg.add(config.Scope.benchmark, "client", "hosts", default_host_object)
def configure_connection_params(arg_parser, args, cfg):
    """Parse --target-hosts / --client-options, store them in the config and validate them."""
    # Also needed by mechanic (-> telemetry) - duplicate by module?
    target_hosts = opts.TargetHosts(args.target_hosts)
    cfg.add(config.Scope.applicationOverride, "client", "hosts", target_hosts)
    client_options = opts.ClientOptions(args.client_options, target_hosts=target_hosts)
    cfg.add(config.Scope.applicationOverride, "client", "options", client_options)
    timeout_missing = "timeout" not in client_options.default
    if timeout_missing:
        console.info("You did not provide an explicit timeout in the client options. Assuming default of 10 seconds.")
    # Multi-cluster setups require matching cluster keys in both parameters.
    if list(target_hosts.all_hosts) != list(client_options.all_client_options):
        arg_parser.error("--target-hosts and --client-options must define the same keys for multi cluster setups.")
class ClusterLauncherTests(TestCase):
    """Tests for launcher.ClusterLauncher backed by a mocked client factory."""

    test_host = opts.TargetHosts("10.0.0.10:9200,10.0.0.11:9200")
    client_options = opts.ClientOptions('timeout:60')

    def _make_cfg(self, client_opts):
        # Configuration shared by every test in this class; client options vary per test.
        cfg = config.Config()
        cfg.add(config.Scope.application, "client", "hosts", self.test_host)
        cfg.add(config.Scope.application, "client", "options", client_opts)
        cfg.add(config.Scope.application, "mechanic", "telemetry.devices", [])
        cfg.add(config.Scope.application, "mechanic", "telemetry.params", {})
        cfg.add(config.Scope.application, "mechanic", "preserve.install", False)
        cfg.add(config.Scope.application, "mechanic", "skip.rest.api.check", False)
        return cfg

    def test_launches_cluster(self):
        cfg = self._make_cfg(self.client_options)
        cluster_launcher = launcher.ClusterLauncher(cfg, MockMetricsStore(), client_factory_class=MockClientFactory)
        cluster = cluster_launcher.start()
        self.assertEqual([{"host": "10.0.0.10", "port": 9200}, {"host": "10.0.0.11", "port": 9200}], cluster.hosts)
        self.assertIsNotNone(cluster.telemetry)

    def test_launches_cluster_with_telemetry_client_timeout_enabled(self):
        cfg = self._make_cfg(self.client_options)
        cluster_launcher = launcher.ClusterLauncher(cfg, MockMetricsStore(), client_factory_class=MockClientFactory)
        cluster = cluster_launcher.start()
        expected_client_options = {"retry-on-timeout": True, "timeout": 60}
        for telemetry_device in cluster.telemetry.devices:
            if hasattr(telemetry_device, "clients"):
                # Process all clients options for multi cluster aware telemetry devices, like CcrStats
                for _, client in telemetry_device.clients.items():
                    self.assertDictEqual(expected_client_options, client.client_options)
            else:
                self.assertDictEqual(expected_client_options, telemetry_device.client.client_options)

    @mock.patch("time.sleep")
    def test_error_on_cluster_launch(self, sleep):
        # Simulate that the client will raise an error upon startup
        cfg = self._make_cfg(opts.ClientOptions("raise-error-on-info:true"))
        cluster_launcher = launcher.ClusterLauncher(cfg, MockMetricsStore(), client_factory_class=MockClientFactory)
        with self.assertRaisesRegex(exceptions.LaunchError,
                                    "Elasticsearch REST API layer is not available. Forcefully terminated cluster."):
            cluster_launcher.start()
def test_jsonstring_parses_as_dict_of_clusters(self):
    """A JSON string with several clusters parses into a dict keyed by cluster name."""
    target_hosts = (
        '{"default": ["127.0.0.1:9200","10.17.0.5:19200"],'
        ' "remote_1": ["88.33.22.15:19200"],'
        ' "remote_2": ["10.18.0.6:19200","10.18.0.7:19201"]}'
    )
    parsed = opts.TargetHosts(target_hosts).all_hosts
    expected = {
        "default": ["127.0.0.1:9200", "10.17.0.5:19200"],
        "remote_1": ["88.33.22.15:19200"],
        "remote_2": ["10.18.0.6:19200", "10.18.0.7:19201"],
    }
    assert parsed == expected
def test_empty_arg_parses_as_empty_list(self):
    """An empty target hosts string parses to an empty "default" cluster."""
    self.assertEqual({'default': []}, opts.TargetHosts('').all_hosts)
    self.assertEqual([], opts.TargetHosts('').default)
def test_amends_with_max_connections(self):
    """with_max_connections adds the given max_connections to every cluster's options."""
    default_options = opts.ClientOptions.DEFAULT_CLIENT_OPTIONS
    hosts = opts.TargetHosts('{"default": ["10.17.0.5:9200"], "remote": ["88.33.22.15:9200"]}')
    amended = opts.ClientOptions(default_options, target_hosts=hosts).with_max_connections(128)
    expected = {
        "default": {"timeout": 60, "max_connections": 128},
        "remote": {"timeout": 60, "max_connections": 128},
    }
    self.assertEqual(expected, amended)
def test_empty_arg_parses_as_empty_list(self):
    """An empty target hosts string parses to an empty "default" cluster."""
    assert opts.TargetHosts("").all_hosts == {"default": []}
    assert opts.TargetHosts("").default == []
def test_keeps_already_specified_max_connections(self):
    """An explicitly configured max_connections wins over the amended value."""
    options_string = '{"default": {"timeout":60,"max_connections":5}, "remote": {"timeout":60}}'
    hosts = opts.TargetHosts('{"default": ["10.17.0.5:9200"], "remote": ["88.33.22.15:9200"]}')
    amended = opts.ClientOptions(options_string, target_hosts=hosts).with_max_connections(32)
    expected = {
        # "default" already specifies 5, so the amendment does not override it.
        "default": {"timeout": 60, "max_connections": 5},
        # "remote" has no explicit value, so it receives the amended 32.
        "remote": {"timeout": 60, "max_connections": 32},
    }
    self.assertEqual(expected, amended)
def main():
    """Rally's command line entry point.

    Parses the command line, populates the global config object scope by scope
    (system, node, mechanic, race, track, reporting, generator, driver, client)
    and dispatches to the selected sub command. Exits with status 64 when the
    sub command reports failure.
    """
    check_python_version()
    log.install_default_log_config()
    log.configure_logging()
    logger = logging.getLogger(__name__)
    start = time.time()
    # Early init of console output so we start to show everything consistently.
    console.init(quiet=False)
    arg_parser = create_arg_parser()
    args = arg_parser.parse_args()
    # Re-init once the user's quiet preference is known.
    console.init(quiet=args.quiet)
    console.println(BANNER)
    cfg = config.Config(config_name=args.configuration_name)
    sub_command = derive_sub_command(args, cfg)
    ensure_configuration_present(cfg, args, sub_command)
    # Remember whether the start timestamp was user-provided; metrics need that distinction.
    if args.effective_start_date:
        cfg.add(config.Scope.application, "system", "time.start", args.effective_start_date)
        cfg.add(config.Scope.application, "system", "time.start.user_provided", True)
    else:
        cfg.add(config.Scope.application, "system", "time.start", datetime.datetime.utcnow())
        cfg.add(config.Scope.application, "system", "time.start.user_provided", False)
    cfg.add(config.Scope.applicationOverride, "system", "trial.id", str(uuid.uuid4()))
    cfg.add(config.Scope.applicationOverride, "system", "quiet.mode", args.quiet)
    cfg.add(config.Scope.applicationOverride, "system", "offline.mode", args.offline)
    # Local config per node
    cfg.add(config.Scope.application, "node", "rally.root", paths.rally_root())
    cfg.add(config.Scope.application, "node", "rally.cwd", os.getcwd())
    cfg.add(config.Scope.applicationOverride, "mechanic", "source.revision", args.revision)
    if args.distribution_version:
        cfg.add(config.Scope.applicationOverride, "mechanic", "distribution.version", args.distribution_version)
    cfg.add(config.Scope.applicationOverride, "mechanic", "distribution.repository", args.distribution_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.names", opts.csv_to_list(args.car))
    # A local team path replaces (and clears) the team repository.
    if args.team_path:
        cfg.add(config.Scope.applicationOverride, "mechanic", "team.path", os.path.abspath(io.normalize_path(args.team_path)))
        cfg.add(config.Scope.applicationOverride, "mechanic", "repository.name", None)
    else:
        cfg.add(config.Scope.applicationOverride, "mechanic", "repository.name", args.team_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.plugins", opts.csv_to_list(args.elasticsearch_plugins))
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.params", opts.to_dict(args.car_params))
    cfg.add(config.Scope.applicationOverride, "mechanic", "plugin.params", opts.to_dict(args.plugin_params))
    if args.keep_cluster_running:
        cfg.add(config.Scope.applicationOverride, "mechanic", "keep.running", True)
        # force-preserve the cluster nodes.
        cfg.add(config.Scope.applicationOverride, "mechanic", "preserve.install", True)
    else:
        cfg.add(config.Scope.applicationOverride, "mechanic", "keep.running", False)
        cfg.add(config.Scope.applicationOverride, "mechanic", "preserve.install", convert.to_bool(args.preserve_install))
    cfg.add(config.Scope.applicationOverride, "mechanic", "runtime.jdk", args.runtime_jdk)
    cfg.add(config.Scope.applicationOverride, "mechanic", "telemetry.devices", opts.csv_to_list(args.telemetry))
    cfg.add(config.Scope.applicationOverride, "mechanic", "telemetry.params", opts.to_dict(args.telemetry_params))
    cfg.add(config.Scope.applicationOverride, "race", "pipeline", args.pipeline)
    cfg.add(config.Scope.applicationOverride, "race", "laps", args.laps)
    cfg.add(config.Scope.applicationOverride, "race", "user.tag", args.user_tag)
    # We can assume here that if a track-path is given, the user did not specify a repository either (although argparse sets it to
    # its default value)
    if args.track_path:
        cfg.add(config.Scope.applicationOverride, "track", "track.path", os.path.abspath(io.normalize_path(args.track_path)))
        cfg.add(config.Scope.applicationOverride, "track", "repository.name", None)
        if args.track:
            # stay as close as possible to argparse errors although we have a custom validation.
            arg_parser.error("argument --track not allowed with argument --track-path")
        # cfg.add(config.Scope.applicationOverride, "track", "track.name", None)
    else:
        # cfg.add(config.Scope.applicationOverride, "track", "track.path", None)
        cfg.add(config.Scope.applicationOverride, "track", "repository.name", args.track_repository)
        # set the default programmatically because we need to determine whether the user has provided a value
        chosen_track = args.track if args.track else "geonames"
        cfg.add(config.Scope.applicationOverride, "track", "track.name", chosen_track)
    cfg.add(config.Scope.applicationOverride, "track", "params", opts.to_dict(args.track_params))
    cfg.add(config.Scope.applicationOverride, "track", "challenge.name", args.challenge)
    cfg.add(config.Scope.applicationOverride, "track", "include.tasks", opts.csv_to_list(args.include_tasks))
    cfg.add(config.Scope.applicationOverride, "track", "test.mode.enabled", args.test_mode)
    cfg.add(config.Scope.applicationOverride, "reporting", "format", args.report_format)
    cfg.add(config.Scope.applicationOverride, "reporting", "values", args.show_in_report)
    cfg.add(config.Scope.applicationOverride, "reporting", "output.path", args.report_file)
    if sub_command == "compare":
        cfg.add(config.Scope.applicationOverride, "reporting", "baseline.timestamp", args.baseline)
        cfg.add(config.Scope.applicationOverride, "reporting", "contender.timestamp", args.contender)
    if sub_command == "generate":
        cfg.add(config.Scope.applicationOverride, "generator", "chart.type", args.chart_type)
        cfg.add(config.Scope.applicationOverride, "generator", "output.path", args.output_path)
        # A chart spec is mutually exclusive with the individual chart parameters.
        if args.chart_spec_path and (args.track or args.challenge or args.car or args.node_count):
            console.println("You need to specify either --chart-spec-path or --track, --challenge, --car and "
                            "--node-count but not both.")
            exit(1)
        if args.chart_spec_path:
            cfg.add(config.Scope.applicationOverride, "generator", "chart.spec.path", args.chart_spec_path)
        else:
            # other options are stored elsewhere already
            cfg.add(config.Scope.applicationOverride, "generator", "node.count", args.node_count)
    cfg.add(config.Scope.applicationOverride, "driver", "profiling", args.enable_driver_profiling)
    cfg.add(config.Scope.applicationOverride, "driver", "on.error", args.on_error)
    cfg.add(config.Scope.applicationOverride, "driver", "load_driver_hosts", opts.csv_to_list(args.load_driver_hosts))
    if sub_command != "list":
        # Also needed by mechanic (-> telemetry) - duplicate by module?
        target_hosts = opts.TargetHosts(args.target_hosts)
        cfg.add(config.Scope.applicationOverride, "client", "hosts", target_hosts)
        client_options = opts.ClientOptions(args.client_options, target_hosts=target_hosts)
        cfg.add(config.Scope.applicationOverride, "client", "options", client_options)
        if "timeout" not in client_options.default:
            console.info("You did not provide an explicit timeout in the client options. Assuming default of 10 seconds.")
        # Multi-cluster setups require matching cluster keys in both parameters.
        if list(target_hosts.all_hosts) != list(client_options.all_client_options):
            console.println("--target-hosts and --client-options must define the same keys for multi cluster setups.")
            exit(1)
    # split by component?
    if sub_command == "list":
        cfg.add(config.Scope.applicationOverride, "system", "list.config.option", args.configuration)
        cfg.add(config.Scope.applicationOverride, "system", "list.races.max_results", args.limit)
    logger.info("OS [%s]", str(os.uname()))
    logger.info("Python [%s]", str(sys.implementation))
    logger.info("Rally version [%s]", version.version())
    logger.info("Command line arguments: %s", args)
    # Configure networking
    net.init()
    if not args.offline:
        if not net.has_internet_connection():
            console.warn("No Internet connection detected. Automatic download of track data sets etc. is disabled.",
                         logger=logger)
            cfg.add(config.Scope.applicationOverride, "system", "offline.mode", True)
        else:
            logger.info("Detected a working Internet connection.")
    success = dispatch_sub_command(cfg, sub_command)
    end = time.time()
    if success:
        console.println("")
        console.info("SUCCESS (took %d seconds)" % (end - start), overline="-", underline="-")
    else:
        console.println("")
        console.info("FAILURE (took %d seconds)" % (end - start), overline="-", underline="-")
        # 64 is EX_USAGE-style non-zero exit so callers/scripts can detect failure.
        sys.exit(64)
class ClusterLauncherTests(TestCase):
    """Tests for launcher.ClusterLauncher with an optional post-launch handler."""

    test_host = opts.TargetHosts("10.0.0.10:9200,10.0.0.11:9200")
    client_options = opts.ClientOptions('timeout:60')

    def _make_cfg(self, client_opts):
        # Configuration shared by every test in this class; client options vary per test.
        cfg = config.Config()
        cfg.add(config.Scope.application, "client", "hosts", self.test_host)
        cfg.add(config.Scope.application, "client", "options", client_opts)
        cfg.add(config.Scope.application, "mechanic", "telemetry.devices", [])
        cfg.add(config.Scope.application, "mechanic", "telemetry.params", {})
        return cfg

    def test_launches_cluster_with_post_launch_handler(self):
        on_post_launch = mock.Mock()
        cfg = self._make_cfg(self.client_options)
        cluster_launcher = launcher.ClusterLauncher(
            cfg, MockMetricsStore(), on_post_launch=on_post_launch, client_factory_class=MockClientFactory)
        cluster = cluster_launcher.start()
        self.assertEqual([{"host": "10.0.0.10", "port": 9200}, {"host": "10.0.0.11", "port": 9200}], cluster.hosts)
        self.assertIsNotNone(cluster.telemetry)
        # this requires at least Python 3.6
        # on_post_launch.assert_called_once()
        self.assertEqual(1, on_post_launch.call_count)

    def test_launches_cluster_without_post_launch_handler(self):
        cfg = self._make_cfg(self.client_options)
        cluster_launcher = launcher.ClusterLauncher(cfg, MockMetricsStore(), client_factory_class=MockClientFactory)
        cluster = cluster_launcher.start()
        self.assertEqual([{"host": "10.0.0.10", "port": 9200}, {"host": "10.0.0.11", "port": 9200}], cluster.hosts)
        self.assertIsNotNone(cluster.telemetry)

    @mock.patch("time.sleep")
    def test_error_on_cluster_launch(self, sleep):
        on_post_launch = mock.Mock()
        # Simulate that the client will raise an error upon startup
        cfg = self._make_cfg(opts.ClientOptions("raise-error-on-info:true"))
        #cfg.add(config.Scope.application, "client", "options", {"raise-error-on-info": True})
        cluster_launcher = launcher.ClusterLauncher(
            cfg, MockMetricsStore(), on_post_launch=on_post_launch, client_factory_class=MockClientFactory)
        with self.assertRaisesRegex(
                exceptions.LaunchError,
                "Elasticsearch REST API layer is not available. Forcefully terminated cluster."):
            cluster_launcher.start()
        # The handler must not run when the launch fails.
        self.assertEqual(0, on_post_launch.call_count)