Example #1
    def start_benchmark(self, msg, sender):
        logger.info("Benchmark is about to start.")
        self.start_sender = sender
        self.config = msg.config
        self.quiet = msg.config.opts("system", "quiet.mode", mandatory=False, default_value=False)
        self.es = client.EsClientFactory(msg.config.opts("client", "hosts"), msg.config.opts("client", "options")).create()
        self.metrics_store = metrics.InMemoryMetricsStore(config=self.config, meta_info=msg.metrics_meta_info)
        invocation = self.config.opts("meta", "time.start")
        track_name = self.config.opts("system", "track")
        challenge_name = self.config.opts("benchmarks", "challenge")
        selected_car_name = self.config.opts("benchmarks", "car")
        self.metrics_store.open(invocation, track_name, challenge_name, selected_car_name)

        track = msg.track
        challenge = select_challenge(self.config, track)
        setup_index(self.es, track, challenge)
        allocator = Allocator(challenge.schedule)
        self.allocations = allocator.allocations
        self.number_of_steps = len(allocator.join_points) - 1
        self.ops_per_join_point = allocator.operations_per_joinpoint

        logger.info("Benchmark consists of [%d] steps executed by (at most) [%d] clients as specified by the allocation matrix:\n%s" %
                    (self.number_of_steps, len(self.allocations), self.allocations))

        for client_id in range(allocator.clients):
            self.drivers.append(self.createActor(LoadGenerator))
        for client_id, driver in enumerate(self.drivers):
            self.send(driver, StartLoadGenerator(client_id, self.config, track.indices, self.allocations[client_id]))

        self.update_progress_message()
        self.wakeupAfter(datetime.timedelta(seconds=Driver.WAKEUP_INTERVAL_SECONDS))
Example #2
    def test_create_https_connection_only_verify_self_signed_server_certificate(
            self, mocked_load_cert_chain):
        hosts = [{"host": "127.0.0.1", "port": 9200}]
        client_options = {
            "use_ssl": True,
            "verify_certs": True,
            "http_auth": ("user", "password"),
            "ca_certs": os.path.join(self.cwd, "utils/resources/certs/ca.crt"),
        }
        # make a copy so we can verify later that the factory did not modify it
        original_client_options = deepcopy(client_options)

        logger = logging.getLogger("esrally.client")
        with mock.patch.object(logger, "info") as mocked_info_logger:
            f = client.EsClientFactory(hosts, client_options)
        mocked_info_logger.assert_has_calls([
            mock.call("SSL support: on"),
            mock.call("SSL certificate verification: on"),
            mock.call("SSL client authentication: off"),
        ])

        assert (
            not mocked_load_cert_chain.called
        ), "ssl_context.load_cert_chain should not have been called as we have not supplied client certs"
        assert f.hosts == hosts
        assert f.ssl_context.check_hostname
        assert f.ssl_context.verify_mode == ssl.CERT_REQUIRED

        assert f.client_options["scheme"] == "https"
        assert f.client_options["http_auth"] == ("user", "password")
        assert "use_ssl" not in f.client_options
        assert "verify_certs" not in f.client_options
        assert "ca_certs" not in f.client_options

        assert client_options == original_client_options
Example #3
    def start(self, car):
        port = self.cfg.opts("provisioning", "node.http.port")
        hosts = [{"host": "localhost", "port": port}]
        client_options = self.cfg.opts("launcher", "client.options")
        # unified client config
        self.cfg.add(config.Scope.benchmark, "client", "hosts", hosts)
        self.cfg.add(config.Scope.benchmark, "client", "options",
                     client_options)

        es = client.EsClientFactory(hosts, client_options).create()

        # we're very specific which nodes we kill as there is potentially also an Elasticsearch based metrics store running on this machine
        node_prefix = self.cfg.opts("provisioning", "node.name.prefix")
        process.kill_running_es_instances(node_prefix)

        logger.info("Starting a cluster based on car [%s] with [%d] nodes." %
                    (car, car.nodes))

        cluster_telemetry = [
            # TODO dm: Once we do distributed launching, this needs to be done per node not per cluster
            telemetry.MergeParts(self.cfg, self.metrics_store),
            telemetry.EnvironmentInfo(self.cfg, es, self.metrics_store),
            telemetry.NodeStats(self.cfg, es, self.metrics_store),
            telemetry.IndexStats(self.cfg, es, self.metrics_store),
            # TODO dm: Once we do distributed launching, this needs to be done per node not per cluster
            telemetry.IndexSize(self.cfg, self.metrics_store)
        ]

        t = telemetry.Telemetry(self.cfg, devices=cluster_telemetry)
        c = cluster.Cluster(
            [self._start_node(node, car, es) for node in range(car.nodes)], t)
        t.attach_to_cluster(c)
        return c
Example #4
    def receiveMessage(self, msg, sender):
        if isinstance(msg, StartLoadGenerator):
            logger.debug("client [%d] is about to start." % msg.client_id)
            self.master = sender
            self.client_id = msg.client_id
            self.es = client.EsClientFactory(msg.config.opts("client", "hosts"), msg.config.opts("client", "options")).create()
            self.config = msg.config
            self.indices = msg.indices
            self.tasks = msg.tasks
            self.current_task = 0
            self.start_timestamp = time.perf_counter()
            self.drive()
        elif isinstance(msg, Drive):
            logger.debug("Client [%d] is continuing its work at task index [%d] on [%f]." %
                         (self.client_id, self.current_task, msg.client_start_timestamp))
            self.master = sender
            self.start_driving = True
            self.wakeupAfter(datetime.timedelta(seconds=time.perf_counter() - msg.client_start_timestamp))
        elif isinstance(msg, thespian.actors.WakeupMessage):
            logger.debug("client [%d] woke up." % self.client_id)
            # it would be better if we could send ourselves a message at a specific time, simulate this with a boolean...
            if self.start_driving:
                self.start_driving = False
                self.drive()
            else:
                self.send_samples()
                if self.executor_future is not None:
                    if self.executor_future.done():
                        self.executor_future = None
                        self.drive()
                    else:
                        self.wakeupAfter(datetime.timedelta(seconds=LoadGenerator.WAKEUP_INTERVAL_SECONDS))
        else:
            logger.debug("client [%d] received unknown message [%s] (ignoring)." % (self.client_id, str(msg)))
Example #5
    def start(self, car, binary, data_paths):
        hosts = self.cfg.opts("client", "hosts")
        client_options = self.cfg.opts("client", "options")
        es = client.EsClientFactory(hosts, client_options).create()

        # we're very specific which nodes we kill as there is potentially also an Elasticsearch based metrics store running on this machine
        node_prefix = self.cfg.opts("provisioning", "node.name.prefix")
        process.kill_running_es_instances(node_prefix)

        logger.info("Starting a cluster based on car [%s] with [%d] nodes." % (car, car.nodes))

        # TODO dm: Get rid of these...
        enabled_devices = self.cfg.opts("mechanic", "telemetry.devices")

        cluster_telemetry = [
            # TODO dm: Once we do distributed launching, this needs to be done per node not per cluster
            telemetry.MergeParts(self.metrics_store, self.node_log_dir),
            telemetry.EnvironmentInfo(es, self.metrics_store),
            telemetry.NodeStats(es, self.metrics_store),
            telemetry.IndexStats(es, self.metrics_store),
            # TODO dm: Once we do distributed launching, this needs to be done per node not per cluster
            telemetry.IndexSize(data_paths, self.metrics_store)
        ]
        t = telemetry.Telemetry(enabled_devices, devices=cluster_telemetry)
        c = cluster.Cluster(hosts, [self._start_node(node, car, es, binary) for node in range(car.nodes)], t)
        logger.info("All cluster nodes have successfully started. Checking if REST API is available.")
        if wait_for_rest_layer(es):
            logger.info("REST API is available. Attaching telemetry devices to cluster.")
            t.attach_to_cluster(c)
            logger.info("Telemetry devices are now attached to the cluster.")
        else:
            logger.error("REST API layer is not yet available. Forcefully terminating cluster.")
            self.stop(c)
            raise exceptions.LaunchError("Elasticsearch REST API layer is not available. Forcefully terminated cluster.")
        return c
Example #6
    def test_create_https_connection_unverified_certificate(self):
        hosts = [{"host": "127.0.0.1", "port": 9200}]
        client_options = {
            "use_ssl": True,
            "verify_certs": False,
            "basic_auth_user": "******",
            "basic_auth_password": "******"
        }
        # make a copy so we can verify later that the factory did not modify it
        original_client_options = dict(client_options)

        f = client.EsClientFactory(hosts, client_options)

        self.assertEqual(hosts, f.hosts)
        self.assertFalse(f.ssl_context.check_hostname)
        self.assertEqual(ssl.CERT_NONE, f.ssl_context.verify_mode)

        self.assertEqual("https", f.client_options["scheme"])
        self.assertEqual(("user", "password"), f.client_options["http_auth"])
        self.assertNotIn("use_ssl", f.client_options)
        self.assertNotIn("verify_certs", f.client_options)
        self.assertNotIn("basic_auth_user", f.client_options)
        self.assertNotIn("basic_auth_password", f.client_options)

        self.assertDictEqual(original_client_options, client_options)
Example #7
    def test_create_https_connection_verify_server(self, mocked_load_cert_chain):
        hosts = [{"host": "127.0.0.1", "port": 9200}]
        client_options = {
            "use_ssl": True,
            "verify_certs": True,
            "http_auth": ("user", "password")
        }
        # make a copy so we can verify later that the factory did not modify it
        original_client_options = deepcopy(client_options)

        logger = logging.getLogger("esrally.client")
        with mock.patch.object(logger, "info") as mocked_info_logger:
            f = client.EsClientFactory(hosts, client_options)
        mocked_info_logger.assert_has_calls([
            mock.call("SSL support: on"),
            mock.call("SSL certificate verification: on"),
            mock.call("SSL client authentication: off")
        ])

        assert (
            not mocked_load_cert_chain.called
        ), "ssl_context.load_cert_chain should not have been called as we have not supplied client certs"

        self.assertEqual(hosts, f.hosts)
        self.assertTrue(f.ssl_context.check_hostname)
        self.assertEqual(ssl.CERT_REQUIRED, f.ssl_context.verify_mode)

        self.assertEqual("https", f.client_options["scheme"])
        self.assertEqual(("user", "password"), f.client_options["http_auth"])
        self.assertNotIn("use_ssl", f.client_options)
        self.assertNotIn("verify_certs", f.client_options)
        self.assertNotIn("ca_certs", f.client_options)

        self.assertDictEqual(original_client_options, client_options)
Example #8
    def test_raises_error_when_only_one_of_client_cert_and_client_key_defined(self):
        hosts = [{"host": "127.0.0.1", "port": 9200}]
        client_options = {
            "use_ssl": True,
            "verify_certs": True,
            "http_auth": ("user", "password"),
            "ca_certs": os.path.join(self.cwd, "utils/resources/certs/ca.crt"),
        }

        client_ssl_options = {"client_cert": "utils/resources/certs/client.crt", "client_key": "utils/resources/certs/client.key"}

        random_client_ssl_option = random.choice(list(client_ssl_options.keys()))
        missing_client_ssl_option = list(set(client_ssl_options) - set([random_client_ssl_option]))[0]
        client_options.update({random_client_ssl_option: client_ssl_options[random_client_ssl_option]})

        with pytest.raises(exceptions.SystemSetupError) as ctx:
            with mock.patch.object(console, "println") as mocked_console_println:
                client.EsClientFactory(hosts, client_options)
        mocked_console_println.assert_called_once_with(
            "'{}' is missing from client-options but '{}' has been specified.\n"
            "If your Elasticsearch setup requires client certificate verification both need to be supplied.\n"
            "Read the documentation at {}\n".format(
                missing_client_ssl_option,
                random_client_ssl_option,
                console.format.link(doc_link("command_line_reference.html#client-options")),
            )
        )
        assert ctx.value.args[0] == (
            "Cannot specify '{}' without also specifying '{}' in client-options.".format(
                random_client_ssl_option, missing_client_ssl_option
            )
        )
Example #9
    def test_client_cert(self, tmp_path_factory: pytest.TempPathFactory):
        tmpdir = tmp_path_factory.mktemp("certs")
        with _build_server(tmpdir, "localhost") as cfg:
            server, ca, ca_cert_path = cfg
            client_cert = ca.issue_cert("localhost")
            client_cert_path = str(tmpdir / "client.pem")
            client_key_path = str(tmpdir / "client.key")
            client_cert.cert_chain_pems[0].write_to_path(client_cert_path)
            client_cert.private_key_pem.write_to_path(client_key_path)

            hosts = [{"host": "localhost", "port": server.port}]
            client_options = {
                "use_ssl": True,
                "verify_certs": True,
                "ca_certs": ca_cert_path,
                "client_cert": client_cert_path,
                "client_key": client_key_path,
            }
            f = client.EsClientFactory(hosts, client_options)
            es = f.create()
            assert es.info() == {"version": {"number": "8.0.0"}}
Example #10
    def start(self, race_id):
        cmd = "start --runtime-jdk=\"bundled\" --installation-id={} --race-id={}".format(
            self.installation_id, race_id)
        if esrally(self.cfg, cmd) != 0:
            raise AssertionError("Failed to start Elasticsearch test cluster.")
        es = client.EsClientFactory(hosts=[{"host": "127.0.0.1", "port": self.http_port}],
                                    client_options={}).create()
        client.wait_for_rest_layer(es)
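The pattern that all of these call sites share is small: build a list of host dictionaries, build a dictionary of client options, hand both to client.EsClientFactory, and call create() to obtain the Elasticsearch client. Below is a minimal sketch of that flow outside any actor or test harness; it assumes the esrally.client module is importable and that a cluster is reachable at the placeholder address, so treat the values as illustrative rather than definitive.

from esrally import client

# placeholder target; any reachable Elasticsearch node will do
hosts = [{"host": "127.0.0.1", "port": 9200}]
# plain HTTP without auth; TLS/auth settings (use_ssl, verify_certs, http_auth, ...) go into this dict
client_options = {}

es = client.EsClientFactory(hosts, client_options).create()
# wait until the REST layer answers, as Example #10 does after launching the cluster
client.wait_for_rest_layer(es)
print(es.info())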
Example #11
    def test_check_hostname_false_when_host_is_ip(self):
        hosts = [{"host": "127.0.0.1", "port": 9200}]
        client_options = {
            "use_ssl": True,
            "verify_certs": True,
            "http_auth": ("user", "password"),
        }

        f = client.EsClientFactory(hosts, client_options)
        assert f.hosts == hosts
        assert f.ssl_context.check_hostname is False
        assert f.ssl_context.verify_mode == ssl.CERT_REQUIRED
Example #12
    def start_benchmark(self, msg, sender):
        self.start_sender = sender
        self.config = msg.config
        current_track = msg.track

        logger.info("Preparing track")
        # TODO #71: Reconsider this in case we distribute drivers. *For now* the driver will only be on a single machine, so we're safe.
        track.prepare_track(current_track, self.config)

        logger.info("Benchmark is about to start.")
        self.quiet = self.config.opts("system",
                                      "quiet.mode",
                                      mandatory=False,
                                      default_value=False)
        self.es = client.EsClientFactory(self.config.opts("client", "hosts"),
                                         self.config.opts("client",
                                                          "options")).create()
        self.metrics_store = metrics.InMemoryMetricsStore(
            config=self.config, meta_info=msg.metrics_meta_info, lap=msg.lap)
        invocation = self.config.opts("meta", "time.start")
        expected_cluster_health = self.config.opts("benchmarks",
                                                   "cluster.health")
        track_name = self.config.opts("benchmarks", "track")
        challenge_name = self.config.opts("benchmarks", "challenge")
        selected_car_name = self.config.opts("benchmarks", "car")
        self.metrics_store.open(invocation, track_name, challenge_name,
                                selected_car_name)

        challenge = select_challenge(self.config, current_track)
        es_version = self.config.opts("source", "distribution.version")
        for index in current_track.indices:
            setup_index(self.es, index, challenge.index_settings)
        wait_for_status(self.es, es_version, expected_cluster_health)
        allocator = Allocator(challenge.schedule)
        self.allocations = allocator.allocations
        self.number_of_steps = len(allocator.join_points) - 1
        self.ops_per_join_point = allocator.operations_per_joinpoint

        logger.info(
            "Benchmark consists of [%d] steps executed by (at most) [%d] clients as specified by the allocation matrix:\n%s"
            % (self.number_of_steps, len(self.allocations), self.allocations))

        for client_id in range(allocator.clients):
            self.drivers.append(self.createActor(LoadGenerator))
        for client_id, driver in enumerate(self.drivers):
            self.send(
                driver,
                StartLoadGenerator(client_id, self.config, current_track,
                                   self.allocations[client_id]))

        self.update_progress_message()
        self.wakeupAfter(
            datetime.timedelta(seconds=Driver.WAKEUP_INTERVAL_SECONDS))
Example #13
    def test_ip_address(self, tmp_path_factory: pytest.TempPathFactory):
        tmpdir = tmp_path_factory.mktemp("certs")
        with _build_server(tmpdir, "127.0.0.1") as cfg:
            server, _ca, ca_cert_path = cfg
            hosts = [{"host": "127.0.0.1", "port": server.port}]
            client_options = {
                "use_ssl": True,
                "verify_certs": True,
                "ca_certs": ca_cert_path,
            }
            f = client.EsClientFactory(hosts, client_options)
            es = f.create()
            assert es.info() == {"version": {"number": "8.0.0"}}
Example #14
    def test_create_http_connection(self):
        hosts = [{"host": "127.0.0.1", "port": 9200}]
        client_options = {}
        # make a copy so we can verify later that the factory did not modify it
        original_client_options = dict(client_options)

        f = client.EsClientFactory(hosts, client_options)

        self.assertEqual(hosts, f.hosts)
        self.assertIsNone(f.ssl_context)
        self.assertEqual("http", f.client_options["scheme"])
        self.assertFalse("http_auth" in f.client_options)

        self.assertDictEqual(original_client_options, client_options)
Example #15
def benchmark_internal(ctx):
    track_name = ctx.config.opts("system", "track")
    challenge_name = ctx.config.opts("benchmarks", "challenge")
    selected_car_name = ctx.config.opts("benchmarks", "car")

    print("Racing on track [%s] and challenge [%s] with car [%s]" %
          (track_name, challenge_name, selected_car_name))
    # TODO dm module refactoring: mechanic
    selected_car = None
    for c in car.cars:
        if c.name == selected_car_name:
            selected_car = c

    if not selected_car:
        raise exceptions.ImproperlyConfigured(
            "Unknown car [%s]. You can list the available cars with %s list cars."
            % (selected_car_name, PROGRAM_NAME))

    port = ctx.config.opts("provisioning", "node.http.port")
    hosts = [{"host": "localhost", "port": port}]
    client_options = ctx.config.opts("launcher", "client.options")
    # unified client config
    ctx.config.add(config.Scope.benchmark, "client", "hosts", hosts)
    ctx.config.add(config.Scope.benchmark, "client", "options", client_options)

    es_client = client.EsClientFactory(hosts, client_options).create()

    # TODO dm module refactoring: separate module? don't let the mechanic handle the metrics store but rather just provide it
    ctx.mechanic.start_metrics(track_name, challenge_name, selected_car_name)
    cluster = ctx.mechanic.start_engine(selected_car, es_client, port)
    actors = thespian.actors.ActorSystem()
    main_driver = actors.createActor(driver.Driver)

    # TODO dm: Retrieving the metrics store here is *dirty*...
    metrics_store = ctx.mechanic._metrics_store

    cluster.on_benchmark_start()
    completed = actors.ask(
        main_driver,
        driver.StartBenchmark(ctx.config, ctx.track, metrics_store.meta_info))
    cluster.on_benchmark_stop()
    if not hasattr(completed, "metrics"):
        raise exceptions.RallyError(
            "Driver has returned no metrics but instead [%s]. Terminating race without result."
            % str(completed))
    metrics_store.bulk_add(completed.metrics)

    ctx.mechanic.stop_engine(cluster)
    ctx.mechanic.revise_candidate()
    ctx.mechanic.stop_metrics()
Example #16
    def test_create_http_connection(self):
        hosts = [{"host": "127.0.0.1", "port": 9200}]
        client_options = {}
        # make a copy so we can verify later that the factory did not modify it
        original_client_options = dict(client_options)

        f = client.EsClientFactory(hosts, client_options)

        assert f.hosts == hosts
        assert f.ssl_context is None
        assert f.client_options["scheme"] == "http"
        assert "http_auth" not in f.client_options

        assert client_options == original_client_options
Example #17
    def test_create_https_connection_verify_self_signed_server_and_client_certificate(
            self, mocked_load_cert_chain):
        hosts = [{"host": "127.0.0.1", "port": 9200}]
        client_options = {
            "use_ssl": True,
            "verify_certs": True,
            "http_auth": ("user", "password"),
            "ca_certs": os.path.join(EsClientFactoryTests.cwd, "utils/resources/certs/ca.crt"),
            "client_cert": os.path.join(EsClientFactoryTests.cwd, "utils/resources/certs/client.crt"),
            "client_key": os.path.join(EsClientFactoryTests.cwd, "utils/resources/certs/client.key"),
        }
        # make a copy so we can verify later that the factory did not modify it
        original_client_options = deepcopy(client_options)

        logger = logging.getLogger("esrally.client")
        with mock.patch.object(logger, "info") as mocked_info_logger:
            f = client.EsClientFactory(hosts, client_options)
        mocked_info_logger.assert_has_calls([
            mock.call("SSL support: on"),
            mock.call("SSL certificate verification: on"),
            mock.call("SSL client authentication: on"),
        ])

        mocked_load_cert_chain.assert_called_with(
            certfile=client_options["client_cert"],
            keyfile=client_options["client_key"],
        )

        self.assertEqual(hosts, f.hosts)
        self.assertTrue(f.ssl_context.check_hostname)
        self.assertEqual(ssl.CERT_REQUIRED, f.ssl_context.verify_mode)

        self.assertEqual("https", f.client_options["scheme"])
        self.assertEqual(("user", "password"), f.client_options["http_auth"])
        self.assertNotIn("use_ssl", f.client_options)
        self.assertNotIn("verify_certs", f.client_options)
        self.assertNotIn("ca_certs", f.client_options)
        self.assertNotIn("client_cert", f.client_options)
        self.assertNotIn("client_key", f.client_options)

        self.assertDictEqual(original_client_options, client_options)
Example #18
def prepare_benchmark_external(ctx):
    track_name = ctx.config.opts("system", "track")
    challenge_name = ctx.config.opts("benchmarks", "challenge")
    # override externally used car name for this benchmark. We'll use a fixed one for external benchmarks.
    car_name = "external"
    ctx.config.add(config.Scope.benchmark, "benchmarks", "car", car_name)

    ctx.mechanic.start_metrics(track_name, challenge_name, car_name)

    hosts = ctx.config.opts("launcher", "external.target.hosts")
    client_options = ctx.config.opts("launcher", "client.options")
    # unified client config
    ctx.config.add(config.Scope.benchmark, "client", "hosts", hosts)
    ctx.config.add(config.Scope.benchmark, "client", "options", client_options)

    es_client = client.EsClientFactory(hosts, client_options).create()
    ctx.cluster = ctx.mechanic.start_engine_external(es_client)
Example #19
    def test_create_https_connection_unverified_certificate_present_client_certificates(
            self, mocked_load_cert_chain):
        hosts = [{"host": "127.0.0.1", "port": 9200}]
        client_options = {
            "use_ssl": True,
            "verify_certs": False,
            "http_auth": ("user", "password"),
            "client_cert": os.path.join(self.cwd, "utils/resources/certs/client.crt"),
            "client_key": os.path.join(self.cwd, "utils/resources/certs/client.key"),
        }
        # make a copy so we can verify later that the factory did not modify it
        original_client_options = deepcopy(client_options)

        logger = logging.getLogger("esrally.client")
        with mock.patch.object(logger, "info") as mocked_info_logger:
            f = client.EsClientFactory(hosts, client_options)
        mocked_info_logger.assert_has_calls([
            mock.call("SSL certificate verification: off"),
            mock.call("SSL client authentication: on"),
        ])

        mocked_load_cert_chain.assert_called_with(
            certfile=client_options["client_cert"],
            keyfile=client_options["client_key"],
        )

        assert f.hosts == hosts
        assert not f.ssl_context.check_hostname
        assert f.ssl_context.verify_mode == ssl.CERT_NONE

        assert f.client_options["scheme"] == "https"
        assert f.client_options["http_auth"] == ("user", "password")
        assert "use_ssl" not in f.client_options
        assert "verify_certs" not in f.client_options
        assert "basic_auth_user" not in f.client_options
        assert "basic_auth_password" not in f.client_options
        assert "ca_certs" not in f.client_options
        assert "client_cert" not in f.client_options
        assert "client_key" not in f.client_options

        assert client_options == original_client_options
Example #20
    def test_raises_error_when_verify_ssl_with_mixed_hosts(self):
        hosts = [{"host": "127.0.0.1", "port": 9200}, {"host": "localhost", "port": 9200}]
        client_options = {
            "use_ssl": True,
            "verify_certs": True,
            "http_auth": ("user", "password"),
        }

        with pytest.raises(
            exceptions.SystemSetupError,
            match="Cannot verify certs with mixed IP addresses and hostnames",
        ):
            client.EsClientFactory(hosts, client_options)
Example #21
    def test_create_https_connection_unverified_certificate(self, mocked_load_cert_chain):
        hosts = [{"host": "127.0.0.1", "port": 9200}]
        client_options = {
            "use_ssl": True,
            "verify_certs": False,
            "basic_auth_user": "******",
            "basic_auth_password": "******",
        }
        # make a copy so we can verify later that the factory did not modify it
        original_client_options = dict(client_options)

        logger = logging.getLogger("esrally.client")
        with mock.patch.object(logger, "info") as mocked_info_logger:
            f = client.EsClientFactory(hosts, client_options)
        mocked_info_logger.assert_has_calls(
            [
                mock.call("SSL support: on"),
                mock.call("SSL certificate verification: off"),
                mock.call("SSL client authentication: off"),
            ]
        )

        assert (
            not mocked_load_cert_chain.called
        ), "ssl_context.load_cert_chain should not have been called as we have not supplied client certs"

        assert f.hosts == hosts
        assert not f.ssl_context.check_hostname
        assert f.ssl_context.verify_mode == ssl.CERT_NONE

        assert f.client_options["scheme"] == "https"
        assert f.client_options["http_auth"] == ("user", "password")
        assert "use_ssl" not in f.client_options
        assert "verify_certs" not in f.client_options
        assert "basic_auth_user" not in f.client_options
        assert "basic_auth_password" not in f.client_options

        assert client_options == original_client_options
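Read together, the assertions in these HTTPS tests spell out the factory's contract with its options dictionary: use_ssl and verify_certs are consumed into the SSL context and the URL scheme, basic_auth_user/basic_auth_password are folded into an http_auth tuple, and the dictionary the caller passed in is never mutated. A condensed sketch of that contract, using placeholder values and the same assumptions as the sketch after Example #10:

from copy import deepcopy

from esrally import client

hosts = [{"host": "127.0.0.1", "port": 9200}]  # illustrative only
client_options = {
    "use_ssl": True,
    "verify_certs": False,
    "basic_auth_user": "user",
    "basic_auth_password": "password",
}
original = deepcopy(client_options)

f = client.EsClientFactory(hosts, client_options)

# raw flags are translated, not forwarded
assert f.client_options["scheme"] == "https"
assert f.client_options["http_auth"] == ("user", "password")
assert all(key not in f.client_options
           for key in ("use_ssl", "verify_certs", "basic_auth_user", "basic_auth_password"))
# and the caller's dictionary is left untouched
assert client_options == original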
Example #22
    def start_benchmark(self, msg, sender):
        self.start_sender = sender
        self.config = msg.config
        self.track = msg.track

        track_name = self.track.name
        challenge_name = self.track.find_challenge_or_default(
            self.config.opts("track", "challenge.name")).name
        selected_car_name = self.config.opts("mechanic", "car.name")

        logger.info("Preparing track [%s]" % track_name)
        # TODO #71: Reconsider this in case we distribute drivers. *For now* the driver will only be on a single machine, so we're safe.
        track.prepare_track(self.track, self.config)

        logger.info(
            "Benchmark for track [%s], challenge [%s] and car [%s] is about to start."
            % (track_name, challenge_name, selected_car_name))
        self.quiet = self.config.opts("system",
                                      "quiet.mode",
                                      mandatory=False,
                                      default_value=False)
        self.es = client.EsClientFactory(self.config.opts("client", "hosts"),
                                         self.config.opts("client",
                                                          "options")).create()
        self.metrics_store = metrics.InMemoryMetricsStore(
            cfg=self.config, meta_info=msg.metrics_meta_info, lap=msg.lap)
        invocation = self.config.opts("system", "time.start")
        expected_cluster_health = self.config.opts("benchmarks",
                                                   "cluster.health")
        self.metrics_store.open(invocation, track_name, challenge_name,
                                selected_car_name)

        self.challenge = select_challenge(self.config, self.track)
        for template in self.track.templates:
            setup_template(self.es, template)

        for index in self.track.indices:
            setup_index(self.es, index, self.challenge.index_settings)
        wait_for_status(self.es, expected_cluster_health)
        allocator = Allocator(self.challenge.schedule)
        self.allocations = allocator.allocations
        self.number_of_steps = len(allocator.join_points) - 1
        self.ops_per_join_point = allocator.operations_per_joinpoint

        logger.info(
            "Benchmark consists of [%d] steps executed by (at most) [%d] clients as specified by the allocation matrix:\n%s"
            % (self.number_of_steps, len(self.allocations), self.allocations))

        for client_id in range(allocator.clients):
            self.drivers.append(
                self.createActor(
                    LoadGenerator,
                    globalName="/rally/driver/worker/%s" % str(client_id),
                    targetActorRequirements={"coordinator": True}))
        for client_id, driver in enumerate(self.drivers):
            logger.info("Starting load generator [%d]." % client_id)
            self.send(
                driver,
                StartLoadGenerator(client_id, self.config, self.track,
                                   self.allocations[client_id]))

        self.update_progress_message()
        self.wakeupAfter(
            datetime.timedelta(seconds=Driver.WAKEUP_INTERVAL_SECONDS))
Example #23
    def receiveMessage(self, msg, sender):
        try:
            logger.debug("LoadGenerator[%s]#receiveMessage(msg = [%s], sender = [%s])" %
                         (str(self.client_id), str(type(msg)), str(sender)))
            if isinstance(msg, StartLoadGenerator):
                logger.info("LoadGenerator[%d] is about to start." % msg.client_id)
                self.master = sender
                self.client_id = msg.client_id
                self.es = client.EsClientFactory(msg.config.opts("client", "hosts"),
                                                 msg.config.opts("client", "options")).create()
                self.config = msg.config
                self.track = msg.track
                self.tasks = msg.tasks
                self.current_task = 0
                self.cancel.clear()
                # we need to wake up more often in test mode
                if self.config.opts("track", "test.mode.enabled"):
                    self.wakeup_interval = 0.5
                self.start_timestamp = time.perf_counter()
                track.load_track_plugins(self.config, runner.register_runner)
                self.drive()
            elif isinstance(msg, Drive):
                logger.debug("LoadGenerator[%d] is continuing its work at task index [%d] on [%f]." %
                             (self.client_id, self.current_task, msg.client_start_timestamp))
                self.start_driving = True
                self.wakeupAfter(datetime.timedelta(seconds=time.perf_counter() - msg.client_start_timestamp))
            elif isinstance(msg, thespian.actors.WakeupMessage):
                # it would be better if we could send ourselves a message at a specific time, simulate this with a boolean...
                if self.start_driving:
                    self.start_driving = False
                    self.drive()
                else:
                    self.send_samples()
                    if self.cancel.is_set():
                        self.send(self.master, BenchmarkCancelled())
                    elif self.executor_future is not None and self.executor_future.done():
                        e = self.executor_future.exception(timeout=0)
                        if e:
                            self.send(self.master,
                                      BenchmarkFailure("Error in load generator [%d]" % self.client_id, e))
                        else:
                            self.executor_future = None
                            self.drive()
                    else:
                        self.wakeupAfter(datetime.timedelta(seconds=self.wakeup_interval))
            elif isinstance(msg, thespian.actors.ActorExitRequest):
                logger.info("LoadGenerator[%s] is exiting due to ActorExitRequest." % str(self.client_id))
                if self.executor_future is not None and self.executor_future.running():
                    self.cancel.set()
                    self.pool.shutdown()
            else:
                logger.info("LoadGenerator[%d] received unknown message [%s] (ignoring)." % (self.client_id, str(msg)))
        except Exception as e:
            logger.exception("Fatal error in LoadGenerator[%d]" % self.client_id)
            self.send(self.master,
                      BenchmarkFailure("Fatal error in load generator [%d]" % self.client_id, e))