Example 1
    def start(self, car):
        port = self.cfg.opts("provisioning", "node.http.port")
        hosts = [{"host": "localhost", "port": port}]
        client_options = self.cfg.opts("launcher", "client.options")
        # unified client config
        self.cfg.add(config.Scope.benchmark, "client", "hosts", hosts)
        self.cfg.add(config.Scope.benchmark, "client", "options",
                     client_options)

        es = client.EsClientFactory(hosts, client_options).create()

        # we're very specific about which nodes we kill, as there is potentially also an Elasticsearch-based metrics store running on this machine
        node_prefix = self.cfg.opts("provisioning", "node.name.prefix")
        process.kill_running_es_instances(node_prefix)

        logger.info("Starting a cluster based on car [%s] with [%d] nodes." %
                    (car, car.nodes))

        cluster_telemetry = [
            # TODO dm: Once we do distributed launching, this needs to be done per node not per cluster
            telemetry.MergeParts(self.cfg, self.metrics_store),
            telemetry.EnvironmentInfo(self.cfg, es, self.metrics_store),
            telemetry.NodeStats(self.cfg, es, self.metrics_store),
            telemetry.IndexStats(self.cfg, es, self.metrics_store),
            # TODO dm: Once we do distributed launching, this needs to be done per node not per cluster
            telemetry.IndexSize(self.cfg, self.metrics_store)
        ]

        t = telemetry.Telemetry(self.cfg, devices=cluster_telemetry)
        c = cluster.Cluster(
            [self._start_node(node, car, es) for node in range(car.nodes)], t)
        t.attach_to_cluster(c)
        return c
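
The explicit `process.kill_running_es_instances(node_prefix)` cleanup exists because a metrics store may itself be an Elasticsearch node on the same machine, so only processes carrying the Rally-assigned name prefix are terminated. A minimal sketch of such a helper using psutil, offered as an assumption about its behavior rather than Rally's actual implementation:

import psutil

def kill_running_es_instances(node_prefix):
    # Terminate only Elasticsearch processes whose command line contains the
    # given node name prefix; unrelated ES processes (e.g. a metrics store
    # without that prefix) are left alone.
    for p in psutil.process_iter(attrs=["cmdline"]):
        try:
            cmdline = " ".join(p.info["cmdline"] or [])
            if "elasticsearch" in cmdline and node_prefix in cmdline:
                p.kill()
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            # The process already exited or we lack permissions; skip it.
            pass
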
Example 2
    def start(self):
        hosts = self.cfg.opts("client", "hosts")
        client_options = self.cfg.opts("client", "options")
        es = self.client_factory(hosts, client_options).create()

        t = telemetry.Telemetry(devices=[
            telemetry.ClusterMetaDataInfo(es),
            telemetry.ClusterEnvironmentInfo(es, self.metrics_store),
            telemetry.NodeStats(es, self.metrics_store),
            telemetry.IndexStats(es, self.metrics_store)
        ])

        # The list of nodes will be populated by ClusterMetaDataInfo, so no need to do it here
        c = cluster.Cluster(hosts, [], t)
        logger.info(
            "All cluster nodes have successfully started. Checking if REST API is available."
        )
        if wait_for_rest_layer(es, max_attempts=20):
            logger.info(
                "REST API is available. Attaching telemetry devices to cluster."
            )
            t.attach_to_cluster(c)
            logger.info("Telemetry devices are now attached to the cluster.")
        else:
            # Just stop the cluster here and raise. The caller is responsible for terminating individual nodes.
            logger.error(
                "REST API layer is not yet available. Forcefully terminating cluster."
            )
            self.stop(c)
            raise exceptions.LaunchError(
                "Elasticsearch REST API layer is not available. Forcefully terminated cluster."
            )

        return c
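
`wait_for_rest_layer(es, max_attempts=20)` polls the cluster until the REST API answers or the attempt budget is exhausted. A minimal sketch, assuming a one-second pause between attempts and that an unreachable cluster makes `es.ping()` return False (Rally's real helper also handles transport errors and backs off):

import time

def wait_for_rest_layer(es, max_attempts=20):
    # Ping the REST layer once per second; succeed as soon as it answers.
    for _ in range(max_attempts):
        if es.ping():
            return True
        time.sleep(1)
    return False
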
Example 3
    def start(self, car, binary, data_paths):
        self.binary_path = binary

        hosts = self.cfg.opts("client", "hosts")
        client_options = self.cfg.opts("client", "options")
        es = self.client_factory(hosts, client_options).create()

        # Cannot enable custom telemetry devices here
        t = telemetry.Telemetry(devices=[
            # Be aware that some of the metadata is taken from the host system, not the container (e.g. the number of CPU cores), so if
            # the Docker container constrains these, the metrics are actually wrong.
            telemetry.EnvironmentInfo(es, self.metrics_store),
            telemetry.NodeStats(es, self.metrics_store),
            telemetry.IndexStats(es, self.metrics_store),
            telemetry.DiskIo(self.metrics_store),
            telemetry.CpuUsage(self.metrics_store)
        ])

        c = cluster.Cluster(hosts, [], t)
        self._start_process(cmd="docker-compose -f %s up" % self.binary_path, node_name="rally0")
        logger.info("Docker container has successfully started. Checking if REST API is available.")
        if wait_for_rest_layer(es):
            logger.info("REST API is available. Attaching telemetry devices to cluster.")
            t.attach_to_cluster(c)
            logger.info("Telemetry devices are now attached to the cluster.")
        else:
            logger.error("REST API layer is not yet available. Forcefully terminating cluster.")
            self.stop(c)
            raise exceptions.LaunchError("Elasticsearch REST API layer is not available. Forcefully terminated cluster.")
        return c
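
`_start_process` is only shown being called here; its body is not part of the example. A hypothetical sketch of what launching `docker-compose ... up` could look like with the standard library (the `node_name` bookkeeping mirrors the call site above; everything else is an assumption):

import shlex
import subprocess

def _start_process(self, cmd, node_name):
    # Start docker-compose in the background and remember the handle under
    # the node name so stop() can terminate it later.
    if not hasattr(self, "processes"):
        self.processes = {}
    self.processes[node_name] = subprocess.Popen(
        shlex.split(cmd),
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL)
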
Example 4
    def start(self, car, binary, data_paths):
        hosts = self.cfg.opts("client", "hosts")
        client_options = self.cfg.opts("client", "options")
        es = client.EsClientFactory(hosts, client_options).create()

        # we're very specific about which nodes we kill, as there is potentially also an Elasticsearch-based metrics store running on this machine
        node_prefix = self.cfg.opts("provisioning", "node.name.prefix")
        process.kill_running_es_instances(node_prefix)

        logger.info("Starting a cluster based on car [%s] with [%d] nodes." % (car, car.nodes))

        # TODO dm: Get rid of these...
        enabled_devices = self.cfg.opts("mechanic", "telemetry.devices")

        cluster_telemetry = [
            # TODO dm: Once we do distributed launching, this needs to be done per node not per cluster
            telemetry.MergeParts(self.metrics_store, self.node_log_dir),
            telemetry.EnvironmentInfo(es, self.metrics_store),
            telemetry.NodeStats(es, self.metrics_store),
            telemetry.IndexStats(es, self.metrics_store),
            # TODO dm: Once we do distributed launching, this needs to be done per node not per cluster
            telemetry.IndexSize(data_paths, self.metrics_store)
        ]
        t = telemetry.Telemetry(enabled_devices, devices=cluster_telemetry)
        c = cluster.Cluster(hosts, [self._start_node(node, car, es, binary) for node in range(car.nodes)], t)
        logger.info("All cluster nodes have successfully started. Checking if REST API is available.")
        if wait_for_rest_layer(es):
            logger.info("REST API is available. Attaching telemetry devices to cluster.")
            t.attach_to_cluster(c)
            logger.info("Telemetry devices are now attached to the cluster.")
        else:
            logger.error("REST API layer is not yet available. Forcefully terminating cluster.")
            self.stop(c)
            raise exceptions.LaunchError("Elasticsearch REST API layer is not available. Forcefully terminated cluster.")
        return c
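
Unlike the earlier examples, this variant passes `enabled_devices` into `telemetry.Telemetry`, so user opt-in via the command line can gate optional devices. A plausible sketch of that gating, stated as an assumption about Rally's internals (internal devices typically run unconditionally):

class Telemetry:
    def __init__(self, enabled_devices=None, devices=None):
        self.enabled_devices = enabled_devices or []
        self.devices = devices or []

    def attach_to_cluster(self, cluster):
        for device in self.devices:
            if self._enabled(device):
                device.attach_to_cluster(cluster)

    def _enabled(self, device):
        # Internal devices are always active; opt-in devices run only when
        # the user listed them on the command line (hypothetical attributes).
        return getattr(device, "internal", True) or device.command in self.enabled_devices
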
Example 5
    def start(self, car=None, binary=None, data_paths=None):
        console.println(ExternalLauncher.BOGUS_RESULTS_WARNING)
        hosts = self.cfg.opts("client", "hosts")
        client_options = self.cfg.opts("client", "options")
        es = self.client_factory(hosts, client_options).create()

        # cannot enable custom telemetry devices here
        t = telemetry.Telemetry(devices=[
            telemetry.ClusterMetaDataInfo(es),
            telemetry.ExternalEnvironmentInfo(es, self.metrics_store),
            telemetry.NodeStats(es, self.metrics_store),
            telemetry.IndexStats(es, self.metrics_store)
        ])
        # cluster nodes will be populated by the external environment info telemetry device. We cannot know this upfront.
        c = cluster.Cluster(hosts, [], t)
        user_defined_version = self.cfg.opts("mechanic", "distribution.version", mandatory=False)
        distribution_version = es.info()["version"]["number"]
        if not user_defined_version or user_defined_version.strip() == "":
            logger.info("Distribution version was not specified by user. Rally-determined version is [%s]" % distribution_version)
            self.cfg.add(config.Scope.benchmark, "mechanic", "distribution.version", distribution_version)
        elif user_defined_version != distribution_version:
            console.warn(
                "Specified distribution version '%s' on the command line differs from version '%s' reported by the cluster." %
                (user_defined_version, distribution_version), logger=logger)
        t.attach_to_cluster(c)
        return c
Example 6
    def start(self):
        """
        Performs final startup tasks.

        Precondition: All cluster nodes have been started.
        Postcondition: The cluster is ready to receive HTTP requests or a ``LaunchError`` is raised.

        :return: A representation of the launched cluster.
        """
        enabled_devices = self.cfg.opts("mechanic", "telemetry.devices")
        telemetry_params = self.cfg.opts("mechanic", "telemetry.params")
        all_hosts = self.cfg.opts("client", "hosts").all_hosts
        default_hosts = self.cfg.opts("client", "hosts").default
        preserve = self.cfg.opts("mechanic", "preserve.install")
        skip_rest_api_check = self.cfg.opts("mechanic", "skip.rest.api.check")

        es = {}
        for cluster_name, cluster_hosts in all_hosts.items():
            all_client_options = self.cfg.opts("client", "options").all_client_options
            cluster_client_options = dict(all_client_options[cluster_name])
            # Use retries to avoid aborts on long-lived connections for telemetry devices
            cluster_client_options["retry-on-timeout"] = True
            es[cluster_name] = self.client_factory(cluster_hosts, cluster_client_options).create()

        es_default = es["default"]

        t = telemetry.Telemetry(enabled_devices, devices=[
            telemetry.NodeStats(telemetry_params, es, self.metrics_store),
            telemetry.ClusterMetaDataInfo(es_default),
            telemetry.ClusterEnvironmentInfo(es_default, self.metrics_store),
            telemetry.JvmStatsSummary(es_default, self.metrics_store),
            telemetry.IndexStats(es_default, self.metrics_store),
            telemetry.MlBucketProcessingTime(es_default, self.metrics_store),
            telemetry.CcrStats(telemetry_params, es, self.metrics_store),
            telemetry.RecoveryStats(telemetry_params, es, self.metrics_store)
        ])

        # The list of nodes will be populated by ClusterMetaDataInfo, so no need to do it here
        c = cluster.Cluster(default_hosts, [], t, preserve)

        if skip_rest_api_check:
            self.logger.info("Skipping REST API check and attaching telemetry devices to cluster.")
            t.attach_to_cluster(c)
            self.logger.info("Telemetry devices are now attached to the cluster.")
        else:
            self.logger.info("All cluster nodes have successfully started. Checking if REST API is available.")
            if wait_for_rest_layer(es_default, max_attempts=40):
                self.logger.info("REST API is available. Attaching telemetry devices to cluster.")
                t.attach_to_cluster(c)
                self.logger.info("Telemetry devices are now attached to the cluster.")
            else:
                # Just stop the cluster here and raise. The caller is responsible for terminating individual nodes.
                self.logger.error("REST API layer is not yet available. Forcefully terminating cluster.")
                self.stop(c)
                raise exceptions.LaunchError(
                    "Elasticsearch REST API layer is not available. Forcefully terminated cluster.")
        return c
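
This example targets multiple named clusters at once, so `hosts` and `options` are no longer flat lists but containers keyed by cluster name with a mandatory "default" entry. A sketch of the shape the accessors `all_hosts`, `default` and `all_client_options` imply (the concrete cluster names are illustrative):

# all_hosts: one host list per named cluster; "default" is the benchmark target.
all_hosts = {
    "default": [{"host": "10.0.0.1", "port": 9200}],
    "follower": [{"host": "10.0.0.2", "port": 9200}]
}

# all_client_options mirrors the cluster names so every cluster's client can
# be configured independently (e.g. TLS towards one cluster only).
all_client_options = {
    "default": {"timeout": 60},
    "follower": {"timeout": 60, "use_ssl": True}
}
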
Example 7
    def start(self, car=None):
        console.println(ExternalLauncher.BOGUS_RESULTS_WARNING)

        hosts = self.cfg.opts("launcher", "external.target.hosts")
        client_options = self.cfg.opts("launcher", "client.options")
        # unified client config
        self.cfg.add(config.Scope.benchmark, "client", "hosts", hosts)
        self.cfg.add(config.Scope.benchmark, "client", "options",
                     client_options)

        es = self.client_factory(hosts, client_options).create()

        t = telemetry.Telemetry(
            self.cfg,
            devices=[
                telemetry.ExternalEnvironmentInfo(self.cfg, es,
                                                  self.metrics_store),
                telemetry.NodeStats(self.cfg, es, self.metrics_store),
                telemetry.IndexStats(self.cfg, es, self.metrics_store)
            ])
        c = cluster.Cluster([], t)
        user_defined_version = self.cfg.opts("source",
                                             "distribution.version",
                                             mandatory=False)
        distribution_version = es.info()["version"]["number"]
        if not user_defined_version or user_defined_version.strip() == "":
            logger.info(
                "Distribution version was not specified by user. Rally-determined version is [%s]"
                % distribution_version)
            self.cfg.add(config.Scope.benchmark, "source",
                         "distribution.version", distribution_version)
        elif user_defined_version != distribution_version:
            console.println(
                "Warning: Specified distribution version '%s' on the command line differs from version '%s' reported by the cluster."
                % (user_defined_version, distribution_version),
                logger=logger.warn)
        t.attach_to_cluster(c)
        return c
Example 8
    def test_stores_only_diff_of_gc_times(self, metrics_store_node_level,
                                          metrics_store_cluster_level):
        nodes_stats_at_start = {
            "nodes": {
                "FCFjozkeTiOpN-SI88YEcg": {
                    "name": "rally0",
                    "host": "127.0.0.1",
                    "jvm": {
                        "gc": {
                            "collectors": {
                                "old": {
                                    "collection_time_in_millis": 1000
                                },
                                "young": {
                                    "collection_time_in_millis": 500
                                }
                            }
                        }
                    }
                }
            }
        }

        client = Client(nodes=SubClient(nodes_stats_at_start))
        cfg = create_config()

        metrics_store = metrics.EsMetricsStore(cfg)
        device = telemetry.NodeStats(client, metrics_store)
        t = telemetry.Telemetry(cfg, devices=[device])
        t.on_benchmark_start()
        # now we'd need to change the node stats response
        nodes_stats_at_end = {
            "nodes": {
                "FCFjozkeTiOpN-SI88YEcg": {
                    "name": "rally0",
                    "host": "127.0.0.1",
                    "jvm": {
                        "gc": {
                            "collectors": {
                                "old": {
                                    "collection_time_in_millis": 2500
                                },
                                "young": {
                                    "collection_time_in_millis": 1200
                                }
                            }
                        }
                    }
                }
            }
        }
        client.nodes = SubClient(nodes_stats_at_end)
        t.on_benchmark_stop()

        metrics_store_node_level.assert_has_calls([
            mock.call("rally0", "node_young_gen_gc_time", 700, "ms"),
            mock.call("rally0", "node_old_gen_gc_time", 1500, "ms")
        ])

        metrics_store_cluster_level.assert_has_calls([
            mock.call("node_total_young_gen_gc_time", 700, "ms"),
            mock.call("node_total_old_gen_gc_time", 1500, "ms")
        ])
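
The asserted numbers follow directly from the two snapshots: GC collection times are cumulative since JVM start, so the device stores end minus start, i.e. 2500 - 1000 = 1500 ms for the old generation and 1200 - 500 = 700 ms for the young generation. A minimal sketch of that diffing step (the real NodeStats device records more than GC times):

def gc_time_diff(stats_at_start, stats_at_end, node_id, collector):
    # Collection time is a monotonically growing counter, so the benchmark's
    # share is the difference between the two snapshots.
    def total(stats):
        collectors = stats["nodes"][node_id]["jvm"]["gc"]["collectors"]
        return collectors[collector]["collection_time_in_millis"]
    return total(stats_at_end) - total(stats_at_start)

# With the snapshots from the test above ("FCFjozkeTiOpN-SI88YEcg"):
#   old:   2500 - 1000 == 1500
#   young: 1200 -  500 ==  700
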
Example 9
    def start(self, car):
        # hardcoded for the moment, should actually be identical to internal launcher
        # Only needed on Mac:
        # hosts = [{"host": process.run_subprocess_with_output("docker-machine ip default")[0].strip(), "port": 9200}]
        hosts = [{"host": "localhost", "port": 9200}]
        client_options = self.cfg.opts("launcher", "client.options")
        # unified client config
        self.cfg.add(config.Scope.benchmark, "client", "hosts", hosts)
        self.cfg.add(config.Scope.benchmark, "client", "options",
                     client_options)

        es = self.client_factory(hosts, client_options).create()

        t = telemetry.Telemetry(
            self.cfg,
            devices=[
                # Be aware that some of the metadata is taken from the host system, not the container (e.g. the number of CPU cores), so if
                # the Docker container constrains these, the metrics are actually wrong.
                telemetry.EnvironmentInfo(self.cfg, es, self.metrics_store),
                telemetry.NodeStats(self.cfg, es, self.metrics_store),
                telemetry.IndexStats(self.cfg, es, self.metrics_store),
                telemetry.DiskIo(self.cfg, self.metrics_store),
                telemetry.CpuUsage(self.cfg, self.metrics_store)
            ])

        distribution_version = self.cfg.opts("source",
                                             "distribution.version",
                                             mandatory=False)

        install_dir = self._install_dir()
        io.ensure_dir(install_dir)

        java_opts = ""
        if car.heap:
            java_opts += "-Xms%s -Xmx%s " % (car.heap, car.heap)
        if car.java_opts:
            java_opts += car.java_opts

        vars = {
            "es_java_opts": java_opts,
            "container_memory_gb": "%dg" % (convert.bytes_to_gb(psutil.virtual_memory().total) // 2),
            "es_data_dir": "%s/data" % install_dir,
            "es_version": distribution_version
        }

        docker_cfg = self._render_template_from_file(vars)
        logger.info("Starting Docker container with configuration:\n%s" %
                    docker_cfg)
        docker_cfg_path = self._docker_cfg_path()
        with open(docker_cfg_path, "wt") as f:
            f.write(docker_cfg)

        c = cluster.Cluster([], t)

        self._start_process(cmd="docker-compose -f %s up" % docker_cfg_path,
                            node_name="rally0")
        # Wait for a little while: Plugins may still be initializing although the node has already started.
        time.sleep(10)

        t.attach_to_cluster(c)
        logger.info("Successfully started Docker container")
        return c
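
`_render_template_from_file(vars)` substitutes the four variables into a docker-compose template before the container is started. A minimal sketch of such a helper, assuming Jinja2 and a hypothetical template file name (Rally's actual template and loader setup may differ):

import jinja2

def _render_template_from_file(self, variables):
    # Substitute es_java_opts, container_memory_gb, es_data_dir and
    # es_version into the docker-compose template; fail loudly on any
    # variable the template expects but did not receive.
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(searchpath="."),
        undefined=jinja2.StrictUndefined)
    template = env.get_template("docker-compose.yml.j2")  # hypothetical name
    return template.render(variables)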