Example #1
    def start(self, node_configurations=None):
        hosts = self.cfg.opts("client", "hosts").default
        client_options = self.cfg.opts("client", "options").default
        es = self.client_factory(hosts, client_options).create()

        # cannot enable custom telemetry devices here
        t = telemetry.Telemetry(devices=[
            # This is needed to actually populate the nodes
            telemetry.ClusterMetaDataInfo(es),
            # will gather node specific meta-data for all nodes
            telemetry.ExternalEnvironmentInfo(es, self.metrics_store),
        ])
        # We create a pseudo-cluster here to get information about all nodes.
        # Cluster nodes will be populated by the external environment info telemetry device; we cannot know them upfront.
        c = cluster.Cluster(hosts, [], t)
        user_defined_version = self.cfg.opts("mechanic", "distribution.version", mandatory=False)
        distribution_version = es.info()["version"]["number"]
        if not user_defined_version or user_defined_version.strip() == "":
            self.logger.info("Distribution version was not specified by user. Rally-determined version is [%s]", distribution_version)
            self.cfg.add(config.Scope.benchmark, "mechanic", "distribution.version", distribution_version)
        elif user_defined_version != distribution_version:
            console.warn(
                "Specified distribution version '%s' on the command line differs from version '%s' reported by the cluster." %
                (user_defined_version, distribution_version), logger=self.logger)
        t.attach_to_cluster(c)
        return c.nodes
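
The launcher above returns c.nodes even though the cluster is created with an empty node list: the telemetry devices fill it in during attach_to_cluster. As a rough sketch of the cluster-level part of that enrichment (inferred from the assertions in Examples #7 and #8; the real device also enriches per-node metadata such as os, jvm, cpu, fs and plugins):

    class ClusterMetaDataInfo:
        # Hypothetical sketch, not the actual implementation.
        def __init__(self, client):
            self.client = client

        def attach_to_cluster(self, cluster):
            # Populate cluster-wide metadata from the cluster info API.
            version = self.client.info()["version"]
            cluster.distribution_version = version["number"]
            cluster.source_revision = version["build_hash"]
            # Per-node enrichment via client.nodes.info() / client.nodes.stats() omitted.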
Example #2
    def start(self, car, binary, data_paths):
        self.binary_path = binary

        hosts = self.cfg.opts("client", "hosts")
        client_options = self.cfg.opts("client", "options")
        es = self.client_factory(hosts, client_options).create()

        # Cannot enable custom telemetry devices here
        t = telemetry.Telemetry(devices=[
            # Be aware that some of the meta-data are taken from the host system, not the container (e.g. number of CPU cores), so if
            # the Docker container constrains these, the metrics are actually wrong.
            telemetry.ClusterMetaDataInfo(es),
            telemetry.EnvironmentInfo(es, self.metrics_store),
            telemetry.NodeStats(es, self.metrics_store),
            telemetry.IndexStats(es, self.metrics_store)
        ])

        c = cluster.Cluster(hosts, [self._start_node(hosts[0], 0, es)], t)

        logger.info("Docker container has successfully started. Checking if REST API is available.")
        if wait_for_rest_layer(es):
            logger.info("REST API is available. Attaching telemetry devices to cluster.")
            t.attach_to_cluster(c)
            logger.info("Telemetry devices are now attached to the cluster.")
        else:
            logger.error("REST API layer is not yet available. Forcefully terminating cluster.")
            self.stop(c)
            raise exceptions.LaunchError("Elasticsearch REST API layer is not available. Forcefully terminated cluster.")
        return c
Example #3
    def start(self):
        hosts = self.cfg.opts("client", "hosts")
        client_options = self.cfg.opts("client", "options")
        es = self.client_factory(hosts, client_options).create()

        t = telemetry.Telemetry(devices=[
            telemetry.ClusterMetaDataInfo(es),
            telemetry.ClusterEnvironmentInfo(es, self.metrics_store),
            telemetry.NodeStats(es, self.metrics_store),
            telemetry.IndexStats(es, self.metrics_store)
        ])

        # The list of nodes will be populated by ClusterMetaDataInfo, so no need to do it here
        c = cluster.Cluster(hosts, [], t)
        logger.info("All cluster nodes have successfully started. Checking if REST API is available.")
        if wait_for_rest_layer(es, max_attempts=20):
            logger.info("REST API is available. Attaching telemetry devices to cluster.")
            t.attach_to_cluster(c)
            logger.info("Telemetry devices are now attached to the cluster.")
        else:
            # Just stop the cluster here and raise. The caller is responsible for terminating individual nodes.
            logger.error("REST API layer is not yet available. Forcefully terminating cluster.")
            self.stop(c)
            raise exceptions.LaunchError("Elasticsearch REST API layer is not available. Forcefully terminated cluster.")

        return c
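
Several of these launchers gate telemetry attachment on wait_for_rest_layer(es, max_attempts=...), which is not shown in this section. A minimal sketch of such a poll loop, assuming one health check per second (the es.cluster.health call and the backoff are illustrative, not the actual implementation):

    import time

    def wait_for_rest_layer(es, max_attempts=20):
        # Hypothetical sketch: return True as soon as the REST layer answers,
        # False if the cluster stays unreachable for max_attempts attempts.
        for _ in range(max_attempts):
            try:
                es.cluster.health(wait_for_nodes=1)
                return True
            except Exception:
                time.sleep(1)
        return False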
Example #4
    def start(self, car=None, binary=None, data_paths=None):
        console.println(ExternalLauncher.BOGUS_RESULTS_WARNING)
        hosts = self.cfg.opts("client", "hosts")
        client_options = self.cfg.opts("client", "options")
        es = self.client_factory(hosts, client_options).create()

        # cannot enable custom telemetry devices here
        t = telemetry.Telemetry(devices=[
            telemetry.ClusterMetaDataInfo(es),
            telemetry.ExternalEnvironmentInfo(es, self.metrics_store),
            telemetry.NodeStats(es, self.metrics_store),
            telemetry.IndexStats(es, self.metrics_store)
        ])
        # Cluster nodes will be populated by the external environment info telemetry device; we cannot know them upfront.
        c = cluster.Cluster(hosts, [], t)
        user_defined_version = self.cfg.opts("mechanic", "distribution.version", mandatory=False)
        distribution_version = es.info()["version"]["number"]
        if not user_defined_version or user_defined_version.strip() == "":
            logger.info("Distribution version was not specified by user. Rally-determined version is [%s]" % distribution_version)
            self.cfg.add(config.Scope.benchmark, "mechanic", "distribution.version", distribution_version)
        elif user_defined_version != distribution_version:
            console.warn(
                "Specified distribution version '%s' on the command line differs from version '%s' reported by the cluster." %
                (user_defined_version, distribution_version), logger=logger)
        t.attach_to_cluster(c)
        return c
Example #5
    def start(self):
        """
        Performs final startup tasks.

        Precondition: All cluster nodes have been started.
        Postcondition: The cluster is ready to receive HTTP requests or a ``LaunchError`` is raised.

        :return: A representation of the launched cluster.
        """
        enabled_devices = self.cfg.opts("mechanic", "telemetry.devices")
        telemetry_params = self.cfg.opts("mechanic", "telemetry.params")
        all_hosts = self.cfg.opts("client", "hosts").all_hosts
        default_hosts = self.cfg.opts("client", "hosts").default
        preserve = self.cfg.opts("mechanic", "preserve.install")
        skip_rest_api_check = self.cfg.opts("mechanic", "skip.rest.api.check")

        es = {}
        for cluster_name, cluster_hosts in all_hosts.items():
            all_client_options = self.cfg.opts("client", "options").all_client_options
            cluster_client_options = dict(all_client_options[cluster_name])
            # Use retries to avoid aborts on long living connections for telemetry devices
            cluster_client_options["retry-on-timeout"] = True
            es[cluster_name] = self.client_factory(cluster_hosts, cluster_client_options).create()

        es_default = es["default"]

        t = telemetry.Telemetry(enabled_devices, devices=[
            telemetry.NodeStats(telemetry_params, es, self.metrics_store),
            telemetry.ClusterMetaDataInfo(es_default),
            telemetry.ClusterEnvironmentInfo(es_default, self.metrics_store),
            telemetry.JvmStatsSummary(es_default, self.metrics_store),
            telemetry.IndexStats(es_default, self.metrics_store),
            telemetry.MlBucketProcessingTime(es_default, self.metrics_store),
            telemetry.CcrStats(telemetry_params, es, self.metrics_store),
            telemetry.RecoveryStats(telemetry_params, es, self.metrics_store)
        ])

        # The list of nodes will be populated by ClusterMetaDataInfo, so no need to do it here
        c = cluster.Cluster(default_hosts, [], t, preserve)

        if skip_rest_api_check:
            self.logger.info("Skipping REST API check and attaching telemetry devices to cluster.")
            t.attach_to_cluster(c)
            self.logger.info("Telemetry devices are now attached to the cluster.")
        else:
            self.logger.info("All cluster nodes have successfully started. Checking if REST API is available.")
            if wait_for_rest_layer(es_default, max_attempts=40):
                self.logger.info("REST API is available. Attaching telemetry devices to cluster.")
                t.attach_to_cluster(c)
                self.logger.info("Telemetry devices are now attached to the cluster.")
            else:
                # Just stop the cluster here and raise. The caller is responsible for terminating individual nodes.
                self.logger.error("REST API layer is not yet available. Forcefully terminating cluster.")
                self.stop(c)
                raise exceptions.LaunchError(
                    "Elasticsearch REST API layer is not available. Forcefully terminated cluster.")
        return c
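
Example #5 reads multi-cluster settings via .all_hosts, .default and .all_client_options. The shapes below are assumed for illustration (the "remote" cluster name and the "timeout" option are made up) but match how the loop above consumes them:

    all_hosts = {
        "default": [{"host": "127.0.0.1", "port": 9200}],
        "remote": [{"host": "10.5.5.10", "port": 9200}]  # hypothetical second cluster
    }
    all_client_options = {
        "default": {"timeout": 60},
        "remote": {"timeout": 60}
    }
    # The launcher copies each options dict and then forces
    # cluster_client_options["retry-on-timeout"] = True before creating the client.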
Example #6
    def start(self, car, binary, data_paths):
        hosts = self.cfg.opts("client", "hosts")
        client_options = self.cfg.opts("client", "options")
        es = client.EsClientFactory(hosts, client_options).create()

        # we're very specific about which processes we kill as there is potentially also an Elasticsearch-based metrics store running on this machine
        node_prefix = self.cfg.opts("provisioning", "node.name.prefix")
        process.kill_running_es_instances(node_prefix)

        logger.info("Starting a cluster based on car [%s] with [%d] nodes." % (car, car.nodes))

        # TODO dm: Get rid of this config setting and replace it with a proper list
        enabled_devices = self.cfg.opts("mechanic", "telemetry.devices")

        cluster_telemetry = [
            telemetry.ClusterMetaDataInfo(es),
            # TODO dm: Once we do distributed launching, this needs to be done per node not per cluster
            telemetry.MergeParts(self.metrics_store, self.node_log_dir),
            telemetry.EnvironmentInfo(es, self.metrics_store),
            telemetry.NodeStats(es, self.metrics_store),
            telemetry.IndexStats(es, self.metrics_store),
            # TODO dm: Once we do distributed launching, this needs to be done per node not per cluster
            telemetry.IndexSize(data_paths, self.metrics_store)
        ]
        t = telemetry.Telemetry(enabled_devices, devices=cluster_telemetry)
        c = cluster.Cluster(hosts, [self._start_node(node, car, es, binary) for node in range(car.nodes)], t)
        logger.info("All cluster nodes have successfully started. Checking if REST API is available.")
        if wait_for_rest_layer(es):
            logger.info("REST API is available. Attaching telemetry devices to cluster.")
            t.attach_to_cluster(c)
            logger.info("Telemetry devices are now attached to the cluster.")
        else:
            logger.error("REST API layer is not yet available. Forcefully terminating cluster.")
            self.stop(c)
            raise exceptions.LaunchError("Elasticsearch REST API layer is not available. Forcefully terminated cluster.")
        return c
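
Example #6 relies on process.kill_running_es_instances(node_prefix) to clean up stray benchmark nodes while sparing an Elasticsearch-based metrics store on the same machine. A sketch of such a helper, assuming psutil is available (the matching heuristics are illustrative, not the actual logic):

    import psutil

    def kill_running_es_instances(node_prefix):
        # Hypothetical sketch: terminate only Java processes whose command line
        # mentions both Elasticsearch and the benchmark's node name prefix.
        for p in psutil.process_iter(attrs=["cmdline"]):
            try:
                cmdline = " ".join(p.info["cmdline"] or [])
                if "java" in cmdline and "elasticsearch" in cmdline.lower() and node_prefix in cmdline:
                    p.terminate()
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                continue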
Example #7
    def test_enriches_cluster_nodes_for_elasticsearch_1_x(self):
        nodes_stats = {
            "nodes": {
                "FCFjozkeTiOpN-SI88YEcg": {
                    "name": "rally0",
                    "host": "127.0.0.1",
                    "fs": {
                        "data": [{
                            "mount": "/usr/local/var/elasticsearch/data1",
                            "type": "hfs"
                        }, {
                            "mount": "/usr/local/var/elasticsearch/data2",
                            "type": "ntfs"
                        }]
                    }
                }
            }
        }

        nodes_info = {
            "nodes": {
                "FCFjozkeTiOpN-SI88YEcg": {
                    "name": "rally0",
                    "host": "127.0.0.1",
                    "ip": "127.0.0.1",
                    "os": {
                        "name": "Mac OS X",
                        "version": "10.11.4",
                        "available_processors": 8,
                        "mem": {
                            "total_in_bytes": 17179869184
                        }
                    },
                    "jvm": {
                        "version": "1.8.0_74",
                        "vm_vendor": "Oracle Corporation"
                    }
                }
            }
        }
        cluster_info = {
            "version": {
                "build_hash": "c730b59357f8ebc555286794dcd90b3411f517c9",
                "number": "1.7.5"
            }
        }
        client = Client(nodes=SubClient(stats=nodes_stats, info=nodes_info), info=cluster_info)

        t = telemetry.Telemetry(devices=[telemetry.ClusterMetaDataInfo(client)])

        c = cluster.Cluster(hosts=[{"host": "localhost", "port": 39200}],
                            nodes=[cluster.Node(process=None, host_name="local", node_name="rally0", telemetry=None)],
                            telemetry=t)

        t.attach_to_cluster(c)

        self.assertEqual("1.7.5", c.distribution_version)
        self.assertEqual("c730b59357f8ebc555286794dcd90b3411f517c9",
                         c.source_revision)
        self.assertEqual(1, len(c.nodes))
        n = c.nodes[0]
        self.assertEqual("127.0.0.1", n.ip)
        self.assertEqual("Mac OS X", n.os["name"])
        self.assertEqual("10.11.4", n.os["version"])
        self.assertEqual("Oracle Corporation", n.jvm["vendor"])
        self.assertEqual("1.8.0_74", n.jvm["version"])
        self.assertEqual(8, n.cpu["available_processors"])
        self.assertIsNone(n.cpu["allocated_processors"])
        self.assertEqual(17179869184, n.memory["total_bytes"])

        self.assertEqual(2, len(n.fs))
        self.assertEqual("/usr/local/var/elasticsearch/data1",
                         n.fs[0]["mount"])
        self.assertEqual("hfs", n.fs[0]["type"])
        self.assertEqual("unknown", n.fs[0]["spins"])
        self.assertEqual("/usr/local/var/elasticsearch/data2",
                         n.fs[1]["mount"])
        self.assertEqual("ntfs", n.fs[1]["type"])
        self.assertEqual("unknown", n.fs[1]["spins"])
Example #8
    def test_enriches_cluster_nodes_for_elasticsearch_after_1_x(self):
        nodes_stats = {
            "nodes": {
                "FCFjozkeTiOpN-SI88YEcg": {
                    "name": "rally0",
                    "host": "127.0.0.1",
                    "os": {
                        "mem": {
                            "total_in_bytes": 17179869184
                        }
                    },
                    "fs": {
                        "data": [{
                            "mount": "/usr/local/var/elasticsearch/data1",
                            "type": "hfs"
                        }, {
                            "mount": "/usr/local/var/elasticsearch/data2",
                            "type": "ntfs"
                        }]
                    }
                }
            }
        }

        nodes_info = {
            "nodes": {
                "FCFjozkeTiOpN-SI88YEcg": {
                    "name":
                    "rally0",
                    "host":
                    "127.0.0.1",
                    "ip":
                    "127.0.0.1",
                    "os": {
                        "name": "Mac OS X",
                        "version": "10.11.4",
                        "available_processors": 8,
                        "allocated_processors": 4
                    },
                    "jvm": {
                        "version": "1.8.0_74",
                        "vm_vendor": "Oracle Corporation"
                    },
                    "plugins": [{
                        "name": "analysis-icu",
                        "version": "5.0.0",
                        "description":
                        "The ICU Analysis plugin integrates Lucene ICU module ...",
                        "classname":
                        "org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin",
                        "has_native_controller": False
                    }, {
                        "name": "ingest-geoip",
                        "version": "5.0.0",
                        "description":
                        "Ingest processor that uses looksup geo data ...",
                        "classname":
                        "org.elasticsearch.ingest.geoip.IngestGeoIpPlugin",
                        "has_native_controller": False
                    }, {
                        "name": "ingest-user-agent",
                        "version": "5.0.0",
                        "description":
                        "Ingest processor that extracts information from a user agent",
                        "classname":
                        "org.elasticsearch.ingest.useragent.IngestUserAgentPlugin",
                        "has_native_controller": False
                    }]
                }
            }
        }
        cluster_info = {
            "version": {
                "build_hash": "253032b",
                "number": "5.0.0"
            }
        }
        client = Client(nodes=SubClient(stats=nodes_stats, info=nodes_info), info=cluster_info)

        t = telemetry.Telemetry(devices=[telemetry.ClusterMetaDataInfo(client)])

        c = cluster.Cluster(hosts=[{"host": "localhost", "port": 39200}],
                            nodes=[cluster.Node(process=None, host_name="local", node_name="rally0", telemetry=None)],
                            telemetry=t)

        t.attach_to_cluster(c)

        self.assertEqual("5.0.0", c.distribution_version)
        self.assertEqual("253032b", c.source_revision)
        self.assertEqual(1, len(c.nodes))
        n = c.nodes[0]
        self.assertEqual("127.0.0.1", n.ip)
        self.assertEqual("Mac OS X", n.os["name"])
        self.assertEqual("10.11.4", n.os["version"])
        self.assertEqual("Oracle Corporation", n.jvm["vendor"])
        self.assertEqual("1.8.0_74", n.jvm["version"])
        self.assertEqual(8, n.cpu["available_processors"])
        self.assertEqual(4, n.cpu["allocated_processors"])
        self.assertEqual(17179869184, n.memory["total_bytes"])

        self.assertEqual(2, len(n.fs))
        self.assertEqual("/usr/local/var/elasticsearch/data1",
                         n.fs[0]["mount"])
        self.assertEqual("hfs", n.fs[0]["type"])
        self.assertEqual("unknown", n.fs[0]["spins"])
        self.assertEqual("/usr/local/var/elasticsearch/data2",
                         n.fs[1]["mount"])
        self.assertEqual("ntfs", n.fs[1]["type"])
        self.assertEqual("unknown", n.fs[1]["spins"])
        self.assertEqual(["analysis-icu", "ingest-geoip", "ingest-user-agent"],
                         n.plugins)