Code example #1
File: config_test.py Project: yodasantu/rally
    def test_can_load_and_amend_existing_config(self):
        base_cfg = config.Config(config_name="unittest", config_file_class=InMemoryConfigStore)
        base_cfg.add(config.Scope.application, "meta", "config.version", config.Config.CURRENT_CONFIG_VERSION)
        base_cfg.add(config.Scope.application, "benchmarks", "local.dataset.cache", "/base-config/data-set-cache")
        base_cfg.add(config.Scope.application, "unit-test", "sample.property", "let me copy you")

        cfg = config.auto_load_local_config(base_cfg, additional_sections=["unit-test"],
                                            config_file_class=InMemoryConfigStore, present=True, config={
            "distributions": {
                "release.url": "https://acme.com/releases",
                "release.cache": "true",
            },
            "system": {
                "env.name": "existing-unit-test-config"
            },
            "meta": {
                "config.version": config.Config.CURRENT_CONFIG_VERSION
            },
            "benchmarks": {
                "local.dataset.cache": "/tmp/rally/data"
            }
        })
        self.assertTrue(cfg.config_file.present)
        # did not just copy base config
        self.assertNotEqual(base_cfg.opts("benchmarks", "local.dataset.cache"), cfg.opts("benchmarks", "local.dataset.cache"))
        # keeps config properties
        self.assertEqual("existing-unit-test-config", cfg.opts("system", "env.name"))
        # copies additional properties
        self.assert_equals_base_config(base_cfg, cfg, "unit-test", "sample.property")
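
The final assertion above calls an assert_equals_base_config helper defined elsewhere in the test class. A minimal sketch of such a helper, assuming it merely checks that the amended config exposes the same value as the base config for the given section and key (the actual helper in Rally's config_test.py may differ):

    def assert_equals_base_config(self, base_config, local_config, section, key):
        # hypothetical helper: the locally loaded config should carry over the
        # base config's value unchanged for this section/key
        self.assertEqual(base_config.opts(section, key), local_config.opts(section, key))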
Code example #2
File: config_test.py Project: yodasantu/rally
    def test_can_migrate_outdated_config(self):
        base_cfg = config.Config(config_name="unittest", config_file_class=InMemoryConfigStore)
        base_cfg.add(config.Scope.application, "meta", "config.version", config.Config.CURRENT_CONFIG_VERSION)
        base_cfg.add(config.Scope.application, "benchmarks", "local.dataset.cache", "/base-config/data-set-cache")
        base_cfg.add(config.Scope.application, "unit-test", "sample.property", "let me copy you")

        cfg = config.auto_load_local_config(base_cfg, additional_sections=["unit-test"],
                                            config_file_class=InMemoryConfigStore, present=True, config={
                "distributions": {
                    "release.url": "https://acme.com/releases",
                    "release.cache": "true",
                },
                "system": {
                    "env.name": "existing-unit-test-config"
                },
                # outdated
                "meta": {
                    "config.version": config.Config.CURRENT_CONFIG_VERSION - 1
                },
                "benchmarks": {
                    "local.dataset.cache": "/tmp/rally/data"
                },
                "runtime": {
                    "java8.home": "/opt/jdk8"
                }
            })
        self.assertTrue(cfg.config_file.present)
        # did not just copy base config
        self.assertNotEqual(base_cfg.opts("benchmarks", "local.dataset.cache"), cfg.opts("benchmarks", "local.dataset.cache"))
        # migrated existing config
        self.assertEqual(config.Config.CURRENT_CONFIG_VERSION, int(cfg.opts("meta", "config.version")))
Code example #3
    def test_can_create_non_existing_config(self):
        base_cfg = config.Config(config_name="unittest",
                                 config_file_class=InMemoryConfigStore)
        base_cfg.add(config.Scope.application, "meta", "config.version",
                     config.Config.CURRENT_CONFIG_VERSION)
        base_cfg.add(config.Scope.application, "benchmarks",
                     "local.dataset.cache", "/base-config/data-set-cache")
        base_cfg.add(config.Scope.application, "reporting", "datastore.type",
                     "elasticsearch")
        base_cfg.add(config.Scope.application, "tracks", "metrics.url",
                     "http://github.com/org/metrics")
        base_cfg.add(config.Scope.application, "teams", "private.url",
                     "http://github.com/org/teams")
        base_cfg.add(config.Scope.application, "distributions",
                     "release.cache", False)
        base_cfg.add(config.Scope.application, "defaults",
                     "preserve_benchmark_candidate", True)

        cfg = config.auto_load_local_config(
            base_cfg, config_file_class=InMemoryConfigStore)
        self.assertTrue(cfg.config_file.present)
        # did not just copy base config
        self.assertNotEqual(base_cfg.opts("benchmarks", "local.dataset.cache"),
                            cfg.opts("benchmarks", "local.dataset.cache"))
        # copied sections from base config
        self.assert_equals_base_config(base_cfg, cfg, "reporting",
                                       "datastore.type")
        self.assert_equals_base_config(base_cfg, cfg, "tracks", "metrics.url")
        self.assert_equals_base_config(base_cfg, cfg, "teams", "private.url")
        self.assert_equals_base_config(base_cfg, cfg, "distributions",
                                       "release.cache")
        self.assert_equals_base_config(base_cfg, cfg, "defaults",
                                       "preserve_benchmark_candidate")
Code example #4
    def receiveMsg_StartNodes(self, msg, sender):
        try:
            self.host = msg.ip
            if msg.external:
                self.logger.info(
                    "Connecting to externally provisioned nodes on [%s].",
                    msg.ip)
            else:
                self.logger.info("Starting node(s) %s on [%s].", msg.node_ids,
                                 msg.ip)

            # Load node-specific configuration
            self.config = config.auto_load_local_config(
                msg.cfg,
                additional_sections=[
                    # only copy the relevant bits
                    "track",
                    "mechanic",
                    "client",
                    # allow metrics store to extract race meta-data
                    "race",
                    "source"
                ])
            # set root path (normally done by the main entry point)
            self.config.add(config.Scope.application, "node", "rally.root",
                            paths.rally_root())
            if not msg.external:
                self.config.add(config.Scope.benchmark, "provisioning",
                                "node.ip", msg.ip)
                # we need to override the port with the value that the user has specified instead of using the default value (39200)
                self.config.add(config.Scope.benchmark, "provisioning",
                                "node.http.port", msg.port)
                self.config.add(config.Scope.benchmark, "provisioning",
                                "node.ids", msg.node_ids)

            cls = metrics.metrics_store_class(self.config)
            self.metrics_store = cls(self.config)
            self.metrics_store.open(ctx=msg.open_metrics_context)
            # avoid follow-up errors in case we receive an unexpected ActorExitRequest due to an early failure in a parent actor.
            self.metrics_store.lap = 0

            self.mechanic = create(self.config, self.metrics_store,
                                   msg.all_node_ips, msg.cluster_settings,
                                   msg.sources, msg.build, msg.distribution,
                                   msg.external, msg.docker)
            nodes = self.mechanic.start_engine()
            self.running = True
            self.send(
                getattr(msg, "reply_to", sender),
                NodesStarted([NodeMetaInfo(node) for node in nodes],
                             self.metrics_store.meta_info))
        except Exception:
            self.logger.exception("Cannot process message [%s]", msg)
            # avoid "can't pickle traceback objects"
            import traceback
            ex_type, ex_value, ex_traceback = sys.exc_info()
            self.send(getattr(msg, "reply_to", sender),
                      actor.BenchmarkFailure(ex_value, traceback.format_exc()))
Code example #5
    def receiveMsg_StartNodes(self, msg, sender):
        try:
            self.host = msg.ip
            if msg.external:
                self.logger.info("Connecting to externally provisioned nodes on [%s].", msg.ip)
            else:
                self.logger.info("Starting node(s) %s on [%s].", msg.node_ids, msg.ip)

            # Load node-specific configuration
            cfg = config.auto_load_local_config(
                msg.cfg,
                additional_sections=[
                    # only copy the relevant bits
                    "track",
                    "mechanic",
                    "client",
                    "telemetry",
                    # allow metrics store to extract race meta-data
                    "race",
                    "source",
                ],
            )
            # set root path (normally done by the main entry point)
            cfg.add(config.Scope.application, "node", "rally.root", paths.rally_root())
            if not msg.external:
                cfg.add(config.Scope.benchmark, "provisioning", "node.ids", msg.node_ids)

            cls = metrics.metrics_store_class(cfg)
            metrics_store = cls(cfg)
            metrics_store.open(ctx=msg.open_metrics_context)
            # avoid follow-up errors in case we receive an unexpected ActorExitRequest due to an early failure in a parent actor.

            self.mechanic = create(
                cfg,
                metrics_store,
                msg.ip,
                msg.port,
                msg.all_node_ips,
                msg.all_node_ids,
                msg.sources,
                msg.distribution,
                msg.external,
                msg.docker,
            )
            self.mechanic.start_engine()
            self.wakeupAfter(METRIC_FLUSH_INTERVAL_SECONDS)
            self.send(getattr(msg, "reply_to", sender), NodesStarted())
        except Exception:
            self.logger.exception("Cannot process message [%s]", msg)
            # avoid "can't pickle traceback objects"
            _, ex_value, _ = sys.exc_info()
            self.send(getattr(msg, "reply_to", sender), actor.BenchmarkFailure(ex_value, traceback.format_exc()))
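
The two receiveMsg_StartNodes variants above read a series of attributes from the incoming StartNodes message. The real message class is defined in Rally's mechanic module; the sketch below only mirrors the attribute accesses visible in the second variant and is not the actual definition:

class StartNodes:
    # hypothetical shape inferred from the attribute accesses above
    def __init__(self, cfg, open_metrics_context, sources, distribution,
                 external, docker, ip, port, node_ids, all_node_ids, all_node_ips):
        self.cfg = cfg                      # base config to be copied per node
        self.open_metrics_context = open_metrics_context
        self.sources = sources
        self.distribution = distribution
        self.external = external            # True for externally provisioned clusters
        self.docker = docker
        self.ip = ip
        self.port = port
        self.node_ids = node_ids
        self.all_node_ids = all_node_ids
        self.all_node_ips = all_node_ips
        # reply_to is optional; the handlers fall back to the actor system's
        # sender via getattr(msg, "reply_to", sender)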
Code example #6
File: config_test.py Project: ynuosoft/rally
    def test_can_migrate_outdated_config(self):
        base_cfg = config.Config(config_name="unittest",
                                 config_file_class=InMemoryConfigStore)
        base_cfg.add(config.Scope.application, "meta", "config.version",
                     config.Config.CURRENT_CONFIG_VERSION)
        base_cfg.add(config.Scope.application, "benchmarks",
                     "local.dataset.cache", "/base-config/data-set-cache")
        base_cfg.add(config.Scope.application, "unit-test", "sample.property",
                     "let me copy you")

        cfg = config.auto_load_local_config(
            base_cfg,
            additional_sections=["unit-test"],
            config_file_class=InMemoryConfigStore,
            present=True,
            config={
                "distributions": {
                    "release.url": "https://acme.com/releases",
                    "release.cache": "true",
                },
                "system": {
                    "env.name": "existing-unit-test-config"
                },
                # outdated
                "meta": {
                    # ensure we don't attempt to migrate if that version is unsupported
                    "config.version":
                    max(config.Config.CURRENT_CONFIG_VERSION - 1,
                        config.Config.EARLIEST_SUPPORTED_VERSION)
                },
                "benchmarks": {
                    "local.dataset.cache": "/tmp/rally/data"
                },
                "runtime": {
                    "java8.home": "/opt/jdk8"
                },
            },
        )
        assert cfg.config_file.present
        # did not just copy base config
        assert base_cfg.opts("benchmarks", "local.dataset.cache") != cfg.opts(
            "benchmarks", "local.dataset.cache")
        # migrated existing config
        assert int(cfg.opts(
            "meta", "config.version")) == config.Config.CURRENT_CONFIG_VERSION
Code example #7
    def test_can_create_non_existing_config(self):
        base_cfg = config.Config(config_name="unittest", config_file_class=InMemoryConfigStore)
        base_cfg.add(config.Scope.application, "meta", "config.version", config.Config.CURRENT_CONFIG_VERSION)
        base_cfg.add(config.Scope.application, "benchmarks", "local.dataset.cache", "/base-config/data-set-cache")
        base_cfg.add(config.Scope.application, "reporting", "datastore.type", "elasticsearch")
        base_cfg.add(config.Scope.application, "tracks", "metrics.url", "http://github.com/org/metrics")
        base_cfg.add(config.Scope.application, "teams", "private.url", "http://github.com/org/teams")
        base_cfg.add(config.Scope.application, "distributions", "release.cache", False)
        base_cfg.add(config.Scope.application, "defaults", "preserve_benchmark_candidate", True)

        cfg = config.auto_load_local_config(base_cfg, config_file_class=InMemoryConfigStore)
        self.assertTrue(cfg.config_file.present)
        # did not just copy base config
        self.assertNotEqual(base_cfg.opts("benchmarks", "local.dataset.cache"), cfg.opts("benchmarks", "local.dataset.cache"))
        # copied sections from base config
        self.assert_equals_base_config(base_cfg, cfg, "reporting", "datastore.type")
        self.assert_equals_base_config(base_cfg, cfg, "tracks", "metrics.url")
        self.assert_equals_base_config(base_cfg, cfg, "teams", "private.url")
        self.assert_equals_base_config(base_cfg, cfg, "distributions", "release.cache")
        self.assert_equals_base_config(base_cfg, cfg, "defaults", "preserve_benchmark_candidate")
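
Taken together, the config_test.py examples pin down the contract of config.auto_load_local_config: a missing local config is created from the base config, an existing one keeps its own values, an outdated config.version is migrated, and only the explicitly requested additional sections are copied over. A condensed, self-contained sketch of that decision flow over plain dicts, written purely to summarize the asserted behaviour (not Rally's implementation):

CURRENT_CONFIG_VERSION = 17   # illustrative value only


def auto_load_sketch(base, local, additional_sections=()):
    # base and local are nested dicts of the form {section: {key: value}};
    # local is None when no local config file exists yet.
    if local is None:
        # no existing config: take everything from the base config
        return {section: dict(keys) for section, keys in base.items()}
    merged = {section: dict(keys) for section, keys in local.items()}
    if int(merged["meta"]["config.version"]) < CURRENT_CONFIG_VERSION:
        # outdated: migrate (a real migration also adjusts other properties)
        merged["meta"]["config.version"] = CURRENT_CONFIG_VERSION
    for section in additional_sections:
        # copy only the explicitly requested sections from the base config
        merged[section] = dict(base.get(section, {}))
    return merged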
Code example #8
    def receiveMessage(self, msg, sender):
        # at the moment, we implement all message handling blocking. This is not ideal but simple to get started with. Besides, the caller
        # needs to block anyway. The only reason we implement mechanic as an actor is to distribute them.
        # noinspection PyBroadException
        try:
            logger.debug(
                "NodeMechanicActor#receiveMessage(msg = [%s] sender = [%s])" %
                (str(type(msg)), str(sender)))
            if isinstance(msg, StartNodes):
                self.host = msg.ip
                if msg.external:
                    logger.info(
                        "Connecting to externally provisioned nodes on [%s]." %
                        msg.ip)
                else:
                    logger.info("Starting node(s) %s on [%s]." %
                                (msg.node_ids, msg.ip))

                # Load node-specific configuration
                self.config = config.auto_load_local_config(
                    msg.cfg,
                    additional_sections=[
                        # only copy the relevant bits
                        "track",
                        "mechanic",
                        "client",
                        # allow metrics store to extract race meta-data
                        "race",
                        "source"
                    ])
                # set root path (normally done by the main entry point)
                self.config.add(config.Scope.application, "node", "rally.root",
                                paths.rally_root())
                if not msg.external:
                    self.config.add(config.Scope.benchmark, "provisioning",
                                    "node.ip", msg.ip)
                    # we need to override the port with the value that the user has specified instead of using the default value (39200)
                    self.config.add(config.Scope.benchmark, "provisioning",
                                    "node.http.port", msg.port)
                    self.config.add(config.Scope.benchmark, "provisioning",
                                    "node.ids", msg.node_ids)

                self.metrics_store = metrics.InMemoryMetricsStore(self.config)
                self.metrics_store.open(ctx=msg.open_metrics_context)
                # avoid follow-up errors in case we receive an unexpected ActorExitRequest due to an early failure in a parent actor.
                self.metrics_store.lap = 0

                self.mechanic = create(self.config, self.metrics_store,
                                       msg.all_node_ips, msg.cluster_settings,
                                       msg.sources, msg.build,
                                       msg.distribution, msg.external,
                                       msg.docker)
                nodes = self.mechanic.start_engine()
                self.running = True
                self.send(
                    sender,
                    NodesStarted([NodeMetaInfo(node) for node in nodes],
                                 self.metrics_store.meta_info))
            elif isinstance(msg, ApplyMetricsMetaInfo):
                self.metrics_store.merge_meta_info(msg.meta_info)
                self.send(sender, MetricsMetaInfoApplied())
            elif isinstance(msg, ResetRelativeTime):
                logger.info(
                    "Resetting relative time of system metrics store on host [%s]."
                    % self.host)
                self.metrics_store.reset_relative_time()
            elif isinstance(msg, OnBenchmarkStart):
                self.metrics_store.lap = msg.lap
                self.mechanic.on_benchmark_start()
                self.send(sender, BenchmarkStarted())
            elif isinstance(msg, OnBenchmarkStop):
                self.mechanic.on_benchmark_stop()
                # clear metrics store data to not send duplicate system metrics data
                self.send(
                    sender,
                    BenchmarkStopped(
                        self.metrics_store.to_externalizable(clear=True)))
            elif isinstance(msg, StopNodes):
                logger.info("Stopping nodes %s." % self.mechanic.nodes)
                self.mechanic.stop_engine()
                self.send(sender,
                          NodesStopped(self.metrics_store.to_externalizable()))
                # clear all state as the mechanic might get reused later
                self.running = False
                self.config = None
                self.mechanic = None
                self.metrics_store = None
            elif isinstance(msg, thespian.actors.ActorExitRequest):
                if self.running:
                    logger.info("Stopping nodes %s (due to ActorExitRequest)" %
                                self.mechanic.nodes)
                    self.mechanic.stop_engine()
                    self.running = False
        except BaseException:
            self.running = False
            logger.exception("Cannot process message [%s]" % msg)
            # avoid "can't pickle traceback objects"
            import traceback
            ex_type, ex_value, ex_traceback = sys.exc_info()
            self.send(sender,
                      actor.BenchmarkFailure(ex_value, traceback.format_exc()))
Code example #9
File: mechanic.py Project: danielmitterdorfer/rally
    def receiveMessage(self, msg, sender):
        # at the moment, we implement all message handling blocking. This is not ideal but simple to get started with. Besides, the caller
        # needs to block anyway. The only reason we implement mechanic as an actor is to distribute them.
        # noinspection PyBroadException
        try:
            logger.debug("NodeMechanicActor#receiveMessage(msg = [%s] sender = [%s])" % (str(type(msg)), str(sender)))
            if isinstance(msg, StartNodes):
                self.host = msg.ip
                if msg.external:
                    logger.info("Connecting to externally provisioned nodes on [%s]." % msg.ip)
                else:
                    logger.info("Starting node(s) %s on [%s]." % (msg.node_ids, msg.ip))

                # Load node-specific configuration
                self.config = config.auto_load_local_config(msg.cfg, additional_sections=[
                    # only copy the relevant bits
                    "track", "mechanic", "client",
                    # allow metrics store to extract race meta-data
                    "race",
                    "source"
                ])
                # set root path (normally done by the main entry point)
                self.config.add(config.Scope.application, "node", "rally.root", paths.rally_root())
                if not msg.external:
                    self.config.add(config.Scope.benchmark, "provisioning", "node.ip", msg.ip)
                    # we need to override the port with the value that the user has specified instead of using the default value (39200)
                    self.config.add(config.Scope.benchmark, "provisioning", "node.http.port", msg.port)
                    self.config.add(config.Scope.benchmark, "provisioning", "node.ids", msg.node_ids)

                self.metrics_store = metrics.InMemoryMetricsStore(self.config)
                self.metrics_store.open(ctx=msg.open_metrics_context)
                # avoid follow-up errors in case we receive an unexpected ActorExitRequest due to an early failure in a parent actor.
                self.metrics_store.lap = 0

                self.mechanic = create(self.config, self.metrics_store, msg.all_node_ips, msg.cluster_settings, msg.sources, msg.build,
                                       msg.distribution, msg.external, msg.docker)
                nodes = self.mechanic.start_engine()
                self.running = True
                self.send(sender, NodesStarted([NodeMetaInfo(node) for node in nodes], self.metrics_store.meta_info))
            elif isinstance(msg, ApplyMetricsMetaInfo):
                self.metrics_store.merge_meta_info(msg.meta_info)
                self.send(sender, MetricsMetaInfoApplied())
            elif isinstance(msg, ResetRelativeTime):
                logger.info("Resetting relative time of system metrics store on host [%s]." % self.host)
                self.metrics_store.reset_relative_time()
            elif isinstance(msg, OnBenchmarkStart):
                self.metrics_store.lap = msg.lap
                self.mechanic.on_benchmark_start()
                self.send(sender, BenchmarkStarted())
            elif isinstance(msg, OnBenchmarkStop):
                self.mechanic.on_benchmark_stop()
                # clear metrics store data to not send duplicate system metrics data
                self.send(sender, BenchmarkStopped(self.metrics_store.to_externalizable(clear=True)))
            elif isinstance(msg, StopNodes):
                logger.info("Stopping nodes %s." % self.mechanic.nodes)
                self.mechanic.stop_engine()
                self.send(sender, NodesStopped(self.metrics_store.to_externalizable()))
                # clear all state as the mechanic might get reused later
                self.running = False
                self.config = None
                self.mechanic = None
                self.metrics_store = None
            elif isinstance(msg, thespian.actors.ActorExitRequest):
                if self.running:
                    logger.info("Stopping nodes %s (due to ActorExitRequest)" % self.mechanic.nodes)
                    self.mechanic.stop_engine()
                    self.running = False
        except BaseException:
            self.running = False
            logger.exception("Cannot process message [%s]" % msg)
            # avoid "can't pickle traceback objects"
            import traceback
            ex_type, ex_value, ex_traceback = sys.exc_info()
            self.send(sender, actor.BenchmarkFailure(ex_value, traceback.format_exc()))