Example #1
    # Patch targets inferred from the mock parameter names; the decorators are not shown in the original snippet.
    @mock.patch("shutil.rmtree")
    @mock.patch("os.path.exists")
    def test_preserves(self, mock_path_exists, mock_rm):
        mock_path_exists.return_value = True

        provisioner.cleanup(preserve=True, install_dir="./rally/races/install", data_paths=["./rally/races/data"])

        # With preserve=True nothing should be checked or deleted.
        self.assertEqual(mock_path_exists.call_count, 0)
        self.assertEqual(mock_rm.call_count, 0)
Example #2
    # Patch targets inferred from the mock parameter names; the decorators are not shown in the original snippet.
    @mock.patch("shutil.rmtree")
    @mock.patch("os.path.exists")
    def test_cleanup(self, mock_path_exists, mock_rm):
        mock_path_exists.return_value = True

        provisioner.cleanup(preserve=False, install_dir="./rally/races/install", data_paths=["./rally/races/data"])

        # Expect the data paths and install dir passed above to be checked and removed. Assigning to ``mock_calls``
        # (as the original snippet did) asserts nothing; assert_has_calls makes a mismatch actually fail the test.
        expected_dir_calls = [mock.call("./rally/races/data"), mock.call("./rally/races/install")]
        mock_path_exists.assert_has_calls(expected_dir_calls, any_order=True)
        mock_rm.assert_has_calls(expected_dir_calls, any_order=True)
Example #3
def stop(cfg):
    root_path = paths.install_root(cfg)
    node_config = provisioner.load_node_configuration(root_path)
    if node_config.build_type == "tar":
        node_launcher = launcher.ProcessLauncher(cfg)
    elif node_config.build_type == "docker":
        node_launcher = launcher.DockerLauncher(cfg)
    else:
        raise exceptions.SystemSetupError("Unknown build type [{}]".format(
            node_config.build_type))

    nodes, race_id = _load_node_file(root_path)

    cls = metrics.metrics_store_class(cfg)
    metrics_store = cls(cfg)

    race_store = metrics.race_store(cfg)
    try:
        current_race = race_store.find_by_race_id(race_id)
    except exceptions.NotFound:
        logging.getLogger(__name__).info(
            "Could not find race [%s] most likely because an in-memory metrics store is "
            "used across multiple machines. Use an Elasticsearch metrics store to persist "
            "results.", race_id)
        # We normally assume that an Elasticsearch metrics store is used. If we use a file race store (across
        # multiple machines) we will not be able to retrieve the race. In that case we open our in-memory metrics
        # store with settings derived from startup parameters (because we can't store system metrics persistently
        # anyway).
        current_race = metrics.create_race(cfg, track=None, challenge=None)

    metrics_store.open(race_id=current_race.race_id,
                       race_timestamp=current_race.race_timestamp,
                       track_name=current_race.track_name,
                       challenge_name=current_race.challenge_name)

    node_launcher.stop(nodes, metrics_store)
    _delete_node_file(root_path)

    metrics_store.flush(refresh=True)
    for node in nodes:
        results = metrics.calculate_system_results(metrics_store,
                                                   node.node_name)
        current_race.add_results(results)
        metrics.results_store(cfg).store_results(current_race)

    metrics_store.close()

    # TODO: Do we need to expose this as a separate command as well?
    provisioner.cleanup(preserve=cfg.opts("mechanic", "preserve.install"),
                        install_dir=node_config.binary_path,
                        data_paths=node_config.data_paths)
Example #4
def stop(cfg):
    root_path = paths.install_root(cfg)
    node_config = provisioner.load_node_configuration(root_path)
    if node_config.build_type == "tar":
        node_launcher = launcher.ProcessLauncher(cfg)
    elif node_config.build_type == "docker":
        node_launcher = launcher.DockerLauncher(cfg)
    else:
        raise exceptions.SystemSetupError("Unknown build type [{}]".format(
            node_config.build_type))

    nodes, race_id = _load_node_file(root_path)

    cls = metrics.metrics_store_class(cfg)
    metrics_store = cls(cfg)

    race_store = metrics.race_store(cfg)
    try:
        current_race = race_store.find_by_race_id(race_id)
        metrics_store.open(race_id=current_race.race_id,
                           race_timestamp=current_race.race_timestamp,
                           track_name=current_race.track_name,
                           challenge_name=current_race.challenge_name)
    except exceptions.NotFound:
        logging.getLogger(__name__).info(
            "Could not find race [%s] and will thus not persist system metrics.",
            race_id)
        # Don't persist system metrics if we can't retrieve the race as we cannot derive the required meta-data.
        current_race = None
        metrics_store = None

    node_launcher.stop(nodes, metrics_store)
    _delete_node_file(root_path)

    if current_race:
        metrics_store.flush(refresh=True)
        for node in nodes:
            results = metrics.calculate_system_results(metrics_store,
                                                       node.node_name)
            current_race.add_results(results)
            metrics.results_store(cfg).store_results(current_race)

        metrics_store.close()

    # TODO: Do we need to expose this as a separate command as well?
    provisioner.cleanup(preserve=cfg.opts("mechanic", "preserve.install"),
                        install_dir=node_config.binary_path,
                        data_paths=node_config.data_paths)
Example #5
    def stop_engine(self):
        self.logger.info("Stopping nodes %s.", self.nodes)
        self.launcher.stop(self.nodes, self.metrics_store)
        self.flush_metrics(refresh=True)
        try:
            current_race = self._current_race()
            for node in self.nodes:
                self._add_results(current_race, node)
        except exceptions.NotFound as e:
            self.logger.warning("Cannot store system metrics: %s.", str(e))

        self.metrics_store.close()
        self.nodes = []
        for node_config in self.node_configs:
            provisioner.cleanup(preserve=self.preserve_install,
                                install_dir=node_config.binary_path,
                                data_paths=node_config.data_paths)
        self.node_configs = []