Example #1
def create(cfg, metrics_store, node_ip, node_http_port, all_node_ips, all_node_ids, sources=False, distribution=False,
           external=False, docker=False):
    race_root_path = paths.race_root(cfg)
    node_ids = cfg.opts("provisioning", "node.ids", mandatory=False)
    node_name_prefix = cfg.opts("provisioning", "node.name.prefix")
    car, plugins = load_team(cfg, external)

    if sources or distribution:
        s = supplier.create(cfg, sources, distribution, car, plugins)
        p = []
        all_node_names = ["%s-%s" % (node_name_prefix, n) for n in all_node_ids]
        for node_id in node_ids:
            node_name = "%s-%s" % (node_name_prefix, node_id)
            p.append(
                provisioner.local(cfg, car, plugins, node_ip, node_http_port, all_node_ips,
                                  all_node_names, race_root_path, node_name))
        l = launcher.ProcessLauncher(cfg)
    elif external:
        raise exceptions.RallyAssertionError("Externally provisioned clusters should not need to be managed by Rally's mechanic")
    elif docker:
        if len(plugins) > 0:
            raise exceptions.SystemSetupError("You cannot specify any plugins for Docker clusters. Please remove "
                                              "\"--elasticsearch-plugins\" and try again.")
        s = lambda: None
        p = []
        for node_id in node_ids:
            node_name = "%s-%s" % (node_name_prefix, node_id)
            p.append(provisioner.docker(cfg, car, node_ip, node_http_port, race_root_path, node_name))
        l = launcher.DockerLauncher(cfg)
    else:
        # It is a programmer error (and not a user error) if this function is called with wrong parameters
        raise RuntimeError("One of sources, distribution, docker or external must be True")

    return Mechanic(cfg, metrics_store, s, p, l)
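For illustration, the per-node loop above creates one provisioner per node id and names each node as prefix plus id. With a hypothetical prefix of "rally-node" and two node ids, the naming works out as follows (values are placeholders, only the expression is taken from the snippet):

# Hypothetical values, only to show the naming scheme used above.
node_name_prefix = "rally-node"
all_node_ids = [0, 1]
all_node_names = ["%s-%s" % (node_name_prefix, n) for n in all_node_ids]
# -> ['rally-node-0', 'rally-node-1']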
Example #2
    def test_starts_container_successfully(self, run_subprocess_with_output,
                                           run_subprocess_with_logging):
        run_subprocess_with_logging.return_value = 0
        # Docker container id (from docker-compose ps), Docker container id (from docker ps --filter ...)
        run_subprocess_with_output.side_effect = [["de604d0d"], ["de604d0d"]]
        cfg = config.Config()
        docker = launcher.DockerLauncher(cfg)

        node_config = NodeConfiguration(build_type="docker",
                                        car_env={},
                                        car_runtime_jdks="12,11",
                                        ip="127.0.0.1",
                                        node_name="testnode",
                                        node_root_path="/tmp",
                                        binary_path="/bin",
                                        data_paths="/tmp")

        nodes = docker.start([node_config])
        self.assertEqual(1, len(nodes))
        node = nodes[0]

        self.assertEqual(0, node.pid)
        self.assertEqual("/bin", node.binary_path)
        self.assertEqual("127.0.0.1", node.host_name)
        self.assertEqual("testnode", node.node_name)
        self.assertIsNotNone(node.telemetry)

        run_subprocess_with_logging.assert_called_once_with(
            "docker-compose -f /bin/docker-compose.yml up -d")
        run_subprocess_with_output.assert_has_calls([
            mock.call("docker-compose -f /bin/docker-compose.yml ps -q"),
            mock.call(
                'docker ps -a --filter "id=de604d0d" --filter "status=running" --filter "health=healthy" -q'
            )
        ])
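The two mock parameters in the test signature imply @mock.patch decorators above the method that are outside this excerpt. A plausible arrangement is sketched below; the patch target esrally.utils.process is an assumption, and the key point is that the innermost decorator supplies the first mock argument after self:

    # Hypothetical decorators matching the parameter order
    # (self, run_subprocess_with_output, run_subprocess_with_logging);
    # the innermost patch maps to the first mock argument.
    @mock.patch("esrally.utils.process.run_subprocess_with_logging")
    @mock.patch("esrally.utils.process.run_subprocess_with_output")
    def test_starts_container_successfully(self, run_subprocess_with_output, run_subprocess_with_logging):
        ...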
Example #3
    def test_container_not_started(self, run_subprocess_with_output,
                                   run_subprocess_with_logging, sleep):
        run_subprocess_with_logging.return_value = 0
        # Docker container id (from docker-compose ps), but no healthy running container id (from docker ps --filter ...) on either of the two checks
        run_subprocess_with_output.side_effect = [["de604d0d"], [], []]
        cfg = config.Config()
        # ensure we only check the status two times
        stop_watch = DockerLauncherTests.IterationBasedStopWatch(
            max_iterations=2)
        docker = launcher.DockerLauncher(
            cfg, clock=DockerLauncherTests.TestClock(stop_watch=stop_watch))

        node_config = NodeConfiguration(build_type="docker",
                                        car_env={},
                                        car_runtime_jdks="12,11",
                                        ip="127.0.0.1",
                                        node_name="testnode",
                                        node_root_path="/tmp",
                                        binary_path="/bin",
                                        data_paths="/tmp")

        with self.assertRaisesRegex(
                exceptions.LaunchError,
                "No healthy running container after 600 seconds!"):
            docker.start([node_config])
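The IterationBasedStopWatch and TestClock helpers referenced above are not part of the excerpt. A minimal sketch of what they might look like, assuming the launcher obtains a stop watch from the clock and compares its split_time() against the 600-second timeout (the method names are inferred from this usage and are not confirmed by the excerpt):

    class IterationBasedStopWatch:
        """Reports zero elapsed time for max_iterations calls, then pretends the timeout has passed."""

        def __init__(self, max_iterations):
            self.max_iterations = max_iterations
            self.iterations = 0

        def start(self):
            self.iterations = 0

        def split_time(self):
            if self.iterations < self.max_iterations:
                self.iterations += 1
                return 0
            # pretend far more than 600 seconds have elapsed so the launcher gives up
            return 10000

    class TestClock:
        def __init__(self, stop_watch):
            self._stop_watch = stop_watch

        def stop_watch(self):
            return self._stop_watch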
Example #4
def create(cfg,
           metrics_store,
           single_machine=True,
           sources=False,
           build=False,
           distribution=False,
           external=False,
           docker=False):
    challenge_root_path = paths.race_root(cfg)
    install_dir = "%s/install" % challenge_root_path
    log_dir = "%s/logs" % challenge_root_path
    io.ensure_dir(log_dir)

    if sources:
        try:
            src_dir = cfg.opts("source", "local.src.dir")
        except config.ConfigError:
            logger.exception("Cannot determine source directory")
            raise exceptions.SystemSetupError(
                "You cannot benchmark Elasticsearch from sources. Are you missing Gradle 2.13? Please install"
                " all prerequisites and reconfigure Rally with %s configure" %
                PROGRAM_NAME)

        remote_url = cfg.opts("source", "remote.repo.url")
        revision = cfg.opts("mechanic", "source.revision")
        gradle = cfg.opts("build", "gradle.bin")
        java_home = cfg.opts("runtime", "java8.home")

        s = lambda: supplier.from_sources(remote_url, src_dir, revision,
                                          gradle, java_home, log_dir, build)
        p = provisioner.local_provisioner(cfg, install_dir, single_machine)
        l = launcher.InProcessLauncher(cfg, metrics_store, challenge_root_path,
                                       log_dir)
    elif distribution:
        version = cfg.opts("mechanic", "distribution.version")
        repo_name = cfg.opts("mechanic", "distribution.repository")
        distributions_root = "%s/%s" % (cfg.opts("node", "root.dir"),
                                        cfg.opts("source", "distribution.dir"))

        s = lambda: supplier.from_distribution(version=version,
                                               repo_name=repo_name,
                                               distributions_root=distributions_root)
        p = provisioner.local_provisioner(cfg, install_dir, single_machine)
        l = launcher.InProcessLauncher(cfg, metrics_store, challenge_root_path,
                                       log_dir)
    elif external:
        s = lambda: None
        p = provisioner.no_op_provisioner(cfg)
        l = launcher.ExternalLauncher(cfg, metrics_store)
    elif docker:
        s = lambda: None
        p = provisioner.docker_provisioner(cfg, install_dir)
        l = launcher.DockerLauncher(cfg, metrics_store)
    else:
        # It is a programmer error (and not a user error) if this function is called with wrong parameters
        raise RuntimeError(
            "One of sources, distribution, docker or external must be True")

    return Mechanic(s, p, l)
Example #5
def create(cfg,
           metrics_store,
           sources=False,
           build=False,
           distribution=False,
           external=False,
           docker=False):
    if sources:
        s = lambda: supplier.from_sources(cfg, build)
        p = provisioner.local_provisioner(cfg)
        l = launcher.InProcessLauncher(cfg, metrics_store)
    elif distribution:
        s = lambda: supplier.from_distribution(cfg)
        p = provisioner.local_provisioner(cfg)
        l = launcher.InProcessLauncher(cfg, metrics_store)
    elif external:
        s = lambda: None
        p = provisioner.no_op_provisioner(cfg)
        l = launcher.ExternalLauncher(cfg, metrics_store)
    elif docker:
        s = lambda: None
        p = provisioner.no_op_provisioner(cfg)
        l = launcher.DockerLauncher(cfg, metrics_store)
    else:
        # It is a programmer error (and not a user error) if this function is called with wrong parameters
        raise RuntimeError(
            "One of sources, distribution, docker or external must be True")

    return Mechanic(cfg, s, p, l)
Example #6
def create(cfg,
           metrics_store,
           all_node_ips,
           cluster_settings=None,
           sources=False,
           build=False,
           distribution=False,
           external=False,
           docker=False):
    races_root = paths.races_root(cfg)
    challenge_root_path = paths.race_root(cfg)
    node_ids = cfg.opts("provisioning", "node.ids", mandatory=False)
    car, plugins = load_team(cfg, external)

    if sources or distribution:
        s = supplier.create(cfg, sources, distribution, build,
                            challenge_root_path, car, plugins)
        p = []
        for node_id in node_ids:
            p.append(
                provisioner.local_provisioner(cfg, car, plugins,
                                              cluster_settings, all_node_ips,
                                              challenge_root_path, node_id))
        l = launcher.InProcessLauncher(cfg, metrics_store, races_root)
    elif external:
        if cluster_settings:
            logging.getLogger(__name__).warning(
                "Cannot apply challenge-specific cluster settings [%s] for an externally provisioned cluster. Please ensure that the cluster "
                "settings are present or the benchmark may fail or behave unexpectedly."
                % cluster_settings)
        if len(plugins) > 0:
            raise exceptions.SystemSetupError(
                "You cannot specify any plugins for externally provisioned clusters. Please remove "
                "\"--elasticsearch-plugins\" and try again.")

        s = lambda: None
        p = [provisioner.no_op_provisioner()]
        l = launcher.ExternalLauncher(cfg, metrics_store)
    elif docker:
        if len(plugins) > 0:
            raise exceptions.SystemSetupError(
                "You cannot specify any plugins for Docker clusters. Please remove "
                "\"--elasticsearch-plugins\" and try again.")
        s = lambda: None
        p = []
        for node_id in node_ids:
            p.append(
                provisioner.docker_provisioner(cfg, car, cluster_settings,
                                               challenge_root_path, node_id))
        l = launcher.DockerLauncher(cfg, metrics_store)
    else:
        # It is a programmer error (and not a user error) if this function is called with wrong parameters
        raise RuntimeError(
            "One of sources, distribution, docker or external must be True")

    return Mechanic(s, p, l)
Example #7
    def test_stops_container_successfully(self, run_subprocess_with_logging, add_metadata_for_node):
        cfg = config.Config()
        metrics_store = None
        docker = launcher.DockerLauncher(cfg)

        nodes = [cluster.Node(0, "/bin", "127.0.0.1", "testnode", telemetry.Telemetry())]

        docker.stop(nodes, metrics_store=metrics_store)

        add_metadata_for_node.assert_called_once_with(metrics_store, "testnode", "127.0.0.1")

        run_subprocess_with_logging.assert_called_once_with("docker-compose -f /bin/docker-compose.yml down")
Example #8
def stop(cfg):
    root_path = paths.install_root(cfg)
    node_config = provisioner.load_node_configuration(root_path)
    if node_config.build_type == "tar":
        node_launcher = launcher.ProcessLauncher(cfg)
    elif node_config.build_type == "docker":
        node_launcher = launcher.DockerLauncher(cfg)
    else:
        raise exceptions.SystemSetupError("Unknown build type [{}]".format(
            node_config.build_type))

    nodes, race_id = _load_node_file(root_path)

    cls = metrics.metrics_store_class(cfg)
    metrics_store = cls(cfg)

    race_store = metrics.race_store(cfg)
    try:
        current_race = race_store.find_by_race_id(race_id)
    except exceptions.NotFound:
        logging.getLogger(__name__).info(
            "Could not find race [%s] most likely because an in-memory metrics store is "
            "used across multiple machines. Use an Elasticsearch metrics store to persist "
            "results.", race_id)
    # we are assuming here that we use an Elasticsearch metrics store. If we use a file race store (across
        # multiple machines) we will not be able to retrieve a race. In that case we open our in-memory metrics store
        # with settings derived from startup parameters (because we can't store system metrics persistently anyway).
        current_race = metrics.create_race(cfg, track=None, challenge=None)

    metrics_store.open(race_id=current_race.race_id,
                       race_timestamp=current_race.race_timestamp,
                       track_name=current_race.track_name,
                       challenge_name=current_race.challenge_name)

    node_launcher.stop(nodes, metrics_store)
    _delete_node_file(root_path)

    metrics_store.flush(refresh=True)
    for node in nodes:
        results = metrics.calculate_system_results(metrics_store,
                                                   node.node_name)
        current_race.add_results(results)
        metrics.results_store(cfg).store_results(current_race)

    metrics_store.close()

    # TODO: Do we need to expose this as a separate command as well?
    provisioner.cleanup(preserve=cfg.opts("mechanic", "preserve.install"),
                        install_dir=node_config.binary_path,
                        data_paths=node_config.data_paths)
Example #9
def stop(cfg):
    root_path = paths.install_root(cfg)
    node_config = provisioner.load_node_configuration(root_path)
    if node_config.build_type == "tar":
        node_launcher = launcher.ProcessLauncher(cfg)
    elif node_config.build_type == "docker":
        node_launcher = launcher.DockerLauncher(cfg)
    else:
        raise exceptions.SystemSetupError("Unknown build type [{}]".format(
            node_config.build_type))

    nodes, race_id = _load_node_file(root_path)

    cls = metrics.metrics_store_class(cfg)
    metrics_store = cls(cfg)

    race_store = metrics.race_store(cfg)
    try:
        current_race = race_store.find_by_race_id(race_id)
        metrics_store.open(race_id=current_race.race_id,
                           race_timestamp=current_race.race_timestamp,
                           track_name=current_race.track_name,
                           challenge_name=current_race.challenge_name)
    except exceptions.NotFound:
        logging.getLogger(__name__).info(
            "Could not find race [%s] and will thus not persist system metrics.",
            race_id)
        # Don't persist system metrics if we can't retrieve the race as we cannot derive the required meta-data.
        current_race = None
        metrics_store = None

    node_launcher.stop(nodes, metrics_store)
    _delete_node_file(root_path)

    if current_race:
        metrics_store.flush(refresh=True)
        for node in nodes:
            results = metrics.calculate_system_results(metrics_store,
                                                       node.node_name)
            current_race.add_results(results)
            metrics.results_store(cfg).store_results(current_race)

        metrics_store.close()

    # TODO: Do we need to expose this as a separate command as well?
    provisioner.cleanup(preserve=cfg.opts("mechanic", "preserve.install"),
                        install_dir=node_config.binary_path,
                        data_paths=node_config.data_paths)
Example #10
def start(cfg):
    root_path = paths.install_root(cfg)
    race_id = cfg.opts("system", "race.id")
    # avoid double-launching - we expect that the node file is absent
    with contextlib.suppress(FileNotFoundError):
        _load_node_file(root_path)
        install_id = cfg.opts("system", "install.id")
        raise exceptions.SystemSetupError("A node with this installation id is already running. Please stop it first "
                                          "with {} stop --installation-id={}".format(PROGRAM_NAME, install_id))

    node_config = provisioner.load_node_configuration(root_path)

    if node_config.build_type == "tar":
        node_launcher = launcher.ProcessLauncher(cfg)
    elif node_config.build_type == "docker":
        node_launcher = launcher.DockerLauncher(cfg)
    else:
        raise exceptions.SystemSetupError("Unknown build type [{}]".format(node_config.build_type))
    nodes = node_launcher.start([node_config])
    _store_node_file(root_path, (nodes, race_id))
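_load_node_file, _store_node_file and _delete_node_file are module-level helpers that the excerpts call but do not show. A minimal sketch, assuming they simply pickle the (nodes, race_id) tuple into a file under the installation root; the file name "node" and the use of pickle are assumptions, not confirmed by the excerpts:

import os
import pickle


def _node_file(root_path):
    # Hypothetical location of the node file inside the installation root.
    return os.path.join(root_path, "node")


def _store_node_file(root_path, data):
    with open(_node_file(root_path), "wb") as f:
        pickle.dump(data, f)


def _load_node_file(root_path):
    with open(_node_file(root_path), "rb") as f:
        return pickle.load(f)


def _delete_node_file(root_path):
    os.remove(_node_file(root_path))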
Example #11
def create(cfg,
           metrics_store,
           all_node_ips,
           cluster_settings=None,
           sources=False,
           build=False,
           distribution=False,
           external=False,
           docker=False):
    races_root = paths.races_root(cfg)
    challenge_root_path = paths.race_root(cfg)
    node_ids = cfg.opts("provisioning", "node.ids", mandatory=False)
    repo = team.team_repo(cfg)
    # externally provisioned clusters do not support cars / plugins
    if external:
        car = None
        plugins = []
    else:
        car = team.load_car(repo, cfg.opts("mechanic", "car.names"))
        plugins = team.load_plugins(repo, cfg.opts("mechanic", "car.plugins"))

    if sources:
        try:
            src_dir = cfg.opts("node", "src.root.dir")
        except config.ConfigError:
            logger.exception("Cannot determine source directory")
            raise exceptions.SystemSetupError(
                "You cannot benchmark Elasticsearch from sources. Did you install Gradle? Please install"
                " all prerequisites and reconfigure Rally with %s configure" %
                PROGRAM_NAME)

        remote_url = cfg.opts("source", "remote.repo.url")
        revision = cfg.opts("mechanic", "source.revision")
        gradle = cfg.opts("build", "gradle.bin")
        java_home = cfg.opts("runtime", "java.home")
        src_config = cfg.all_opts("source")
        s = lambda: supplier.from_sources(
            remote_url, src_dir, revision, gradle, java_home,
            challenge_root_path, plugins, src_config, build)
        p = []
        for node_id in node_ids:
            p.append(
                provisioner.local_provisioner(cfg, car, plugins,
                                              cluster_settings, all_node_ips,
                                              challenge_root_path, node_id))
        l = launcher.InProcessLauncher(cfg, metrics_store, races_root)
    elif distribution:
        version = cfg.opts("mechanic", "distribution.version")
        repo_name = cfg.opts("mechanic", "distribution.repository")
        distributions_root = "%s/%s" % (cfg.opts("node", "root.dir"),
                                        cfg.opts("source", "distribution.dir"))
        distribution_cfg = cfg.all_opts("distributions")

        s = lambda: supplier.from_distribution(
            version=version,
            repo_name=repo_name,
            distribution_config=distribution_cfg,
            distributions_root=distributions_root,
            plugins=plugins)
        p = []
        for node_id in node_ids:
            p.append(
                provisioner.local_provisioner(cfg, car, plugins,
                                              cluster_settings, all_node_ips,
                                              challenge_root_path, node_id))
        l = launcher.InProcessLauncher(cfg, metrics_store, races_root)
    elif external:
        if cluster_settings:
            logger.warning(
                "Cannot apply challenge-specific cluster settings [%s] for an externally provisioned cluster. Please ensure "
                "that the cluster settings are present or the benchmark may fail or behave unexpectedly."
                % cluster_settings)
        if len(plugins) > 0:
            raise exceptions.SystemSetupError(
                "You cannot specify any plugins for externally provisioned clusters. Please remove "
                "\"--elasticsearch-plugins\" and try again.")

        s = lambda: None
        p = [provisioner.no_op_provisioner()]
        l = launcher.ExternalLauncher(cfg, metrics_store)
    elif docker:
        if len(plugins) > 0:
            raise exceptions.SystemSetupError(
                "You cannot specify any plugins for Docker clusters. Please remove "
                "\"--elasticsearch-plugins\" and try again.")
        s = lambda: None
        p = []
        for node_id in node_ids:
            p.append(
                provisioner.docker_provisioner(cfg, car, cluster_settings,
                                               challenge_root_path, node_id))
        l = launcher.DockerLauncher(cfg, metrics_store)
    else:
        # It is a programmer error (and not a user error) if this function is called with wrong parameters
        raise RuntimeError(
            "One of sources, distribution, docker or external must be True")

    return Mechanic(s, p, l)