def test_minio_setup(self, chi, minio_spec):
    """Verify the clickhouse-operator and minio deployments exist and that
    the running minio image matches the version pinned in settings."""
    with Given("clickhouse-operator is installed"):
        operator_pods = kubectl.get_count(
            "pod",
            ns=settings.operator_namespace,
            label="-l app=clickhouse-operator",
        )
        assert operator_pods > 0, error(
            "please run deploy/operator/clickhouse-operator-install.sh before run test"
        )
        util.set_operator_version(settings.operator_version)
        util.set_metrics_exporter_version(settings.operator_version)

    with Given("minio-operator is installed"):
        minio_operator_pods = kubectl.get_count(
            "pod", ns=settings.minio_namespace, label="-l name=minio-operator"
        )
        assert minio_operator_pods > 0, error(
            "please run deploy/minio/install-minio-operator.sh before test run"
        )
        minio_pods = kubectl.get_count(
            "pod", ns=settings.minio_namespace, label="-l app=minio"
        )
        assert minio_pods > 0, error(
            "please run deploy/minio/install-minio.sh before test run"
        )

        # The deployed minio container image must match the pinned version.
        minio_expected_version = f"minio/minio:{settings.minio_version}"
        actual_image = minio_spec["items"][0]["spec"]["containers"][0]["image"]
        assert minio_expected_version in actual_image, error(
            f"require {minio_expected_version} image"
        )
def test_metrics_exporter_setup(self):
    """Check that operator pods are running, then pin the metrics-exporter version."""
    with Given("clickhouse-operator is installed"):
        pod_count = kubectl.get_count(
            "pod", ns='--all-namespaces', label=util.operator_label
        )
        assert pod_count > 0, error()
        with Then(f"Set metrics-exporter version {settings.operator_version}"):
            util.set_metrics_exporter_version(settings.operator_version)
# Ejemplo n.º 3 (0)
def set_operator_version(version, ns=settings.operator_namespace, timeout=600):
    """Roll the clickhouse-operator deployment's containers to `version`.

    Updates both the clickhouse-operator and metrics-exporter container
    images, waits for the rollout, then fails if no operator pod remains.
    No-op when operator installation is disabled in settings.
    """
    if settings.operator_install != 'yes':
        return
    operator_image = f"{settings.operator_docker_repo}:{version}"
    metrics_exporter_image = f"{settings.metrics_exporter_docker_repo}:{version}"
    deployment = "deployment.v1.apps/clickhouse-operator"

    kubectl.launch(f"set image {deployment} clickhouse-operator={operator_image}", ns=ns)
    kubectl.launch(f"set image {deployment} metrics-exporter={metrics_exporter_image}", ns=ns)
    kubectl.launch(f"rollout status {deployment}", ns=ns, timeout=timeout)
    if kubectl.get_count("pod", ns=ns, label=operator_label) == 0:
        fail("invalid clickhouse-operator pod count")
def test_prometheus_setup(self, prometheus_operator_spec, clickhouse_operator_spec, chi):
    """Verify the clickhouse-operator and the prometheus stack
    (operator, prometheus, alertmanager) are deployed with expected versions."""
    with Given("clickhouse-operator is installed"):
        operator_pods = kubectl.get_count(
            "pod",
            ns=settings.operator_namespace,
            label="-l app=clickhouse-operator",
        )
        assert operator_pods > 0, error(
            "please run deploy/operator/clickhouse-operator-install.sh before run test"
        )
        util.set_operator_version(settings.operator_version)
        util.set_metrics_exporter_version(settings.operator_version)

    with Given("prometheus-operator is installed"):
        # Each component of the prometheus stack needs at least one pod.
        required_labels = (
            "-l app.kubernetes.io/component=controller,app.kubernetes.io/name=prometheus-operator",
            "-l app=prometheus,prometheus=prometheus",
            "-l app=alertmanager,alertmanager=alertmanager",
        )
        for label in required_labels:
            pods = kubectl.get_count("pod", ns=settings.prometheus_namespace, label=label)
            assert pods > 0, error(
                "please run deploy/promehteus/create_prometheus.sh before test run"
            )
        expected_version = f"quay.io/prometheus-operator/prometheus-operator:v{settings.prometheus_operator_version}"
        actual_image = prometheus_operator_spec["items"][0]["spec"]["containers"][0]["image"]
        assert expected_version in actual_image, error(
            f"require {expected_version} image"
        )
# Ejemplo n.º 5 (0)
def install_operator_if_not_exist(reinstall=False, manifest=None):
    """Install the clickhouse-operator unless it is already running.

    :param reinstall: when True, re-apply the manifest even if operator pods exist.
    :param manifest: path to the operator install manifest. Defaults to the
        configured ``settings.clickhouse_operator_install_manifest``, resolved
        at call time (the original default-argument call was evaluated once at
        import time, ignoring later changes to settings).
    """
    if settings.operator_install != 'yes':
        return
    if manifest is None:
        # Resolve lazily so the current settings value is honored.
        manifest = get_full_path(settings.clickhouse_operator_install_manifest)
    with Given(f"clickhouse-operator version {settings.operator_version} is installed"):
        if kubectl.get_count("pod", ns=settings.operator_namespace, label="-l app=clickhouse-operator") == 0 or reinstall:
            # Substitute image/namespace variables into the manifest with
            # envsubst via bash process substitution before applying it.
            kubectl.apply(
                ns=settings.operator_namespace,
                manifest=f"<(cat {manifest} | "
                       f"OPERATOR_IMAGE=\"{settings.operator_docker_repo}:{settings.operator_version}\" "
                       f"OPERATOR_NAMESPACE=\"{settings.operator_namespace}\" "
                       f"METRICS_EXPORTER_IMAGE=\"{settings.metrics_exporter_docker_repo}:{settings.operator_version}\" "
                       f"METRICS_EXPORTER_NAMESPACE=\"{settings.operator_namespace}\" "
                       f"envsubst)",
                validate=False
            )
        set_operator_version(settings.operator_version)
# Ejemplo n.º 6 (0)
def require_zookeeper(zk_manifest='zookeeper-1-node-1GB-for-tests-only.yaml', force_install=False):
    """Deploy a Zookeeper ensemble unless a 'zookeeper' service already exists."""
    if not force_install and kubectl.get_count("service", name="zookeeper") != 0:
        return
    zk_manifest = f"../../deploy/zookeeper/quick-start-persistent-volume/{zk_manifest}"
    docs = yaml_manifest.get_multidoc_manifest_data(get_full_path(zk_manifest, lookup_in_host=True))
    zk_nodes = 1
    doc_count = 0
    for doc_count, doc in enumerate(docs, start=1):
        if doc_count == 4:
            # The 4th document carries the replica count, i.e. how many
            # zookeeper pods to wait for.
            zk_nodes = doc["spec"]["replicas"]
    assert doc_count == 4, "invalid zookeeper manifest, expected 4 documents in yaml file"
    with Given(f"Install Zookeeper {zk_nodes} nodes"):
        kubectl.apply(get_full_path(zk_manifest, lookup_in_host=False))
        for pod_index in range(zk_nodes):
            kubectl.wait_object("pod", f"zookeeper-{pod_index}")
        for pod_index in range(zk_nodes):
            kubectl.wait_pod_status(f"zookeeper-{pod_index}", "Running")
# Ejemplo n.º 7 (0)
def require_keeper(keeper_manifest='', keeper_type='zookeeper', force_install=False):
    """Deploy the requested keeper flavor (zookeeper, clickhouse-keeper or
    zookeeper-operator) unless a service named after it already exists."""
    if not force_install and kubectl.get_count("service", name=keeper_type) != 0:
        return

    # Per-flavor default manifest file name and its deploy-tree directory.
    manifest_defaults = {
        "zookeeper": ("zookeeper-1-node-1GB-for-tests-only.yaml",
                      "../../deploy/zookeeper/quick-start-persistent-volume"),
        "clickhouse-keeper": ("clickhouse-keeper-1-node-256M-for-test-only.yaml",
                              "../../deploy/clickhouse-keeper"),
        "zookeeper-operator": ("zookeeper-operator-1-node.yaml",
                               "../../deploy/zookeeper-operator"),
    }
    if keeper_type in manifest_defaults:
        default_file, deploy_dir = manifest_defaults[keeper_type]
        if keeper_manifest == '':
            keeper_manifest = default_file
        keeper_manifest = f"{deploy_dir}/{keeper_manifest}"

    multi_doc = yaml_manifest.get_multidoc_manifest_data(get_full_path(keeper_manifest, lookup_in_host=True))
    keeper_nodes = 1
    docs_count = 0
    for doc in multi_doc:
        docs_count += 1
        if doc["kind"] in ("StatefulSet", "ZookeeperCluster"):
            # The workload document's replica count drives how many pods to await.
            keeper_nodes = doc["spec"]["replicas"]

    expected_docs = {
        "zookeeper": 6 if 'scaleout-pvc' in keeper_manifest else 4,
        "clickhouse-keeper": 6,
        "zookeeper-operator": 1,
    }
    expected_pod_prefix = {
        "zookeeper": "zookeeper",
        "zookeeper-operator": "zookeeper",
        "clickhouse-keeper": "clickhouse-keeper",
    }
    assert docs_count == expected_docs[keeper_type], f"invalid {keeper_type} manifest, expected {expected_docs[keeper_type]}, actual {docs_count} documents in {keeper_manifest} file"
    pod_prefix = expected_pod_prefix[keeper_type]
    with Given(f"Install {keeper_type} {keeper_nodes} nodes"):
        kubectl.apply(get_full_path(keeper_manifest, lookup_in_host=False))
        for pod_num in range(keeper_nodes):
            kubectl.wait_object("pod", f"{pod_prefix}-{pod_num}")
        for pod_num in range(keeper_nodes):
            kubectl.wait_pod_status(f"{pod_prefix}-{pod_num}", "Running")
def test_metrics_exporter_reboot(self):
    """Check that metrics-exporter re-discovers monitored CHI objects after
    its container is killed and restarted inside the operator pod."""
    def check_monitoring_chi(operator_namespace,
                             operator_pod,
                             expect_result,
                             max_retries=10):
        # Poll the metrics-exporter HTTP API (inside the operator pod) until
        # /chi returns `expect_result`, sleeping i*5 seconds between retries.
        with Then(
                f"metrics-exporter /chi endpoint result should return {expect_result}"
        ):
            for i in range(1, max_retries):
                # check /metrics for try to refresh monitored instances
                url_cmd = util.make_http_get_request("127.0.0.1", "8888",
                                                     "/metrics")
                kubectl.launch(
                    f"exec {operator_pod} -c metrics-exporter -- {url_cmd}",
                    ns=operator_namespace)
                # check /chi after refresh monitored instances
                url_cmd = util.make_http_get_request("127.0.0.1", "8888",
                                                     "/chi")
                out = kubectl.launch(
                    f"exec {operator_pod} -c metrics-exporter -- {url_cmd}",
                    ns=operator_namespace)
                out = json.loads(out)
                if out == expect_result:
                    break
                with Then("Not ready. Wait for " + str(i * 5) + " seconds"):
                    time.sleep(i * 5)
            # Fails if the expected state never appeared within max_retries.
            assert out == expect_result, error()

    with Given("clickhouse-operator is installed"):
        # Both containers (operator + metrics-exporter) must report ready.
        kubectl.wait_field("pods",
                           util.operator_label,
                           ".status.containerStatuses[*].ready",
                           "true,true",
                           ns=settings.operator_namespace)
        assert kubectl.get_count("pod",
                                 ns='--all-namespaces',
                                 label=util.operator_label) > 0, error()

        # First column of the first data row is the operator pod name.
        out = kubectl.launch("get pods -l app=clickhouse-operator",
                             ns=settings.operator_namespace).splitlines()[1]
        operator_pod = re.split(r'[\t\r\n\s]+', out)[0]
        operator_namespace = settings.operator_namespace
        # Start from a clean test namespace; exporter should then report no CHIs.
        kubectl.delete_ns(kubectl.namespace, ok_to_fail=True)
        kubectl.create_ns(kubectl.namespace)
        check_monitoring_chi(operator_namespace, operator_pod, [])
        with And("created simple clickhouse installation"):
            manifest = "../../docs/chi-examples/01-simple-layout-01-1shard-1repl.yaml"
            kubectl.create_and_check(manifest=manifest,
                                     check={
                                         "object_counts": {
                                             "statefulset": 1,
                                             "pod": 1,
                                             "service": 2,
                                         },
                                         "do_not_delete": True,
                                     })
            expected_chi = [{
                "namespace":
                "test",
                "name":
                "simple-01",
                "hostnames":
                ["chi-simple-01-simple-0-0.test.svc.cluster.local"]
            }]
            check_monitoring_chi(operator_namespace, operator_pod,
                                 expected_chi)
            with When("reboot metrics exporter"):
                # Kill PID 1 in the metrics-exporter container so kubernetes
                # restarts it, then wait for both containers to become ready.
                kubectl.launch(
                    f"exec -n {operator_namespace} {operator_pod} -c metrics-exporter -- bash -c 'kill 1'"
                )
                time.sleep(15)
                kubectl.wait_field("pods",
                                   util.operator_label,
                                   ".status.containerStatuses[*].ready",
                                   "true,true",
                                   ns=settings.operator_namespace)
                with Then("check metrics exporter still contains chi objects"):
                    check_monitoring_chi(operator_namespace, operator_pod,
                                         expected_chi)
                    # Deleting the CHI must empty the exporter's /chi list again.
                    kubectl.delete(util.get_full_path(manifest,
                                                      lookup_in_host=False),
                                   timeout=600)
                    check_monitoring_chi(operator_namespace, operator_pod, [])