Example No. 1
def test_minio_setup(self):
    with Given("clickhouse-operator is installed"):
        assert kubectl.get_count(
            "pod",
            ns=settings.operator_namespace,
            label="-l app=clickhouse-operator"
        ) > 0, error(
            "please run deploy/operator/clickhouse-operator-install.sh before running the test"
        )
        util.set_operator_version(settings.operator_version)
        util.set_metrics_exporter_version(settings.operator_version)

    with Given("minio-operator is installed"):
        assert kubectl.get_count(
            "pod", ns=settings.minio_namespace, label="-l name=minio-operator"
        ) > 0, error(
            "please run deploy/minio/install-minio-operator.sh before running the test"
        )
        assert kubectl.get_count(
            "pod", ns=settings.minio_namespace, label="-l app=minio"
        ) > 0, error(
            "please run deploy/minio/install-minio.sh before running the test"
        )

        minio_expected_version = f"minio/minio:{settings.minio_version}"
        minio_image = minio_spec["items"][0]["spec"]["containers"][0]["image"]
        assert minio_expected_version in minio_image, error(
            f"require {minio_expected_version} image"
        )
def test_019(config="configs/test-019-retain-volume.yaml"):
    require_zookeeper()

    chi = manifest.get_chi_name(util.get_full_path(config))
    kubectl.create_and_check(
        config=config,
        check={
            "pod_count": 1,
            "do_not_delete": 1,
        })

    create_non_replicated_table = "create table t1 Engine = Log as select 1 as a"
    create_replicated_table = """
    create table t2 
    Engine = ReplicatedMergeTree('/clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}', '{replica}')
    partition by tuple() order by a
    as select 1 as a""".replace('\r', '').replace('\n', '')

    with Given("ClickHouse has some data in place"):
        clickhouse.query(chi, sql=create_non_replicated_table)
        clickhouse.query(chi, sql=create_replicated_table)

    with When("CHI with retained volume is deleted"):
        pvc_count = kubectl.get_count("pvc")
        pv_count = kubectl.get_count("pv")

        kubectl.delete_chi(chi)

        with Then("PVC should be retained"):
            assert kubectl.get_count("pvc") == pvc_count
            assert kubectl.get_count("pv") == pv_count

    with When("Re-create CHI"):
        kubectl.create_and_check(
            config=config,
            check={
                "pod_count": 1,
                "do_not_delete": 1,
            })

    with Then("PVC should be re-mounted"):
        with And("Non-replicated table should have data"):
            out = clickhouse.query(chi, sql="select a from t1")
            assert out == "1"
        with And("Replicated table should have data"):
            out = clickhouse.query(chi, sql="select a from t2")
            assert out == "1"

    kubectl.delete_chi(chi)
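
The `{installation}`, `{cluster}`, `{shard}`, `{database}`, `{table}` and `{replica}` placeholders in the ReplicatedMergeTree path above are ClickHouse macros that the operator populates per replica, so each pod registers under its own ZooKeeper path. A minimal sketch of that substitution; the concrete values below are illustrative assumptions, not output of this test:

# hypothetical expansion of the macro-based ZooKeeper path for a single-replica CHI
zk_path_template = "/clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}"
example_zk_path = zk_path_template.format(
    installation="test-019-retain-volume",  # CHI (installation) name, assumed
    cluster="default",                      # cluster name inside the CHI, assumed
    shard="0",                              # shard index
    database="default",
    table="t2",
)
print(example_zk_path)  # /clickhouse/test-019-retain-volume/default/tables/0/default/t2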
def test_metrics_exporter_setup(self):
    with Given("clickhouse-operator is installed"):
        assert kubectl.get_count(
            "pod", ns='--all-namespaces',
            label="-l app=clickhouse-operator") > 0, error()
        with Then(f"Set metrics-exporter version {settings.operator_version}"):
            set_metrics_exporter_version(settings.operator_version)
Example No. 4
def require_zookeeper(manifest='zookeeper-1-node-1GB-for-tests-only.yaml', force_install=False):
    with Given("Install Zookeeper if missing"):
        if force_install or kubectl.get_count("service", name="zookeeper") == 0:
            config = util.get_full_path(f"../deploy/zookeeper/quick-start-persistent-volume/{manifest}")
            kubectl.apply(config)
            kubectl.wait_object("pod", "zookeeper-0")
            kubectl.wait_pod_status("zookeeper-0", "Running")
def set_operator_version(version, ns=settings.operator_namespace, timeout=60):
    operator_image = f"{settings.operator_docker_repo}:{version}"
    metrics_exporter_image = f"{settings.metrics_exporter_docker_repo}:{version}"
    kubectl.launch(f"set image deployment.v1.apps/clickhouse-operator clickhouse-operator={operator_image}", ns=ns)
    kubectl.launch(f"set image deployment.v1.apps/clickhouse-operator metrics-exporter={metrics_exporter_image}", ns=ns)
    kubectl.launch("rollout status deployment.v1.apps/clickhouse-operator", ns=ns, timeout=timeout)
    assert kubectl.get_count("pod", ns=ns, label="-l app=clickhouse-operator") > 0, error()
def test_metrics_exporter_reboot():
    def check_monitoring_chi(operator_namespace, operator_pod, expect_result, max_retries=10):
        with And(f"metrics-exporter /chi enpoint result should return {expect_result}"):
            for i in range(1, max_retries):
                # check /metrics for try to refresh monitored instances
                kubectl.launch(
                    f"exec {operator_pod} -c metrics-exporter -- wget -O- -q http://127.0.0.1:8888/metrics",
                    ns=operator_namespace
                )
                # check /chi after refresh monitored instances
                out = kubectl.launch(
                    f"exec {operator_pod} -c metrics-exporter -- wget -O- -q http://127.0.0.1:8888/chi",
                    ns=operator_namespace
                )
                out = json.loads(out)
                if out == expect_result:
                    break
                with Then("Not ready. Wait for " + str(i * 5) + " seconds"):
                    time.sleep(i * 5)
            assert out == expect_result, error()

    with Given("clickhouse-operator is installed"):
        kubectl.wait_field("pods", "-l app=clickhouse-operator", ".status.containerStatuses[*].ready", "true,true",
                           ns=settings.operator_namespace)
        assert kubectl.get_count("pod", ns='--all-namespaces', label="-l app=clickhouse-operator") > 0, error()

        out = kubectl.launch("get pods -l app=clickhouse-operator", ns=settings.operator_namespace).splitlines()[1]
        operator_pod = re.split(r'[\t\r\n\s]+', out)[0]
        operator_namespace = settings.operator_namespace
        kubectl.delete_ns(kubectl.namespace)
        kubectl.create_ns(kubectl.namespace)
        check_monitoring_chi(operator_namespace, operator_pod, [])
        with And("created simple clickhouse installation"):
            config = util.get_full_path("../docs/chi-examples/01-simple-layout-01-1shard-1repl.yaml")
            kubectl.create_and_check(
                config=config,
                check={
                    "object_counts": {
                        "statefulset": 1,
                        "pod": 1,
                        "service": 2,
                    },
                    "do_not_delete": True,
                })
            expected_chi = [{
                "namespace": "test", "name": "simple-01",
                "hostnames": ["chi-simple-01-cluster-0-0.test.svc.cluster.local"]
            }]
            check_monitoring_chi(operator_namespace, operator_pod, expected_chi)
            with When("reboot metrics exporter"):
                kubectl.launch(f"exec -n {operator_namespace} {operator_pod} -c metrics-exporter -- reboot")
                time.sleep(15)
                kubectl.wait_field("pods", "-l app=clickhouse-operator",
                                   ".status.containerStatuses[*].ready", "true,true",
                                   ns=settings.operator_namespace)
                with Then("check metrics exporter still contains chi objects"):
                    check_monitoring_chi(operator_namespace, operator_pod, expected_chi)
                    kubectl.delete(config)
                    check_monitoring_chi(operator_namespace, operator_pod, [])
Example No. 7
def set_operator_version(version, ns=settings.operator_namespace, timeout=60):
    operator_image = f"{settings.operator_docker_repo}:{version}"
    metrics_exporter_image = f"{settings.metrics_exporter_docker_repo}:{version}"
    kubectl.launch(f"set image deployment.v1.apps/clickhouse-operator clickhouse-operator={operator_image}", ns=ns)
    kubectl.launch(f"set image deployment.v1.apps/clickhouse-operator metrics-exporter={metrics_exporter_image}", ns=ns)
    kubectl.launch("rollout status deployment.v1.apps/clickhouse-operator", ns=ns, timeout=timeout)
    if kubectl.get_count("pod", ns=ns, label=operator_label) == 0:
        fail("invalid clickhouse-operator pod count")
def test_prometheus_setup():
    with Given("clickhouse-operator is installed"):
        assert kubectl.get_count("pod", ns=settings.operator_namespace,
                                 label="-l app=clickhouse-operator") > 0, error(
            "please run deploy/operator/clickhouse-operator-install.sh before run test")
        set_operator_version(settings.operator_version)
        set_metrics_exporter_version(settings.operator_version)

    with Given("prometheus-operator is installed"):
        assert kubectl.get_count("pod", ns=settings.prometheus_namespace,
                                 label="-l app.kubernetes.io/component=controller,app.kubernetes.io/name=prometheus-operator") > 0, error(
            "please run deploy/promehteus/create_prometheus.sh before test run")
        assert kubectl.get_count("pod", ns=settings.prometheus_namespace,
                                 label="-l app=prometheus,prometheus=prometheus") > 0, error(
            "please run deploy/promehteus/create_prometheus.sh before test run")
        assert kubectl.get_count("pod", ns=settings.prometheus_namespace,
                                 label="-l app=alertmanager,alertmanager=alertmanager") > 0, error(
            "please run deploy/promehteus/create_prometheus.sh before test run")
        prometheus_operator_exptected_version = f"quay.io/prometheus-operator/prometheus-operator:v{settings.prometheus_operator_version}"
        assert prometheus_operator_exptected_version in prometheus_operator_spec["items"][0]["spec"]["containers"][0]["image"], error(f"require {prometheus_operator_exptected_version} image")
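
As with `minio_spec` in Example No. 1, `prometheus_operator_spec` is referenced but never defined in this excerpt; it is presumably the parsed JSON of the prometheus-operator pods fetched earlier in the test. A minimal sketch under that assumption (the helper call and its signature are assumptions):

# hypothetical: fetch the prometheus-operator pod list as parsed JSON before the image check
prometheus_operator_spec = kubectl.get(
    "pod", name="", ns=settings.prometheus_namespace,
    label="-l app.kubernetes.io/component=controller,app.kubernetes.io/name=prometheus-operator"
)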
Example No. 9
from testflows.core import TestScenario, Name, When, Then, Given, And, main, run, Module, TE, args, Fail, Error
from testflows.asserts import error

if main():
    with Module("main"):
        with Given(f"Clean namespace {settings.test_namespace}"):
            kubectl.delete_all_chi(settings.test_namespace)
            kubectl.delete_ns(settings.test_namespace, ok_to_fail=True)
            kubectl.create_ns(settings.test_namespace)

        with Given(
                f"clickhouse-operator version {settings.operator_version} is installed"
        ):
            if kubectl.get_count("pod",
                                 ns=settings.operator_namespace,
                                 label="-l app=clickhouse-operator") == 0:
                config = util.get_full_path(
                    '../deploy/operator/clickhouse-operator-install-template.yaml'
                )
                kubectl.apply(
                    ns=settings.operator_namespace,
                    config=f"<(cat {config} | "
                    f"OPERATOR_IMAGE=\"{settings.operator_docker_repo}:{settings.operator_version}\" "
                    f"OPERATOR_NAMESPACE=\"{settings.operator_namespace}\" "
                    f"METRICS_EXPORTER_IMAGE=\"{settings.metrics_exporter_docker_repo}:{settings.operator_version}\" "
                    f"METRICS_EXPORTER_NAMESPACE=\"{settings.operator_namespace}\" "
                    f"envsubst)",
                    validate=False)
            test_operator.set_operator_version(settings.operator_version)
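
This entry point also relies on helper modules (`settings`, `kubectl`, `util`, `test_operator`) that are not imported in the excerpt; they belong to the test suite itself. A minimal sketch of the assumed imports (module names and layout are assumptions based on the surrounding examples):

# hypothetical: local helper modules from the test suite, assumed to sit next to this file;
# adjust the import paths to the actual package layout
import settings
import kubectl
import util
import test_operator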