示例#1
0
def glusterd2_setup(config):
    """Deploy Glusterd2 services and per-node pods, wait for the gluster
    cluster to form, then register each node's raw devices with its peer.

    config keys used: "nodes" (list of {"address", "devices"}), "namespace",
    "template-args" (mutated: "kube_hostname" is rewritten per node/peer).
    """
    template_kube_apply(config, ManifestGd2Services)
    info("Glusterd2 services created")

    # One manifest per node: render the node template with the node's
    # address and apply it under a per-node filename.
    for node in config["nodes"]:
        config["template-args"]["kube_hostname"] = node["address"]
        node_manifest_file = ManifestGd2Node.replace(
            ".yml", "-" + node["address"] + ".yml")
        template_kube_apply(config,
                            node_manifest_file,
                            template_file=ManifestGd2Node)
        info("Glusterd2 pod created", address=node["address"])

    gd2_client_endpoint = kubectl_get(namespace=config["namespace"],
                                      jsonpath="spec.clusterIP",
                                      gettype="service",
                                      name="glusterd2-client",
                                      retries=5,
                                      delay=5,
                                      label="Fetching Glusterd2 Cluster IP")
    gd2_client_endpoint = "http://%s:24007" % gd2_client_endpoint
    info("Fetched Glusterd2 ClusterIP", ip=gd2_client_endpoint)

    def check_num_peers(cmdout):
        # /v1/peers returns a JSON list; the cluster is formed once every
        # configured node shows up as a peer.
        peers = json.loads(cmdout.strip())
        return len(peers) == len(config["nodes"])

    curl_cmd = "curl %s/v1/peers" % gd2_client_endpoint
    peers = kubectl_exec(config["namespace"],
                         "gluster-%s-0" % config["nodes"][0]["address"],
                         curl_cmd,
                         out_expect_fn=check_num_peers,
                         retries=50,
                         delay=10,
                         label="Fetching the Gluster Peer info")

    info("Glusterd2 cluster is ready")

    peers_json = json.loads(peers)

    # Build the address -> devices map once instead of scanning the node
    # list for every peer.  `.get("devices", [])` mirrors the validation
    # code, which treats "devices" as optional.
    devices_by_address = {
        node["address"]: node.get("devices", []) for node in config["nodes"]
    }

    for peer in peers_json:
        # Peer name is expected to look like "gluster-<address>-0";
        # NOTE(review): split('-')[1] truncates addresses containing '-'
        # — confirm addresses never contain a dash.
        kube_hostname = peer["name"].split('-')[1]
        config["template-args"]["kube_hostname"] = kube_hostname

        for device in devices_by_address.get(kube_hostname, []):
            cmd = "glustercli device add %s %s" % (peer["id"], device)
            kubectl_exec(config["namespace"],
                         "gluster-%s-0" % config["nodes"][0]["address"],
                         cmd,
                         retries=5,
                         delay=5,
                         label="Adding device to peer")
            info("Added device", peer=kube_hostname, device=device)
示例#2
0
def etcd_setup(config):
    """Bring up the etcd operator and the etcd cluster, then publish the
    etcd client endpoint into config["template-args"]."""
    template_kube_apply(config, ManifestEtcdOperator)
    info("Etcd Operator created")

    kubectl_get(
        namespace=config["namespace"],
        jsonpath="status.availableReplicas",
        gettype="deployment",
        name="etcd-operator",
        retries=50,
        delay=10,
        out_expect="1",
        label="Checking etcd operator status",
    )
    info("Etcd Operator is ready")

    # Applying the cluster manifest is retried; the operator must have
    # registered its CRDs before this can succeed.
    template_kube_apply(
        config,
        ManifestEtcdCluster,
        retries=5,
        delay=5,
        label="Checking Etcd Operator status",
    )
    info("Etcd cluster created")

    cluster_ip = kubectl_get(
        namespace=config["namespace"],
        jsonpath="spec.clusterIP",
        gettype="service",
        name="etcd-client",
        retries=5,
        delay=5,
        label="Fetch etcd Cluster IP",
    )

    etcd_client_endpoint = "http://%s:2379" % cluster_ip
    config["template-args"]["etcd_client_endpoint"] = etcd_client_endpoint
    info("Etcd Service IP is available", endpoint=etcd_client_endpoint)

    def check_num_etcd_running(out):
        # 3 etcd pods + one header line in CLI
        return len(out.split("\n")) == 4

    kubectl_get(
        namespace=config["namespace"],
        jsonpath="",
        gettype="pods",
        name="",
        extra_args=[
            "-l", "etcd_cluster=etcd",
            "--field-selector=status.phase=Running",
        ],
        out_expect_fn=check_num_etcd_running,
        retries=50,
        delay=10,
        label="Check for Etcd cluster status",
    )

    info("Etcd cluster is UP")
示例#3
0
def virtblock_csi_setup(config):
    """Deploy the GlusterCS virtual-block CSI driver, wait until the
    provisioner, attacher and per-node plugins are ready, then create
    the virtblock storage class.

    config keys used: "namespace", "cluster-size".
    """
    template_kube_apply(config, ManifestVirtBlockCSI)
    info("GlusterCS Virtblock CSI driver pods created")

    # The three readiness waits only differ in resource, readiness field,
    # expected count and messages — drive them from one table.
    # (gettype, name, jsonpath, expected, wait-label, ready-message)
    readiness_checks = (
        ("statefulset", "csi-glustervirtblock-provisioner",
         "status.readyReplicas", "1",
         "Checking Virtblock CSI Provisioner state",
         "CSI Virtblock provisioner is Ready"),
        ("statefulset", "csi-glustervirtblock-attacher",
         "status.readyReplicas", "1",
         "Checking Virtblock CSI attacher status",
         "CSI Virtblock attacher is Ready"),
        ("daemonset", "csi-glustervirtblock-nodeplugin",
         "status.numberAvailable", "%s" % config["cluster-size"],
         "Checking Virtblock CSI node plugins",
         "Virtblock CSI node plugins are Ready"),
    )
    for gettype, name, jsonpath, expected, label, ready_msg in readiness_checks:
        kubectl_get(
            namespace=config["namespace"],
            jsonpath=jsonpath,
            gettype=gettype,
            name=name,
            retries=50,
            delay=10,
            out_expect=expected,
            label=label
        )
        info(ready_msg)

    template_kube_apply(config, ManifestVirtBlockCSIStorageClass)
    info("Virtblock Storage class created")
示例#4
0
def fs_csi_setup(config):
    """Deploy the GlusterCS FS CSI driver, wait until the provisioner,
    attacher and per-node plugins are ready, then create the storage and
    snapshot classes.

    config keys used: "namespace", "cluster-size".
    """
    template_kube_apply(config, ManifestFsCSI)
    info("GlusterCS FS CSI driver pods created")

    # The three readiness waits only differ in resource, readiness field,
    # expected count and messages — drive them from one table.
    # (gettype, name, jsonpath, expected, wait-label, ready-message)
    readiness_checks = (
        ("statefulset", "csi-glusterfsplugin-provisioner",
         "status.readyReplicas", "1",
         "Checking FS CSI Provisioner state",
         "FS CSI provisioner is Ready"),
        ("statefulset", "csi-glusterfsplugin-attacher",
         "status.readyReplicas", "1",
         "Checking FS CSI attacher status",
         "FS CSI attacher is Ready"),
        ("daemonset", "csi-glusterfsplugin-nodeplugin",
         "status.numberAvailable", "%s" % config["cluster-size"],
         "Checking CSI node plugins",
         "FS CSI node plugins are Ready"),
    )
    for gettype, name, jsonpath, expected, label, ready_msg in readiness_checks:
        kubectl_get(
            namespace=config["namespace"],
            jsonpath=jsonpath,
            gettype=gettype,
            name=name,
            retries=50,
            delay=10,
            out_expect=expected,
            label=label
        )
        info(ready_msg)

    template_kube_apply(config, ManifestStorageSnapshot)
    info("FS Storage and Snapshot class created")
示例#5
0
def deploy_args_validate(config):
    """Validate the deploy configuration in place.

    Checks that cluster-size and the number of nodes match
    defaultClusterSize, defaults "namespace" when unset (written back into
    config), and validates each node entry.  Problems are reported via
    error()/warn().
    """
    # Hard coded to 3 for now
    if config.get("cluster-size", 0) != defaultClusterSize:
        error("Invalid cluster-size.",
              actual=config.get("cluster-size", 0),
              required=defaultClusterSize)

    if not config.get("namespace", ""):
        config["namespace"] = defaultNamespace
        warn("Namespace not specified, using default namespace.",
             name=defaultNamespace)

    if len(config.get("nodes", [])) != defaultClusterSize:
        error("Invalid number of nodes provided",
              actual=len(config.get("nodes", [])),
              required=defaultClusterSize)

    # Plain iteration: the enumerate() index was never used.
    for node in config["nodes"]:
        if not node.get("address", ""):
            # NOTE(review): the node["address"] accesses below assume
            # error() aborts execution — confirm it does not return.
            error("Invalid address provided for one or more node")

        if len(node.get("devices", [])) == 0:
            warn("No raw devices provided", address=node["address"])

        info("Node details",
             address=node["address"],
             devices=",".join(node.get("devices", [])))

    info("Cluster Size", clusterSize=config["cluster-size"])
    info("Namespace", namespace=config["namespace"])
示例#6
0
def gcs_namespace_setup(config):
    """Create the GCS namespace by applying its manifest."""
    template_kube_apply(config, ManifestGcsNamespace)
    info("GCS Namespace created")
示例#7
0
def monitoring_setup(config):
    """Deploy the Prometheus operator, verify its CRDs are usable, then
    roll out the exporter, alerting and Grafana manifests."""
    template_kube_apply(config, ManifestPromethesOperator)
    info("Prometheus operator created")

    kubectl_get(
        namespace=config["namespace"],
        jsonpath="status.availableReplicas",
        gettype="deployment",
        name="prometheus-operator",
        retries=50,
        delay=10,
        out_expect="1",
        label="Checking Prometheus Operator",
    )
    info("Prometheus operator is Ready")

    kubectl_get(
        namespace="",
        jsonpath="",
        gettype="customresourcedefinitions",
        name="servicemonitors.monitoring.coreos.com",
        retries=30,
        delay=10,
        label="Checking Prometheus CRD",
    )
    info("Prometheus CRD check successful")

    kubectl_get(
        namespace=config["namespace"],
        jsonpath="",
        gettype="servicemonitors",
        name="",
        retries=30,
        delay=10,
    )
    info("Prometheus service monitor CRD check successful")

    kubectl_get(
        namespace=config["namespace"],
        jsonpath="",
        gettype="Prometheus",
        name="",
        retries=30,
        delay=10,
    )
    info("Prometheus namespace check successful")

    # (description, manifest) pairs applied in order.
    extra_manifests = [
        ("Prometheus services, ServiceMonitor and Prometheus Objects",
         ManifestPrometheusBundle),
        ("Kube-State-Metrics Exporter", ManifestPrometheusKubeStateMetrics),
        ("Kubelet, APIServer and CoreDNS", ManifestPrometheusKubeMetrics),
        ("Etcd Operator and the etcd cluster Exporters",
         ManifestPrometheusEtcdMetrics),
        ("Node exporter", ManifestPrometheusNodeMetrics),
        ("Prometheus Operator Exporter", ManifestPrometheusOperatorMetrics),
        ("Alert manager Cluster", ManifestPrometheusAlertManager),
        ("Grafana mixins", ManifestGrafanaMixins),
        ("Grafana dashboard", ManifestGrafanaDashboard),
    ]

    for description, manifest in extra_manifests:
        template_kube_apply(
            config,
            manifest,
            retries=2,
            delay=10,
            label="Checking status of %s" % description,
        )
        info(description + " deployed")
示例#8
0
def etcd_setup(config):
    """Bring up the etcd operator and etcd cluster, verify the pods and
    the cluster membership, and publish the etcd client endpoint into
    config["template-args"]["etcd_client_endpoint"].

    config keys used: "namespace", "template-args" (mutated).
    """
    template_kube_apply(config, ManifestEtcdOperator)
    info("Etcd Operator created")

    # Wait until the operator deployment reports one available replica.
    kubectl_get(
        namespace=config["namespace"],
        jsonpath="status.availableReplicas",
        gettype="deployment",
        name="etcd-operator",
        retries=50,
        delay=10,
        out_expect="1",
        label="Checking etcd operator status"
    )

    info("Etcd Operator is ready")

    # Applying the cluster manifest is retried; presumably the operator
    # must have registered its CRDs first — confirm against the operator.
    template_kube_apply(
        config,
        ManifestEtcdCluster,
        retries=5,
        delay=5,
        label="Checking Etcd Operator status"
    )
    info("Etcd cluster created")

    etcd_client_endpoint = kubectl_get(
        namespace=config["namespace"],
        jsonpath="spec.clusterIP",
        gettype="service",
        name="etcd-client",
        retries=5,
        delay=5,
        label="Fetch etcd Cluster IP"
    )

    def check_num_etcd_running(out):
        # 3 etcd pods + one header line in CLI
        return len(out.split("\n")) == 4

    etcd_client_endpoint = "http://%s:2379" % etcd_client_endpoint
    config["template-args"]["etcd_client_endpoint"] = etcd_client_endpoint
    info("Etcd Service IP is available", endpoint=etcd_client_endpoint)
    # Wait until all etcd pods are in the Running phase.
    kubectl_get(
        namespace=config["namespace"],
        jsonpath="",
        gettype="pods",
        name="",
        extra_args=["-l", "etcd_cluster=etcd",
                    "--field-selector=status.phase=Running"],
        out_expect_fn=check_num_etcd_running,
        retries=50,
        delay=10,
        label="Check for Etcd cluster status"
    )

    info("Etcd pods are UP")

    def check_etcd_cluster_ready(cmdout):
        # Cluster is ready once the members API reports all 3 members.
        members_data = json.loads(cmdout.strip())
        return len(members_data.get("members", [])) == 3

    # Etcd port-forward so that status can be checked
    with kubectl_context_run(
            ["port-forward",
             "svc/etcd-client",
             "31000:2379",
             "-n%s" % config["namespace"]]):

        # Query the v2 members API through the forwarded local port.
        execute(
            ["curl", "http://localhost:31000/v2/members"],
            out_expect_fn=check_etcd_cluster_ready,
            retries=50,
            delay=10,
            label="Checking etcd cluster ready"
        )

    info("Etcd cluster is ready")
示例#9
0
def glusterd2_setup(config):
    """Deploy Glusterd2 services and per-node pods, wait for all pods and
    the gluster cluster to be ready, then register each node's raw devices
    with its peer.

    config keys used: "nodes" (list of {"address", "devices"}), "namespace",
    "template-args" (mutated: "kube_hostname" is rewritten per node/peer).
    """
    template_kube_apply(config, ManifestGd2Services)
    info("Glusterd2 services created")

    # One manifest per node: render the node template with the node's
    # address and apply it under a per-node filename.
    for node in config["nodes"]:
        config["template-args"]["kube_hostname"] = node["address"]
        node_manifest_file = ManifestGd2Node.replace(
            ".yml", "-" + node["address"] + ".yml")
        template_kube_apply(
            config,
            node_manifest_file,
            template_file=ManifestGd2Node
        )
        info("Glusterd2 pod created", address=node["address"])

    def check_num_glusterd2_running(out):
        # glusterd2 pods + one header line in CLI
        return len(out.split("\n")) == (len(config["nodes"]) + 1)

    # Wait until every glusterd2 pod is in the Running phase.
    kubectl_get(
        namespace=config["namespace"],
        jsonpath="",
        gettype="pods",
        name="",
        extra_args=["-l", "app.kubernetes.io/name=glusterd2",
                    "--field-selector=status.phase=Running"],
        out_expect_fn=check_num_glusterd2_running,
        retries=50,
        delay=10,
        label="Check for Glusterd2 pods status"
    )
    info("glusterd2 pods are UP")

    # Talk to the glusterd2 REST API via the first node's pod hostname.
    gd2_client_endpoint = "http://gluster-%s-0:24007" % (
        config["nodes"][0]["address"]
    )

    def check_num_peers(cmdout):
        # /v1/peers returns a JSON list; the cluster is formed once every
        # configured node shows up as a peer.
        peers = json.loads(cmdout.strip())
        return len(peers) == len(config["nodes"])

    curl_cmd = "curl %s/v1/peers" % gd2_client_endpoint
    peers = kubectl_exec(
        config["namespace"],
        "gluster-%s-0" % config["nodes"][0]["address"],
        curl_cmd,
        out_expect_fn=check_num_peers,
        retries=50,
        delay=10,
        label="Fetching the Gluster Peer info"
    )

    info("Glusterd2 cluster is ready")

    peers_json = json.loads(peers)

    for peer in peers_json:
        # Peer name appears to follow "gluster-<address>-0";
        # NOTE(review): split('-')[1] truncates addresses containing '-'
        # — confirm addresses never contain a dash.
        kube_hostname = peer["name"].split('-')[1]
        config["template-args"]["kube_hostname"] = kube_hostname
        devices = []
        # Look up the devices configured for this peer's node.
        for node in config["nodes"]:
            if node["address"] == kube_hostname:
                devices = node["devices"]
                break

        # Register each raw device with the peer via glustercli, executed
        # inside the first node's pod.
        for device in devices:
            cmd = "glustercli device add %s %s" % (peer["id"], device)
            kubectl_exec(
                config["namespace"],
                "gluster-%s-0" % config["nodes"][0]["address"],
                cmd,
                retries=5,
                delay=5,
                label="Adding device to peer"
            )
            info("Added device", peer=kube_hostname, device=device)