Example #1
    def __init__(self):
        self.settings = SettingsHandler()
        self.kubernetes = Kubernetes()
        self.storage_class_file = Path("./couchbase/storageclasses.yaml")
        self.couchbase_cluster_file = Path(
            "./couchbase/couchbase-cluster.yaml")
        self.couchbase_buckets_file = Path(
            "./couchbase/couchbase-buckets.yaml")
        self.couchbase_group_file = Path("./couchbase/couchbase-group.yaml")
        self.couchbase_user_file = Path("./couchbase/couchbase-user.yaml")
        self.couchbase_rolebinding_file = Path(
            "./couchbase/couchbase-rolebinding.yaml")
        self.couchbase_ephemeral_buckets_file = Path(
            "./couchbase/couchbase-ephemeral-buckets.yaml")
        self.couchbase_source_folder_pattern, self.couchbase_source_file = self.get_couchbase_files
        self.couchbase_custom_resource_definition_file = self.couchbase_source_file.joinpath(
            "crd.yaml")
        self.couchbase_operator_dac_file = self.couchbase_source_file.joinpath(
            "operator_dac.yaml")
        self.couchbase_admission_file = self.couchbase_source_file.joinpath(
            "admission.yaml")
        self.couchbase_operator_backup_file = self.couchbase_source_file.joinpath(
            "operator_dac_backup.yaml")
        self.filename = ""
        # @TODO: Remove flag after deprecation of couchbase operator 2.0
        self.old_couchbase = False
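
A minimal usage sketch (not part of the listing above): the CLI entry point in Example #10 drives this Couchbase class by instantiating it and calling uninstall()/install(); the import path below is an assumption, since the listings do not show it.

# Assumed import path, shown for illustration only.
from pygluu.kubernetes.couchbase import Couchbase

couchbase = Couchbase()
couchbase.uninstall()  # clear any previous Couchbase deployment first
couchbase.install()    # deploys using the ./couchbase/*.yaml files resolved in __init__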
Example #2
    def __init__(self):
        self.settings = SettingsHandler()
        self.kubernetes = Kubernetes()
        self.timeout = 120
        if self.settings.get("DEPLOYMENT_ARCH") == "gke":
            user_account, stderr, retcode = exec_cmd(
                "gcloud config get-value core/account")
            user_account = str(user_account, "utf-8").strip()

            user, stderr, retcode = exec_cmd("whoami")
            user = str(user, "utf-8").strip()
            cluster_role_binding_name = "cluster-admin-{}".format(user)
            self.kubernetes.create_cluster_role_binding(
                cluster_role_binding_name=cluster_role_binding_name,
                user_name=user_account,
                cluster_role_name="cluster-admin")
Example #3
    def install_gluu(self, install_ingress=True):
        """
        Helm install Gluu
        :param install_ingress:
        """
        labels = {"app": "gluu"}
        if self.settings.get("USE_ISTIO") == "Y":
            labels = {"app": "gluu", "istio-injection": "enabled"}
        self.kubernetes.create_namespace(
            name=self.settings.get("CN_NAMESPACE"), labels=labels)
        if self.settings.get(
                "PERSISTENCE_BACKEND") != "ldap" and self.settings.get(
                    "INSTALL_COUCHBASE") == "Y":
            couchbase_app = Couchbase()
            couchbase_app.uninstall()
            couchbase_app = Couchbase()
            couchbase_app.install()
            self.settings = SettingsHandler()
        if self.settings.get("AWS_LB_TYPE") == "alb":
            self.prepare_alb()
            self.deploy_alb()
        if self.settings.get("AWS_LB_TYPE") != "alb" and self.settings.get(
                "USE_ISTIO_INGRESS") != "Y":
            self.check_install_nginx_ingress(install_ingress)
        self.analyze_global_values()
        try:
            exec_cmd("helm install {} -f {} ./helm/gluu --namespace={}".format(
                self.settings.get('CN_HELM_RELEASE_NAME'), self.values_file,
                self.settings.get("CN_NAMESPACE")))

            if self.settings.get("PERSISTENCE_BACKEND") == "hybrid" or \
                    self.settings.get("PERSISTENCE_BACKEND") == "ldap":
                values_file = Path("./helm/ldap-backup/values.yaml").resolve()
                values_file_parser = Parser(values_file, True)
                values_file_parser["ldapPass"] = self.settings.get("LDAP_PW")
                values_file_parser.dump_it()

                exec_cmd(
                    "helm install {} -f ./helm/ldap-backup/values.yaml ./helm/ldap-backup --namespace={}"
                    .format(self.ldap_backup_release_name,
                            self.settings.get("CN_NAMESPACE")))
        except FileNotFoundError:
            logger.error(
                "Helm v3 is not installed. Please install it to continue "
                "https://helm.sh/docs/intro/install/")
            raise SystemExit(1)
Example #4
    def __init__(self):
        self.settings = SettingsHandler()
        self.kubernetes = Kubernetes()
        self.storage_class_file = Path("./couchbase/storageclasses.yaml")
        self.couchbase_cluster_file = Path(
            "./couchbase/couchbase-cluster.yaml")
        self.couchbase_buckets_file = Path(
            "./couchbase/couchbase-buckets.yaml")
        self.couchbase_group_file = Path("./couchbase/couchbase-group.yaml")
        self.couchbase_user_file = Path("./couchbase/couchbase-user.yaml")
        self.couchbase_rolebinding_file = Path(
            "./couchbase/couchbase-rolebinding.yaml")
        self.couchbase_ephemeral_buckets_file = Path(
            "./couchbase/couchbase-ephemeral-buckets.yaml")
        self.couchbase_source_folder_pattern, self.couchbase_source_file = self.get_couchbase_files
        self.couchbase_custom_resource_definition_file = self.couchbase_source_file.joinpath(
            "crd.yaml")
        self.couchbase_operator_dac_file = self.couchbase_source_file.joinpath(
            "operator_dac.yaml")
        self.filename = ""
Example #5
    def __init__(self):
        self.values_file = Path("./helm/gluu/values.yaml").resolve()
        self.settings = SettingsHandler()
        self.kubernetes = Kubernetes()
        self.ldap_backup_release_name = self.settings.get(
            'CN_HELM_RELEASE_NAME') + "-ldap-backup"
        if self.settings.get("DEPLOYMENT_ARCH") == "gke":
            # Clusterrolebinding needs to be created for gke with CB or kubeDB installed
            if self.settings.get("INSTALL_REDIS") == "Y" or \
                    self.settings.get("INSTALL_GLUU_GATEWAY") == "Y" or \
                    self.settings.get("INSTALL_COUCHBASE") == "Y":
                user_account, stderr, retcode = exec_cmd(
                    "gcloud config get-value core/account")
                user_account = str(user_account, "utf-8").strip()

                user, stderr, retcode = exec_cmd("whoami")
                user = str(user, "utf-8").strip()
                cluster_role_binding_name = "cluster-admin-{}".format(user)
                self.kubernetes.create_cluster_role_binding(
                    cluster_role_binding_name=cluster_role_binding_name,
                    user_name=user_account,
                    cluster_role_name="cluster-admin")
Example #6
class Prompt:
    """Prompt is used for prompting users for input used in deploying Gluu.
    """
    def __init__(self):
        self.settings = SettingsHandler()

    def load_settings(self):
        self.settings = SettingsHandler()

    def license(self):
        self.load_settings()
        PromptLicense(self.settings)

    def versions(self):
        self.load_settings()
        PromptVersion(self.settings)

    def arch(self):
        self.load_settings()
        arch = PromptArch(self.settings)
        arch.prompt_arch()

    def namespace(self):
        self.load_settings()
        namespace = PromptNamespace(self.settings)
        namespace.prompt_gluu_namespace()

    def optional_services(self):
        self.load_settings()
        optional_services = PromptOptionalServices(self.settings)
        optional_services.prompt_optional_services()

    def gluu_gateway(self):
        self.load_settings()
        gluu_gateway = PromptGluuGateway(self.settings)
        gluu_gateway.prompt_gluu_gateway()

    def jackrabbit(self):
        self.load_settings()
        jackrabbit = PromptJackrabbit(self.settings)
        jackrabbit.prompt_jackrabbit()

    def istio(self):
        self.load_settings()
        istio = PromptIstio(self.settings)
        istio.prompt_istio()

    def test_enviornment(self):
        self.load_settings()
        test_environment = PromptTestEnvironment(self.settings)
        if not self.settings.get("TEST_ENVIRONMENT") and \
                self.settings.get("DEPLOYMENT_ARCH") not in ("microk8s", "minikube"):
            test_environment.prompt_test_environment()

        if self.settings.get("DEPLOYMENT_ARCH") in ("eks", "gke", "do",
                                                    "local", "aks"):
            if not self.settings.get("NODE_SSH_KEY"):
                test_environment.prompt_ssh_key()

    def network(self):
        if not self.settings.get("HOST_EXT_IP"):
            ip = gather_ip()
            self.load_settings()
            self.settings.set("HOST_EXT_IP", ip)

            if self.settings.get(
                    "DEPLOYMENT_ARCH"
            ) == "eks" and self.settings.get("USE_ISTIO_INGRESS") != "Y":
                aws = PromptAws(self.settings)
                aws.prompt_aws_lb()

    def gke(self):
        self.load_settings()
        if self.settings.get("DEPLOYMENT_ARCH") == "gke":
            gke = PromptGke(self.settings)
            gke.prompt_gke()

    def persistence_backend(self):
        self.load_settings()
        persistence_backend = PromptPersistenceBackend(self.settings)
        persistence_backend.prompt_persistence_backend()

    def ldap(self):
        self.load_settings()
        if self.settings.get("PERSISTENCE_BACKEND") == "hybrid":
            ldap = PromptLdap(self.settings)
            ldap.prompt_hybrid_ldap_held_data()

    def volumes(self):
        self.load_settings()
        volumes = PromptVolumes(self.settings)
        if self.settings.get("PERSISTENCE_BACKEND") in ("hybrid", "ldap") or \
                self.settings.get("INSTALL_JACKRABBIT") == "Y":
            volumes.prompt_volumes()
        volumes.prompt_storage()

    def couchbase(self):
        self.load_settings()
        couchbase = PromptCouchbase(self.settings)
        if not self.settings.get("DEPLOY_MULTI_CLUSTER") and self.settings.get(
                "PERSISTENCE_BACKEND") in (
                    "hybrid", "couchbase") and self.settings.get(
                        "DEPLOYMENT_ARCH") not in ("microk8s", "minikube"):
            couchbase.prompt_couchbase_multi_cluster()
        if self.settings.get("PERSISTENCE_BACKEND") in ("hybrid", "couchbase"):
            couchbase.prompt_couchbase()

    def cache(self):
        self.load_settings()
        cache = PromptCache(self.settings)
        cache.prompt_cache_type()

    def backup(self):
        self.load_settings()
        if self.settings.get("DEPLOYMENT_ARCH") not in ("microk8s",
                                                        "minikube"):
            backup = PromptBackup(self.settings)
            backup.prompt_backup()

    def configuration(self):
        self.load_settings()
        configuration = PromptConfiguration(self.settings)
        configuration.prompt_config()

    def images(self):
        self.load_settings()
        images = PromptImages(self.settings)
        images.prompt_image_name_tag()

    def replicas(self):
        self.load_settings()
        replicas = PromptReplicas(self.settings)
        replicas.prompt_replicas()

    def confirm_settings(self):
        self.load_settings()
        if self.settings.get("CONFIRM_PARAMS") != "Y":
            confirm_settings = PromptConfirmSettings(self.settings)
            confirm_settings.confirm_params()

    def prompt(self):
        """Main property: called to setup all prompts and returns prompts in settings file.

        :return:
        """
        self.license()
        self.versions()
        self.arch()
        self.namespace()
        self.gluu_gateway()
        self.optional_services()
        self.jackrabbit()
        self.istio()
        self.test_enviornment()
        self.network()
        self.gke()
        self.persistence_backend()
        self.ldap()
        self.volumes()
        self.couchbase()
        self.cache()
        self.backup()
        self.configuration()
        self.images()
        self.replicas()
        self.volumes()
        self.confirm_settings()
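
A minimal usage sketch based on Example #10 (the import paths are assumptions, since the listings do not show them): prompt() walks every prompt in order, each prompt re-loads the settings file, and the caller re-reads SettingsHandler afterwards to pick up the answers.

# Assumed import paths, for illustration only.
from pygluu.kubernetes.terminal.prompt import Prompt
from pygluu.kubernetes.settings import SettingsHandler

prompts = Prompt()
prompts.prompt()              # run all prompts; answers are persisted to the settings file
settings = SettingsHandler()  # re-load the settings gathered by the prompts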
Example #7
    def __init__(self, app=None):
        if app is not None:
            self.init_app(app)
        self.db = SettingsHandler()
Example #8
class Kubedb(object):
    def __init__(self):
        self.values_file = Path("./helm/gluu/values.yaml").resolve()
        self.upgrade_values_file = Path(
            "./helm/gluu-upgrade/values.yaml").resolve()
        self.settings = SettingsHandler()
        self.kubernetes = Kubernetes()
        self.ldap_backup_release_name = self.settings.get(
            'CN_HELM_RELEASE_NAME') + "-ldap-backup"
        if self.settings.get("DEPLOYMENT_ARCH") == "gke":
            # Clusterrolebinding needs to be created for gke with CB or kubeDB installed
            if self.settings.get("INSTALL_REDIS") == "Y" or \
                    self.settings.get("INSTALL_GLUU_GATEWAY") == "Y" or \
                    self.settings.get("INSTALL_COUCHBASE") == "Y":
                user_account, stderr, retcode = exec_cmd(
                    "gcloud config get-value core/account")
                user_account = str(user_account, "utf-8").strip()

                user, stderr, retcode = exec_cmd("whoami")
                user = str(user, "utf-8").strip()
                cluster_role_binding_name = "cluster-admin-{}".format(user)
                self.kubernetes.create_cluster_role_binding(
                    cluster_role_binding_name=cluster_role_binding_name,
                    user_name=user_account,
                    cluster_role_name="cluster-admin")

    def install_kubedb(self):
        self.uninstall_kubedb()
        self.kubernetes.create_namespace(name="gluu-kubedb",
                                         labels={"app": "kubedb"})
        try:
            exec_cmd(
                "helm repo add appscode https://charts.appscode.com/stable/")
            exec_cmd("helm repo update")
            exec_cmd(
                "helm install kubedb-operator appscode/kubedb  --version v0.13.0-rc.0 "
                "--namespace gluu-kubedb")
            self.kubernetes.check_pods_statuses("gluu-kubedb", "app=kubedb")
            exec_cmd(
                "helm install kubedb-catalog appscode/kubedb-catalog  --version v0.13.0-rc.0 "
                "--namespace gluu-kubedb")
        except FileNotFoundError:
            logger.error(
                "Helm v3 is not installed. Please install it to continue "
                "https://helm.sh/docs/intro/install/")
            raise SystemExit(1)

    def uninstall_kubedb(self):
        logger.info("Deleting KubeDB...This may take a little while.")
        try:
            exec_cmd(
                "helm repo add appscode https://charts.appscode.com/stable/")
            exec_cmd("helm repo update")
            exec_cmd("helm delete kubedb-operator --namespace gluu-kubedb")
            exec_cmd("helm delete kubedb-catalog --namespace gluu-kubedb")
            time.sleep(20)
        except FileNotFoundError:
            logger.error(
                "Helm v3 is not installed. Please install it to continue "
                "https://helm.sh/docs/intro/install/")
            raise SystemExit(1)
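
A minimal usage sketch mirroring the "install-kubedb" and "install" branches of Example #10 (the import path is taken from that listing):

from pygluu.kubernetes.kubedb import Kubedb

kubedb = Kubedb()
kubedb.uninstall_kubedb()  # install_kubedb() also uninstalls first, so this gives a clean slate
kubedb.install_kubedb()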
Example #9
class Gluu(object):
    def __init__(self):
        self.values_file = Path("./helm/gluu/values.yaml").resolve()
        self.upgrade_values_file = Path(
            "./helm/gluu-upgrade/values.yaml").resolve()
        self.settings = SettingsHandler()
        self.kubernetes = Kubernetes()
        self.ldap_backup_release_name = self.settings.get(
            'CN_HELM_RELEASE_NAME') + "-ldap-backup"
        if self.settings.get("DEPLOYMENT_ARCH") == "gke":
            # Clusterrolebinding needs to be created for gke with CB or kubeDB installed
            if self.settings.get("INSTALL_REDIS") == "Y" or \
                    self.settings.get("INSTALL_GLUU_GATEWAY") == "Y" or \
                    self.settings.get("INSTALL_COUCHBASE") == "Y":
                user_account, stderr, retcode = exec_cmd(
                    "gcloud config get-value core/account")
                user_account = str(user_account, "utf-8").strip()

                user, stderr, retcode = exec_cmd("whoami")
                user = str(user, "utf-8").strip()
                cluster_role_binding_name = "cluster-admin-{}".format(user)
                self.kubernetes.create_cluster_role_binding(
                    cluster_role_binding_name=cluster_role_binding_name,
                    user_name=user_account,
                    cluster_role_name="cluster-admin")

    def prepare_alb(self):
        ingress_parser = Parser("./alb/ingress.yaml", "Ingress")
        ingress_parser["spec"]["rules"][0]["host"] = self.settings.get(
            "CN_FQDN")
        ingress_parser["metadata"]["annotations"]["alb.ingress.kubernetes.io/certificate-arn"] = \
            self.settings.get("ARN_AWS_IAM")
        if not self.settings.get("ARN_AWS_IAM"):
            del ingress_parser["metadata"]["annotations"][
                "alb.ingress.kubernetes.io/certificate-arn"]

        for path in ingress_parser["spec"]["rules"][0]["http"]["paths"]:
            service_name = path["backend"]["serviceName"]
            if self.settings.get(
                    "ENABLE_CASA") != "Y" and service_name == "casa":
                path_index = ingress_parser["spec"]["rules"][0]["http"][
                    "paths"].index(path)
                del ingress_parser["spec"]["rules"][0]["http"]["paths"][
                    path_index]

            if self.settings.get("ENABLE_OXSHIBBOLETH"
                                 ) != "Y" and service_name == "oxshibboleth":
                path_index = ingress_parser["spec"]["rules"][0]["http"][
                    "paths"].index(path)
                del ingress_parser["spec"]["rules"][0]["http"]["paths"][
                    path_index]

            if self.settings.get("ENABLE_OXPASSPORT"
                                 ) != "Y" and service_name == "oxpassport":
                path_index = ingress_parser["spec"]["rules"][0]["http"][
                    "paths"].index(path)
                del ingress_parser["spec"]["rules"][0]["http"]["paths"][
                    path_index]

            if self.settings.get("INSTALL_GLUU_GATEWAY"
                                 ) != "Y" and service_name == "gg-kong-ui":
                path_index = ingress_parser["spec"]["rules"][0]["http"][
                    "paths"].index(path)
                del ingress_parser["spec"]["rules"][0]["http"]["paths"][
                    path_index]
        ingress_parser.dump_it()

    def deploy_alb(self):
        alb_ingress = Path("./alb/ingress.yaml")
        self.kubernetes.create_objects_from_dict(
            alb_ingress, self.settings.get("CN_NAMESPACE"))
        if self.settings.get("IS_CN_FQDN_REGISTERED") != "Y":
            prompt = input(
                "Please input the DNS of the Application load balancer  created found on AWS UI: "
            )
            lb_hostname = prompt
            while True:
                try:
                    if lb_hostname:
                        break
                    lb_hostname = self.kubernetes.read_namespaced_ingress(
                        name="gluu", namespace="gluu"
                    ).status.load_balancer.ingress[0].hostname
                except TypeError:
                    logger.info("Waiting for loadbalancer address..")
                    time.sleep(10)
            self.settings.set("LB_ADD", lb_hostname)

    def wait_for_nginx_add(self):
        hostname_ip = None
        while True:
            try:
                if hostname_ip:
                    break
                if self.settings.get("DEPLOYMENT_ARCH") == "eks":
                    hostname_ip = self.kubernetes.read_namespaced_service(
                        name=self.settings.get('NGINX_INGRESS_RELEASE_NAME') +
                        "-ingress-nginx-controller",
                        namespace=self.settings.get("NGINX_INGRESS_NAMESPACE")
                    ).status.load_balancer.ingress[0].hostname
                    self.settings.set("LB_ADD", hostname_ip)
                    if self.settings.get("AWS_LB_TYPE") == "nlb":
                        try:
                            ip_static = socket.gethostbyname(str(hostname_ip))
                            if ip_static:
                                break
                        except socket.gaierror:
                            logger.info("Address has not received an ip yet.")
                elif self.settings.get("DEPLOYMENT_ARCH") == "local":
                    self.settings.set(
                        "LB_ADD",
                        self.settings.get('NGINX_INGRESS_RELEASE_NAME') +
                        "-nginx-ingress-controller." +
                        self.settings.get("NGINX_INGRESS_NAMESPACE") +
                        ".svc.cluster.local")
                    break
                else:
                    hostname_ip = self.kubernetes.read_namespaced_service(
                        name=self.settings.get('NGINX_INGRESS_RELEASE_NAME') +
                        "-ingress-nginx-controller",
                        namespace=self.settings.get("NGINX_INGRESS_NAMESPACE")
                    ).status.load_balancer.ingress[0].ip
                    self.settings.set("HOST_EXT_IP", hostname_ip)
            except (TypeError, AttributeError):
                logger.info("Waiting for address..")
                time.sleep(10)

    def check_install_nginx_ingress(self, install_ingress=True):
        """
        Helm installs nginx ingress or checks to recieve and ip or address
        :param install_ingress:
        """
        if install_ingress:
            self.kubernetes.delete_custom_resource(
                "virtualservers.k8s.nginx.org")
            self.kubernetes.delete_custom_resource(
                "virtualserverroutes.k8s.nginx.org")
            self.kubernetes.delete_cluster_role("ingress-nginx-nginx-ingress")
            self.kubernetes.delete_cluster_role_binding(
                "ingress-nginx-nginx-ingress")
            self.kubernetes.create_namespace(
                name=self.settings.get("NGINX_INGRESS_NAMESPACE"),
                labels={"app": "ingress-nginx"})
            self.kubernetes.delete_cluster_role(
                self.settings.get('NGINX_INGRESS_RELEASE_NAME') +
                "-nginx-ingress-controller")
            self.kubernetes.delete_cluster_role_binding(
                self.settings.get('NGINX_INGRESS_RELEASE_NAME') +
                "-nginx-ingress-controller")
            try:
                exec_cmd(
                    "helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx"
                )
                exec_cmd("helm repo add stable https://charts.helm.sh/stable")
                exec_cmd("helm repo update")
            except FileNotFoundError:
                logger.error(
                    "Helm v3 is not installed. Please install it to continue "
                    "https://helm.sh/docs/intro/install/")
                raise SystemExit(1)
        command = "helm install {} ingress-nginx/ingress-nginx --namespace={} ".format(
            self.settings.get('NGINX_INGRESS_RELEASE_NAME'),
            self.settings.get("NGINX_INGRESS_NAMESPACE"))
        if self.settings.get("DEPLOYMENT_ARCH") == "minikube":
            exec_cmd("minikube addons enable ingress")
        if self.settings.get("DEPLOYMENT_ARCH") == "eks":
            if self.settings.get("AWS_LB_TYPE") == "nlb":
                if install_ingress:
                    nlb_override_values_file = Path(
                        "./nginx/aws/aws-nlb-override-values.yaml").resolve()
                    nlb_values = " --values {}".format(
                        nlb_override_values_file)
                    exec_cmd(command + nlb_values)
            else:
                if self.settings.get("USE_ARN") == "Y":
                    if install_ingress:
                        elb_override_values_file = Path(
                            "./nginx/aws/aws-elb-override-values.yaml"
                        ).resolve()
                        elb_file_parser = Parser(elb_override_values_file,
                                                 True)
                        elb_file_parser["controller"]["service"][
                            "annotations"].update({
                                "service.beta.kubernetes.io/aws-load-balancer-ssl-cert":
                                self.settings.get("ARN_AWS_IAM")
                            })
                        elb_file_parser["controller"]["config"][
                            "proxy-real-ip-cidr"] = self.settings.get(
                                "VPC_CIDR")
                        elb_file_parser.dump_it()
                        elb_values = " --values {}".format(
                            elb_override_values_file)
                        exec_cmd(command + elb_values)
                else:
                    if install_ingress:
                        exec_cmd(command)

        if self.settings.get("DEPLOYMENT_ARCH") in ("gke", "aks", "do"):
            if install_ingress:
                cloud_override_values_file = Path(
                    "./nginx/cloud/cloud-override-values.yaml").resolve()
                cloud_values = " --values {}".format(
                    cloud_override_values_file)
                exec_cmd(command + cloud_values)
        if self.settings.get("DEPLOYMENT_ARCH") == "local":
            if install_ingress:
                baremetal_override_values_file = Path(
                    "./nginx/baremetal/baremetal-override-values.yaml"
                ).resolve()
                baremetal_values = " --values {}".format(
                    baremetal_override_values_file)
                exec_cmd(command + baremetal_values)
        if self.settings.get("DEPLOYMENT_ARCH") not in ("microk8s",
                                                        "minikube"):
            logger.info("Waiting for nginx to be prepared...")
            time.sleep(60)
            self.wait_for_nginx_add()

    def analyze_global_values(self):
        """
        Parses Gluu values.yaml with the input information from prompts
        """
        values_file_parser = Parser(self.values_file, True)
        if self.settings.get("DEPLOYMENT_ARCH") == "minikube":
            provisioner = "k8s.io/minikube-hostpath"
        elif self.settings.get("DEPLOYMENT_ARCH") == "eks":
            provisioner = "kubernetes.io/aws-ebs"
        elif self.settings.get("DEPLOYMENT_ARCH") == "gke":
            provisioner = "kubernetes.io/gce-pd"
        elif self.settings.get("DEPLOYMENT_ARCH") == "aks":
            provisioner = "kubernetes.io/azure-disk"
        elif self.settings.get("DEPLOYMENT_ARCH") == "do":
            provisioner = "dobs.csi.digitalocean.com"
        elif self.settings.get("DEPLOYMENT_ARCH") == "local":
            provisioner = "openebs.io/local"
        else:
            provisioner = "microk8s.io/hostpath"
        values_file_parser["global"]["storageClass"][
            "provisioner"] = provisioner
        values_file_parser["global"]["lbIp"] = self.settings.get("HOST_EXT_IP")
        values_file_parser["global"]["domain"] = self.settings.get("CN_FQDN")
        values_file_parser["global"]["isDomainRegistered"] = "false"
        if self.settings.get("IS_CN_FQDN_REGISTERED") == "Y":
            values_file_parser["global"]["isDomainRegistered"] = "true"
        if self.settings.get("CN_CACHE_TYPE") == "REDIS":
            values_file_parser["config"]["configmap"][
                "cnRedisUrl"] = self.settings.get("REDIS_URL")
            values_file_parser["config"]["configmap"][
                "cnRedisType"] = self.settings.get("REDIS_TYPE")
            values_file_parser["config"]["configmap"][
                "cnRedisUseSsl"] = self.settings.get("REDIS_USE_SSL")
            values_file_parser["config"]["configmap"]["cnRedisSslTruststore"] = \
                self.settings.get("REDIS_SSL_TRUSTSTORE")
            values_file_parser["config"]["configmap"]["cnRedisSentinelGroup"] = \
                self.settings.get("REDIS_SENTINEL_GROUP")
            values_file_parser["config"]["redisPass"] = self.settings.get(
                "REDIS_PW")
        if self.settings.get("DEPLOYMENT_ARCH") in ("microk8s", "minikube") \
                or self.settings.get("TEST_ENVIRONMENT") == "Y":
            values_file_parser["global"]["cloud"]["testEnviroment"] = True
        values_file_parser["config"]["configmap"][
            "lbAddr"] = self.settings.get("LB_ADD")
        values_file_parser["global"]["cnPersistenceType"] = self.settings.get(
            "PERSISTENCE_BACKEND")
        values_file_parser["config"]["configmap"][
            "cnPersistenceType"] = self.settings.get("PERSISTENCE_BACKEND")
        values_file_parser["config"]["configmap"]["cnPersistenceLdapMapping"] = \
            self.settings.get("HYBRID_LDAP_HELD_DATA")
        if self.settings.get("PERSISTENCE_BACKEND") != "ldap":
            values_file_parser["config"]["configmap"][
                "cnCouchbaseUrl"] = self.settings.get("COUCHBASE_URL")
            values_file_parser["config"]["configmap"][
                "cnCouchbaseUser"] = self.settings.get("COUCHBASE_USER")
            values_file_parser["config"]["configmap"][
                "cnCouchbaseIndexNumReplica"] = self.settings.get(
                    "COUCHBASE_INDEX_NUM_REPLICA")
            values_file_parser["config"]["configmap"][
                "cnCouchbaseBucketPrefix"] = self.settings.get(
                    "COUCHBASE_BUCKET_PREFIX")
            values_file_parser["config"]["configmap"]["cnCouchbaseSuperUser"] = \
                self.settings.get("COUCHBASE_SUPERUSER")
            values_file_parser["config"]["configmap"][
                "cnCouchbaseCrt"] = self.settings.get("COUCHBASE_CRT")
            values_file_parser["config"]["configmap"][
                "cnCouchbasePass"] = self.settings.get("COUCHBASE_PASSWORD")
            values_file_parser["config"]["configmap"]["cnCouchbaseSuperUserPass"] = \
                self.settings.get("COUCHBASE_SUPERUSER_PASSWORD")
        values_file_parser["global"]["auth-server"]["enabled"] = True
        values_file_parser["global"]["persistence"]["enabled"] = True
        values_file_parser["global"]["oxtrust"]["enabled"] = True
        values_file_parser["global"]["config"]["enabled"] = True
        values_file_parser["global"]["opendj"]["enabled"] = False
        values_file_parser["global"]["fido2"]["enabled"] = False
        if self.settings.get("ENABLE_FIDO2") == "Y":
            values_file_parser["global"]["fido2"]["enabled"] = True
            values_file_parser["fido2"]["replicas"] = self.settings.get(
                "FIDO2_REPLICAS")
        values_file_parser["global"]["scim"]["enabled"] = False
        if self.settings.get("ENABLE_SCIM") == "Y":
            values_file_parser["global"]["scim"]["enabled"] = True
            values_file_parser["scim"]["replicas"] = self.settings.get(
                "SCIM_REPLICAS")

        if self.settings.get("ENABLE_CONFIG_API") == "Y":
            values_file_parser["global"]["config-api"]["enabled"] = True

        if self.settings.get("INSTALL_JACKRABBIT") == "Y":
            values_file_parser["global"]["jackrabbit"]["enabled"] = True
            values_file_parser["config"]["configmap"][
                "cnJackrabbitUrl"] = self.settings.get("JACKRABBIT_URL")
            values_file_parser["jackrabbit"]["secrets"]["cnJackrabbitAdminPass"] = \
                self.settings.get("JACKRABBIT_ADMIN_PASSWORD")
            values_file_parser["jackrabbit"]["secrets"]["cnJackrabbitPostgresPass"] = \
                self.settings.get("JACKRABBIT_PG_PASSWORD")
        if self.settings.get("USE_ISTIO_INGRESS") == "Y":
            values_file_parser["global"]["istio"]["ingress"] = True
            values_file_parser["global"]["istio"]["enabled"] = True
            values_file_parser["global"]["istio"][
                "namespace"] = self.settings.get("ISTIO_SYSTEM_NAMESPACE")
        elif self.settings.get("AWS_LB_TYPE") == "alb":
            values_file_parser["global"]["alb"]["ingress"] = True
        else:
            values_file_parser["nginx-ingress"]["ingress"]["enabled"] = True
            values_file_parser["nginx-ingress"]["ingress"]["hosts"] = [
                self.settings.get("CN_FQDN")
            ]
            values_file_parser["nginx-ingress"]["ingress"]["tls"][0][
                "hosts"] = [self.settings.get("CN_FQDN")]
        if self.settings.get("USE_ISTIO") == "Y":
            values_file_parser["global"]["istio"]["enabled"] = True

        values_file_parser["global"]["cnJackrabbitCluster"] = "false"
        if self.settings.get("JACKRABBIT_CLUSTER") == "Y":
            values_file_parser["global"]["cnJackrabbitCluster"] = "true"
            values_file_parser["config"]["configmap"]["cnJackrabbitAdminId"] = \
                self.settings.get("JACKRABBIT_ADMIN_ID")
            values_file_parser["config"]["configmap"]["cnJackrabbitPostgresUser"] = \
                self.settings.get("JACKRABBIT_PG_USER")
            values_file_parser["config"]["configmap"]["cnJackrabbitPostgresDatabaseName"] = \
                self.settings.get("JACKRABBIT_DATABASE")
            values_file_parser["config"]["configmap"]["cnJackrabbitPostgresHost"] = \
                self.settings.get("POSTGRES_URL")
            values_file_parser["config"]["configmap"]["cnJackrabbitPostgresUser"] = \
                self.settings.get("JACKRABBIT_PG_USER")

        if self.settings.get("PERSISTENCE_BACKEND") == "hybrid" or \
                self.settings.get("PERSISTENCE_BACKEND") == "ldap":
            values_file_parser["global"]["opendj"]["enabled"] = True
            # ALPHA-FEATURE: Multi cluster ldap replication
            if self.settings.get("CN_LDAP_MULTI_CLUSTER") == "Y":
                values_file_parser["opendj"]["multiCluster"]["enabled"] = True
                values_file_parser["opendj"]["multiCluster"]["serfAdvertiseAddr"] = \
                    self.settings.get("CN_LDAP_ADVERTISE_ADDRESS")
                serf_key = base64.b64encode(secrets.token_bytes()).decode()
                values_file_parser["opendj"]["multiCluster"][
                    "serfKey"] = serf_key
                values_file_parser["opendj"]["multiCluster"]["serfPeers"] = \
                    self.settings.get("CN_LDAP_SERF_PEERS")
                if self.settings.get("CN_LDAP_SECONDARY_CLUSTER") == "Y":
                    values_file_parser["global"]["persistence"][
                        "enabled"] = False
                values_file_parser["opendj"]["ports"]["tcp-ldaps"]["nodePort"] = \
                    int(self.settings.get("CN_LDAP_ADVERTISE_LDAPS_PORT"))

                values_file_parser["opendj"]["ports"]["tcp-repl"]["port"] = \
                    int(self.settings.get("CN_LDAP_ADVERTISE_REPLICATION_PORT"))
                values_file_parser["opendj"]["ports"]["tcp-repl"]["targetPort"] = \
                    int(self.settings.get("CN_LDAP_ADVERTISE_REPLICATION_PORT"))
                values_file_parser["opendj"]["ports"]["tcp-repl"]["nodePort"] = \
                    int(self.settings.get("CN_LDAP_ADVERTISE_REPLICATION_PORT"))

                values_file_parser["opendj"]["ports"]["tcp-admin"]["port"] = \
                    int(self.settings.get("CN_LDAP_ADVERTISE_ADMIN_PORT"))
                values_file_parser["opendj"]["ports"]["tcp-admin"]["targetPort"] = \
                    int(self.settings.get("CN_LDAP_ADVERTISE_ADMIN_PORT"))
                values_file_parser["opendj"]["ports"]["tcp-admin"]["nodePort"] = \
                    int(self.settings.get("CN_LDAP_ADVERTISE_ADMIN_PORT"))

                values_file_parser["opendj"]["ports"]["tcp-serf"]["nodePort"] = \
                    int(self.settings.get("CN_LDAP_SERF_PORT"))
                values_file_parser["opendj"]["ports"]["udp-serf"]["nodePort"] = \
                    int(self.settings.get("CN_LDAP_SERF_PORT"))

        values_file_parser["global"]["oxshibboleth"]["enabled"] = False
        if self.settings.get("ENABLE_OXSHIBBOLETH") == "Y":
            values_file_parser["global"]["oxshibboleth"]["enabled"] = True
            values_file_parser["config"]["configmap"][
                "cnSyncShibManifests"] = True

        values_file_parser["global"]["client-api"]["enabled"] = False
        if self.settings.get("ENABLE_CLIENT_API") == "Y":
            values_file_parser["global"]["client-api"]["enabled"] = True
            values_file_parser["config"]["configmap"]["jansClientApiApplicationCertCn"] = \
                self.settings.get("CLIENT_API_APPLICATION_KEYSTORE_CN")
            values_file_parser["config"]["configmap"][
                "jansClientApiAdminCertCn"] = self.settings.get(
                    "CLIENT_API_ADMIN_KEYSTORE_CN")
            values_file_parser["client-api"]["replicas"] = self.settings.get(
                "CLIENT_API_REPLICAS")

        values_file_parser["opendj"]["cnRedisEnabled"] = False
        if self.settings.get("CN_CACHE_TYPE") == "REDIS":
            values_file_parser["opendj"]["cnRedisEnabled"] = True

        values_file_parser["global"]["nginx-ingress"]["enabled"] = True

        values_file_parser["global"]["cr-rotate"]["enabled"] = False
        if self.settings.get("ENABLE_CACHE_REFRESH") == "Y":
            values_file_parser["global"]["cr-rotate"]["enabled"] = True

        values_file_parser["global"]["auth-server-key-rotation"][
            "enabled"] = False
        if self.settings.get("ENABLE_AUTH_SERVER_KEY_ROTATE") == "Y":
            values_file_parser["global"]["auth-server-key-rotation"][
                "enabled"] = True
            values_file_parser["auth-server-key-rotation"][
                "keysLife"] = self.settings.get("AUTH_SERVER_KEYS_LIFE")

        values_file_parser["config"]["orgName"] = self.settings.get("ORG_NAME")
        values_file_parser["config"]["email"] = self.settings.get("EMAIL")
        values_file_parser["config"]["adminPass"] = self.settings.get(
            "ADMIN_PW")
        values_file_parser["config"]["ldapPass"] = self.settings.get("LDAP_PW")
        values_file_parser["config"]["countryCode"] = self.settings.get(
            "COUNTRY_CODE")
        values_file_parser["config"]["state"] = self.settings.get("STATE")
        values_file_parser["config"]["city"] = self.settings.get("CITY")
        values_file_parser["config"]["configmap"][
            "cnCacheType"] = self.settings.get("CN_CACHE_TYPE")
        values_file_parser["opendj"]["replicas"] = self.settings.get(
            "LDAP_REPLICAS")
        values_file_parser["opendj"]["persistence"][
            "size"] = self.settings.get("LDAP_STORAGE_SIZE")
        if self.settings.get("ENABLE_OXTRUST_API_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"][
                "cnOxtrustApiEnabled"] = True
        if self.settings.get("ENABLE_OXTRUST_TEST_MODE_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"][
                "cnOxtrustApiTestMode"] = True
        if self.settings.get("ENABLE_CASA_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"]["cnCasaEnabled"] = True
            values_file_parser["config"]["configmap"][
                "cnSyncCasaManifests"] = True

        if self.settings.get("ENABLE_OXPASSPORT_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"][
                "cnPassportEnabled"] = True
        if self.settings.get("ENABLE_RADIUS_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"]["cnRadiusEnabled"] = True
        if self.settings.get("ENABLE_SAML_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"]["cnSamlEnabled"] = True

        values_file_parser["oxpassport"]["resources"] = {}
        values_file_parser["casa"]["image"]["repository"] = self.settings.get(
            "CASA_IMAGE_NAME")
        values_file_parser["casa"]["image"]["tag"] = self.settings.get(
            "CASA_IMAGE_TAG")
        values_file_parser["casa"]["replicas"] = self.settings.get(
            "CASA_REPLICAS")
        values_file_parser["config"]["image"][
            "repository"] = self.settings.get("CONFIG_IMAGE_NAME")
        values_file_parser["config"]["image"]["tag"] = self.settings.get(
            "CONFIG_IMAGE_TAG")
        values_file_parser["cr-rotate"]["image"][
            "repository"] = self.settings.get(
                "CACHE_REFRESH_ROTATE_IMAGE_NAME")
        values_file_parser["cr-rotate"]["image"]["tag"] = self.settings.get(
            "CACHE_REFRESH_ROTATE_IMAGE_TAG")
        values_file_parser["auth-server-key-rotation"]["image"][
            "repository"] = self.settings.get("CERT_MANAGER_IMAGE_NAME")
        values_file_parser["auth-server-key-rotation"]["image"][
            "tag"] = self.settings.get("CERT_MANAGER_IMAGE_TAG")
        values_file_parser["opendj"]["image"][
            "repository"] = self.settings.get("LDAP_IMAGE_NAME")
        values_file_parser["opendj"]["image"]["tag"] = self.settings.get(
            "LDAP_IMAGE_TAG")
        values_file_parser["persistence"]["image"][
            "repository"] = self.settings.get("PERSISTENCE_IMAGE_NAME")
        values_file_parser["persistence"]["image"]["tag"] = self.settings.get(
            "PERSISTENCE_IMAGE_TAG")
        values_file_parser["auth-server"]["image"][
            "repository"] = self.settings.get("AUTH_SERVER_IMAGE_NAME")
        values_file_parser["auth-server"]["image"]["tag"] = self.settings.get(
            "AUTH_SERVER_IMAGE_TAG")
        values_file_parser["client-api"]["image"][
            "repository"] = self.settings.get("CLIENT_API_IMAGE_NAME")
        values_file_parser["client-api"]["image"]["tag"] = self.settings.get(
            "CLIENT_API_IMAGE_TAG")
        values_file_parser["auth-server"]["replicas"] = self.settings.get(
            "AUTH_SERVER_REPLICAS")
        values_file_parser["oxpassport"]["image"][
            "repository"] = self.settings.get("OXPASSPORT_IMAGE_NAME")
        values_file_parser["oxpassport"]["image"]["tag"] = self.settings.get(
            "OXPASSPORT_IMAGE_TAG")
        values_file_parser["oxpassport"]["replicas"] = self.settings.get(
            "OXPASSPORT_REPLICAS")
        values_file_parser["oxshibboleth"]["image"][
            "repository"] = self.settings.get("OXSHIBBOLETH_IMAGE_NAME")
        values_file_parser["oxshibboleth"]["image"]["tag"] = self.settings.get(
            "OXSHIBBOLETH_IMAGE_TAG")
        values_file_parser["oxshibboleth"]["replicas"] = self.settings.get(
            "OXSHIBBOLETH_REPLICAS")
        values_file_parser["jackrabbit"]["image"][
            "repository"] = self.settings.get("JACKRABBIT_IMAGE_NAME")
        values_file_parser["jackrabbit"]["image"]["tag"] = self.settings.get(
            "JACKRABBIT_IMAGE_TAG")
        values_file_parser["oxtrust"]["image"][
            "repository"] = self.settings.get("OXTRUST_IMAGE_NAME")
        values_file_parser["oxtrust"]["image"]["tag"] = self.settings.get(
            "OXTRUST_IMAGE_TAG")
        values_file_parser["oxtrust"]["replicas"] = self.settings.get(
            "OXTRUST_REPLICAS")
        values_file_parser["radius"]["image"][
            "repository"] = self.settings.get("RADIUS_IMAGE_NAME")
        values_file_parser["radius"]["image"]["tag"] = self.settings.get(
            "RADIUS_IMAGE_TAG")
        values_file_parser["radius"]["replicas"] = self.settings.get(
            "RADIUS_REPLICAS")
        values_file_parser.dump_it()

    def install_gluu(self, install_ingress=True):
        """
        Helm install Gluu
        :param install_ingress:
        """
        labels = {"app": "gluu"}
        if self.settings.get("USE_ISTIO") == "Y":
            labels = {"app": "gluu", "istio-injection": "enabled"}
        self.kubernetes.create_namespace(
            name=self.settings.get("CN_NAMESPACE"), labels=labels)
        if self.settings.get(
                "PERSISTENCE_BACKEND") != "ldap" and self.settings.get(
                    "INSTALL_COUCHBASE") == "Y":
            couchbase_app = Couchbase()
            couchbase_app.uninstall()
            couchbase_app = Couchbase()
            couchbase_app.install()
            self.settings = SettingsHandler()
        if self.settings.get("AWS_LB_TYPE") == "alb":
            self.prepare_alb()
            self.deploy_alb()
        if self.settings.get("AWS_LB_TYPE") != "alb" and self.settings.get(
                "USE_ISTIO_INGRESS") != "Y":
            self.check_install_nginx_ingress(install_ingress)
        self.analyze_global_values()
        try:
            exec_cmd("helm install {} -f {} ./helm/gluu --namespace={}".format(
                self.settings.get('CN_HELM_RELEASE_NAME'), self.values_file,
                self.settings.get("CN_NAMESPACE")))

            if self.settings.get("PERSISTENCE_BACKEND") == "hybrid" or \
                    self.settings.get("PERSISTENCE_BACKEND") == "ldap":
                self.install_ldap_backup()

        except FileNotFoundError:
            logger.error(
                "Helm v3 is not installed. Please install it to continue "
                "https://helm.sh/docs/intro/install/")
            raise SystemExit(1)

    def install_ldap_backup(self):
        values_file = Path("./helm/ldap-backup/values.yaml").resolve()
        values_file_parser = Parser(values_file, True)
        values_file_parser["ldapPass"] = self.settings.get("LDAP_PW")
        values_file_parser.dump_it()
        exec_cmd(
            "helm install {} -f ./helm/ldap-backup/values.yaml ./helm/ldap-backup --namespace={}"
            .format(self.ldap_backup_release_name,
                    self.settings.get("CN_NAMESPACE")))

    def upgrade_gluu(self):
        values_file_parser = Parser(self.upgrade_values_file, True)
        values_file_parser["domain"] = self.settings.get("CN_FQDN")
        values_file_parser["cnCacheType"] = self.settings.get("CN_CACHE_TYPE")
        values_file_parser["cnCouchbaseUrl"] = self.settings.get(
            "COUCHBASE_URL")
        values_file_parser["cnCouchbaseUser"] = self.settings.get(
            "COUCHBASE_USER")
        values_file_parser["cnCouchbaseSuperUser"] = self.settings.get(
            "COUCHBASE_SUPERUSER")
        values_file_parser["cnPersistenceLdapMapping"] = self.settings.get(
            "HYBRID_LDAP_HELD_DATA")
        values_file_parser["cnPersistenceType"] = self.settings.get(
            "PERSISTENCE_BACKEND")
        values_file_parser["source"] = self.settings.get("CN_VERSION")
        values_file_parser["target"] = self.settings.get(
            "CN_UPGRADE_TARGET_VERSION")
        values_file_parser.dump_it()
        exec_cmd(
            "helm install {} -f {} ./helm/gluu-upgrade --namespace={}".format(
                self.settings.get('CN_HELM_RELEASE_NAME'), self.values_file,
                self.settings.get("CN_NAMESPACE")))

    def uninstall_gluu(self):
        exec_cmd("helm delete {} --namespace={}".format(
            self.settings.get('CN_HELM_RELEASE_NAME'),
            self.settings.get("CN_NAMESPACE")))
        exec_cmd("helm delete {} --namespace={}".format(
            self.ldap_backup_release_name, self.settings.get("CN_NAMESPACE")))

    def uninstall_nginx_ingress(self):
        exec_cmd("helm delete {} --namespace={}".format(
            self.settings.get('NGINX_INGRESS_RELEASE_NAME'),
            self.settings.get("NGINX_INGRESS_NAMESPACE")))
Example #10
def main():
    parser = create_parser()
    args = parser.parse_args(sys.argv[1:])

    if not args.subparser_name:
        parser.print_help()
        return
    copy_templates()
    settings = SettingsHandler()
    if not settings.validate():
        for error in settings.errors:
            logger.error(error)
        sys.exit()

    prompts = Prompt()
    prompts.prompt()
    settings = SettingsHandler()

    try:
        if args.subparser_name == "install-ldap-backup":
            gluu = Gluu()
            gluu.install_ldap_backup()

        elif args.subparser_name == "uninstall-gluu":
            from pygluu.kubernetes.terminal.helm import PromptHelm
            prompt_helm = PromptHelm(settings)
            prompt_helm.prompt_helm()
            gluu = Gluu()
            gluu.uninstall_gluu()
            if settings.get("INSTALL_REDIS") == "Y" or settings.get("INSTALL_GLUU_GATEWAY") == "Y":
                from pygluu.kubernetes.kubedb import Kubedb
                kubedb = Kubedb()
                kubedb.uninstall_kubedb()

        elif args.subparser_name == "upgrade-values-yaml":
            from pygluu.kubernetes.terminal.upgrade import PromptUpgrade
            # Jackrabbit clustering is a new feature in 4.2 (compared to 4.1), so if enabled make sure KubeDB is installed.
            gluu = Gluu()
            if settings.get("JACKRABBIT_CLUSTER") == "Y":
                from pygluu.kubernetes.kubedb import Kubedb
                kubedb = Kubedb()
                kubedb.uninstall_kubedb()
                kubedb.install_kubedb()
            prompt_upgrade = PromptUpgrade(settings)
            prompt_upgrade.prompt_upgrade()
            gluu = Gluu()
            logger.info("Patching values.yaml for helm upgrade...")
            gluu.analyze_global_values()
            logger.info("Please find your patched values.yaml at the location ./helm/gluu/values.yaml."
                        "Continue with the steps found at https://gluu.org/docs/gluu-server/latest/upgrade/#helm")

        elif args.subparser_name == "install-couchbase":
            from pygluu.kubernetes.terminal.couchbase import PromptCouchbase
            prompt_couchbase = PromptCouchbase(settings)
            prompt_couchbase.prompt_couchbase()
            couchbase = Couchbase()
            couchbase.install()

        elif args.subparser_name == "install-couchbase-backup":
            from pygluu.kubernetes.terminal.couchbase import PromptCouchbase
            prompt_couchbase = PromptCouchbase(settings)
            prompt_couchbase.prompt_couchbase()
            couchbase = Couchbase()
            couchbase.setup_backup_couchbase()

        elif args.subparser_name == "uninstall-couchbase":
            from pygluu.kubernetes.terminal.couchbase import PromptCouchbase
            prompt_couchbase = PromptCouchbase(settings)
            prompt_couchbase.prompt_couchbase()
            couchbase = Couchbase()
            couchbase.uninstall()

        elif args.subparser_name == "install-kubedb":
            from pygluu.kubernetes.kubedb import Kubedb
            kubedb = Kubedb()
            kubedb.install_kubedb()

        elif args.subparser_name == "generate-settings":
            logger.info("settings.json has been generated")

        elif args.subparser_name == "install":
            from pygluu.kubernetes.terminal.helm import PromptHelm
            prompt_helm = PromptHelm(settings)
            prompt_helm.prompt_helm()
            gluu = Gluu()
            if settings.get("INSTALL_REDIS") == "Y" or \
                    settings.get("INSTALL_GLUU_GATEWAY") == "Y" or \
                    settings.get("JACKRABBIT_CLUSTER") == "Y":
                from pygluu.kubernetes.kubedb import Kubedb
                kubedb = Kubedb()
                kubedb.uninstall_kubedb()
                kubedb.install_kubedb()
            if settings.get("JACKRABBIT_CLUSTER") == "Y":
                from pygluu.kubernetes.postgres import Postgres
                postgres = Postgres()
                postgres.install_postgres()
            if settings.get("INSTALL_REDIS") == "Y":
                from pygluu.kubernetes.redis import Redis
                redis = Redis()
                redis.uninstall_redis()
                redis.install_redis()
            gluu.install_gluu()

        elif args.subparser_name == "uninstall":
            from pygluu.kubernetes.terminal.helm import PromptHelm
            from pygluu.kubernetes.kubedb import Kubedb
            from pygluu.kubernetes.gluugateway import GluuGateway
            prompt_helm = PromptHelm(settings)
            prompt_helm.prompt_helm()
            gluu = Gluu()
            gluugateway = GluuGateway()
            gluu.uninstall_gluu()
            gluu.uninstall_nginx_ingress()
            gluugateway.uninstall_gluu_gateway_dbmode()
            gluugateway.uninstall_gluu_gateway_ui()
            logger.info("Please wait...")
            time.sleep(30)
            kubedb = Kubedb()
            kubedb.uninstall_kubedb()

        elif args.subparser_name == "install-gluu":
            from pygluu.kubernetes.terminal.helm import PromptHelm
            prompt_helm = PromptHelm(settings)
            prompt_helm.prompt_helm()
            gluu = Gluu()
            gluu.uninstall_gluu()
            gluu.install_gluu(install_ingress=False)

        elif args.subparser_name == "install-gg-dbmode":
            from pygluu.kubernetes.terminal.helm import PromptHelm
            from pygluu.kubernetes.postgres import Postgres
            from pygluu.kubernetes.gluugateway import GluuGateway
            prompt_helm = PromptHelm(settings)
            prompt_helm.prompt_helm()
            postgres = Postgres()
            postgres.patch_or_install_postgres()
            gluugateway = GluuGateway()
            gluugateway.install_gluu_gateway_dbmode()
            gluugateway.install_gluu_gateway_ui()

        elif args.subparser_name == "uninstall-gg-dbmode":
            from pygluu.kubernetes.terminal.helm import PromptHelm
            from pygluu.kubernetes.postgres import Postgres
            from pygluu.kubernetes.gluugateway import GluuGateway
            prompt_helm = PromptHelm(settings)
            prompt_helm.prompt_helm()
            postgres = Postgres()
            postgres.uninstall_postgres()
            gluugateway = GluuGateway()
            gluugateway.uninstall_gluu_gateway_dbmode()
            gluugateway.uninstall_gluu_gateway_ui()


    except KeyboardInterrupt:
        print("\n[I] Canceled by user; exiting ...")
Example #11
class Redis(object):
    def __init__(self):
        self.settings = SettingsHandler()
        self.kubernetes = Kubernetes()
        self.timeout = 120
        if self.settings.get("DEPLOYMENT_ARCH") == "gke":
            user_account, stderr, retcode = exec_cmd(
                "gcloud config get-value core/account")
            user_account = str(user_account, "utf-8").strip()

            user, stderr, retcode = exec_cmd("whoami")
            user = str(user, "utf-8").strip()
            cluster_role_binding_name = "cluster-admin-{}".format(user)
            self.kubernetes.create_cluster_role_binding(
                cluster_role_binding_name=cluster_role_binding_name,
                user_name=user_account,
                cluster_role_name="cluster-admin")

    def install_redis(self):
        self.uninstall_redis()
        self.kubernetes.create_namespace(
            name=self.settings.get("REDIS_NAMESPACE"), labels={"app": "redis"})
        if self.settings.get("DEPLOYMENT_ARCH") != "local":
            redis_storage_class = Path("./redis/storageclasses.yaml")
            analyze_storage_class(self.settings, redis_storage_class)
            self.kubernetes.create_objects_from_dict(redis_storage_class)

        redis_configmap = Path("./redis/configmaps.yaml")
        redis_conf_parser = Parser(redis_configmap, "ConfigMap")
        redis_conf_parser["metadata"]["namespace"] = self.settings.get(
            "REDIS_NAMESPACE")
        redis_conf_parser.dump_it()
        self.kubernetes.create_objects_from_dict(redis_configmap)

        redis_yaml = Path("./redis/redis.yaml")
        redis_parser = Parser(redis_yaml, "Redis")
        redis_parser["spec"]["cluster"]["master"] = self.settings.get(
            "REDIS_MASTER_NODES")
        redis_parser["spec"]["cluster"]["replicas"] = self.settings.get(
            "REDIS_NODES_PER_MASTER")
        redis_parser["spec"]["monitor"]["prometheus"][
            "namespace"] = self.settings.get("REDIS_NAMESPACE")
        if self.settings.get("DEPLOYMENT_ARCH") == "local":
            redis_parser["spec"]["storage"][
                "storageClassName"] = "openebs-hostpath"
        redis_parser["metadata"]["namespace"] = self.settings.get(
            "REDIS_NAMESPACE")
        if self.settings.get("DEPLOYMENT_ARCH") in ("microk8s", "minikube"):
            del redis_parser["spec"]["podTemplate"]["spec"]["resources"]
        redis_parser.dump_it()
        self.kubernetes.create_namespaced_custom_object(
            filepath=redis_yaml,
            group="kubedb.com",
            version="v1alpha1",
            plural="redises",
            namespace=self.settings.get("REDIS_NAMESPACE"))

        if not self.settings.get("AWS_LB_TYPE") == "alb":
            self.kubernetes.check_pods_statuses(
                self.settings.get("CN_NAMESPACE"), "app=redis-cluster",
                self.timeout)

    def uninstall_redis(self):
        logger.info("Removing gluu-redis-cluster...")
        logger.info("Removing redis...")
        redis_yaml = Path("./redis/redis.yaml")
        self.kubernetes.delete_namespaced_custom_object(
            filepath=redis_yaml,
            group="kubedb.com",
            version="v1alpha1",
            plural="redises",
            namespace=self.settings.get("REDIS_NAMESPACE"))
        self.kubernetes.delete_storage_class("redis-sc")
        self.kubernetes.delete_service("kubedb",
                                       self.settings.get("REDIS_NAMESPACE"))
Ejemplo n.º 12
0
class Helm(object):
    def __init__(self):
        self.values_file = Path("./helm/gluu/values.yaml").resolve()
        self.settings = SettingsHandler()
        self.kubernetes = Kubernetes()
        self.ldap_backup_release_name = self.settings.get(
            'CN_HELM_RELEASE_NAME') + "-ldap-backup"
        if self.settings.get("DEPLOYMENT_ARCH") == "gke":
            # Clusterrolebinding needs to be created for gke with CB or kubeDB installed
            if self.settings.get("INSTALL_REDIS") == "Y" or \
                    self.settings.get("INSTALL_GLUU_GATEWAY") == "Y" or \
                    self.settings.get("INSTALL_COUCHBASE") == "Y":
                user_account, stderr, retcode = exec_cmd(
                    "gcloud config get-value core/account")
                user_account = str(user_account, "utf-8").strip()

                user, stderr, retcode = exec_cmd("whoami")
                user = str(user, "utf-8").strip()
                cluster_role_binding_name = "cluster-admin-{}".format(user)
                self.kubernetes.create_cluster_role_binding(
                    cluster_role_binding_name=cluster_role_binding_name,
                    user_name=user_account,
                    cluster_role_name="cluster-admin")

    def prepare_alb(self):
        ingress_parser = Parser("./alb/ingress.yaml", "Ingress")
        ingress_parser["spec"]["rules"][0]["host"] = self.settings.get(
            "CN_FQDN")
        ingress_parser["metadata"]["annotations"]["alb.ingress.kubernetes.io/certificate-arn"] = \
            self.settings.get("ARN_AWS_IAM")
        if not self.settings.get("ARN_AWS_IAM"):
            del ingress_parser["metadata"]["annotations"][
                "alb.ingress.kubernetes.io/certificate-arn"]

        # Iterate over a copy of the path list so deleting disabled service
        # paths below does not skip the entry that follows a removed one
        for path in list(ingress_parser["spec"]["rules"][0]["http"]["paths"]):
            service_name = path["backend"]["serviceName"]
            if self.settings.get(
                    "ENABLE_CASA") != "Y" and service_name == "casa":
                path_index = ingress_parser["spec"]["rules"][0]["http"][
                    "paths"].index(path)
                del ingress_parser["spec"]["rules"][0]["http"]["paths"][
                    path_index]

            if self.settings.get("ENABLE_OXSHIBBOLETH"
                                 ) != "Y" and service_name == "oxshibboleth":
                path_index = ingress_parser["spec"]["rules"][0]["http"][
                    "paths"].index(path)
                del ingress_parser["spec"]["rules"][0]["http"]["paths"][
                    path_index]

            if self.settings.get("ENABLE_OXPASSPORT"
                                 ) != "Y" and service_name == "oxpassport":
                path_index = ingress_parser["spec"]["rules"][0]["http"][
                    "paths"].index(path)
                del ingress_parser["spec"]["rules"][0]["http"]["paths"][
                    path_index]

            if self.settings.get("INSTALL_GLUU_GATEWAY"
                                 ) != "Y" and service_name == "gg-kong-ui":
                path_index = ingress_parser["spec"]["rules"][0]["http"][
                    "paths"].index(path)
                del ingress_parser["spec"]["rules"][0]["http"]["paths"][
                    path_index]
        ingress_parser.dump_it()

    def deploy_alb(self):
        alb_ingress = Path("./alb/ingress.yaml")
        self.kubernetes.create_objects_from_dict(
            alb_ingress, self.settings.get("CN_NAMESPACE"))
        if self.settings.get("IS_CN_FQDN_REGISTERED") != "Y":
            prompt = input(
                "Please input the DNS name of the Application Load Balancer created, as shown in the AWS console: "
            )
            lb_hostname = prompt
            while True:
                try:
                    if lb_hostname:
                        break
                    lb_hostname = self.kubernetes.read_namespaced_ingress(
                        name="gluu", namespace="gluu"
                    ).status.load_balancer.ingress[0].hostname
                except TypeError:
                    logger.info("Waiting for loadbalancer address..")
                    time.sleep(10)
            self.settings.set("LB_ADD", lb_hostname)

    def wait_for_nginx_add(self):
        hostname_ip = None
        while True:
            try:
                if hostname_ip:
                    break
                if self.settings.get("DEPLOYMENT_ARCH") == "eks":
                    hostname_ip = self.kubernetes.read_namespaced_service(
                        name=self.settings.get('NGINX_INGRESS_RELEASE_NAME') +
                        "-ingress-nginx-controller",
                        namespace=self.settings.get("NGINX_INGRESS_NAMESPACE")
                    ).status.load_balancer.ingress[0].hostname
                    self.settings.set("LB_ADD", hostname_ip)
                    if self.settings.get("AWS_LB_TYPE") == "nlb":
                        try:
                            ip_static = socket.gethostbyname(str(hostname_ip))
                            if ip_static:
                                break
                        except socket.gaierror:
                            logger.info("Address has not received an ip yet.")
                elif self.settings.get("DEPLOYMENT_ARCH") == "local":
                    self.settings.set(
                        "LB_ADD",
                        self.settings.get('NGINX_INGRESS_RELEASE_NAME') +
                        "-nginx-ingress-controller." +
                        self.settings.get("NGINX_INGRESS_NAMESPACE") +
                        ".svc.cluster.local")
                    break
                else:
                    hostname_ip = self.kubernetes.read_namespaced_service(
                        name=self.settings.get('NGINX_INGRESS_RELEASE_NAME') +
                        "-ingress-nginx-controller",
                        namespace=self.settings.get("NGINX_INGRESS_NAMESPACE")
                    ).status.load_balancer.ingress[0].ip
                    self.settings.set("HOST_EXT_IP", hostname_ip)
            except (TypeError, AttributeError):
                logger.info("Waiting for address..")
                time.sleep(10)

    def check_install_nginx_ingress(self, install_ingress=True):
        """
        Helm installs nginx ingress, or waits to receive an ip or address
        :param install_ingress:
        """
        if install_ingress:
            self.kubernetes.delete_custom_resource(
                "virtualservers.k8s.nginx.org")
            self.kubernetes.delete_custom_resource(
                "virtualserverroutes.k8s.nginx.org")
            self.kubernetes.delete_cluster_role("ingress-nginx-nginx-ingress")
            self.kubernetes.delete_cluster_role_binding(
                "ingress-nginx-nginx-ingress")
            self.kubernetes.create_namespace(
                name=self.settings.get("NGINX_INGRESS_NAMESPACE"),
                labels={"app": "ingress-nginx"})
            self.kubernetes.delete_cluster_role(
                self.settings.get('NGINX_INGRESS_RELEASE_NAME') +
                "-nginx-ingress-controller")
            self.kubernetes.delete_cluster_role_binding(
                self.settings.get('NGINX_INGRESS_RELEASE_NAME') +
                "-nginx-ingress-controller")
            try:
                exec_cmd(
                    "helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx"
                )
                exec_cmd("helm repo add stable https://charts.helm.sh/stable")
                exec_cmd("helm repo update")
            except FileNotFoundError:
                logger.error(
                    "Helm v3 is not installed. Please install it to continue "
                    "https://helm.sh/docs/intro/install/")
                raise SystemExit(1)
        command = "helm install {} ingress-nginx/ingress-nginx --namespace={} ".format(
            self.settings.get('NGINX_INGRESS_RELEASE_NAME'),
            self.settings.get("NGINX_INGRESS_NAMESPACE"))
        if self.settings.get("DEPLOYMENT_ARCH") == "minikube":
            exec_cmd("minikube addons enable ingress")
        if self.settings.get("DEPLOYMENT_ARCH") == "eks":
            if self.settings.get("AWS_LB_TYPE") == "nlb":
                if install_ingress:
                    nlb_override_values_file = Path(
                        "./nginx/aws/aws-nlb-override-values.yaml").resolve()
                    nlb_values = " --values {}".format(
                        nlb_override_values_file)
                    exec_cmd(command + nlb_values)
            else:
                if self.settings.get("USE_ARN") == "Y":
                    if install_ingress:
                        elb_override_values_file = Path(
                            "./nginx/aws/aws-elb-override-values.yaml"
                        ).resolve()
                        elb_file_parser = Parser(elb_override_values_file,
                                                 True)
                        elb_file_parser["controller"]["service"][
                            "annotations"].update({
                                "service.beta.kubernetes.io/aws-load-balancer-ssl-cert":
                                self.settings.get("ARN_AWS_IAM")
                            })
                        elb_file_parser["controller"]["config"][
                            "proxy-real-ip-cidr"] = self.settings.get(
                                "VPC_CIDR")
                        elb_file_parser.dump_it()
                        elb_values = " --values {}".format(
                            elb_override_values_file)
                        exec_cmd(command + elb_values)
                else:
                    if install_ingress:
                        exec_cmd(command)

        if self.settings.get("DEPLOYMENT_ARCH") in ("gke", "aks", "do"):
            if install_ingress:
                cloud_override_values_file = Path(
                    "./nginx/cloud/cloud-override-values.yaml").resolve()
                cloud_values = " --values {}".format(
                    cloud_override_values_file)
                exec_cmd(command + cloud_values)
        if self.settings.get("DEPLOYMENT_ARCH") == "local":
            if install_ingress:
                baremetal_override_values_file = Path(
                    "./nginx/baremetal/baremetal-override-values.yaml"
                ).resolve()
                baremetal_values = " --values {}".format(
                    baremetal_override_values_file)
                exec_cmd(command + baremetal_values)
        if self.settings.get("DEPLOYMENT_ARCH") not in ("microk8s",
                                                        "minikube"):
            logger.info("Waiting for nginx to be prepared...")
            time.sleep(60)
            self.wait_for_nginx_add()

    def analyze_global_values(self):
        """
        Updates the Gluu values.yaml with the information gathered from the prompts
        """
        values_file_parser = Parser(self.values_file, True)
        if self.settings.get("DEPLOYMENT_ARCH") == "minikube":
            provisioner = "k8s.io/minikube-hostpath"
        elif self.settings.get("DEPLOYMENT_ARCH") == "eks":
            provisioner = "kubernetes.io/aws-ebs"
        elif self.settings.get("DEPLOYMENT_ARCH") == "gke":
            provisioner = "kubernetes.io/gce-pd"
        elif self.settings.get("DEPLOYMENT_ARCH") == "aks":
            provisioner = "kubernetes.io/azure-disk"
        elif self.settings.get("DEPLOYMENT_ARCH") == "do":
            provisioner = "dobs.csi.digitalocean.com"
        elif self.settings.get("DEPLOYMENT_ARCH") == "local":
            provisioner = "openebs.io/local"
        else:
            provisioner = "microk8s.io/hostpath"
        values_file_parser["global"]["provisioner"] = provisioner
        values_file_parser["global"]["lbIp"] = self.settings.get("HOST_EXT_IP")
        values_file_parser["global"]["domain"] = self.settings.get("CN_FQDN")
        values_file_parser["global"]["isDomainRegistered"] = "false"
        if self.settings.get("IS_CN_FQDN_REGISTERED") == "Y":
            values_file_parser["global"]["isDomainRegistered"] = "true"
        if self.settings.get("CN_CACHE_TYPE") == "REDIS":
            values_file_parser["config"]["configmap"][
                "cnRedisUrl"] = self.settings.get("REDIS_URL")
            values_file_parser["config"]["configmap"][
                "cnRedisType"] = self.settings.get("REDIS_TYPE")
            values_file_parser["config"]["configmap"][
                "cnRedisUseSsl"] = self.settings.get("REDIS_USE_SSL")
            values_file_parser["config"]["configmap"]["cnRedisSslTruststore"] = \
                self.settings.get("REDIS_SSL_TRUSTSTORE")
            values_file_parser["config"]["configmap"]["cnRedisSentinelGroup"] = \
                self.settings.get("REDIS_SENTINEL_GROUP")
            values_file_parser["config"]["redisPass"] = self.settings.get(
                "REDIS_PW")
        if self.settings.get("DEPLOYMENT_ARCH") in ("microk8s", "minikube") \
                or self.settings.get("TEST_ENVIRONMENT") == "Y":
            values_file_parser["global"]["cloud"]["testEnviroment"] = True
        values_file_parser["config"]["configmap"][
            "lbAddr"] = self.settings.get("LB_ADD")
        values_file_parser["global"]["cnPersistenceType"] = self.settings.get(
            "PERSISTENCE_BACKEND")
        values_file_parser["config"]["configmap"][
            "cnPersistenceType"] = self.settings.get("PERSISTENCE_BACKEND")
        values_file_parser["config"]["configmap"]["cnPersistenceLdapMapping"] = \
            self.settings.get("HYBRID_LDAP_HELD_DATA")
        if self.settings.get("PERSISTENCE_BACKEND") != "ldap":
            values_file_parser["config"]["configmap"][
                "cnCouchbaseUrl"] = self.settings.get("COUCHBASE_URL")
            values_file_parser["config"]["configmap"][
                "cnCouchbaseUser"] = self.settings.get("COUCHBASE_USER")
            values_file_parser["config"]["configmap"][
                "cnCouchbaseIndexNumReplica"] = self.settings.get(
                    "COUCHBASE_INDEX_NUM_REPLICA")
            values_file_parser["config"]["configmap"]["cnCouchbaseSuperUser"] = \
                self.settings.get("COUCHBASE_SUPERUSER")
            values_file_parser["config"]["configmap"][
                "cnCouchbaseCrt"] = self.settings.get("COUCHBASE_CRT")
            values_file_parser["config"]["configmap"][
                "cnCouchbasePass"] = self.settings.get("COUCHBASE_PASSWORD")
            values_file_parser["config"]["configmap"]["cnCouchbaseSuperUserPass"] = \
                self.settings.get("COUCHBASE_SUPERUSER_PASSWORD")
        values_file_parser["global"]["auth-server"]["enabled"] = True
        values_file_parser["global"]["persistence"]["enabled"] = True
        values_file_parser["global"]["oxtrust"]["enabled"] = True
        values_file_parser["global"]["config"]["enabled"] = True
        values_file_parser["global"]["opendj"]["enabled"] = False
        values_file_parser["global"]["fido2"]["enabled"] = False
        if self.settings.get("ENABLE_FIDO2") == "Y":
            values_file_parser["global"]["fido2"]["enabled"] = True
        values_file_parser["global"]["scim"]["enabled"] = False
        if self.settings.get("ENABLE_SCIM") == "Y":
            values_file_parser["global"]["scim"]["enabled"] = True
        if self.settings.get("ENABLE_CONFIG_API") == "Y":
            values_file_parser["global"]["config-api"]["enabled"] = True
        if self.settings.get("INSTALL_JACKRABBIT") == "Y":
            values_file_parser["global"]["jackrabbit"]["enabled"] = True
            values_file_parser["config"]["configmap"][
                "cnJackrabbitUrl"] = self.settings.get("JACKRABBIT_URL")
            values_file_parser["jackrabbit"]["secrets"]["cnJackrabbitAdminPass"] = \
                self.settings.get("JACKRABBIT_ADMIN_PASSWORD")
            values_file_parser["jackrabbit"]["secrets"]["cnJackrabbitPostgresPass"] = \
                self.settings.get("JACKRABBIT_PG_PASSWORD")
        if self.settings.get("USE_ISTIO_INGRESS") == "Y":
            values_file_parser["global"]["istio"]["ingress"] = True
            values_file_parser["global"]["istio"]["enabled"] = True
            values_file_parser["global"]["istio"][
                "namespace"] = self.settings.get("ISTIO_SYSTEM_NAMESPACE")
        elif self.settings.get("AWS_LB_TYPE") == "alb":
            values_file_parser["global"]["alb"]["ingress"] = True
        else:
            values_file_parser["nginx-ingress"]["ingress"]["enabled"] = True
            values_file_parser["nginx-ingress"]["ingress"]["hosts"] = [
                self.settings.get("CN_FQDN")
            ]
            values_file_parser["nginx-ingress"]["ingress"]["tls"][0][
                "hosts"] = [self.settings.get("CN_FQDN")]
        if self.settings.get("USE_ISTIO") == "Y":
            values_file_parser["global"]["istio"]["enabled"] = True

        values_file_parser["global"]["cnJackrabbitCluster"] = "false"
        if self.settings.get("JACKRABBIT_CLUSTER") == "Y":
            values_file_parser["global"]["cnJackrabbitCluster"] = "true"
            values_file_parser["config"]["configmap"]["cnJackrabbitAdminId"] = \
                self.settings.get("JACKRABBIT_ADMIN_ID")
            values_file_parser["config"]["configmap"]["cnJackrabbitPostgresUser"] = \
                self.settings.get("JACKRABBIT_PG_USER")
            values_file_parser["config"]["configmap"]["cnJackrabbitPostgresDatabaseName"] = \
                self.settings.get("JACKRABBIT_DATABASE")
            values_file_parser["config"]["configmap"]["cnJackrabbitPostgresHost"] = \
                self.settings.get("POSTGRES_URL")
            values_file_parser["config"]["configmap"]["cnJackrabbitPostgresUser"] = \
                self.settings.get("JACKRABBIT_PG_USER")

        if self.settings.get("PERSISTENCE_BACKEND") == "hybrid" or \
                self.settings.get("PERSISTENCE_BACKEND") == "ldap":
            values_file_parser["global"]["opendj"]["enabled"] = True

        values_file_parser["global"]["oxshibboleth"]["enabled"] = False
        if self.settings.get("ENABLE_OXSHIBBOLETH") == "Y":
            values_file_parser["global"]["oxshibboleth"]["enabled"] = True
            values_file_parser["config"]["configmap"][
                "cnSyncShibManifests"] = True

        values_file_parser["global"]["client-api"]["enabled"] = False
        if self.settings.get("ENABLE_CLIENT_API") == "Y":
            values_file_parser["global"]["client-api"]["enabled"] = True
            values_file_parser["config"]["configmap"]["jansClientApiApplicationCertCn"] = \
                self.settings.get("CLIENT_API_APPLICATION_KEYSTORE_CN")
            values_file_parser["config"]["configmap"][
                "jansClientApiAdminCertCn"] = self.settings.get(
                    "CLIENT_API_ADMIN_KEYSTORE_CN")

        values_file_parser["opendj"]["cnRedisEnabled"] = False
        if self.settings.get("CN_CACHE_TYPE") == "REDIS":
            values_file_parser["opendj"]["cnRedisEnabled"] = True

        values_file_parser["global"]["nginx-ingress"]["enabled"] = True

        values_file_parser["global"]["cr-rotate"]["enabled"] = False
        if self.settings.get("ENABLE_CACHE_REFRESH") == "Y":
            values_file_parser["global"]["cr-rotate"]["enabled"] = True

        values_file_parser["global"]["auth-server-key-rotation"][
            "enabled"] = False
        if self.settings.get("ENABLE_AUTH_SERVER_KEY_ROTATE") == "Y":
            values_file_parser["global"]["auth-server-key-rotation"][
                "enabled"] = True
            values_file_parser["auth-server-key-rotation"][
                "keysLife"] = self.settings.get("AUTH_SERVER_KEYS_LIFE")

        values_file_parser["config"]["orgName"] = self.settings.get("ORG_NAME")
        values_file_parser["config"]["email"] = self.settings.get("EMAIL")
        values_file_parser["config"]["adminPass"] = self.settings.get(
            "ADMIN_PW")
        values_file_parser["config"]["ldapPass"] = self.settings.get("LDAP_PW")
        values_file_parser["config"]["countryCode"] = self.settings.get(
            "COUNTRY_CODE")
        values_file_parser["config"]["state"] = self.settings.get("STATE")
        values_file_parser["config"]["city"] = self.settings.get("CITY")
        values_file_parser["config"]["configmap"][
            "cnCacheType"] = self.settings.get("CN_CACHE_TYPE")
        values_file_parser["opendj"]["replicas"] = self.settings.get(
            "LDAP_REPLICAS")
        values_file_parser["opendj"]["persistence"][
            "size"] = self.settings.get("LDAP_STORAGE_SIZE")
        if self.settings.get("ENABLE_OXTRUST_API_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"][
                "cnOxtrustApiEnabled"] = True
        if self.settings.get("ENABLE_OXTRUST_TEST_MODE_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"][
                "cnOxtrustApiTestMode"] = True
        if self.settings.get("ENABLE_CASA_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"]["cnCasaEnabled"] = True
            values_file_parser["config"]["configmap"][
                "cnSyncCasaManifests"] = True

        if self.settings.get("ENABLE_OXPASSPORT_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"][
                "cnPassportEnabled"] = True
        if self.settings.get("ENABLE_RADIUS_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"]["cnRadiusEnabled"] = True
        if self.settings.get("ENABLE_SAML_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"]["cnSamlEnabled"] = True

        values_file_parser["oxpassport"]["resources"] = {}
        values_file_parser["casa"]["image"]["repository"] = self.settings.get(
            "CASA_IMAGE_NAME")
        values_file_parser["casa"]["image"]["tag"] = self.settings.get(
            "CASA_IMAGE_TAG")
        values_file_parser["config"]["image"][
            "repository"] = self.settings.get("CONFIG_IMAGE_NAME")
        values_file_parser["config"]["image"]["tag"] = self.settings.get(
            "CONFIG_IMAGE_TAG")
        values_file_parser["cr-rotate"]["image"][
            "repository"] = self.settings.get(
                "CACHE_REFRESH_ROTATE_IMAGE_NAME")
        values_file_parser["cr-rotate"]["image"]["tag"] = self.settings.get(
            "CACHE_REFRESH_ROTATE_IMAGE_TAG")
        values_file_parser["auth-server-key-rotation"]["image"][
            "repository"] = self.settings.get("CERT_MANAGER_IMAGE_NAME")
        values_file_parser["auth-server-key-rotation"]["image"][
            "tag"] = self.settings.get("CERT_MANAGER_IMAGE_TAG")
        values_file_parser["opendj"]["image"][
            "repository"] = self.settings.get("LDAP_IMAGE_NAME")
        values_file_parser["opendj"]["image"]["tag"] = self.settings.get(
            "LDAP_IMAGE_TAG")
        values_file_parser["persistence"]["image"][
            "repository"] = self.settings.get("PERSISTENCE_IMAGE_NAME")
        values_file_parser["persistence"]["image"]["tag"] = self.settings.get(
            "PERSISTENCE_IMAGE_TAG")
        values_file_parser["auth-server"]["image"][
            "repository"] = self.settings.get("AUTH_SERVER_IMAGE_NAME")
        values_file_parser["auth-server"]["image"]["tag"] = self.settings.get(
            "AUTH_SERVER_IMAGE_TAG")
        values_file_parser["client-api"]["image"][
            "repository"] = self.settings.get("CLIENT_API_IMAGE_NAME")
        values_file_parser["client-api"]["image"]["tag"] = self.settings.get(
            "CLIENT_API_IMAGE_TAG")
        values_file_parser["oxpassport"]["image"][
            "repository"] = self.settings.get("OXPASSPORT_IMAGE_NAME")
        values_file_parser["oxpassport"]["image"]["tag"] = self.settings.get(
            "OXPASSPORT_IMAGE_TAG")
        values_file_parser["oxshibboleth"]["image"][
            "repository"] = self.settings.get("OXSHIBBOLETH_IMAGE_NAME")
        values_file_parser["oxshibboleth"]["image"]["tag"] = self.settings.get(
            "OXSHIBBOLETH_IMAGE_TAG")
        values_file_parser["jackrabbit"]["image"][
            "repository"] = self.settings.get("JACKRABBIT_IMAGE_NAME")
        values_file_parser["jackrabbit"]["image"]["tag"] = self.settings.get(
            "JACKRABBIT_IMAGE_TAG")
        values_file_parser["oxtrust"]["image"][
            "repository"] = self.settings.get("OXTRUST_IMAGE_NAME")
        values_file_parser["oxtrust"]["image"]["tag"] = self.settings.get(
            "OXTRUST_IMAGE_TAG")
        values_file_parser["radius"]["image"][
            "repository"] = self.settings.get("RADIUS_IMAGE_NAME")
        values_file_parser["radius"]["image"]["tag"] = self.settings.get(
            "RADIUS_IMAGE_TAG")
        values_file_parser.dump_it()

    def install_gluu(self, install_ingress=True):
        """
        Helm install Gluu
        :param install_ingress:
        """
        labels = {"app": "gluu"}
        if self.settings.get("USE_ISTIO") == "Y":
            labels = {"app": "gluu", "istio-injection": "enabled"}
        self.kubernetes.create_namespace(
            name=self.settings.get("CN_NAMESPACE"), labels=labels)
        if self.settings.get(
                "PERSISTENCE_BACKEND") != "ldap" and self.settings.get(
                    "INSTALL_COUCHBASE") == "Y":
            couchbase_app = Couchbase()
            couchbase_app.uninstall()
            couchbase_app = Couchbase()
            couchbase_app.install()
            self.settings = SettingsHandler()
        if self.settings.get("AWS_LB_TYPE") == "alb":
            self.prepare_alb()
            self.deploy_alb()
        if self.settings.get("AWS_LB_TYPE") != "alb" and self.settings.get(
                "USE_ISTIO_INGRESS") != "Y":
            self.check_install_nginx_ingress(install_ingress)
        self.analyze_global_values()
        try:
            exec_cmd("helm install {} -f {} ./helm/gluu --namespace={}".format(
                self.settings.get('CN_HELM_RELEASE_NAME'), self.values_file,
                self.settings.get("CN_NAMESPACE")))

            if self.settings.get("PERSISTENCE_BACKEND") == "hybrid" or \
                    self.settings.get("PERSISTENCE_BACKEND") == "ldap":
                values_file = Path("./helm/ldap-backup/values.yaml").resolve()
                values_file_parser = Parser(values_file, True)
                values_file_parser["ldapPass"] = self.settings.get("LDAP_PW")
                values_file_parser.dump_it()

                exec_cmd(
                    "helm install {} -f ./helm/ldap-backup/values.yaml ./helm/ldap-backup --namespace={}"
                    .format(self.ldap_backup_release_name,
                            self.settings.get("CN_NAMESPACE")))
        except FileNotFoundError:
            logger.error(
                "Helm v3 is not installed. Please install it to continue "
                "https://helm.sh/docs/intro/install/")
            raise SystemExit(1)

    def install_gluu_gateway_ui(self):
        self.uninstall_gluu_gateway_ui()
        self.kubernetes.create_namespace(
            name=self.settings.get("GLUU_GATEWAY_UI_NAMESPACE"),
            labels={"APP_NAME": "gluu-gateway-ui"})
        try:
            # Try to get gluu cert + key
            ssl_cert = self.kubernetes.read_namespaced_secret(
                "gluu", self.settings.get("CN_NAMESPACE")).data["ssl_cert"]
            ssl_key = self.kubernetes.read_namespaced_secret(
                "gluu", self.settings.get("CN_NAMESPACE")).data["ssl_key"]

            self.kubernetes.patch_or_create_namespaced_secret(
                name="tls-certificate",
                namespace=self.settings.get("GLUU_GATEWAY_UI_NAMESPACE"),
                literal="tls.crt",
                value_of_literal=ssl_cert,
                secret_type="kubernetes.io/tls",
                second_literal="tls.key",
                value_of_second_literal=ssl_key)

        except Exception:
            logger.error(
                "Could not read the Gluu secret. Please check the config job pod logs. "
                "GG-UI will deploy but will fail. Please mount the crt and key inside the gg-ui deployment")
        client_api_server_url = "https://{}.{}.svc.cluster.local:8443".format(
            self.settings.get("CLIENT_API_APPLICATION_KEYSTORE_CN"),
            self.settings.get("CN_NAMESPACE"))
        values_file = Path("./helm/gluu-gateway-ui/values.yaml").resolve()
        values_file_parser = Parser(values_file, True)
        values_file_parser["cloud"]["isDomainRegistered"] = "false"
        if self.settings.get("IS_CN_FQDN_REGISTERED") == "Y":
            values_file_parser["cloud"]["isDomainRegistered"] = "true"
        if self.settings.get(
                "DEPLOYMENT_ARCH") == "microk8s" or self.settings.get(
                    "DEPLOYMENT_ARCH") == "minikube":
            values_file_parser["cloud"]["enabled"] = False
        values_file_parser["cloud"]["provider"] = self.settings.get(
            "DEPLOYMENT_ARCH")
        values_file_parser["dbUser"] = self.settings.get(
            "GLUU_GATEWAY_UI_PG_USER")
        values_file_parser[
            "kongAdminUrl"] = "https://{}-kong-admin.{}.svc.cluster.local:8444".format(
                self.settings.get("KONG_HELM_RELEASE_NAME"),
                self.settings.get("KONG_NAMESPACE"))
        values_file_parser["dbHost"] = self.settings.get("POSTGRES_URL")
        values_file_parser["dbDatabase"] = self.settings.get(
            "GLUU_GATEWAY_UI_DATABASE")
        values_file_parser["clientApiServerUrl"] = client_api_server_url
        values_file_parser["image"]["repository"] = self.settings.get(
            "GLUU_GATEWAY_UI_IMAGE_NAME")
        values_file_parser["image"]["tag"] = self.settings.get(
            "GLUU_GATEWAY_UI_IMAGE_TAG")
        values_file_parser["loadBalancerIp"] = self.settings.get("HOST_EXT_IP")
        values_file_parser["dbPassword"] = self.settings.get(
            "GLUU_GATEWAY_UI_PG_PASSWORD")
        values_file_parser["opServerUrl"] = "https://" + self.settings.get(
            "CN_FQDN")
        values_file_parser["ggHost"] = self.settings.get("CN_FQDN") + "/gg-ui/"
        values_file_parser["ggUiRedirectUrlHost"] = self.settings.get(
            "CN_FQDN") + "/gg-ui/"
        # Register new client if one was not provided
        if not values_file_parser["clientApiId"] or \
                not values_file_parser["clientId"] or \
                not values_file_parser["clientSecret"]:
            client_api_id, client_id, client_secret = register_op_client(
                self.settings.get("CN_NAMESPACE"), "konga-client",
                self.settings.get("CN_FQDN"), client_api_server_url,
                self.settings.get('CN_HELM_RELEASE_NAME'))
            if not client_api_id:
                values_file_parser.dump_it()
                logger.error(
                    "Konga client registration failed, so the installation has stopped."
                    " Please register the client manually as suggested above, enter the"
                    " returned values for clientApiId, clientId, and clientSecret inside"
                    " ./helm/gluu-gateway-ui/values.yaml, then run "
                    "helm install {} -f ./helm/gluu-gateway-ui/values.yaml ./helm/gluu-gateway-ui "
                    "--namespace={}".format(
                        self.settings.get('GLUU_GATEWAY_UI_HELM_RELEASE_NAME'),
                        self.settings.get("GLUU_GATEWAY_UI_NAMESPACE")))
                raise SystemExit(1)
            values_file_parser["clientApiId"] = client_api_id
            values_file_parser["clientId"] = client_id
            values_file_parser["clientSecret"] = client_secret

        values_file_parser.dump_it()
        exec_cmd(
            "helm install {} -f ./helm/gluu-gateway-ui/values.yaml ./helm/gluu-gateway-ui --namespace={}"
            .format(self.settings.get('GLUU_GATEWAY_UI_HELM_RELEASE_NAME'),
                    self.settings.get("GLUU_GATEWAY_UI_NAMESPACE")))

    def install_gluu_gateway_dbmode(self):
        self.uninstall_gluu_gateway_dbmode()
        self.kubernetes.create_namespace(
            name=self.settings.get("KONG_NAMESPACE"),
            labels={"app": "ingress-kong"})
        encoded_kong_pass_bytes = base64.b64encode(
            self.settings.get("KONG_PG_PASSWORD").encode("utf-8"))
        encoded_kong_pass_string = str(encoded_kong_pass_bytes, "utf-8")
        self.kubernetes.patch_or_create_namespaced_secret(
            name="kong-postgres-pass",
            namespace=self.settings.get("KONG_NAMESPACE"),
            literal="KONG_PG_PASSWORD",
            value_of_literal=encoded_kong_pass_string)
        exec_cmd("helm repo add kong https://charts.konghq.com")
        exec_cmd("helm repo update")
        exec_cmd(
            "helm install {} kong/kong "
            "--set ingressController.installCRDs=false "
            "--set image.repository={} "
            "--set image.tag={} "
            "--set env.database=postgres "
            "--set env.pg_user={} "
            "--set env.pg_password.valueFrom.secretKeyRef.name=kong-postgres-pass "
            "--set env.pg_password.valueFrom.secretKeyRef.key=KONG_PG_PASSWORD "
            "--set env.pg_host={} "
            "--set admin.enabled=true "
            "--set admin.type=ClusterIP "
            "--namespace={}".format(
                self.settings.get("KONG_HELM_RELEASE_NAME"),
                self.settings.get("GLUU_GATEWAY_IMAGE_NAME"),
                self.settings.get("GLUU_GATEWAY_IMAGE_TAG"),
                self.settings.get("KONG_PG_USER"),
                self.settings.get("POSTGRES_URL"),
                self.settings.get("KONG_NAMESPACE")))

    def install_kubedb(self):
        self.uninstall_kubedb()
        self.kubernetes.create_namespace(name="gluu-kubedb",
                                         labels={"app": "kubedb"})
        try:
            exec_cmd(
                "helm repo add appscode https://charts.appscode.com/stable/")
            exec_cmd("helm repo update")
            exec_cmd(
                "helm install kubedb-operator appscode/kubedb  --version v0.13.0-rc.0 "
                "--namespace gluu-kubedb")
            self.kubernetes.check_pods_statuses("gluu-kubedb", "app=kubedb")
            exec_cmd(
                "helm install kubedb-catalog appscode/kubedb-catalog  --version v0.13.0-rc.0 "
                "--namespace gluu-kubedb")
        except FileNotFoundError:
            logger.error(
                "Helm v3 is not installed. Please install it to continue "
                "https://helm.sh/docs/intro/install/")
            raise SystemExit(1)

    def uninstall_gluu_gateway_dbmode(self):
        exec_cmd("helm delete {} --namespace={}".format(
            self.settings.get('KONG_HELM_RELEASE_NAME'),
            self.settings.get("KONG_NAMESPACE")))

    def uninstall_gluu_gateway_ui(self):
        exec_cmd("helm delete {} --namespace={}".format(
            self.settings.get('GLUU_GATEWAY_UI_HELM_RELEASE_NAME'),
            self.settings.get("GLUU_GATEWAY_UI_NAMESPACE")))

    def uninstall_kubedb(self):
        logger.info("Deleting KubeDB...This may take a little while.")
        try:
            exec_cmd(
                "helm repo add appscode https://charts.appscode.com/stable/")
            exec_cmd("helm repo update")
            exec_cmd("helm delete kubedb-operator --namespace gluu-kubedb")
            exec_cmd("helm delete kubedb-catalog --namespace gluu-kubedb")
            time.sleep(20)
        except FileNotFoundError:
            logger.error(
                "Helm v3 is not installed. Please install it to continue "
                "https://helm.sh/docs/intro/install/")
            raise SystemExit(1)

    def uninstall_gluu(self):
        exec_cmd("helm delete {} --namespace={}".format(
            self.settings.get('CN_HELM_RELEASE_NAME'),
            self.settings.get("CN_NAMESPACE")))
        exec_cmd("helm delete {} --namespace={}".format(
            self.ldap_backup_release_name, self.settings.get("CN_NAMESPACE")))

    def uninstall_nginx_ingress(self):
        exec_cmd("helm delete {} --namespace={}".format(
            self.settings.get('NGINX_INGRESS_RELEASE_NAME'),
            self.settings.get("NGINX_INGRESS_NAMESPACE")))
Ejemplo n.º 13
0
def gather_ip():
    """Attempts to detect and return ip automatically.
    Also set node names, zones, and addresses in a cloud deployment.

    :return:
    """
    from pygluu.kubernetes.kubeapi import Kubernetes
    from pygluu.kubernetes.settings import SettingsHandler
    import ipaddress
    kubernetes = Kubernetes()
    settings = SettingsHandler()
    logger.info("Determining OS type and attempting to gather external IP address")
    ip = ""

    # detect IP address automatically (if possible)
    try:
        node_ip_list = []
        node_zone_list = []
        node_name_list = []
        node_list = kubernetes.list_nodes().items

        for node in node_list:
            node_name = node.metadata.name
            node_addresses = kubernetes.read_node(name=node_name).status.addresses
            if settings.get("DEPLOYMENT_ARCH") in ("microk8s", "minikube"):
                for add in node_addresses:
                    if add.type == "InternalIP":
                        ip = add.address
                        node_ip_list.append(ip)
            else:
                for add in node_addresses:
                    if add.type == "ExternalIP":
                        ip = add.address
                        node_ip_list.append(ip)
                # Digital Ocean does not provide zone support yet
                if settings.get("DEPLOYMENT_ARCH") not in ("do", "local"):
                    node_zone = node.metadata.labels["failure-domain.beta.kubernetes.io/zone"]
                    node_zone_list.append(node_zone)
                node_name_list.append(node_name)

        settings.set("NODES_NAMES", node_name_list)
        settings.set("NODES_ZONES", node_zone_list)
        settings.set("NODES_IPS", node_ip_list)

        if settings.get("DEPLOYMENT_ARCH") in ("eks", "gke", "do", "local", "aks"):
            # Assign a placeholder IP. It will be replaced later by the update ip script, the GKE external IP, or the NLB address
            return "22.22.22.22"

    except Exception as e:
        logger.error(e)
        # prompt for user-inputted IP address
        logger.warning("Cannot determine IP address")
        ip = click.prompt("Please input the host's external IP address")

    if click.confirm(f"Is this the correct external IP address: {ip}", default=True):
        return ip

    while True:
        ip = click.prompt("Please input the host's external IP address")
        try:
            ipaddress.ip_address(ip)
            return ip
        except ValueError as exc:
            # raised if IP is invalid
            logger.warning(f"Cannot determine IP address; reason={exc}")
Ejemplo n.º 14
0
def settings():
    # Generator-style pytest fixture body: yields a SettingsHandler and removes
    # the generated settings.json on teardown (the fixture decorator is not
    # shown in this snippet)
    from pygluu.kubernetes.settings import SettingsHandler, unlink_settings_json

    handler = SettingsHandler()
    yield handler
    unlink_settings_json()
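A minimal pytest-style usage sketch for the fixture above; the test name and the key used are illustrative only:

# Hypothetical test consuming the settings fixture.
def test_settings_roundtrip(settings):
    settings.set("CN_NAMESPACE", "gluu")
    assert settings.get("CN_NAMESPACE") == "gluu"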
Ejemplo n.º 15
0
class GluuGateway(object):
    def __init__(self):
        self.settings = SettingsHandler()
        self.kubernetes = Kubernetes()
        if self.settings.get("DEPLOYMENT_ARCH") == "gke":
            # Clusterrolebinding needs to be created for gke with CB or kubeDB installed
            user_account, stderr, retcode = exec_cmd(
                "gcloud config get-value core/account")
            user_account = str(user_account, "utf-8").strip()

            user, stderr, retcode = exec_cmd("whoami")
            user = str(user, "utf-8").strip()
            cluster_role_binding_name = "cluster-admin-{}".format(user)
            self.kubernetes.create_cluster_role_binding(
                cluster_role_binding_name=cluster_role_binding_name,
                user_name=user_account,
                cluster_role_name="cluster-admin")

    def install_gluu_gateway_ui(self):
        self.uninstall_gluu_gateway_ui()
        self.kubernetes.create_namespace(
            name=self.settings.get("GLUU_GATEWAY_UI_NAMESPACE"),
            labels={"APP_NAME": "gluu-gateway-ui"})
        try:
            # Try to get gluu cert + key
            ssl_cert = self.kubernetes.read_namespaced_secret(
                "gluu", self.settings.get("CN_NAMESPACE")).data["ssl_cert"]
            ssl_key = self.kubernetes.read_namespaced_secret(
                "gluu", self.settings.get("CN_NAMESPACE")).data["ssl_key"]

            self.kubernetes.patch_or_create_namespaced_secret(
                name="tls-certificate",
                namespace=self.settings.get("GLUU_GATEWAY_UI_NAMESPACE"),
                literal="tls.crt",
                value_of_literal=ssl_cert,
                secret_type="kubernetes.io/tls",
                second_literal="tls.key",
                value_of_second_literal=ssl_key)

        except Exception:
            logger.error(
                "Could not read the Gluu secret. Please check the config job pod logs. "
                "GG-UI will deploy but will fail. Please mount the crt and key inside the gg-ui deployment")
        client_api_server_url = "https://{}.{}.svc.cluster.local:8443".format(
            self.settings.get("CLIENT_API_APPLICATION_KEYSTORE_CN"),
            self.settings.get("CN_NAMESPACE"))
        values_file = Path("./helm/gluu-gateway-ui/values.yaml").resolve()
        values_file_parser = Parser(values_file, True)
        values_file_parser["cloud"]["isDomainRegistered"] = "false"
        if self.settings.get("IS_CN_FQDN_REGISTERED") == "Y":
            values_file_parser["cloud"]["isDomainRegistered"] = "true"
        if self.settings.get(
                "DEPLOYMENT_ARCH") == "microk8s" or self.settings.get(
                    "DEPLOYMENT_ARCH") == "minikube":
            values_file_parser["cloud"]["enabled"] = False
        values_file_parser["cloud"]["provider"] = self.settings.get(
            "DEPLOYMENT_ARCH")
        values_file_parser["dbUser"] = self.settings.get(
            "GLUU_GATEWAY_UI_PG_USER")
        values_file_parser[
            "kongAdminUrl"] = "https://{}-kong-admin.{}.svc.cluster.local:8444".format(
                self.settings.get("KONG_HELM_RELEASE_NAME"),
                self.settings.get("KONG_NAMESPACE"))
        values_file_parser["dbHost"] = self.settings.get("POSTGRES_URL")
        values_file_parser["dbDatabase"] = self.settings.get(
            "GLUU_GATEWAY_UI_DATABASE")
        values_file_parser["clientApiServerUrl"] = client_api_server_url
        values_file_parser["image"]["repository"] = self.settings.get(
            "GLUU_GATEWAY_UI_IMAGE_NAME")
        values_file_parser["image"]["tag"] = self.settings.get(
            "GLUU_GATEWAY_UI_IMAGE_TAG")
        values_file_parser["loadBalancerIp"] = self.settings.get("HOST_EXT_IP")
        values_file_parser["dbPassword"] = self.settings.get(
            "GLUU_GATEWAY_UI_PG_PASSWORD")
        values_file_parser["opServerUrl"] = "https://" + self.settings.get(
            "CN_FQDN")
        values_file_parser["ggHost"] = self.settings.get("CN_FQDN") + "/gg-ui/"
        values_file_parser["ggUiRedirectUrlHost"] = self.settings.get(
            "CN_FQDN") + "/gg-ui/"
        # Register new client if one was not provided
        if not values_file_parser["clientApiId"] or \
                not values_file_parser["clientId"] or \
                not values_file_parser["clientSecret"]:
            client_api_id, client_id, client_secret = register_op_client(
                self.settings.get("CN_NAMESPACE"), "konga-client",
                self.settings.get("CN_FQDN"), client_api_server_url,
                self.settings.get('CN_HELM_RELEASE_NAME'))
            if not client_api_id:
                values_file_parser.dump_it()
                logger.error(
                    "Konga client registration failed, so the installation has stopped."
                    " Please register the client manually as suggested above, enter the"
                    " returned values for clientApiId, clientId, and clientSecret inside"
                    " ./helm/gluu-gateway-ui/values.yaml, then run "
                    "helm install {} -f ./helm/gluu-gateway-ui/values.yaml ./helm/gluu-gateway-ui "
                    "--namespace={}".format(
                        self.settings.get('GLUU_GATEWAY_UI_HELM_RELEASE_NAME'),
                        self.settings.get("GLUU_GATEWAY_UI_NAMESPACE")))
                raise SystemExit(1)
            values_file_parser["clientApiId"] = client_api_id
            values_file_parser["clientId"] = client_id
            values_file_parser["clientSecret"] = client_secret

        values_file_parser.dump_it()
        exec_cmd(
            "helm install {} -f ./helm/gluu-gateway-ui/values.yaml ./helm/gluu-gateway-ui --namespace={}"
            .format(self.settings.get('GLUU_GATEWAY_UI_HELM_RELEASE_NAME'),
                    self.settings.get("GLUU_GATEWAY_UI_NAMESPACE")))

    def install_gluu_gateway_dbmode(self):
        self.uninstall_gluu_gateway_dbmode()
        self.kubernetes.create_namespace(
            name=self.settings.get("KONG_NAMESPACE"),
            labels={"app": "ingress-kong"})
        encoded_kong_pass_bytes = base64.b64encode(
            self.settings.get("KONG_PG_PASSWORD").encode("utf-8"))
        encoded_kong_pass_string = str(encoded_kong_pass_bytes, "utf-8")
        self.kubernetes.patch_or_create_namespaced_secret(
            name="kong-postgres-pass",
            namespace=self.settings.get("KONG_NAMESPACE"),
            literal="KONG_PG_PASSWORD",
            value_of_literal=encoded_kong_pass_string)
        exec_cmd("helm repo add kong https://charts.konghq.com")
        exec_cmd("helm repo update")
        exec_cmd(
            "helm install {} kong/kong "
            "--set ingressController.installCRDs=false "
            "--set image.repository={} "
            "--set image.tag={} "
            "--set env.database=postgres "
            "--set env.pg_user={} "
            "--set env.pg_password.valueFrom.secretKeyRef.name=kong-postgres-pass "
            "--set env.pg_password.valueFrom.secretKeyRef.key=KONG_PG_PASSWORD "
            "--set env.pg_host={} "
            "--set admin.enabled=true "
            "--set admin.type=ClusterIP "
            "--namespace={}".format(
                self.settings.get("KONG_HELM_RELEASE_NAME"),
                self.settings.get("GLUU_GATEWAY_IMAGE_NAME"),
                self.settings.get("GLUU_GATEWAY_IMAGE_TAG"),
                self.settings.get("KONG_PG_USER"),
                self.settings.get("POSTGRES_URL"),
                self.settings.get("KONG_NAMESPACE")))

    def uninstall_gluu_gateway_dbmode(self):
        exec_cmd("helm delete {} --namespace={}".format(
            self.settings.get('KONG_HELM_RELEASE_NAME'),
            self.settings.get("KONG_NAMESPACE")))

    def uninstall_gluu_gateway_ui(self):
        exec_cmd("helm delete {} --namespace={}".format(
            self.settings.get('GLUU_GATEWAY_UI_HELM_RELEASE_NAME'),
            self.settings.get("GLUU_GATEWAY_UI_NAMESPACE")))

    def uninstall_kong(self):
        logger.info("Removing gluu gateway kong...")
        self.kubernetes.delete_job(self.settings.get("KONG_NAMESPACE"),
                                   "app=kong-migration-job")
        self.kubernetes.delete_custom_resource(
            "kongconsumers.configuration.konghq.com")
        self.kubernetes.delete_custom_resource(
            "kongcredentials.configuration.konghq.com")
        self.kubernetes.delete_custom_resource(
            "kongingresses.configuration.konghq.com")
        self.kubernetes.delete_custom_resource(
            "kongplugins.configuration.konghq.com")
        self.kubernetes.delete_custom_resource(
            "tcpingresses.configuration.konghq.com")
        self.kubernetes.delete_custom_resource(
            "kongclusterplugins.configuration.konghq.com")
        self.kubernetes.delete_cluster_role("kong-ingress-clusterrole")
        self.kubernetes.delete_service_account(
            "kong-serviceaccount", self.settings.get("KONG_NAMESPACE"))
        self.kubernetes.delete_cluster_role_binding(
            "kong-ingress-clusterrole-nisa-binding")
        self.kubernetes.delete_config_map_using_name(
            "kong-server-blocks", self.settings.get("KONG_NAMESPACE"))
        self.kubernetes.delete_service("kong-proxy",
                                       self.settings.get("KONG_NAMESPACE"))
        self.kubernetes.delete_service("kong-validation-webhook",
                                       self.settings.get("KONG_NAMESPACE"))
        self.kubernetes.delete_service("kong-admin",
                                       self.settings.get("KONG_NAMESPACE"))
        self.kubernetes.delete_deployment_using_name(
            "ingress-kong", self.settings.get("KONG_NAMESPACE"))
Ejemplo n.º 16
0
    def __init__(self):
        self.settings = SettingsHandler()
Ejemplo n.º 17
0
class Postgres(object):
    def __init__(self):
        self.settings = SettingsHandler()
        self.kubernetes = Kubernetes()
        self.timeout = 120

    @property
    def generate_postgres_init_sql(self):
        services_using_postgres = []
        if self.settings.get("JACKRABBIT_CLUSTER") == "Y":
            services_using_postgres.append("JACKRABBIT")
        if self.settings.get("INSTALL_GLUU_GATEWAY") == "Y":
            services_using_postgres.append("KONG")
            services_using_postgres.append("GLUU_GATEWAY_UI")
        # Generate init sql
        postgres_init_sql = ""
        for service in services_using_postgres:
            pg_user = self.settings.get("{}_PG_USER".format(service))
            pg_password = self.settings.get("{}_PG_PASSWORD".format(service))
            pg_database = self.settings.get("{}_DATABASE".format(service))
            postgres_init_sql_jackrabbit = "CREATE USER {};\nALTER USER {} PASSWORD '{}';\nCREATE DATABASE {};\n" \
                                           "GRANT ALL PRIVILEGES ON DATABASE {} TO {};\n" \
                .format(pg_user, pg_user, pg_password, pg_database, pg_database, pg_user)
            postgres_init_sql = postgres_init_sql + postgres_init_sql_jackrabbit
        return postgres_init_sql

    def create_patch_secret_init_sql(self):
        postgres_init_sql = self.generate_postgres_init_sql
        encoded_postgres_init_bytes = base64.b64encode(
            postgres_init_sql.encode("utf-8"))
        encoded_postgres_init_string = str(encoded_postgres_init_bytes,
                                           "utf-8")
        self.kubernetes.patch_or_create_namespaced_secret(
            name="pg-init-sql",
            namespace=self.settings.get("POSTGRES_NAMESPACE"),
            literal="data.sql",
            value_of_literal=encoded_postgres_init_string)

    def patch_or_install_postgres(self):
        # Jackrabbit Cluster would have installed postgres
        if self.settings.get("JACKRABBIT_CLUSTER") == "N":
            self.install_postgres()
        else:
            self.create_patch_secret_init_sql()
            logger.info("Restarting postgres...please wait 2mins..")
            self.kubernetes.patch_namespaced_stateful_set_scale(
                name="postgres",
                replicas=0,
                namespace=self.settings.get("POSTGRES_NAMESPACE"))
            time.sleep(120)
            self.kubernetes.patch_namespaced_stateful_set_scale(
                name="postgres",
                replicas=3,
                namespace=self.settings.get("POSTGRES_NAMESPACE"))
            self.kubernetes.check_pods_statuses(
                self.settings.get("POSTGRES_NAMESPACE"), "app=postgres",
                self.timeout)

    def install_postgres(self):
        self.uninstall_postgres()
        self.kubernetes.create_namespace(
            name=self.settings.get("POSTGRES_NAMESPACE"),
            labels={"app": "postgres"})
        self.create_patch_secret_init_sql()
        if self.settings.get("DEPLOYMENT_ARCH") != "local":
            postgres_storage_class = Path("./postgres/storageclasses.yaml")
            analyze_storage_class(self.settings, postgres_storage_class)
            self.kubernetes.create_objects_from_dict(postgres_storage_class)

        postgres_yaml = Path("./postgres/postgres.yaml")
        postgres_parser = Parser(postgres_yaml, "Postgres")
        postgres_parser["spec"]["replicas"] = self.settings.get(
            "POSTGRES_REPLICAS")
        postgres_parser["spec"]["monitor"]["prometheus"][
            "namespace"] = self.settings.get("POSTGRES_NAMESPACE")
        if self.settings.get("DEPLOYMENT_ARCH") == "local":
            postgres_parser["spec"]["storage"][
                "storageClassName"] = "openebs-hostpath"
        postgres_parser["metadata"]["namespace"] = self.settings.get(
            "POSTGRES_NAMESPACE")
        if self.settings.get("DEPLOYMENT_ARCH") in ("microk8s", "minikube") or \
                self.settings.get("TEST_ENVIRONMENT") == "Y":
            try:
                del postgres_parser["spec"]["podTemplate"]["spec"]["resources"]
            except KeyError:
                logger.info(
                    "Resources not deleted as they are not found inside yaml.")

        postgres_parser.dump_it()
        self.kubernetes.create_namespaced_custom_object(
            filepath=postgres_yaml,
            group="kubedb.com",
            version="v1alpha1",
            plural="postgreses",
            namespace=self.settings.get("POSTGRES_NAMESPACE"))
        if not self.settings.get("AWS_LB_TYPE") == "alb":
            self.kubernetes.check_pods_statuses(
                self.settings.get("POSTGRES_NAMESPACE"), "app=postgres",
                self.timeout)

    def uninstall_postgres(self):
        logger.info("Removing gluu-postgres...")
        self.kubernetes.delete_namespaced_custom_object_by_name(
            group="kubedb.com",
            version="v1alpha1",
            plural="postgreses",
            name="postgres",
            namespace=self.settings.get("POSTGRES_NAMESPACE"))
        self.kubernetes.delete_namespaced_custom_object_by_name(
            group="kubedb.com",
            version="v1alpha1",
            plural="postgresversions",
            name="postgres",
            namespace=self.settings.get("POSTGRES_NAMESPACE"))
        self.kubernetes.delete_storage_class("postgres-sc")
        self.kubernetes.delete_service("kubedb",
                                       self.settings.get("POSTGRES_NAMESPACE"))
        self.kubernetes.delete_service("postgres",
                                       self.settings.get("POSTGRES_NAMESPACE"))
        self.kubernetes.delete_service("postgres-replicas",
                                       self.settings.get("POSTGRES_NAMESPACE"))
        self.kubernetes.delete_service("postgres-stats",
                                       self.settings.get("POSTGRES_NAMESPACE"))
Ejemplo n.º 18
0
    def load_settings(self):
        self.settings = SettingsHandler()
Ejemplo n.º 19
0
class Couchbase(object):
    def __init__(self):
        self.settings = SettingsHandler()
        self.kubernetes = Kubernetes()
        self.storage_class_file = Path("./couchbase/storageclasses.yaml")
        self.couchbase_cluster_file = Path(
            "./couchbase/couchbase-cluster.yaml")
        self.couchbase_buckets_file = Path(
            "./couchbase/couchbase-buckets.yaml")
        self.couchbase_group_file = Path("./couchbase/couchbase-group.yaml")
        self.couchbase_user_file = Path("./couchbase/couchbase-user.yaml")
        self.couchbase_rolebinding_file = Path(
            "./couchbase/couchbase-rolebinding.yaml")
        self.couchbase_ephemeral_buckets_file = Path(
            "./couchbase/couchbase-ephemeral-buckets.yaml")
        self.couchbase_source_folder_pattern, self.couchbase_source_file = self.get_couchbase_files
        self.couchbase_custom_resource_definition_file = self.couchbase_source_file.joinpath(
            "crd.yaml")
        self.couchbase_operator_dac_file = self.couchbase_source_file.joinpath(
            "operator_dac.yaml")
        self.filename = ""

    @property
    def get_couchbase_files(self):
        """
        Returns the Couchbase operator tar package file and the extracted package folder containing the manifests.
        :return:
        """
        if self.settings.get("INSTALL_COUCHBASE") == "Y":
            couchbase_tar_pattern = "couchbase-autonomous-operator-kubernetes_*.tar.gz"
            directory = Path('.')
            try:
                couchbase_tar_file = list(
                    directory.glob(couchbase_tar_pattern))[0]
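                # Operator 1.x package filenames contain "_1."; only versions > 2.0 are supported.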
                if "_1." in str(couchbase_tar_file.resolve()):
                    logger.fatal(
                        "Couchbase Autonomous Operator version must be > 2.0")
                    sys.exit()

            except IndexError:
                logger.fatal("Couchbase package not found.")
                logger.info(
                    "Please download the couchbase kubernetes package and place it inside the same directory "
                    "containing the pygluu-kubernetes.pyz script.https://www.couchbase.com/downloads"
                )
                sys.exit()
            extract_couchbase_tar(couchbase_tar_file)
            couchbase_source_folder_pattern = "./couchbase-source-folder/couchbase-autonomous-operator-kubernetes_*"
            couchbase_source_folder = list(
                directory.glob(couchbase_source_folder_pattern))[0]

            return couchbase_tar_file, couchbase_source_folder
        # Couchbase is already installed; no package to locate, so return placeholder paths.
        return Path("."), Path(".")

    def create_couchbase_gluu_cert_pass_secrets(self, encoded_ca_crt_string,
                                                encoded_cb_pass_string,
                                                encoded_cb_super_pass_string):
        """
        Create or patch the secrets containing the Couchbase certificate authority crt and the Couchbase admin passwords
        :param encoded_ca_crt_string:
        :param encoded_cb_pass_string:
        :param encoded_cb_super_pass_string:
        """
        # Remove this if it's not needed
        self.kubernetes.patch_or_create_namespaced_secret(
            name="cb-crt",
            namespace=self.settings.get("CN_NAMESPACE"),
            literal="couchbase.crt",
            value_of_literal=encoded_ca_crt_string)

        # Remove this if it's not needed
        self.kubernetes.patch_or_create_namespaced_secret(
            name="cb-pass",
            namespace=self.settings.get("CN_NAMESPACE"),
            literal="couchbase_password",
            value_of_literal=encoded_cb_pass_string)

        self.kubernetes.patch_or_create_namespaced_secret(
            name="cb-super-pass",
            namespace=self.settings.get("CN_NAMESPACE"),
            literal="couchbase_superuser_password",
            value_of_literal=encoded_cb_super_pass_string)

    def setup_backup_couchbase(self):
        """
        Sets up the Couchbase backup strategy
        """
        couchbase_backup_file = Path(
            "./couchbase/backup/couchbase-backup.yaml")
        parser = Parser(couchbase_backup_file, "CouchbaseBackup")
        parser["spec"]["full"]["schedule"] = self.settings.get(
            "COUCHBASE_FULL_BACKUP_SCHEDULE")
        parser["spec"]["incremental"]["schedule"] = self.settings.get(
            "COUCHBASE_INCR_BACKUP_SCHEDULE")
        parser["spec"]["backupRetention"] = self.settings.get(
            "COUCHBASE_BACKUP_RETENTION_TIME")
        parser["spec"]["size"] = self.settings.get(
            "COUCHBASE_BACKUP_STORAGE_SIZE")
        parser.dump_it()
        self.kubernetes.create_namespaced_custom_object(
            filepath=couchbase_backup_file,
            group="couchbase.com",
            version="v2",
            plural="couchbasebackups",
            namespace=self.settings.get("COUCHBASE_NAMESPACE"))

    @property
    def calculate_couchbase_resources(self):
        """
        Returns a dictionary containing the calculated Couchbase resource information.
        Alpha
        :return:
        """
        tps = int(self.settings.get("EXPECTED_TRANSACTIONS_PER_SEC"))
        number_of_data_nodes = 0
        number_of_query_nodes = 0
        number_of_index_nodes = 0
        number_of_eventing_service_memory_nodes = 0
        user_ratio = int(
            self.settings.get("NUMBER_OF_EXPECTED_USERS")) / 50000000
        tps_ratio = tps / 14000
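        # The two ratios above normalize the expected load against the sizing
        # baselines (50,000,000 users and 14,000 transactions per second) used
        # by the per-flow multipliers below.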

        if self.settings.get(
                "USING_RESOURCE_OWNER_PASSWORD_CRED_GRANT_FLOW") == "Y":
            number_of_data_nodes += tps_ratio * 7 * user_ratio
            number_of_query_nodes += tps_ratio * 5 * user_ratio
            number_of_index_nodes += tps_ratio * 5 * user_ratio
            number_of_eventing_service_memory_nodes += tps_ratio * 4 * user_ratio

        if self.settings.get("USING_CODE_FLOW") == "Y":
            number_of_data_nodes += tps_ratio * 14 * user_ratio
            number_of_query_nodes += tps_ratio * 12 * user_ratio
            number_of_index_nodes += tps_ratio * 13 * user_ratio
            number_of_eventing_service_memory_nodes += tps_ratio * 7 * user_ratio

        if self.settings.get("USING_SCIM_FLOW") == "Y":
            number_of_data_nodes += tps_ratio * 7 * user_ratio
            number_of_query_nodes += tps_ratio * 5 * user_ratio
            number_of_index_nodes += tps_ratio * 5 * user_ratio
            number_of_eventing_service_memory_nodes += tps_ratio * 4 * user_ratio

        if not self.settings.get("COUCHBASE_GENERAL_STORAGE"):
            self.settings.set(
                "COUCHBASE_GENERAL_STORAGE",
                str(
                    int((tps_ratio *
                         (int(self.settings.get("NUMBER_OF_EXPECTED_USERS")) /
                          125000)) + 5)) + "Gi")
        if not self.settings.get("COUCHBASE_DATA_STORAGE"):
            self.settings.set(
                "COUCHBASE_DATA_STORAGE",
                str(
                    int((tps_ratio *
                         (int(self.settings.get("NUMBER_OF_EXPECTED_USERS")) /
                          100000)) + 5)) + "Gi")
        if not self.settings.get("COUCHBASE_INDEX_STORAGE"):
            self.settings.set(
                "COUCHBASE_INDEX_STORAGE",
                str(
                    int((tps_ratio *
                         (int(self.settings.get("NUMBER_OF_EXPECTED_USERS")) /
                          200000)) + 5)) + "Gi")
        if not self.settings.get("COUCHBASE_QUERY_STORAGE"):
            self.settings.set(
                "COUCHBASE_QUERY_STORAGE",
                str(
                    int((tps_ratio *
                         (int(self.settings.get("NUMBER_OF_EXPECTED_USERS")) /
                          200000)) + 5)) + "Gi")
        if not self.settings.get("COUCHBASE_ANALYTICS_STORAGE"):
            self.settings.set(
                "COUCHBASE_ANALYTICS_STORAGE",
                str(
                    int((tps_ratio *
                         (int(self.settings.get("NUMBER_OF_EXPECTED_USERS")) /
                          250000)) + 5)) + "Gi")

        if self.settings.get("COUCHBASE_DATA_NODES"):
            number_of_data_nodes = self.settings.get("COUCHBASE_DATA_NODES")
        if self.settings.get("COUCHBASE_QUERY_NODES"):
            number_of_query_nodes = self.settings.get("COUCHBASE_QUERY_NODES")
        if self.settings.get("COUCHBASE_INDEX_NODES"):
            number_of_index_nodes = self.settings.get("COUCHBASE_INDEX_NODES")
        if self.settings.get("COUCHBASE_SEARCH_EVENTING_ANALYTICS_NODES"):
            number_of_eventing_service_memory_nodes = self.settings.get(
                "COUCHBASE_SEARCH_EVENTING_ANALYTICS_NODES")

        data_service_memory_quota = (tps_ratio * 40000 * user_ratio) + 512
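        # e.g. (hypothetical inputs) 1,000,000 expected users at 14,000 expected TPS
        # give user_ratio = 0.02 and tps_ratio = 1.0, so the data quota above is
        # (1.0 * 40000 * 0.02) + 512 = 1312 (Mi).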
        data_memory_request = data_service_memory_quota / 4
        data_memory_limit = data_memory_request
        data_cpu_request = data_service_memory_quota / 4
        data_cpu_limit = data_cpu_request

        query_memory_request = data_memory_request
        query_memory_limit = query_memory_request
        query_cpu_request = data_service_memory_quota / 4
        query_cpu_limit = query_cpu_request

        index_service_memory_quota = (tps_ratio * 25000 * user_ratio) + 256
        index_memory_request = index_service_memory_quota / 3
        index_memory_limit = index_memory_request
        index_cpu_request = index_service_memory_quota / 3
        index_cpu_limit = index_cpu_request

        search_service_memory_quota = (tps_ratio * 4000 * user_ratio) + 256
        eventing_service_memory_quota = (tps_ratio * 4000 * user_ratio) + 256
        analytics_service_memory_quota = (tps_ratio * 4000 * user_ratio) + 1024

        search_eventing_analytics_memory_quota_sum = (
            search_service_memory_quota + eventing_service_memory_quota +
            analytics_service_memory_quota)
        search_eventing_analytics_memory_request = tps_ratio * 10000 * user_ratio
        search_eventing_analytics_memory_limit = search_eventing_analytics_memory_request
        search_eventing_analytics_cpu_request = tps_ratio * 6000 * user_ratio
        search_eventing_analytics_cpu_limit = search_eventing_analytics_cpu_request

        # Two services because query is assumed to take the same amount of mem quota
        total_mem_resources = \
            data_service_memory_quota + data_service_memory_quota + index_service_memory_quota + \
            search_eventing_analytics_memory_quota_sum

        total_cpu_resources = data_cpu_limit + query_cpu_limit + index_cpu_limit + search_eventing_analytics_cpu_limit

        resources_info = dict(
            COUCHBASE_DATA_NODES=int(number_of_data_nodes),
            COUCHBASE_QUERY_NODES=int(number_of_query_nodes),
            COUCHBASE_INDEX_NODES=int(number_of_index_nodes),
            COUCHBASE_SEARCH_EVENTING_ANALYTICS_NODES=int(
                number_of_eventing_service_memory_nodes),
            COUCHBASE_DATA_MEM_QUOTA=round(data_service_memory_quota),
            COUCHBASE_DATA_MEM_REQUEST=round(data_memory_request),
            COUCHBASE_DATA_MEM_LIMIT=round(data_memory_limit),
            COUCHBASE_DATA_CPU_REQUEST=round(data_cpu_request),
            COUCHBASE_DATA_CPU_LIMIT=round(data_cpu_limit),
            COUCHBASE_QUERY_MEM_QUOTA=round(data_service_memory_quota),
            COUCHBASE_QUERY_MEM_REQUEST=round(query_memory_request),
            COUCHBASE_QUERY_MEM_LIMIT=round(query_memory_limit),
            COUCHBASE_QUERY_CPU_REQUEST=round(query_cpu_request),
            COUCHBASE_QUERY_CPU_LIMIT=round(query_cpu_limit),
            COUCHBASE_INDEX_MEM_QUOTA=round(index_service_memory_quota),
            COUCHBASE_INDEX_MEM_REQUEST=round(index_memory_request),
            COUCHBASE_INDEX_MEM_LIMIT=round(index_memory_limit),
            COUCHBASE_INDEX_CPU_REQUEST=round(index_cpu_request),
            COUCHBASE_INDEX_CPU_LIMIT=round(index_cpu_limit),
            COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_QUOTA=round(
                search_service_memory_quota),
            COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_REQUEST=round(
                search_eventing_analytics_memory_request),
            COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_LIMIT=round(
                search_eventing_analytics_memory_limit),
            COUCHBASE_SEARCH_EVENTING_ANALYTICS_CPU_REQUEST=round(
                search_eventing_analytics_cpu_request),
            COUCHBASE_SEARCH_EVENTING_ANALYTICS_CPU_LIMIT=round(
                search_eventing_analytics_cpu_limit),
            TOTAL_RAM_NEEDED=round(total_mem_resources),
            TOTAL_CPU_NEEDED=round(total_cpu_resources))
        self.settings.set("COUCHBASE_DATA_NODES", number_of_data_nodes)
        self.settings.set("COUCHBASE_QUERY_NODES", number_of_query_nodes)
        self.settings.set("COUCHBASE_INDEX_NODES", number_of_index_nodes)
        self.settings.set("COUCHBASE_SEARCH_EVENTING_ANALYTICS_NODES",
                          number_of_eventing_service_memory_nodes)
        return resources_info

    def analyze_couchbase_cluster_yaml(self):
        """
        Dumps the calculated resources into the couchbase-cluster.yaml file. Also includes cloud zones.
        """
        parser = Parser("./couchbase/couchbase-cluster.yaml",
                        "CouchbaseCluster")
        parser["metadata"]["name"] = self.settings.get(
            "COUCHBASE_CLUSTER_NAME")
        number_of_buckets = 5
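        # Low-resource/local deployments run all services on a single node with fixed
        # quotas; otherwise sizes come from calculate_couchbase_resources.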
        if self.settings.get("DEPLOYMENT_ARCH") in ("microk8s", "minikube") or \
                self.settings.get("COUCHBASE_USE_LOW_RESOURCES") == "Y":
            resources_servers = [{
                "name":
                "allServices",
                "size":
                1,
                "services":
                ["data", "index", "query", "search", "eventing", "analytics"],
                "volumeMounts": {
                    "default": "pvc-general",
                    "data": "pvc-data",
                    "index": "pvc-index",
                    "analytics": ["pvc-analytics"]
                }
            }]
            data_service_memory_quota = 1024
            index_service_memory_quota = 512
            search_service_memory_quota = 512
            eventing_service_memory_quota = 512
            analytics_service_memory_quota = 1024
            memory_quota = 0
            self.settings.set("COUCHBASE_GENERAL_STORAGE", "5Gi")
            self.settings.set("COUCHBASE_DATA_STORAGE", "5Gi")
            self.settings.set("COUCHBASE_INDEX_STORAGE", "5Gi")
            self.settings.set("COUCHBASE_QUERY_STORAGE", "5Gi")
            self.settings.set("COUCHBASE_ANALYTICS_STORAGE", "5Gi")

        else:
            resources = self.calculate_couchbase_resources
            data_service_memory_quota = resources["COUCHBASE_DATA_MEM_QUOTA"]
            index_service_memory_quota = resources["COUCHBASE_INDEX_MEM_QUOTA"]
            search_service_memory_quota = resources[
                "COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_QUOTA"]
            eventing_service_memory_quota = resources[
                "COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_QUOTA"]
            analytics_service_memory_quota = resources[
                "COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_QUOTA"] + 1024
            memory_quota = ((resources["COUCHBASE_DATA_MEM_QUOTA"] - 500) /
                            number_of_buckets)
            zones_list = self.settings.get("NODES_ZONES")
            data_server_spec = create_server_spec_per_cb_service(
                zones_list, int(resources["COUCHBASE_DATA_NODES"]), "data",
                str(resources["COUCHBASE_DATA_MEM_REQUEST"]),
                str(resources["COUCHBASE_DATA_MEM_LIMIT"]),
                str(resources["COUCHBASE_DATA_CPU_REQUEST"]),
                str(resources["COUCHBASE_DATA_CPU_LIMIT"]))

            query_server_spec = create_server_spec_per_cb_service(
                zones_list, int(resources["COUCHBASE_QUERY_NODES"]), "query",
                str(resources["COUCHBASE_QUERY_MEM_REQUEST"]),
                str(resources["COUCHBASE_QUERY_MEM_LIMIT"]),
                str(resources["COUCHBASE_QUERY_CPU_REQUEST"]),
                str(resources["COUCHBASE_QUERY_CPU_LIMIT"]))

            index_server_spec = create_server_spec_per_cb_service(
                zones_list, int(resources["COUCHBASE_INDEX_NODES"]), "index",
                str(resources["COUCHBASE_INDEX_MEM_REQUEST"]),
                str(resources["COUCHBASE_INDEX_MEM_LIMIT"]),
                str(resources["COUCHBASE_INDEX_CPU_REQUEST"]),
                str(resources["COUCHBASE_INDEX_CPU_LIMIT"]))

            search_eventing_analytics_server_spec = create_server_spec_per_cb_service(
                zones_list,
                int(resources["COUCHBASE_SEARCH_EVENTING_ANALYTICS_NODES"]),
                "analytics",
                str(resources[
                    "COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_REQUEST"]),
                str(resources["COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_LIMIT"]
                    ),
                str(resources[
                    "COUCHBASE_SEARCH_EVENTING_ANALYTICS_CPU_REQUEST"]),
                str(resources["COUCHBASE_SEARCH_EVENTING_ANALYTICS_CPU_LIMIT"]
                    ))

            resources_servers = \
                data_server_spec + query_server_spec + index_server_spec + \
                search_eventing_analytics_server_spec

        if self.settings.get("NODES_ZONES"):
            unique_zones = list(dict.fromkeys(
                self.settings.get("NODES_ZONES")))
            parser["spec"]["serverGroups"] = unique_zones
        parser["spec"]["cluster"]["dataServiceMemoryQuota"] = str(
            data_service_memory_quota) + "Mi"
        parser["spec"]["cluster"]["indexServiceMemoryQuota"] = str(
            index_service_memory_quota) + "Mi"
        parser["spec"]["cluster"]["searchServiceMemoryQuota"] = str(
            search_service_memory_quota) + "Mi"
        parser["spec"]["cluster"]["eventingServiceMemoryQuota"] = str(
            eventing_service_memory_quota) + "Mi"
        parser["spec"]["cluster"]["analyticsServiceMemoryQuota"] = str(
            analytics_service_memory_quota) + "Mi"

        set_memory_for_buckets(memory_quota)
        parser["metadata"]["name"] = self.settings.get(
            "COUCHBASE_CLUSTER_NAME")
        parser["spec"]["servers"] = resources_servers

        number_of_volume_claims = len(parser["spec"]["volumeClaimTemplates"])
        for i in range(number_of_volume_claims):
            name = parser["spec"]["volumeClaimTemplates"][i]["metadata"][
                "name"]
            if name == "pvc-general":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"]["requests"]["storage"] = \
                    self.settings.get("COUCHBASE_GENERAL_STORAGE")
            elif name == "pvc-data":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"]["requests"]["storage"] = \
                    self.settings.get("COUCHBASE_DATA_STORAGE")
            elif name == "pvc-index":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"]["requests"]["storage"] = \
                    self.settings.get("COUCHBASE_INDEX_STORAGE")
            elif name == "pvc-query":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"]["requests"]["storage"] = \
                    self.settings.get("COUCHBASE_QUERY_STORAGE")
            elif name == "pvc-analytics":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"]["requests"]["storage"] = \
                    self.settings.get("COUCHBASE_ANALYTICS_STORAGE")
        parser.dump_it()

    def install(self):
        """
        Installs Couchbase
        """
        self.kubernetes.create_namespace(
            name=self.settings.get("CN_NAMESPACE"))
        if self.settings.get("COUCHBASE_CLUSTER_FILE_OVERRIDE") == "N":
            self.analyze_couchbase_cluster_yaml()
        cb_namespace = self.settings.get("COUCHBASE_NAMESPACE")
        storage_class_file_parser = Parser(self.storage_class_file,
                                           "StorageClass")
        if self.settings.get('DEPLOYMENT_ARCH') == "gke" or \
                self.settings.get('DEPLOYMENT_ARCH') == "aks" or \
                self.settings.get('DEPLOYMENT_ARCH') == "do":
            try:
                del storage_class_file_parser["parameters"]["encrypted"]
            except KeyError:
                logger.info("Key not found")
            storage_class_file_parser["parameters"][
                "type"] = self.settings.get("COUCHBASE_VOLUME_TYPE")
        if self.settings.get('DEPLOYMENT_ARCH') == "gke":
            storage_class_file_parser["provisioner"] = "kubernetes.io/gce-pd"
        elif self.settings.get('DEPLOYMENT_ARCH') == "aks":
            storage_class_file_parser[
                "provisioner"] = "kubernetes.io/azure-disk"
        elif self.settings.get('DEPLOYMENT_ARCH') == "do":
            storage_class_file_parser[
                "provisioner"] = "dobs.csi.digitalocean.com"
        elif self.settings.get('DEPLOYMENT_ARCH') == "microk8s":
            storage_class_file_parser["provisioner"] = "microk8s.io/hostpath"
            try:
                del storage_class_file_parser["allowVolumeExpansion"]
                del storage_class_file_parser["parameters"]
            except KeyError:
                logger.info("Key not found")
            storage_class_file_parser.dump_it()
        elif self.settings.get('DEPLOYMENT_ARCH') == "minikube":
            storage_class_file_parser[
                "provisioner"] = "k8s.io/minikube-hostpath"
            try:
                del storage_class_file_parser["allowVolumeExpansion"]
                del storage_class_file_parser["parameters"]
            except KeyError:
                logger.info("Key not found")
            storage_class_file_parser.dump_it()
        else:
            try:
                storage_class_file_parser["parameters"][
                    "type"] = self.settings.get("COUCHBASE_VOLUME_TYPE")
            except KeyError:
                logger.info("Key not found")
        storage_class_file_parser.dump_it()

        logger.info("Installing Couchbase...")
        couchbase_crts_keys = Path("couchbase_crts_keys")
        if not couchbase_crts_keys.exists():
            os.mkdir(couchbase_crts_keys)
        custom_cb_ca_crt = Path("./couchbase_crts_keys/ca.crt")
        custom_cb_crt = Path("./couchbase_crts_keys/chain.pem")
        custom_cb_key = Path("./couchbase_crts_keys/pkey.key")
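        # Generate a self-signed CA and server certificate only when no custom
        # certificates were placed in ./couchbase_crts_keys beforehand.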
        if not custom_cb_ca_crt.exists() and not custom_cb_crt.exists(
        ) and not custom_cb_key.exists():
            setup_crts(
                ca_common_name=self.settings.get("COUCHBASE_CN"),
                cert_common_name="couchbase-server",
                san_list=self.settings.get("COUCHBASE_SUBJECT_ALT_NAME"),
                ca_cert_file="./couchbase_crts_keys/ca.crt",
                ca_key_file="./couchbase_crts_keys/ca.key",
                cert_file="./couchbase_crts_keys/chain.pem",
                key_file="./couchbase_crts_keys/pkey.key")
        self.kubernetes.create_namespace(name=cb_namespace)
        chain_pem_filepath = Path("./couchbase_crts_keys/chain.pem")
        pkey_filepath = Path("./couchbase_crts_keys/pkey.key")
        tls_cert_filepath = Path("./couchbase_crts_keys/tls-cert-file")
        tls_private_key_filepath = Path(
            "./couchbase_crts_keys/tls-private-key-file")
        ca_cert_filepath = Path("./couchbase_crts_keys/ca.crt")
        shutil.copyfile(ca_cert_filepath,
                        Path("./couchbase_crts_keys/couchbase.crt"))
        shutil.copyfile(chain_pem_filepath, tls_cert_filepath)
        shutil.copyfile(pkey_filepath, tls_private_key_filepath)

        encoded_ca_crt_string = self.settings.get("COUCHBASE_CRT")
        if not encoded_ca_crt_string:
            with open(ca_cert_filepath) as content_file:
                ca_crt_content = content_file.read()
                encoded_ca_crt_bytes = base64.b64encode(
                    ca_crt_content.encode("utf-8"))
                encoded_ca_crt_string = str(encoded_ca_crt_bytes, "utf-8")
            self.settings.set("COUCHBASE_CRT", encoded_ca_crt_string)

        with open(chain_pem_filepath) as content_file:
            chain_pem_content = content_file.read()
            encoded_chain_bytes = base64.b64encode(
                chain_pem_content.encode("utf-8"))
            encoded_chain_string = str(encoded_chain_bytes, "utf-8")

        with open(pkey_filepath) as content_file:
            pkey_content = content_file.read()
            encoded_pkey_bytes = base64.b64encode(pkey_content.encode("utf-8"))
            encoded_pkey_string = str(encoded_pkey_bytes, "utf-8")

        self.kubernetes.patch_or_create_namespaced_secret(
            name="couchbase-server-tls",
            namespace=cb_namespace,
            literal=chain_pem_filepath.name,
            value_of_literal=encoded_chain_string,
            second_literal=pkey_filepath.name,
            value_of_second_literal=encoded_pkey_string)
        self.kubernetes.patch_or_create_namespaced_secret(
            name="couchbase-operator-tls",
            namespace=cb_namespace,
            literal=ca_cert_filepath.name,
            value_of_literal=encoded_ca_crt_string)

        encoded_cb_super_user_bytes = base64.b64encode(
            self.settings.get("COUCHBASE_SUPERUSER").encode("utf-8"))
        encoded_cb_super_user_string = str(encoded_cb_super_user_bytes,
                                           "utf-8")
        encoded_cb_pass_bytes = base64.b64encode(
            self.settings.get("COUCHBASE_PASSWORD").encode("utf-8"))
        encoded_cb_pass_string = str(encoded_cb_pass_bytes, "utf-8")
        encoded_cb_super_pass_bytes = base64.b64encode(
            self.settings.get("COUCHBASE_SUPERUSER_PASSWORD").encode("utf-8"))
        encoded_cb_super_pass_string = str(encoded_cb_super_pass_bytes,
                                           "utf-8")

        self.create_couchbase_gluu_cert_pass_secrets(
            encoded_ca_crt_string, encoded_cb_pass_string,
            encoded_cb_super_pass_string)
        self.kubernetes.patch_or_create_namespaced_secret(
            name="gluu-couchbase-user-password",
            namespace=self.settings.get("COUCHBASE_NAMESPACE"),
            literal="password",
            value_of_literal=encoded_cb_pass_string)
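        # cbopcfg, shipped inside the extracted operator package, renders the operator
        # deployment manifests (with backup enabled) into operator_dac.yaml.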
        command = "./{}/bin/cbopcfg -backup=true -namespace={}".format(
            self.couchbase_source_file,
            self.settings.get("COUCHBASE_NAMESPACE"))
        exec_cmd(command, output_file=self.couchbase_operator_dac_file)
        couchbase_cluster_parser = Parser(self.couchbase_cluster_file,
                                          "CouchbaseCluster")
        couchbase_cluster_parser["spec"]["networking"]["tls"]["static"][
            "serverSecret"] = "couchbase-server-tls"
        couchbase_cluster_parser["spec"]["networking"]["tls"]["static"][
            "operatorSecret"] = "couchbase-operator-tls"
        try:
            couchbase_cluster_parser["spec"]["security"]["rbac"]["selector"]["matchLabels"]["cluster"] = \
                self.settings.get("COUCHBASE_CLUSTER_NAME")
            couchbase_cluster_parser["spec"]["security"]["rbac"][
                "managed"] = True
        except KeyError:
            logger.error(
                "rbac section is missing or incorrect in couchbase-cluster.yaml."
                " Please set spec --> security --> rbac --> managed : true"
                " and set spec --> security --> rbac --> selector --> matchLabels --> "
                "cluster --> to your cluster name")
            logger.info(
                "As a result of the above the installation will exit "
                "as the gluu user will not be created causing the communication between "
                "Gluu server and Couchbase to fail.")
            sys.exit()
        if self.settings.get("DEPLOYMENT_ARCH") == "local":
            volume_claims = couchbase_cluster_parser["spec"][
                "volumeClaimTemplates"]
            for i, volume_claim in enumerate(volume_claims):
                couchbase_cluster_parser["spec"]["volumeClaimTemplates"][i]["spec"]["storageClassName"] = \
                    "openebs-hostpath"
        couchbase_cluster_parser.dump_it()

        self.kubernetes.create_objects_from_dict(
            self.couchbase_custom_resource_definition_file,
            namespace=cb_namespace)

        self.kubernetes.create_objects_from_dict(
            self.couchbase_operator_dac_file, namespace=cb_namespace)

        self.kubernetes.check_pods_statuses(cb_namespace,
                                            "app=couchbase-operator", 700)

        self.kubernetes.patch_or_create_namespaced_secret(
            name="cb-auth",
            namespace=cb_namespace,
            literal="username",
            value_of_literal=encoded_cb_super_user_string,
            second_literal="password",
            value_of_second_literal=encoded_cb_super_pass_string)

        self.kubernetes.create_objects_from_dict(self.storage_class_file,
                                                 namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_cluster_file,
            group="couchbase.com",
            version="v2",
            plural="couchbaseclusters",
            namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_buckets_file,
            group="couchbase.com",
            version="v2",
            plural="couchbasebuckets",
            namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_ephemeral_buckets_file,
            group="couchbase.com",
            version="v2",
            plural="couchbaseephemeralbuckets",
            namespace=cb_namespace)
        couchbase_group_parser = Parser(self.couchbase_group_file,
                                        "CouchbaseGroup")
        couchbase_group_parser["metadata"]["labels"]["cluster"] = \
            self.settings.get("COUCHBASE_CLUSTER_NAME")
        couchbase_group_parser.dump_it()
        couchbase_user_parser = Parser(self.couchbase_user_file,
                                       "CouchbaseUser")
        couchbase_user_parser["metadata"]["labels"]["cluster"] = \
            self.settings.get("COUCHBASE_CLUSTER_NAME")
        couchbase_user_parser.dump_it()
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_group_file,
            group="couchbase.com",
            version="v2",
            plural="couchbasegroups",
            namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_user_file,
            group="couchbase.com",
            version="v2",
            plural="couchbaseusers",
            namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_rolebinding_file,
            group="couchbase.com",
            version="v2",
            plural="couchbaserolebindings",
            namespace=cb_namespace)
        self.kubernetes.check_pods_statuses(
            cb_namespace, "couchbase_service_analytics=enabled", 700)
        self.kubernetes.check_pods_statuses(cb_namespace,
                                            "couchbase_service_data=enabled",
                                            700)
        self.kubernetes.check_pods_statuses(
            cb_namespace, "couchbase_service_eventing=enabled", 700)
        self.kubernetes.check_pods_statuses(cb_namespace,
                                            "couchbase_service_index=enabled",
                                            700)
        self.kubernetes.check_pods_statuses(cb_namespace,
                                            "couchbase_service_query=enabled",
                                            700)
        self.kubernetes.check_pods_statuses(
            cb_namespace, "couchbase_service_search=enabled", 700)
        # Setup couchbase backups
        if self.settings.get("DEPLOYMENT_ARCH") not in ("microk8s",
                                                        "minikube"):
            self.setup_backup_couchbase()
        shutil.rmtree(self.couchbase_source_folder_pattern, ignore_errors=True)

        if self.settings.get("DEPLOY_MULTI_CLUSTER") == "Y":
            logger.info(
                "Setup XDCR between the running Gluu couchbase cluster and this one"
            )

    def uninstall(self):
        """
        Uninstalls couchbase
        """
        logger.info("Deleting Couchbase...")
        self.kubernetes.delete_storage_class("couchbase-sc")
        self.kubernetes.delete_custom_resource(
            "couchbaseclusters.couchbase.com")
        self.kubernetes.delete_validating_webhook_configuration(
            "couchbase-operator-admission")
        self.kubernetes.delete_mutating_webhook_configuration(
            "couchbase-operator-admission")
        self.kubernetes.delete_cluster_role_binding(
            "couchbase-operator-admission")
        self.kubernetes.delete_cluster_role("couchbase-operator-admission")
        self.kubernetes.delete_role("couchbase-operator",
                                    self.settings.get("COUCHBASE_NAMESPACE"))
        self.kubernetes.delete_secret("cb-auth",
                                      self.settings.get("COUCHBASE_NAMESPACE"))
        self.kubernetes.delete_secret("gluu-couchbase-user-password",
                                      self.settings.get("COUCHBASE_NAMESPACE"))
        self.kubernetes.delete_deployment_using_name(
            "couchbase-operator", self.settings.get("COUCHBASE_NAMESPACE"))
        self.kubernetes.delete_role_binding(
            "couchbase-operator", self.settings.get("COUCHBASE_NAMESPACE"))
        self.kubernetes.delete_service_account(
            "couchbase-operator", self.settings.get("COUCHBASE_NAMESPACE"))
        self.kubernetes.delete_service(
            "couchbase-operator-admission",
            self.settings.get("COUCHBASE_NAMESPACE"))
        self.kubernetes.delete_deployment_using_name(
            "couchbase-operator-admission",
            self.settings.get("COUCHBASE_NAMESPACE"))
        self.kubernetes.delete_service(
            "couchbase-operator", self.settings.get("COUCHBASE_NAMESPACE"))
        self.kubernetes.delete_custom_resource(
            "couchbasebackuprestores.couchbase.com")
        self.kubernetes.delete_custom_resource(
            "couchbasebackups.couchbase.com")
        self.kubernetes.delete_custom_resource(
            "couchbasebuckets.couchbase.com")
        self.kubernetes.delete_custom_resource(
            "couchbaseephemeralbuckets.couchbase.com")
        self.kubernetes.delete_custom_resource(
            "couchbasereplications.couchbase.com")
        self.kubernetes.delete_custom_resource(
            "couchbaserolebindings.couchbase.com")
        self.kubernetes.delete_custom_resource("couchbasegroups.couchbase.com")
        self.kubernetes.delete_custom_resource(
            "couchbasememcachedbuckets.couchbase.com")
        self.kubernetes.delete_custom_resource("couchbaseusers.couchbase.com")

        self.kubernetes.delete_service_account(
            "couchbase-operator-admission",
            self.settings.get("COUCHBASE_NAMESPACE"))
        self.kubernetes.delete_secret("couchbase-operator-admission",
                                      self.settings.get("COUCHBASE_NAMESPACE"))
        self.kubernetes.delete_secret("couchbase-operator-tls",
                                      self.settings.get("COUCHBASE_NAMESPACE"))
        shutil.rmtree(Path("./couchbase-source-folder"), ignore_errors=True)
Example #20
0
def main():
    parser = create_parser()
    args = parser.parse_args(sys.argv[1:])

    if not args.subparser_name:
        parser.print_help()
        return
    copy_templates()
    settings = SettingsHandler()
    if not settings.validate():
        for error in settings.errors:
            logger.error(error)
        sys.exit()

    prompts = Prompt()
    prompts.prompt()

    timeout = 120
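    # The install-no-wait subcommand zeroes the timeout so pod readiness is not awaited.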
    if args.subparser_name == "install-no-wait":
        timeout = 0
    try:
        if args.subparser_name == "install" or args.subparser_name == "install-no-wait":
            kustomize = Kustomize(timeout)
            kustomize.uninstall()
            if settings.get("INSTALL_REDIS") == "Y" or \
                    settings.get("INSTALL_GLUU_GATEWAY") == "Y" or \
                    settings.get("JACKRABBIT_CLUSTER") == "Y":
                helm = Helm()
                helm.uninstall_kubedb()
                helm.install_kubedb()
            kustomize.install()

        if args.subparser_name == "install-ldap-backup":
            kustomize = Kustomize(timeout)
            kustomize.setup_backup_ldap()

        elif args.subparser_name == "uninstall":
            logger.info("Removing all Gluu resources...")
            kustomize = Kustomize(timeout)
            kustomize.uninstall()
            if settings.get("INSTALL_REDIS") == "Y" or settings.get("INSTALL_GLUU_GATEWAY") == "Y":
                helm = Helm()
                helm.uninstall_kubedb()

        elif args.subparser_name == "upgrade":
            from pygluu.kubernetes.terminal.upgrade import PromptUpgrade
            # Jackrabbit clustering is new in 4.2 (not in 4.1), so if enabled make sure KubeDB is installed.
            if settings.get("JACKRABBIT_CLUSTER") == "Y":
                helm = Helm()
                helm.uninstall_kubedb()
                helm.install_kubedb()
            prompt_upgrade = PromptUpgrade(settings)
            logger.info("Starting upgrade...")
            prompt_upgrade.prompt_upgrade()
            kustomize = Kustomize(timeout)
            kustomize.upgrade()

        elif args.subparser_name == "upgrade-values-yaml":
            from pygluu.kubernetes.terminal.upgrade import PromptUpgrade
            # Jackrabbit clustering is new in 4.2 (not in 4.1), so if enabled make sure KubeDB is installed.
            helm = Helm()
            if settings.get("JACKRABBIT_CLUSTER") == "Y":
                helm.uninstall_kubedb()
                helm.install_kubedb()
            prompt_upgrade = PromptUpgrade(settings)
            prompt_upgrade.prompt_upgrade()
            helm = Helm()
            logger.info("Patching values.yaml for helm upgrade...")
            helm.analyze_global_values()
            logger.info("Please find your patched values.yaml at the location ./helm/gluu/values.yaml."
                        "Continue with the steps found at https://gluu.org/docs/gluu-server/latest/upgrade/#helm")

        elif args.subparser_name == "restore":
            kustomize = Kustomize(timeout)
            kustomize.copy_configs_before_restore()
            kustomize.uninstall(restore=True)
            kustomize.install(install_couchbase=False, restore=True)

        elif args.subparser_name == "install-couchbase":
            from pygluu.kubernetes.terminal.couchbase import PromptCouchbase
            prompt_couchbase = PromptCouchbase(settings)
            prompt_couchbase.prompt_couchbase()
            couchbase = Couchbase()
            couchbase.install()

        elif args.subparser_name == "install-couchbase-backup":
            from pygluu.kubernetes.terminal.couchbase import PromptCouchbase
            prompt_couchbase = PromptCouchbase(settings)
            prompt_couchbase.prompt_couchbase()
            couchbase = Couchbase()
            couchbase.setup_backup_couchbase()

        elif args.subparser_name == "uninstall-couchbase":
            from pygluu.kubernetes.terminal.couchbase import PromptCouchbase
            prompt_couchbase = PromptCouchbase(settings)
            prompt_couchbase.prompt_couchbase()
            couchbase = Couchbase()
            couchbase.uninstall()

        elif args.subparser_name == "install-gg-dbmode":
            from pygluu.kubernetes.terminal.gluugateway import PromptGluuGateway
            prompt_gluu_gateway = PromptGluuGateway(settings)
            prompt_gluu_gateway.prompt_gluu_gateway()
            kustomize = Kustomize(timeout)
            kustomize.install_gluu_gateway_dbmode()

        elif args.subparser_name == "install-kubedb":
            helm = Helm()
            helm.install_kubedb()

        elif args.subparser_name == "uninstall-gg-dbmode":
            kustomize = Kustomize(timeout)
            kustomize.uninstall_kong()
            kustomize.uninstall_gluu_gateway_ui()

        elif args.subparser_name == "generate-settings":
            logger.info("settings.json has been generated")

        elif args.subparser_name == "helm-install":
            from pygluu.kubernetes.terminal.helm import PromptHelm
            prompt_helm = PromptHelm(settings)
            prompt_helm.prompt_helm()
            helm = Helm()
            if settings.get("INSTALL_REDIS") == "Y" or \
                    settings.get("INSTALL_GLUU_GATEWAY") == "Y" or \
                    settings.get("JACKRABBIT_CLUSTER") == "Y":
                helm.uninstall_kubedb()
                helm.install_kubedb()
            if settings.get("JACKRABBIT_CLUSTER") == "Y":
                kustomize = Kustomize(timeout)
                kustomize.deploy_postgres()
            if settings.get("INSTALL_REDIS") == "Y":
                kustomize = Kustomize(timeout)
                kustomize.uninstall_redis()
                kustomize.deploy_redis()
            helm.install_gluu()

        elif args.subparser_name == "helm-uninstall":
            from pygluu.kubernetes.terminal.helm import PromptHelm
            prompt_helm = PromptHelm(settings)
            prompt_helm.prompt_helm()
            kustomize = Kustomize(timeout)
            helm = Helm()
            helm.uninstall_gluu()
            helm.uninstall_nginx_ingress()
            helm.uninstall_gluu_gateway_dbmode()
            helm.uninstall_gluu_gateway_ui()
            logger.info("Please wait...")
            time.sleep(30)
            kustomize.uninstall()
            helm.uninstall_kubedb()

        elif args.subparser_name == "helm-install-gluu":
            from pygluu.kubernetes.terminal.helm import PromptHelm
            prompt_helm = PromptHelm(settings)
            prompt_helm.prompt_helm()
            helm = Helm()
            helm.uninstall_gluu()
            helm.install_gluu(install_ingress=False)

        elif args.subparser_name == "helm-install-gg-dbmode":
            from pygluu.kubernetes.terminal.helm import PromptHelm
            prompt_helm = PromptHelm(settings)
            prompt_helm.prompt_helm()
            kustomize = Kustomize(timeout)
            kustomize.patch_or_deploy_postgres()
            helm = Helm()
            helm.install_gluu_gateway_dbmode()
            helm.install_gluu_gateway_ui()

        elif args.subparser_name == "helm-uninstall-gg-dbmode":
            from pygluu.kubernetes.terminal.helm import PromptHelm
            prompt_helm = PromptHelm(settings)
            prompt_helm.prompt_helm()
            kustomize = Kustomize(timeout)
            kustomize.uninstall_postgres()
            helm = Helm()
            helm.uninstall_gluu_gateway_dbmode()
            helm.uninstall_gluu_gateway_ui()

        elif args.subparser_name == "helm-uninstall-gluu":
            from pygluu.kubernetes.terminal.helm import PromptHelm
            prompt_helm = PromptHelm(settings)
            prompt_helm.prompt_helm()
            helm = Helm()
            helm.uninstall_gluu()

    except KeyboardInterrupt:
        print("\n[I] Canceled by user; exiting ...")
Example #21
0
    def __init__(self):
        self.settings = SettingsHandler()
        self.kubernetes = Kubernetes()
        self.timeout = 120