Example #1
    def install_redis(self):
        self.uninstall_redis()
        self.kubernetes.create_namespace(name=self.settings.get("installer-settings.redis.namespace"), labels={"app": "redis"})
        if self.settings.get("CN_DEPLOYMENT_ARCH") != "local":
            redis_storage_class = Path("./redis/storageclasses.yaml")
            analyze_storage_class(self.settings, redis_storage_class)
            self.kubernetes.create_objects_from_dict(redis_storage_class)

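        # Point the bundled Redis ConfigMap at the chosen namespace before applying it.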
        redis_configmap = Path("./redis/configmaps.yaml")
        redis_conf_parser = Parser(redis_configmap, "ConfigMap")
        redis_conf_parser["metadata"]["namespace"] = self.settings.get("installer-settings.redis.namespace")
        redis_conf_parser.dump_it()
        self.kubernetes.create_objects_from_dict(redis_configmap)

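        # Patch the KubeDB Redis custom resource: cluster sizing, monitoring namespace and storage class.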
        redis_yaml = Path("./redis/redis.yaml")
        redis_parser = Parser(redis_yaml, "Redis")
        redis_parser["spec"]["cluster"]["master"] = self.settings.get("installer-settings.redis.masterNodes")
        redis_parser["spec"]["cluster"]["replicas"] = self.settings.get("installer-settings.redis.nodesPerMaster")
        redis_parser["spec"]["monitor"]["prometheus"]["namespace"] = self.settings.get("installer-settings.redis.namespace")
        if self.settings.get("CN_DEPLOYMENT_ARCH") == "local":
            redis_parser["spec"]["storage"]["storageClassName"] = "openebs-hostpath"
        redis_parser["metadata"]["namespace"] = self.settings.get("installer-settings.redis.namespace")
        if self.settings.get("global.storageClass.provisioner") in ("microk8s.io/hostpath", "k8s.io/minikube-hostpath"):
            del redis_parser["spec"]["podTemplate"]["spec"]["resources"]
        redis_parser.dump_it()
        self.kubernetes.create_namespaced_custom_object(filepath=redis_yaml,
                                                        group="kubedb.com",
                                                        version="v1alpha1",
                                                        plural="redises",
                                                        namespace=self.settings.get("installer-settings.redis.namespace"))

        if not self.settings.get("installer-settings.aws.lbType") == "alb":
            self.kubernetes.check_pods_statuses(self.settings.get("installer-settings.redis.namespace"), "app=redis-cluster", self.timeout)
Example #2
 def upgrade_gluu(self):
     values_file_parser = Parser(self.upgrade_values_file, True)
     values_file_parser["domain"] = self.settings.get("global.fqdn")
     values_file_parser["cnCacheType"] = self.settings.get(
         "config.configmap.cnCacheType")
     values_file_parser["cnCouchbaseUrl"] = self.settings.get(
         "config.configmap.cnCouchbaseUrl")
     values_file_parser["cnCouchbaseUser"] = self.settings.get(
         "config.configmap.cnCouchbaseUser")
     values_file_parser["cnCouchbaseSuperUser"] = self.settings.get(
         "config.configmap.cnCouchbaseSuperUser")
     values_file_parser["cnPersistenceLdapMapping"] = self.settings.get(
         "global.cnPersistenceType")
     values_file_parser["cnPersistenceType"] = self.settings.get(
         "config.configmap.cnPersistenceLdapMapping")
     values_file_parser["source"] = self.settings.get(
         "installer-settings.currentVersion")
     values_file_parser["target"] = self.settings.get(
         "installer-settings.upgrade.targetVersion")
     values_file_parser.dump_it()
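     # Install the gluu-upgrade chart into the target namespace.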
     exec_cmd(
         "helm install {} -f {} ./helm/gluu-upgrade --namespace={}".format(
             self.settings.get('installer-settings.releaseName'),
             self.values_file,
             self.settings.get("installer-settings.namespace")))
Example #3
 def install_ldap_backup(self):
     values_file = Path("./helm/ldap-backup/values.yaml").resolve()
     values_file_parser = Parser(values_file, True)
     values_file_parser["ldapPass"] = self.settings.get("LDAP_PW")
     values_file_parser.dump_it()
     exec_cmd(
         "helm install {} -f ./helm/ldap-backup/values.yaml ./helm/ldap-backup --namespace={}"
         .format(self.ldap_backup_release_name,
                 self.settings.get("CN_NAMESPACE")))
Example #4
    def prepare_alb(self):
        ingress_parser = Parser("./alb/ingress.yaml", "Ingress")
        ingress_parser["spec"]["rules"][0]["host"] = self.settings.get(
            "global.fqdn")
        ingress_parser["metadata"]["annotations"]["alb.ingress.kubernetes.io/certificate-arn"] = \
            self.settings.get("installer-settings.aws.arn.arnAcmCert")
        if not self.settings.get("installer-settings.aws.arn.enabled"):
            del ingress_parser["metadata"]["annotations"][
                "alb.ingress.kubernetes.io/certificate-arn"]

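        # Drop the ingress paths of services that are not enabled; iterate over a copy
        # of the list so index-based deletions do not skip entries.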
        for path in ingress_parser["spec"]["rules"][0]["http"]["paths"]:
            service_name = path["backend"]["serviceName"]
            if self.settings.get("config.configmap.cnCasaEnabled"
                                 ) and service_name == "casa":
                path_index = ingress_parser["spec"]["rules"][0]["http"][
                    "paths"].index(path)
                del ingress_parser["spec"]["rules"][0]["http"]["paths"][
                    path_index]

            if self.settings.get("global.oxshibboleth.enabled"
                                 ) and service_name == "oxshibboleth":
                path_index = ingress_parser["spec"]["rules"][0]["http"][
                    "paths"].index(path)
                del ingress_parser["spec"]["rules"][0]["http"]["paths"][
                    path_index]

            if self.settings.get("config.configmap.cnPassportEnabled"
                                 ) and service_name == "oxpassport":
                path_index = ingress_parser["spec"]["rules"][0]["http"][
                    "paths"].index(path)
                del ingress_parser["spec"]["rules"][0]["http"]["paths"][
                    path_index]

            if self.settings.get("installer-settings.gluuGateway.uI"
                                 ) and service_name == "gg-kong-ui":
                path_index = ingress_parser["spec"]["rules"][0]["http"][
                    "paths"].index(path)
                del ingress_parser["spec"]["rules"][0]["http"]["paths"][
                    path_index]

            if self.settings.get("installer-settings.global.scim.enabled"
                                 ) and service_name == "jans-scim":
                path_index = ingress_parser["spec"]["rules"][0]["http"][
                    "paths"].index(path)
                del ingress_parser["spec"]["rules"][0]["http"]["paths"][
                    path_index]

            if self.settings.get("installer-settings.config-api.enabled"
                                 ) and service_name == "config-api":
                path_index = ingress_parser["spec"]["rules"][0]["http"][
                    "paths"].index(path)
                del ingress_parser["spec"]["rules"][0]["http"]["paths"][
                    path_index]

        ingress_parser.dump_it()
Example #5
 def parse_couchbase_buckets(file, bucket_type, allbuckets):
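     # Note: memory_quota and couchbase_bucket_prefix are not defined in this
     # snippet; they are assumed to come from the enclosing scope.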
     for bucket in allbuckets:
         metadata_name = "gluu"
         if bucket:
             metadata_name = "gluu-" + bucket
         parser = Parser(file, bucket_type, metadata_name)
         parser["spec"]["memoryQuota"] = str(memory_quota + 100) + "Mi"
         parser["spec"]["name"] = couchbase_bucket_prefix
         parser["metadata"]["name"] = couchbase_bucket_prefix
         if bucket:
             parser["spec"]["name"] = couchbase_bucket_prefix + "_" + bucket
             parser["metadata"][
                 "name"] = couchbase_bucket_prefix + "-" + bucket
         parser.dump_it()
Example #6
def set_memory_for_buckets(memory_quota):
    buckets = ["gluu", "gluu-site", "gluu-user"]
    ephemeral_buckets = ["gluu-cache", "gluu-token", "gluu-session"]

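    # Apply the requested quota plus a 100Mi headroom to each persistent and ephemeral bucket.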
    for bucket in buckets:
        parser = Parser("./couchbase/couchbase-buckets.yaml",
                        "CouchbaseBucket", bucket)
        parser["spec"]["memoryQuota"] = str(memory_quota + 100) + "Mi"
        parser.dump_it()

    for bucket in ephemeral_buckets:
        parser = Parser("./couchbase/couchbase-ephemeral-buckets.yaml",
                        "CouchbaseEphemeralBucket", bucket)
        parser["spec"]["memoryQuota"] = str(memory_quota + 100) + "Mi"
        parser.dump_it()
Example #7
    def install_gluu(self, install_ingress=True):
        """
        Helm install Gluu
        :param install_ingress: passed through to check_install_nginx_ingress
        """
        labels = {"app": "gluu"}
        if self.settings.get("USE_ISTIO") == "Y":
            labels = {"app": "gluu", "istio-injection": "enabled"}
        self.kubernetes.create_namespace(
            name=self.settings.get("CN_NAMESPACE"), labels=labels)
        if self.settings.get(
                "PERSISTENCE_BACKEND") != "ldap" and self.settings.get(
                    "INSTALL_COUCHBASE") == "Y":
            couchbase_app = Couchbase()
            couchbase_app.uninstall()
            couchbase_app = Couchbase()
            couchbase_app.install()
            self.settings = SettingsHandler()
        if self.settings.get("AWS_LB_TYPE") == "alb":
            self.prepare_alb()
            self.deploy_alb()
        if self.settings.get("AWS_LB_TYPE") != "alb" and self.settings.get(
                "USE_ISTIO_INGRESS") != "Y":
            self.check_install_nginx_ingress(install_ingress)
        self.analyze_global_values()
        try:
            exec_cmd("helm install {} -f {} ./helm/gluu --namespace={}".format(
                self.settings.get('CN_HELM_RELEASE_NAME'), self.values_file,
                self.settings.get("CN_NAMESPACE")))

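            # When persistence includes LDAP, deploy the ldap-backup chart alongside Gluu.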
            if self.settings.get("PERSISTENCE_BACKEND") == "hybrid" or \
                    self.settings.get("PERSISTENCE_BACKEND") == "ldap":
                values_file = Path("./helm/ldap-backup/values.yaml").resolve()
                values_file_parser = Parser(values_file, True)
                values_file_parser["ldapPass"] = self.settings.get("LDAP_PW")
                values_file_parser.dump_it()

                exec_cmd(
                    "helm install {} -f ./helm/ldap-backup/values.yaml ./helm/ldap-backup --namespace={}"
                    .format(self.ldap_backup_release_name,
                            self.settings.get("CN_NAMESPACE")))
        except FileNotFoundError:
            logger.error(
                "Helm v3 is not installed. Please install it to continue "
                "https://helm.sh/docs/intro/install/")
            raise SystemExit(1)
Example #8
    def install_postgres(self):
        self.uninstall_postgres()
        self.kubernetes.create_namespace(
            name=self.settings.get("installer-settings.postgres.install"),
            labels={"app": "postgres"})
        self.create_patch_secret_init_sql()
        if "OpenEbs" in self.settings.get(
                "installer-settings.volumeProvisionStrategy"):
            postgres_storage_class = Path("./postgres/storageclasses.yaml")
            analyze_storage_class(self.settings, postgres_storage_class)
            self.kubernetes.create_objects_from_dict(postgres_storage_class)

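        # Patch the KubeDB Postgres custom resource with the replica count, monitoring
        # namespace and (for OpenEBS) the storage class.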
        postgres_yaml = Path("./postgres/postgres.yaml")
        postgres_parser = Parser(postgres_yaml, "Postgres")
        postgres_parser["spec"]["replicas"] = self.settings.get(
            "installer-settings.postgres.replicas")
        postgres_parser["spec"]["monitor"]["prometheus"][
            "namespace"] = self.settings.get(
                "installer-settings.postgres.install")
        if "OpenEbs" in self.settings.get(
                "installer-settings.volumeProvisionStrategy"):
            postgres_parser["spec"]["storage"][
                "storageClassName"] = "openebs-hostpath"
        postgres_parser["metadata"]["namespace"] = self.settings.get(
            "installer-settings.postgres.install")
        if self.settings.get("global.storageClass.provisioner") in \
                ("microk8s.io/hostpath", "k8s.io/minikube-hostpath") or \
                self.settings.get("global.cloud.testEnviroment"):
            try:
                del postgres_parser["spec"]["podTemplate"]["spec"]["resources"]
            except KeyError:
                logger.info(
                    "Resources not deleted as they are not found inside yaml.")

        postgres_parser.dump_it()
        self.kubernetes.create_namespaced_custom_object(
            filepath=postgres_yaml,
            group="kubedb.com",
            version="v1alpha1",
            plural="postgreses",
            namespace=self.settings.get("installer-settings.postgres.install"))
        if not self.settings.get("installer-settings.aws.lbType") == "alb":
            self.kubernetes.check_pods_statuses(
                self.settings.get("installer-settings.postgres.install"),
                "app=postgres", self.timeout)
Example #9
 def install_ldap_backup(self):
     values_file = Path("./helm/ldap-backup/values.yaml").resolve()
     values_file_parser = Parser(values_file, True)
     values_file_parser["ldapPass"] = self.settings.get(
         "config.ldapPassword")
     if self.settings.get("global.storageClass.provisioner") not in \
             ("microk8s.io/hostpath", "k8s.io/minikube-hostpath"):
         values_file_parser["gluuLdapSchedule"] = self.settings.get(
             "installer-settings.ldap.backup.fullSchedule")
     if self.settings.get("opendj.multiCluster.enabled"):
         values_file_parser["multiCluster"]["enabled"] = True
         values_file_parser["multiCluster"]["ldapAdvertiseAdminPort"] = \
             self.settings.get("opendj.ports.tcp-admin.nodePort")
         values_file_parser["multiCluster"]["serfAdvertiseAddrSuffix"] = \
             self.settings.get("opendj.multiCluster.serfAdvertiseAddrSuffix")[:-6]
     values_file_parser.dump_it()
     exec_cmd(
         "helm install {} -f ./helm/ldap-backup/values.yaml ./helm/ldap-backup --namespace={}"
         .format(self.ldap_backup_release_name,
                 self.settings.get("installer-settings.namespace")))
Example #10
    def install_postgres(self):
        self.uninstall_postgres()
        self.kubernetes.create_namespace(
            name=self.settings.get("POSTGRES_NAMESPACE"),
            labels={"app": "postgres"})
        self.create_patch_secret_init_sql()
        if self.settings.get("DEPLOYMENT_ARCH") != "local":
            postgres_storage_class = Path("./postgres/storageclasses.yaml")
            analyze_storage_class(self.settings, postgres_storage_class)
            self.kubernetes.create_objects_from_dict(postgres_storage_class)

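        # Patch the KubeDB Postgres custom resource before creating it in the target namespace.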
        postgres_yaml = Path("./postgres/postgres.yaml")
        postgres_parser = Parser(postgres_yaml, "Postgres")
        postgres_parser["spec"]["replicas"] = self.settings.get(
            "POSTGRES_REPLICAS")
        postgres_parser["spec"]["monitor"]["prometheus"][
            "namespace"] = self.settings.get("POSTGRES_NAMESPACE")
        if self.settings.get("DEPLOYMENT_ARCH") == "local":
            postgres_parser["spec"]["storage"][
                "storageClassName"] = "openebs-hostpath"
        postgres_parser["metadata"]["namespace"] = self.settings.get(
            "POSTGRES_NAMESPACE")
        if self.settings.get("DEPLOYMENT_ARCH") in ("microk8s", "minikube") or \
                self.settings.get("TEST_ENVIRONMENT") == "Y":
            try:
                del postgres_parser["spec"]["podTemplate"]["spec"]["resources"]
            except KeyError:
                logger.info(
                    "Resources not deleted as they are not found inside yaml.")

        postgres_parser.dump_it()
        self.kubernetes.create_namespaced_custom_object(
            filepath=postgres_yaml,
            group="kubedb.com",
            version="v1alpha1",
            plural="postgreses",
            namespace=self.settings.get("POSTGRES_NAMESPACE"))
        if not self.settings.get("AWS_LB_TYPE") == "alb":
            self.kubernetes.check_pods_statuses(
                self.settings.get("POSTGRES_NAMESPACE"), "app=postgres",
                self.timeout)
Example #11
    def prepare_alb(self):
        ingress_parser = Parser("./alb/ingress.yaml", "Ingress")
        ingress_parser["spec"]["rules"][0]["host"] = self.settings.get(
            "CN_FQDN")
        ingress_parser["metadata"]["annotations"]["alb.ingress.kubernetes.io/certificate-arn"] = \
            self.settings.get("ARN_AWS_IAM")
        if not self.settings.get("ARN_AWS_IAM"):
            del ingress_parser["metadata"]["annotations"][
                "alb.ingress.kubernetes.io/certificate-arn"]

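        # Drop the ingress paths of services that are disabled; iterate over a copy
        # of the list so index-based deletions do not skip entries.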
        for path in ingress_parser["spec"]["rules"][0]["http"]["paths"]:
            service_name = path["backend"]["serviceName"]
            if self.settings.get(
                    "ENABLE_CASA") != "Y" and service_name == "casa":
                path_index = ingress_parser["spec"]["rules"][0]["http"][
                    "paths"].index(path)
                del ingress_parser["spec"]["rules"][0]["http"]["paths"][
                    path_index]

            if self.settings.get("ENABLE_OXSHIBBOLETH"
                                 ) != "Y" and service_name == "oxshibboleth":
                path_index = ingress_parser["spec"]["rules"][0]["http"][
                    "paths"].index(path)
                del ingress_parser["spec"]["rules"][0]["http"]["paths"][
                    path_index]

            if self.settings.get("ENABLE_OXPASSPORT"
                                 ) != "Y" and service_name == "oxpassport":
                path_index = ingress_parser["spec"]["rules"][0]["http"][
                    "paths"].index(path)
                del ingress_parser["spec"]["rules"][0]["http"]["paths"][
                    path_index]

            if self.settings.get("INSTALL_GLUU_GATEWAY"
                                 ) != "Y" and service_name == "gg-kong-ui":
                path_index = ingress_parser["spec"]["rules"][0]["http"][
                    "paths"].index(path)
                del ingress_parser["spec"]["rules"][0]["http"]["paths"][
                    path_index]
        ingress_parser.dump_it()
Example #12
 def upgrade_gluu(self):
     values_file_parser = Parser(self.upgrade_values_file, True)
     values_file_parser["domain"] = self.settings.get("CN_FQDN")
     values_file_parser["cnCacheType"] = self.settings.get("CN_CACHE_TYPE")
     values_file_parser["cnCouchbaseUrl"] = self.settings.get(
         "COUCHBASE_URL")
     values_file_parser["cnCouchbaseUser"] = self.settings.get(
         "COUCHBASE_USER")
     values_file_parser["cnCouchbaseSuperUser"] = self.settings.get(
         "COUCHBASE_SUPERUSER")
     values_file_parser["cnPersistenceLdapMapping"] = self.settings.get(
         "HYBRID_LDAP_HELD_DATA")
     values_file_parser["cnPersistenceType"] = self.settings.get(
         "PERSISTENCE_BACKEND")
     values_file_parser["source"] = self.settings.get("CN_VERSION")
     values_file_parser["target"] = self.settings.get(
         "CN_UPGRADE_TARGET_VERSION")
     values_file_parser.dump_it()
     exec_cmd(
         "helm install {} -f {} ./helm/gluu-upgrade --namespace={}".format(
             self.settings.get('CN_HELM_RELEASE_NAME'), self.values_file,
             self.settings.get("CN_NAMESPACE")))
Example #13
 def setup_backup_couchbase(self):
     """
     Sets up the Couchbase backup strategy
     """
     couchbase_backup_file = Path(
         "./couchbase/backup/couchbase-backup.yaml")
     parser = Parser(couchbase_backup_file, "CouchbaseBackup")
     parser["spec"]["full"]["schedule"] = self.settings.get(
         "COUCHBASE_FULL_BACKUP_SCHEDULE")
     parser["spec"]["incremental"]["schedule"] = self.settings.get(
         "COUCHBASE_INCR_BACKUP_SCHEDULE")
     parser["spec"]["backupRetention"] = self.settings.get(
         "COUCHBASE_BACKUP_RETENTION_TIME")
     parser["spec"]["size"] = self.settings.get(
         "COUCHBASE_BACKUP_STORAGE_SIZE")
     parser.dump_it()
     self.kubernetes.create_namespaced_custom_object(
         filepath=couchbase_backup_file,
         group="couchbase.com",
         version="v2",
         plural="couchbasebackups",
         namespace=self.settings.get("COUCHBASE_NAMESPACE"))
Example #14
 def setup_backup_couchbase(self):
     """
     Sets up the Couchbase backup strategy
     """
     couchbase_backup_file = Path(
         "./couchbase/backup/couchbase-backup.yaml")
     parser = Parser(couchbase_backup_file, "CouchbaseBackup")
     parser["spec"]["full"]["schedule"] = self.settings.get(
         "installer-settings.couchbase.backup.fullSchedule")
     parser["spec"]["incremental"]["schedule"] = self.settings.get(
         "installer-settings.couchbase.backup.incrementalSchedule")
     parser["spec"]["backupRetention"] = self.settings.get(
         "installer-settings.couchbase.backup.retentionTime")
     parser["spec"]["size"] = self.settings.get(
         "installer-settings.couchbase.backup.storageSize")
     parser.dump_it()
     self.kubernetes.create_namespaced_custom_object(
         filepath=couchbase_backup_file,
         group="couchbase.com",
         version="v2",
         plural="couchbasebackups",
         namespace=self.settings.get(
             "installer-settings.couchbase.namespace"))
Example #15
    def install(self):
        """
        Installs Couchbase
        """
        self.kubernetes.create_namespace(
            name=self.settings.get("CN_NAMESPACE"))
        if self.settings.get("COUCHBASE_CLUSTER_FILE_OVERRIDE") == "N":
            self.analyze_couchbase_cluster_yaml()
        cb_namespace = self.settings.get("COUCHBASE_NAMESPACE")
        storage_class_file_parser = Parser(self.storage_class_file,
                                           "StorageClass")
        if self.settings.get('DEPLOYMENT_ARCH') == "gke" or \
                self.settings.get('DEPLOYMENT_ARCH') == "aks" or \
                self.settings.get('DEPLOYMENT_ARCH') == "do":
            try:
                del storage_class_file_parser["parameters"]["encrypted"]
            except KeyError:
                logger.info("Key not found")
            storage_class_file_parser["parameters"][
                "type"] = self.settings.get("COUCHBASE_VOLUME_TYPE")
        if self.settings.get('DEPLOYMENT_ARCH') == "gke":
            storage_class_file_parser["provisioner"] = "kubernetes.io/gce-pd"
        elif self.settings.get('DEPLOYMENT_ARCH') == "aks":
            storage_class_file_parser[
                "provisioner"] = "kubernetes.io/azure-disk"
        elif self.settings.get('DEPLOYMENT_ARCH') == "do":
            storage_class_file_parser[
                "provisioner"] = "dobs.csi.digitalocean.com"
        elif self.settings.get('DEPLOYMENT_ARCH') == "microk8s":
            storage_class_file_parser["provisioner"] = "microk8s.io/hostpath"
            try:
                del storage_class_file_parser["allowVolumeExpansion"]
                del storage_class_file_parser["parameters"]
            except KeyError:
                logger.info("Key not found")
            storage_class_file_parser.dump_it()
        elif self.settings.get('DEPLOYMENT_ARCH') == "minikube":
            storage_class_file_parser[
                "provisioner"] = "k8s.io/minikube-hostpath"
            try:
                del storage_class_file_parser["allowVolumeExpansion"]
                del storage_class_file_parser["parameters"]
            except KeyError:
                logger.info("Key not found")
            storage_class_file_parser.dump_it()
        else:
            try:
                storage_class_file_parser["parameters"][
                    "type"] = self.settings.get("COUCHBASE_VOLUME_TYPE")
            except KeyError:
                logger.info("Key not found")
        storage_class_file_parser.dump_it()

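        # Generate a self-signed CA and server certificate if none were supplied under
        # couchbase_crts_keys, then stage the files the TLS secrets are built from.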
        logger.info("Installing Couchbase...")
        couchbase_crts_keys = Path("couchbase_crts_keys")
        if not couchbase_crts_keys.exists():
            os.mkdir(couchbase_crts_keys)
        custom_cb_ca_crt = Path("./couchbase_crts_keys/ca.crt")
        custom_cb_crt = Path("./couchbase_crts_keys/chain.pem")
        custom_cb_key = Path("./couchbase_crts_keys/pkey.key")
        if not custom_cb_ca_crt.exists() and not custom_cb_crt.exists(
        ) and not custom_cb_key.exists():
            setup_crts(
                ca_common_name=self.settings.get("COUCHBASE_CN"),
                cert_common_name="couchbase-server",
                san_list=self.settings.get("COUCHBASE_SUBJECT_ALT_NAME"),
                ca_cert_file="./couchbase_crts_keys/ca.crt",
                ca_key_file="./couchbase_crts_keys/ca.key",
                cert_file="./couchbase_crts_keys/chain.pem",
                key_file="./couchbase_crts_keys/pkey.key")
        self.kubernetes.create_namespace(name=cb_namespace)
        chain_pem_filepath = Path("./couchbase_crts_keys/chain.pem")
        pkey_filepath = Path("./couchbase_crts_keys/pkey.key")
        tls_cert_filepath = Path("./couchbase_crts_keys/tls-cert-file")
        tls_private_key_filepath = Path(
            "./couchbase_crts_keys/tls-private-key-file")
        ca_cert_filepath = Path("./couchbase_crts_keys/ca.crt")
        shutil.copyfile(ca_cert_filepath,
                        Path("./couchbase_crts_keys/couchbase.crt"))
        shutil.copyfile(chain_pem_filepath, tls_cert_filepath)
        shutil.copyfile(pkey_filepath, tls_private_key_filepath)

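        # Base64-encode the certificate material so it can be stored in Kubernetes secrets.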
        encoded_ca_crt_string = self.settings.get("COUCHBASE_CRT")
        if not encoded_ca_crt_string:
            with open(ca_cert_filepath) as content_file:
                ca_crt_content = content_file.read()
                encoded_ca_crt_bytes = base64.b64encode(
                    ca_crt_content.encode("utf-8"))
                encoded_ca_crt_string = str(encoded_ca_crt_bytes, "utf-8")
            self.settings.set("COUCHBASE_CRT", encoded_ca_crt_string)

        with open(chain_pem_filepath) as content_file:
            chain_pem_content = content_file.read()
            encoded_chain_bytes = base64.b64encode(
                chain_pem_content.encode("utf-8"))
            encoded_chain_string = str(encoded_chain_bytes, "utf-8")

        with open(pkey_filepath) as content_file:
            pkey_content = content_file.read()
            encoded_pkey_bytes = base64.b64encode(pkey_content.encode("utf-8"))
            encoded_pkey_string = str(encoded_pkey_bytes, "utf-8")

        self.kubernetes.patch_or_create_namespaced_secret(
            name="couchbase-server-tls",
            namespace=cb_namespace,
            literal=chain_pem_filepath.name,
            value_of_literal=encoded_chain_string,
            second_literal=pkey_filepath.name,
            value_of_second_literal=encoded_pkey_string)
        self.kubernetes.patch_or_create_namespaced_secret(
            name="couchbase-operator-tls",
            namespace=cb_namespace,
            literal=ca_cert_filepath.name,
            value_of_literal=encoded_ca_crt_string)

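        # Encode the Couchbase credentials and create the secrets consumed by Gluu and the operator.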
        encoded_cb_super_user_bytes = base64.b64encode(
            self.settings.get("COUCHBASE_SUPERUSER").encode("utf-8"))
        encoded_cb_super_user_string = str(encoded_cb_super_user_bytes,
                                           "utf-8")
        encoded_cb_pass_bytes = base64.b64encode(
            self.settings.get("COUCHBASE_PASSWORD").encode("utf-8"))
        encoded_cb_pass_string = str(encoded_cb_pass_bytes, "utf-8")
        encoded_cb_super_pass_bytes = base64.b64encode(
            self.settings.get("COUCHBASE_SUPERUSER_PASSWORD").encode("utf-8"))
        encoded_cb_super_pass_string = str(encoded_cb_super_pass_bytes,
                                           "utf-8")

        self.create_couchbase_gluu_cert_pass_secrets(
            encoded_ca_crt_string, encoded_cb_pass_string,
            encoded_cb_super_pass_string)
        self.kubernetes.patch_or_create_namespaced_secret(
            name="gluu-couchbase-user-password",
            namespace=self.settings.get("COUCHBASE_NAMESPACE"),
            literal="password",
            value_of_literal=encoded_cb_pass_string)
        command = "./{}/bin/cbopcfg -backup=true -namespace={}".format(
            self.couchbase_source_file,
            self.settings.get("COUCHBASE_NAMESPACE"))
        exec_cmd(command, output_file=self.couchbase_operator_dac_file)
        couchbase_cluster_parser = Parser(self.couchbase_cluster_file,
                                          "CouchbaseCluster")
        couchbase_cluster_parser["spec"]["networking"]["tls"]["static"][
            "serverSecret"] = "couchbase-server-tls"
        couchbase_cluster_parser["spec"]["networking"]["tls"]["static"][
            "operatorSecret"] = "couchbase-operator-tls"
        try:
            couchbase_cluster_parser["spec"]["security"]["rbac"]["selector"]["matchLabels"]["cluster"] = \
                self.settings.get("COUCHBASE_CLUSTER_NAME")
            couchbase_cluster_parser["spec"]["security"]["rbac"][
                "managed"] = True
        except KeyError:
            logger.error(
                "rbac section is missing or incorrect in couchbase-cluster.yaml."
                " Please set spec --> security --> rbac --> managed : true"
                " and set spec --> security --> rbac --> selector --> matchLabels --> "
                "cluster --> to your cluster name")
            logger.info(
                "As a result of the above the installation will exit "
                "as the gluu user will not be created causing the communication between "
                "Gluu server and Couchbase to fail.")
            sys.exit()
        if self.settings.get("DEPLOYMENT_ARCH") == "local":
            volume_claims = couchbase_cluster_parser["spec"][
                "volumeClaimTemplates"]
            for i, volume_claim in enumerate(volume_claims):
                couchbase_cluster_parser["spec"]["volumeClaimTemplates"][i]["spec"]["storageClassName"] = \
                    "openebs-hostpath"
        couchbase_cluster_parser.dump_it()

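        # Apply the operator CRDs and deployment, wait for the operator pod, then create
        # the cluster, bucket, group, user and rolebinding custom resources.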
        self.kubernetes.create_objects_from_dict(
            self.couchbase_custom_resource_definition_file,
            namespace=cb_namespace)

        self.kubernetes.create_objects_from_dict(
            self.couchbase_operator_dac_file, namespace=cb_namespace)

        self.kubernetes.check_pods_statuses(cb_namespace,
                                            "app=couchbase-operator", 700)

        self.kubernetes.patch_or_create_namespaced_secret(
            name="cb-auth",
            namespace=cb_namespace,
            literal="username",
            value_of_literal=encoded_cb_super_user_string,
            second_literal="password",
            value_of_second_literal=encoded_cb_super_pass_string)

        self.kubernetes.create_objects_from_dict(self.storage_class_file,
                                                 namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_cluster_file,
            group="couchbase.com",
            version="v2",
            plural="couchbaseclusters",
            namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_buckets_file,
            group="couchbase.com",
            version="v2",
            plural="couchbasebuckets",
            namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_ephemeral_buckets_file,
            group="couchbase.com",
            version="v2",
            plural="couchbaseephemeralbuckets",
            namespace=cb_namespace)
        couchbase_group_parser = Parser(self.couchbase_group_file,
                                        "CouchbaseGroup")
        couchbase_group_parser["metadata"]["labels"]["cluster"] = \
            self.settings.get("COUCHBASE_CLUSTER_NAME")
        couchbase_group_parser.dump_it()
        couchbase_user_parser = Parser(self.couchbase_user_file,
                                       "CouchbaseUser")
        couchbase_user_parser["metadata"]["labels"]["cluster"] = \
            self.settings.get("COUCHBASE_CLUSTER_NAME")
        couchbase_user_parser.dump_it()
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_group_file,
            group="couchbase.com",
            version="v2",
            plural="couchbasegroups",
            namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_user_file,
            group="couchbase.com",
            version="v2",
            plural="couchbaseusers",
            namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_rolebinding_file,
            group="couchbase.com",
            version="v2",
            plural="couchbaserolebindings",
            namespace=cb_namespace)
        self.kubernetes.check_pods_statuses(
            cb_namespace, "couchbase_service_analytics=enabled", 700)
        self.kubernetes.check_pods_statuses(cb_namespace,
                                            "couchbase_service_data=enabled",
                                            700)
        self.kubernetes.check_pods_statuses(
            cb_namespace, "couchbase_service_eventing=enabled", 700)
        self.kubernetes.check_pods_statuses(cb_namespace,
                                            "couchbase_service_index=enabled",
                                            700)
        self.kubernetes.check_pods_statuses(cb_namespace,
                                            "couchbase_service_query=enabled",
                                            700)
        self.kubernetes.check_pods_statuses(
            cb_namespace, "couchbase_service_search=enabled", 700)
        # Setup couchbase backups
        if self.settings.get("DEPLOYMENT_ARCH") not in ("microk8s",
                                                        "minikube"):
            self.setup_backup_couchbase()
        shutil.rmtree(self.couchbase_source_folder_pattern, ignore_errors=True)

        if self.settings.get("DEPLOY_MULTI_CLUSTER") == "Y":
            logger.info(
                "Setup XDCR between the running Gluu couchbase cluster and this one"
            )
Example #16
    def install_gluu_gateway_ui(self):
        self.uninstall_gluu_gateway_ui()
        self.kubernetes.create_namespace(
            name=self.settings.get("GLUU_GATEWAY_UI_NAMESPACE"),
            labels={"APP_NAME": "gluu-gateway-ui"})
        try:
            # Try to get gluu cert + key
            ssl_cert = self.kubernetes.read_namespaced_secret(
                "gluu", self.settings.get("CN_NAMESPACE")).data["ssl_cert"]
            ssl_key = self.kubernetes.read_namespaced_secret(
                "gluu", self.settings.get("CN_NAMESPACE")).data["ssl_key"]

            self.kubernetes.patch_or_create_namespaced_secret(
                name="tls-certificate",
                namespace=self.settings.get("GLUU_GATEWAY_UI_NAMESPACE"),
                literal="tls.crt",
                value_of_literal=ssl_cert,
                secret_type="kubernetes.io/tls",
                second_literal="tls.key",
                value_of_second_literal=ssl_key)

        except Exception:
            logger.error(
                "Could not read Gluu secret. Please check config job pod logs. GG-UI will deploy but fail. "
                "Please mount crt and key inside gg-ui deployment")
        client_api_server_url = "https://{}.{}.svc.cluster.local:8443".format(
            self.settings.get("CLIENT_API_APPLICATION_KEYSTORE_CN"),
            self.settings.get("CN_NAMESPACE"))
        values_file = Path("./helm/gluu-gateway-ui/values.yaml").resolve()
        values_file_parser = Parser(values_file, True)
        values_file_parser["cloud"]["isDomainRegistered"] = "false"
        if self.settings.get("IS_CN_FQDN_REGISTERED") == "Y":
            values_file_parser["cloud"]["isDomainRegistered"] = "true"
        if self.settings.get(
                "DEPLOYMENT_ARCH") == "microk8s" or self.settings.get(
                    "DEPLOYMENT_ARCH") == "minikube":
            values_file_parser["cloud"]["enabled"] = False
        values_file_parser["cloud"]["provider"] = self.settings.get(
            "DEPLOYMENT_ARCH")
        values_file_parser["dbUser"] = self.settings.get(
            "GLUU_GATEWAY_UI_PG_USER")
        values_file_parser[
            "kongAdminUrl"] = "https://{}-kong-admin.{}.svc.cluster.local:8444".format(
                self.settings.get("KONG_HELM_RELEASE_NAME"),
                self.settings.get("KONG_NAMESPACE"))
        values_file_parser["dbHost"] = self.settings.get("POSTGRES_URL")
        values_file_parser["dbDatabase"] = self.settings.get(
            "GLUU_GATEWAY_UI_DATABASE")
        values_file_parser["clientApiServerUrl"] = client_api_server_url
        values_file_parser["image"]["repository"] = self.settings.get(
            "GLUU_GATEWAY_UI_IMAGE_NAME")
        values_file_parser["image"]["tag"] = self.settings.get(
            "GLUU_GATEWAY_UI_IMAGE_TAG")
        values_file_parser["loadBalancerIp"] = self.settings.get("HOST_EXT_IP")
        values_file_parser["dbPassword"] = self.settings.get(
            "GLUU_GATEWAY_UI_PG_PASSWORD")
        values_file_parser["opServerUrl"] = "https://" + self.settings.get(
            "CN_FQDN")
        values_file_parser["ggHost"] = self.settings.get("CN_FQDN") + "/gg-ui/"
        values_file_parser["ggUiRedirectUrlHost"] = self.settings.get(
            "CN_FQDN") + "/gg-ui/"
        # Register new client if one was not provided
        if not values_file_parser["clientApiId"] or \
                not values_file_parser["clientId"] or \
                not values_file_parser["clientSecret"]:
            client_api_id, client_id, client_secret = register_op_client(
                self.settings.get("CN_NAMESPACE"), "konga-client",
                self.settings.get("CN_FQDN"), client_api_server_url,
                self.settings.get('CN_HELM_RELEASE_NAME'))
            if not client_api_id:
                values_file_parser.dump_it()
                logger.error(
                    "Due to a failure in konga client registration the installation has stopped."
                    " Please register as suggested above manually and enter the values returned"
                    " for clientApiId, clientId, "
                    "and clientSecret inside ./helm/gluu-gateway-ui/values.yaml then run "
                    "helm install {} -f ./helm/gluu-gateway-ui/values.yaml ./helm/gluu-gateway-ui "
                    "--namespace={}".format(
                        self.settings.get('GLUU_GATEWAY_UI_HELM_RELEASE_NAME'),
                        self.settings.get("GLUU_GATEWAY_UI_NAMESPACE")))
                raise SystemExit(1)
            values_file_parser["clientApiId"] = client_api_id
            values_file_parser["clientId"] = client_id
            values_file_parser["clientSecret"] = client_secret

        values_file_parser.dump_it()
        exec_cmd(
            "helm install {} -f ./helm/gluu-gateway-ui/values.yaml ./helm/gluu-gateway-ui --namespace={}"
            .format(self.settings.get('GLUU_GATEWAY_UI_HELM_RELEASE_NAME'),
                    self.settings.get("GLUU_GATEWAY_UI_NAMESPACE")))
Example #17
 def check_install_nginx_ingress(self, install_ingress=True):
     """
     Helm installs nginx ingress or checks to receive an ip or address
     :param install_ingress: when False, skip the helm install commands and only wait for the ingress address
     """
     if install_ingress:
         self.kubernetes.delete_custom_resource(
             "virtualservers.k8s.nginx.org")
         self.kubernetes.delete_custom_resource(
             "virtualserverroutes.k8s.nginx.org")
         self.kubernetes.delete_cluster_role("ingress-nginx-nginx-ingress")
         self.kubernetes.delete_cluster_role_binding(
             "ingress-nginx-nginx-ingress")
         self.kubernetes.create_namespace(name=self.settings.get(
             "installer-settings.nginxIngress.releaseName"),
                                          labels={"app": "ingress-nginx"})
         self.kubernetes.delete_cluster_role(
             self.settings.get(
                 'installer-settings.nginxIngress.releaseName') +
             "-nginx-ingress-controller")
         self.kubernetes.delete_cluster_role_binding(
             self.settings.get(
                 'installer-settings.nginxIngress.releaseName') +
             "-nginx-ingress-controller")
         try:
             exec_cmd(
                 "helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx"
             )
             exec_cmd("helm repo add stable https://charts.helm.sh/stable")
             exec_cmd("helm repo update")
         except FileNotFoundError:
             logger.error(
                 "Helm v3 is not installed. Please install it to continue "
                 "https://helm.sh/docs/intro/install/")
             raise SystemExit(1)
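     # Build the base helm install command; provider-specific override values are appended below.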
     command = "helm install {} ingress-nginx/ingress-nginx --namespace={} ".format(
         self.settings.get('installer-settings.nginxIngress.releaseName'),
         self.settings.get("installer-settings.nginxIngress.namespace"))
     if self.settings.get("installer-settings.volumeProvisionStrategy"
                          ) == "minikubeDynamic":
         exec_cmd("minikube addons enable ingress")
     if "aws" in self.settings.get(
             "installer-settings.volumeProvisionStrategy"):
         if self.settings.get("installer-settings.aws.lbType") == "nlb":
             if install_ingress:
                 nlb_override_values_file = Path(
                     "./nginx/aws/aws-nlb-override-values.yaml").resolve()
                 nlb_values = " --values {}".format(
                     nlb_override_values_file)
                 exec_cmd(command + nlb_values)
         else:
             if self.settings.get("installer-settings.aws.arn.enabled"):
                 if install_ingress:
                     elb_override_values_file = Path(
                         "./nginx/aws/aws-elb-override-values.yaml"
                     ).resolve()
                     elb_file_parser = Parser(elb_override_values_file,
                                              True)
                     elb_file_parser["controller"]["service"][
                         "annotations"].update({
                             "service.beta.kubernetes.io/aws-load-balancer-ssl-cert":
                             self.settings.get(
                                 "installer-settings.aws.arn.arnAcmCert")
                         })
                     elb_file_parser["controller"]["config"]["proxy-real-ip-cidr"] = \
                         self.settings.get("installer-settings.aws.vpcCidr")
                     elb_file_parser.dump_it()
                     elb_values = " --values {}".format(
                         elb_override_values_file)
                     exec_cmd(command + elb_values)
             else:
                 if install_ingress:
                     exec_cmd(command)
     volume_provision_strategy = self.settings.get(
         "installer-settings.volumeProvisionStrategy")
     if "gke" in volume_provision_strategy or \
             "aks" in volume_provision_strategy or \
             "doks" in volume_provision_strategy:
         if install_ingress:
             cloud_override_values_file = Path(
                 "./nginx/cloud/cloud-override-values.yaml").resolve()
             cloud_values = " --values {}".format(
                 cloud_override_values_file)
             exec_cmd(command + cloud_values)
     elif "local" in volume_provision_strategy:
         if install_ingress:
             baremetal_override_values_file = Path(
                 "./nginx/baremetal/baremetal-override-values.yaml"
             ).resolve()
             baremetal_values = " --values {}".format(
                 baremetal_override_values_file)
             exec_cmd(command + baremetal_values)
     if self.settings.get("global.storageClass.provisioner") not in \
             ("microk8s.io/hostpath", "k8s.io/minikube-hostpath"):
         logger.info("Waiting for nginx to be prepared...")
         time.sleep(60)
         self.wait_for_nginx_add()
Example #18
class ValuesHandler(object):
    def __init__(self,
                 values_file="./helm/gluu/override-values.yaml",
                 values_schema_file="./helm/gluu/values.schema.json"):
        self.values_file = Path(values_file)
        self.values_schema = Path(values_schema_file)
        self.errors = list()
        self.values_file_parser = Parser(self.values_file, True)
        self.schema = {}

    def load(self):
        """
        Get merged settings (default and custom settings from json file).
        """
        # Check if running in container and settings.json mounted
        try:
            shutil.copy(Path("./override-values.yaml"), self.values_file)
            self.values_file_parser = Parser(self.values_file, True)
        except FileNotFoundError:
            # No installation settings mounted as /override-values.yaml. Checking values.yaml.
            pass

    def store_override_file(self):
        """
        Copy override file to main directory
        """
        shutil.copy(Path("./helm/gluu/override-values.yaml"),
                    Path("./override-values.yaml"))

    def store_data(self, clean_data=False):
        try:
            self.values_file_parser.dump_it(clean_data)
            return True
        except Exception as exc:
            logger.info(f"Uncaught error={exc}")
            return False

    def set(self, keys_string, value):
        """
        single update
        """
        try:
            dot = dotty(self.values_file_parser)
            dot[keys_string] = value
            self.store_data()
        except Exception as exc:
            logger.info(f"Uncaught error={exc}")
            return False

    def get(self, keys_string):
        """
        Return the value at a dotted key path (e.g. "global.fqdn"), or False if it cannot be found
        """
        try:
            dot = dotty(self.values_file_parser)
            return dot[keys_string]

        except (KeyError, NameError):
            logger.info("No Value Can Be Found for " + str(keys_string))
            return False

    def update(self, collection):
        """
        mass update
        """
        try:
            self.values_file_parser.update(collection)
            self.store_data()
            return True
        except Exception as exc:
            logger.info(f"Uncaught error={exc}")
            return False

    def reset_data(self):
        """
        reset values.yaml to default_settings
        """
        try:
            iterate_dict(self.values_file_parser)
            self.store_data()
            return True
        except Exception as exc:
            logger.info(f"Uncaught error={exc}")
            return False

    def remove_empty_keys(self):
        """
        removes empty keys for override-values.yaml
        """
        try:
            self.store_data(clean_data=True)
            self.store_override_file()
            return True
        except Exception as exc:
            logger.error(f"Uncaught error={exc}")
            return False
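
A minimal usage sketch of ValuesHandler (assuming the chart files referenced in __init__ exist relative to the working directory; the dotted key is only illustrative):

handler = ValuesHandler()
handler.set("global.fqdn", "demo.example.org")  # single dotted-key update, persisted via dump_it()
fqdn = handler.get("global.fqdn")               # returns False when the key cannot be found
handler.remove_empty_keys()                     # dump without empty keys and copy to ./override-values.yaml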
Example #19
def analyze_storage_class(settings, storageclass):
    from pygluu.kubernetes.yamlparser import Parser
    parser = Parser(storageclass, "StorageClass")
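    # Rewrite the StorageClass provisioner and parameters to match the target deployment architecture.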
    if settings.get("DEPLOYMENT_ARCH") == "eks":
        parser["provisioner"] = "kubernetes.io/aws-ebs"
        parser["parameters"]["encrypted"] = "true"
        parser["parameters"]["type"] = settings.get("LDAP_JACKRABBIT_VOLUME")
        unique_zones = list(dict.fromkeys(settings.get("NODES_ZONES")))
        parser["allowedTopologies"][0]["matchLabelExpressions"][0][
            "values"] = unique_zones
        parser.dump_it()
    elif settings.get("DEPLOYMENT_ARCH") == "gke":
        parser["provisioner"] = "kubernetes.io/gce-pd"
        try:
            del parser["parameters"]["encrypted"]
        except KeyError:
            logger.info("Key not deleted as it does not exist inside yaml.")
        parser["parameters"]["type"] = settings.get("LDAP_JACKRABBIT_VOLUME")
        unique_zones = list(dict.fromkeys(settings.get("NODES_ZONES")))
        parser["allowedTopologies"][0]["matchLabelExpressions"][0][
            "values"] = unique_zones
        parser.dump_it()
    elif settings.get("DEPLOYMENT_ARCH") == "aks":
        parser["provisioner"] = "kubernetes.io/azure-disk"
        try:
            del parser["parameters"]["encrypted"]
            del parser["parameters"]["type"]
        except KeyError:
            logger.info("Key not deleted as it does not exist inside yaml.")
        parser["parameters"]["storageaccounttype"] = settings.get(
            "LDAP_JACKRABBIT_VOLUME")
        unique_zones = list(dict.fromkeys(settings.get("NODES_ZONES")))
        parser["allowedTopologies"][0]["matchLabelExpressions"][0][
            "values"] = unique_zones
        parser.dump_it()
    elif settings.get("DEPLOYMENT_ARCH") == "do":
        parser["provisioner"] = "dobs.csi.digitalocean.com"
        try:
            del parser["parameters"]
            del parser["allowedTopologies"]
        except KeyError:
            logger.info("Key not deleted as it does not exist inside yaml.")
        parser.dump_it()
    elif settings.get('DEPLOYMENT_ARCH') == "microk8s":
        try:
            parser["provisioner"] = "microk8s.io/hostpath"
            del parser["allowedTopologies"]
            del parser["allowVolumeExpansion"]
            del parser["parameters"]
        except KeyError:
            logger.info("Key not deleted as it does not exist inside yaml.")
        parser.dump_it()
    elif settings.get('DEPLOYMENT_ARCH') == "minikube":
        try:
            parser["provisioner"] = "k8s.io/minikube-hostpath"
            del parser["allowedTopologies"]
            del parser["allowVolumeExpansion"]
            del parser["parameters"]
        except KeyError:
            logger.info("Key not deleted as it does not exist inside yaml.")
        parser.dump_it()
Example #20
    def analyze_global_values(self):
        """
        Parses Gluu values.yaml with the input information from prompts
        """
        values_file_parser = Parser(self.values_file, True)
        if self.settings.get("DEPLOYMENT_ARCH") == "minikube":
            provisioner = "k8s.io/minikube-hostpath"
        elif self.settings.get("DEPLOYMENT_ARCH") == "eks":
            provisioner = "kubernetes.io/aws-ebs"
        elif self.settings.get("DEPLOYMENT_ARCH") == "gke":
            provisioner = "kubernetes.io/gce-pd"
        elif self.settings.get("DEPLOYMENT_ARCH") == "aks":
            provisioner = "kubernetes.io/azure-disk"
        elif self.settings.get("DEPLOYMENT_ARCH") == "do":
            provisioner = "dobs.csi.digitalocean.com"
        elif self.settings.get("DEPLOYMENT_ARCH") == "local":
            provisioner = "openebs.io/local"
        else:
            provisioner = "microk8s.io/hostpath"
        values_file_parser["global"]["storageClass"][
            "provisioner"] = provisioner
        values_file_parser["global"]["lbIp"] = self.settings.get("HOST_EXT_IP")
        values_file_parser["global"]["domain"] = self.settings.get("CN_FQDN")
        values_file_parser["global"]["isDomainRegistered"] = "false"
        if self.settings.get("IS_CN_FQDN_REGISTERED") == "Y":
            values_file_parser["global"]["isDomainRegistered"] = "true"
        if self.settings.get("CN_CACHE_TYPE") == "REDIS":
            values_file_parser["config"]["configmap"][
                "cnRedisUrl"] = self.settings.get("REDIS_URL")
            values_file_parser["config"]["configmap"][
                "cnRedisType"] = self.settings.get("REDIS_TYPE")
            values_file_parser["config"]["configmap"][
                "cnRedisUseSsl"] = self.settings.get("REDIS_USE_SSL")
            values_file_parser["config"]["configmap"]["cnRedisSslTruststore"] = \
                self.settings.get("REDIS_SSL_TRUSTSTORE")
            values_file_parser["config"]["configmap"]["cnRedisSentinelGroup"] = \
                self.settings.get("REDIS_SENTINEL_GROUP")
            values_file_parser["config"]["redisPass"] = self.settings.get(
                "REDIS_PW")
        if self.settings.get("DEPLOYMENT_ARCH") in ("microk8s", "minikube") \
                or self.settings.get("TEST_ENVIRONMENT") == "Y":
            values_file_parser["global"]["cloud"]["testEnviroment"] = True
        values_file_parser["config"]["configmap"][
            "lbAddr"] = self.settings.get("LB_ADD")
        values_file_parser["global"]["cnPersistenceType"] = self.settings.get(
            "PERSISTENCE_BACKEND")
        values_file_parser["config"]["configmap"][
            "cnPersistenceType"] = self.settings.get("PERSISTENCE_BACKEND")
        values_file_parser["config"]["configmap"]["cnPersistenceLdapMapping"] = \
            self.settings.get("HYBRID_LDAP_HELD_DATA")
        if self.settings.get("PERSISTENCE_BACKEND") != "ldap":
            values_file_parser["config"]["configmap"][
                "cnCouchbaseUrl"] = self.settings.get("COUCHBASE_URL")
            values_file_parser["config"]["configmap"][
                "cnCouchbaseUser"] = self.settings.get("COUCHBASE_USER")
            values_file_parser["config"]["configmap"][
                "cnCouchbaseIndexNumReplica"] = self.settings.get(
                    "COUCHBASE_INDEX_NUM_REPLICA")
            values_file_parser["config"]["configmap"][
                "cnCouchbaseBucketPrefix"] = self.settings.get(
                    "COUCHBASE_BUCKET_PREFIX")
            values_file_parser["config"]["configmap"]["cnCouchbaseSuperUser"] = \
                self.settings.get("COUCHBASE_SUPERUSER")
            values_file_parser["config"]["configmap"][
                "cnCouchbaseCrt"] = self.settings.get("COUCHBASE_CRT")
            values_file_parser["config"]["configmap"][
                "cnCouchbasePass"] = self.settings.get("COUCHBASE_PASSWORD")
            values_file_parser["config"]["configmap"]["cnCouchbaseSuperUserPass"] = \
                self.settings.get("COUCHBASE_SUPERUSER_PASSWORD")
        values_file_parser["global"]["auth-server"]["enabled"] = True
        values_file_parser["global"]["persistence"]["enabled"] = True
        values_file_parser["global"]["oxtrust"]["enabled"] = True
        values_file_parser["global"]["config"]["enabled"] = True
        values_file_parser["global"]["opendj"]["enabled"] = False
        values_file_parser["global"]["fido2"]["enabled"] = False
        if self.settings.get("ENABLE_FIDO2") == "Y":
            values_file_parser["global"]["fido2"]["enabled"] = True
            values_file_parser["fido2"]["replicas"] = self.settings.get(
                "FIDO2_REPLICAS")
        values_file_parser["global"]["scim"]["enabled"] = False
        if self.settings.get("ENABLE_SCIM") == "Y":
            values_file_parser["global"]["scim"]["enabled"] = True
            values_file_parser["scim"]["replicas"] = self.settings.get(
                "SCIM_REPLICAS")

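        # Toggle optional components (config-api, jackrabbit) and the ingress flavour
        # (istio, alb or nginx) from the prompt answers.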
        if self.settings.get("ENABLE_CONFIG_API") == "Y":
            values_file_parser["global"]["config-api"]["enabled"] = True

        if self.settings.get("INSTALL_JACKRABBIT") == "Y":
            values_file_parser["global"]["jackrabbit"]["enabled"] = True
            values_file_parser["config"]["configmap"][
                "cnJackrabbitUrl"] = self.settings.get("JACKRABBIT_URL")
            values_file_parser["jackrabbit"]["secrets"]["cnJackrabbitAdminPass"] = \
                self.settings.get("JACKRABBIT_ADMIN_PASSWORD")
            values_file_parser["jackrabbit"]["secrets"]["cnJackrabbitPostgresPass"] = \
                self.settings.get("JACKRABBIT_PG_PASSWORD")
        if self.settings.get("USE_ISTIO_INGRESS") == "Y":
            values_file_parser["global"]["istio"]["ingress"] = True
            values_file_parser["global"]["istio"]["enabled"] = True
            values_file_parser["global"]["istio"][
                "namespace"] = self.settings.get("ISTIO_SYSTEM_NAMESPACE")
        elif self.settings.get("AWS_LB_TYPE") == "alb":
            values_file_parser["global"]["alb"]["ingress"] = True
        else:
            values_file_parser["nginx-ingress"]["ingress"]["enabled"] = True
            values_file_parser["nginx-ingress"]["ingress"]["hosts"] = [
                self.settings.get("CN_FQDN")
            ]
            values_file_parser["nginx-ingress"]["ingress"]["tls"][0][
                "hosts"] = [self.settings.get("CN_FQDN")]
        if self.settings.get("USE_ISTIO") == "Y":
            values_file_parser["global"]["istio"]["enabled"] = True

        values_file_parser["global"]["cnJackrabbitCluster"] = "false"
        if self.settings.get("JACKRABBIT_CLUSTER") == "Y":
            values_file_parser["global"]["cnJackrabbitCluster"] = "true"
            values_file_parser["config"]["configmap"]["cnJackrabbitAdminId"] = \
                self.settings.get("JACKRABBIT_ADMIN_ID")
            values_file_parser["config"]["configmap"]["cnJackrabbitPostgresUser"] = \
                self.settings.get("JACKRABBIT_PG_USER")
            values_file_parser["config"]["configmap"]["cnJackrabbitPostgresDatabaseName"] = \
                self.settings.get("JACKRABBIT_DATABASE")
            values_file_parser["config"]["configmap"]["cnJackrabbitPostgresHost"] = \
                self.settings.get("POSTGRES_URL")
            values_file_parser["config"]["configmap"]["cnJackrabbitPostgresUser"] = \
                self.settings.get("JACKRABBIT_PG_USER")

        if self.settings.get("PERSISTENCE_BACKEND") == "hybrid" or \
                self.settings.get("PERSISTENCE_BACKEND") == "ldap":
            values_file_parser["global"]["opendj"]["enabled"] = True
            # ALPHA-FEATURE: Multi cluster ldap replication
            if self.settings.get("CN_LDAP_MULTI_CLUSTER") == "Y":
                values_file_parser["opendj"]["multiCluster"]["enabled"] = True
                values_file_parser["opendj"]["multiCluster"]["serfAdvertiseAddr"] = \
                    self.settings.get("CN_LDAP_ADVERTISE_ADDRESS")
                serf_key = base64.b64encode(secrets.token_bytes()).decode()
                values_file_parser["opendj"]["multiCluster"][
                    "serfKey"] = serf_key
                values_file_parser["opendj"]["multiCluster"]["serfPeers"] = \
                    self.settings.get("CN_LDAP_SERF_PEERS")
                if self.settings.get("CN_LDAP_SECONDARY_CLUSTER") == "Y":
                    values_file_parser["global"]["persistence"][
                        "enabled"] = False
                values_file_parser["opendj"]["ports"]["tcp-ldaps"]["nodePort"] = \
                    int(self.settings.get("CN_LDAP_ADVERTISE_LDAPS_PORT"))

                values_file_parser["opendj"]["ports"]["tcp-repl"]["port"] = \
                    int(self.settings.get("CN_LDAP_ADVERTISE_REPLICATION_PORT"))
                values_file_parser["opendj"]["ports"]["tcp-repl"]["targetPort"] = \
                    int(self.settings.get("CN_LDAP_ADVERTISE_REPLICATION_PORT"))
                values_file_parser["opendj"]["ports"]["tcp-repl"]["nodePort"] = \
                    int(self.settings.get("CN_LDAP_ADVERTISE_REPLICATION_PORT"))

                values_file_parser["opendj"]["ports"]["tcp-admin"]["port"] = \
                    int(self.settings.get("CN_LDAP_ADVERTISE_ADMIN_PORT"))
                values_file_parser["opendj"]["ports"]["tcp-admin"]["targetPort"] = \
                    int(self.settings.get("CN_LDAP_ADVERTISE_ADMIN_PORT"))
                values_file_parser["opendj"]["ports"]["tcp-admin"]["nodePort"] = \
                    int(self.settings.get("CN_LDAP_ADVERTISE_ADMIN_PORT"))

                values_file_parser["opendj"]["ports"]["tcp-serf"]["nodePort"] = \
                    int(self.settings.get("CN_LDAP_SERF_PORT"))
                values_file_parser["opendj"]["ports"]["udp-serf"]["nodePort"] = \
                    int(self.settings.get("CN_LDAP_SERF_PORT"))

        values_file_parser["global"]["oxshibboleth"]["enabled"] = False
        if self.settings.get("ENABLE_OXSHIBBOLETH") == "Y":
            values_file_parser["global"]["oxshibboleth"]["enabled"] = True
            values_file_parser["config"]["configmap"][
                "cnSyncShibManifests"] = True

        values_file_parser["global"]["client-api"]["enabled"] = False
        if self.settings.get("ENABLE_CLIENT_API") == "Y":
            values_file_parser["global"]["client-api"]["enabled"] = True
            values_file_parser["config"]["configmap"]["jansClientApiApplicationCertCn"] = \
                self.settings.get("CLIENT_API_APPLICATION_KEYSTORE_CN")
            values_file_parser["config"]["configmap"][
                "jansClientApiAdminCertCn"] = self.settings.get(
                    "CLIENT_API_ADMIN_KEYSTORE_CN")
            values_file_parser["client-api"]["replicas"] = self.settings.get(
                "CLIENT_API_REPLICAS")

        values_file_parser["opendj"]["cnRedisEnabled"] = False
        if self.settings.get("CN_CACHE_TYPE") == "REDIS":
            values_file_parser["opendj"]["cnRedisEnabled"] = True

        values_file_parser["global"]["nginx-ingress"]["enabled"] = True

        values_file_parser["global"]["cr-rotate"]["enabled"] = False
        if self.settings.get("ENABLE_CACHE_REFRESH") == "Y":
            values_file_parser["global"]["cr-rotate"]["enabled"] = True

        values_file_parser["global"]["auth-server-key-rotation"][
            "enabled"] = False
        if self.settings.get("ENABLE_AUTH_SERVER_KEY_ROTATE") == "Y":
            values_file_parser["global"]["auth-server-key-rotation"][
                "enabled"] = True
            values_file_parser["auth-server-key-rotation"][
                "keysLife"] = self.settings.get("AUTH_SERVER_KEYS_LIFE")

        values_file_parser["config"]["orgName"] = self.settings.get("ORG_NAME")
        values_file_parser["config"]["email"] = self.settings.get("EMAIL")
        values_file_parser["config"]["adminPass"] = self.settings.get(
            "ADMIN_PW")
        values_file_parser["config"]["ldapPass"] = self.settings.get("LDAP_PW")
        values_file_parser["config"]["countryCode"] = self.settings.get(
            "COUNTRY_CODE")
        values_file_parser["config"]["state"] = self.settings.get("STATE")
        values_file_parser["config"]["city"] = self.settings.get("CITY")
        values_file_parser["config"]["configmap"][
            "cnCacheType"] = self.settings.get("CN_CACHE_TYPE")
        values_file_parser["opendj"]["replicas"] = self.settings.get(
            "LDAP_REPLICAS")
        values_file_parser["opendj"]["persistence"][
            "size"] = self.settings.get("LDAP_STORAGE_SIZE")
        if self.settings.get("ENABLE_OXTRUST_API_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"][
                "cnOxtrustApiEnabled"] = True
        if self.settings.get("ENABLE_OXTRUST_TEST_MODE_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"][
                "cnOxtrustApiTestMode"] = True
        if self.settings.get("ENABLE_CASA_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"]["cnCasaEnabled"] = True
            values_file_parser["config"]["configmap"][
                "cnSyncCasaManifests"] = True

        if self.settings.get("ENABLE_OXPASSPORT_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"][
                "cnPassportEnabled"] = True
        if self.settings.get("ENABLE_RADIUS_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"]["cnRadiusEnabled"] = True
        if self.settings.get("ENABLE_SAML_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"]["cnSamlEnabled"] = True

        values_file_parser["oxpassport"]["resources"] = {}
        values_file_parser["casa"]["image"]["repository"] = self.settings.get(
            "CASA_IMAGE_NAME")
        values_file_parser["casa"]["image"]["tag"] = self.settings.get(
            "CASA_IMAGE_TAG")
        values_file_parser["casa"]["replicas"] = self.settings.get(
            "CASA_REPLICAS")
        values_file_parser["config"]["image"][
            "repository"] = self.settings.get("CONFIG_IMAGE_NAME")
        values_file_parser["config"]["image"]["tag"] = self.settings.get(
            "CONFIG_IMAGE_TAG")
        values_file_parser["cr-rotate"]["image"][
            "repository"] = self.settings.get(
                "CACHE_REFRESH_ROTATE_IMAGE_NAME")
        values_file_parser["cr-rotate"]["image"]["tag"] = self.settings.get(
            "CACHE_REFRESH_ROTATE_IMAGE_TAG")
        values_file_parser["auth-server-key-rotation"]["image"][
            "repository"] = self.settings.get("CERT_MANAGER_IMAGE_NAME")
        values_file_parser["auth-server-key-rotation"]["image"][
            "tag"] = self.settings.get("CERT_MANAGER_IMAGE_TAG")
        values_file_parser["opendj"]["image"][
            "repository"] = self.settings.get("LDAP_IMAGE_NAME")
        values_file_parser["opendj"]["image"]["tag"] = self.settings.get(
            "LDAP_IMAGE_TAG")
        values_file_parser["persistence"]["image"][
            "repository"] = self.settings.get("PERSISTENCE_IMAGE_NAME")
        values_file_parser["persistence"]["image"]["tag"] = self.settings.get(
            "PERSISTENCE_IMAGE_TAG")
        values_file_parser["auth-server"]["image"][
            "repository"] = self.settings.get("AUTH_SERVER_IMAGE_NAME")
        values_file_parser["auth-server"]["image"]["tag"] = self.settings.get(
            "AUTH_SERVER_IMAGE_TAG")
        values_file_parser["client-api"]["image"][
            "repository"] = self.settings.get("CLIENT_API_IMAGE_NAME")
        values_file_parser["client-api"]["image"]["tag"] = self.settings.get(
            "CLIENT_API_IMAGE_TAG")
        values_file_parser["auth-server"]["replicas"] = self.settings.get(
            "AUTH_SERVER_REPLICAS")
        values_file_parser["oxpassport"]["image"][
            "repository"] = self.settings.get("OXPASSPORT_IMAGE_NAME")
        values_file_parser["oxpassport"]["image"]["tag"] = self.settings.get(
            "OXPASSPORT_IMAGE_TAG")
        values_file_parser["oxpassport"]["replicas"] = self.settings.get(
            "OXPASSPORT_REPLICAS")
        values_file_parser["oxshibboleth"]["image"][
            "repository"] = self.settings.get("OXSHIBBOLETH_IMAGE_NAME")
        values_file_parser["oxshibboleth"]["image"]["tag"] = self.settings.get(
            "OXSHIBBOLETH_IMAGE_TAG")
        values_file_parser["oxshibboleth"]["replicas"] = self.settings.get(
            "OXSHIBBOLETH_REPLICAS")
        values_file_parser["jackrabbit"]["image"][
            "repository"] = self.settings.get("JACKRABBIT_IMAGE_NAME")
        values_file_parser["jackrabbit"]["image"]["tag"] = self.settings.get(
            "JACKRABBIT_IMAGE_TAG")
        values_file_parser["oxtrust"]["image"][
            "repository"] = self.settings.get("OXTRUST_IMAGE_NAME")
        values_file_parser["oxtrust"]["image"]["tag"] = self.settings.get(
            "OXTRUST_IMAGE_TAG")
        values_file_parser["oxtrust"]["replicas"] = self.settings.get(
            "OXTRUST_REPLICAS")
        values_file_parser["radius"]["image"][
            "repository"] = self.settings.get("RADIUS_IMAGE_NAME")
        values_file_parser["radius"]["image"]["tag"] = self.settings.get(
            "RADIUS_IMAGE_TAG")
        values_file_parser["radius"]["replicas"] = self.settings.get(
            "RADIUS_REPLICAS")
        values_file_parser.dump_it()
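The long block above repeats a single pattern: read a flat settings key and write it into the matching nested path of the Helm values file. A minimal, self-contained sketch of that pattern follows; the mapping, image names, and tags are illustrative placeholders rather than the installer's real values.

# Minimal sketch of the settings-to-values mapping pattern used above.
# Keys, image names, and tags below are illustrative placeholders.
IMAGE_MAP = {
    "CASA_IMAGE_NAME": ("casa", "repository"),
    "CASA_IMAGE_TAG": ("casa", "tag"),
    "CONFIG_IMAGE_NAME": ("config", "repository"),
    "CONFIG_IMAGE_TAG": ("config", "tag"),
}


def apply_image_settings(values, settings, mapping=IMAGE_MAP):
    """Copy flat settings into the nested <chart>.image.<field> structure."""
    for settings_key, (chart, field) in mapping.items():
        values.setdefault(chart, {}).setdefault("image", {})[field] = settings[settings_key]
    return values


values = apply_image_settings({}, {
    "CASA_IMAGE_NAME": "example/casa",
    "CASA_IMAGE_TAG": "1.0.0",
    "CONFIG_IMAGE_NAME": "example/config-init",
    "CONFIG_IMAGE_TAG": "1.0.0",
})
print(values["casa"]["image"])  # {'repository': 'example/casa', 'tag': '1.0.0'}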
Exemplo n.º 21
0
    def check_install_nginx_ingress(self, install_ingress=True):
        """
        Installs nginx-ingress via Helm, or only checks to receive an IP or address
        :param install_ingress: when True, (re)install the nginx-ingress chart; otherwise only check for the address
        """
        if install_ingress:
            self.kubernetes.delete_custom_resource(
                "virtualservers.k8s.nginx.org")
            self.kubernetes.delete_custom_resource(
                "virtualserverroutes.k8s.nginx.org")
            self.kubernetes.delete_cluster_role("ingress-nginx-nginx-ingress")
            self.kubernetes.delete_cluster_role_binding(
                "ingress-nginx-nginx-ingress")
            self.kubernetes.create_namespace(
                name=self.settings.get("NGINX_INGRESS_NAMESPACE"),
                labels={"app": "ingress-nginx"})
            self.kubernetes.delete_cluster_role(
                self.settings.get('NGINX_INGRESS_RELEASE_NAME') +
                "-nginx-ingress-controller")
            self.kubernetes.delete_cluster_role_binding(
                self.settings.get('NGINX_INGRESS_RELEASE_NAME') +
                "-nginx-ingress-controller")
            try:
                exec_cmd(
                    "helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx"
                )
                exec_cmd("helm repo add stable https://charts.helm.sh/stable")
                exec_cmd("helm repo update")
            except FileNotFoundError:
                logger.error(
                    "Helm v3 is not installed. Please install it to continue "
                    "https://helm.sh/docs/intro/install/")
                raise SystemExit(1)
        command = "helm install {} ingress-nginx/ingress-nginx --namespace={} ".format(
            self.settings.get('NGINX_INGRESS_RELEASE_NAME'),
            self.settings.get("NGINX_INGRESS_NAMESPACE"))
        if self.settings.get("DEPLOYMENT_ARCH") == "minikube":
            exec_cmd("minikube addons enable ingress")
        if self.settings.get("DEPLOYMENT_ARCH") == "eks":
            if self.settings.get("AWS_LB_TYPE") == "nlb":
                if install_ingress:
                    nlb_override_values_file = Path(
                        "./nginx/aws/aws-nlb-override-values.yaml").resolve()
                    nlb_values = " --values {}".format(
                        nlb_override_values_file)
                    exec_cmd(command + nlb_values)
            else:
                if self.settings.get("USE_ARN") == "Y":
                    if install_ingress:
                        elb_override_values_file = Path(
                            "./nginx/aws/aws-elb-override-values.yaml"
                        ).resolve()
                        elb_file_parser = Parser(elb_override_values_file,
                                                 True)
                        elb_file_parser["controller"]["service"][
                            "annotations"].update({
                                "service.beta.kubernetes.io/aws-load-balancer-ssl-cert":
                                self.settings.get("ARN_AWS_IAM")
                            })
                        elb_file_parser["controller"]["config"][
                            "proxy-real-ip-cidr"] = self.settings.get(
                                "VPC_CIDR")
                        elb_file_parser.dump_it()
                        elb_values = " --values {}".format(
                            elb_override_values_file)
                        exec_cmd(command + elb_values)
                else:
                    if install_ingress:
                        exec_cmd(command)

        if self.settings.get("DEPLOYMENT_ARCH") in ("gke", "aks", "do"):
            if install_ingress:
                cloud_override_values_file = Path(
                    "./nginx/cloud/cloud-override-values.yaml").resolve()
                cloud_values = " --values {}".format(
                    cloud_override_values_file)
                exec_cmd(command + cloud_values)
        if self.settings.get("DEPLOYMENT_ARCH") == "local":
            if install_ingress:
                baremetal_override_values_file = Path(
                    "./nginx/baremetal/baremetal-override-values.yaml"
                ).resolve()
                baremetal_values = " --values {}".format(
                    baremetal_override_values_file)
                exec_cmd(command + baremetal_values)
        if self.settings.get("DEPLOYMENT_ARCH") not in ("microk8s",
                                                        "minikube"):
            logger.info("Waiting for nginx to be prepared...")
            time.sleep(60)
            self.wait_for_nginx_add()
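Each deployment target above reuses one base helm install command and appends a target-specific --values override. A small sketch of that composition; the release name, namespace, and file path are illustrative placeholders.

from pathlib import Path

# Sketch of the helm command composition above; names and paths are placeholders.
release_name = "ningress"
namespace = "ingress-nginx"
command = "helm install {} ingress-nginx/ingress-nginx --namespace={} ".format(
    release_name, namespace)
override_values_file = Path("./nginx/cloud/cloud-override-values.yaml").resolve()
print(command + "--values {}".format(override_values_file))
# helm install ningress ingress-nginx/ingress-nginx --namespace=ingress-nginx --values /abs/path/cloud-override-values.yaml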
Exemplo n.º 22
0
class ValuesHandler(object):
    def __init__(self,
                 values_file="./helm/gluu/values.yaml",
                 values_schema_file="./helm/gluu/values.schema.json"):
        self.values_file = Path(values_file)
        self.values_schema = Path(values_schema_file)
        self.errors = list()
        self.values_file_parser = Parser(self.values_file, True)
        self.schema = {}
        self.load()
        # self.load_schema()

    def load(self):
        """
        Load the values file, copying a mounted override-values.yaml over the default values.yaml when present.
        """
        # Check if running in a container with override-values.yaml mounted
        try:
            shutil.copy(Path("./override-values.yaml"), self.values_file)
            self.values_file_parser = Parser(self.values_file, True)
        except FileNotFoundError:
            # No installation settings mounted as /override-values.yaml. Checking values.yaml.
            pass

    def load_schema(self):
        try:
            with open(self.values_schema) as f:
                try:
                    self.schema = json.load(f)
                    jsonschema.Draft7Validator.check_schema(self.schema)
                except json.decoder.JSONDecodeError:
                    logger.info("Oops! values.schema.json is not readable")
                    sys.exit(4)
                except jsonschema.SchemaError:
                    logger.info("Oops! values.schema.json is invalid")
                    sys.exit(4)
        except FileNotFoundError:
            logger.info("Oops! values.schema.json was not found")
            sys.exit(4)

    def store_data(self):
        try:
            self.values_file_parser.dump_it()
            return True
        except Exception as exc:
            logger.info(f"Uncaught error={exc}")
            return False

    def set(self, keys_string, value):
        """
        single update
        """
        try:
            dot = dotty(self.values_file_parser)
            dot[keys_string] = value
            self.store_data()
            return True
        except Exception as exc:
            logger.info(f"Uncaught error={exc}")
            return False

    def get(self, keys_string):
        """
        Return the innermost value of the values dict for the given dotted key path.
        """
        try:
            dot = dotty(self.values_file_parser)
            return dot[keys_string]

        except (KeyError, NameError):
            logger.info("No Value Can Be Found for " + str(keys_string))
            return False

    def get_all(self):
        return self.values_file_parser

    def update(self, collection):
        """
        mass update
        """
        try:
            self.values_file_parser.update(collection)
            self.store_data()
            return True
        except Exception as exc:
            logger.info(f"Uncaught error={exc}")
            return False

    def reset_data(self):
        """
        Reset every values-file entry to an empty string.
        """
        def iterate_dict(dictionary):
            for k, v in dictionary.items():
                if isinstance(v, dict):
                    iterate_dict(v)
                else:
                    dictionary[k] = ""

        try:
            iterate_dict(self.values_file_parser)
            self.store_data()
            return True
        except Exception as exc:
            logger.info(f"Uncaught error={exc}")
            return False

    def is_exist(self):
        try:
            self.values_file.resolve(strict=True)
        except FileNotFoundError:
            return False
        else:
            return True

    def validate(self):
        self.errors = []
        try:
            with open(self.values_file) as f:
                try:
                    settings = json.load(f)
                    validator = jsonschema.Draft7Validator(self.schema)
                    errors = sorted(validator.iter_errors(settings),
                                    key=lambda e: e.path)

                    for error in errors:
                        if "errors" in error.schema and \
                                error.validator != 'required':
                            key = error.path[0]
                            error_msg = error.schema.get('errors').get(
                                error.validator)
                            message = f"{key} : {error_msg}"
                        else:
                            if error.path:
                                key = error.path[0]
                                message = f"{key} : {error.message}"
                            else:
                                message = error.message

                        self.errors.append(message)

                except json.decoder.JSONDecodeError as e:
                    self.errors.append(f"Not a valid values.yaml : {str(e)}")
                    return False

        except FileNotFoundError:
            # skip validating file does not exist
            return True

        return len(self.errors) == 0
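A hypothetical usage sketch of ValuesHandler; the file path and dotted key paths below are assumptions for illustration, not values guaranteed to exist in every values.yaml.

# Hypothetical usage of ValuesHandler; key paths below are assumptions.
handler = ValuesHandler(values_file="./helm/gluu/values.yaml")
handler.set("global.fqdn", "demo.example.org")       # single dotted-path write, persisted via dump_it()
fqdn = handler.get("global.fqdn")                     # returns the stored value, or False when missing
handler.update({"config": {"orgName": "Example"}})    # mass update merged into the parser and persisted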
Exemplo n.º 23
0
    def analyze_couchbase_cluster_yaml(self):
        """
        Dumps the calculated resources into the couchbase-cluster.yaml file. Also includes cloud zones.
        """
        parser = Parser("./couchbase/couchbase-cluster.yaml",
                        "CouchbaseCluster")
        parser["metadata"]["name"] = self.settings.get(
            "COUCHBASE_CLUSTER_NAME")
        number_of_buckets = 5
        if self.settings.get("DEPLOYMENT_ARCH") in ("microk8s", "minikube") or \
                self.settings.get("COUCHBASE_USE_LOW_RESOURCES") == "Y":
            resources_servers = [{
                "name":
                "allServices",
                "size":
                1,
                "services":
                ["data", "index", "query", "search", "eventing", "analytics"],
                "volumeMounts": {
                    "default": "pvc-general",
                    "data": "pvc-data",
                    "index": "pvc-index",
                    "analytics": ["pvc-analytics"]
                }
            }]
            data_service_memory_quota = 1024
            index_service_memory_quota = 512
            search_service_memory_quota = 512
            eventing_service_memory_quota = 512
            analytics_service_memory_quota = 1024
            memory_quota = 0
            self.settings.set("COUCHBASE_GENERAL_STORAGE", "5Gi")
            self.settings.set("COUCHBASE_DATA_STORAGE", "5Gi")
            self.settings.set("COUCHBASE_INDEX_STORAGE", "5Gi")
            self.settings.set("COUCHBASE_QUERY_STORAGE", "5Gi")
            self.settings.set("COUCHBASE_ANALYTICS_STORAGE", "5Gi")

        else:
            resources = self.calculate_couchbase_resources
            data_service_memory_quota = resources["COUCHBASE_DATA_MEM_QUOTA"]
            index_service_memory_quota = resources["COUCHBASE_INDEX_MEM_QUOTA"]
            search_service_memory_quota = resources[
                "COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_QUOTA"]
            eventing_service_memory_quota = resources[
                "COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_QUOTA"]
            analytics_service_memory_quota = resources[
                "COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_QUOTA"] + 1024
            memory_quota = ((resources["COUCHBASE_DATA_MEM_QUOTA"] - 500) /
                            number_of_buckets)
            zones_list = self.settings.get("NODES_ZONES")
            data_server_spec = create_server_spec_per_cb_service(
                zones_list, int(resources["COUCHBASE_DATA_NODES"]), "data",
                str(resources["COUCHBASE_DATA_MEM_REQUEST"]),
                str(resources["COUCHBASE_DATA_MEM_LIMIT"]),
                str(resources["COUCHBASE_DATA_CPU_REQUEST"]),
                str(resources["COUCHBASE_DATA_CPU_LIMIT"]))

            query_server_spec = create_server_spec_per_cb_service(
                zones_list, int(resources["COUCHBASE_QUERY_NODES"]), "query",
                str(resources["COUCHBASE_QUERY_MEM_REQUEST"]),
                str(resources["COUCHBASE_QUERY_MEM_LIMIT"]),
                str(resources["COUCHBASE_QUERY_CPU_REQUEST"]),
                str(resources["COUCHBASE_QUERY_CPU_LIMIT"]))

            index_server_spec = create_server_spec_per_cb_service(
                zones_list, int(resources["COUCHBASE_INDEX_NODES"]), "index",
                str(resources["COUCHBASE_INDEX_MEM_REQUEST"]),
                str(resources["COUCHBASE_INDEX_MEM_LIMIT"]),
                str(resources["COUCHBASE_INDEX_CPU_REQUEST"]),
                str(resources["COUCHBASE_INDEX_CPU_LIMIT"]))

            search_eventing_analytics_server_spec = create_server_spec_per_cb_service(
                zones_list,
                int(resources["COUCHBASE_SEARCH_EVENTING_ANALYTICS_NODES"]),
                "analytics",
                str(resources[
                    "COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_REQUEST"]),
                str(resources["COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_LIMIT"]
                    ),
                str(resources[
                    "COUCHBASE_SEARCH_EVENTING_ANALYTICS_CPU_REQUEST"]),
                str(resources["COUCHBASE_SEARCH_EVENTING_ANALYTICS_CPU_LIMIT"]
                    ))

            resources_servers = \
                data_server_spec + query_server_spec + index_server_spec + \
                search_eventing_analytics_server_spec

        if self.settings.get("NODES_ZONES"):
            unique_zones = list(dict.fromkeys(
                self.settings.get("NODES_ZONES")))
            parser["spec"]["serverGroups"] = unique_zones
        parser["spec"]["cluster"]["dataServiceMemoryQuota"] = str(
            data_service_memory_quota) + "Mi"
        parser["spec"]["cluster"]["indexServiceMemoryQuota"] = str(
            index_service_memory_quota) + "Mi"
        parser["spec"]["cluster"]["searchServiceMemoryQuota"] = str(
            search_service_memory_quota) + "Mi"
        parser["spec"]["cluster"]["eventingServiceMemoryQuota"] = str(
            eventing_service_memory_quota) + "Mi"
        parser["spec"]["cluster"]["analyticsServiceMemoryQuota"] = str(
            analytics_service_memory_quota) + "Mi"

        set_memory_for_buckets(memory_quota)
        parser["metadata"]["name"] = self.settings.get(
            "COUCHBASE_CLUSTER_NAME")
        parser["spec"]["servers"] = resources_servers

        number_of_volume_claims = len(parser["spec"]["volumeClaimTemplates"])
        for i in range(number_of_volume_claims):
            name = parser["spec"]["volumeClaimTemplates"][i]["metadata"][
                "name"]
            if name == "pvc-general":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"]["requests"]["storage"] = \
                    self.settings.get("COUCHBASE_GENERAL_STORAGE")
            elif name == "pvc-data":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"]["requests"]["storage"] = \
                    self.settings.get("COUCHBASE_DATA_STORAGE")
            elif name == "pvc-index":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"]["requests"]["storage"] = \
                    self.settings.get("COUCHBASE_INDEX_STORAGE")
            elif name == "pvc-query":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"]["requests"]["storage"] = \
                    self.settings.get("COUCHBASE_QUERY_STORAGE")
            elif name == "pvc-analytics":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"]["requests"]["storage"] = \
                    self.settings.get("COUCHBASE_ANALYTICS_STORAGE")
        parser.dump_it()
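A worked example of the per-bucket memory arithmetic above, using an illustrative data-service quota.

# Worked example of the bucket memory split; the quota value is illustrative.
data_service_memory_quota = 4096          # Mi, stands in for COUCHBASE_DATA_MEM_QUOTA
number_of_buckets = 5
memory_quota = (data_service_memory_quota - 500) / number_of_buckets
print(memory_quota)                       # 719.2 Mi per bucket, passed to set_memory_for_buckets()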
Exemplo n.º 24
0
    def install_gluu_gateway_ui(self):
        self.uninstall_gluu_gateway_ui()
        self.kubernetes.create_namespace(
            name=self.settings.get(
                "installer-settings.gluuGateway.uI.namespace"),
            labels={"APP_NAME": "gluu-gateway-ui"})
        try:
            # Try to get gluu cert + key
            ssl_cert = self.kubernetes.read_namespaced_secret(
                "gluu", self.settings.get(
                    "installer-settings.namespace")).data["ssl_cert"]
            ssl_key = self.kubernetes.read_namespaced_secret(
                "gluu", self.settings.get(
                    "installer-settings.namespace")).data["ssl_key"]

            self.kubernetes.patch_or_create_namespaced_secret(
                name="tls-certificate",
                namespace=self.settings.get(
                    "installer-settings.gluuGateway.uI.namespace"),
                literal="tls.crt",
                value_of_literal=ssl_cert,
                secret_type="kubernetes.io/tls",
                second_literal="tls.key",
                value_of_second_literal=ssl_key)

        except Exception:
            logger.error(
                "Could not read Gluu secret. Please check config job pod logs. GG-UI will deploy but fail. "
                "Please mount crt and key inside gg-ui deployment")
        client_api_server_url = "https://{}.{}.svc.cluster.local:8443".format(
            self.settings.get("client-api.service.clientApiServerServiceName"),
            self.settings.get("installer-settings.namespace"))
        values_file = Path("./helm/gluu-gateway-ui/values.yaml").resolve()
        values_file_parser = Parser(values_file, True)
        values_file_parser["cloud"]["isDomainRegistered"] = "false"
        if self.settings.get("global.isFqdnRegistered"):
            values_file_parser["cloud"]["isDomainRegistered"] = "true"
        if self.settings.get("global.storageClass.provisioner") in \
                ("microk8s.io/hostpath", "k8s.io/minikube-hostpath"):
            values_file_parser["cloud"]["enabled"] = False
        if "aws" in self.settings.get(
                "installer-settings.volumeProvisionStrategy"):
            values_file_parser["cloud"]["provider"] = "eks"
        values_file_parser["dbUser"] = self.settings.get(
            "installer-settings.gluuGateway.uI.postgresUser")
        values_file_parser[
            "kongAdminUrl"] = "https://{}-kong-admin.{}.svc.cluster.local:8444".format(
                self.settings.get("installer-settings.kong.releaseName"),
                self.settings.get("installer-settings.kong.namespace"))
        values_file_parser["dbHost"] = self.settings.get(
            "config.configmap.cnJackrabbitPostgresHost")
        values_file_parser["dbDatabase"] = self.settings.get(
            "installer-settings.gluuGateway.uI.postgresDatabaseName")
        values_file_parser["clientApiServerUrl"] = client_api_server_url
        values_file_parser["image"]["repository"] = self.settings.get(
            "GLUU_GATEWAY_UI_IMAGE_NAME")
        values_file_parser["image"]["tag"] = self.settings.get(
            "GLUU_GATEWAY_UI_IMAGE_TAG")
        values_file_parser["loadBalancerIp"] = self.settings.get("global.lbIp")
        values_file_parser["dbPassword"] = self.settings.get(
            "installer-settings.gluuGateway.uI.postgresPassword")
        values_file_parser["opServerUrl"] = "https://" + self.settings.get(
            "global.fqdn")
        values_file_parser["ggHost"] = self.settings.get(
            "global.fqdn") + "/gg-ui/"
        values_file_parser["ggUiRedirectUrlHost"] = self.settings.get(
            "global.fqdn") + "/gg-ui/"
        # Register new client if one was not provided
        if not values_file_parser["clientApiId"] or \
                not values_file_parser["clientId"] or \
                not values_file_parser["clientSecret"]:
            client_api_id, client_id, client_secret = register_op_client(
                self.settings.get("installer-settings.namespace"),
                "konga-client", self.settings.get("global.fqdn"),
                client_api_server_url,
                self.settings.get('installer-settings.releaseName'))
            if not client_api_id:
                values_file_parser.dump_it()
                logger.error(
                    "Due to a failure in konga client registration the installation has stopped."
                    " Please register as suggested above manually and enter the values returned"
                    " for clientApiId, clientId, "
                    "and clientSecret inside ./helm/gluu-gateway-ui/values.yaml then run "
                    "helm install {} -f ./helm/gluu-gateway-ui/values.yaml ./helm/gluu-gateway-ui "
                    "--namespace={}".format(
                        self.settings.get(
                            'installer-settings.gluuGateway.uI.releaseName'),
                        self.settings.get(
                            "installer-settings.gluuGateway.uI.namespace")))
                raise SystemExit(1)
            values_file_parser["clientApiId"] = client_api_id
            values_file_parser["clientId"] = client_id
            values_file_parser["clientSecret"] = client_secret

        values_file_parser.dump_it()
        exec_cmd(
            "helm install {} -f ./helm/gluu-gateway-ui/values.yaml ./helm/gluu-gateway-ui --namespace={}"
            .format(
                self.settings.get(
                    'installer-settings.gluuGateway.uI.releaseName'),
                self.settings.get(
                    "installer-settings.gluuGateway.uI.namespace")))
Exemplo n.º 25
0
    def install(self):
        """
        Installs Couchbase
        """
        self.kubernetes.create_namespace(
            name=self.settings.get("installer-settings.namespace"))
        if not self.settings.get(
                "installer-settings.couchbase.customFileOverride"):
            try:
                self.analyze_couchbase_cluster_yaml()
            except Exception:
                # TODO remove this exception
                logger.error(
                    "Looks like some of the couchbase files were misconfigured. "
                    "If you wish to override the couchbase files please set "
                    " installer-settings.couchbase.customFileOverride to true`"
                )
                sys.exit()
        cb_namespace = self.settings.get(
            "installer-settings.couchbase.namespace")
        storage_class_file_parser = Parser(self.storage_class_file,
                                           "StorageClass")
        if self.settings.get('global.storageClass.provisioner') in (
                "kubernetes.io/gce-pd", "dobs.csi.digitalocean.com",
                "kubernetes.io/azure-disk"):
            try:
                del storage_class_file_parser["parameters"]["encrypted"]
            except KeyError:
                logger.info("Key not found")
            storage_class_file_parser["parameters"]["type"] = \
                self.settings.get("installer-settings.couchbase.volumeType")
        storage_class_file_parser["provisioner"] = self.settings.get(
            'global.storageClass.provisioner')
        if self.settings.get(
                'global.storageClass.provisioner') == "microk8s.io/hostpath":
            try:
                del storage_class_file_parser["allowVolumeExpansion"]
                del storage_class_file_parser["parameters"]
            except KeyError:
                logger.info("Key not found")
            storage_class_file_parser.dump_it()
        elif self.settings.get('global.storageClass.provisioner'
                               ) == "k8s.io/minikube-hostpath":
            try:
                del storage_class_file_parser["allowVolumeExpansion"]
                del storage_class_file_parser["parameters"]
            except KeyError:
                logger.info("Key not found")
            storage_class_file_parser.dump_it()
        else:
            try:
                storage_class_file_parser["parameters"]["type"] = \
                    self.settings.get("installer-settings.couchbase.volumeType")
            except KeyError:
                logger.info("Key not found")
        storage_class_file_parser.dump_it()

        logger.info("Installing Couchbase...")
        couchbase_crts_keys = Path("couchbase_crts_keys")
        if not couchbase_crts_keys.exists():
            os.mkdir(couchbase_crts_keys)
        custom_cb_ca_crt = Path("./couchbase_crts_keys/ca.crt")
        custom_cb_crt = Path("./couchbase_crts_keys/chain.pem")
        custom_cb_key = Path("./couchbase_crts_keys/pkey.key")
        if not custom_cb_ca_crt.exists() and not custom_cb_crt.exists(
        ) and not custom_cb_key.exists():
            setup_crts(
                ca_common_name=self.settings.get(
                    "installer-settings.couchbase.commonName"),
                cert_common_name="couchbase-server",
                san_list=self.settings.get(
                    "installer-settings.couchbase.subjectAlternativeName"),
                ca_cert_file="./couchbase_crts_keys/ca.crt",
                ca_key_file="./couchbase_crts_keys/ca.key",
                cert_file="./couchbase_crts_keys/chain.pem",
                key_file="./couchbase_crts_keys/pkey.key")
        labels = {"app": "gluu-couchbase"}
        if self.settings.get("global.istio.enabled"):
            labels = {"app": "couchbase", "istio-injection": "enabled"}
        self.kubernetes.create_namespace(name=cb_namespace, labels=labels)
        chain_pem_filepath = Path("./couchbase_crts_keys/chain.pem")
        pkey_filepath = Path("./couchbase_crts_keys/pkey.key")
        tls_cert_filepath = Path("./couchbase_crts_keys/tls-cert-file")
        tls_private_key_filepath = Path(
            "./couchbase_crts_keys/tls-private-key-file")
        ca_cert_filepath = Path("./couchbase_crts_keys/ca.crt")
        shutil.copyfile(ca_cert_filepath,
                        Path("./couchbase_crts_keys/couchbase.crt"))
        shutil.copyfile(chain_pem_filepath, tls_cert_filepath)
        shutil.copyfile(pkey_filepath, tls_private_key_filepath)

        encoded_ca_crt_string = self.settings.get(
            "config.configmap.cnCouchbaseCrt")
        if encoded_ca_crt_string in (None, ''):
            with open(ca_cert_filepath) as content_file:
                ca_crt_content = content_file.read()
                encoded_ca_crt_bytes = base64.b64encode(
                    ca_crt_content.encode("utf-8"))
                encoded_ca_crt_string = str(encoded_ca_crt_bytes, "utf-8")
            self.settings.set("config.configmap.cnCouchbaseCrt",
                              encoded_ca_crt_string)

        with open(chain_pem_filepath) as content_file:
            chain_pem_content = content_file.read()
            encoded_chain_bytes = base64.b64encode(
                chain_pem_content.encode("utf-8"))
            encoded_chain_string = str(encoded_chain_bytes, "utf-8")

        with open(pkey_filepath) as content_file:
            pkey_content = content_file.read()
            encoded_pkey_bytes = base64.b64encode(pkey_content.encode("utf-8"))
            encoded_pkey_string = str(encoded_pkey_bytes, "utf-8")

        self.kubernetes.patch_or_create_namespaced_secret(
            name="couchbase-server-tls",
            namespace=cb_namespace,
            literal=chain_pem_filepath.name,
            value_of_literal=encoded_chain_string,
            second_literal=pkey_filepath.name,
            value_of_second_literal=encoded_pkey_string)
        self.kubernetes.patch_or_create_namespaced_secret(
            name="couchbase-operator-tls",
            namespace=cb_namespace,
            literal=ca_cert_filepath.name,
            value_of_literal=encoded_ca_crt_string)

        encoded_cb_super_user_bytes = base64.b64encode(
            self.settings.get("config.configmap.cnCouchbaseSuperUser").encode(
                "utf-8"))
        encoded_cb_super_user_string = str(encoded_cb_super_user_bytes,
                                           "utf-8")
        encoded_cb_pass_bytes = base64.b64encode(
            self.settings.get("config.configmap.cnCouchbasePassword").encode(
                "utf-8"))
        encoded_cb_pass_string = str(encoded_cb_pass_bytes, "utf-8")
        encoded_cb_super_pass_bytes = base64.b64encode(
            self.settings.get("config.configmap.cnCouchbaseSuperUserPassword").
            encode("utf-8"))
        encoded_cb_super_pass_string = str(encoded_cb_super_pass_bytes,
                                           "utf-8")

        self.create_couchbase_gluu_cert_pass_secrets(
            encoded_ca_crt_string, encoded_cb_pass_string,
            encoded_cb_super_pass_string)
        self.kubernetes.patch_or_create_namespaced_secret(
            name="gluu-couchbase-user-password",
            namespace=self.settings.get(
                "installer-settings.couchbase.namespace"),
            literal="password",
            value_of_literal=encoded_cb_pass_string)

        admission_command = "./{}/bin/cbopcfg generate admission --namespace {}".format(
            self.couchbase_source_file,
            self.settings.get("installer-settings.couchbase.namespace"))
        operator_command = "./{}/bin/cbopcfg generate operator --namespace {}".format(
            self.couchbase_source_file,
            self.settings.get("installer-settings.couchbase.namespace"))
        backup_command = "./{}/bin/cbopcfg generate backup --namespace {}".format(
            self.couchbase_source_file,
            self.settings.get("installer-settings.couchbase.namespace"))
        # @TODO: Remove condition and operator_command override after deprecation of couchbase operator 2.0
        if self.old_couchbase:
            operator_command = "./{}/bin/cbopcfg -backup=true -namespace={}".format(
                self.couchbase_source_file,
                self.settings.get("installer-settings.couchbase.namespace"))
        exec_cmd(operator_command,
                 output_file=self.couchbase_operator_dac_file)
        # @TODO: Remove only the condition after deprecation of couchbase operator 2.0
        if not self.old_couchbase:
            exec_cmd(backup_command,
                     output_file=self.couchbase_operator_backup_file)
            exec_cmd(admission_command,
                     output_file=self.couchbase_admission_file)

        couchbase_cluster_parser = Parser(self.couchbase_cluster_file,
                                          "CouchbaseCluster")
        couchbase_cluster_parser["spec"]["networking"]["tls"]["static"][
            "serverSecret"] = "couchbase-server-tls"
        couchbase_cluster_parser["spec"]["networking"]["tls"]["static"][
            "operatorSecret"] = "couchbase-operator-tls"
        if self.settings.get("global.istio.enabled"):
            couchbase_cluster_parser["spec"]["networking"][
                "networkPlatform"] = "Istio"
        try:
            couchbase_cluster_parser["spec"]["security"]["rbac"]["selector"]["matchLabels"]["cluster"] = \
                self.settings.get("installer-settings.couchbase.clusterName")
            couchbase_cluster_parser["spec"]["security"]["rbac"][
                "managed"] = True
        except KeyError:
            logger.error(
                "rbac section is missing or incorrect in couchbase-cluster.yaml."
                " Please set spec --> security --> rbac --> managed : true"
                " and set spec --> security --> rbac --> selector --> matchLabels --> "
                "cluster --> to your cluster name")
            logger.info(
                "As a result of the above the installation will exit "
                "as the gluu user will not be created causing the communication between "
                "Gluu server and Couchbase to fail.")
            sys.exit()
        if "localOpenEbsHostPathDynamic" in self.settings.get(
                "installer-settings.volumeProvisionStrategy"):
            volume_claims = couchbase_cluster_parser["spec"][
                "volumeClaimTemplates"]
            for i, volume_claim in enumerate(volume_claims):
                couchbase_cluster_parser["spec"]["volumeClaimTemplates"][i]["spec"]["storageClassName"] = \
                    "openebs-hostpath"
        couchbase_cluster_parser.dump_it()

        self.kubernetes.create_objects_from_dict(
            self.couchbase_custom_resource_definition_file,
            namespace=cb_namespace)

        self.kubernetes.create_objects_from_dict(
            self.couchbase_operator_dac_file, namespace=cb_namespace)
        # @TODO: Remove only the condition after deprecation of couchbase operator 2.0
        if not self.old_couchbase:
            self.kubernetes.create_objects_from_dict(
                self.couchbase_admission_file, namespace=cb_namespace)

            self.kubernetes.create_objects_from_dict(
                self.couchbase_operator_backup_file, namespace=cb_namespace)

        self.kubernetes.check_pods_statuses(cb_namespace,
                                            "app=couchbase-operator", 700)

        self.kubernetes.patch_or_create_namespaced_secret(
            name="cb-auth",
            namespace=cb_namespace,
            literal="username",
            value_of_literal=encoded_cb_super_user_string,
            second_literal="password",
            value_of_second_literal=encoded_cb_super_pass_string)

        self.kubernetes.create_objects_from_dict(self.storage_class_file,
                                                 namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_cluster_file,
            group="couchbase.com",
            version="v2",
            plural="couchbaseclusters",
            namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_buckets_file,
            group="couchbase.com",
            version="v2",
            plural="couchbasebuckets",
            namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_ephemeral_buckets_file,
            group="couchbase.com",
            version="v2",
            plural="couchbaseephemeralbuckets",
            namespace=cb_namespace)
        couchbase_group_parser = Parser(self.couchbase_group_file,
                                        "CouchbaseGroup")
        couchbase_group_parser["metadata"]["labels"]["cluster"] = \
            self.settings.get("installer-settings.couchbase.clusterName")
        permissions = [
            "query_select", "query_update", "query_insert", "query_delete"
        ]
        allbuckets = ["", "site", "user", "cache", "token", "session"]
        roles = []
        for permission in permissions:
            for bucket in allbuckets:
                bucket_name = self.settings.get(
                    "config.configmap.cnCouchbaseBucketPrefix")
                if bucket:
                    bucket_name = bucket_name + "_" + bucket
                roles.append({"name": permission, "bucket": bucket_name})
        coucbase_group_parser["spec"]["roles"] = roles
        coucbase_group_parser.dump_it()
        coucbase_user_parser = Parser(self.couchbase_user_file,
                                      "CouchbaseUser")
        coucbase_user_parser["metadata"]["labels"]["cluster"] = \
            self.settings.get("installer-settings.couchbase.clusterName")
        coucbase_user_parser.dump_it()
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_group_file,
            group="couchbase.com",
            version="v2",
            plural="couchbasegroups",
            namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_user_file,
            group="couchbase.com",
            version="v2",
            plural="couchbaseusers",
            namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_rolebinding_file,
            group="couchbase.com",
            version="v2",
            plural="couchbaserolebindings",
            namespace=cb_namespace)
        self.kubernetes.check_pods_statuses(
            cb_namespace, "couchbase_service_analytics=enabled", 700)
        self.kubernetes.check_pods_statuses(cb_namespace,
                                            "couchbase_service_data=enabled",
                                            700)
        self.kubernetes.check_pods_statuses(
            cb_namespace, "couchbase_service_eventing=enabled", 700)
        self.kubernetes.check_pods_statuses(cb_namespace,
                                            "couchbase_service_index=enabled",
                                            700)
        self.kubernetes.check_pods_statuses(cb_namespace,
                                            "couchbase_service_query=enabled",
                                            700)
        self.kubernetes.check_pods_statuses(
            cb_namespace, "couchbase_service_search=enabled", 700)
        # Setup couchbase backups
        if self.settings.get("global.storageClass.provisioner") not in (
                "microk8s.io/hostpath", "k8s.io/minikube-hostpath"):
            self.setup_backup_couchbase()
        shutil.rmtree(self.couchbase_source_folder_pattern, ignore_errors=True)
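The install method base64-encodes each credential the same way before writing it into a Kubernetes secret. A small helper equivalent to that inline pattern; the helper name and sample value are illustrative.

import base64


# Equivalent of the repeated inline encoding above; name and sample value are placeholders.
def b64_str(value: str) -> str:
    return base64.b64encode(value.encode("utf-8")).decode("utf-8")


print(b64_str("admin"))  # YWRtaW4=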
Exemplo n.º 26
0
    def analyze_couchbase_cluster_yaml(self):
        """
        Dumps the calculated resources into the couchbase-cluster.yaml file. Also includes cloud zones.
        """
        parser = Parser("./couchbase/couchbase-cluster.yaml",
                        "CouchbaseCluster")
        parser["metadata"]["name"] = self.settings.get(
            "installer-settings.couchbase.clusterName")
        number_of_buckets = 5
        if self.settings.get("global.storageClass.provisioner") in ("microk8s.io/hostpath",
                                                                    "k8s.io/minikube-hostpath") or \
                self.settings.get("global.cloud.testEnviroment"):
            resources_servers = [{
                "name":
                "allServices",
                "size":
                1,
                "services":
                ["data", "index", "query", "search", "eventing", "analytics"],
                "volumeMounts": {
                    "default": "pvc-general",
                    "data": "pvc-data",
                    "index": "pvc-index",
                    "analytics": ["pvc-analytics"]
                }
            }]
            data_service_memory_quota = 1024
            index_service_memory_quota = 512
            search_service_memory_quota = 512
            eventing_service_memory_quota = 512
            analytics_service_memory_quota = 1024
            memory_quota = 0
        else:
            resources = self.calculate_couchbase_resources
            data_service_memory_quota = resources["COUCHBASE_DATA_MEM_QUOTA"]
            index_service_memory_quota = resources["COUCHBASE_INDEX_MEM_QUOTA"]
            search_service_memory_quota = resources[
                "COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_QUOTA"]
            eventing_service_memory_quota = resources[
                "COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_QUOTA"]
            analytics_service_memory_quota = resources[
                "COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_QUOTA"] + 1024
            memory_quota = ((resources["COUCHBASE_DATA_MEM_QUOTA"] - 500) /
                            number_of_buckets)
            zones_list = self.settings.get("CN_NODES_ZONES")
            data_server_spec = create_server_spec_per_cb_service(
                zones_list, int(resources["CN_COUCHBASE_DATA_NODES"]), "data",
                str(resources["COUCHBASE_DATA_MEM_REQUEST"]),
                str(resources["COUCHBASE_DATA_MEM_LIMIT"]),
                str(resources["COUCHBASE_DATA_CPU_REQUEST"]),
                str(resources["COUCHBASE_DATA_CPU_LIMIT"]))

            query_server_spec = create_server_spec_per_cb_service(
                zones_list, int(resources["CN_COUCHBASE_QUERY_NODES"]),
                "query", str(resources["COUCHBASE_QUERY_MEM_REQUEST"]),
                str(resources["COUCHBASE_QUERY_MEM_LIMIT"]),
                str(resources["COUCHBASE_QUERY_CPU_REQUEST"]),
                str(resources["COUCHBASE_QUERY_CPU_LIMIT"]))

            index_server_spec = create_server_spec_per_cb_service(
                zones_list, int(resources["CN_COUCHBASE_INDEX_NODES"]),
                "index", str(resources["COUCHBASE_INDEX_MEM_REQUEST"]),
                str(resources["COUCHBASE_INDEX_MEM_LIMIT"]),
                str(resources["COUCHBASE_INDEX_CPU_REQUEST"]),
                str(resources["COUCHBASE_INDEX_CPU_LIMIT"]))

            search_eventing_analytics_server_spec = create_server_spec_per_cb_service(
                zones_list,
                int(resources["CN_COUCHBASE_SEARCH_EVENTING_ANALYTICS_NODES"]),
                "analytics",
                str(resources[
                    "COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_REQUEST"]),
                str(resources["COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_LIMIT"]
                    ),
                str(resources[
                    "COUCHBASE_SEARCH_EVENTING_ANALYTICS_CPU_REQUEST"]),
                str(resources["COUCHBASE_SEARCH_EVENTING_ANALYTICS_CPU_LIMIT"]
                    ))

            resources_servers = \
                data_server_spec + query_server_spec + index_server_spec + \
                search_eventing_analytics_server_spec

        if self.settings.get("installer-settings.nodes.zones"):
            unique_zones = list(
                dict.fromkeys(
                    self.settings.get("installer-settings.nodes.zones")))
            parser["spec"]["serverGroups"] = unique_zones
        parser["spec"]["cluster"]["dataServiceMemoryQuota"] = str(
            data_service_memory_quota) + "Mi"
        parser["spec"]["cluster"]["indexServiceMemoryQuota"] = str(
            index_service_memory_quota) + "Mi"
        parser["spec"]["cluster"]["searchServiceMemoryQuota"] = str(
            search_service_memory_quota) + "Mi"
        parser["spec"]["cluster"]["eventingServiceMemoryQuota"] = str(
            eventing_service_memory_quota) + "Mi"
        parser["spec"]["cluster"]["analyticsServiceMemoryQuota"] = str(
            analytics_service_memory_quota) + "Mi"

        set_memory_for_buckets(
            memory_quota,
            self.settings.get("config.configmap.cnCouchbaseBucketPrefix"))
        parser["metadata"]["name"] = self.settings.get(
            "installer-settings.couchbase.clusterName")
        parser["spec"]["servers"] = resources_servers

        number_of_volume_claims = len(parser["spec"]["volumeClaimTemplates"])
        for i in range(number_of_volume_claims):
            name = parser["spec"]["volumeClaimTemplates"][i]["metadata"][
                "name"]
            if name == "pvc-general":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"][
                    "requests"]["storage"] = "5Gi"
            elif name == "pvc-data":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"][
                    "requests"]["storage"] = "5Gi"
            elif name == "pvc-index":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"][
                    "requests"]["storage"] = "5Gi"
            elif name == "pvc-query":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"][
                    "requests"]["storage"] = "5Gi"
            elif name == "pvc-analytics":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"][
                    "requests"]["storage"] = "5Gi"
        parser.dump_it()
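The volumeClaimTemplates loop above assigns the same 5Gi default to each named claim. A self-contained, data-driven sketch of that loop; the nested structure is a minimal mock of a CouchbaseCluster spec.

# Data-driven form of the volumeClaimTemplates sizing loop above; spec is a minimal mock.
STORAGE = {"pvc-general": "5Gi", "pvc-data": "5Gi", "pvc-index": "5Gi",
           "pvc-query": "5Gi", "pvc-analytics": "5Gi"}

spec = {"volumeClaimTemplates": [
    {"metadata": {"name": name}, "spec": {"resources": {"requests": {}}}}
    for name in STORAGE
]}

for template in spec["volumeClaimTemplates"]:
    size = STORAGE.get(template["metadata"]["name"])
    if size:
        template["spec"]["resources"]["requests"]["storage"] = size

print(spec["volumeClaimTemplates"][0]["spec"]["resources"]["requests"])  # {'storage': '5Gi'}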