class Gluu(object):
    """Installer for the Gluu helm chart (values-file driven settings).

    Handles the supporting pieces as well: nginx-ingress or AWS ALB
    ingress, the ldap-backup cronjob chart, the upgrade job chart, and
    their uninstallation.  Settings come from ``ValuesHandler``.
    """

    def __init__(self):
        # Override values rendered by the installer prompts for the main chart.
        self.values_file = Path("./helm/gluu/override-values.yaml").resolve()
        self.upgrade_values_file = Path(
            "./helm/gluu-upgrade/values.yaml").resolve()
        self.settings = ValuesHandler()
        self.kubernetes = Kubernetes()
        self.ldap_backup_release_name = self.settings.get(
            "installer-settings.releaseName") + "-ldap-backup"
        if "gke" in self.settings.get(
                "installer-settings.volumeProvisionStrategy"):
            # Clusterrolebinding needs to be created for gke with CB installed
            if self.settings.get("config.configmap.cnCacheType") == "REDIS" or \
                    self.settings.get("installer-settings.couchbase.install"):
                user_account, stderr, retcode = exec_cmd(
                    "gcloud config get-value core/account")
                user_account = str(user_account, "utf-8").strip()
                user, stderr, retcode = exec_cmd("whoami")
                user = str(user, "utf-8").strip()
                cluster_role_binding_name = "cluster-admin-{}".format(user)
                self.kubernetes.create_cluster_role_binding(
                    cluster_role_binding_name=cluster_role_binding_name,
                    user_name=user_account,
                    cluster_role_name="cluster-admin")

    def prepare_alb(self):
        """Prepare ./alb/ingress.yaml for the AWS ALB ingress.

        Sets the host and ACM certificate annotation and removes ingress
        paths belonging to services that are disabled in the settings.
        """
        ingress_parser = Parser("./alb/ingress.yaml", "Ingress")
        ingress_parser["spec"]["rules"][0]["host"] = self.settings.get(
            "global.fqdn")
        ingress_parser["metadata"]["annotations"]["alb.ingress.kubernetes.io/certificate-arn"] = \
            self.settings.get("installer-settings.aws.arn.arnAcmCert")
        if not self.settings.get("installer-settings.aws.arn.enabled"):
            del ingress_parser["metadata"]["annotations"][
                "alb.ingress.kubernetes.io/certificate-arn"]
        # Optional services mapped to the setting that enables them.
        enabled_by_service = {
            "casa": self.settings.get("config.configmap.cnCasaEnabled"),
            "oxshibboleth": self.settings.get("global.oxshibboleth.enabled"),
            "oxpassport": self.settings.get(
                "config.configmap.cnPassportEnabled"),
            "jans-scim": self.settings.get(
                "installer-settings.global.scim.enabled"),
            "config-api": self.settings.get(
                "installer-settings.config-api.enabled"),
        }
        # BUG FIX: the original deleted a path when its service WAS enabled
        # (missing ``not``; the keyed-settings sibling installer removes paths
        # for flags != "Y", i.e. disabled), and it deleted from the list it
        # was iterating, which skips the element after every deletion.
        # Build a filtered list instead; unknown services are kept.
        paths = ingress_parser["spec"]["rules"][0]["http"]["paths"]
        ingress_parser["spec"]["rules"][0]["http"]["paths"] = [
            path for path in paths
            if enabled_by_service.get(path["backend"]["serviceName"], True)
        ]
        ingress_parser.dump_it()

    def deploy_alb(self):
        """Create the ALB ingress object and record the load balancer
        address in ``config.configmap.lbAddr``."""
        alb_ingress = Path("./alb/ingress.yaml")
        self.kubernetes.create_objects_from_dict(
            alb_ingress, self.settings.get("installer-settings.namespace"))
        # BUG FIX: lb_hostname was only bound inside the prompt branch; the
        # loop below would raise NameError when that branch is skipped.
        lb_hostname = None
        # NOTE(review): prompting whenever a FQDN is set looks suspicious --
        # the keyed-settings sibling prompts only when the FQDN is NOT
        # registered.  Confirm the intended condition.
        if self.settings.get("global.fqdn"):
            lb_hostname = input(
                "Please input the DNS of the Application load balancer found on AWS UI: "
            )
        while True:
            try:
                if lb_hostname:
                    break
                lb_hostname = self.kubernetes.read_namespaced_ingress(
                    name="gluu", namespace="gluu"
                ).status.load_balancer.ingress[0].hostname
            except TypeError:
                logger.info("Waiting for loadbalancer address..")
                time.sleep(10)
        self.settings.set("config.configmap.lbAddr", lb_hostname)

    def wait_for_nginx_add(self):
        """Poll until the ingress-nginx controller service exposes an
        external address, then persist it in the settings.

        aws: hostname -> config.configmap.lbAddr (for nlb, wait until DNS
        resolves); local: in-cluster service DNS name; otherwise the
        external IP -> global.lbIp.
        """
        hostname_ip = None
        while True:
            try:
                if hostname_ip:
                    break
                if "aws" in self.settings.get(
                        "installer-settings.volumeProvisionStrategy"):
                    # BUG FIX: the controller is helm-installed into
                    # installer-settings.nginxIngress.namespace (see
                    # check_install_nginx_ingress); the original read the
                    # service from a namespace named after the release.
                    hostname_ip = self.kubernetes.read_namespaced_service(
                        name=self.settings.get(
                            'installer-settings.nginxIngress.releaseName') +
                        "-ingress-nginx-controller",
                        namespace=self.settings.get(
                            "installer-settings.nginxIngress.namespace"
                        )).status.load_balancer.ingress[0].hostname
                    self.settings.set("config.configmap.lbAddr", hostname_ip)
                    if self.settings.get(
                            "installer-settings.aws.lbType") == "nlb":
                        # NLB hostname only counts once DNS resolves.
                        try:
                            ip_static = socket.gethostbyname(str(hostname_ip))
                            if ip_static:
                                break
                        except socket.gaierror:
                            logger.info("Address has not received an ip yet.")
                elif "local" in self.settings.get(
                        "installer-settings.volumeProvisionStrategy"):
                    # Bare metal: use the in-cluster DNS name directly.
                    self.settings.set(
                        "config.configmap.lbAddr",
                        self.settings.get(
                            'installer-settings.nginxIngress.releaseName') +
                        "-nginx-ingress-controller." +
                        self.settings.get(
                            "installer-settings.nginxIngress.namespace") +
                        ".svc.cluster.local")
                    break
                else:
                    hostname_ip = self.kubernetes.read_namespaced_service(
                        name=self.settings.get(
                            'installer-settings.nginxIngress.releaseName') +
                        "-ingress-nginx-controller",
                        namespace=self.settings.get(
                            "installer-settings.nginxIngress.namespace"
                        )).status.load_balancer.ingress[0].ip
                    self.settings.set("global.lbIp", hostname_ip)
            except (TypeError, AttributeError):
                logger.info("Waiting for address..")
                time.sleep(10)

    def check_install_nginx_ingress(self, install_ingress=True):
        """
        Helm installs nginx ingress or checks to recieve and ip or address

        :param install_ingress: when False, skip installation and only wait
            for the controller to receive an address.
        """
        if install_ingress:
            self.kubernetes.delete_custom_resource(
                "virtualservers.k8s.nginx.org")
            self.kubernetes.delete_custom_resource(
                "virtualserverroutes.k8s.nginx.org")
            self.kubernetes.delete_cluster_role("ingress-nginx-nginx-ingress")
            self.kubernetes.delete_cluster_role_binding(
                "ingress-nginx-nginx-ingress")
            # BUG FIX: create the namespace helm actually installs into
            # (installer-settings.nginxIngress.namespace); the original
            # created one named after the helm release instead.
            self.kubernetes.create_namespace(
                name=self.settings.get(
                    "installer-settings.nginxIngress.namespace"),
                labels={"app": "ingress-nginx"})
            self.kubernetes.delete_cluster_role(
                self.settings.get(
                    'installer-settings.nginxIngress.releaseName') +
                "-nginx-ingress-controller")
            self.kubernetes.delete_cluster_role_binding(
                self.settings.get(
                    'installer-settings.nginxIngress.releaseName') +
                "-nginx-ingress-controller")
        try:
            exec_cmd(
                "helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx"
            )
            exec_cmd("helm repo add stable https://charts.helm.sh/stable")
            exec_cmd("helm repo update")
        except FileNotFoundError:
            logger.error(
                "Helm v3 is not installed. Please install it to continue "
                "https://helm.sh/docs/intro/install/")
            raise SystemExit(1)
        command = "helm install {} ingress-nginx/ingress-nginx --namespace={} ".format(
            self.settings.get('installer-settings.nginxIngress.releaseName'),
            self.settings.get("installer-settings.nginxIngress.namespace"))
        if self.settings.get("installer-settings.volumeProvisionStrategy"
                             ) == "minikubeDynamic":
            exec_cmd("minikube addons enable ingress")
        if "aws" in self.settings.get(
                "installer-settings.volumeProvisionStrategy"):
            if self.settings.get("installer-settings.aws.lbType") == "nlb":
                if install_ingress:
                    nlb_override_values_file = Path(
                        "./nginx/aws/aws-nlb-override-values.yaml").resolve()
                    nlb_values = " --values {}".format(
                        nlb_override_values_file)
                    exec_cmd(command + nlb_values)
            else:
                if self.settings.get("installer-settings.aws.arn.enabled"):
                    if install_ingress:
                        elb_override_values_file = Path(
                            "./nginx/aws/aws-elb-override-values.yaml"
                        ).resolve()
                        elb_file_parser = Parser(elb_override_values_file,
                                                 True)
                        elb_file_parser["controller"]["service"][
                            "annotations"].update({
                                "service.beta.kubernetes.io/aws-load-balancer-ssl-cert":
                                    self.settings.get(
                                        "installer-settings.aws.arn.arnAcmCert")
                            })
                        elb_file_parser["controller"]["config"]["proxy-real-ip-cidr"] = \
                            self.settings.get("installer-settings.aws.vpcCidr")
                        elb_file_parser.dump_it()
                        elb_values = " --values {}".format(
                            elb_override_values_file)
                        exec_cmd(command + elb_values)
                else:
                    if install_ingress:
                        exec_cmd(command)
        volume_provision_strategy = self.settings.get(
            "installer-settings.volumeProvisionStrategy")
        if "gke" in volume_provision_strategy or \
                "aks" in volume_provision_strategy or \
                "doks" in volume_provision_strategy:
            if install_ingress:
                cloud_override_values_file = Path(
                    "./nginx/cloud/cloud-override-values.yaml").resolve()
                cloud_values = " --values {}".format(
                    cloud_override_values_file)
                exec_cmd(command + cloud_values)
        elif "local" in volume_provision_strategy:
            if install_ingress:
                baremetal_override_values_file = Path(
                    "./nginx/baremetal/baremetal-override-values.yaml"
                ).resolve()
                baremetal_values = " --values {}".format(
                    baremetal_override_values_file)
                exec_cmd(command + baremetal_values)
        if self.settings.get("global.storageClass.provisioner") not in \
                ("microk8s.io/hostpath", "k8s.io/minikube-hostpath"):
            # Cloud load balancers take a while to provision.
            logger.info("Waiting for nginx to be prepared...")
            time.sleep(60)
        self.wait_for_nginx_add()

    def install_gluu(self, install_ingress=True):
        """
        Helm install Gluu
        :param install_ingress:
        """
        labels = {"app": "gluu"}
        if self.settings.get("global.istio.enabled"):
            labels = {"app": "gluu", "istio-injection": "enabled"}
        self.kubernetes.create_namespace(
            name=self.settings.get("installer-settings.namespace"),
            labels=labels)
        if self.settings.get("global.cnPersistenceType") != "ldap" and \
                self.settings.get("installer-settings.couchbase.install"):
            couchbase_app = Couchbase()
            couchbase_app.uninstall()
            couchbase_app = Couchbase()
            couchbase_app.install()
            # Couchbase install may rewrite settings on disk; reload them.
            self.settings = ValuesHandler()
        if self.settings.get("installer-settings.aws.lbType") == "alb":
            self.prepare_alb()
            self.deploy_alb()
        # BUG FIX: nginx ingress is only needed when istio ingress is NOT
        # used; the original condition was inverted (missing ``not``).
        if self.settings.get("installer-settings.aws.lbType") != "alb" and \
                not self.settings.get("global.istio.ingress"):
            self.check_install_nginx_ingress(install_ingress)
        try:
            exec_cmd("helm install {} -f {} ./helm/gluu --namespace={}".format(
                self.settings.get('installer-settings.releaseName'),
                self.values_file,
                self.settings.get("installer-settings.namespace")))
            if self.settings.get("global.cnPersistenceType") in ("hybrid",
                                                                 "ldap"):
                self.install_ldap_backup()
        except FileNotFoundError:
            logger.error(
                "Helm v3 is not installed. Please install it to continue "
                "https://helm.sh/docs/intro/install/")
            raise SystemExit(1)

    def install_ldap_backup(self):
        """Render and install the ldap-backup cronjob chart."""
        values_file = Path("./helm/ldap-backup/values.yaml").resolve()
        values_file_parser = Parser(values_file, True)
        values_file_parser["ldapPass"] = self.settings.get(
            "config.ldapPassword")
        if self.settings.get("global.storageClass.provisioner") not in \
                ("microk8s.io/hostpath", "k8s.io/minikube-hostpath"):
            values_file_parser["gluuLdapSchedule"] = self.settings.get(
                "installer-settings.ldap.backup.fullSchedule")
        if self.settings.get("opendj.multiCluster.enabled"):
            values_file_parser["multiCluster"]["enabled"] = True
            values_file_parser["multiCluster"]["ldapAdvertiseAdminPort"] = \
                self.settings.get("opendj.ports.tcp-admin.nodePort")
            # NOTE(review): fixed-width slice drops the last 6 characters of
            # the suffix -- confirm against the settings format.
            values_file_parser["multiCluster"]["serfAdvertiseAddrSuffix"] = \
                self.settings.get(
                    "opendj.multiCluster.serfAdvertiseAddrSuffix")[:-6]
        values_file_parser.dump_it()
        exec_cmd(
            "helm install {} -f ./helm/ldap-backup/values.yaml ./helm/ldap-backup --namespace={}"
            .format(self.ldap_backup_release_name,
                    self.settings.get("installer-settings.namespace")))

    def upgrade_gluu(self):
        """Render the gluu-upgrade chart values and run the upgrade job."""
        values_file_parser = Parser(self.upgrade_values_file, True)
        values_file_parser["domain"] = self.settings.get("global.fqdn")
        values_file_parser["cnCacheType"] = self.settings.get(
            "config.configmap.cnCacheType")
        values_file_parser["cnCouchbaseUrl"] = self.settings.get(
            "config.configmap.cnCouchbaseUrl")
        values_file_parser["cnCouchbaseUser"] = self.settings.get(
            "config.configmap.cnCouchbaseUser")
        values_file_parser["cnCouchbaseSuperUser"] = self.settings.get(
            "config.configmap.cnCouchbaseSuperUser")
        # BUG FIX: these two keys were swapped in the original --
        # cnPersistenceLdapMapping received the persistence type and
        # cnPersistenceType received the ldap mapping.
        values_file_parser["cnPersistenceType"] = self.settings.get(
            "global.cnPersistenceType")
        values_file_parser["cnPersistenceLdapMapping"] = self.settings.get(
            "config.configmap.cnPersistenceLdapMapping")
        values_file_parser["source"] = self.settings.get(
            "installer-settings.currentVersion")
        values_file_parser["target"] = self.settings.get(
            "installer-settings.upgrade.targetVersion")
        values_file_parser.dump_it()
        # NOTE(review): -f passes the main chart override values, not
        # self.upgrade_values_file; the file rendered above is the upgrade
        # chart's default values.yaml so it is still picked up -- confirm
        # this is intended.
        exec_cmd(
            "helm install {} -f {} ./helm/gluu-upgrade --namespace={}".format(
                self.settings.get('installer-settings.releaseName'),
                self.values_file,
                self.settings.get("installer-settings.namespace")))

    def uninstall_gluu(self):
        """Delete the main Gluu release and the ldap-backup release."""
        exec_cmd("helm delete {} --namespace={}".format(
            self.settings.get('installer-settings.releaseName'),
            self.settings.get("installer-settings.namespace")))
        exec_cmd("helm delete {} --namespace={}".format(
            self.ldap_backup_release_name,
            self.settings.get("installer-settings.namespace")))

    def uninstall_nginx_ingress(self):
        """Delete the ingress-nginx helm release."""
        exec_cmd("helm delete {} --namespace={}".format(
            self.settings.get('installer-settings.nginxIngress.releaseName'),
            self.settings.get("installer-settings.nginxIngress.namespace")))
class Gluu(object):
    """Installer for the Gluu helm chart (keyed ``SettingsHandler`` flavour).

    Renders the chart values from the prompt-collected settings, installs
    nginx-ingress or the AWS ALB ingress, the ldap-backup cronjob chart and
    the upgrade job chart, and handles uninstallation.
    """

    def __init__(self):
        self.values_file = Path("./helm/gluu/values.yaml").resolve()
        self.upgrade_values_file = Path(
            "./helm/gluu-upgrade/values.yaml").resolve()
        self.settings = SettingsHandler()
        self.kubernetes = Kubernetes()
        self.ldap_backup_release_name = self.settings.get(
            'CN_HELM_RELEASE_NAME') + "-ldap-backup"
        if self.settings.get("DEPLOYMENT_ARCH") == "gke":
            # Clusterrolebinding needs to be created for gke with CB or kubeDB installed
            if self.settings.get("INSTALL_REDIS") == "Y" or \
                    self.settings.get("INSTALL_GLUU_GATEWAY") == "Y" or \
                    self.settings.get("INSTALL_COUCHBASE") == "Y":
                user_account, stderr, retcode = exec_cmd(
                    "gcloud config get-value core/account")
                user_account = str(user_account, "utf-8").strip()
                user, stderr, retcode = exec_cmd("whoami")
                user = str(user, "utf-8").strip()
                cluster_role_binding_name = "cluster-admin-{}".format(user)
                self.kubernetes.create_cluster_role_binding(
                    cluster_role_binding_name=cluster_role_binding_name,
                    user_name=user_account,
                    cluster_role_name="cluster-admin")

    def prepare_alb(self):
        """Prepare ./alb/ingress.yaml for the AWS ALB ingress.

        Sets the host and certificate annotation and removes ingress paths
        of services whose enable flag is not "Y".
        """
        ingress_parser = Parser("./alb/ingress.yaml", "Ingress")
        ingress_parser["spec"]["rules"][0]["host"] = self.settings.get(
            "CN_FQDN")
        ingress_parser["metadata"]["annotations"]["alb.ingress.kubernetes.io/certificate-arn"] = \
            self.settings.get("ARN_AWS_IAM")
        if not self.settings.get("ARN_AWS_IAM"):
            del ingress_parser["metadata"]["annotations"][
                "alb.ingress.kubernetes.io/certificate-arn"]
        # Optional services mapped to the flag that enables them.
        enabled_by_service = {
            "casa": self.settings.get("ENABLE_CASA"),
            "oxshibboleth": self.settings.get("ENABLE_OXSHIBBOLETH"),
            "oxpassport": self.settings.get("ENABLE_OXPASSPORT"),
            "gg-kong-ui": self.settings.get("INSTALL_GLUU_GATEWAY"),
        }
        # BUG FIX: the original deleted entries from the list it was
        # iterating, which skips the element right after each deletion.
        # Build a filtered list instead; unknown services are kept.
        paths = ingress_parser["spec"]["rules"][0]["http"]["paths"]
        ingress_parser["spec"]["rules"][0]["http"]["paths"] = [
            path for path in paths
            if enabled_by_service.get(path["backend"]["serviceName"],
                                      "Y") == "Y"
        ]
        ingress_parser.dump_it()

    def deploy_alb(self):
        """Create the ALB ingress object and record the load balancer
        address in the LB_ADD setting."""
        alb_ingress = Path("./alb/ingress.yaml")
        self.kubernetes.create_objects_from_dict(
            alb_ingress, self.settings.get("CN_NAMESPACE"))
        # BUG FIX: lb_hostname was only bound inside the prompt branch; when
        # the FQDN is registered the loop below raised NameError.
        lb_hostname = None
        if self.settings.get("IS_CN_FQDN_REGISTERED") != "Y":
            lb_hostname = input(
                "Please input the DNS of the Application load balancer created found on AWS UI: "
            )
        while True:
            try:
                if lb_hostname:
                    break
                lb_hostname = self.kubernetes.read_namespaced_ingress(
                    name="gluu", namespace="gluu"
                ).status.load_balancer.ingress[0].hostname
            except TypeError:
                logger.info("Waiting for loadbalancer address..")
                time.sleep(10)
        self.settings.set("LB_ADD", lb_hostname)

    def wait_for_nginx_add(self):
        """Poll until the ingress-nginx controller service exposes an
        external address, then persist it.

        eks: hostname -> LB_ADD (for nlb, wait until DNS resolves);
        local: in-cluster service DNS name -> LB_ADD; otherwise the
        external IP -> HOST_EXT_IP.
        """
        hostname_ip = None
        while True:
            try:
                if hostname_ip:
                    break
                if self.settings.get("DEPLOYMENT_ARCH") == "eks":
                    hostname_ip = self.kubernetes.read_namespaced_service(
                        name=self.settings.get('NGINX_INGRESS_RELEASE_NAME') +
                        "-ingress-nginx-controller",
                        namespace=self.settings.get("NGINX_INGRESS_NAMESPACE")
                    ).status.load_balancer.ingress[0].hostname
                    self.settings.set("LB_ADD", hostname_ip)
                    if self.settings.get("AWS_LB_TYPE") == "nlb":
                        # NLB hostname only counts once DNS resolves.
                        try:
                            ip_static = socket.gethostbyname(str(hostname_ip))
                            if ip_static:
                                break
                        except socket.gaierror:
                            logger.info("Address has not received an ip yet.")
                elif self.settings.get("DEPLOYMENT_ARCH") == "local":
                    # Bare metal: use the in-cluster DNS name directly.
                    self.settings.set(
                        "LB_ADD",
                        self.settings.get('NGINX_INGRESS_RELEASE_NAME') +
                        "-nginx-ingress-controller." +
                        self.settings.get("NGINX_INGRESS_NAMESPACE") +
                        ".svc.cluster.local")
                    break
                else:
                    hostname_ip = self.kubernetes.read_namespaced_service(
                        name=self.settings.get('NGINX_INGRESS_RELEASE_NAME') +
                        "-ingress-nginx-controller",
                        namespace=self.settings.get("NGINX_INGRESS_NAMESPACE")
                    ).status.load_balancer.ingress[0].ip
                    self.settings.set("HOST_EXT_IP", hostname_ip)
            except (TypeError, AttributeError):
                logger.info("Waiting for address..")
                time.sleep(10)

    def check_install_nginx_ingress(self, install_ingress=True):
        """
        Helm installs nginx ingress or checks to recieve and ip or address

        :param install_ingress: when False, skip installation and only wait
            for the controller to receive an address.
        """
        if install_ingress:
            self.kubernetes.delete_custom_resource(
                "virtualservers.k8s.nginx.org")
            self.kubernetes.delete_custom_resource(
                "virtualserverroutes.k8s.nginx.org")
            self.kubernetes.delete_cluster_role("ingress-nginx-nginx-ingress")
            self.kubernetes.delete_cluster_role_binding(
                "ingress-nginx-nginx-ingress")
            self.kubernetes.create_namespace(
                name=self.settings.get("NGINX_INGRESS_NAMESPACE"),
                labels={"app": "ingress-nginx"})
            self.kubernetes.delete_cluster_role(
                self.settings.get('NGINX_INGRESS_RELEASE_NAME') +
                "-nginx-ingress-controller")
            self.kubernetes.delete_cluster_role_binding(
                self.settings.get('NGINX_INGRESS_RELEASE_NAME') +
                "-nginx-ingress-controller")
        try:
            exec_cmd(
                "helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx"
            )
            exec_cmd("helm repo add stable https://charts.helm.sh/stable")
            exec_cmd("helm repo update")
        except FileNotFoundError:
            logger.error(
                "Helm v3 is not installed. Please install it to continue "
                "https://helm.sh/docs/intro/install/")
            raise SystemExit(1)
        command = "helm install {} ingress-nginx/ingress-nginx --namespace={} ".format(
            self.settings.get('NGINX_INGRESS_RELEASE_NAME'),
            self.settings.get("NGINX_INGRESS_NAMESPACE"))
        if self.settings.get("DEPLOYMENT_ARCH") == "minikube":
            exec_cmd("minikube addons enable ingress")
        if self.settings.get("DEPLOYMENT_ARCH") == "eks":
            if self.settings.get("AWS_LB_TYPE") == "nlb":
                if install_ingress:
                    nlb_override_values_file = Path(
                        "./nginx/aws/aws-nlb-override-values.yaml").resolve()
                    nlb_values = " --values {}".format(
                        nlb_override_values_file)
                    exec_cmd(command + nlb_values)
            else:
                if self.settings.get("USE_ARN") == "Y":
                    if install_ingress:
                        elb_override_values_file = Path(
                            "./nginx/aws/aws-elb-override-values.yaml"
                        ).resolve()
                        elb_file_parser = Parser(elb_override_values_file,
                                                 True)
                        elb_file_parser["controller"]["service"][
                            "annotations"].update({
                                "service.beta.kubernetes.io/aws-load-balancer-ssl-cert":
                                    self.settings.get("ARN_AWS_IAM")
                            })
                        elb_file_parser["controller"]["config"][
                            "proxy-real-ip-cidr"] = self.settings.get(
                                "VPC_CIDR")
                        elb_file_parser.dump_it()
                        elb_values = " --values {}".format(
                            elb_override_values_file)
                        exec_cmd(command + elb_values)
                else:
                    if install_ingress:
                        exec_cmd(command)
        if self.settings.get("DEPLOYMENT_ARCH") in ("gke", "aks", "do"):
            if install_ingress:
                cloud_override_values_file = Path(
                    "./nginx/cloud/cloud-override-values.yaml").resolve()
                cloud_values = " --values {}".format(
                    cloud_override_values_file)
                exec_cmd(command + cloud_values)
        if self.settings.get("DEPLOYMENT_ARCH") == "local":
            if install_ingress:
                baremetal_override_values_file = Path(
                    "./nginx/baremetal/baremetal-override-values.yaml"
                ).resolve()
                baremetal_values = " --values {}".format(
                    baremetal_override_values_file)
                exec_cmd(command + baremetal_values)
        if self.settings.get("DEPLOYMENT_ARCH") not in ("microk8s",
                                                        "minikube"):
            # Cloud load balancers take a while to provision.
            logger.info("Waiting for nginx to be prepared...")
            time.sleep(60)
        self.wait_for_nginx_add()

    def analyze_global_values(self):
        """
        Parses Gluu values.yaml with the input information from prompts
        """
        values_file_parser = Parser(self.values_file, True)
        # Storage class provisioner per deployment architecture.
        if self.settings.get("DEPLOYMENT_ARCH") == "minikube":
            provisioner = "k8s.io/minikube-hostpath"
        elif self.settings.get("DEPLOYMENT_ARCH") == "eks":
            provisioner = "kubernetes.io/aws-ebs"
        elif self.settings.get("DEPLOYMENT_ARCH") == "gke":
            provisioner = "kubernetes.io/gce-pd"
        elif self.settings.get("DEPLOYMENT_ARCH") == "aks":
            provisioner = "kubernetes.io/azure-disk"
        elif self.settings.get("DEPLOYMENT_ARCH") == "do":
            provisioner = "dobs.csi.digitalocean.com"
        elif self.settings.get("DEPLOYMENT_ARCH") == "local":
            provisioner = "openebs.io/local"
        else:
            provisioner = "microk8s.io/hostpath"
        values_file_parser["global"]["storageClass"][
            "provisioner"] = provisioner
        values_file_parser["global"]["lbIp"] = self.settings.get(
            "HOST_EXT_IP")
        values_file_parser["global"]["domain"] = self.settings.get("CN_FQDN")
        values_file_parser["global"]["isDomainRegistered"] = "false"
        if self.settings.get("IS_CN_FQDN_REGISTERED") == "Y":
            values_file_parser["global"]["isDomainRegistered"] = "true"
        # Redis cache settings.
        if self.settings.get("CN_CACHE_TYPE") == "REDIS":
            values_file_parser["config"]["configmap"][
                "cnRedisUrl"] = self.settings.get("REDIS_URL")
            values_file_parser["config"]["configmap"][
                "cnRedisType"] = self.settings.get("REDIS_TYPE")
            values_file_parser["config"]["configmap"][
                "cnRedisUseSsl"] = self.settings.get("REDIS_USE_SSL")
            values_file_parser["config"]["configmap"]["cnRedisSslTruststore"] = \
                self.settings.get("REDIS_SSL_TRUSTSTORE")
            values_file_parser["config"]["configmap"]["cnRedisSentinelGroup"] = \
                self.settings.get("REDIS_SENTINEL_GROUP")
            values_file_parser["config"]["redisPass"] = self.settings.get(
                "REDIS_PW")
        if self.settings.get("DEPLOYMENT_ARCH") in ("microk8s", "minikube") \
                or self.settings.get("TEST_ENVIRONMENT") == "Y":
            values_file_parser["global"]["cloud"]["testEnviroment"] = True
        values_file_parser["config"]["configmap"][
            "lbAddr"] = self.settings.get("LB_ADD")
        # Persistence backend.
        values_file_parser["global"]["cnPersistenceType"] = self.settings.get(
            "PERSISTENCE_BACKEND")
        values_file_parser["config"]["configmap"][
            "cnPersistenceType"] = self.settings.get("PERSISTENCE_BACKEND")
        values_file_parser["config"]["configmap"]["cnPersistenceLdapMapping"] = \
            self.settings.get("HYBRID_LDAP_HELD_DATA")
        if self.settings.get("PERSISTENCE_BACKEND") != "ldap":
            values_file_parser["config"]["configmap"][
                "cnCouchbaseUrl"] = self.settings.get("COUCHBASE_URL")
            values_file_parser["config"]["configmap"][
                "cnCouchbaseUser"] = self.settings.get("COUCHBASE_USER")
            values_file_parser["config"]["configmap"][
                "cnCouchbaseIndexNumReplica"] = self.settings.get(
                    "COUCHBASE_INDEX_NUM_REPLICA")
            values_file_parser["config"]["configmap"][
                "cnCouchbaseBucketPrefix"] = self.settings.get(
                    "COUCHBASE_BUCKET_PREFIX")
            values_file_parser["config"]["configmap"]["cnCouchbaseSuperUser"] = \
                self.settings.get("COUCHBASE_SUPERUSER")
            values_file_parser["config"]["configmap"][
                "cnCouchbaseCrt"] = self.settings.get("COUCHBASE_CRT")
            values_file_parser["config"]["configmap"][
                "cnCouchbasePass"] = self.settings.get("COUCHBASE_PASSWORD")
            values_file_parser["config"]["configmap"]["cnCouchbaseSuperUserPass"] = \
                self.settings.get("COUCHBASE_SUPERUSER_PASSWORD")
        # Core components; optional ones toggled below.
        values_file_parser["global"]["auth-server"]["enabled"] = True
        values_file_parser["global"]["persistence"]["enabled"] = True
        values_file_parser["global"]["oxtrust"]["enabled"] = True
        values_file_parser["global"]["config"]["enabled"] = True
        values_file_parser["global"]["opendj"]["enabled"] = False
        values_file_parser["global"]["fido2"]["enabled"] = False
        if self.settings.get("ENABLE_FIDO2") == "Y":
            values_file_parser["global"]["fido2"]["enabled"] = True
            values_file_parser["fido2"]["replicas"] = self.settings.get(
                "FIDO2_REPLICAS")
        values_file_parser["global"]["scim"]["enabled"] = False
        if self.settings.get("ENABLE_SCIM") == "Y":
            values_file_parser["global"]["scim"]["enabled"] = True
            values_file_parser["scim"]["replicas"] = self.settings.get(
                "SCIM_REPLICAS")
        if self.settings.get("ENABLE_CONFIG_API") == "Y":
            values_file_parser["global"]["config-api"]["enabled"] = True
        # Jackrabbit document store.
        if self.settings.get("INSTALL_JACKRABBIT") == "Y":
            values_file_parser["global"]["jackrabbit"]["enabled"] = True
            values_file_parser["config"]["configmap"][
                "cnJackrabbitUrl"] = self.settings.get("JACKRABBIT_URL")
            values_file_parser["jackrabbit"]["secrets"]["cnJackrabbitAdminPass"] = \
                self.settings.get("JACKRABBIT_ADMIN_PASSWORD")
            values_file_parser["jackrabbit"]["secrets"]["cnJackrabbitPostgresPass"] = \
                self.settings.get("JACKRABBIT_PG_PASSWORD")
        # Ingress flavour: istio, AWS ALB, or nginx.
        if self.settings.get("USE_ISTIO_INGRESS") == "Y":
            values_file_parser["global"]["istio"]["ingress"] = True
            values_file_parser["global"]["istio"]["enabled"] = True
            values_file_parser["global"]["istio"][
                "namespace"] = self.settings.get("ISTIO_SYSTEM_NAMESPACE")
        elif self.settings.get("AWS_LB_TYPE") == "alb":
            values_file_parser["global"]["alb"]["ingress"] = True
        else:
            values_file_parser["nginx-ingress"]["ingress"]["enabled"] = True
            values_file_parser["nginx-ingress"]["ingress"]["hosts"] = [
                self.settings.get("CN_FQDN")
            ]
            values_file_parser["nginx-ingress"]["ingress"]["tls"][0][
                "hosts"] = [self.settings.get("CN_FQDN")]
        if self.settings.get("USE_ISTIO") == "Y":
            values_file_parser["global"]["istio"]["enabled"] = True
        values_file_parser["global"]["cnJackrabbitCluster"] = "false"
        if self.settings.get("JACKRABBIT_CLUSTER") == "Y":
            values_file_parser["global"]["cnJackrabbitCluster"] = "true"
            values_file_parser["config"]["configmap"]["cnJackrabbitAdminId"] = \
                self.settings.get("JACKRABBIT_ADMIN_ID")
            # BUG FIX: the original assigned cnJackrabbitPostgresUser twice;
            # the redundant duplicate was dropped.
            values_file_parser["config"]["configmap"]["cnJackrabbitPostgresUser"] = \
                self.settings.get("JACKRABBIT_PG_USER")
            values_file_parser["config"]["configmap"]["cnJackrabbitPostgresDatabaseName"] = \
                self.settings.get("JACKRABBIT_DATABASE")
            values_file_parser["config"]["configmap"]["cnJackrabbitPostgresHost"] = \
                self.settings.get("POSTGRES_URL")
        # LDAP / hybrid persistence.
        if self.settings.get("PERSISTENCE_BACKEND") == "hybrid" or \
                self.settings.get("PERSISTENCE_BACKEND") == "ldap":
            values_file_parser["global"]["opendj"]["enabled"] = True
            # ALPHA-FEATURE: Multi cluster ldap replication
            if self.settings.get("CN_LDAP_MULTI_CLUSTER") == "Y":
                values_file_parser["opendj"]["multiCluster"]["enabled"] = True
                values_file_parser["opendj"]["multiCluster"]["serfAdvertiseAddr"] = \
                    self.settings.get("CN_LDAP_ADVERTISE_ADDRESS")
                serf_key = base64.b64encode(secrets.token_bytes()).decode()
                values_file_parser["opendj"]["multiCluster"][
                    "serfKey"] = serf_key
                values_file_parser["opendj"]["multiCluster"]["serfPeers"] = \
                    self.settings.get("CN_LDAP_SERF_PEERS")
                if self.settings.get("CN_LDAP_SECONDARY_CLUSTER") == "Y":
                    # Secondary cluster replicates; do not load persistence.
                    values_file_parser["global"]["persistence"][
                        "enabled"] = False
                values_file_parser["opendj"]["ports"]["tcp-ldaps"]["nodePort"] = \
                    int(self.settings.get("CN_LDAP_ADVERTISE_LDAPS_PORT"))
                values_file_parser["opendj"]["ports"]["tcp-repl"]["port"] = \
                    int(self.settings.get("CN_LDAP_ADVERTISE_REPLICATION_PORT"))
                values_file_parser["opendj"]["ports"]["tcp-repl"]["targetPort"] = \
                    int(self.settings.get("CN_LDAP_ADVERTISE_REPLICATION_PORT"))
                values_file_parser["opendj"]["ports"]["tcp-repl"]["nodePort"] = \
                    int(self.settings.get("CN_LDAP_ADVERTISE_REPLICATION_PORT"))
                values_file_parser["opendj"]["ports"]["tcp-admin"]["port"] = \
                    int(self.settings.get("CN_LDAP_ADVERTISE_ADMIN_PORT"))
                values_file_parser["opendj"]["ports"]["tcp-admin"]["targetPort"] = \
                    int(self.settings.get("CN_LDAP_ADVERTISE_ADMIN_PORT"))
                values_file_parser["opendj"]["ports"]["tcp-admin"]["nodePort"] = \
                    int(self.settings.get("CN_LDAP_ADVERTISE_ADMIN_PORT"))
                values_file_parser["opendj"]["ports"]["tcp-serf"]["nodePort"] = \
                    int(self.settings.get("CN_LDAP_SERF_PORT"))
                values_file_parser["opendj"]["ports"]["udp-serf"]["nodePort"] = \
                    int(self.settings.get("CN_LDAP_SERF_PORT"))
        values_file_parser["global"]["oxshibboleth"]["enabled"] = False
        if self.settings.get("ENABLE_OXSHIBBOLETH") == "Y":
            values_file_parser["global"]["oxshibboleth"]["enabled"] = True
            values_file_parser["config"]["configmap"][
                "cnSyncShibManifests"] = True
        values_file_parser["global"]["client-api"]["enabled"] = False
        if self.settings.get("ENABLE_CLIENT_API") == "Y":
            values_file_parser["global"]["client-api"]["enabled"] = True
            values_file_parser["config"]["configmap"]["jansClientApiApplicationCertCn"] = \
                self.settings.get("CLIENT_API_APPLICATION_KEYSTORE_CN")
            values_file_parser["config"]["configmap"][
                "jansClientApiAdminCertCn"] = self.settings.get(
                    "CLIENT_API_ADMIN_KEYSTORE_CN")
            values_file_parser["client-api"]["replicas"] = self.settings.get(
                "CLIENT_API_REPLICAS")
        values_file_parser["opendj"]["cnRedisEnabled"] = False
        if self.settings.get("CN_CACHE_TYPE") == "REDIS":
            values_file_parser["opendj"]["cnRedisEnabled"] = True
        values_file_parser["global"]["nginx-ingress"]["enabled"] = True
        values_file_parser["global"]["cr-rotate"]["enabled"] = False
        if self.settings.get("ENABLE_CACHE_REFRESH") == "Y":
            values_file_parser["global"]["cr-rotate"]["enabled"] = True
        values_file_parser["global"]["auth-server-key-rotation"][
            "enabled"] = False
        if self.settings.get("ENABLE_AUTH_SERVER_KEY_ROTATE") == "Y":
            values_file_parser["global"]["auth-server-key-rotation"][
                "enabled"] = True
            values_file_parser["auth-server-key-rotation"][
                "keysLife"] = self.settings.get("AUTH_SERVER_KEYS_LIFE")
        # Organization info and secrets.
        values_file_parser["config"]["orgName"] = self.settings.get(
            "ORG_NAME")
        values_file_parser["config"]["email"] = self.settings.get("EMAIL")
        values_file_parser["config"]["adminPass"] = self.settings.get(
            "ADMIN_PW")
        values_file_parser["config"]["ldapPass"] = self.settings.get(
            "LDAP_PW")
        values_file_parser["config"]["countryCode"] = self.settings.get(
            "COUNTRY_CODE")
        values_file_parser["config"]["state"] = self.settings.get("STATE")
        values_file_parser["config"]["city"] = self.settings.get("CITY")
        values_file_parser["config"]["configmap"][
            "cnCacheType"] = self.settings.get("CN_CACHE_TYPE")
        values_file_parser["opendj"]["replicas"] = self.settings.get(
            "LDAP_REPLICAS")
        values_file_parser["opendj"]["persistence"][
            "size"] = self.settings.get("LDAP_STORAGE_SIZE")
        # Boolean-string feature flags (values are "true"/"false" strings).
        if self.settings.get("ENABLE_OXTRUST_API_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"][
                "cnOxtrustApiEnabled"] = True
        if self.settings.get("ENABLE_OXTRUST_TEST_MODE_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"][
                "cnOxtrustApiTestMode"] = True
        if self.settings.get("ENABLE_CASA_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"]["cnCasaEnabled"] = True
            values_file_parser["config"]["configmap"][
                "cnSyncCasaManifests"] = True
        if self.settings.get("ENABLE_OXPASSPORT_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"][
                "cnPassportEnabled"] = True
        if self.settings.get("ENABLE_RADIUS_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"]["cnRadiusEnabled"] = True
        if self.settings.get("ENABLE_SAML_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"]["cnSamlEnabled"] = True
        values_file_parser["oxpassport"]["resources"] = {}
        # Container images and replica counts.
        values_file_parser["casa"]["image"]["repository"] = self.settings.get(
            "CASA_IMAGE_NAME")
        values_file_parser["casa"]["image"]["tag"] = self.settings.get(
            "CASA_IMAGE_TAG")
        values_file_parser["casa"]["replicas"] = self.settings.get(
            "CASA_REPLICAS")
        values_file_parser["config"]["image"][
            "repository"] = self.settings.get("CONFIG_IMAGE_NAME")
        values_file_parser["config"]["image"]["tag"] = self.settings.get(
            "CONFIG_IMAGE_TAG")
        values_file_parser["cr-rotate"]["image"][
            "repository"] = self.settings.get(
                "CACHE_REFRESH_ROTATE_IMAGE_NAME")
        values_file_parser["cr-rotate"]["image"]["tag"] = self.settings.get(
            "CACHE_REFRESH_ROTATE_IMAGE_TAG")
        values_file_parser["auth-server-key-rotation"]["image"][
            "repository"] = self.settings.get("CERT_MANAGER_IMAGE_NAME")
        values_file_parser["auth-server-key-rotation"]["image"][
            "tag"] = self.settings.get("CERT_MANAGER_IMAGE_TAG")
        values_file_parser["opendj"]["image"][
            "repository"] = self.settings.get("LDAP_IMAGE_NAME")
        values_file_parser["opendj"]["image"]["tag"] = self.settings.get(
            "LDAP_IMAGE_TAG")
        values_file_parser["persistence"]["image"][
            "repository"] = self.settings.get("PERSISTENCE_IMAGE_NAME")
        values_file_parser["persistence"]["image"]["tag"] = self.settings.get(
            "PERSISTENCE_IMAGE_TAG")
        values_file_parser["auth-server"]["image"][
            "repository"] = self.settings.get("AUTH_SERVER_IMAGE_NAME")
        values_file_parser["auth-server"]["image"]["tag"] = self.settings.get(
            "AUTH_SERVER_IMAGE_TAG")
        values_file_parser["client-api"]["image"][
            "repository"] = self.settings.get("CLIENT_API_IMAGE_NAME")
        values_file_parser["client-api"]["image"]["tag"] = self.settings.get(
            "CLIENT_API_IMAGE_TAG")
        values_file_parser["auth-server"]["replicas"] = self.settings.get(
            "AUTH_SERVER_REPLICAS")
        values_file_parser["oxpassport"]["image"][
            "repository"] = self.settings.get("OXPASSPORT_IMAGE_NAME")
        values_file_parser["oxpassport"]["image"]["tag"] = self.settings.get(
            "OXPASSPORT_IMAGE_TAG")
        values_file_parser["oxpassport"]["replicas"] = self.settings.get(
            "OXPASSPORT_REPLICAS")
        values_file_parser["oxshibboleth"]["image"][
            "repository"] = self.settings.get("OXSHIBBOLETH_IMAGE_NAME")
        values_file_parser["oxshibboleth"]["image"]["tag"] = self.settings.get(
            "OXSHIBBOLETH_IMAGE_TAG")
        values_file_parser["oxshibboleth"]["replicas"] = self.settings.get(
            "OXSHIBBOLETH_REPLICAS")
        values_file_parser["jackrabbit"]["image"][
            "repository"] = self.settings.get("JACKRABBIT_IMAGE_NAME")
        values_file_parser["jackrabbit"]["image"]["tag"] = self.settings.get(
            "JACKRABBIT_IMAGE_TAG")
        values_file_parser["oxtrust"]["image"][
            "repository"] = self.settings.get("OXTRUST_IMAGE_NAME")
        values_file_parser["oxtrust"]["image"]["tag"] = self.settings.get(
            "OXTRUST_IMAGE_TAG")
        values_file_parser["oxtrust"]["replicas"] = self.settings.get(
            "OXTRUST_REPLICAS")
        values_file_parser["radius"]["image"][
            "repository"] = self.settings.get("RADIUS_IMAGE_NAME")
        values_file_parser["radius"]["image"]["tag"] = self.settings.get(
            "RADIUS_IMAGE_TAG")
        values_file_parser["radius"]["replicas"] = self.settings.get(
            "RADIUS_REPLICAS")
        values_file_parser.dump_it()

    def install_gluu(self, install_ingress=True):
        """
        Helm install Gluu
        :param install_ingress:
        """
        labels = {"app": "gluu"}
        if self.settings.get("USE_ISTIO") == "Y":
            labels = {"app": "gluu", "istio-injection": "enabled"}
        self.kubernetes.create_namespace(
            name=self.settings.get("CN_NAMESPACE"), labels=labels)
        if self.settings.get("PERSISTENCE_BACKEND") != "ldap" and \
                self.settings.get("INSTALL_COUCHBASE") == "Y":
            couchbase_app = Couchbase()
            couchbase_app.uninstall()
            couchbase_app = Couchbase()
            couchbase_app.install()
            # Couchbase install may rewrite settings on disk; reload them.
            self.settings = SettingsHandler()
        if self.settings.get("AWS_LB_TYPE") == "alb":
            self.prepare_alb()
            self.deploy_alb()
        if self.settings.get("AWS_LB_TYPE") != "alb" and \
                self.settings.get("USE_ISTIO_INGRESS") != "Y":
            self.check_install_nginx_ingress(install_ingress)
        self.analyze_global_values()
        try:
            exec_cmd("helm install {} -f {} ./helm/gluu --namespace={}".format(
                self.settings.get('CN_HELM_RELEASE_NAME'), self.values_file,
                self.settings.get("CN_NAMESPACE")))
            if self.settings.get("PERSISTENCE_BACKEND") == "hybrid" or \
                    self.settings.get("PERSISTENCE_BACKEND") == "ldap":
                self.install_ldap_backup()
        except FileNotFoundError:
            logger.error(
                "Helm v3 is not installed. Please install it to continue "
                "https://helm.sh/docs/intro/install/")
            raise SystemExit(1)

    def install_ldap_backup(self):
        """Render and install the ldap-backup cronjob chart."""
        values_file = Path("./helm/ldap-backup/values.yaml").resolve()
        values_file_parser = Parser(values_file, True)
        values_file_parser["ldapPass"] = self.settings.get("LDAP_PW")
        values_file_parser.dump_it()
        exec_cmd(
            "helm install {} -f ./helm/ldap-backup/values.yaml ./helm/ldap-backup --namespace={}"
            .format(self.ldap_backup_release_name,
                    self.settings.get("CN_NAMESPACE")))

    def upgrade_gluu(self):
        """Render the gluu-upgrade chart values and run the upgrade job."""
        values_file_parser = Parser(self.upgrade_values_file, True)
        values_file_parser["domain"] = self.settings.get("CN_FQDN")
        values_file_parser["cnCacheType"] = self.settings.get("CN_CACHE_TYPE")
        values_file_parser["cnCouchbaseUrl"] = self.settings.get(
            "COUCHBASE_URL")
        values_file_parser["cnCouchbaseUser"] = self.settings.get(
            "COUCHBASE_USER")
        values_file_parser["cnCouchbaseSuperUser"] = self.settings.get(
            "COUCHBASE_SUPERUSER")
        values_file_parser["cnPersistenceLdapMapping"] = self.settings.get(
            "HYBRID_LDAP_HELD_DATA")
        values_file_parser["cnPersistenceType"] = self.settings.get(
            "PERSISTENCE_BACKEND")
        values_file_parser["source"] = self.settings.get("CN_VERSION")
        values_file_parser["target"] = self.settings.get(
            "CN_UPGRADE_TARGET_VERSION")
        values_file_parser.dump_it()
        # NOTE(review): -f passes the main chart values file, not
        # self.upgrade_values_file; the file rendered above is the upgrade
        # chart's default values.yaml so it is still picked up -- confirm
        # this is intended.
        exec_cmd(
            "helm install {} -f {} ./helm/gluu-upgrade --namespace={}".format(
                self.settings.get('CN_HELM_RELEASE_NAME'), self.values_file,
                self.settings.get("CN_NAMESPACE")))

    def uninstall_gluu(self):
        """Delete the main Gluu release and the ldap-backup release."""
        exec_cmd("helm delete {} --namespace={}".format(
            self.settings.get('CN_HELM_RELEASE_NAME'),
            self.settings.get("CN_NAMESPACE")))
        exec_cmd("helm delete {} --namespace={}".format(
            self.ldap_backup_release_name,
            self.settings.get("CN_NAMESPACE")))

    def uninstall_nginx_ingress(self):
        """Delete the ingress-nginx helm release."""
        exec_cmd("helm delete {} --namespace={}".format(
            self.settings.get('NGINX_INGRESS_RELEASE_NAME'),
            self.settings.get("NGINX_INGRESS_NAMESPACE")))
class Couchbase(object):
    """Installs, configures and uninstalls the Couchbase Autonomous Operator
    and a CouchbaseCluster for Gluu, driven by SettingsHandler values.
    """

    def __init__(self):
        self.settings = SettingsHandler()
        self.kubernetes = Kubernetes()
        # Manifest files shipped with the installer.
        self.storage_class_file = Path("./couchbase/storageclasses.yaml")
        self.couchbase_cluster_file = Path(
            "./couchbase/couchbase-cluster.yaml")
        self.couchbase_buckets_file = Path(
            "./couchbase/couchbase-buckets.yaml")
        self.couchbase_group_file = Path("./couchbase/couchbase-group.yaml")
        self.couchbase_user_file = Path("./couchbase/couchbase-user.yaml")
        self.couchbase_rolebinding_file = Path(
            "./couchbase/couchbase-rolebinding.yaml")
        self.couchbase_ephemeral_buckets_file = Path(
            "./couchbase/couchbase-ephemeral-buckets.yaml")
        # Locate (and extract) the operator package; both fall back to "."
        # when Couchbase is not being installed by us.
        self.couchbase_source_folder_pattern, self.couchbase_source_file = self.get_couchbase_files
        self.couchbase_custom_resource_definition_file = self.couchbase_source_file.joinpath(
            "crd.yaml")
        self.couchbase_operator_dac_file = self.couchbase_source_file.joinpath(
            "operator_dac.yaml")
        self.filename = ""

    @property
    def get_couchbase_files(self):
        """
        Returns the couchbase extracted package folder path containing manifests and the tar package file
        :return:
        """
        if self.settings.get("INSTALL_COUCHBASE") == "Y":
            couchbase_tar_pattern = "couchbase-autonomous-operator-kubernetes_*.tar.gz"
            directory = Path('.')
            try:
                couchbase_tar_file = list(
                    directory.glob(couchbase_tar_pattern))[0]
                # Operator 1.x packages are named *_1.*; they are unsupported.
                if "_1." in str(couchbase_tar_file.resolve()):
                    logger.fatal(
                        "Couchbase Autonomous Operator version must be > 2.0")
                    sys.exit()
            except IndexError:
                logger.fatal("Couchbase package not found.")
                logger.info(
                    "Please download the couchbase kubernetes package and place it inside the same directory "
                    "containing the pygluu-kubernetes.pyz script.https://www.couchbase.com/downloads"
                )
                sys.exit()
            extract_couchbase_tar(couchbase_tar_file)
            couchbase_source_folder_pattern = "./couchbase-source-folder/couchbase-autonomous-operator-kubernetes_*"
            couchbase_source_folder = list(
                directory.glob(couchbase_source_folder_pattern))[0]
            return couchbase_tar_file, couchbase_source_folder
        # Couchbase is installed.
        return Path("."), Path(".")

    def create_couchbase_gluu_cert_pass_secrets(self, encoded_ca_crt_string,
                                                encoded_cb_pass_string,
                                                encoded_cb_super_pass_string):
        """
        Create cor patch secret containing couchbase certificate authority crt and couchbase admin password
        :param encoded_ca_crt_string: base64-encoded CA certificate
        :param encoded_cb_pass_string: base64-encoded couchbase password
        :param encoded_cb_super_pass_string: base64-encoded couchbase superuser password
        """
        # Remove this if its not needed
        self.kubernetes.patch_or_create_namespaced_secret(
            name="cb-crt",
            namespace=self.settings.get("CN_NAMESPACE"),
            literal="couchbase.crt",
            value_of_literal=encoded_ca_crt_string)
        # Remove this if its not needed
        self.kubernetes.patch_or_create_namespaced_secret(
            name="cb-pass",
            namespace=self.settings.get("CN_NAMESPACE"),
            literal="couchbase_password",
            value_of_literal=encoded_cb_pass_string)
        self.kubernetes.patch_or_create_namespaced_secret(
            name="cb-super-pass",
            namespace=self.settings.get("CN_NAMESPACE"),
            literal="couchbase_superuser_password",
            value_of_literal=encoded_cb_super_pass_string)

    def setup_backup_couchbase(self):
        """
        Setups Couchbase backup strategy
        """
        couchbase_backup_file = Path(
            "./couchbase/backup/couchbase-backup.yaml")
        parser = Parser(couchbase_backup_file, "CouchbaseBackup")
        parser["spec"]["full"]["schedule"] = self.settings.get(
            "COUCHBASE_FULL_BACKUP_SCHEDULE")
        parser["spec"]["incremental"]["schedule"] = self.settings.get(
            "COUCHBASE_INCR_BACKUP_SCHEDULE")
        parser["spec"]["backupRetention"] = self.settings.get(
            "COUCHBASE_BACKUP_RETENTION_TIME")
        parser["spec"]["size"] = self.settings.get(
            "COUCHBASE_BACKUP_STORAGE_SIZE")
        parser.dump_it()
        self.kubernetes.create_namespaced_custom_object(
            filepath=couchbase_backup_file,
            group="couchbase.com",
            version="v2",
            plural="couchbasebackups",
            namespace=self.settings.get("COUCHBASE_NAMESPACE"))

    @property
    def calculate_couchbase_resources(self):
        """
        Return a dictionary containing couchbase resource information calculated
        Alpha
        :return: dict of node counts, memory/cpu quotas, requests and limits
        """
        # Ratios scale the baseline sizing: 14000 tps and 50M users are the
        # reference points the multipliers below were tuned against.
        tps = int(self.settings.get("EXPECTED_TRANSACTIONS_PER_SEC"))
        number_of_data_nodes = 0
        number_of_query_nodes = 0
        number_of_index_nodes = 0
        number_of_eventing_service_memory_nodes = 0
        user_ratio = int(
            self.settings.get("NUMBER_OF_EXPECTED_USERS")) / 50000000
        tps_ratio = tps / 14000
        # Each enabled auth flow adds its own load contribution per service.
        if self.settings.get(
                "USING_RESOURCE_OWNER_PASSWORD_CRED_GRANT_FLOW") == "Y":
            number_of_data_nodes += tps_ratio * 7 * user_ratio
            number_of_query_nodes += tps_ratio * 5 * user_ratio
            number_of_index_nodes += tps_ratio * 5 * user_ratio
            number_of_eventing_service_memory_nodes += tps_ratio * 4 * user_ratio
        if self.settings.get("USING_CODE_FLOW") == "Y":
            number_of_data_nodes += tps_ratio * 14 * user_ratio
            number_of_query_nodes += tps_ratio * 12 * user_ratio
            number_of_index_nodes += tps_ratio * 13 * user_ratio
            number_of_eventing_service_memory_nodes += tps_ratio * 7 * user_ratio
        if self.settings.get("USING_SCIM_FLOW") == "Y":
            number_of_data_nodes += tps_ratio * 7 * user_ratio
            number_of_query_nodes += tps_ratio * 5 * user_ratio
            number_of_index_nodes += tps_ratio * 5 * user_ratio
            number_of_eventing_service_memory_nodes += tps_ratio * 4 * user_ratio
        # Fill in storage sizes only when the user has not pre-set them.
        if not self.settings.get("COUCHBASE_GENERAL_STORAGE"):
            self.settings.set(
                "COUCHBASE_GENERAL_STORAGE",
                str(
                    int((tps_ratio *
                         (int(self.settings.get("NUMBER_OF_EXPECTED_USERS")) /
                          125000)) + 5)) + "Gi")
        if not self.settings.get("COUCHBASE_DATA_STORAGE"):
            self.settings.set(
                "COUCHBASE_DATA_STORAGE",
                str(
                    int((tps_ratio *
                         (int(self.settings.get("NUMBER_OF_EXPECTED_USERS")) /
                          100000)) + 5)) + "Gi")
        if not self.settings.get("COUCHBASE_INDEX_STORAGE"):
            self.settings.set(
                "COUCHBASE_INDEX_STORAGE",
                str(
                    int((tps_ratio *
                         (int(self.settings.get("NUMBER_OF_EXPECTED_USERS")) /
                          200000)) + 5)) + "Gi")
        if not self.settings.get("COUCHBASE_QUERY_STORAGE"):
            self.settings.set(
                "COUCHBASE_QUERY_STORAGE",
                str(
                    int((tps_ratio *
                         (int(self.settings.get("NUMBER_OF_EXPECTED_USERS")) /
                          200000)) + 5)) + "Gi")
        if not self.settings.get("COUCHBASE_ANALYTICS_STORAGE"):
            self.settings.set(
                "COUCHBASE_ANALYTICS_STORAGE",
                str(
                    int((tps_ratio *
                         (int(self.settings.get("NUMBER_OF_EXPECTED_USERS")) /
                          250000)) + 5)) + "Gi")
        # Explicit node-count settings override the calculated values.
        if self.settings.get("COUCHBASE_DATA_NODES"):
            number_of_data_nodes = self.settings.get("COUCHBASE_DATA_NODES")
        if self.settings.get("COUCHBASE_QUERY_NODES"):
            number_of_query_nodes = self.settings.get("COUCHBASE_QUERY_NODES")
        if self.settings.get("COUCHBASE_INDEX_NODES"):
            number_of_index_nodes = self.settings.get("COUCHBASE_INDEX_NODES")
        if self.settings.get("COUCHBASE_SEARCH_EVENTING_ANALYTICS_NODES"):
            number_of_eventing_service_memory_nodes = self.settings.get(
                "COUCHBASE_SEARCH_EVENTING_ANALYTICS_NODES")
        data_service_memory_quota = (tps_ratio * 40000 * user_ratio) + 512
        data_memory_request = data_service_memory_quota / 4
        data_memory_limit = data_memory_request
        data_cpu_request = data_service_memory_quota / 4
        data_cpu_limit = data_cpu_request
        query_memory_request = data_memory_request
        query_memory_limit = query_memory_request
        query_cpu_request = data_service_memory_quota / 4
        query_cpu_limit = query_cpu_request
        index_service_memory_quota = (tps_ratio * 25000 * user_ratio) + 256
        index_memory_request = index_service_memory_quota / 3
        index_memory_limit = index_memory_request
        index_cpu_request = index_service_memory_quota / 3
        index_cpu_limit = index_cpu_request
        search_service_memory_quota = (tps_ratio * 4000 * user_ratio) + 256
        eventing_service_memory_quota = (tps_ratio * 4000 * user_ratio) + 256
        analytics_service_memory_quota = (tps_ratio * 4000 * user_ratio) + 1024
        search_eventing_analytics_memory_quota_sum = (
            search_service_memory_quota + eventing_service_memory_quota +
            analytics_service_memory_quota)
        search_eventing_analytics_memory_request = tps_ratio * 10000 * user_ratio
        search_eventing_analytics_memory_limit = search_eventing_analytics_memory_request
        search_eventing_analytics_cpu_request = tps_ratio * 6000 * user_ratio
        search_eventing_analytics_cpu_limit = search_eventing_analytics_cpu_request
        # Two services because query is assumed to take the same amount of mem quota
        total_mem_resources = \
            data_service_memory_quota + data_service_memory_quota + index_service_memory_quota + \
            search_eventing_analytics_memory_quota_sum
        total_cpu_resources = data_cpu_limit + query_cpu_limit + index_cpu_limit + search_eventing_analytics_cpu_limit
        resources_info = dict(
            COUCHBASE_DATA_NODES=int(number_of_data_nodes),
            COUCHBASE_QUERY_NODES=int(number_of_query_nodes),
            COUCHBASE_INDEX_NODES=int(number_of_index_nodes),
            COUCHBASE_SEARCH_EVENTING_ANALYTICS_NODES=int(
                number_of_eventing_service_memory_nodes),
            COUCHBASE_DATA_MEM_QUOTA=round(data_service_memory_quota),
            COUCHBASE_DATA_MEM_REQUEST=round(data_memory_request),
            COUCHBASE_DATA_MEM_LIMIT=round(data_memory_limit),
            COUCHBASE_DATA_CPU_REQUEST=round(data_cpu_request),
            COUCHBASE_DATA_CPU_LIMIT=round(data_cpu_limit),
            COUCHBASE_QUERY_MEM_QUOTA=round(data_service_memory_quota),
            COUCHBASE_QUERY_MEM_REQUEST=round(query_memory_request),
            COUCHBASE_QUERY_MEM_LIMIT=round(query_memory_limit),
            COUCHBASE_QUERY_CPU_REQUEST=round(query_cpu_request),
            COUCHBASE_QUERY_CPU_LIMIT=round(query_cpu_limit),
            COUCHBASE_INDEX_MEM_QUOTA=round(index_service_memory_quota),
            COUCHBASE_INDEX_MEM_REQUEST=round(index_memory_request),
            COUCHBASE_INDEX_MEM_LIMIT=round(index_memory_limit),
            COUCHBASE_INDEX_CPU_REQUEST=round(index_cpu_request),
            COUCHBASE_INDEX_CPU_LIMIT=round(index_cpu_limit),
            COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_QUOTA=round(
                search_service_memory_quota),
            COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_REQUEST=round(
                search_eventing_analytics_memory_request),
            COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_LIMIT=round(
                search_eventing_analytics_memory_limit),
            COUCHBASE_SEARCH_EVENTING_ANALYTICS_CPU_REQUEST=round(
                search_eventing_analytics_cpu_request),
            COUCHBASE_SEARCH_EVENTING_ANALYTICS_CPU_LIMIT=round(
                search_eventing_analytics_cpu_limit),
            TOTAL_RAM_NEEDED=round(total_mem_resources),
            TOTAL_CPU_NEEDED=round(total_cpu_resources))
        self.settings.set("COUCHBASE_DATA_NODES", number_of_data_nodes)
        self.settings.set("COUCHBASE_QUERY_NODES", number_of_query_nodes)
        self.settings.set("COUCHBASE_INDEX_NODES", number_of_index_nodes)
        self.settings.set("COUCHBASE_SEARCH_EVENTING_ANALYTICS_NODES",
                          number_of_eventing_service_memory_nodes)
        return resources_info

    def analyze_couchbase_cluster_yaml(self):
        """
        Dumps created calculated resources into couchbase.yaml file. ALso includes cloud zones.
        """
        parser = Parser("./couchbase/couchbase-cluster.yaml",
                        "CouchbaseCluster")
        parser["metadata"]["name"] = self.settings.get(
            "COUCHBASE_CLUSTER_NAME")
        number_of_buckets = 5
        if self.settings.get("DEPLOYMENT_ARCH") in ("microk8s", "minikube") or \
                self.settings.get("COUCHBASE_USE_LOW_RESOURCES") == "Y":
            # Low-resource setups run all services on a single node with
            # fixed small quotas and 5Gi volumes.
            resources_servers = [{
                "name": "allServices",
                "size": 1,
                "services":
                    ["data", "index", "query", "search", "eventing", "analytics"],
                "volumeMounts": {
                    "default": "pvc-general",
                    "data": "pvc-data",
                    "index": "pvc-index",
                    "analytics": ["pvc-analytics"]
                }
            }]
            data_service_memory_quota = 1024
            index_service_memory_quota = 512
            search_service_memory_quota = 512
            eventing_service_memory_quota = 512
            analytics_service_memory_quota = 1024
            memory_quota = 0
            self.settings.set("COUCHBASE_GENERAL_STORAGE", "5Gi")
            self.settings.set("COUCHBASE_DATA_STORAGE", "5Gi")
            self.settings.set("COUCHBASE_INDEX_STORAGE", "5Gi")
            self.settings.set("COUCHBASE_QUERY_STORAGE", "5Gi")
            self.settings.set("COUCHBASE_ANALYTICS_STORAGE", "5Gi")
        else:
            resources = self.calculate_couchbase_resources
            data_service_memory_quota = resources["COUCHBASE_DATA_MEM_QUOTA"]
            index_service_memory_quota = resources["COUCHBASE_INDEX_MEM_QUOTA"]
            search_service_memory_quota = resources[
                "COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_QUOTA"]
            eventing_service_memory_quota = resources[
                "COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_QUOTA"]
            analytics_service_memory_quota = resources[
                "COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_QUOTA"] + 1024
            memory_quota = ((resources["COUCHBASE_DATA_MEM_QUOTA"] - 500) /
                            number_of_buckets)
            zones_list = self.settings.get("NODES_ZONES")
            data_server_spec = create_server_spec_per_cb_service(
                zones_list, int(resources["COUCHBASE_DATA_NODES"]), "data",
                str(resources["COUCHBASE_DATA_MEM_REQUEST"]),
                str(resources["COUCHBASE_DATA_MEM_LIMIT"]),
                str(resources["COUCHBASE_DATA_CPU_REQUEST"]),
                str(resources["COUCHBASE_DATA_CPU_LIMIT"]))
            query_server_spec = create_server_spec_per_cb_service(
                zones_list, int(resources["COUCHBASE_QUERY_NODES"]), "query",
                str(resources["COUCHBASE_QUERY_MEM_REQUEST"]),
                str(resources["COUCHBASE_QUERY_MEM_LIMIT"]),
                str(resources["COUCHBASE_QUERY_CPU_REQUEST"]),
                str(resources["COUCHBASE_QUERY_CPU_LIMIT"]))
            index_server_spec = create_server_spec_per_cb_service(
                zones_list, int(resources["COUCHBASE_INDEX_NODES"]), "index",
                str(resources["COUCHBASE_INDEX_MEM_REQUEST"]),
                str(resources["COUCHBASE_INDEX_MEM_LIMIT"]),
                str(resources["COUCHBASE_INDEX_CPU_REQUEST"]),
                str(resources["COUCHBASE_INDEX_CPU_LIMIT"]))
            search_eventing_analytics_server_spec = create_server_spec_per_cb_service(
                zones_list,
                int(resources["COUCHBASE_SEARCH_EVENTING_ANALYTICS_NODES"]),
                "analytics",
                str(resources[
                    "COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_REQUEST"]),
                str(resources["COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_LIMIT"]
                    ),
                str(resources[
                    "COUCHBASE_SEARCH_EVENTING_ANALYTICS_CPU_REQUEST"]),
                str(resources["COUCHBASE_SEARCH_EVENTING_ANALYTICS_CPU_LIMIT"]
                    ))
            resources_servers = \
                data_server_spec + query_server_spec + index_server_spec + \
                search_eventing_analytics_server_spec
            # NOTE(review): zone handling sits in the calculated-resources
            # branch here — confirm low-resource installs never set NODES_ZONES.
            if self.settings.get("NODES_ZONES"):
                unique_zones = list(dict.fromkeys(
                    self.settings.get("NODES_ZONES")))
                parser["spec"]["serverGroups"] = unique_zones
        parser["spec"]["cluster"]["dataServiceMemoryQuota"] = str(
            data_service_memory_quota) + "Mi"
        parser["spec"]["cluster"]["indexServiceMemoryQuota"] = str(
            index_service_memory_quota) + "Mi"
        parser["spec"]["cluster"]["searchServiceMemoryQuota"] = str(
            search_service_memory_quota) + "Mi"
        parser["spec"]["cluster"]["eventingServiceMemoryQuota"] = str(
            eventing_service_memory_quota) + "Mi"
        parser["spec"]["cluster"]["analyticsServiceMemoryQuota"] = str(
            analytics_service_memory_quota) + "Mi"
        set_memory_for_buckets(memory_quota)
        parser["metadata"]["name"] = self.settings.get(
            "COUCHBASE_CLUSTER_NAME")
        parser["spec"]["servers"] = resources_servers
        # Patch each volume claim template with its configured storage size.
        number_of_volume_claims = len(parser["spec"]["volumeClaimTemplates"])
        for i in range(number_of_volume_claims):
            name = parser["spec"]["volumeClaimTemplates"][i]["metadata"][
                "name"]
            if name == "pvc-general":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"]["requests"]["storage"] = \
                    self.settings.get("COUCHBASE_GENERAL_STORAGE")
            elif name == "pvc-data":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"]["requests"]["storage"] = \
                    self.settings.get("COUCHBASE_DATA_STORAGE")
            elif name == "pvc-index":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"]["requests"]["storage"] = \
                    self.settings.get("COUCHBASE_INDEX_STORAGE")
            elif name == "pvc-query":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"]["requests"]["storage"] = \
                    self.settings.get("COUCHBASE_QUERY_STORAGE")
            elif name == "pvc-analytics":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"]["requests"]["storage"] = \
                    self.settings.get("COUCHBASE_ANALYTICS_STORAGE")
        parser.dump_it()

    def install(self):
        """
        Installs Couchbase
        """
        self.kubernetes.create_namespace(
            name=self.settings.get("CN_NAMESPACE"))
        if self.settings.get("COUCHBASE_CLUSTER_FILE_OVERRIDE") == "N":
            self.analyze_couchbase_cluster_yaml()
        cb_namespace = self.settings.get("COUCHBASE_NAMESPACE")
        # Adapt the storage class manifest to the target platform.
        storage_class_file_parser = Parser(self.storage_class_file,
                                           "StorageClass")
        if self.settings.get('DEPLOYMENT_ARCH') == "gke" or \
                self.settings.get('DEPLOYMENT_ARCH') == "aks" or \
                self.settings.get('DEPLOYMENT_ARCH') == "do":
            try:
                del storage_class_file_parser["parameters"]["encrypted"]
            except KeyError:
                logger.info("Key not found")
            storage_class_file_parser["parameters"][
                "type"] = self.settings.get("COUCHBASE_VOLUME_TYPE")
            if self.settings.get('DEPLOYMENT_ARCH') == "gke":
                storage_class_file_parser["provisioner"] = "kubernetes.io/gce-pd"
            elif self.settings.get('DEPLOYMENT_ARCH') == "aks":
                storage_class_file_parser[
                    "provisioner"] = "kubernetes.io/azure-disk"
            elif self.settings.get('DEPLOYMENT_ARCH') == "do":
                storage_class_file_parser[
                    "provisioner"] = "dobs.csi.digitalocean.com"
        elif self.settings.get('DEPLOYMENT_ARCH') == "microk8s":
            storage_class_file_parser["provisioner"] = "microk8s.io/hostpath"
            try:
                del storage_class_file_parser["allowVolumeExpansion"]
                del storage_class_file_parser["parameters"]
            except KeyError:
                logger.info("Key not found")
            storage_class_file_parser.dump_it()
        elif self.settings.get('DEPLOYMENT_ARCH') == "minikube":
            storage_class_file_parser[
                "provisioner"] = "k8s.io/minikube-hostpath"
            try:
                del storage_class_file_parser["allowVolumeExpansion"]
                del storage_class_file_parser["parameters"]
            except KeyError:
                logger.info("Key not found")
            storage_class_file_parser.dump_it()
        else:
            try:
                storage_class_file_parser["parameters"][
                    "type"] = self.settings.get("COUCHBASE_VOLUME_TYPE")
            except KeyError:
                logger.info("Key not found")
        storage_class_file_parser.dump_it()
        logger.info("Installing Couchbase...")
        # Generate TLS material once; reuse any pre-provided files.
        couchbase_crts_keys = Path("couchbase_crts_keys")
        if not couchbase_crts_keys.exists():
            os.mkdir(couchbase_crts_keys)
        custom_cb_ca_crt = Path("./couchbase_crts_keys/ca.crt")
        custom_cb_crt = Path("./couchbase_crts_keys/chain.pem")
        custom_cb_key = Path("./couchbase_crts_keys/pkey.key")
        if not custom_cb_ca_crt.exists() and not custom_cb_crt.exists(
        ) and not custom_cb_key.exists():
            setup_crts(
                ca_common_name=self.settings.get("COUCHBASE_CN"),
                cert_common_name="couchbase-server",
                san_list=self.settings.get("COUCHBASE_SUBJECT_ALT_NAME"),
                ca_cert_file="./couchbase_crts_keys/ca.crt",
                ca_key_file="./couchbase_crts_keys/ca.key",
                cert_file="./couchbase_crts_keys/chain.pem",
                key_file="./couchbase_crts_keys/pkey.key")
        self.kubernetes.create_namespace(name=cb_namespace)
        chain_pem_filepath = Path("./couchbase_crts_keys/chain.pem")
        pkey_filepath = Path("./couchbase_crts_keys/pkey.key")
        tls_cert_filepath = Path("./couchbase_crts_keys/tls-cert-file")
        tls_private_key_filepath = Path(
            "./couchbase_crts_keys/tls-private-key-file")
        ca_cert_filepath = Path("./couchbase_crts_keys/ca.crt")
        shutil.copyfile(ca_cert_filepath,
                        Path("./couchbase_crts_keys/couchbase.crt"))
        shutil.copyfile(chain_pem_filepath, tls_cert_filepath)
        shutil.copyfile(pkey_filepath, tls_private_key_filepath)
        encoded_ca_crt_string = self.settings.get("COUCHBASE_CRT")
        if not encoded_ca_crt_string:
            with open(ca_cert_filepath) as content_file:
                ca_crt_content = content_file.read()
                encoded_ca_crt_bytes = base64.b64encode(
                    ca_crt_content.encode("utf-8"))
                encoded_ca_crt_string = str(encoded_ca_crt_bytes, "utf-8")
            self.settings.set("COUCHBASE_CRT", encoded_ca_crt_string)
        with open(chain_pem_filepath) as content_file:
            chain_pem_content = content_file.read()
            encoded_chain_bytes = base64.b64encode(
                chain_pem_content.encode("utf-8"))
            encoded_chain_string = str(encoded_chain_bytes, "utf-8")
        with open(pkey_filepath) as content_file:
            pkey_content = content_file.read()
            encoded_pkey_bytes = base64.b64encode(pkey_content.encode("utf-8"))
            encoded_pkey_string = str(encoded_pkey_bytes, "utf-8")
        self.kubernetes.patch_or_create_namespaced_secret(
            name="couchbase-server-tls",
            namespace=cb_namespace,
            literal=chain_pem_filepath.name,
            value_of_literal=encoded_chain_string,
            second_literal=pkey_filepath.name,
            value_of_second_literal=encoded_pkey_string)
        self.kubernetes.patch_or_create_namespaced_secret(
            name="couchbase-operator-tls",
            namespace=cb_namespace,
            literal=ca_cert_filepath.name,
            value_of_literal=encoded_ca_crt_string)
        encoded_cb_super_user_bytes = base64.b64encode(
            self.settings.get("COUCHBASE_SUPERUSER").encode("utf-8"))
        encoded_cb_super_user_string = str(encoded_cb_super_user_bytes,
                                           "utf-8")
        encoded_cb_pass_bytes = base64.b64encode(
            self.settings.get("COUCHBASE_PASSWORD").encode("utf-8"))
        encoded_cb_pass_string = str(encoded_cb_pass_bytes, "utf-8")
        encoded_cb_super_pass_bytes = base64.b64encode(
            self.settings.get("COUCHBASE_SUPERUSER_PASSWORD").encode("utf-8"))
        encoded_cb_super_pass_string = str(encoded_cb_super_pass_bytes,
                                           "utf-8")
        self.create_couchbase_gluu_cert_pass_secrets(
            encoded_ca_crt_string, encoded_cb_pass_string,
            encoded_cb_super_pass_string)
        self.kubernetes.patch_or_create_namespaced_secret(
            name="gluu-couchbase-user-password",
            namespace=self.settings.get("COUCHBASE_NAMESPACE"),
            literal="password",
            value_of_literal=encoded_cb_pass_string)
        # Render the operator deployment manifest via the packaged cbopcfg tool.
        command = "./{}/bin/cbopcfg -backup=true -namespace={}".format(
            self.couchbase_source_file,
            self.settings.get("COUCHBASE_NAMESPACE"))
        exec_cmd(command, output_file=self.couchbase_operator_dac_file)
        couchbase_cluster_parser = Parser(self.couchbase_cluster_file,
                                          "CouchbaseCluster")
        couchbase_cluster_parser["spec"]["networking"]["tls"]["static"][
            "serverSecret"] = "couchbase-server-tls"
        couchbase_cluster_parser["spec"]["networking"]["tls"]["static"][
            "operatorSecret"] = "couchbase-operator-tls"
        try:
            couchbase_cluster_parser["spec"]["security"]["rbac"]["selector"]["matchLabels"]["cluster"] = \
                self.settings.get("COUCHBASE_CLUSTER_NAME")
            couchbase_cluster_parser["spec"]["security"]["rbac"][
                "managed"] = True
        except KeyError:
            # Without managed RBAC the gluu user cannot be created, so abort.
            logger.error(
                "rbac section is missing or incorrect in couchbase-cluster.yaml."
                " Please set spec --> security --> rbac --> managed : true"
                " and set spec --> security --> rbac --> selector --> matchLabels --> "
                "cluster --> to your cluster name")
            logger.info(
                "As a result of the above the installation will exit "
                "as the gluu user will not be created causing the communication between "
                "Gluu server and Couchbase to fail.")
            sys.exit()
        if self.settings.get("DEPLOYMENT_ARCH") == "local":
            volume_claims = couchbase_cluster_parser["spec"][
                "volumeClaimTemplates"]
            for i, volume_claim in enumerate(volume_claims):
                couchbase_cluster_parser["spec"]["volumeClaimTemplates"][i]["spec"]["storageClassName"] = \
                    "openebs-hostpath"
        couchbase_cluster_parser.dump_it()
        # Order matters: CRDs and operator first, then secrets, storage class
        # and the cluster/bucket custom resources.
        self.kubernetes.create_objects_from_dict(
            self.couchbase_custom_resource_definition_file,
            namespace=cb_namespace)
        self.kubernetes.create_objects_from_dict(
            self.couchbase_operator_dac_file, namespace=cb_namespace)
        self.kubernetes.check_pods_statuses(cb_namespace,
                                            "app=couchbase-operator", 700)
        self.kubernetes.patch_or_create_namespaced_secret(
            name="cb-auth",
            namespace=cb_namespace,
            literal="username",
            value_of_literal=encoded_cb_super_user_string,
            second_literal="password",
            value_of_second_literal=encoded_cb_super_pass_string)
        self.kubernetes.create_objects_from_dict(self.storage_class_file,
                                                 namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_cluster_file,
            group="couchbase.com",
            version="v2",
            plural="couchbaseclusters",
            namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_buckets_file,
            group="couchbase.com",
            version="v2",
            plural="couchbasebuckets",
            namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_ephemeral_buckets_file,
            group="couchbase.com",
            version="v2",
            plural="couchbaseephemeralbuckets",
            namespace=cb_namespace)
        coucbase_group_parser = Parser(self.couchbase_group_file,
                                       "CouchbaseGroup")
        coucbase_group_parser["metadata"]["labels"]["cluster"] = \
            self.settings.get("COUCHBASE_CLUSTER_NAME")
        coucbase_group_parser.dump_it()
        coucbase_user_parser = Parser(self.couchbase_user_file,
                                      "CouchbaseUser")
        coucbase_user_parser["metadata"]["labels"]["cluster"] = \
            self.settings.get("COUCHBASE_CLUSTER_NAME")
        coucbase_user_parser.dump_it()
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_group_file,
            group="couchbase.com",
            version="v2",
            plural="couchbasegroups",
            namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_user_file,
            group="couchbase.com",
            version="v2",
            plural="couchbaseusers",
            namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_rolebinding_file,
            group="couchbase.com",
            version="v2",
            plural="couchbaserolebindings",
            namespace=cb_namespace)
        # Wait for every couchbase service pod to come up.
        self.kubernetes.check_pods_statuses(
            cb_namespace, "couchbase_service_analytics=enabled", 700)
        self.kubernetes.check_pods_statuses(cb_namespace,
                                            "couchbase_service_data=enabled",
                                            700)
        self.kubernetes.check_pods_statuses(
            cb_namespace, "couchbase_service_eventing=enabled", 700)
        self.kubernetes.check_pods_statuses(cb_namespace,
                                            "couchbase_service_index=enabled",
                                            700)
        self.kubernetes.check_pods_statuses(cb_namespace,
                                            "couchbase_service_query=enabled",
                                            700)
        self.kubernetes.check_pods_statuses(
            cb_namespace, "couchbase_service_search=enabled", 700)
        # Setup couchbase backups
        if self.settings.get("DEPLOYMENT_ARCH") not in ("microk8s",
                                                        "minikube"):
            self.setup_backup_couchbase()
        shutil.rmtree(self.couchbase_source_folder_pattern,
                      ignore_errors=True)
        if self.settings.get("DEPLOY_MULTI_CLUSTER") == "Y":
            logger.info(
                "Setup XDCR between the running Gluu couchbase cluster and this one"
            )

    def uninstall(self):
        """
        Uninstalls couchbase
        """
        logger.info("Deleting Couchbase...")
        self.kubernetes.delete_storage_class("couchbase-sc")
        self.kubernetes.delete_custom_resource(
            "couchbaseclusters.couchbase.com")
        self.kubernetes.delete_validating_webhook_configuration(
            "couchbase-operator-admission")
        self.kubernetes.delete_mutating_webhook_configuration(
            "couchbase-operator-admission")
        self.kubernetes.delete_cluster_role_binding(
            "couchbase-operator-admission")
        self.kubernetes.delete_cluster_role("couchbase-operator-admission")
        self.kubernetes.delete_role("couchbase-operator",
                                    self.settings.get("COUCHBASE_NAMESPACE"))
        self.kubernetes.delete_secret("cb-auth",
                                      self.settings.get("COUCHBASE_NAMESPACE"))
        self.kubernetes.delete_secret("gluu-couchbase-user-password",
                                      self.settings.get("COUCHBASE_NAMESPACE"))
        self.kubernetes.delete_deployment_using_name(
            "couchbase-operator", self.settings.get("COUCHBASE_NAMESPACE"))
        self.kubernetes.delete_role_binding(
            "couchbase-operator", self.settings.get("COUCHBASE_NAMESPACE"))
        self.kubernetes.delete_service_account(
            "couchbase-operator", self.settings.get("COUCHBASE_NAMESPACE"))
        self.kubernetes.delete_service(
            "couchbase-operator-admission",
            self.settings.get("COUCHBASE_NAMESPACE"))
        self.kubernetes.delete_deployment_using_name(
            "couchbase-operator-admission",
            self.settings.get("COUCHBASE_NAMESPACE"))
        self.kubernetes.delete_service(
            "couchbase-operator", self.settings.get("COUCHBASE_NAMESPACE"))
        self.kubernetes.delete_custom_resource(
            "couchbasebackuprestores.couchbase.com")
        self.kubernetes.delete_custom_resource(
            "couchbasebackups.couchbase.com")
        self.kubernetes.delete_custom_resource(
            "couchbasebuckets.couchbase.com")
        self.kubernetes.delete_custom_resource(
            "couchbaseephemeralbuckets.couchbase.com")
        self.kubernetes.delete_custom_resource(
            "couchbasereplications.couchbase.com")
        self.kubernetes.delete_custom_resource(
            "couchbaserolebindings.couchbase.com")
        self.kubernetes.delete_custom_resource("couchbasegroups.couchbase.com")
        self.kubernetes.delete_custom_resource(
            "couchbasememcachedbuckets.couchbase.com")
        self.kubernetes.delete_custom_resource("couchbaseusers.couchbase.com")
        self.kubernetes.delete_service_account(
            "couchbase-operator-admission",
            self.settings.get("COUCHBASE_NAMESPACE"))
        self.kubernetes.delete_secret("couchbase-operator-admission",
                                      self.settings.get("COUCHBASE_NAMESPACE"))
        self.kubernetes.delete_secret("couchbase-operator-tls",
                                      self.settings.get("COUCHBASE_NAMESPACE"))
        shutil.rmtree(Path("./couchbase-source-folder"), ignore_errors=True)
class Couchbase(object):
    """Install, size, and uninstall the Couchbase Autonomous Operator and a
    Couchbase cluster used as the Gluu persistence backend.

    Manifest templates under ``./couchbase/`` are patched in place (via
    ``Parser``) and then applied to Kubernetes. The operator package tarball
    is expected in the working directory when a fresh install is requested.
    """

    def __init__(self):
        self.settings = ValuesHandler()
        self.kubernetes = Kubernetes()
        # Manifest templates patched and applied during install().
        self.storage_class_file = Path("./couchbase/storageclasses.yaml")
        self.couchbase_cluster_file = Path(
            "./couchbase/couchbase-cluster.yaml")
        self.couchbase_buckets_file = Path(
            "./couchbase/couchbase-buckets.yaml")
        self.couchbase_group_file = Path("./couchbase/couchbase-group.yaml")
        self.couchbase_user_file = Path("./couchbase/couchbase-user.yaml")
        self.couchbase_rolebinding_file = Path(
            "./couchbase/couchbase-rolebinding.yaml")
        self.couchbase_ephemeral_buckets_file = Path(
            "./couchbase/couchbase-ephemeral-buckets.yaml")
        # NOTE: property access with side effects — extracts the operator
        # tarball (when installing) and yields (tar file, extracted folder).
        self.couchbase_source_folder_pattern, self.couchbase_source_file = self.get_couchbase_files
        # Manifests located inside / generated into the extracted package.
        self.couchbase_custom_resource_definition_file = self.couchbase_source_file.joinpath(
            "crd.yaml")
        self.couchbase_operator_dac_file = self.couchbase_source_file.joinpath(
            "operator_dac.yaml")
        self.couchbase_admission_file = self.couchbase_source_file.joinpath(
            "admission.yaml")
        self.couchbase_operator_backup_file = self.couchbase_source_file.joinpath(
            "operator_dac_backup.yaml")
        self.filename = ""
        # True when a 2.0.x operator package was detected; gates the legacy
        # cbopcfg invocation and skips admission/backup manifests.
        # @TODO: Remove flag after depreciation of couchbase operator 2.0
        self.old_couchbase = False

    @property
    def get_couchbase_files(self):
        """Locate and extract the Couchbase operator package.

        Exits the process when the package is missing or older than 2.0;
        sets ``self.old_couchbase`` for 2.0.x packages.

        :return: tuple of (operator tar file path, extracted source folder
            path); ``(Path("."), Path("."))`` when install is not requested.
        """
        if self.settings.get("installer-settings.couchbase.install"):
            couchbase_tar_pattern = "couchbase-autonomous-operator-kubernetes_*.tar.gz"
            directory = Path('.')
            try:
                # First match wins; IndexError below means no package found.
                couchbase_tar_file = list(
                    directory.glob(couchbase_tar_pattern))[0]
                if "_1." in str(couchbase_tar_file.resolve()):
                    logger.fatal(
                        "Couchbase Autonomous Operator version must be > 2.0")
                    sys.exit()
                # @TODO: Remove condition and underlying lines after depreciation of couchbase operator 2.0
                if "_2.0" in str(couchbase_tar_file.resolve()):
                    logger.warning(
                        "An newer version of the couchbase operator exists. "
                        "Please consider canceling out and using it.https://www.couchbase.com/downloads"
                    )
                    self.old_couchbase = True
            except IndexError:
                logger.fatal("Couchbase package not found.")
                logger.info(
                    "Please download the couchbase kubernetes package and place it inside the same directory "
                    "containing the pygluu-kubernetes.pyz script.https://www.couchbase.com/downloads"
                )
                sys.exit()
            extract_couchbase_tar(couchbase_tar_file)
            couchbase_source_folder_pattern = "./couchbase-source-folder/couchbase-autonomous-operator-kubernetes_*"
            couchbase_source_folder = list(
                directory.glob(couchbase_source_folder_pattern))[0]
            return couchbase_tar_file, couchbase_source_folder
        # Couchbase is installed.
        return Path("."), Path(".")

    def create_couchbase_gluu_cert_pass_secrets(self, encoded_ca_crt_string,
                                                encoded_cb_pass_string,
                                                encoded_cb_super_pass_string):
        """Create/patch secrets holding the Couchbase CA cert and passwords
        in the Gluu namespace.

        :param encoded_ca_crt_string: base64-encoded Couchbase CA certificate
        :param encoded_cb_pass_string: base64-encoded Couchbase user password
        :param encoded_cb_super_pass_string: base64-encoded Couchbase
            superuser password
        """
        # Remove this if its not needed
        self.kubernetes.patch_or_create_namespaced_secret(
            name="cb-crt",
            namespace=self.settings.get("installer-settings.namespace"),
            literal="couchbase.crt",
            value_of_literal=encoded_ca_crt_string)
        # Remove this if its not needed
        self.kubernetes.patch_or_create_namespaced_secret(
            name="cb-pass",
            namespace=self.settings.get("installer-settings.namespace"),
            literal="couchbase_password",
            value_of_literal=encoded_cb_pass_string)
        self.kubernetes.patch_or_create_namespaced_secret(
            name="cb-super-pass",
            namespace=self.settings.get("installer-settings.namespace"),
            literal="couchbase_superuser_password",
            value_of_literal=encoded_cb_super_pass_string)

    def setup_backup_couchbase(self):
        """Configure scheduled Couchbase backups by patching the
        CouchbaseBackup manifest and creating the custom object."""
        couchbase_backup_file = Path(
            "./couchbase/backup/couchbase-backup.yaml")
        parser = Parser(couchbase_backup_file, "CouchbaseBackup")
        parser["spec"]["full"]["schedule"] = self.settings.get(
            "installer-settings.couchbase.backup.fullSchedule")
        parser["spec"]["incremental"]["schedule"] = self.settings.get(
            "installer-settings.couchbase.backup.incrementalSchedule")
        parser["spec"]["backupRetention"] = self.settings.get(
            "installer-settings.couchbase.backup.retentionTime")
        parser["spec"]["size"] = self.settings.get(
            "installer-settings.couchbase.backup.storageSize")
        parser.dump_it()
        self.kubernetes.create_namespaced_custom_object(
            filepath=couchbase_backup_file,
            group="couchbase.com",
            version="v2",
            plural="couchbasebackups",
            namespace=self.settings.get(
                "installer-settings.couchbase.namespace"))

    @property
    def calculate_couchbase_resources(self):
        """Estimate Couchbase node counts, memory quotas, and CPU/memory
        requests from expected TPS and user count.

        Also fills in any unset ``CN_COUCHBASE_*_STORAGE`` settings and
        persists the computed node counts back into settings.

        Alpha @todo: switch to preset values based on ranges for TPS and amount of users
        :return: dict of resource figures keyed by CN_COUCHBASE_* /
            COUCHBASE_* / TOTAL_* names
        """
        tps = int(self.settings.get("CN_EXPECTED_TRANSACTIONS_PER_SEC"))
        number_of_data_nodes = 0
        number_of_query_nodes = 0
        number_of_index_nodes = 0
        number_of_eventing_service_memory_nodes = 0
        # Ratios normalize against a 50M-user / 14k-TPS reference deployment.
        user_ratio = int(
            self.settings.get("CN_NUMBER_OF_EXPECTED_USERS")) / 50000000
        tps_ratio = tps / 14000

        # Each enabled auth flow adds its own weighted node demand.
        if self.settings.get(
                "CN_USING_RESOURCE_OWNER_PASSWORD_CRED_GRANT_FLOW") == "Y":
            number_of_data_nodes += tps_ratio * 7 * user_ratio
            number_of_query_nodes += tps_ratio * 5 * user_ratio
            number_of_index_nodes += tps_ratio * 5 * user_ratio
            number_of_eventing_service_memory_nodes += tps_ratio * 4 * user_ratio

        if self.settings.get("CN_USING_CODE_FLOW") == "Y":
            number_of_data_nodes += tps_ratio * 14 * user_ratio
            number_of_query_nodes += tps_ratio * 12 * user_ratio
            number_of_index_nodes += tps_ratio * 13 * user_ratio
            number_of_eventing_service_memory_nodes += tps_ratio * 7 * user_ratio

        if self.settings.get("CN_USING_SCIM_FLOW") == "Y":
            number_of_data_nodes += tps_ratio * 7 * user_ratio
            number_of_query_nodes += tps_ratio * 5 * user_ratio
            number_of_index_nodes += tps_ratio * 5 * user_ratio
            number_of_eventing_service_memory_nodes += tps_ratio * 4 * user_ratio

        # Default storage sizes (Gi) derived from user count, only when unset.
        if not self.settings.get("CN_COUCHBASE_GENERAL_STORAGE"):
            self.settings.set(
                "CN_COUCHBASE_GENERAL_STORAGE", str(
                    int((tps_ratio * (int(self.settings.get("CN_NUMBER_OF_EXPECTED_USERS")) / 125000)) + 5)) + "Gi")
        if not self.settings.get("CN_COUCHBASE_DATA_STORAGE"):
            self.settings.set(
                "CN_COUCHBASE_DATA_STORAGE", str(
                    int((tps_ratio * (int(self.settings.get("CN_NUMBER_OF_EXPECTED_USERS")) / 100000)) + 5)) + "Gi")
        if not self.settings.get("CN_COUCHBASE_INDEX_STORAGE"):
            self.settings.set(
                "CN_COUCHBASE_INDEX_STORAGE", str(
                    int((tps_ratio * (int(self.settings.get("CN_NUMBER_OF_EXPECTED_USERS")) / 200000)) + 5)) + "Gi")
        if not self.settings.get("CN_COUCHBASE_QUERY_STORAGE"):
            self.settings.set(
                "CN_COUCHBASE_QUERY_STORAGE", str(
                    int((tps_ratio * (int(self.settings.get("CN_NUMBER_OF_EXPECTED_USERS")) / 200000)) + 5)) + "Gi")
        if not self.settings.get("CN_COUCHBASE_ANALYTICS_STORAGE"):
            self.settings.set(
                "CN_COUCHBASE_ANALYTICS_STORAGE", str(
                    int((tps_ratio * (int(self.settings.get("CN_NUMBER_OF_EXPECTED_USERS")) / 250000)) + 5)) + "Gi")

        # Explicit settings override the computed node counts.
        if self.settings.get("CN_COUCHBASE_DATA_NODES"):
            number_of_data_nodes = self.settings.get("CN_COUCHBASE_DATA_NODES")
        if self.settings.get("CN_COUCHBASE_QUERY_NODES"):
            number_of_query_nodes = self.settings.get(
                "CN_COUCHBASE_QUERY_NODES")
        if self.settings.get("CN_COUCHBASE_INDEX_NODES"):
            number_of_index_nodes = self.settings.get(
                "CN_COUCHBASE_INDEX_NODES")
        if self.settings.get("CN_COUCHBASE_SEARCH_EVENTING_ANALYTICS_NODES"):
            number_of_eventing_service_memory_nodes = self.settings.get(
                "CN_COUCHBASE_SEARCH_EVENTING_ANALYTICS_NODES")

        # Memory quotas in Mi; requests/limits derived as fixed fractions.
        data_service_memory_quota = (tps_ratio * 40000 * user_ratio) + 512
        data_memory_request = data_service_memory_quota / 4
        data_memory_limit = data_memory_request
        data_cpu_request = data_service_memory_quota / 4
        data_cpu_limit = data_cpu_request

        # Query service mirrors the data service sizing.
        query_memory_request = data_memory_request
        query_memory_limit = query_memory_request
        query_cpu_request = data_service_memory_quota / 4
        query_cpu_limit = query_cpu_request

        index_service_memory_quota = (tps_ratio * 25000 * user_ratio) + 256
        index_memory_request = index_service_memory_quota / 3
        index_memory_limit = index_memory_request
        index_cpu_request = index_service_memory_quota / 3
        index_cpu_limit = index_cpu_request

        search_service_memory_quota = (tps_ratio * 4000 * user_ratio) + 256
        eventing_service_memory_quota = (tps_ratio * 4000 * user_ratio) + 256
        analytics_service_memory_quota = (tps_ratio * 4000 * user_ratio) + 1024

        search_eventing_analytics_memory_quota_sum = (
            search_service_memory_quota + eventing_service_memory_quota +
            analytics_service_memory_quota)
        search_eventing_analytics_memory_request = tps_ratio * 10000 * user_ratio
        search_eventing_analytics_memory_limit = search_eventing_analytics_memory_request
        search_eventing_analytics_cpu_request = tps_ratio * 6000 * user_ratio
        search_eventing_analytics_cpu_limit = search_eventing_analytics_cpu_request

        # Two services because query is assumed to take the same amount of mem quota
        total_mem_resources = \
            data_service_memory_quota + data_service_memory_quota + index_service_memory_quota + \
            search_eventing_analytics_memory_quota_sum

        total_cpu_resources = data_cpu_limit + query_cpu_limit + index_cpu_limit + search_eventing_analytics_cpu_limit

        resources_info = dict(
            CN_COUCHBASE_DATA_NODES=int(number_of_data_nodes),
            CN_COUCHBASE_QUERY_NODES=int(number_of_query_nodes),
            CN_COUCHBASE_INDEX_NODES=int(number_of_index_nodes),
            CN_COUCHBASE_SEARCH_EVENTING_ANALYTICS_NODES=int(
                number_of_eventing_service_memory_nodes),
            COUCHBASE_DATA_MEM_QUOTA=round(data_service_memory_quota),
            COUCHBASE_DATA_MEM_REQUEST=round(data_memory_request),
            COUCHBASE_DATA_MEM_LIMIT=round(data_memory_limit),
            COUCHBASE_DATA_CPU_REQUEST=round(data_cpu_request),
            COUCHBASE_DATA_CPU_LIMIT=round(data_cpu_limit),
            COUCHBASE_QUERY_MEM_QUOTA=round(data_service_memory_quota),
            COUCHBASE_QUERY_MEM_REQUEST=round(query_memory_request),
            COUCHBASE_QUERY_MEM_LIMIT=round(query_memory_limit),
            COUCHBASE_QUERY_CPU_REQUEST=round(query_cpu_request),
            COUCHBASE_QUERY_CPU_LIMIT=round(query_cpu_limit),
            COUCHBASE_INDEX_MEM_QUOTA=round(index_service_memory_quota),
            COUCHBASE_INDEX_MEM_REQUEST=round(index_memory_request),
            COUCHBASE_INDEX_MEM_LIMIT=round(index_memory_limit),
            COUCHBASE_INDEX_CPU_REQUEST=round(index_cpu_request),
            COUCHBASE_INDEX_CPU_LIMIT=round(index_cpu_limit),
            COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_QUOTA=round(
                search_service_memory_quota),
            COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_REQUEST=round(
                search_eventing_analytics_memory_request),
            COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_LIMIT=round(
                search_eventing_analytics_memory_limit),
            COUCHBASE_SEARCH_EVENTING_ANALYTICS_CPU_REQUEST=round(
                search_eventing_analytics_cpu_request),
            COUCHBASE_SEARCH_EVENTING_ANALYTICS_CPU_LIMIT=round(
                search_eventing_analytics_cpu_limit),
            TOTAL_RAM_NEEDED=round(total_mem_resources),
            TOTAL_CPU_NEEDED=round(total_cpu_resources))
        self.settings.set("CN_COUCHBASE_DATA_NODES", number_of_data_nodes)
        self.settings.set("CN_COUCHBASE_QUERY_NODES", number_of_query_nodes)
        self.settings.set("CN_COUCHBASE_INDEX_NODES", number_of_index_nodes)
        self.settings.set("CN_COUCHBASE_SEARCH_EVENTING_ANALYTICS_NODES",
                          number_of_eventing_service_memory_nodes)
        return resources_info

    def analyze_couchbase_cluster_yaml(self):
        """Patch couchbase-cluster.yaml with calculated resources, server
        specs, memory quotas, and volume claim sizes; also applies cloud
        zone server groups. Writes the file back in place.
        """
        parser = Parser("./couchbase/couchbase-cluster.yaml",
                        "CouchbaseCluster")
        parser["metadata"]["name"] = self.settings.get(
            "installer-settings.couchbase.clusterName")
        number_of_buckets = 5
        # Minimal single-node layout for local/test environments.
        # NOTE: "testEnviroment" spelling matches the settings schema key.
        if self.settings.get("global.storageClass.provisioner") in ("microk8s.io/hostpath", "k8s.io/minikube-hostpath") or \
                self.settings.get("global.cloud.testEnviroment"):
            resources_servers = [{
                "name": "allServices",
                "size": 1,
                "services":
                    ["data", "index", "query", "search", "eventing", "analytics"],
                "volumeMounts": {
                    "default": "pvc-general",
                    "data": "pvc-data",
                    "index": "pvc-index",
                    "analytics": ["pvc-analytics"]
                }
            }]
            data_service_memory_quota = 1024
            index_service_memory_quota = 512
            search_service_memory_quota = 512
            eventing_service_memory_quota = 512
            analytics_service_memory_quota = 1024
            memory_quota = 0
        else:
            # Production path: size per-service server specs from the
            # calculated resource figures.
            resources = self.calculate_couchbase_resources
            data_service_memory_quota = resources["COUCHBASE_DATA_MEM_QUOTA"]
            index_service_memory_quota = resources["COUCHBASE_INDEX_MEM_QUOTA"]
            search_service_memory_quota = resources[
                "COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_QUOTA"]
            eventing_service_memory_quota = resources[
                "COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_QUOTA"]
            analytics_service_memory_quota = resources[
                "COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_QUOTA"] + 1024
            # Per-bucket quota: data quota minus 500Mi overhead, split evenly.
            memory_quota = ((resources["COUCHBASE_DATA_MEM_QUOTA"] - 500) /
                            number_of_buckets)
            zones_list = self.settings.get("CN_NODES_ZONES")
            data_server_spec = create_server_spec_per_cb_service(
                zones_list, int(resources["CN_COUCHBASE_DATA_NODES"]), "data",
                str(resources["COUCHBASE_DATA_MEM_REQUEST"]),
                str(resources["COUCHBASE_DATA_MEM_LIMIT"]),
                str(resources["COUCHBASE_DATA_CPU_REQUEST"]),
                str(resources["COUCHBASE_DATA_CPU_LIMIT"]))
            query_server_spec = create_server_spec_per_cb_service(
                zones_list, int(resources["CN_COUCHBASE_QUERY_NODES"]),
                "query", str(resources["COUCHBASE_QUERY_MEM_REQUEST"]),
                str(resources["COUCHBASE_QUERY_MEM_LIMIT"]),
                str(resources["COUCHBASE_QUERY_CPU_REQUEST"]),
                str(resources["COUCHBASE_QUERY_CPU_LIMIT"]))
            index_server_spec = create_server_spec_per_cb_service(
                zones_list, int(resources["CN_COUCHBASE_INDEX_NODES"]),
                "index", str(resources["COUCHBASE_INDEX_MEM_REQUEST"]),
                str(resources["COUCHBASE_INDEX_MEM_LIMIT"]),
                str(resources["COUCHBASE_INDEX_CPU_REQUEST"]),
                str(resources["COUCHBASE_INDEX_CPU_LIMIT"]))
            search_eventing_analytics_server_spec = create_server_spec_per_cb_service(
                zones_list,
                int(resources["CN_COUCHBASE_SEARCH_EVENTING_ANALYTICS_NODES"]),
                "analytics",
                str(resources[
                    "COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_REQUEST"]),
                str(resources["COUCHBASE_SEARCH_EVENTING_ANALYTICS_MEM_LIMIT"]
                    ),
                str(resources[
                    "COUCHBASE_SEARCH_EVENTING_ANALYTICS_CPU_REQUEST"]),
                str(resources["COUCHBASE_SEARCH_EVENTING_ANALYTICS_CPU_LIMIT"]
                    ))
            resources_servers = \
                data_server_spec + query_server_spec + index_server_spec + \
                search_eventing_analytics_server_spec

        if self.settings.get("installer-settings.nodes.zones"):
            # dict.fromkeys de-duplicates while preserving zone order.
            unique_zones = list(
                dict.fromkeys(
                    self.settings.get("installer-settings.nodes.zones")))
            parser["spec"]["serverGroups"] = unique_zones
        parser["spec"]["cluster"]["dataServiceMemoryQuota"] = str(
            data_service_memory_quota) + "Mi"
        parser["spec"]["cluster"]["indexServiceMemoryQuota"] = str(
            index_service_memory_quota) + "Mi"
        parser["spec"]["cluster"]["searchServiceMemoryQuota"] = str(
            search_service_memory_quota) + "Mi"
        parser["spec"]["cluster"]["eventingServiceMemoryQuota"] = str(
            eventing_service_memory_quota) + "Mi"
        parser["spec"]["cluster"]["analyticsServiceMemoryQuota"] = str(
            analytics_service_memory_quota) + "Mi"

        set_memory_for_buckets(
            memory_quota,
            self.settings.get("config.configmap.cnCouchbaseBucketPrefix"))
        parser["metadata"]["name"] = self.settings.get(
            "installer-settings.couchbase.clusterName")
        parser["spec"]["servers"] = resources_servers

        # All known PVC templates are pinned to 5Gi.
        number_of_volume_claims = len(parser["spec"]["volumeClaimTemplates"])
        for i in range(number_of_volume_claims):
            name = parser["spec"]["volumeClaimTemplates"][i]["metadata"][
                "name"]
            if name == "pvc-general":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"][
                    "requests"]["storage"] = "5Gi"
            elif name == "pvc-data":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"][
                    "requests"]["storage"] = "5Gi"
            elif name == "pvc-index":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"][
                    "requests"]["storage"] = "5Gi"
            elif name == "pvc-query":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"][
                    "requests"]["storage"] = "5Gi"
            elif name == "pvc-analytics":
                parser["spec"]["volumeClaimTemplates"][i]["spec"]["resources"][
                    "requests"]["storage"] = "5Gi"
        parser.dump_it()

    def install(self):
        """Install the Couchbase operator and cluster.

        End-to-end flow: patch manifests, prepare TLS material and secrets,
        generate operator/admission/backup manifests via cbopcfg, apply all
        objects, wait for pods, then configure buckets/group/user/rolebinding
        and (for non-local storage) scheduled backups.
        """
        self.kubernetes.create_namespace(
            name=self.settings.get("installer-settings.namespace"))
        if not self.settings.get(
                "installer-settings.couchbase.customFileOverride"):
            try:
                self.analyze_couchbase_cluster_yaml()
            except Exception:
                # TODO remove this exception
                logger.error(
                    "Looks like some of the couchbase files were misconfigured. "
                    "If you wish to override the couchbase files please set "
                    " installer-settings.couchbase.customFileOverride to true`"
                )
                sys.exit()
        cb_namespace = self.settings.get(
            "installer-settings.couchbase.namespace")
        # --- StorageClass: adjust per provisioner ---
        storage_class_file_parser = Parser(self.storage_class_file,
                                           "StorageClass")
        if self.settings.get('global.storageClass.provisioner') in (
                "kubernetes.io/gce-pd", "dobs.csi.digitalocean.com",
                "kubernetes.io/azure-disk"):
            # These provisioners do not take the "encrypted" parameter.
            try:
                del storage_class_file_parser["parameters"]["encrypted"]
            except KeyError:
                logger.info("Key not found")
            storage_class_file_parser["parameters"]["type"] = \
                self.settings.get("installer-settings.couchbase.volumeType")
        storage_class_file_parser["provisioner"] = self.settings.get(
            'global.storageClass.provisioner')
        if self.settings.get(
                'global.storageClass.provisioner') == "microk8s.io/hostpath":
            # hostpath provisioners support neither expansion nor parameters.
            try:
                del storage_class_file_parser["allowVolumeExpansion"]
                del storage_class_file_parser["parameters"]
            except KeyError:
                logger.info("Key not found")
            storage_class_file_parser.dump_it()
        elif self.settings.get('global.storageClass.provisioner'
                               ) == "k8s.io/minikube-hostpath":
            try:
                del storage_class_file_parser["allowVolumeExpansion"]
                del storage_class_file_parser["parameters"]
            except KeyError:
                logger.info("Key not found")
            storage_class_file_parser.dump_it()
        else:
            try:
                storage_class_file_parser["parameters"]["type"] = \
                    self.settings.get("installer-settings.couchbase.volumeType")
            except KeyError:
                logger.info("Key not found")
            storage_class_file_parser.dump_it()

        logger.info("Installing Couchbase...")
        # --- TLS material: reuse existing certs if present, else generate ---
        couchbase_crts_keys = Path("couchbase_crts_keys")
        if not couchbase_crts_keys.exists():
            os.mkdir(couchbase_crts_keys)
        custom_cb_ca_crt = Path("./couchbase_crts_keys/ca.crt")
        custom_cb_crt = Path("./couchbase_crts_keys/chain.pem")
        custom_cb_key = Path("./couchbase_crts_keys/pkey.key")
        if not custom_cb_ca_crt.exists() and not custom_cb_crt.exists(
        ) and not custom_cb_key.exists():
            setup_crts(
                ca_common_name=self.settings.get(
                    "installer-settings.couchbase.commonName"),
                cert_common_name="couchbase-server",
                san_list=self.settings.get(
                    "installer-settings.couchbase.subjectAlternativeName"),
                ca_cert_file="./couchbase_crts_keys/ca.crt",
                ca_key_file="./couchbase_crts_keys/ca.key",
                cert_file="./couchbase_crts_keys/chain.pem",
                key_file="./couchbase_crts_keys/pkey.key")
        labels = {"app": "gluu-couchbase"}
        if self.settings.get("global.istio.enabled"):
            labels = {"app": "couchbase", "istio-injection": "enabled"}
        self.kubernetes.create_namespace(name=cb_namespace, labels=labels)
        chain_pem_filepath = Path("./couchbase_crts_keys/chain.pem")
        pkey_filepath = Path("./couchbase_crts_keys/pkey.key")
        tls_cert_filepath = Path("./couchbase_crts_keys/tls-cert-file")
        tls_private_key_filepath = Path(
            "./couchbase_crts_keys/tls-private-key-file")
        ca_cert_filepath = Path("./couchbase_crts_keys/ca.crt")
        shutil.copyfile(ca_cert_filepath,
                        Path("./couchbase_crts_keys/couchbase.crt"))
        shutil.copyfile(chain_pem_filepath, tls_cert_filepath)
        shutil.copyfile(pkey_filepath, tls_private_key_filepath)

        # Base64-encode cert/keys/passwords for use as Kubernetes secrets.
        encoded_ca_crt_string = self.settings.get(
            "config.configmap.cnCouchbaseCrt")
        if encoded_ca_crt_string in (None, ''):
            with open(ca_cert_filepath) as content_file:
                ca_crt_content = content_file.read()
                encoded_ca_crt_bytes = base64.b64encode(
                    ca_crt_content.encode("utf-8"))
                encoded_ca_crt_string = str(encoded_ca_crt_bytes, "utf-8")
            self.settings.set("config.configmap.cnCouchbaseCrt",
                              encoded_ca_crt_string)

        with open(chain_pem_filepath) as content_file:
            chain_pem_content = content_file.read()
            encoded_chain_bytes = base64.b64encode(
                chain_pem_content.encode("utf-8"))
            encoded_chain_string = str(encoded_chain_bytes, "utf-8")

        with open(pkey_filepath) as content_file:
            pkey_content = content_file.read()
            encoded_pkey_bytes = base64.b64encode(pkey_content.encode("utf-8"))
            encoded_pkey_string = str(encoded_pkey_bytes, "utf-8")

        self.kubernetes.patch_or_create_namespaced_secret(
            name="couchbase-server-tls",
            namespace=cb_namespace,
            literal=chain_pem_filepath.name,
            value_of_literal=encoded_chain_string,
            second_literal=pkey_filepath.name,
            value_of_second_literal=encoded_pkey_string)
        self.kubernetes.patch_or_create_namespaced_secret(
            name="couchbase-operator-tls",
            namespace=cb_namespace,
            literal=ca_cert_filepath.name,
            value_of_literal=encoded_ca_crt_string)

        encoded_cb_super_user_bytes = base64.b64encode(
            self.settings.get("config.configmap.cnCouchbaseSuperUser").encode(
                "utf-8"))
        encoded_cb_super_user_string = str(encoded_cb_super_user_bytes,
                                           "utf-8")
        encoded_cb_pass_bytes = base64.b64encode(
            self.settings.get("config.configmap.cnCouchbasePassword").encode(
                "utf-8"))
        encoded_cb_pass_string = str(encoded_cb_pass_bytes, "utf-8")
        encoded_cb_super_pass_bytes = base64.b64encode(
            self.settings.get("config.configmap.cnCouchbaseSuperUserPassword").
            encode("utf-8"))
        encoded_cb_super_pass_string = str(encoded_cb_super_pass_bytes,
                                           "utf-8")

        self.create_couchbase_gluu_cert_pass_secrets(
            encoded_ca_crt_string, encoded_cb_pass_string,
            encoded_cb_super_pass_string)
        self.kubernetes.patch_or_create_namespaced_secret(
            name="gluu-couchbase-user-password",
            namespace=self.settings.get(
                "installer-settings.couchbase.namespace"),
            literal="password",
            value_of_literal=encoded_cb_pass_string)

        # --- Generate operator manifests with the packaged cbopcfg tool ---
        admission_command = "./{}/bin/cbopcfg generate admission --namespace {}".format(
            self.couchbase_source_file,
            self.settings.get("installer-settings.couchbase.namespace"))
        operator_command = "./{}/bin/cbopcfg generate operator --namespace {}".format(
            self.couchbase_source_file,
            self.settings.get("installer-settings.couchbase.namespace"))
        backup_command = "./{}/bin/cbopcfg generate backup --namespace {}".format(
            self.couchbase_source_file,
            self.settings.get("installer-settings.couchbase.namespace"))
        # @TODO: Remove condition and operator_command override after depreciation of couchbase operator 2.0
        if self.old_couchbase:
            operator_command = "./{}/bin/cbopcfg -backup=true -namespace={}".format(
                self.couchbase_source_file,
                self.settings.get("installer-settings.couchbase.namespace"))
        exec_cmd(operator_command,
                 output_file=self.couchbase_operator_dac_file)
        # @TODO: Remove only the condition after depreciation of couchbase operator 2.0
        if not self.old_couchbase:
            exec_cmd(backup_command,
                     output_file=self.couchbase_operator_backup_file)
            exec_cmd(admission_command,
                     output_file=self.couchbase_admission_file)

        # --- Patch the CouchbaseCluster manifest ---
        couchbase_cluster_parser = Parser(self.couchbase_cluster_file,
                                          "CouchbaseCluster")
        couchbase_cluster_parser["spec"]["networking"]["tls"]["static"][
            "serverSecret"] = "couchbase-server-tls"
        couchbase_cluster_parser["spec"]["networking"]["tls"]["static"][
            "operatorSecret"] = "couchbase-operator-tls"
        if self.settings.get("global.istio.enabled"):
            couchbase_cluster_parser["spec"]["networking"][
                "networkPlatform"] = "Istio"
        try:
            couchbase_cluster_parser["spec"]["security"]["rbac"]["selector"]["matchLabels"]["cluster"] = \
                self.settings.get("installer-settings.couchbase.clusterName")
            couchbase_cluster_parser["spec"]["security"]["rbac"][
                "managed"] = True
        except KeyError:
            logger.error(
                "rbac section is missing or incorrect in couchbase-cluster.yaml."
                " Please set spec --> security --> rbac --> managed : true"
                " and set spec --> security --> rbac --> selector --> matchLabels --> "
                "cluster --> to your cluster name")
            logger.info(
                "As a result of the above the installation will exit "
                "as the gluu user will not be created causing the communication between "
                "Gluu server and Couchbase to fail.")
            sys.exit()
        if "localOpenEbsHostPathDynamic" in self.settings.get(
                "installer-settings.volumeProvisionStrategy"):
            volume_claims = couchbase_cluster_parser["spec"][
                "volumeClaimTemplates"]
            for i, volume_claim in enumerate(volume_claims):
                couchbase_cluster_parser["spec"]["volumeClaimTemplates"][i]["spec"]["storageClassName"] = \
                    "openebs-hostpath"
        couchbase_cluster_parser.dump_it()

        # --- Apply manifests and wait for the operator, then the cluster ---
        self.kubernetes.create_objects_from_dict(
            self.couchbase_custom_resource_definition_file,
            namespace=cb_namespace)
        self.kubernetes.create_objects_from_dict(
            self.couchbase_operator_dac_file, namespace=cb_namespace)
        # @TODO: Remove only the condition after depreciation of couchbase operator 2.0
        if not self.old_couchbase:
            self.kubernetes.create_objects_from_dict(
                self.couchbase_admission_file, namespace=cb_namespace)
            self.kubernetes.create_objects_from_dict(
                self.couchbase_operator_backup_file, namespace=cb_namespace)
        self.kubernetes.check_pods_statuses(cb_namespace,
                                            "app=couchbase-operator", 700)
        self.kubernetes.patch_or_create_namespaced_secret(
            name="cb-auth",
            namespace=cb_namespace,
            literal="username",
            value_of_literal=encoded_cb_super_user_string,
            second_literal="password",
            value_of_second_literal=encoded_cb_super_pass_string)

        self.kubernetes.create_objects_from_dict(self.storage_class_file,
                                                 namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_cluster_file,
            group="couchbase.com",
            version="v2",
            plural="couchbaseclusters",
            namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_buckets_file,
            group="couchbase.com",
            version="v2",
            plural="couchbasebuckets",
            namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_ephemeral_buckets_file,
            group="couchbase.com",
            version="v2",
            plural="couchbaseephemeralbuckets",
            namespace=cb_namespace)

        # --- Group/user/rolebinding for the Gluu query user ---
        coucbase_group_parser = Parser(self.couchbase_group_file,
                                       "CouchbaseGroup")
        coucbase_group_parser["metadata"]["labels"]["cluster"] = \
            self.settings.get("installer-settings.couchbase.clusterName")
        permissions = [
            "query_select", "query_update", "query_insert", "query_delete"
        ]
        # "" grants the role on the prefix bucket itself (no suffix).
        allbuckets = ["", "site", "user", "cache", "token", "session"]
        roles = []
        for permission in permissions:
            for bucket in allbuckets:
                bucket_name = self.settings.get(
                    "config.configmap.cnCouchbaseBucketPrefix")
                if bucket:
                    bucket_name = bucket_name + "_" + bucket
                roles.append({"name": permission, "bucket": bucket_name})
        coucbase_group_parser["spec"]["roles"] = roles
        coucbase_group_parser.dump_it()
        coucbase_user_parser = Parser(self.couchbase_user_file,
                                      "CouchbaseUser")
        coucbase_user_parser["metadata"]["labels"]["cluster"] = \
            self.settings.get("installer-settings.couchbase.clusterName")
        coucbase_user_parser.dump_it()
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_group_file,
            group="couchbase.com",
            version="v2",
            plural="couchbasegroups",
            namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_user_file,
            group="couchbase.com",
            version="v2",
            plural="couchbaseusers",
            namespace=cb_namespace)
        self.kubernetes.create_namespaced_custom_object(
            filepath=self.couchbase_rolebinding_file,
            group="couchbase.com",
            version="v2",
            plural="couchbaserolebindings",
            namespace=cb_namespace)
        # Wait (up to 700s each) for every Couchbase service to come up.
        self.kubernetes.check_pods_statuses(
            cb_namespace, "couchbase_service_analytics=enabled", 700)
        self.kubernetes.check_pods_statuses(cb_namespace,
                                            "couchbase_service_data=enabled",
                                            700)
        self.kubernetes.check_pods_statuses(
            cb_namespace, "couchbase_service_eventing=enabled", 700)
        self.kubernetes.check_pods_statuses(cb_namespace,
                                            "couchbase_service_index=enabled",
                                            700)
        self.kubernetes.check_pods_statuses(cb_namespace,
                                            "couchbase_service_query=enabled",
                                            700)
        self.kubernetes.check_pods_statuses(
            cb_namespace, "couchbase_service_search=enabled", 700)
        # Setup couchbase backups (skipped on local hostpath storage).
        if self.settings.get("global.storageClass.provisioner") not in (
                "microk8s.io/hostpath", "k8s.io/minikube-hostpath"):
            self.setup_backup_couchbase()
        shutil.rmtree(self.couchbase_source_folder_pattern,
                      ignore_errors=True)

    def uninstall(self):
        """Remove all Couchbase objects, CRDs, secrets, RBAC, and the
        extracted operator source folder."""
        logger.info("Deleting Couchbase...")
        self.kubernetes.delete_storage_class("couchbase-sc")
        self.kubernetes.delete_custom_resource(
            "couchbaseclusters.couchbase.com")
        self.kubernetes.delete_validating_webhook_configuration(
            "couchbase-operator-admission")
        self.kubernetes.delete_mutating_webhook_configuration(
            "couchbase-operator-admission")
        self.kubernetes.delete_cluster_role_binding(
            "couchbase-operator-admission")
        self.kubernetes.delete_cluster_role("couchbase-operator-admission")
        self.kubernetes.delete_role(
            "couchbase-operator",
            self.settings.get("installer-settings.couchbase.namespace"))
        self.kubernetes.delete_secret(
            "cb-auth",
            self.settings.get("installer-settings.couchbase.namespace"))
        self.kubernetes.delete_secret(
            "gluu-couchbase-user-password",
            self.settings.get("installer-settings.couchbase.namespace"))
        self.kubernetes.delete_deployment_using_name(
            "couchbase-operator",
            self.settings.get("installer-settings.couchbase.namespace"))
        self.kubernetes.delete_role_binding(
            "couchbase-operator",
            self.settings.get("installer-settings.couchbase.namespace"))
        self.kubernetes.delete_service_account(
            "couchbase-operator",
            self.settings.get("installer-settings.couchbase.namespace"))
        self.kubernetes.delete_service(
            "couchbase-operator-admission",
            self.settings.get("installer-settings.couchbase.namespace"))
        self.kubernetes.delete_deployment_using_name(
            "couchbase-operator-admission",
            self.settings.get("installer-settings.couchbase.namespace"))
        self.kubernetes.delete_service(
            "couchbase-operator",
            self.settings.get("installer-settings.couchbase.namespace"))
        # Delete every Couchbase CRD installed by the operator package.
        self.kubernetes.delete_custom_resource(
            "couchbasebackuprestores.couchbase.com")
        self.kubernetes.delete_custom_resource(
            "couchbasebackups.couchbase.com")
        self.kubernetes.delete_custom_resource(
            "couchbasebuckets.couchbase.com")
        self.kubernetes.delete_custom_resource(
            "couchbaseephemeralbuckets.couchbase.com")
        self.kubernetes.delete_custom_resource(
            "couchbasereplications.couchbase.com")
        self.kubernetes.delete_custom_resource(
            "couchbaserolebindings.couchbase.com")
        self.kubernetes.delete_custom_resource("couchbasegroups.couchbase.com")
        self.kubernetes.delete_custom_resource(
            "couchbasememcachedbuckets.couchbase.com")
        self.kubernetes.delete_custom_resource("couchbaseusers.couchbase.com")
        self.kubernetes.delete_custom_resource(
            "couchbaseautoscalers.couchbase.com")
        self.kubernetes.delete_service_account(
            "couchbase-operator-admission",
            self.settings.get("installer-settings.couchbase.namespace"))
        self.kubernetes.delete_secret(
            "couchbase-operator-admission",
            self.settings.get("installer-settings.couchbase.namespace"))
        self.kubernetes.delete_secret(
            "couchbase-operator-tls",
            self.settings.get("installer-settings.couchbase.namespace"))
        shutil.rmtree(Path("./couchbase-source-folder"), ignore_errors=True)
class Helm(object):
    """Orchestrates the Helm-based installation of Gluu and its supporting
    charts (nginx-ingress, ldap-backup, Gluu Gateway / Kong, KubeDB).

    Configuration is read from the project ``SettingsHandler`` key/value
    store; all cluster operations go through the project ``Kubernetes``
    wrapper, and shell commands run via ``exec_cmd``.
    """

    def __init__(self):
        self.values_file = Path("./helm/gluu/values.yaml").resolve()
        self.settings = SettingsHandler()
        self.kubernetes = Kubernetes()
        self.ldap_backup_release_name = self.settings.get(
            'CN_HELM_RELEASE_NAME') + "-ldap-backup"
        if self.settings.get("DEPLOYMENT_ARCH") == "gke":
            # Clusterrolebinding needs to be created for gke with CB or kubeDB installed
            if self.settings.get("INSTALL_REDIS") == "Y" or \
                    self.settings.get("INSTALL_GLUU_GATEWAY") == "Y" or \
                    self.settings.get("INSTALL_COUCHBASE") == "Y":
                user_account, stderr, retcode = exec_cmd(
                    "gcloud config get-value core/account")
                user_account = str(user_account, "utf-8").strip()
                user, stderr, retcode = exec_cmd("whoami")
                user = str(user, "utf-8").strip()
                cluster_role_binding_name = "cluster-admin-{}".format(user)
                self.kubernetes.create_cluster_role_binding(
                    cluster_role_binding_name=cluster_role_binding_name,
                    user_name=user_account,
                    cluster_role_name="cluster-admin")

    def prepare_alb(self):
        """Patch ./alb/ingress.yaml for an AWS ALB deployment.

        Sets the host and ACM certificate annotation, then removes the
        ingress paths of services that are not enabled in settings.
        """
        ingress_parser = Parser("./alb/ingress.yaml", "Ingress")
        ingress_parser["spec"]["rules"][0]["host"] = self.settings.get(
            "CN_FQDN")
        ingress_parser["metadata"]["annotations"]["alb.ingress.kubernetes.io/certificate-arn"] = \
            self.settings.get("ARN_AWS_IAM")
        if not self.settings.get("ARN_AWS_IAM"):
            # No ACM certificate ARN supplied: the annotation must be absent.
            del ingress_parser["metadata"]["annotations"][
                "alb.ingress.kubernetes.io/certificate-arn"]
        # BUG FIX: the original deleted entries from the ``paths`` list while
        # iterating over it, which skips the element following each deletion.
        # Rebuilding the list examines every path exactly once.
        disabled = {
            "casa": self.settings.get("ENABLE_CASA") != "Y",
            "oxshibboleth": self.settings.get("ENABLE_OXSHIBBOLETH") != "Y",
            "oxpassport": self.settings.get("ENABLE_OXPASSPORT") != "Y",
            "gg-kong-ui": self.settings.get("INSTALL_GLUU_GATEWAY") != "Y",
        }
        http_block = ingress_parser["spec"]["rules"][0]["http"]
        http_block["paths"] = [
            path for path in http_block["paths"]
            if not disabled.get(path["backend"]["serviceName"], False)
        ]
        ingress_parser.dump_it()

    def deploy_alb(self):
        """Create the ALB ingress and record the load balancer address.

        When the FQDN is not registered the operator is prompted for the
        ALB DNS name; otherwise the ingress status is polled until AWS
        assigns a hostname.  The result is stored under ``LB_ADD``.
        """
        alb_ingress = Path("./alb/ingress.yaml")
        self.kubernetes.create_objects_from_dict(
            alb_ingress, self.settings.get("CN_NAMESPACE"))
        # BUG FIX: lb_hostname was previously only bound inside the prompt
        # branch, raising NameError when IS_CN_FQDN_REGISTERED == "Y".
        lb_hostname = None
        if self.settings.get("IS_CN_FQDN_REGISTERED") != "Y":
            lb_hostname = input(
                "Please input the DNS of the Application load balancer "
                "created, found on AWS UI: ")
        while True:
            try:
                if lb_hostname:
                    break
                lb_hostname = self.kubernetes.read_namespaced_ingress(
                    name="gluu", namespace="gluu"
                ).status.load_balancer.ingress[0].hostname
            except TypeError:
                # Ingress status not populated yet.
                logger.info("Waiting for loadbalancer address..")
                time.sleep(10)
        self.settings.set("LB_ADD", lb_hostname)

    def wait_for_nginx_add(self):
        """Poll the nginx ingress controller service for a reachable address.

        eks:   store the LB hostname in ``LB_ADD``; for nlb, additionally
               wait until the hostname resolves in DNS.
        local: store the synthesized in-cluster service DNS name.
        other: store the external IP in ``HOST_EXT_IP``.
        """
        hostname_ip = None
        while True:
            try:
                if hostname_ip:
                    break
                if self.settings.get("DEPLOYMENT_ARCH") == "eks":
                    hostname_ip = self.kubernetes.read_namespaced_service(
                        name=self.settings.get('NGINX_INGRESS_RELEASE_NAME') +
                        "-ingress-nginx-controller",
                        namespace=self.settings.get("NGINX_INGRESS_NAMESPACE")
                    ).status.load_balancer.ingress[0].hostname
                    self.settings.set("LB_ADD", hostname_ip)
                    if self.settings.get("AWS_LB_TYPE") == "nlb":
                        try:
                            ip_static = socket.gethostbyname(str(hostname_ip))
                            if ip_static:
                                break
                        except socket.gaierror:
                            logger.info("Address has not received an ip yet.")
                elif self.settings.get("DEPLOYMENT_ARCH") == "local":
                    self.settings.set(
                        "LB_ADD",
                        self.settings.get('NGINX_INGRESS_RELEASE_NAME') +
                        "-nginx-ingress-controller." +
                        self.settings.get("NGINX_INGRESS_NAMESPACE") +
                        ".svc.cluster.local")
                    break
                else:
                    hostname_ip = self.kubernetes.read_namespaced_service(
                        name=self.settings.get('NGINX_INGRESS_RELEASE_NAME') +
                        "-ingress-nginx-controller",
                        namespace=self.settings.get("NGINX_INGRESS_NAMESPACE")
                    ).status.load_balancer.ingress[0].ip
                    self.settings.set("HOST_EXT_IP", hostname_ip)
            except (TypeError, AttributeError):
                # Service status / ingress list not populated yet.
                logger.info("Waiting for address..")
                time.sleep(10)

    def check_install_nginx_ingress(self, install_ingress=True):
        """Helm installs nginx ingress or checks to receive an ip or address.

        :param install_ingress: when True, (re)install the ingress-nginx
            chart with per-platform override values; when False, only wait
            for the existing controller to obtain an address.
        """
        if install_ingress:
            # Clear out any leftover nginx CRDs / cluster RBAC from a
            # previous installation before reinstalling.
            self.kubernetes.delete_custom_resource(
                "virtualservers.k8s.nginx.org")
            self.kubernetes.delete_custom_resource(
                "virtualserverroutes.k8s.nginx.org")
            self.kubernetes.delete_cluster_role("ingress-nginx-nginx-ingress")
            self.kubernetes.delete_cluster_role_binding(
                "ingress-nginx-nginx-ingress")
            self.kubernetes.create_namespace(
                name=self.settings.get("NGINX_INGRESS_NAMESPACE"),
                labels={"app": "ingress-nginx"})
            self.kubernetes.delete_cluster_role(
                self.settings.get('NGINX_INGRESS_RELEASE_NAME') +
                "-nginx-ingress-controller")
            self.kubernetes.delete_cluster_role_binding(
                self.settings.get('NGINX_INGRESS_RELEASE_NAME') +
                "-nginx-ingress-controller")
            try:
                exec_cmd(
                    "helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx"
                )
                exec_cmd("helm repo add stable https://charts.helm.sh/stable")
                exec_cmd("helm repo update")
            except FileNotFoundError:
                logger.error(
                    "Helm v3 is not installed. Please install it to continue "
                    "https://helm.sh/docs/intro/install/")
                raise SystemExit(1)
        command = "helm install {} ingress-nginx/ingress-nginx --namespace={} ".format(
            self.settings.get('NGINX_INGRESS_RELEASE_NAME'),
            self.settings.get("NGINX_INGRESS_NAMESPACE"))
        if self.settings.get("DEPLOYMENT_ARCH") == "minikube":
            exec_cmd("minikube addons enable ingress")
        if self.settings.get("DEPLOYMENT_ARCH") == "eks":
            if self.settings.get("AWS_LB_TYPE") == "nlb":
                if install_ingress:
                    nlb_override_values_file = Path(
                        "./nginx/aws/aws-nlb-override-values.yaml").resolve()
                    nlb_values = " --values {}".format(
                        nlb_override_values_file)
                    exec_cmd(command + nlb_values)
            else:
                if self.settings.get("USE_ARN") == "Y":
                    if install_ingress:
                        # Inject the ACM certificate ARN and the VPC CIDR
                        # into the ELB override values before installing.
                        elb_override_values_file = Path(
                            "./nginx/aws/aws-elb-override-values.yaml"
                        ).resolve()
                        elb_file_parser = Parser(elb_override_values_file,
                                                 True)
                        elb_file_parser["controller"]["service"][
                            "annotations"].update({
                                "service.beta.kubernetes.io/aws-load-balancer-ssl-cert":
                                self.settings.get("ARN_AWS_IAM")
                            })
                        elb_file_parser["controller"]["config"][
                            "proxy-real-ip-cidr"] = self.settings.get(
                                "VPC_CIDR")
                        elb_file_parser.dump_it()
                        elb_values = " --values {}".format(
                            elb_override_values_file)
                        exec_cmd(command + elb_values)
                else:
                    if install_ingress:
                        exec_cmd(command)
        if self.settings.get("DEPLOYMENT_ARCH") in ("gke", "aks", "do"):
            if install_ingress:
                cloud_override_values_file = Path(
                    "./nginx/cloud/cloud-override-values.yaml").resolve()
                cloud_values = " --values {}".format(
                    cloud_override_values_file)
                exec_cmd(command + cloud_values)
        if self.settings.get("DEPLOYMENT_ARCH") == "local":
            if install_ingress:
                baremetal_override_values_file = Path(
                    "./nginx/baremetal/baremetal-override-values.yaml"
                ).resolve()
                baremetal_values = " --values {}".format(
                    baremetal_override_values_file)
                exec_cmd(command + baremetal_values)
        if self.settings.get("DEPLOYMENT_ARCH") not in ("microk8s",
                                                        "minikube"):
            logger.info("Waiting for nginx to be prepared...")
            time.sleep(60)
            self.wait_for_nginx_add()

    def analyze_global_values(self):
        """Parses Gluu values.yaml with the input information from prompts.

        Translates the flat settings store into the nested helm values
        structure (provisioner, caching, persistence, feature toggles,
        credentials and per-component image coordinates), then writes the
        file back to disk.
        """
        values_file_parser = Parser(self.values_file, True)

        # Storage provisioner is determined by the target platform.
        if self.settings.get("DEPLOYMENT_ARCH") == "minikube":
            provisioner = "k8s.io/minikube-hostpath"
        elif self.settings.get("DEPLOYMENT_ARCH") == "eks":
            provisioner = "kubernetes.io/aws-ebs"
        elif self.settings.get("DEPLOYMENT_ARCH") == "gke":
            provisioner = "kubernetes.io/gce-pd"
        elif self.settings.get("DEPLOYMENT_ARCH") == "aks":
            provisioner = "kubernetes.io/azure-disk"
        elif self.settings.get("DEPLOYMENT_ARCH") == "do":
            provisioner = "dobs.csi.digitalocean.com"
        elif self.settings.get("DEPLOYMENT_ARCH") == "local":
            provisioner = "openebs.io/local"
        else:
            provisioner = "microk8s.io/hostpath"
        values_file_parser["global"]["provisioner"] = provisioner
        values_file_parser["global"]["lbIp"] = self.settings.get("HOST_EXT_IP")
        values_file_parser["global"]["domain"] = self.settings.get("CN_FQDN")
        values_file_parser["global"]["isDomainRegistered"] = "false"
        if self.settings.get("IS_CN_FQDN_REGISTERED") == "Y":
            values_file_parser["global"]["isDomainRegistered"] = "true"

        # Redis cache settings.
        if self.settings.get("CN_CACHE_TYPE") == "REDIS":
            values_file_parser["config"]["configmap"][
                "cnRedisUrl"] = self.settings.get("REDIS_URL")
            values_file_parser["config"]["configmap"][
                "cnRedisType"] = self.settings.get("REDIS_TYPE")
            values_file_parser["config"]["configmap"][
                "cnRedisUseSsl"] = self.settings.get("REDIS_USE_SSL")
            values_file_parser["config"]["configmap"]["cnRedisSslTruststore"] = \
                self.settings.get("REDIS_SSL_TRUSTSTORE")
            values_file_parser["config"]["configmap"]["cnRedisSentinelGroup"] = \
                self.settings.get("REDIS_SENTINEL_GROUP")
            values_file_parser["config"]["redisPass"] = self.settings.get(
                "REDIS_PW")

        if self.settings.get("DEPLOYMENT_ARCH") in ("microk8s", "minikube") \
                or self.settings.get("TEST_ENVIRONMENT") == "Y":
            values_file_parser["global"]["cloud"]["testEnviroment"] = True
        values_file_parser["config"]["configmap"][
            "lbAddr"] = self.settings.get("LB_ADD")

        # Persistence backend (ldap / couchbase / hybrid).
        values_file_parser["global"]["cnPersistenceType"] = self.settings.get(
            "PERSISTENCE_BACKEND")
        values_file_parser["config"]["configmap"][
            "cnPersistenceType"] = self.settings.get("PERSISTENCE_BACKEND")
        values_file_parser["config"]["configmap"]["cnPersistenceLdapMapping"] = \
            self.settings.get("HYBRID_LDAP_HELD_DATA")
        if self.settings.get("PERSISTENCE_BACKEND") != "ldap":
            values_file_parser["config"]["configmap"][
                "cnCouchbaseUrl"] = self.settings.get("COUCHBASE_URL")
            values_file_parser["config"]["configmap"][
                "cnCouchbaseUser"] = self.settings.get("COUCHBASE_USER")
            values_file_parser["config"]["configmap"][
                "cnCouchbaseIndexNumReplica"] = self.settings.get(
                    "COUCHBASE_INDEX_NUM_REPLICA")
            values_file_parser["config"]["configmap"]["cnCouchbaseSuperUser"] = \
                self.settings.get("COUCHBASE_SUPERUSER")
            values_file_parser["config"]["configmap"][
                "cnCouchbaseCrt"] = self.settings.get("COUCHBASE_CRT")
            values_file_parser["config"]["configmap"][
                "cnCouchbasePass"] = self.settings.get("COUCHBASE_PASSWORD")
            values_file_parser["config"]["configmap"]["cnCouchbaseSuperUserPass"] = \
                self.settings.get("COUCHBASE_SUPERUSER_PASSWORD")

        # Core component toggles.
        values_file_parser["global"]["auth-server"]["enabled"] = True
        values_file_parser["global"]["persistence"]["enabled"] = True
        values_file_parser["global"]["oxtrust"]["enabled"] = True
        values_file_parser["global"]["config"]["enabled"] = True
        values_file_parser["global"]["opendj"]["enabled"] = False
        values_file_parser["global"]["fido2"]["enabled"] = False
        if self.settings.get("ENABLE_FIDO2") == "Y":
            values_file_parser["global"]["fido2"]["enabled"] = True
        values_file_parser["global"]["scim"]["enabled"] = False
        if self.settings.get("ENABLE_SCIM") == "Y":
            values_file_parser["global"]["scim"]["enabled"] = True
        if self.settings.get("ENABLE_CONFIG_API") == "Y":
            values_file_parser["global"]["config-api"]["enabled"] = True

        if self.settings.get("INSTALL_JACKRABBIT") == "Y":
            values_file_parser["global"]["jackrabbit"]["enabled"] = True
            values_file_parser["config"]["configmap"][
                "cnJackrabbitUrl"] = self.settings.get("JACKRABBIT_URL")
            values_file_parser["jackrabbit"]["secrets"]["cnJackrabbitAdminPass"] = \
                self.settings.get("JACKRABBIT_ADMIN_PASSWORD")
            values_file_parser["jackrabbit"]["secrets"]["cnJackrabbitPostgresPass"] = \
                self.settings.get("JACKRABBIT_PG_PASSWORD")

        # Ingress flavour: istio, alb or plain nginx-ingress.
        if self.settings.get("USE_ISTIO_INGRESS") == "Y":
            values_file_parser["global"]["istio"]["ingress"] = True
            values_file_parser["global"]["istio"]["enabled"] = True
            values_file_parser["global"]["istio"][
                "namespace"] = self.settings.get("ISTIO_SYSTEM_NAMESPACE")
        elif self.settings.get("AWS_LB_TYPE") == "alb":
            values_file_parser["global"]["alb"]["ingress"] = True
        else:
            values_file_parser["nginx-ingress"]["ingress"]["enabled"] = True
            values_file_parser["nginx-ingress"]["ingress"]["hosts"] = [
                self.settings.get("CN_FQDN")
            ]
            values_file_parser["nginx-ingress"]["ingress"]["tls"][0][
                "hosts"] = [self.settings.get("CN_FQDN")]
        if self.settings.get("USE_ISTIO") == "Y":
            values_file_parser["global"]["istio"]["enabled"] = True

        values_file_parser["global"]["cnJackrabbitCluster"] = "false"
        if self.settings.get("JACKRABBIT_CLUSTER") == "Y":
            values_file_parser["global"]["cnJackrabbitCluster"] = "true"
            values_file_parser["config"]["configmap"]["cnJackrabbitAdminId"] = \
                self.settings.get("JACKRABBIT_ADMIN_ID")
            values_file_parser["config"]["configmap"]["cnJackrabbitPostgresUser"] = \
                self.settings.get("JACKRABBIT_PG_USER")
            values_file_parser["config"]["configmap"]["cnJackrabbitPostgresDatabaseName"] = \
                self.settings.get("JACKRABBIT_DATABASE")
            values_file_parser["config"]["configmap"]["cnJackrabbitPostgresHost"] = \
                self.settings.get("POSTGRES_URL")
            # NOTE: the original assigned cnJackrabbitPostgresUser a second
            # time with the identical value; the duplicate was removed.

        if self.settings.get("PERSISTENCE_BACKEND") == "hybrid" or \
                self.settings.get("PERSISTENCE_BACKEND") == "ldap":
            values_file_parser["global"]["opendj"]["enabled"] = True

        values_file_parser["global"]["oxshibboleth"]["enabled"] = False
        if self.settings.get("ENABLE_OXSHIBBOLETH") == "Y":
            values_file_parser["global"]["oxshibboleth"]["enabled"] = True
            values_file_parser["config"]["configmap"][
                "cnSyncShibManifests"] = True

        values_file_parser["global"]["client-api"]["enabled"] = False
        if self.settings.get("ENABLE_CLIENT_API") == "Y":
            values_file_parser["global"]["client-api"]["enabled"] = True
            values_file_parser["config"]["configmap"]["jansClientApiApplicationCertCn"] = \
                self.settings.get("CLIENT_API_APPLICATION_KEYSTORE_CN")
            values_file_parser["config"]["configmap"][
                "jansClientApiAdminCertCn"] = self.settings.get(
                    "CLIENT_API_ADMIN_KEYSTORE_CN")

        values_file_parser["opendj"]["cnRedisEnabled"] = False
        if self.settings.get("CN_CACHE_TYPE") == "REDIS":
            values_file_parser["opendj"]["cnRedisEnabled"] = True

        values_file_parser["global"]["nginx-ingress"]["enabled"] = True
        values_file_parser["global"]["cr-rotate"]["enabled"] = False
        if self.settings.get("ENABLE_CACHE_REFRESH") == "Y":
            values_file_parser["global"]["cr-rotate"]["enabled"] = True
        values_file_parser["global"]["auth-server-key-rotation"][
            "enabled"] = False
        if self.settings.get("ENABLE_AUTH_SERVER_KEY_ROTATE") == "Y":
            values_file_parser["global"]["auth-server-key-rotation"][
                "enabled"] = True
            values_file_parser["auth-server-key-rotation"][
                "keysLife"] = self.settings.get("AUTH_SERVER_KEYS_LIFE")

        # Organisation details and credentials.
        values_file_parser["config"]["orgName"] = self.settings.get("ORG_NAME")
        values_file_parser["config"]["email"] = self.settings.get("EMAIL")
        values_file_parser["config"]["adminPass"] = self.settings.get(
            "ADMIN_PW")
        values_file_parser["config"]["ldapPass"] = self.settings.get("LDAP_PW")
        values_file_parser["config"]["countryCode"] = self.settings.get(
            "COUNTRY_CODE")
        values_file_parser["config"]["state"] = self.settings.get("STATE")
        values_file_parser["config"]["city"] = self.settings.get("CITY")
        values_file_parser["config"]["configmap"][
            "cnCacheType"] = self.settings.get("CN_CACHE_TYPE")
        values_file_parser["opendj"]["replicas"] = self.settings.get(
            "LDAP_REPLICAS")
        values_file_parser["opendj"]["persistence"][
            "size"] = self.settings.get("LDAP_STORAGE_SIZE")

        # Boolean-string toggles that come from the upgrade flow.
        if self.settings.get("ENABLE_OXTRUST_API_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"][
                "cnOxtrustApiEnabled"] = True
        if self.settings.get("ENABLE_OXTRUST_TEST_MODE_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"][
                "cnOxtrustApiTestMode"] = True
        if self.settings.get("ENABLE_CASA_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"]["cnCasaEnabled"] = True
            values_file_parser["config"]["configmap"][
                "cnSyncCasaManifests"] = True
        if self.settings.get("ENABLE_OXPASSPORT_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"][
                "cnPassportEnabled"] = True
        if self.settings.get("ENABLE_RADIUS_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"]["cnRadiusEnabled"] = True
        if self.settings.get("ENABLE_SAML_BOOLEAN") == "true":
            values_file_parser["config"]["configmap"]["cnSamlEnabled"] = True

        values_file_parser["oxpassport"]["resources"] = {}

        # Per-component image coordinates: (chart key, name setting, tag
        # setting).  Table-driven to avoid a long run of identical
        # assignment pairs.
        image_coordinates = (
            ("casa", "CASA_IMAGE_NAME", "CASA_IMAGE_TAG"),
            ("config", "CONFIG_IMAGE_NAME", "CONFIG_IMAGE_TAG"),
            ("cr-rotate", "CACHE_REFRESH_ROTATE_IMAGE_NAME",
             "CACHE_REFRESH_ROTATE_IMAGE_TAG"),
            ("auth-server-key-rotation", "CERT_MANAGER_IMAGE_NAME",
             "CERT_MANAGER_IMAGE_TAG"),
            ("opendj", "LDAP_IMAGE_NAME", "LDAP_IMAGE_TAG"),
            ("persistence", "PERSISTENCE_IMAGE_NAME", "PERSISTENCE_IMAGE_TAG"),
            ("auth-server", "AUTH_SERVER_IMAGE_NAME", "AUTH_SERVER_IMAGE_TAG"),
            ("client-api", "CLIENT_API_IMAGE_NAME", "CLIENT_API_IMAGE_TAG"),
            ("oxpassport", "OXPASSPORT_IMAGE_NAME", "OXPASSPORT_IMAGE_TAG"),
            ("oxshibboleth", "OXSHIBBOLETH_IMAGE_NAME",
             "OXSHIBBOLETH_IMAGE_TAG"),
            ("jackrabbit", "JACKRABBIT_IMAGE_NAME", "JACKRABBIT_IMAGE_TAG"),
            ("oxtrust", "OXTRUST_IMAGE_NAME", "OXTRUST_IMAGE_TAG"),
            ("radius", "RADIUS_IMAGE_NAME", "RADIUS_IMAGE_TAG"),
        )
        for chart, name_key, tag_key in image_coordinates:
            values_file_parser[chart]["image"][
                "repository"] = self.settings.get(name_key)
            values_file_parser[chart]["image"]["tag"] = self.settings.get(
                tag_key)

        values_file_parser.dump_it()

    def install_gluu(self, install_ingress=True):
        """Helm install Gluu.

        :param install_ingress: forwarded to
            :meth:`check_install_nginx_ingress`; when False the ingress
            chart is not (re)installed, only waited on.
        """
        labels = {"app": "gluu"}
        if self.settings.get("USE_ISTIO") == "Y":
            labels = {"app": "gluu", "istio-injection": "enabled"}
        self.kubernetes.create_namespace(
            name=self.settings.get("CN_NAMESPACE"), labels=labels)
        if self.settings.get(
                "PERSISTENCE_BACKEND") != "ldap" and self.settings.get(
                    "INSTALL_COUCHBASE") == "Y":
            # Reinstall couchbase from a clean slate, then reload settings
            # because the couchbase installer may have updated them.
            couchbase_app = Couchbase()
            couchbase_app.uninstall()
            couchbase_app = Couchbase()
            couchbase_app.install()
            self.settings = SettingsHandler()
        if self.settings.get("AWS_LB_TYPE") == "alb":
            self.prepare_alb()
            self.deploy_alb()
        if self.settings.get("AWS_LB_TYPE") != "alb" and self.settings.get(
                "USE_ISTIO_INGRESS") != "Y":
            self.check_install_nginx_ingress(install_ingress)
        self.analyze_global_values()
        try:
            exec_cmd("helm install {} -f {} ./helm/gluu --namespace={}".format(
                self.settings.get('CN_HELM_RELEASE_NAME'), self.values_file,
                self.settings.get("CN_NAMESPACE")))
            if self.settings.get("PERSISTENCE_BACKEND") == "hybrid" or \
                    self.settings.get("PERSISTENCE_BACKEND") == "ldap":
                # Install the ldap-backup companion chart with the ldap
                # password injected into its values file.
                values_file = Path("./helm/ldap-backup/values.yaml").resolve()
                values_file_parser = Parser(values_file, True)
                values_file_parser["ldapPass"] = self.settings.get("LDAP_PW")
                values_file_parser.dump_it()
                exec_cmd(
                    "helm install {} -f ./helm/ldap-backup/values.yaml ./helm/ldap-backup --namespace={}"
                    .format(self.ldap_backup_release_name,
                            self.settings.get("CN_NAMESPACE")))
        except FileNotFoundError:
            logger.error(
                "Helm v3 is not installed. Please install it to continue "
                "https://helm.sh/docs/intro/install/")
            raise SystemExit(1)

    def install_gluu_gateway_ui(self):
        """Install the Gluu Gateway UI (konga) chart.

        Copies the Gluu TLS cert/key into the GG-UI namespace, fills the
        chart values from settings, registers a konga OP client when none
        was pre-supplied, and runs ``helm install``.
        """
        self.uninstall_gluu_gateway_ui()
        self.kubernetes.create_namespace(
            name=self.settings.get("GLUU_GATEWAY_UI_NAMESPACE"),
            labels={"APP_NAME": "gluu-gateway-ui"})
        try:
            # Try to get gluu cert + key
            ssl_cert = self.kubernetes.read_namespaced_secret(
                "gluu", self.settings.get("CN_NAMESPACE")).data["ssl_cert"]
            ssl_key = self.kubernetes.read_namespaced_secret(
                "gluu", self.settings.get("CN_NAMESPACE")).data["ssl_key"]
            self.kubernetes.patch_or_create_namespaced_secret(
                name="tls-certificate",
                namespace=self.settings.get("GLUU_GATEWAY_UI_NAMESPACE"),
                literal="tls.crt",
                value_of_literal=ssl_cert,
                secret_type="kubernetes.io/tls",
                second_literal="tls.key",
                value_of_second_literal=ssl_key)
        except Exception:
            # Deliberately broad best-effort: a missing secret must not stop
            # the install (the original caught (KeyError, Exception), which
            # is equivalent to Exception).
            logger.error(
                "Could not read Gluu secret. Please check config job pod logs. GG-UI will deploy but fail. "
                "Please mount crt and key inside gg-ui deployment")
        client_api_server_url = "https://{}.{}.svc.cluster.local:8443".format(
            self.settings.get("CLIENT_API_APPLICATION_KEYSTORE_CN"),
            self.settings.get("CN_NAMESPACE"))
        values_file = Path("./helm/gluu-gateway-ui/values.yaml").resolve()
        values_file_parser = Parser(values_file, True)
        values_file_parser["cloud"]["isDomainRegistered"] = "false"
        if self.settings.get("IS_CN_FQDN_REGISTERED") == "Y":
            values_file_parser["cloud"]["isDomainRegistered"] = "true"
        if self.settings.get(
                "DEPLOYMENT_ARCH") == "microk8s" or self.settings.get(
                    "DEPLOYMENT_ARCH") == "minikube":
            values_file_parser["cloud"]["enabled"] = False
        values_file_parser["cloud"]["provider"] = self.settings.get(
            "DEPLOYMENT_ARCH")
        values_file_parser["dbUser"] = self.settings.get(
            "GLUU_GATEWAY_UI_PG_USER")
        values_file_parser[
            "kongAdminUrl"] = "https://{}-kong-admin.{}.svc.cluster.local:8444".format(
                self.settings.get("KONG_HELM_RELEASE_NAME"),
                self.settings.get("KONG_NAMESPACE"))
        values_file_parser["dbHost"] = self.settings.get("POSTGRES_URL")
        values_file_parser["dbDatabase"] = self.settings.get(
            "GLUU_GATEWAY_UI_DATABASE")
        values_file_parser["clientApiServerUrl"] = client_api_server_url
        values_file_parser["image"]["repository"] = self.settings.get(
            "GLUU_GATEWAY_UI_IMAGE_NAME")
        values_file_parser["image"]["tag"] = self.settings.get(
            "GLUU_GATEWAY_UI_IMAGE_TAG")
        values_file_parser["loadBalancerIp"] = self.settings.get("HOST_EXT_IP")
        values_file_parser["dbPassword"] = self.settings.get(
            "GLUU_GATEWAY_UI_PG_PASSWORD")
        values_file_parser["opServerUrl"] = "https://" + self.settings.get(
            "CN_FQDN")
        values_file_parser["ggHost"] = self.settings.get("CN_FQDN") + "/gg-ui/"
        values_file_parser["ggUiRedirectUrlHost"] = self.settings.get(
            "CN_FQDN") + "/gg-ui/"
        # Register new client if one was not provided
        if not values_file_parser["clientApiId"] or \
                not values_file_parser["clientId"] or \
                not values_file_parser["clientSecret"]:
            client_api_id, client_id, client_secret = register_op_client(
                self.settings.get("CN_NAMESPACE"), "konga-client",
                self.settings.get("CN_FQDN"), client_api_server_url,
                self.settings.get('CN_HELM_RELEASE_NAME'))
            if not client_api_id:
                # Persist what we have so the operator can finish manually.
                values_file_parser.dump_it()
                logger.error(
                    "Due to a failure in konga client registration the installation has stopped."
                    " Please register as suggested above manually and enter the values returned"
                    " for clientApiId, clientId, "
                    "and clientSecret inside ./helm/gluu-gateway-ui/values.yaml then run "
                    "helm install {} -f ./helm/gluu-gateway-ui/values.yaml ./helm/gluu-gateway-ui "
                    "--namespace={}".format(
                        self.settings.get('GLUU_GATEWAY_UI_HELM_RELEASE_NAME'),
                        self.settings.get("GLUU_GATEWAY_UI_NAMESPACE")))
                raise SystemExit(1)
            values_file_parser["clientApiId"] = client_api_id
            values_file_parser["clientId"] = client_id
            values_file_parser["clientSecret"] = client_secret
        values_file_parser.dump_it()
        exec_cmd(
            "helm install {} -f ./helm/gluu-gateway-ui/values.yaml ./helm/gluu-gateway-ui --namespace={}"
            .format(self.settings.get('GLUU_GATEWAY_UI_HELM_RELEASE_NAME'),
                    self.settings.get("GLUU_GATEWAY_UI_NAMESPACE")))

    def install_gluu_gateway_dbmode(self):
        """Install Kong (Gluu Gateway) in database mode.

        Creates the kong namespace, stores the base64-encoded postgres
        password in a secret, and installs the kong chart wired to that
        secret.
        """
        self.uninstall_gluu_gateway_dbmode()
        self.kubernetes.create_namespace(
            name=self.settings.get("KONG_NAMESPACE"),
            labels={"app": "ingress-kong"})
        encoded_kong_pass_bytes = base64.b64encode(
            self.settings.get("KONG_PG_PASSWORD").encode("utf-8"))
        encoded_kong_pass_string = str(encoded_kong_pass_bytes, "utf-8")
        self.kubernetes.patch_or_create_namespaced_secret(
            name="kong-postgres-pass",
            namespace=self.settings.get("KONG_NAMESPACE"),
            literal="KONG_PG_PASSWORD",
            value_of_literal=encoded_kong_pass_string)
        exec_cmd("helm repo add kong https://charts.konghq.com")
        exec_cmd("helm repo update")
        exec_cmd(
            "helm install {} kong/kong "
            "--set ingressController.installCRDs=false "
            "--set image.repository={} "
            "--set image.tag={} "
            "--set env.database=postgres "
            "--set env.pg_user={} "
            "--set env.pg_password.valueFrom.secretKeyRef.name=kong-postgres-pass "
            "--set env.pg_password.valueFrom.secretKeyRef.key=KONG_PG_PASSWORD "
            "--set env.pg_host={} "
            "--set admin.enabled=true "
            "--set admin.type=ClusterIP "
            "--namespace={}".format(
                self.settings.get("KONG_HELM_RELEASE_NAME"),
                self.settings.get("GLUU_GATEWAY_IMAGE_NAME"),
                self.settings.get("GLUU_GATEWAY_IMAGE_TAG"),
                self.settings.get("KONG_PG_USER"),
                self.settings.get("POSTGRES_URL"),
                self.settings.get("KONG_NAMESPACE")))

    def install_kubedb(self):
        """Install the KubeDB operator and catalog charts into gluu-kubedb."""
        self.uninstall_kubedb()
        self.kubernetes.create_namespace(name="gluu-kubedb",
                                         labels={"app": "kubedb"})
        try:
            exec_cmd(
                "helm repo add appscode https://charts.appscode.com/stable/")
            exec_cmd("helm repo update")
            exec_cmd(
                "helm install kubedb-operator appscode/kubedb --version v0.13.0-rc.0 "
                "--namespace gluu-kubedb")
            # The catalog requires a running operator.
            self.kubernetes.check_pods_statuses("gluu-kubedb", "app=kubedb")
            exec_cmd(
                "helm install kubedb-catalog appscode/kubedb-catalog --version v0.13.0-rc.0 "
                "--namespace gluu-kubedb")
        except FileNotFoundError:
            logger.error(
                "Helm v3 is not installed. Please install it to continue "
                "https://helm.sh/docs/intro/install/")
            raise SystemExit(1)

    def uninstall_gluu_gateway_dbmode(self):
        """Delete the Kong (Gluu Gateway db-mode) helm release."""
        exec_cmd("helm delete {} --namespace={}".format(
            self.settings.get('KONG_HELM_RELEASE_NAME'),
            self.settings.get("KONG_NAMESPACE")))

    def uninstall_gluu_gateway_ui(self):
        """Delete the Gluu Gateway UI helm release."""
        exec_cmd("helm delete {} --namespace={}".format(
            self.settings.get('GLUU_GATEWAY_UI_HELM_RELEASE_NAME'),
            self.settings.get("GLUU_GATEWAY_UI_NAMESPACE")))

    def uninstall_kubedb(self):
        """Delete the KubeDB operator and catalog helm releases."""
        logger.info("Deleting KubeDB...This may take a little while.")
        try:
            exec_cmd(
                "helm repo add appscode https://charts.appscode.com/stable/")
            exec_cmd("helm repo update")
            exec_cmd("helm delete kubedb-operator --namespace gluu-kubedb")
            exec_cmd("helm delete kubedb-catalog --namespace gluu-kubedb")
            # Give the operator time to tear down its resources.
            time.sleep(20)
        except FileNotFoundError:
            logger.error(
                "Helm v3 is not installed. Please install it to continue "
                "https://helm.sh/docs/intro/install/")
            raise SystemExit(1)

    def uninstall_gluu(self):
        """Delete the Gluu and ldap-backup helm releases."""
        exec_cmd("helm delete {} --namespace={}".format(
            self.settings.get('CN_HELM_RELEASE_NAME'),
            self.settings.get("CN_NAMESPACE")))
        exec_cmd("helm delete {} --namespace={}".format(
            self.ldap_backup_release_name, self.settings.get("CN_NAMESPACE")))

    def uninstall_nginx_ingress(self):
        """Delete the nginx ingress helm release."""
        exec_cmd("helm delete {} --namespace={}".format(
            self.settings.get('NGINX_INGRESS_RELEASE_NAME'),
            self.settings.get("NGINX_INGRESS_NAMESPACE")))
class GluuGateway(object):
    """Installs and removes Gluu Gateway (Kong + Konga UI) on Kubernetes.

    Reads configuration from :class:`SettingsHandler` and drives ``helm``
    plus direct Kubernetes API calls through :class:`Kubernetes`.
    """

    def __init__(self):
        self.settings = SettingsHandler()
        self.kubernetes = Kubernetes()
        if self.settings.get("DEPLOYMENT_ARCH") == "gke":
            # Clusterrolebinding needs to be created for gke with CB or kubeDB installed
            user_account, stderr, retcode = exec_cmd(
                "gcloud config get-value core/account")
            user_account = str(user_account, "utf-8").strip()
            user, stderr, retcode = exec_cmd("whoami")
            user = str(user, "utf-8").strip()
            cluster_role_binding_name = "cluster-admin-{}".format(user)
            self.kubernetes.create_cluster_role_binding(
                cluster_role_binding_name=cluster_role_binding_name,
                user_name=user_account,
                cluster_role_name="cluster-admin")

    def install_gluu_gateway_ui(self):
        """Install the Gluu Gateway UI (Konga) helm chart.

        Copies the Gluu TLS cert/key into the UI namespace, fills in
        ``./helm/gluu-gateway-ui/values.yaml``, registers a ``konga-client``
        OP client when none is configured, then runs ``helm install``.
        Exits the installer if client registration fails.
        """
        self.uninstall_gluu_gateway_ui()
        self.kubernetes.create_namespace(
            name=self.settings.get("GLUU_GATEWAY_UI_NAMESPACE"),
            labels={"APP_NAME": "gluu-gateway-ui"})
        try:
            # Try to get gluu cert + key. Read the secret once and reuse it
            # instead of issuing two identical API calls.
            gluu_secret_data = self.kubernetes.read_namespaced_secret(
                "gluu", self.settings.get("CN_NAMESPACE")).data
            ssl_cert = gluu_secret_data["ssl_cert"]
            ssl_key = gluu_secret_data["ssl_key"]
            self.kubernetes.patch_or_create_namespaced_secret(
                name="tls-certificate",
                namespace=self.settings.get("GLUU_GATEWAY_UI_NAMESPACE"),
                literal="tls.crt",
                value_of_literal=ssl_cert,
                secret_type="kubernetes.io/tls",
                second_literal="tls.key",
                value_of_second_literal=ssl_key)
        except Exception:
            # Best-effort: deployment proceeds, but the operator must mount
            # the cert/key manually. (Was `except (KeyError, Exception)`;
            # Exception already covers KeyError.)
            logger.error(
                "Could not read Gluu secret. Please check config job pod logs. GG-UI will deploy but fail. "
                "Please mount crt and key inside gg-ui deployment")
        client_api_server_url = "https://{}.{}.svc.cluster.local:8443".format(
            self.settings.get("CLIENT_API_APPLICATION_KEYSTORE_CN"),
            self.settings.get("CN_NAMESPACE"))
        values_file = Path("./helm/gluu-gateway-ui/values.yaml").resolve()
        values_file_parser = Parser(values_file, True)
        values_file_parser["cloud"]["isDomainRegistered"] = "false"
        if self.settings.get("IS_CN_FQDN_REGISTERED") == "Y":
            values_file_parser["cloud"]["isDomainRegistered"] = "true"
        # Local single-node distributions have no cloud load balancer.
        if self.settings.get("DEPLOYMENT_ARCH") in ("microk8s", "minikube"):
            values_file_parser["cloud"]["enabled"] = False
        values_file_parser["cloud"]["provider"] = self.settings.get(
            "DEPLOYMENT_ARCH")
        values_file_parser["dbUser"] = self.settings.get(
            "GLUU_GATEWAY_UI_PG_USER")
        values_file_parser["kongAdminUrl"] = \
            "https://{}-kong-admin.{}.svc.cluster.local:8444".format(
                self.settings.get("KONG_HELM_RELEASE_NAME"),
                self.settings.get("KONG_NAMESPACE"))
        values_file_parser["dbHost"] = self.settings.get("POSTGRES_URL")
        values_file_parser["dbDatabase"] = self.settings.get(
            "GLUU_GATEWAY_UI_DATABASE")
        values_file_parser["clientApiServerUrl"] = client_api_server_url
        values_file_parser["image"]["repository"] = self.settings.get(
            "GLUU_GATEWAY_UI_IMAGE_NAME")
        values_file_parser["image"]["tag"] = self.settings.get(
            "GLUU_GATEWAY_UI_IMAGE_TAG")
        values_file_parser["loadBalancerIp"] = self.settings.get("HOST_EXT_IP")
        values_file_parser["dbPassword"] = self.settings.get(
            "GLUU_GATEWAY_UI_PG_PASSWORD")
        values_file_parser["opServerUrl"] = "https://" + self.settings.get(
            "CN_FQDN")
        values_file_parser["ggHost"] = self.settings.get("CN_FQDN") + "/gg-ui/"
        values_file_parser["ggUiRedirectUrlHost"] = self.settings.get(
            "CN_FQDN") + "/gg-ui/"
        # Register new client if one was not provided
        if not values_file_parser["clientApiId"] or \
                not values_file_parser["clientId"] or \
                not values_file_parser["clientSecret"]:
            client_api_id, client_id, client_secret = register_op_client(
                self.settings.get("CN_NAMESPACE"),
                "konga-client",
                self.settings.get("CN_FQDN"),
                client_api_server_url,
                self.settings.get('CN_HELM_RELEASE_NAME'))
            if not client_api_id:
                # Persist what we have so the operator can finish by hand.
                values_file_parser.dump_it()
                logger.error(
                    "Due to a failure in konga client registration the installation has stopped."
                    " Please register as suggested above manually and enter the values returned"
                    " for clientApiId, clientId, "
                    "and clientSecret inside ./helm/gluu-gateway-ui/values.yaml then run "
                    "helm install {} -f ./helm/gluu-gateway-ui/values.yaml ./helm/gluu-gateway-ui "
                    "--namespace={}".format(
                        self.settings.get('GLUU_GATEWAY_UI_HELM_RELEASE_NAME'),
                        self.settings.get("GLUU_GATEWAY_UI_NAMESPACE")))
                raise SystemExit(1)
            values_file_parser["clientApiId"] = client_api_id
            values_file_parser["clientId"] = client_id
            values_file_parser["clientSecret"] = client_secret
        values_file_parser.dump_it()
        exec_cmd(
            "helm install {} -f ./helm/gluu-gateway-ui/values.yaml ./helm/gluu-gateway-ui --namespace={}"
            .format(self.settings.get('GLUU_GATEWAY_UI_HELM_RELEASE_NAME'),
                    self.settings.get("GLUU_GATEWAY_UI_NAMESPACE")))

    def install_gluu_gateway_dbmode(self):
        """Install Kong in database (Postgres) mode via helm.

        Stores the base64-encoded Postgres password in a Kubernetes secret
        that the chart references through ``env.pg_password.valueFrom``.
        """
        self.uninstall_gluu_gateway_dbmode()
        self.kubernetes.create_namespace(
            name=self.settings.get("KONG_NAMESPACE"),
            labels={"app": "ingress-kong"})
        encoded_kong_pass_bytes = base64.b64encode(
            self.settings.get("KONG_PG_PASSWORD").encode("utf-8"))
        encoded_kong_pass_string = str(encoded_kong_pass_bytes, "utf-8")
        self.kubernetes.patch_or_create_namespaced_secret(
            name="kong-postgres-pass",
            namespace=self.settings.get("KONG_NAMESPACE"),
            literal="KONG_PG_PASSWORD",
            value_of_literal=encoded_kong_pass_string)
        exec_cmd("helm repo add kong https://charts.konghq.com")
        exec_cmd("helm repo update")
        exec_cmd(
            "helm install {} kong/kong "
            "--set ingressController.installCRDs=false "
            "--set image.repository={} "
            "--set image.tag={} "
            "--set env.database=postgres "
            "--set env.pg_user={} "
            "--set env.pg_password.valueFrom.secretKeyRef.name=kong-postgres-pass "
            "--set env.pg_password.valueFrom.secretKeyRef.key=KONG_PG_PASSWORD "
            "--set env.pg_host={} "
            "--set admin.enabled=true "
            "--set admin.type=ClusterIP "
            "--namespace={}".format(
                self.settings.get("KONG_HELM_RELEASE_NAME"),
                self.settings.get("GLUU_GATEWAY_IMAGE_NAME"),
                self.settings.get("GLUU_GATEWAY_IMAGE_TAG"),
                self.settings.get("KONG_PG_USER"),
                self.settings.get("POSTGRES_URL"),
                self.settings.get("KONG_NAMESPACE")))

    def uninstall_gluu_gateway_dbmode(self):
        """Delete the Kong helm release."""
        exec_cmd("helm delete {} --namespace={}".format(
            self.settings.get('KONG_HELM_RELEASE_NAME'),
            self.settings.get("KONG_NAMESPACE")))

    def uninstall_gluu_gateway_ui(self):
        """Delete the Gluu Gateway UI helm release."""
        exec_cmd("helm delete {} --namespace={}".format(
            self.settings.get('GLUU_GATEWAY_UI_HELM_RELEASE_NAME'),
            self.settings.get("GLUU_GATEWAY_UI_NAMESPACE")))

    def uninstall_kong(self):
        """Remove Kong resources (CRDs, RBAC, services, deployment) directly
        through the Kubernetes API, for installs not tracked by helm."""
        logger.info("Removing gluu gateway kong...")
        # Single lookup; the namespace is used by almost every call below.
        kong_namespace = self.settings.get("KONG_NAMESPACE")
        self.kubernetes.delete_job(kong_namespace, "app=kong-migration-job")
        self.kubernetes.delete_custom_resource(
            "kongconsumers.configuration.konghq.com")
        self.kubernetes.delete_custom_resource(
            "kongcredentials.configuration.konghq.com")
        self.kubernetes.delete_custom_resource(
            "kongingresses.configuration.konghq.com")
        self.kubernetes.delete_custom_resource(
            "kongplugins.configuration.konghq.com")
        self.kubernetes.delete_custom_resource(
            "tcpingresses.configuration.konghq.com")
        self.kubernetes.delete_custom_resource(
            "kongclusterplugins.configuration.konghq.com")
        self.kubernetes.delete_cluster_role("kong-ingress-clusterrole")
        self.kubernetes.delete_service_account("kong-serviceaccount",
                                               kong_namespace)
        self.kubernetes.delete_cluster_role_binding(
            "kong-ingress-clusterrole-nisa-binding")
        self.kubernetes.delete_config_map_using_name("kong-server-blocks",
                                                     kong_namespace)
        self.kubernetes.delete_service("kong-proxy", kong_namespace)
        self.kubernetes.delete_service("kong-validation-webhook",
                                       kong_namespace)
        self.kubernetes.delete_service("kong-admin", kong_namespace)
        self.kubernetes.delete_deployment_using_name("ingress-kong",
                                                     kong_namespace)