def groom(_plugin, model):
    """Groom the docker package plugin: enabled unless explicitly disabled."""
    setDefaultInMap(model["cluster"]["docker"], "disabled", False)
    if model["cluster"]["docker"]["disabled"]:
        return False
    # Resolve the docker package repository before enabling the plugin.
    lookupRepository(model, "docker", configEntry="docker_pkg")
    return True
def groom(plugin, model):
    """Groom the confluent plugin: repository, kafka_log_dirs defaulting, ansible repo path and a 'preflight' group."""
    setDefaultInMap(model["cluster"]["confluent"], "disabled", False)
    if model["cluster"]["confluent"]["disabled"]:
        return False
    lookupRepository(model, "confluent")
    if "confluent" not in model["config"] or "ansible_repo_folder" not in model["config"]["confluent"]:
        ERROR("Missing 'confluent.ansible_repo_folder' in configuration file")
    # NOTE(review): nesting of this if/else reconstructed from a collapsed source line —
    # confirm against the original indentation.
    for node in model['cluster']['nodes']:
        if "kafka_log_dirs" in node:
            # An explicit empty list means "nothing specific": drop the key.
            if len(node["kafka_log_dirs"]) == 0:
                del(node["kafka_log_dirs"])
        else:
            # Inherit kafka_log_dirs from the node's role when not set on the node.
            if "kafka_log_dirs" in model["data"]["roleByName"][node["role"]]:
                node["kafka_log_dirs"] = model["data"]["roleByName"][node["role"]]["kafka_log_dirs"]
    # ansible_repo_folder may be relative to the configuration file location.
    ansible_repo_folder = appendPath(os.path.dirname(model["data"]["configFile"]), model["config"]["confluent"]["ansible_repo_folder"])
    model["config"]["confluent"]["ansible_repo_folder"] = ansible_repo_folder
    model["data"]["rolePaths"].add(appendPath(ansible_repo_folder, "roles"))
    # We need to define an ansible group "preflight" hosting all nodes
    preflight = []
    for node in model["cluster"]["nodes"]:
        preflight.append(node["name"])
    model["data"]["groupByName"]["preflight"] = preflight
    return True
def groom(_plugin, model):
    """Groom the nfs_client plugin. No grooming work; only the disabled flag matters."""
    setDefaultInMap(model[CLUSTER], NFS_CLIENT, {})
    setDefaultInMap(model[CLUSTER][NFS_CLIENT], DISABLED, False)
    return not model[CLUSTER][NFS_CLIENT][DISABLED]
def groom(_plugin, model):
    """Groom the buildah plugin. No grooming work; only the disabled flag matters."""
    setDefaultInMap(model[CLUSTER], BUILDAH, {})
    setDefaultInMap(model[CLUSTER][BUILDAH], DISABLED, False)
    return not model[CLUSTER][BUILDAH][DISABLED]
def groom(_plugin, model):
    """Groom the cerebro plugin: honor the disabled flag, then resolve its repository."""
    setDefaultInMap(model["cluster"]["cerebro"], "disabled", False)
    if model["cluster"]["cerebro"]["disabled"]:
        return False
    lookupRepository(model, "cerebro")
    return True
def groom(_plugin, model):
    """Groom the Oracle JDK plugin: repository lookup and set_java_home default."""
    setDefaultInMap(model[CLUSTER][ORACLEJDK], DISABLED, False)
    if model[CLUSTER][ORACLEJDK][DISABLED]:
        return False
    lookupRepository(model, ORACLEJDK)
    # By default, do not touch JAVA_HOME.
    setDefaultInMap(model[CLUSTER][ORACLEJDK], "set_java_home", False)
    return True
def groom(_plugin, model):
    """Groom the rancher (k8s) plugin, providing network and interface defaults."""
    setDefaultInMap(model[CLUSTER], K8S, {})
    setDefaultInMap(model[CLUSTER][K8S], RANCHER, {})
    setDefaultInMap(model[CLUSTER][K8S][RANCHER], DISABLED, False)
    if model[CLUSTER][K8S][RANCHER][DISABLED]:
        return False
    setDefaultInMap(model[CLUSTER][K8S][RANCHER], NETWORK, "canal")
    setDefaultInMap(model[CLUSTER][K8S][RANCHER], INTERFACE, "eth0")
    return True
def groom(plugin, model):
    """Groom the Hadoop client plugin; normalize the Ambari server URL."""
    setDefaultInMap(model[CLUSTER][HD_CLIENT], DISABLED, False)
    if model[CLUSTER][HD_CLIENT][DISABLED]:
        return False
    lookupRepository(model, HD_CLIENT, HORTONWORKS)
    # Strip a single trailing '/' so later concatenations stay clean.
    url = model[CLUSTER][HD_CLIENT][AMBARI_SERVER_URL]
    if url.endswith("/"):
        model[CLUSTER][HD_CLIENT][AMBARI_SERVER_URL] = url[:-1]
    return True
def groom(_plugin, model):
    """Groom drproxy: resolve certificate/key file paths and ensure the files exist."""
    setDefaultInMap(model[CLUSTER][DRPROXY], DISABLED, False)
    if model[CLUSTER][DRPROXY][DISABLED]:
        return False
    for fileKey in [CERT_FILE, KEY_FILE, ROOT_CA_FILE]:
        # Paths are relative to the cluster definition file location.
        model[CLUSTER][DRPROXY][fileKey] = appendPath(model[DATA][SOURCE_FILE_DIR], model[CLUSTER][DRPROXY][fileKey])
        if not os.path.isfile(model[CLUSTER][DRPROXY][fileKey]):
            ERROR("Unable to find '{}'!".format(model[CLUSTER][DRPROXY][fileKey]))
    return True
def groom(_plugin, model):
    """Groom the docker (yum) plugin: version default, repository and optional http proxy."""
    setDefaultInMap(model["cluster"]["docker"], "disabled", False)
    if model["cluster"]["docker"]["disabled"]:
        return False
    setDefaultInMap(model["cluster"]["docker"], "version", "latest")
    lookupRepository(model, "docker", configEntry="docker_yum")
    # proxy_id is optional; pass None when absent.
    proxyId = model["cluster"]["docker"].get("proxy_id")
    lookupHttpProxy(model, proxyId, "docker")
    return True
def groom(_plugin, model):
    """Groom docker_registry_hack: resolve /etc/hosts target IPs and default the aliases."""
    setDefaultInMap(model[CLUSTER][DOCKER_REGISTRY_HACK], DISABLED, False)
    if model[CLUSTER][DOCKER_REGISTRY_HACK][DISABLED]:
        return False
    if ETC_HOST_ENTRIES in model[CLUSTER][DOCKER_REGISTRY_HACK]:
        for entry in model[CLUSTER][DOCKER_REGISTRY_HACK][ETC_HOST_ENTRIES]:
            entry[TARGET_IP] = resolveDns(entry[TARGET_IP])
            if ALIASES not in entry:
                entry[ALIASES] = "quay.io gcr.io k8s.gcr.io registry-1.docker.io"
    return True
def groom(_plugin, model):
    """Groom register_ca: resolve CA source paths and check the files exist."""
    setDefaultInMap(model[CLUSTER][REGISTER_CA], DISABLED, False)
    if model[CLUSTER][REGISTER_CA][DISABLED]:
        return False
    if FROM_PATHS in model[CLUSTER][REGISTER_CA]:
        for idx, entry in enumerate(model[CLUSTER][REGISTER_CA][FROM_PATHS]):
            # Source paths are relative to the cluster definition file.
            fullPath = appendPath(model[DATA][SOURCE_FILE_DIR], entry[SRC])
            model[CLUSTER][REGISTER_CA][FROM_PATHS][idx][SRC] = fullPath
            if not os.path.isfile(fullPath):
                ERROR("Unable to find '{}'!".format(fullPath))
    return True
def groom(_plugin, model): model[DATA][AWS] = {} #model[DATA][AWS][REFERENCE_SUBNET]= model[CLUSTER][NODES][0][AWS][SUBNET] setDefaultInMap(model[CLUSTER][AWS], KEY_PAIR, "default") kp = lookupKeyPair(model, model[CLUSTER][AWS][KEY_PAIR]) model[DATA][AWS][DATA_KEY_PAIR] = kp[KEY_PAIR_NAME] if PRIVATE_KEY_PATH in kp: # If path is relative, adjust to config file location model[DATA][AWS][DATA_PRIVATE_KEY_PATH] = appendPath(os.path.dirname(model["data"]["configFile"]), kp[PRIVATE_KEY_PATH]) model[DATA][AWS][DATA_ROUTE53] = lookupRoute53(model, model[CLUSTER][AWS][ROUTE53]) groomSecurityGroups(model) groomRoles(model) groomNodes(model) model["data"]["buildScript"] = appendPath(model["data"]["targetFolder"], "build.sh") return True # Always enabled
def groom(_plugin, model):
    """Groom the vagrant target: wire the local yum repository (if configured), roles and nodes."""
    repoInConfig = ("repositories" in model["config"]
                    and "vagrant" in model["config"]["repositories"]
                    and "yum_repo_base_url" in model["config"]["repositories"]["vagrant"])
    if model["cluster"]["vagrant"]["yum_repo"] == "local" and not repoInConfig:
        ERROR("'repositories.vagrant.repo_yum_base_url' is not defined in config file while 'vagrant.yum_repo' is set to 'local' in '{}'".format(model["data"]["sourceFileDir"]))
    if repoInConfig:
        # All plugins look up their repositories in model["data"]; the vagrant one does too.
        setDefaultInMap(model["data"], "repositories", {})
        setDefaultInMap(model["data"]["repositories"], "vagrant", {})
        model["data"]["repositories"]["vagrant"]["yum_repo_base_url"] = model["config"]["repositories"]["vagrant"]["yum_repo_base_url"]
    groomRoles(model)
    groomNodes(model)
    model["data"]["buildScript"] = appendPath(model["data"]["targetFolder"], "build.sh")
    return True  # Always enabled
def groomRoles(model):
    """Resolve per-role AWS attributes: security group references, root volume type and data disks."""
    for _, role in model[DATA][ROLE_BY_NAME].iteritems():
        if role[AWS][SECURITY_GROUP] in model[DATA][AWS][SECURITY_GROUP_BY_NAME]:
            # Group defined in this model: reference it as a managed resource.
            role[AWS][SECURITY_GROUP_ID] = "aws_security_group." + role[AWS][SECURITY_GROUP] + ".id"
        else:
            # Pre-existing group: record it and reference it as an external data source.
            model[DATA][AWS][EXTERNAL_SECURITY_GROUPS].add(role[AWS][SECURITY_GROUP])
            role[AWS][SECURITY_GROUP_ID] = "data.aws_security_group." + role[AWS][SECURITY_GROUP] + ".id"
        setDefaultInMap(role[AWS], ROOT_TYPE, "gp2")
        role[DISK_TO_MOUNT_COUNT] = 0
        if DATA_DISKS in role:
            for i in range(0, len(role[DATA_DISKS])):
                role[DATA_DISKS][i][INDEX] = i
                # Default device names are derived from the disk index.
                setDefaultInMap(role[DATA_DISKS][i], DEVICE, DISK_DEVICE_FROM_IDX[i])
                setDefaultInMap(role[DATA_DISKS][i], DEVICE_AWS, role[DATA_DISKS][i][DEVICE])
                setDefaultInMap(role[DATA_DISKS][i], DEVICE_HOST, role[DATA_DISKS][i][DEVICE])
                # Only disks with a mount point count toward DISK_TO_MOUNT_COUNT.
                if MOUNT in role[DATA_DISKS][i]:
                    role[DISK_TO_MOUNT_COUNT] += 1
                setDefaultInMap(role[DATA_DISKS][i], TYPE, "gp2")
def groom(_plugin, model):
    """Groom metallb: resolve external IP ranges and validate the optional dashboard IP."""
    setDefaultInMap(model[CLUSTER], K8S, {})
    setDefaultInMap(model[CLUSTER][K8S], METALLB, {})
    setDefaultInMap(model[CLUSTER][K8S][METALLB], DISABLED, False)
    if model[CLUSTER][K8S][METALLB][DISABLED]:
        return False
    else:
        # NOTE(review): block structure reconstructed from a collapsed source line; the
        # range-resolution loop appears to run only when a dashboard_ip is set — confirm.
        if DASHBOARD_IP in model[CLUSTER][K8S][METALLB]:
            model[CLUSTER][K8S][METALLB][DASHBOARD_IP] = resolveDnsAndCheck(model[CLUSTER][K8S][METALLB][DASHBOARD_IP])
            # The u"" prefix forces unicode text, as required by ipaddress under python2.
            dashboard_ip = ipaddress.ip_address(u"" + model[CLUSTER][K8S][METALLB][DASHBOARD_IP])
            dashboardInRange = False
            for rangeip in model[CLUSTER][K8S][METALLB][EXTERNAL_IP_RANGES]:
                rangeip[FIRST] = resolveDnsAndCheck(rangeip[FIRST])
                rangeip[LAST] = resolveDnsAndCheck(rangeip[LAST])
                first_ip = ipaddress.ip_address(u"" + rangeip[FIRST])
                last_ip = ipaddress.ip_address(u"" + rangeip[LAST])
                # Each range must be strictly increasing.
                if not last_ip > first_ip:
                    ERROR("Invalid metallb.external_ip_range (first >= last)")
                if dashboard_ip >= first_ip and dashboard_ip <= last_ip:
                    dashboardInRange = True
            if not dashboardInRange:
                ERROR("metallb.dashboard_ip is not included in one of metallb.external_ip_ranges")
        return True
def groom(plugin, model):
    """Groom the vagrant target: local yum repo wiring and per-node synced folders."""
    repoInConfig = ("repositories" in model["config"]
                    and "vagrant" in model["config"]["repositories"]
                    and "yum_repo_base_url" in model["config"]["repositories"]["vagrant"])
    setDefaultInMap(model["cluster"]["vagrant"], "local_yum_repo", repoInConfig)
    if model["cluster"]["vagrant"]["local_yum_repo"] and not repoInConfig:
        ERROR("'repositories.vagrant.repo_yum_base_url' is not defined in config file while 'vagrant.local_yum_repo' is set to True in '{}'".format(model["data"]["sourceFileDir"]))
    if repoInConfig:
        # All plugins look up their repositories in model["data"]; the vagrant one does too.
        setDefaultInMap(model["data"], "repositories", {})
        setDefaultInMap(model["data"]["repositories"], "vagrant", {})
        model["data"]["repositories"]["vagrant"]["yum_repo_base_url"] = model["config"]["repositories"]["vagrant"]["yum_repo_base_url"]
    for node in model['cluster']['nodes']:
        # Merge synced folders coming from the node's role and from the global vagrant section.
        if SYNCED_FOLDERS not in node:
            node[SYNCED_FOLDERS] = []
        role = model["data"]["roleByName"][node["role"]]
        if SYNCED_FOLDERS in role:
            node[SYNCED_FOLDERS] += role[SYNCED_FOLDERS]
        if SYNCED_FOLDERS in model["cluster"]["vagrant"]:
            node[SYNCED_FOLDERS] += model["cluster"]["vagrant"][SYNCED_FOLDERS]
    model["data"]["buildScript"] = appendPath(model["data"]["targetFolder"], "build.sh")
    return True  # Always enabled
def groom(_plugin, model):
    """Groom helm deployments: defaults, helm option string and per-deployment values file."""
    setDefaultInMap(model[CLUSTER], K8S, {})
    setDefaultInMap(model[CLUSTER][K8S], HELM_DEPLOYMENTS, [])
    for deployment in model[CLUSTER][K8S][HELM_DEPLOYMENTS]:
        setDefaultInMap(deployment, DISABLED, False)
        if deployment[DISABLED]:
            continue
        setDefaultInMap(deployment, VALUES, {})
        setDefaultInMap(deployment, STATE, "present")
        options = ""
        if VERSION in deployment and deployment[VERSION] != "":
            options += " --version {}".format(deployment[VERSION])
        if REPO in deployment and deployment[REPO] != "":
            options += " --repo {}".format(deployment[REPO])
        deployment[_OPTIONS_] = options
        # As xxxx.value is forbidden in jinja2 templating, move VALUES under another key.
        deployment[_VALUES_] = deployment[VALUES]
        del deployment[VALUES]
        deployment[_VALUES_FILE_] = "/tmp/helm_{}.yaml".format(deployment[NAME])
    return True
def groom(plugin, model):
    """Groom the kubespray plugin: locate its ansible repository and compute dnsNbrDots."""
    setDefaultInMap(model["cluster"], "kubespray", {})
    setDefaultInMap(model["cluster"]["kubespray"], "disabled", False)
    if model["cluster"]["kubespray"]["disabled"]:
        return False
    if "kubespray" not in model["config"] or "ansible_repo_folder" not in model["config"]["kubespray"]:
        ERROR("Missing 'kubespray.ansible_repo_folder' in configuration file")
    # The repo folder may be given relative to the configuration file location.
    ansible_repo_folder = appendPath(os.path.dirname(model["data"]["configFile"]), model["config"]["kubespray"]["ansible_repo_folder"])
    model["config"]["kubespray"]["ansible_repo_folder"] = ansible_repo_folder
    model["data"]["rolePaths"].add(appendPath(ansible_repo_folder, "roles"))
    # Number of dots in a fully qualified name: dots in the domain, plus one.
    model["data"]["dnsNbrDots"] = model["cluster"]["domain"].count(".") + 1
    return True
def groom(plugin, model):
    """Groom the ansible plugin: resolve playbook paths and roles paths from the cluster
    definition, from other plugins and from the configuration file; default role scopes."""
    if ANSIBLE in model[CLUSTER]:
        setDefaultInMap(model[CLUSTER][ANSIBLE], DISABLED, False)
        if model[CLUSTER][ANSIBLE][DISABLED]:
            return False
        if PLAYBOOKS in model[CLUSTER][ANSIBLE]:
            for idx in range(0, len(model[CLUSTER][ANSIBLE][PLAYBOOKS])):
                # Playbook files are relative to the cluster definition file.
                model[CLUSTER][ANSIBLE][PLAYBOOKS][idx][FILE] = appendPath(model[DATA][SOURCE_FILE_DIR], model[CLUSTER][ANSIBLE][PLAYBOOKS][idx][FILE])
        if ROLES_PATHS in model[CLUSTER][ANSIBLE]:
            for rp in model[CLUSTER][ANSIBLE][ROLES_PATHS]:
                model[DATA]["rolePaths"].add(appendPath(model[DATA][SOURCE_FILE_DIR], rp))
        if ROLES_PATHS_FROM_PLUGINS in model[CLUSTER][ANSIBLE]:
            # Other plugins may ship ansible roles in a 'roles' subfolder.
            for pluginName in model[CLUSTER][ANSIBLE][ROLES_PATHS_FROM_PLUGINS]:
                plugin = lookupPlugin(pluginName, model[CONFIG]["plugins_paths"])
                if plugin != None:
                    rolesPath = appendPath(plugin.path, "roles")
                    if os.path.exists(rolesPath):
                        model['data']["rolePaths"].add(rolesPath)
                    else:
                        ERROR("ansible.{}: There is no 'roles' folder in plugin '{}'".format(ROLES_PATHS_FROM_PLUGINS, pluginName))
                else:
                    ERROR("ansible.{}: plugin '{}' not found".format(ROLES_PATHS_FROM_PLUGINS, pluginName))
        if ROLES in model[CLUSTER][ANSIBLE]:
            for role in model[CLUSTER][ANSIBLE][ROLES]:
                # Roles apply to all nodes unless a narrower scope is given.
                setDefaultInMap(role, SCOPE, "all")
    # Roles paths may also be provided by the configuration file, relative to its location.
    if ANSIBLE in model[CONFIG] and ROLES_PATHS in model[CONFIG][ANSIBLE]:
        for rp in model[CONFIG][ANSIBLE][ROLES_PATHS]:
            model[DATA]["rolePaths"].add(appendPath(os.path.dirname(model[DATA]["configFile"]), rp))
    return True
def groom(_plugin, model):
    """Groom the cert-manager plugin. No grooming work; only the disabled flag matters."""
    setDefaultInMap(model[CLUSTER], K8S, {})
    setDefaultInMap(model[CLUSTER][K8S], CERT_MANAGER, {})
    setDefaultInMap(model[CLUSTER][K8S][CERT_MANAGER], DISABLED, False)
    return not model[CLUSTER][K8S][CERT_MANAGER][DISABLED]
def groom(_plugin, model):
    """Normalize roles and nodes: flatten role-embedded nodes into the cluster node list,
    build the roleByName / nodeByName / groupByName lookup maps and compute node FQDNs."""
    if NODES not in model[CLUSTER]:
        model[CLUSTER][NODES] = []
    # ----------------------------------------- Handle roles
    model[DATA][ROLE_BY_NAME] = {}
    for rl in model[CLUSTER]["roles"]:
        # Deep copy: the groomed role must not alias the raw cluster definition.
        role = copy.deepcopy(rl)
        model[DATA][ROLE_BY_NAME][role[NAME]] = role
        # Setup role groups list, by adding role name and dedup.
        setDefaultInMap(role, GROUPS, [])
        role[GROUPS].append(role[NAME])
        role[GROUPS] = dedup(role[GROUPS])
        # --------------- Handle embedded nodes by pushing them back in cluster
        if NODES in role:
            for node in role[NODES]:
                if ROLE in node and node[ROLE] != role[NAME]:
                    ERROR("Node {}: role mismatch: '{}' != '{}'".format(node[NAME], node[ROLE], role[NAME]))
                node[ROLE] = role[NAME]
                # Handle node's groups
                setDefaultInMap(node, GROUPS, [])
                node[GROUPS].extend(role[GROUPS])
                node[GROUPS] = dedup(node[GROUPS])
                # Add node in cluster
                model[CLUSTER][NODES].append(node)
            del role[NODES]
        role[NODES] = []  # Replace by an array of name
        # ------------- domain
        role[DOMAIN] = locate(DOMAIN, role, model[CLUSTER], "Role '{}': Missing domain definition (And no default value in cluster definition)".format(role[NAME]))
    # ----------------------------------------- Handle nodes
    model[DATA][GROUP_BY_NAME] = {}
    model[DATA][NODE_BY_NAME] = {}
    for node in model[CLUSTER][NODES]:
        if node[NAME] in model[DATA][NODE_BY_NAME]:
            ERROR("Node '{}' is defined twice!".format(node[NAME]))
        model[DATA][NODE_BY_NAME][node[NAME]] = node
        # Hostname defaults to the node name.
        if not HOSTNAME in node:
            node[HOSTNAME] = node[NAME]
        if ROLE not in node:
            ERROR("Node '{}': Missing role definition".format(node[NAME]))
        if node[ROLE] not in model[DATA][ROLE_BY_NAME]:
            ERROR("Node '{}' reference an unexisting role ({})".format(node[NAME], node[ROLE]))
        role = model[DATA][ROLE_BY_NAME][node[ROLE]]
        role[NODES].append(node[NAME])
        # FQDN = hostname.domain, or bare hostname when the role has no domain.
        node[FQDN] = (node[HOSTNAME] + "." + role[DOMAIN]) if (role[DOMAIN] != None) else node[HOSTNAME]
        # And add to GROUP_BY_NAME (Mainly for ansible groups)
        for grp in node[GROUPS]:
            setDefaultInMap(model[DATA][GROUP_BY_NAME], grp, [])
            model[DATA][GROUP_BY_NAME][grp].append(node[NAME])
    return True  # Always enabled
def groom(_plugin, model):
    """Groom ingress_nginx: resolve external/dashboard addresses and check the
    ssl-passthrough command line argument needed for dashboard access.

    Returns False when the plugin is disabled, True otherwise. ERROR() aborts grooming.
    """
    setDefaultInMap(model[CLUSTER], K8S, {})
    setDefaultInMap(model[CLUSTER][K8S], INGRESS_NGINX, {})
    setDefaultInMap(model[CLUSTER][K8S][INGRESS_NGINX], DISABLED, False)
    setDefaultInMap(model[CLUSTER][K8S][INGRESS_NGINX], ENABLE_SSL_PASSTHROUGH, False)
    if model[CLUSTER][K8S][INGRESS_NGINX][DISABLED]:
        return False
    else:
        if EXTERNAL_IP in model[CLUSTER][K8S][INGRESS_NGINX]:
            model[CLUSTER][K8S][INGRESS_NGINX][EXTERNAL_IP] = resolveDnsAndCheck(model[CLUSTER][K8S][INGRESS_NGINX][EXTERNAL_IP])
        if DASHBOARD_HOST in model[CLUSTER][K8S][INGRESS_NGINX]:
            dashboard_ip = resolveDns(model[CLUSTER][K8S][INGRESS_NGINX][DASHBOARD_HOST])
            if dashboard_ip is not None:
                # When both are set, external_ip and dashboard_host must point to the same address.
                if EXTERNAL_IP in model[CLUSTER][K8S][INGRESS_NGINX] and model[CLUSTER][K8S][INGRESS_NGINX][EXTERNAL_IP] != dashboard_ip:
                    ERROR("k8s.ingress_nginx: 'external_ip' and 'dashboard_host' must resolve on same ip ({} != {})".format(model[CLUSTER][K8S][INGRESS_NGINX][EXTERNAL_IP], dashboard_ip))
            else:
                # Fix: Logger.warn() is deprecated; warning() is the documented API.
                logger.warning("Unable to resolve '{}' for now. May be this DNS entry will be created later.".format(model[CLUSTER][K8S][INGRESS_NGINX][DASHBOARD_HOST]))
            # Dashboard access requires the controller to run with --enable-ssl-passthrough.
            enableSslPassthrough = False
            if COMMAND_LINE_ARGUMENTS in model[CLUSTER][K8S][INGRESS_NGINX]:
                for cla in model[CLUSTER][K8S][INGRESS_NGINX][COMMAND_LINE_ARGUMENTS]:
                    if cla == "--enable-ssl-passthrough":
                        enableSslPassthrough = True
            if not enableSslPassthrough:
                # Fix: corrected 'Dashbaord' typo in the error message.
                ERROR("k8s.ingress_nginx: Dashboard access require '--enable-ssl-passthrough' command line argument to be defined")
        return True
def groom(_plugin, model):
    """Groom docker_nexus_proxy: port defaults and optional upstream ('next') proxy settings,
    split into host/port/credentials for templating."""
    setDefaultInMap(model[CLUSTER][DOCKER_NEXUS_PROXY], DISABLED, False)
    if model[CLUSTER][DOCKER_NEXUS_PROXY][DISABLED]:
        return False
    else:
        setDefaultInMap(model[CLUSTER][DOCKER_NEXUS_PROXY], "nexus_default_port", 8081)
        setDefaultInMap(model[CLUSTER][DOCKER_NEXUS_PROXY], "nexus_internal_docker_port", 8082)
        setDefaultInMap(model[DATA], DOCKER_NEXUS_PROXY, {})
        if NEXT_PROXY_ID in model[CLUSTER][DOCKER_NEXUS_PROXY]:
            lookupHttpProxy(model, model[CLUSTER][DOCKER_NEXUS_PROXY][NEXT_PROXY_ID], DOCKER_NEXUS_NEXT_PROXY)
            proxy = model[DATA][HTTPPROXIES][DOCKER_NEXUS_NEXT_PROXY]
            if HTTP_PROXY in proxy:
                # Decompose the http proxy URL; missing credentials become empty strings.
                x = urlparse(proxy[HTTP_PROXY])
                model[DATA][DOCKER_NEXUS_PROXY][NEXT_PROXY_HTTP] = {}
                model[DATA][DOCKER_NEXUS_PROXY][NEXT_PROXY_HTTP][HOST] = x.hostname
                model[DATA][DOCKER_NEXUS_PROXY][NEXT_PROXY_HTTP][PORT] = x.port
                model[DATA][DOCKER_NEXUS_PROXY][NEXT_PROXY_HTTP][USERNAME] = x.username if x.username is not None else ""
                model[DATA][DOCKER_NEXUS_PROXY][NEXT_PROXY_HTTP][PASSWORD] = x.password if x.password is not None else ""
            if HTTPS_PROXY in proxy:
                # Same decomposition for the https proxy URL.
                x = urlparse(proxy[HTTPS_PROXY])
                model[DATA][DOCKER_NEXUS_PROXY][NEXT_PROXY_HTTPS] = {}
                model[DATA][DOCKER_NEXUS_PROXY][NEXT_PROXY_HTTPS][HOST] = x.hostname
                model[DATA][DOCKER_NEXUS_PROXY][NEXT_PROXY_HTTPS][PORT] = x.port
                model[DATA][DOCKER_NEXUS_PROXY][NEXT_PROXY_HTTPS][USERNAME] = x.username if x.username is not None else ""
                model[DATA][DOCKER_NEXUS_PROXY][NEXT_PROXY_HTTPS][PASSWORD] = x.password if x.password is not None else ""
            if NO_PROXY_JAVA in proxy:
                # no_proxy is stored as a comma separated string; expose it as a list.
                model[DATA][DOCKER_NEXUS_PROXY][NO_PROXY] = proxy[NO_PROXY_JAVA].split(",")
        return True
def groom(_plugin, model):
    """Groom the helm plugin: repository lookup and optional http proxy."""
    setDefaultInMap(model[CLUSTER], K8S, {})
    setDefaultInMap(model[CLUSTER][K8S], HELM, {})
    setDefaultInMap(model[CLUSTER][K8S][HELM], DISABLED, False)
    if model[CLUSTER][K8S][HELM][DISABLED]:
        return False
    helmConfig = model[CLUSTER][K8S][HELM]
    lookupRepository(model, HELM, repoId=helmConfig["repo_id"])
    # proxy_id is optional; pass None when absent.
    lookupHttpProxy(model, helmConfig.get("proxy_id"), "helm")
    return True
def groom(_plugin, model):
    """Groom harbor: repository, ssl cert/key source paths and misc defaults."""
    setDefaultInMap(model[CLUSTER], HARBOR, {})
    setDefaultInMap(model[CLUSTER][HARBOR], DISABLED, False)
    if model[CLUSTER][HARBOR][DISABLED]:
        return False
    harbor = model[CLUSTER][HARBOR]
    lookupRepository(model, "harbor", repoId=harbor[REPO_ID])
    # Both TLS files are given relative to the cluster definition file and must exist.
    for fileKey in (SSL_CERT_SRC, SSL_KEY_SRC):
        harbor[fileKey] = appendPath(model[DATA][SOURCE_FILE_DIR], harbor[fileKey])
        if not os.path.isfile(harbor[fileKey]):
            ERROR("Unable to find '{}'!".format(harbor[fileKey]))
    setDefaultInMap(harbor, VALIDATE_API_CERT, False)
    setDefaultInMap(harbor, HOSTNAME, "{{ ansible_fqdn }}")
    return True
def groom(_plugin, model):
    """Groom cert-manager: build the cluster issuers list from configured issuer definitions."""
    groomIssuers(model)
    setDefaultInMap(model[CLUSTER], K8S, {})
    setDefaultInMap(model[CLUSTER][K8S], CERT_MANAGER, {})
    setDefaultInMap(model[CLUSTER][K8S][CERT_MANAGER], DISABLED, False)
    if model[CLUSTER][K8S][CERT_MANAGER][DISABLED]:
        return False
    model[DATA][CLUSTER_ISSUERS] = []
    if CLUSTER_ISSUERS in model[CLUSTER][K8S][CERT_MANAGER]:
        for issuerDef in model[CLUSTER][K8S][CERT_MANAGER][CLUSTER_ISSUERS]:
            # Each referenced issuer id must exist in the configuration file.
            if issuerDef[ID] not in model[DATA][CERT_MANAGER_ISSUER_BY_ID]:
                ERROR("Issuer of id '{}' is not defined in configuration file!".format(issuerDef[ID]))
            issuer = model[DATA][CERT_MANAGER_ISSUER_BY_ID][issuerDef[ID]]
            issuer[NAME] = issuerDef[NAME]
            model[DATA][CLUSTER_ISSUERS].append(issuer)
    return True
def groom(_plugin, model):
    """Groom argocd: resolve the load balancer IP and validate the ingress host mapping."""
    setDefaultInMap(model[CLUSTER], K8S, {})
    setDefaultInMap(model[CLUSTER][K8S], ARGOCD, {})
    setDefaultInMap(model[CLUSTER][K8S][ARGOCD], DISABLED, False)
    argocd = model[CLUSTER][K8S][ARGOCD]
    if argocd[DISABLED]:
        return False
    if LOAD_BALANCER_IP in argocd:
        argocd[LOAD_BALANCER_IP] = resolveDnsAndCheck(argocd[LOAD_BALANCER_IP])
    if INGRESS_NGINX_HOST in argocd:
        if INGRESS_NGINX in model[CLUSTER][K8S] and EXTERNAL_IP in model[CLUSTER][K8S][INGRESS_NGINX]:
            ingress_ip = resolveDnsAndCheck(model[CLUSTER][K8S][INGRESS_NGINX][EXTERNAL_IP])
            # resolveDnsAndCheck raises an error if the host does not resolve.
            argocd_ip = resolveDnsAndCheck(argocd[INGRESS_NGINX_HOST])
            if argocd_ip != ingress_ip:
                ERROR("k8s.argocd: 'ingress_nginx_host' and 'ingress_nginx.external_ip' must resolve on same ip ({} != {})".format(argocd_ip, ingress_ip))
    return True
def groom(plugin, model):
    """Groom postgresql_server: repository lookup and md5-encryption of clear text passwords.

    NOTE(review): hashlib.md5() is fed str objects directly — python2 only; python3 would
    require .encode().
    """
    setDefaultInMap(model[CLUSTER][POSTGRESQL_SERVER], DISABLED, False)
    if model[CLUSTER][POSTGRESQL_SERVER][DISABLED]:
        return False
    else:
        setDefaultInMap(model[CLUSTER][POSTGRESQL_SERVER], USERS, [])
        setDefaultInMap(model[CLUSTER][POSTGRESQL_SERVER], DATABASES, [])  # To ease templates
        lookupRepository(model, POSTGRESQL_SERVER)
        unencryptedCount = 0
        if PASSWORD in model[CLUSTER][POSTGRESQL_SERVER]:
            # Passwords already prefixed with "md5" are assumed encrypted (postgres convention).
            if not model[CLUSTER][POSTGRESQL_SERVER][PASSWORD].startswith("md5"):
                unencryptedCount += 1
                # Will encrypt password, as unencrypted ones may be unsupported in future postgresl releases
                model[CLUSTER][POSTGRESQL_SERVER][PASSWORD] = "md5" + hashlib.md5(model[CLUSTER][POSTGRESQL_SERVER][PASSWORD] + 'postgres').hexdigest()
        for user in model[CLUSTER][POSTGRESQL_SERVER][USERS]:
            if not user[PASSWORD].startswith("md5"):
                unencryptedCount += 1
                # Postgres md5 scheme: md5(password + username).
                user[PASSWORD] = "md5" + hashlib.md5(user[PASSWORD] + user[NAME]).hexdigest()
        if unencryptedCount > 0:
            print("")
            print("**WARNING**: usage of clear text password is discouraged.")
            print("To encrypt a password: \n\tpython -c \"import hashlib; print 'md5' + hashlib.md5('yourPassword'+'yourUser').hexdigest()\"")
            print("And set result in place of clear text password.")
        return True
def groom(_plugin, model):
    """Groom topolvm: default device classes, then compute per-node volume groups,
    device classes and lvmd host set from each role's data disks.

    NOTE(review): python2-only code (print statements, iteritems, Set — presumably
    from the deprecated 'sets' module; confirm the file's imports).
    """
    setDefaultInMap(model[CLUSTER][K8S][TOPOLVM], DISABLED, False)
    setDefaultInMap(model[DATA], K8S, {})
    setDefaultInMap(model[DATA][K8S], TOPOLVM, {})
    if model[CLUSTER][K8S][TOPOLVM][DISABLED]:
        return False
    else:
        # Index device classes by name, applying defaults derived from the class name.
        deviceClassByName = {}
        for deviceClass in model[CLUSTER][K8S][TOPOLVM][DEVICE_CLASSES]:
            setDefaultInMap(deviceClass, SPARE_GB, 10)
            setDefaultInMap(deviceClass, STORAGE_CLASS, "topolvm-{}".format(deviceClass[NAME]))
            setDefaultInMap(deviceClass, VOLUME_GROUP, "topolvm-{}".format(deviceClass[NAME]))
            setDefaultInMap(deviceClass, FSTYPE, "xfs")
            deviceClassByName[deviceClass[NAME]] = deviceClass
        lookupRepository(model, None, "topolvm", model[CLUSTER][K8S][TOPOLVM][REPO_ID])
        model[DATA][K8S][TOPOLVM][VOLUME_GROUP_BY_NODE] = {}
        model[DATA][K8S][TOPOLVM][DEVICE_CLASSES_BY_NODE] = {}
        model[DATA][K8S][TOPOLVM][LVMD_HOSTS] = Set()
        for _, role in model[DATA][ROLE_BY_NAME].iteritems():
            deviceClassNames = Set()
            volumeGroupByName = {}
            if DATA_DISKS in role:
                # Aggregate this role's disks into volume groups, keyed by device class.
                for disk in role[DATA_DISKS]:
                    if TOPOLVM_DEVICE_CLASS in disk:
                        if disk[TOPOLVM_DEVICE_CLASS] not in deviceClassByName:
                            ERROR("Unknown device_class {} in role {}".format(disk[TOPOLVM_DEVICE_CLASS], role[NAME]))
                        dc = deviceClassByName[disk[TOPOLVM_DEVICE_CLASS]]
                        deviceClassNames.add(dc[NAME])
                        vgName = dc[VOLUME_GROUP]
                        setDefaultInMap(volumeGroupByName, vgName, {NAME: vgName, PHYSICAL_VOLUMES: [], SIZE: 0})
                        vg = volumeGroupByName[vgName]
                        vg[PHYSICAL_VOLUMES].append("/dev/" + disk[DEVICE])
                        vg[SIZE] += disk[SIZE]
            if len(deviceClassNames) > 0:
                devicesClasses = []
                for dcName in deviceClassNames:
                    dc = copy.deepcopy(deviceClassByName[dcName])  # Need a deepcopy as default may be different
                    dc["default"] = (len(devicesClasses) == 0)  # Currently, topolvm need a default, or error
                    devicesClasses.append(dc)
                volumeGroups = list(volumeGroupByName.values())
                # Every node of the role shares the same device classes and volume groups.
                for nodeName in role[NODES]:
                    model[DATA][K8S][TOPOLVM][DEVICE_CLASSES_BY_NODE][nodeName] = devicesClasses
                    model[DATA][K8S][TOPOLVM][VOLUME_GROUP_BY_NODE][nodeName] = volumeGroups
                    model[DATA][K8S][TOPOLVM][LVMD_HOSTS].add(nodeName)
        # Informational summary of provisioned capacity per node.
        print "------- TOPOLVM Node capacity:"
        for nodeName in model[DATA][NODE_BY_NAME].keys():
            if nodeName in model[DATA][K8S][TOPOLVM][VOLUME_GROUP_BY_NODE]:
                for vg in model[DATA][K8S][TOPOLVM][VOLUME_GROUP_BY_NODE][nodeName]:
                    print "\t{}:{} -> {}GB".format(nodeName, vg[NAME], vg[SIZE])
        return True