def groom(_plugin, model):
    """Groom the metallb plugin.

    Returns False (plugin disabled) when k8s.metallb.disabled is set.
    Otherwise resolves all DNS names in metallb.external_ip_ranges and the
    optional metallb.dashboard_ip, validates each range, and checks the
    dashboard ip falls inside one of the ranges.
    """
    setDefaultInMap(model[CLUSTER], K8S, {})
    setDefaultInMap(model[CLUSTER][K8S], METALLB, {})
    setDefaultInMap(model[CLUSTER][K8S][METALLB], DISABLED, False)
    if model[CLUSTER][K8S][METALLB][DISABLED]:
        return False
    metallb = model[CLUSTER][K8S][METALLB]
    # BUGFIX: the dashboard comparisons below used to run even when no
    # dashboard_ip was defined (NameError on 'dashboard_ip'); range
    # resolution must happen unconditionally since other plugins read the
    # resolved first/last values.
    dashboard_ip = None
    if DASHBOARD_IP in metallb:
        metallb[DASHBOARD_IP] = resolveDnsAndCheck(metallb[DASHBOARD_IP])
        dashboard_ip = ipaddress.ip_address(u"" + metallb[DASHBOARD_IP])
    dashboardInRange = False
    for rangeip in metallb[EXTERNAL_IP_RANGES]:
        # first/last may be DNS names; normalize them to dotted-quad form.
        rangeip[FIRST] = resolveDnsAndCheck(rangeip[FIRST])
        rangeip[LAST] = resolveDnsAndCheck(rangeip[LAST])
        first_ip = ipaddress.ip_address(u"" + rangeip[FIRST])
        last_ip = ipaddress.ip_address(u"" + rangeip[LAST])
        if not last_ip > first_ip:
            ERROR("Invalid metallb.external_ip_range (first >= last)")
        if dashboard_ip is not None and first_ip <= dashboard_ip <= last_ip:
            dashboardInRange = True
    if dashboard_ip is not None and not dashboardInRange:
        ERROR("metallb.dashboard_ip is not included in one of metallb.external_ip_ranges")
    return True
def initVault(model):
    # Initialize the module-global 'vault' used to decrypt/encrypt sensitive
    # configuration. If the cluster defines a 'vault_id', the matching entry
    # is looked up in the configuration's vaults list; its password file is
    # read and an optional "safe config" file is decrypted and merged into
    # the model. Calls ERROR() (which aborts) on any inconsistency.
    global vault
    if VAULT_ID in model[CLUSTER]:
        vaultId = model[CLUSTER][VAULT_ID]
        if VAULTS not in model["config"]:
            ERROR("{} is missing from configuration while encryption id required ('vault_id' is defined)".format(VAULTS))
        # NOTE: relies on Python 2 filter() returning a list (len() below).
        l = filter(lambda x: x["vault_id"] == vaultId, model["config"][VAULTS])
        if len(l) > 1:
            ERROR("{}: vault_id '{}' is defined twice in configuration file!".format(VAULTS, vaultId))
        if len(l) != 1:
            ERROR("{}: vault_id '{}' is not defined in configuration file!".format(VAULTS, vaultId))
        # Password file path is relative to the configuration file location.
        f = appendPath(os.path.dirname(model[DATA][CONFIG_FILE]), l[0][PASSWORD_FILE])
        if not (os.path.isfile(f) and os.access(f, os.R_OK)):
            ERROR("Non existing or not accessible vault password file '{}'.".format(f))
        pwd = file2String(f)
        pwd = pwd.strip()
        model[DATA][VAULT_PASSWORD_FILE] = f
        vault = Vault(pwd)
        # Optional "safe config": an encrypted side file merged into the model.
        if SAFE_CONFIFG_FILE in l[0]:
            scFileName = appendPath(os.path.dirname(model[DATA][CONFIG_FILE]), l[0][SAFE_CONFIFG_FILE])
            model[DATA][_SAFE_CONFIG_FILE_] = scFileName
            if not (os.path.isfile(scFileName) and os.access(scFileName, os.R_OK)):
                ERROR("Non existing or not accessible safe config file '{}'.".format(scFileName))
            logger.info("Loading safe config from '{}'".format(scFileName))
            data, was_encrypted = vault.encryptedFile2String(scFileName)
            safeConfig = yaml.load(data, Loader=yaml.SafeLoader)
            model[SAFE_CONFIG] = safeConfig
            if not was_encrypted:
                # Encrypt the file in place so plaintext does not linger on disk.
                print("\n'{}' was not encrypted. Will encrypt it".format(scFileName))
                vault.stringToEncryptedFile(data, scFileName)
    else:
        # No encryption requested for this cluster.
        vault = None
def groom(self, model):
    # Groom this plugin: register its ansible role paths in
    # model['data']['rolePaths'], then, if the plugin ships a groomer.py,
    # load it dynamically and invoke its groom(plugin, model) hook. The
    # hook's boolean return value drives self.enabled.
    extRolePath = appendPath(self.path, "roles.yml")
    if os.path.exists(extRolePath):
        # roles.yml holds a list of role paths, relative to the plugin folder.
        pathList = yaml.load(open(extRolePath), Loader=yaml.SafeLoader)
        if not isinstance(pathList, list):
            ERROR("File {} must contain a list of path".format(extRolePath))
        for p in pathList:
            model['data']["rolePaths"].add(appendPath(self.path, p))
    else:
        # Default convention: a 'roles' subfolder inside the plugin.
        rolesPath = appendPath(self.path, "roles")
        if os.path.exists(rolesPath):
            model['data']["rolePaths"].add(rolesPath)
    codeFile = appendPath(self.path, "groomer.py")
    if os.path.exists(codeFile):
        logger.debug("Will load '{0}' as python code".format(codeFile))
        # Python 2 dynamic import; module registered under the plugin's name.
        self.groomer = imp.load_source(self.name, codeFile)
        if hasattr(self.groomer, "groom"):
            method = getattr(self.groomer, "groom")
            logger.debug("FOUND '{0}' method".format(str(method)))
            ret = method(self, model)
            # The hook must return a strict boolean (enabled yes/no).
            if ret == None or not isinstance(ret, bool):
                ERROR("Invalid plugin '{}'. groom(model) must return a boolean (enabled yes/no).".format(self.name))
            else:
                self.enabled = ret
def groom(_plugin, model):
    # Groom cluster roles and nodes: push role-embedded nodes back up to the
    # cluster level, then build the roleByName / nodeByName / groupByName
    # indexes in model[DATA]. Always returns True (plugin always enabled).
    if NODES not in model[CLUSTER]:
        model[CLUSTER][NODES] = []
    # ----------------------------------------- Handle roles
    model[DATA][ROLE_BY_NAME] = {}
    for rl in model[CLUSTER]["roles"]:
        # Deep copy, so grooming never mutates the user-provided definition.
        role = copy.deepcopy(rl)
        model[DATA][ROLE_BY_NAME][role[NAME]] = role
        # Setup role groups list, by adding role name and dedup.
        setDefaultInMap(role, GROUPS, [])
        role[GROUPS].append(role[NAME])
        role[GROUPS] = dedup(role[GROUPS])
        # --------------- Handle embedded nodes by pushing them back in cluster
        if NODES in role:
            for node in role[NODES]:
                # A node embedded in a role may not claim a different role.
                if ROLE in node and node[ROLE] != role[NAME]:
                    ERROR("Node {}: role mismatch: '{}' != '{}'".format(node[NAME], node[ROLE], role[NAME]))
                node[ROLE] = role[NAME]
                # Handle node's groups: node inherits all of its role's groups.
                setDefaultInMap(node, GROUPS, [])
                node[GROUPS].extend(role[GROUPS])
                node[GROUPS] = dedup(node[GROUPS])
                # Add node in cluster
                model[CLUSTER][NODES].append(node)
            del role[NODES]
        role[NODES] = []  # Replace by an array of name
        # ------------- domain (role value, falling back to cluster default)
        role[DOMAIN] = locate(DOMAIN, role, model[CLUSTER], "Role '{}': Missing domain definition (And no default value in cluster definition)".format(role[NAME]))
    # ----------------------------------------- Handle nodes
    model[DATA][GROUP_BY_NAME] = {}
    model[DATA][NODE_BY_NAME] = {}
    for node in model[CLUSTER][NODES]:
        if node[NAME] in model[DATA][NODE_BY_NAME]:
            ERROR("Node '{}' is defined twice!".format(node[NAME]))
        model[DATA][NODE_BY_NAME][node[NAME]] = node
        if not HOSTNAME in node:
            node[HOSTNAME] = node[NAME]
        if ROLE not in node:
            ERROR("Node '{}': Missing role definition".format(node[NAME]))
        if node[ROLE] not in model[DATA][ROLE_BY_NAME]:
            ERROR("Node '{}' reference an unexisting role ({})".format(node[NAME], node[ROLE]))
        role = model[DATA][ROLE_BY_NAME][node[ROLE]]
        # Role keeps the list of its node names (role[NODES] reset above).
        role[NODES].append(node[NAME])
        # FQDN is hostname[.domain]; domain may be None (no domain suffix).
        node[FQDN] = (node[HOSTNAME] + "." + role[DOMAIN]) if (role[DOMAIN] != None) else node[HOSTNAME]
        # And add to GROUP_BY_NAME (Mainly for ansible groups)
        for grp in node[GROUPS]:
            setDefaultInMap(model[DATA][GROUP_BY_NAME], grp, [])
            model[DATA][GROUP_BY_NAME][grp].append(node[NAME])
    return True  # Always enabled
def groom(plugin, model):
    """Groom the confluent plugin.

    Returns False when confluent.disabled is set. Otherwise resolves the
    confluent repository, propagates per-role kafka_log_dirs onto nodes,
    registers the ansible repo roles folder and builds the 'preflight'
    ansible group holding every node of the cluster.
    """
    setDefaultInMap(model["cluster"]["confluent"], "disabled", False)
    if model["cluster"]["confluent"]["disabled"]:
        return False
    lookupRepository(model, "confluent")
    conf = model["config"]
    if "confluent" not in conf or "ansible_repo_folder" not in conf["confluent"]:
        ERROR("Missing 'confluent.ansible_repo_folder' in configuration file")
    roleByName = model["data"]["roleByName"]
    for node in model['cluster']['nodes']:
        if "kafka_log_dirs" in node:
            # An explicit empty list means "no override": drop it.
            if len(node["kafka_log_dirs"]) == 0:
                del(node["kafka_log_dirs"])
        elif "kafka_log_dirs" in roleByName[node["role"]]:
            # Inherit the value defined at role level.
            node["kafka_log_dirs"] = roleByName[node["role"]]["kafka_log_dirs"]
    # The ansible repo folder is relative to the configuration file.
    repoFolder = appendPath(os.path.dirname(model["data"]["configFile"]), conf["confluent"]["ansible_repo_folder"])
    conf["confluent"]["ansible_repo_folder"] = repoFolder
    model["data"]["rolePaths"].add(appendPath(repoFolder, "roles"))
    # We need to define an ansible group "preflight" hosting all nodes
    model["data"]["groupByName"]["preflight"] = [n["name"] for n in model["cluster"]["nodes"]]
    return True
def isCidr(peer):
    """Return True if 'peer' is a CIDR, False if it looks like a name.

    A token starting with a non-digit is treated as a symbolic name (not a
    CIDR). A token starting with a digit must match the CIDR regex,
    otherwise ERROR() is raised.
    """
    if not peer[0].isdigit():
        return False
    if not cidrCheck.match(peer):
        ERROR("Invalid source/destination '{}'. Not a valid CIDR".format(peer))
    return True
def groom(plugin, model):
    """Groom the vagrant plugin (local_yum_repo variant).

    Defaults vagrant.local_yum_repo from the presence of a vagrant yum repo
    in the configuration, publishes that repo under model['data'], merges
    role-level and vagrant-level synced folders into each node, and records
    the build script path. Always returns True.
    """
    cfg = model["config"]
    data = model["data"]
    vagrant = model["cluster"]["vagrant"]
    repoInConfig = ("repositories" in cfg
                    and "vagrant" in cfg["repositories"]
                    and "yum_repo_base_url" in cfg["repositories"]["vagrant"])
    setDefaultInMap(vagrant, "local_yum_repo", repoInConfig)
    if vagrant["local_yum_repo"] and not repoInConfig:
        ERROR("'repositories.vagrant.repo_yum_base_url' is not defined in config file while 'vagrant.local_yum_repo' is set to True in '{}'".format(data["sourceFileDir"]))
    if repoInConfig:
        # All plugins look up their repositories in model["data"]. So does the vagrant one.
        setDefaultInMap(data, "repositories", {})
        setDefaultInMap(data["repositories"], "vagrant", {})
        data["repositories"]["vagrant"]["yum_repo_base_url"] = cfg["repositories"]["vagrant"]["yum_repo_base_url"]
    for node in model['cluster']['nodes']:
        if not SYNCED_FOLDERS in node:
            node[SYNCED_FOLDERS] = []
        # Node inherits synced folders from its role, then from vagrant itself.
        role = data["roleByName"][node["role"]]
        if SYNCED_FOLDERS in role:
            node[SYNCED_FOLDERS] += role[SYNCED_FOLDERS]
        if SYNCED_FOLDERS in vagrant:
            node[SYNCED_FOLDERS] += vagrant[SYNCED_FOLDERS]
    data["buildScript"] = appendPath(data["targetFolder"], "build.sh")
    return True  # Always enabled
def generate2(targetFilePath, tmpl, model):
    # Render 'tmpl' into targetFilePath. 'tmpl' is either a jinja2.Template
    # (rendered with the model bound to 'm') or a plain string written as-is.
    # Generated *.sh files are made executable. Python 2 (print statements).
    ensureFolder(os.path.dirname(targetFilePath))
    if isinstance(tmpl, jinja2.Template):
        try:
            result = tmpl.render(m=model)
        except jinja2.exceptions.TemplateRuntimeError as err:
            # Dump the full traceback before aborting, to ease template debugging.
            print '---------------------------------------------------------'
            traceback.print_exc(file=sys.stdout)
            print '---------------------------------------------------------'
            ERROR("Error in '{0}' file generation: {1}".format(targetFilePath, err.message))
    else:
        result = tmpl
    with open(targetFilePath, "w") as f:
        f.write(result)
    if targetFilePath.endswith(".sh"):
        # Grant execute to each class (user/group/other) that already has read.
        cp = stat.S_IMODE(os.lstat(targetFilePath).st_mode)
        os.chmod(targetFilePath, cp | stat.S_IXUSR | (stat.S_IXGRP if cp & stat.S_IRGRP else 0) | (stat.S_IXOTH if cp & stat.S_IROTH else 0))
        logger.info("File '{0}' successfully generated as executable".format(targetFilePath))
    else:
        logger.info("File '{0}' successfully generated".format(targetFilePath))
def handleTcpUdpPort(rule, prefix):
    """Return the (from_port, to_port) pair for a TCP/UDP firewall rule.

    Accepts either a single 'port' (numeric or a well-known name from
    PORT_FROM_STRING) or an explicit 'from_port'/'to_port' pair — never
    both. 'prefix' identifies the rule in error messages.
    """
    if PORT not in rule:
        # Explicit range form: both bounds are required.
        if FROM_PORT not in rule or TO_PORT not in rule:
            ERROR("{}: 'from_port' and 'to_port' must be both defined if 'port' is not".format(prefix))
        return rule[FROM_PORT], rule[TO_PORT]
    if FROM_PORT in rule or TO_PORT in rule:
        ERROR("{}: 'port' and ('from_port', 'to_port') can't be used together".format(prefix))
    numeric = numberOrNone(rule[PORT])
    if numeric is not None:
        return numeric, numeric
    # Symbolic port name ("ssh", "https", ...) mapped through PORT_FROM_STRING.
    portName = rule[PORT].strip().lower()
    if portName in PORT_FROM_STRING:
        return PORT_FROM_STRING[portName], PORT_FROM_STRING[portName]
    ERROR("{}: Unknown port name '{}'".format(prefix, rule[PORT]))
def appendPlugins(plugins, cluster, pluginsPath):
    """Append every plugin named in cluster['plugins'] to the 'plugins' list.

    Each name is resolved through lookupPlugin(); an unresolvable name aborts
    with ERROR().
    """
    if "plugins" not in cluster:
        return
    for pluginName in cluster['plugins']:
        found = lookupPlugin(pluginName, pluginsPath)
        if found is None:
            ERROR("Unable to find plugin '{}'".format(pluginName))
        else:
            plugins.append(found)
def groomIssuers(model):
    """Index the configuration's cert-manager issuers by id into model[DATA].

    Aborts with ERROR() if the same issuer id appears twice.
    """
    issuerById = {}
    model[DATA][CERT_MANAGER_ISSUER_BY_ID] = issuerById
    if CERT_MANAGER_ISSUERS in model[CONFIG]:
        for issuer in model[CONFIG][CERT_MANAGER_ISSUERS]:
            if issuer[ID] in issuerById:
                ERROR("Cert_manager_issuer of id '{}' is defined twice in configuration file!".format(issuer[ID]))
            issuerById[issuer[ID]] = issuer
def handleIcmpType(rule, prefix):
    """Return the (icmp_type, icmp_code) pair for an ICMP firewall rule.

    'icmp_type' is mandatory (numeric or a name from ICMP_TYPE_FROM_STRING);
    'icmp_code' defaults to 0. Port attributes are rejected for ICMP.
    'prefix' identifies the rule in error messages.
    """
    if PORT in rule or FROM_PORT in rule or TO_PORT in rule:
        ERROR("{}: There should be no port definition when using ICMP".format(prefix))
    if ICMP_TYPE not in rule:
        ERROR("{}: 'icmp_type' is mandatory when protocol == 'ICMP'".format(prefix))
    code = rule[ICMP_CODE] if ICMP_CODE in rule else 0
    numericType = numberOrNone(rule[ICMP_TYPE])
    if numericType is not None:
        return numericType, code
    # Symbolic type ("echo-request", ...) mapped through ICMP_TYPE_FROM_STRING.
    symbolic = rule[ICMP_TYPE].strip().lower()
    if symbolic in ICMP_TYPE_FROM_STRING:
        return ICMP_TYPE_FROM_STRING[symbolic], code
    ERROR("{}: Unknown 'icmp_type' value: {}".format(prefix, rule[ICMP_TYPE]))
def groom(_plugin, model):
    """Groom the drproxy plugin.

    Returns False when drproxy.disabled is set. Otherwise resolves the TLS
    material paths (cert, key, root CA) relative to the cluster source file
    and checks each file exists.
    """
    setDefaultInMap(model[CLUSTER][DRPROXY], DISABLED, False)
    if model[CLUSTER][DRPROXY][DISABLED]:
        return False
    drproxy = model[CLUSTER][DRPROXY]
    for fileKey in [CERT_FILE, KEY_FILE, ROOT_CA_FILE]:
        drproxy[fileKey] = appendPath(model[DATA][SOURCE_FILE_DIR], drproxy[fileKey])
        if not os.path.isfile(drproxy[fileKey]):
            ERROR("Unable to find '{}'!".format(drproxy[fileKey]))
    return True
def groom(_plugin, model):
    """Groom the kubespray-based k8s deployment.

    Returns False when k8s.kubespray.disabled is set. Otherwise resolves the
    docker/k9s/helm/files repositories, the kubespray helper, the optional
    HTTP proxies, registers the helper's ansible roles, and builds the
    docker certificate list in model[DATA].
    """
    setDefaultInMap(model[CLUSTER], K8S, {})
    setDefaultInMap(model[CLUSTER][K8S], KUBESPRAY, {})
    kspray = model[CLUSTER][K8S][KUBESPRAY]
    setDefaultInMap(kspray, DISABLED, False)
    setDefaultInMap(kspray, METRICS_SERVER, True)
    setDefaultInMap(kspray, AUDIT, False)
    setDefaultInMap(kspray, POD_SECURITY_POLICIES, True)
    if kspray[DISABLED]:
        return False
    lookupRepository(model, None, "docker_yum", kspray['docker_yum_repo_id'])
    if K9S_REPO_ID in kspray:
        lookupRepository(model, "k9s", repoId=kspray[K9S_REPO_ID])
    if HELM_REPO_ID in kspray:
        lookupRepository(model, "helm", repoId=kspray[HELM_REPO_ID])
    lookupHelper(model, KUBESPRAY, helperId=kspray["helper_id"])
    # Optional proxies: pass None when the id is not defined.
    lookupHttpProxy(model, kspray.get("docker_proxy_id"), "docker")
    lookupHttpProxy(model, kspray.get("master_root_proxy_id"), "master_root")
    # BUGFIX: the original read key 'yumproxy_id' while testing for
    # 'yum_proxy_id', raising KeyError whenever 'yum_proxy_id' was defined.
    lookupHttpProxy(model, kspray.get("yum_proxy_id"), "yum")
    if FILES_REPO_ID in kspray:
        lookupRepository(model, "kubespray_files", repoId=kspray[FILES_REPO_ID])
    model[DATA][ROLE_PATHS].add(appendPath(model[DATA][HELPERS][KUBESPRAY][FOLDER], "roles"))
    # DNS resolver 'ndots': dots in the cluster name, plus one.
    model[DATA]["dnsNbrDots"] = kspray[CLUSTER_NAME].count(".") + 1
    # Index the configuration's docker certificates by name, resolving and
    # checking their paths (relative to the configuration file).
    certByName = {}
    if DOCKER_CERTIFICATES in model["config"]:
        for cert in model["config"][DOCKER_CERTIFICATES]:
            cert["path"] = appendPath(os.path.dirname(model[DATA][CONFIG_FILE]), cert["path"])
            if not os.path.isfile(cert["path"]) or not os.access(cert["path"], os.R_OK):
                ERROR("Configuration error: docker_certificates.{}: Invalid path '{}'".format(cert["name"], cert["path"]))
            certByName[cert["name"]] = cert
    model[DATA][DOCKER_CERTIFICATES] = []
    if DOCKER_CERTIFICATES in kspray:
        for certName in kspray[DOCKER_CERTIFICATES]:
            if certName not in certByName:
                ERROR("docker_certificates '{}' is not defined in configuration file!".format(certName))
            cert = certByName[certName]
            if "port" in cert:
                cert["endpoint"] = "{}:{}".format(cert["host"], cert['port'])
            else:
                # BUGFIX: was cert["endoint"] (typo), so the portless endpoint
                # was stored under the wrong key and never used.
                cert["endpoint"] = cert["host"]
            model[DATA][DOCKER_CERTIFICATES].append(cert)
    return True
def walk(self, targetFileByName): """ Enrich the targetFileByName structure with file from this plugin """ #logger.debug(self.path + "<----") snippetsPath = appendPath(self.path, "snippets") pref = len(snippetsPath) + 1 for dirpath, dirnames, filenames in os.walk( snippetsPath): # @UnusedVariable #logger.debug("dirpath:{} dirnames:{} filename:{}".format(dirpath, dirnames, filenames)) for filename in filenames: #logger.debug(filename) if not filename == ".gitignore": sourceFile = os.path.join(dirpath, filename) targetFileName = sourceFile[pref:] if targetFileName.count(".") < 2: # We pass throught non super-suffixed files order = 0 ftype = "txt" else: # Handle the type and eventual suffix (Used as short comment) pos = targetFileName.rfind(".") suffix = targetFileName[pos + 1:] targetFileName = targetFileName[:pos] pos = suffix.find("-") if pos != -1: ftype = suffix[:pos] suffix = suffix[pos + 1:] else: ftype = suffix suffix = None # Now order number pos = targetFileName.rfind(".") idx = targetFileName[pos + 1:] targetFileName = targetFileName[:pos] try: order = int(idx) except ValueError: ERROR("'{0}' is not a valid file part".format( sourceFile)) logger.debug(sourceFile + "-->" + targetFileName + "(" + str(idx) + ")") if targetFileName not in targetFileByName: targetFileByName[targetFileName] = {} #targetFileByName[targetFileName].name = targetFileName targetFileByName[targetFileName]['fileParts'] = [] fp = {} fp['name'] = sourceFile fp['order'] = order fp['plugin'] = self.name fp['type'] = ftype if suffix != None: fp["suffix"] = suffix targetFileByName[targetFileName]['fileParts'].append(fp)
def buildConfig(sourceFileDir, baseConfigFile):
    """Locate, load and normalize the tool configuration file.

    The file is searched upward from 'sourceFileDir'. Its plugin paths,
    expressed relative to the configuration file, are made absolute.
    Returns a (config, configFile) tuple.
    """
    configFile = findUpward(baseConfigFile, sourceFileDir)
    logger.info("Using '{}' as configuration file".format(configFile))
    config = yaml.load(open(configFile), Loader=yaml.SafeLoader)
    if PLUGINS_PATH not in config:
        ERROR("Missing '{}' in configuration file".format(PLUGINS_PATH))
    # Adjust plugin path relative to the config file
    baseDir = os.path.dirname(configFile)
    config[PLUGINS_PATH] = [misc.appendPath(baseDir, p) for p in config[PLUGINS_PATH]]
    return (config, configFile)
def groom(_plugin, model):
    """Groom the ingress_nginx plugin.

    Returns False when k8s.ingress_nginx.disabled is set. Otherwise resolves
    'external_ip', checks the optional 'dashboard_host' resolves to the same
    ip, and — when a dashboard is configured — requires the
    '--enable-ssl-passthrough' command line argument.
    """
    setDefaultInMap(model[CLUSTER], K8S, {})
    setDefaultInMap(model[CLUSTER][K8S], INGRESS_NGINX, {})
    ingress = model[CLUSTER][K8S][INGRESS_NGINX]
    setDefaultInMap(ingress, DISABLED, False)
    setDefaultInMap(ingress, ENABLE_SSL_PASSTHROUGH, False)
    if ingress[DISABLED]:
        return False
    if EXTERNAL_IP in ingress:
        ingress[EXTERNAL_IP] = resolveDnsAndCheck(ingress[EXTERNAL_IP])
    if DASHBOARD_HOST in ingress:
        dashboard_ip = resolveDns(ingress[DASHBOARD_HOST])
        if dashboard_ip is not None:
            if EXTERNAL_IP in ingress and ingress[EXTERNAL_IP] != dashboard_ip:
                ERROR("k8s.ingress_nginx: 'external_ip' and 'dashboard_host' must resolve on same ip ({} != {})".format(ingress[EXTERNAL_IP], dashboard_ip))
        else:
            logger.warn("Unable to resolve '{}' for now. May be this DNS entry will be created later.".format(ingress[DASHBOARD_HOST]))
        # Dashboard access is TLS end-to-end, so nginx must be started with
        # SSL passthrough enabled. (Check scoped to dashboard configuration.)
        enableSslPassthrough = False
        if COMMAND_LINE_ARGUMENTS in ingress:
            for cla in ingress[COMMAND_LINE_ARGUMENTS]:
                if cla == "--enable-ssl-passthrough":
                    enableSslPassthrough = True
        if not enableSslPassthrough:
            # BUGFIX: error message typo ("Dashbaord" -> "Dashboard").
            ERROR("k8s.ingress_nginx: Dashboard access require '--enable-ssl-passthrough' command line argument to be defined")
    return True
def _checkLbAddress(model, component, label):
    """Resolve component[LB_ADDRESS] and check it against metallb ranges.

    No-op when the component has no lb_address. Aborts with ERROR() when
    metallb is absent/disabled or the address is outside every
    metallb.external_ip_ranges entry. 'label' names the component in error
    messages ("grafana" / "loki").
    """
    if LB_ADDRESS not in component:
        return
    if METALLB not in model[CLUSTER][K8S] or model[CLUSTER][K8S][METALLB][DISABLED]:
        ERROR("A lb_address is defined while there is no metallb plugin")
    component[LB_ADDRESS] = resolveDnsAndCheck(component[LB_ADDRESS])
    lb_address = ipaddress.ip_address(u"" + component[LB_ADDRESS])
    lbAddressInRange = False
    for rangeip in model[CLUSTER][K8S][METALLB][EXTERNAL_IP_RANGES]:
        first_ip = ipaddress.ip_address(u"" + rangeip[FIRST])
        last_ip = ipaddress.ip_address(u"" + rangeip[LAST])
        if first_ip <= lb_address <= last_ip:
            lbAddressInRange = True
    if not lbAddressInRange:
        ERROR("{}.lb_address is not included in one of metallb.external_ip_ranges".format(label))


def groom2(_plugin, model):
    """Validate the loki-stack load balancer addresses (grafana and loki).

    The original body duplicated the same 15-line check twice; it is now
    factored into _checkLbAddress().
    """
    _checkLbAddress(model, model[CLUSTER][K8S][LOKI_STACK][GRAFANA], "grafana")
    _checkLbAddress(model, model[CLUSTER][K8S][LOKI_STACK][LOKI], "loki")
def groom(_plugin, model):
    """Groom the harbor plugin.

    Returns False when harbor.disabled is set. Otherwise resolves the harbor
    repository, checks the SSL cert/key source files exist, and defaults
    'validate_api_cert' and 'hostname'.
    """
    setDefaultInMap(model[CLUSTER], HARBOR, {})
    setDefaultInMap(model[CLUSTER][HARBOR], DISABLED, False)
    harbor = model[CLUSTER][HARBOR]
    if harbor[DISABLED]:
        return False
    lookupRepository(model, "harbor", repoId=harbor[REPO_ID])
    # SSL material paths are relative to the cluster source file.
    for srcKey in [SSL_CERT_SRC, SSL_KEY_SRC]:
        harbor[srcKey] = appendPath(model[DATA][SOURCE_FILE_DIR], harbor[srcKey])
        if not os.path.isfile(harbor[srcKey]):
            ERROR("Unable to find '{}'!".format(harbor[srcKey]))
    setDefaultInMap(harbor, VALIDATE_API_CERT, False)
    setDefaultInMap(harbor, HOSTNAME, "{{ ansible_fqdn }}")
    return True
def groom(plugin, model):
    """Groom the 'ansible' plugin.

    Returns False when ansible.disabled is set. Resolves playbook file paths,
    registers role paths from the cluster definition, from other plugins and
    from the configuration file, and defaults each role's scope to "all".
    """
    if ANSIBLE in model[CLUSTER]:
        ansibleCfg = model[CLUSTER][ANSIBLE]
        setDefaultInMap(ansibleCfg, DISABLED, False)
        if ansibleCfg[DISABLED]:
            return False
        if PLAYBOOKS in ansibleCfg:
            # Playbook files are relative to the cluster source file.
            for playbook in ansibleCfg[PLAYBOOKS]:
                playbook[FILE] = appendPath(model[DATA][SOURCE_FILE_DIR], playbook[FILE])
        if ROLES_PATHS in ansibleCfg:
            for rp in ansibleCfg[ROLES_PATHS]:
                model[DATA]["rolePaths"].add(appendPath(model[DATA][SOURCE_FILE_DIR], rp))
        if ROLES_PATHS_FROM_PLUGINS in ansibleCfg:
            # Borrow the 'roles' folder of other plugins.
            for pluginName in ansibleCfg[ROLES_PATHS_FROM_PLUGINS]:
                referenced = lookupPlugin(pluginName, model[CONFIG]["plugins_paths"])
                if referenced is None:
                    ERROR("ansible.{}: plugin '{}' not found".format(ROLES_PATHS_FROM_PLUGINS, pluginName))
                else:
                    rolesPath = appendPath(referenced.path, "roles")
                    if os.path.exists(rolesPath):
                        model['data']["rolePaths"].add(rolesPath)
                    else:
                        ERROR("ansible.{}: There is no 'roles' folder in plugin '{}'".format(ROLES_PATHS_FROM_PLUGINS, pluginName))
        if ROLES in ansibleCfg:
            for role in ansibleCfg[ROLES]:
                setDefaultInMap(role, SCOPE, "all")
    if ANSIBLE in model[CONFIG] and ROLES_PATHS in model[CONFIG][ANSIBLE]:
        # Role paths from the configuration file are relative to it.
        for rp in model[CONFIG][ANSIBLE][ROLES_PATHS]:
            model[DATA]["rolePaths"].add(appendPath(os.path.dirname(model[DATA]["configFile"]), rp))
    return True
def _inheritFromInventoryDefaults(model, node, key):
    """Fill node[key] from inventory.defaults when not set on the node.

    Aborts with ERROR() when the value is defined neither on the node nor in
    inventory.defaults.
    """
    if key in node:
        return
    if INVENTORY not in model[CLUSTER] or key not in model[CLUSTER][INVENTORY][DEFAULTS]:
        ERROR("'{}' is not defined either in node '{}' and in inventory.defaults!".format(key, node[NAME]))
    else:
        node[key] = model[CLUSTER][INVENTORY][DEFAULTS][key]


def groom(plugin, model):
    """Groom per-node ansible connection settings.

    Each node inherits ansible_user / ansible_become / ansible_private_key
    from inventory.defaults when unset (the original body repeated the same
    stanza three times; it is now factored into
    _inheritFromInventoryDefaults). The private key path is then resolved
    relative to the cluster source file and checked readable. Always returns
    True.
    """
    for node in model[CLUSTER][NODES]:
        _inheritFromInventoryDefaults(model, node, ANSIBLE_USER)
        _inheritFromInventoryDefaults(model, node, ANSIBLE_BECOME)
        _inheritFromInventoryDefaults(model, node, ANSIBLE_PRIVATE_KEY)
        node[ANSIBLE_PRIVATE_KEY] = appendPath(model[DATA][SOURCE_FILE_DIR], node[ANSIBLE_PRIVATE_KEY])
        if not os.path.isfile(node[ANSIBLE_PRIVATE_KEY]) or not os.access(node[ANSIBLE_PRIVATE_KEY], os.R_OK):
            ERROR("Node '{}': Invalid private key path:'{}'".format(node[NAME], node[ANSIBLE_PRIVATE_KEY]))
    model["data"]["buildScript"] = appendPath(model["data"]["targetFolder"], "build.sh")
    return True  # Always enabled
def buildTargetFileByName(plugins):
    "Build a map by file name, where each file is an array of file parts"
    targetFileByName = {}
    for plugin in plugins:
        if plugin.enabled:
            plugin.walk(targetFileByName)
    # For each target file: order its parts, then check all parts agree on
    # the (valid) type of the first one.
    for name, targetFile in targetFileByName.iteritems():
        parts = sorted(targetFile['fileParts'], key=lambda fp: fp['order'])
        targetFile['fileParts'] = parts
        refType = parts[0]['type']
        if refType not in validType:
            ERROR("Invalid type '{0}' for file '{1}'. (plugin:'{2}', target:'{3}'). Only {4} are allowed".format(refType, name, parts[0]['plugin'], parts[0]['name'], str(validType)))
        for fp in parts:
            if fp['type'] != refType:
                ERROR("Type mismatch for file target:'{}'. All type for a target must be same ('{}' != '{}')".format(name, fp['type'], refType))
    return targetFileByName
def groom(_plugin, model):
    """Groom the register_ca plugin.

    Returns False when register_ca.disabled is set. Otherwise resolves each
    from_paths source relative to the cluster source file and checks it
    exists.
    """
    setDefaultInMap(model[CLUSTER][REGISTER_CA], DISABLED, False)
    if model[CLUSTER][REGISTER_CA][DISABLED]:
        return False
    if FROM_PATHS in model[CLUSTER][REGISTER_CA]:
        for entry in model[CLUSTER][REGISTER_CA][FROM_PATHS]:
            entry[SRC] = appendPath(model[DATA][SOURCE_FILE_DIR], entry[SRC])
            if not os.path.isfile(entry[SRC]):
                ERROR("Unable to find '{}'!".format(entry[SRC]))
    return True
def computeTfSecurityGroupRule(model, rule, sgName, ruleIdx):
    """Translate one security group rule of the model into its terraform form.

    Handles protocol (by number, ALL, TCP/UDP with ports, ICMP with
    type/code) and the source/destination peer (_ANY_, _SELF_, _VPC_, a
    CIDR, this group, another local group, or an external group). Returns
    the terraform rule dict.
    """
    tf = {}
    if DESCRIPTION in rule:
        tf[DESCRIPTION] = rule[DESCRIPTION]
    # Prefix used by all error messages, to locate the faulty rule.
    prefix = "security_group[{}] Rule#{}".format(sgName, ruleIdx)
    # Handle protocol
    proto = rule[PROTOCOL].strip().upper()
    tf[PROTOCOL] = proto
    p = numberOrNone(proto)
    if p != None:
        # Protocol specified by number. No other control
        tf[FROM_PORT] = rule[FROM_PORT]
        tf[TO_PORT] = rule[TO_PORT]
    elif proto == "ALL":
        # Terraform encodes "all protocols" as -1 with a 0-0 port range.
        tf[FROM_PORT] = 0
        tf[TO_PORT] = 0
        tf[PROTOCOL] = "-1"
    elif proto == "TCP" or proto == "UDP":
        (tf[FROM_PORT], tf[TO_PORT]) = handleTcpUdpPort(rule, prefix)
    elif rule[PROTOCOL].upper() == "ICMP":
        # For ICMP, from_port/to_port carry the icmp type and code.
        (tf[FROM_PORT], tf[TO_PORT]) = handleIcmpType(rule, prefix)
    else:
        # BUGFIX: error message typo ("Unknow" -> "Unknown"), consistent with
        # the other error messages of this module.
        ERROR("{}: Unknown protocol token:'{}'".format(prefix, rule[PROTOCOL]))
    # Handle source or destination
    if SOURCE in rule:
        peer = rule[SOURCE].strip()
    else:
        peer = rule[DESTINATION].strip()
    if peer.upper() == "_ANY_":
        tf[CIDR_BLOCK] = "0.0.0.0/0"
    elif peer.upper() == "_SELF_":
        tf[SELF] = True
    elif peer.upper() == "_VPC_":
        tf[CIDR_BLOCK] = "${data.aws_vpc.my_vpc.cidr_block}"
        model[DATA][AWS][NEED_MY_VPC] = True
    elif isCidr(peer):
        tf[CIDR_BLOCK] = peer
    else:
        if peer == sgName:
            # This refers to ourself
            tf[SELF] = True
        elif peer in model[DATA][AWS][SECURITY_GROUP_BY_NAME]:
            # Should be a reference to another SG.
            tf[SECURITY_GROUP] = "aws_security_group." + peer + ".id"
        else:
            # Unknown name: assume a pre-existing SG, fetched as a data source.
            model[DATA][AWS][EXTERNAL_SECURITY_GROUPS].add(peer)
            tf[SECURITY_GROUP] = "data.aws_security_group." + peer + ".id"
    return tf
def groom(_plugin, model):
    """Groom the aws_k8s plugin (EBS CSI driver integration).

    Requires both the 'k8s' and 'aws' plugins. Defaults the ebs_csi
    sub-configuration, and checks every role using the EBS CSI driver has an
    AWS instance role. Always returns True.
    """
    if K8S not in model[CLUSTER]:
        ERROR("'aws_k8s' plugin could not be used if no 'k8s' plugin")
    if AWS not in model[CLUSTER]:
        ERROR("'aws_k8s' plugin could not be used if no 'aws' plugin")
    setDefaultInMap(model[CLUSTER], AWS_K8S, {})
    if EBS_CSI in model[CLUSTER][AWS_K8S]:
        ebsCsi = model[CLUSTER][AWS_K8S][EBS_CSI]
        setDefaultInMap(ebsCsi, DISABLED, False)
        setDefaultInMap(ebsCsi, EBS_VOLUME_SCHEDULING, True)
        setDefaultInMap(ebsCsi, EBS_VOLUME_SNAPSHOT, False)
        setDefaultInMap(ebsCsi, EBS_VOLUME_RESIZING, False)
        setDefaultInMap(ebsCsi, EBS_CONTROLER_REPLICAS, 1)
        setDefaultInMap(ebsCsi, EBS_PLUGIN_IMAGE_TAG, "latest")
        if ebsCsi[DISABLED]:
            # BUGFIX: was 'delete(model[CLUSTER][AWS_K8S][AWS_K8S])' —
            # 'delete' is not a builtin (NameError) and the key path was
            # wrong; drop the disabled ebs_csi subtree instead.
            del model[CLUSTER][AWS_K8S][EBS_CSI]
        else:
            model[DATA][K8S][PERSISTENT_VOLUMES_ENABLED] = True
            # Ensure we will have an instance role for all node allowed to use EBS CSI driver
            for roleName, role in model[DATA][ROLE_BY_NAME].iteritems():
                setDefaultInMap(role[AWS], EBS_CSI_ENABLED, False)
                if role[AWS][EBS_CSI_ENABLED]:
                    if not INSTANCE_ROLE_NAME in role[AWS]:
                        ERROR("AWS instance role was not enabled for Role '{}' while 'ebs_csi_enabled' is set ! (Set also 'create_instance_role' switch)".format(roleName))
    return True
def groom(_plugin, model):
    """Groom the vagrant plugin (yum_repo variant).

    Checks the 'local' yum repo is backed by configuration, publishes the
    repo base url under model['data'], then grooms roles and nodes and
    records the build script path. Always returns True.
    """
    cfg = model["config"]
    data = model["data"]
    repoInConfig = ("repositories" in cfg
                    and "vagrant" in cfg["repositories"]
                    and "yum_repo_base_url" in cfg["repositories"]["vagrant"])
    if model["cluster"]["vagrant"]["yum_repo"] == "local" and not repoInConfig:
        ERROR("'repositories.vagrant.repo_yum_base_url' is not defined in config file while 'vagrant.yum_repo' is set to 'local' in '{}'".format(data["sourceFileDir"]))
    if repoInConfig:
        # All plugins look up their repositories in model["data"]. So does the vagrant one.
        setDefaultInMap(data, "repositories", {})
        setDefaultInMap(data["repositories"], "vagrant", {})
        data["repositories"]["vagrant"]["yum_repo_base_url"] = cfg["repositories"]["vagrant"]["yum_repo_base_url"]
    groomRoles(model)
    groomNodes(model)
    data["buildScript"] = appendPath(data["targetFolder"], "build.sh")
    return True  # Always enabled
def groom(_plugin, model):
    """Groom the cert_manager plugin.

    Indexes configured issuers, then (unless disabled) builds the list of
    cluster issuers referenced by the cluster definition, checking each
    reference resolves in the configuration.
    """
    groomIssuers(model)
    setDefaultInMap(model[CLUSTER], K8S, {})
    setDefaultInMap(model[CLUSTER][K8S], CERT_MANAGER, {})
    setDefaultInMap(model[CLUSTER][K8S][CERT_MANAGER], DISABLED, False)
    certManager = model[CLUSTER][K8S][CERT_MANAGER]
    if certManager[DISABLED]:
        return False
    model[DATA][CLUSTER_ISSUERS] = []
    if CLUSTER_ISSUERS in certManager:
        for issuerDef in certManager[CLUSTER_ISSUERS]:
            if issuerDef[ID] not in model[DATA][CERT_MANAGER_ISSUER_BY_ID]:
                ERROR("Issuer of id '{}' is not defined in configuration file!".format(issuerDef[ID]))
            # Rename the configured issuer as requested by the cluster.
            issuer = model[DATA][CERT_MANAGER_ISSUER_BY_ID][issuerDef[ID]]
            issuer[NAME] = issuerDef[NAME]
            model[DATA][CLUSTER_ISSUERS].append(issuer)
    return True
def groom(_plugin, model):
    # Groom the rook_ceph plugin. For each declared ceph cluster, expand its
    # node_configs (one per ansible group) into a per-node list, check groups
    # don't overlap, default block pool names, and validate the optional
    # dashboard_ip against the metallb external ip range.
    setDefaultInMap(model[CLUSTER], K8S, {})
    setDefaultInMap(model[CLUSTER][K8S], ROOK_CEPH, {})
    setDefaultInMap(model[CLUSTER][K8S][ROOK_CEPH], DISABLED, False)
    if model[CLUSTER][K8S][ROOK_CEPH][DISABLED]:
        return False
    else:
        setDefaultInMap(model[CLUSTER][K8S][ROOK_CEPH], CLUSTERS, [])
        setDefaultInMap(model[DATA], K8S, {})
        setDefaultInMap(model[DATA][K8S], ROOK_CEPH, {})
        setDefaultInMap(model[DATA][K8S][ROOK_CEPH], CLUSTER_BY_NAME, {})
        # Groups having at least one 'devices' definition (py2 sets.Set).
        groupsWithDevice = Set()
        model[DATA][K8S][ROOK_CEPH][GROUPS_WITH_DEVICES] = groupsWithDevice
        for cluster in model[CLUSTER][K8S][ROOK_CEPH][CLUSTERS]:
            dataCluster = {}
            # Expand each group-level config into one entry per member node.
            nodeByName = {}
            for config in cluster[NODE_CONFIGS]:
                if not isinstance(config, Mapping):
                    ERROR("rook_ceph.clusters.['{}']: All node_configs items must be a Map".format(cluster[NAME]))
                if not GROUP in config:
                    ERROR("rook_ceph.clusters.['{}']: All node_configs items must have a 'group' attribute".format(cluster[NAME]))
                if not config[GROUP] in model[DATA][GROUP_BY_NAME]:
                    ERROR("rook_ceph.clusters.{}.node_configs: group '{}' does not exists".format(cluster["name"], config[GROUP]))
                for nodeName in model[DATA][GROUP_BY_NAME][config[GROUP]]:
                    # A node may appear through one group only.
                    if nodeName in nodeByName:
                        ERROR("rook_ceph.clusters.['{}'].node_configs: node '{}' belong to both group '{}' and group '{}'. nodes_config.groups can't overlap in the same cluster!".format(cluster[NAME], nodeName, config[GROUP], nodeByName[nodeName][GROUP]))
                    nodeByName[nodeName] = copy.deepcopy(config)
                if DEVICES in config:
                    groupsWithDevice.add(config[GROUP])
            # Flatten nodeByName into the per-cluster node list.
            dataCluster[NODES] = []
            for nodeName, node in nodeByName.iteritems():
                node = nodeByName[nodeName]
                del(node[GROUP])
                node[NAME] = nodeName
                dataCluster[NODES].append(node)
            model[DATA][K8S][ROOK_CEPH][CLUSTER_BY_NAME][cluster["name"]] = dataCluster
            # Default block pool names: <cluster>-bp-<replication factor>.
            if BLOCK_POOLS in cluster:
                for bp in cluster[BLOCK_POOLS]:
                    if NAME not in bp:
                        bp[NAME] = "{}-bp-{}".format(cluster[NAME], bp[REPLICATION])
            # dashboard_ip must live inside the metallb external ip range.
            if DASHBOARD_IP in cluster:
                if not METALLB in model[CLUSTER][K8S]:
                    ERROR("rook_ceph.clusters.{}.dashboard_ip is defined while there is no metallb defined".format(cluster["name"]))
                db_ip = ipaddress.ip_address(u"" + cluster[DASHBOARD_IP])
                first_ip = ipaddress.ip_address(u"" + model[CLUSTER][K8S][METALLB][EXTERNAL_IP_RANGE][FIRST])
                last_ip = ipaddress.ip_address(u"" + model[CLUSTER][K8S][METALLB][EXTERNAL_IP_RANGE][LAST])
                if db_ip < first_ip or db_ip > last_ip:
                    ERROR("rook_ceph.clusters.{}.dashboard_ip is not included in metallb.external_ip_range".format(cluster["name"]))
        return True
def groom(plugin, model):
    """Groom the standalone kubespray plugin.

    Returns False when kubespray.disabled is set. Otherwise resolves the
    kubespray ansible repo folder (relative to the configuration file),
    registers its roles and computes the DNS 'ndots' value.
    """
    setDefaultInMap(model["cluster"], "kubespray", {})
    setDefaultInMap(model["cluster"]["kubespray"], "disabled", False)
    if model["cluster"]["kubespray"]["disabled"]:
        return False
    cfg = model["config"]
    if "kubespray" not in cfg or "ansible_repo_folder" not in cfg["kubespray"]:
        ERROR("Missing 'kubespray.ansible_repo_folder' in configuration file")
    repoFolder = appendPath(os.path.dirname(model["data"]["configFile"]), cfg["kubespray"]["ansible_repo_folder"])
    cfg["kubespray"]["ansible_repo_folder"] = repoFolder
    model["data"]["rolePaths"].add(appendPath(repoFolder, "roles"))
    # DNS resolver 'ndots': dots in the cluster domain, plus one.
    model["data"]["dnsNbrDots"] = model["cluster"]["domain"].count(".") + 1
    return True
def groom(_plugin, model):
    """Groom the argocd plugin.

    Returns False when k8s.argocd.disabled is set. Otherwise resolves the
    optional 'load_balancer_ip' and checks the optional 'ingress_nginx_host'
    resolves to the same ip as ingress_nginx.external_ip.
    """
    setDefaultInMap(model[CLUSTER], K8S, {})
    setDefaultInMap(model[CLUSTER][K8S], ARGOCD, {})
    setDefaultInMap(model[CLUSTER][K8S][ARGOCD], DISABLED, False)
    argocd = model[CLUSTER][K8S][ARGOCD]
    if argocd[DISABLED]:
        return False
    if LOAD_BALANCER_IP in argocd:
        argocd[LOAD_BALANCER_IP] = resolveDnsAndCheck(argocd[LOAD_BALANCER_IP])
    if INGRESS_NGINX_HOST in argocd:
        if INGRESS_NGINX in model[CLUSTER][K8S] and EXTERNAL_IP in model[CLUSTER][K8S][INGRESS_NGINX]:
            ingress_ip = resolveDnsAndCheck(model[CLUSTER][K8S][INGRESS_NGINX][EXTERNAL_IP])
            # error if it does not resolve.
            argocd_ip = resolveDnsAndCheck(argocd[INGRESS_NGINX_HOST])
            if argocd_ip != ingress_ip:
                ERROR("k8s.argocd: 'ingress_nginx_host' and 'ingress_nginx.external_ip' must resolve on same ip ({} != {})".format(argocd_ip, ingress_ip))
    return True