Example #1
    def groom(self, model):

        extRolePath = appendPath(self.path, "roles.yml")
        if os.path.exists(extRolePath):
            pathList = yaml.load(open(extRolePath), Loader=yaml.SafeLoader)
            if not isinstance(pathList, list):
                ERROR(
                    "File {} must contain a list of path".format(extRolePath))
            for p in pathList:
                model['data']["rolePaths"].add(appendPath(self.path, p))
        else:
            rolesPath = appendPath(self.path, "roles")
            if os.path.exists(rolesPath):
                model['data']["rolePaths"].add(rolesPath)
        codeFile = appendPath(self.path, "groomer.py")
        if os.path.exists(codeFile):
            logger.debug("Will load '{0}' as python code".format(codeFile))
            self.groomer = imp.load_source(self.name, codeFile)
            if hasattr(self.groomer, "groom"):
                method = getattr(self.groomer, "groom")
                logger.debug("FOUND '{0}' method".format(str(method)))
                ret = method(self, model)
                if ret is None or not isinstance(ret, bool):
                    ERROR(
                        "Invalid plugin '{}'. groom(model) must return a boolean (enabled yes/no)."
                        .format(self.name))
                else:
                    self.enabled = ret
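
These snippets rely on a handful of small helpers from the ezcluster codebase that are not shown here. The sketch below is only an inference from how the examples use them: appendPath apparently resolves a path against a base folder (comments such as "If path is relative, adjust to config file location" suggest absolute paths pass through unchanged), and ERROR aborts grooming with a message.

import os
import sys

def appendPath(base, path):
    # Sketch (assumption): leave absolute paths untouched, otherwise resolve against 'base'.
    if os.path.isabs(path):
        return path
    return os.path.normpath(os.path.join(base, path))

def ERROR(message):
    # Sketch (assumption): report a fatal grooming error and stop; the real helper may raise instead.
    sys.stderr.write("ERROR: {}\n".format(message))
    sys.exit(1)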
Example #2
def groom(plugin, model):
    setDefaultInMap(model["cluster"]["confluent"], "disabled", False)
    if model["cluster"]["confluent"]["disabled"]:
        return False
    
    lookupRepository(model, "confluent")
    if "confluent" not in model["config"] or "ansible_repo_folder" not in model["config"]["confluent"]:
        ERROR("Missing 'confluent.ansible_repo_folder' in configuration file")
    
    for node in model['cluster']['nodes']:
        if "kafka_log_dirs" in node:
            if len(node["kafka_log_dirs"]) == 0:
                del(node["kafka_log_dirs"])
        else:
            if "kafka_log_dirs" in model["data"]["roleByName"][node["role"]]:
                node["kafka_log_dirs"] = model["data"]["roleByName"][node["role"]]["kafka_log_dirs"]
    
    ansible_repo_folder = appendPath(os.path.dirname(model["data"]["configFile"]),  model["config"]["confluent"]["ansible_repo_folder"]) 
    model["config"]["confluent"]["ansible_repo_folder"] = ansible_repo_folder
    model["data"]["rolePaths"].add(appendPath(ansible_repo_folder, "roles"))
    
    # We need to define an ansible group "preflight" hosting all nodes 
    preflight = []
    for node in model["cluster"]["nodes"]:
        preflight.append(node["name"])
    model["data"]["groupByName"]["preflight"] = preflight
    return True
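
Nearly every groom() plugin starts by filling in defaults with setDefaultInMap. Its implementation is not part of this excerpt; from the call sites (set a key only when it is absent, so explicit values from the cluster definition win) a minimal sketch would be:

def setDefaultInMap(root, key, defaultValue):
    # Sketch (assumption): only set the key when it is missing, so an explicit value is never overwritten.
    if key not in root:
        root[key] = defaultValue

# e.g. setDefaultInMap(model["cluster"]["confluent"], "disabled", False) leaves an
# explicit 'disabled: true' from the cluster file untouched.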
Example #3
def initVault(model):
    global vault
    if VAULT_ID in model[CLUSTER]: 
        vaultId = model[CLUSTER][VAULT_ID]
        if VAULTS not in model["config"]:
            ERROR("{} is missing from configuration while encryption id required ('vault_id' is defined)".format(VAULTS))
        l = list(filter(lambda x: x["vault_id"] == vaultId, model["config"][VAULTS]))
        if len(l) > 1:
            ERROR("{}: vault_id '{}' is defined twice in configuration file!".format(VAULTS, vaultId))
        if len(l) != 1:
            ERROR("{}: vault_id '{}' is not defined in configuration file!".format(VAULTS, vaultId))
        f = appendPath(os.path.dirname(model[DATA][CONFIG_FILE]), l[0][PASSWORD_FILE])
        if not (os.path.isfile(f) and os.access(f, os.R_OK)):
            ERROR("Non existing or not accessible vault password file '{}'.".format(f))
        pwd = file2String(f)
        pwd = pwd.strip()
        model[DATA][VAULT_PASSWORD_FILE] = f
        vault = Vault(pwd)
        if SAFE_CONFIFG_FILE in l[0]:
            scFileName = appendPath(os.path.dirname(model[DATA][CONFIG_FILE]), l[0][SAFE_CONFIFG_FILE])
            model[DATA][_SAFE_CONFIG_FILE_] = scFileName
            if not (os.path.isfile(scFileName) and os.access(scFileName, os.R_OK)):
                ERROR("Non existing or not accessible safe config file '{}'.".format(scFileName))
            logger.info("Loading safe config from '{}'".format(scFileName))
            data, was_encrypted = vault.encryptedFile2String(scFileName)
            safeConfig = yaml.load(data, Loader=yaml.SafeLoader)
            model[SAFE_CONFIG] = safeConfig
            if not was_encrypted:
                print("\n'{}' was not encrypted. Will encrypt it".format(scFileName))
                vault.stringToEncryptedFile(data, scFileName)
    else:
        vault = None
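
file2String and the Vault wrapper are also external to this excerpt. file2String presumably just reads a file into a string; a trivial sketch under that assumption:

def file2String(path):
    # Sketch (assumption): return the whole file content as a string.
    with open(path, "r") as fh:
        return fh.read()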
Example #4
def groom(_plugin, model):
    model[DATA][AWS] = {}
    #model[DATA][AWS][REFERENCE_SUBNET]= model[CLUSTER][NODES][0][AWS][SUBNET]
    setDefaultInMap(model[CLUSTER][AWS], KEY_PAIR, "default")
    kp = lookupKeyPair(model, model[CLUSTER][AWS][KEY_PAIR])
    model[DATA][AWS][DATA_KEY_PAIR] = kp[KEY_PAIR_NAME]
    if PRIVATE_KEY_PATH in kp:
        # If path is relative, adjust to config file location
        model[DATA][AWS][DATA_PRIVATE_KEY_PATH] = appendPath(os.path.dirname(model["data"]["configFile"]), kp[PRIVATE_KEY_PATH])
    model[DATA][AWS][DATA_ROUTE53] = lookupRoute53(model, model[CLUSTER][AWS][ROUTE53])
    groomSecurityGroups(model)
    groomRoles(model)
    groomNodes(model)
    model["data"]["buildScript"] = appendPath(model["data"]["targetFolder"], "build.sh")
    return True # Always enabled
Example #5
def groom(plugin, model):
    repoInConfig = "repositories" in model["config"] and "vagrant" in model[
        "config"]["repositories"] and "yum_repo_base_url" in model["config"][
            "repositories"]["vagrant"]
    setDefaultInMap(model["cluster"]["vagrant"], "local_yum_repo",
                    repoInConfig)
    if model["cluster"]["vagrant"]["local_yum_repo"] and not repoInConfig:
        ERROR(
            "'repositories.vagrant.repo_yum_base_url' is not defined in config file while 'vagrant.local_yum_repo' is set to True in '{}'"
            .format(model["data"]["sourceFileDir"]))
    if repoInConfig:
        # All plugins look up their repositories in model["data"]. So does the vagrant one.
        setDefaultInMap(model["data"], "repositories", {})
        setDefaultInMap(model["data"]["repositories"], "vagrant", {})
        model["data"]["repositories"]["vagrant"]["yum_repo_base_url"] = model[
            "config"]["repositories"]["vagrant"]["yum_repo_base_url"]

    for node in model['cluster']['nodes']:
        if SYNCED_FOLDERS not in node:
            node[SYNCED_FOLDERS] = []
        role = model["data"]["roleByName"][node["role"]]
        if SYNCED_FOLDERS in role:
            node[SYNCED_FOLDERS] += role[SYNCED_FOLDERS]
        if SYNCED_FOLDERS in model["cluster"]["vagrant"]:
            node[SYNCED_FOLDERS] += model["cluster"]["vagrant"][SYNCED_FOLDERS]

    model["data"]["buildScript"] = appendPath(model["data"]["targetFolder"],
                                              "build.sh")
    return True  # Always enabled
Example #6
def groom(_plugin, model):
    setDefaultInMap(model[CLUSTER][DRPROXY], DISABLED, False)
    if model[CLUSTER][DRPROXY][DISABLED]:
        return False
    else:
        for f in [CERT_FILE, KEY_FILE, ROOT_CA_FILE]:
            model[CLUSTER][DRPROXY][f] = appendPath(model[DATA][SOURCE_FILE_DIR], model[CLUSTER][DRPROXY][f])
            if not os.path.isfile(model[CLUSTER][DRPROXY][f]):
                ERROR("Unable to find '{}'!".format(model[CLUSTER][DRPROXY][f]))
        return True
Example #7
def groom(_plugin, model):
    setDefaultInMap(model[CLUSTER], K8S, {})
    setDefaultInMap(model[CLUSTER][K8S], KUBESPRAY, {})
    setDefaultInMap(model[CLUSTER][K8S][KUBESPRAY], DISABLED, False)
    setDefaultInMap(model[CLUSTER][K8S][KUBESPRAY], METRICS_SERVER, True)
    setDefaultInMap(model[CLUSTER][K8S][KUBESPRAY], AUDIT, False)
    setDefaultInMap(model[CLUSTER][K8S][KUBESPRAY], POD_SECURITY_POLICIES, True)
    if model[CLUSTER][K8S][KUBESPRAY][DISABLED]:
        return False
    else:
        lookupRepository(model, None, "docker_yum", model[CLUSTER][K8S][KUBESPRAY]['docker_yum_repo_id'])
        if K9S_REPO_ID in model[CLUSTER][K8S][KUBESPRAY]:
            lookupRepository(model, "k9s", repoId = model[CLUSTER][K8S][KUBESPRAY][K9S_REPO_ID])
        if HELM_REPO_ID in model[CLUSTER][K8S][KUBESPRAY]:
            lookupRepository(model, "helm", repoId = model[CLUSTER][K8S][KUBESPRAY][HELM_REPO_ID])
        lookupHelper(model, KUBESPRAY, helperId=model[CLUSTER][K8S][KUBESPRAY]["helper_id"])
        lookupHttpProxy(model, model[CLUSTER][K8S][KUBESPRAY]["docker_proxy_id"] if "docker_proxy_id" in model[CLUSTER][K8S][KUBESPRAY] else None, "docker")
        lookupHttpProxy(model, model[CLUSTER][K8S][KUBESPRAY]["master_root_proxy_id"] if "master_root_proxy_id" in model[CLUSTER][K8S][KUBESPRAY] else None, "master_root")
        lookupHttpProxy(model, model[CLUSTER][K8S][KUBESPRAY]["yumproxy_id"] if "yum_proxy_id" in model[CLUSTER][K8S][KUBESPRAY] else None, "yum")
        if FILES_REPO_ID in model[CLUSTER][K8S][KUBESPRAY]:
            lookupRepository(model, "kubespray_files", repoId=model[CLUSTER][K8S][KUBESPRAY][FILES_REPO_ID])
        model[DATA][ROLE_PATHS].add(appendPath(model[DATA][HELPERS][KUBESPRAY][FOLDER], "roles"))
        model[DATA]["dnsNbrDots"] = model[CLUSTER][K8S][KUBESPRAY][CLUSTER_NAME].count(".") + 1
        certByName = {}
        if DOCKER_CERTIFICATES in model["config"]:
            for cert in model["config"][DOCKER_CERTIFICATES]:
                cert["path"] = appendPath(os.path.dirname(model[DATA][CONFIG_FILE]), cert["path"])
                if not os.path.isfile(cert["path"]) or not os.access(cert["path"], os.R_OK):
                    ERROR("Configuration error: docker_certificates.{}: Invalid path '{}'".format(cert["name"],  cert["path"]))
                certByName[cert["name"]] = cert
        model[DATA][DOCKER_CERTIFICATES] = []
        if DOCKER_CERTIFICATES in model[CLUSTER][K8S][KUBESPRAY]:
            for certName in model[CLUSTER][K8S][KUBESPRAY][DOCKER_CERTIFICATES]:
                if certName in certByName:
                    cert = certByName[certName]
                    if "port" in cert:
                        cert["endpoint"] = "{}:{}".format(cert["host"], cert['port'])
                    else:
                        cert["endoint"] = cert["host"]
                    model[DATA][DOCKER_CERTIFICATES].append(cert)
                else:
                    ERROR("docker_certificates '{}' is not defined in configuration file!".format(certName))
        return True
Example #8
    def walk(self, targetFileByName):
        """ Enrich the targetFileByName structure with file from this plugin """
        #logger.debug(self.path + "<----")
        snippetsPath = appendPath(self.path, "snippets")
        pref = len(snippetsPath) + 1
        for dirpath, dirnames, filenames in os.walk(
                snippetsPath):  # @UnusedVariable
            #logger.debug("dirpath:{}  dirnames:{}  filename:{}".format(dirpath, dirnames, filenames))
            for filename in filenames:
                #logger.debug(filename)
                if not filename == ".gitignore":
                    sourceFile = os.path.join(dirpath, filename)
                    targetFileName = sourceFile[pref:]
                    if targetFileName.count(".") < 2:
                        # We pass throught non super-suffixed files
                        order = 0
                        ftype = "txt"
                    else:
                        # Handle the type and eventual suffix (Used as short comment)
                        pos = targetFileName.rfind(".")
                        suffix = targetFileName[pos + 1:]
                        targetFileName = targetFileName[:pos]
                        pos = suffix.find("-")
                        if pos != -1:
                            ftype = suffix[:pos]
                            suffix = suffix[pos + 1:]
                        else:
                            ftype = suffix
                            suffix = None
                        # Now order number
                        pos = targetFileName.rfind(".")
                        idx = targetFileName[pos + 1:]
                        targetFileName = targetFileName[:pos]
                        try:
                            order = int(idx)
                        except ValueError:
                            ERROR("'{0}' is not a valid file part".format(
                                sourceFile))

                    logger.debug(sourceFile + "-->" + targetFileName + "(" +
                                 str(idx) + ")")

                    if targetFileName not in targetFileByName:
                        targetFileByName[targetFileName] = {}
                        #targetFileByName[targetFileName].name = targetFileName
                        targetFileByName[targetFileName]['fileParts'] = []
                    fp = {}
                    fp['name'] = sourceFile
                    fp['order'] = order
                    fp['plugin'] = self.name
                    fp['type'] = ftype
                    if suffix != None:
                        fp["suffix"] = suffix
                    targetFileByName[targetFileName]['fileParts'].append(fp)
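
The parsing in walk() implies a naming convention for snippet files of the form <target>.<order>.<type>[-<comment>]. The file name below is purely illustrative (not taken from the project) and shows how one such name decomposes:

# Hypothetical snippet file (illustrative only):
#   snippets/group_vars/all.010.j2-network
#
# walk() splits it into:
#   targetFileName = "group_vars/all"   (the generated file this part contributes to)
#   order          = 10                 (file parts are presumably assembled in ascending order)
#   ftype          = "j2"               (file type)
#   suffix         = "network"          (short comment)
#
# A name with fewer than two dots (e.g. "snippets/README") is passed through
# as-is, with order 0 and type "txt".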
Example #9
def buildConfig(sourceFileDir, baseConfigFile):
    configFile = findUpward(baseConfigFile, sourceFileDir)
    logger.info("Using '{}' as configuration file".format(configFile))
    config = yaml.load(open(configFile), Loader=yaml.SafeLoader)
    if PLUGINS_PATH not in config:
        ERROR("Missing '{}' in configuration file".format(PLUGINS_PATH))
    # Adjust plugin path relative to the config file
    baseDir = os.path.dirname(configFile)
    for index, path in enumerate(config[PLUGINS_PATH]):
        config[PLUGINS_PATH][index] = misc.appendPath(baseDir, path)
    return (config, configFile)
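
findUpward is not shown in these examples; judging from its name and its use here, it presumably locates the configuration file by searching the source directory and then each parent directory in turn. A minimal sketch under that assumption:

import os

def findUpward(fileName, startDir):
    # Sketch (assumption): look for fileName in startDir, then walk up through the parent folders.
    current = os.path.abspath(startDir)
    while True:
        candidate = os.path.join(current, fileName)
        if os.path.isfile(candidate):
            return candidate
        parent = os.path.dirname(current)
        if parent == current:  # reached the filesystem root without finding the file
            ERROR("Unable to locate '{}' from '{}' upward".format(fileName, startDir))
        current = parent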
Example #10
def groom(plugin, model):
    for node in model[CLUSTER][NODES]:
        if ANSIBLE_USER not in node:
            if INVENTORY not in model[CLUSTER] or ANSIBLE_USER not in model[
                    CLUSTER][INVENTORY][DEFAULTS]:
                ERROR(
                    "'{}' is not defined either in node '{}' and in inventory.defaults!"
                    .format(ANSIBLE_USER, node[NAME]))
            else:
                node[ANSIBLE_USER] = model[CLUSTER][INVENTORY][DEFAULTS][
                    ANSIBLE_USER]
        if ANSIBLE_BECOME not in node:
            if INVENTORY not in model[CLUSTER] or ANSIBLE_BECOME not in model[
                    CLUSTER][INVENTORY][DEFAULTS]:
                ERROR(
                    "'{}' is not defined either in node '{}' and in inventory.defaults!"
                    .format(ANSIBLE_BECOME, node[NAME]))
            else:
                node[ANSIBLE_BECOME] = model[CLUSTER][INVENTORY][DEFAULTS][
                    ANSIBLE_BECOME]
        if ANSIBLE_PRIVATE_KEY not in node:
            if INVENTORY not in model[
                    CLUSTER] or ANSIBLE_PRIVATE_KEY not in model[CLUSTER][
                        INVENTORY][DEFAULTS]:
                ERROR(
                    "'{}' is not defined either in node '{}' and in inventory.defaults!"
                    .format(ANSIBLE_PRIVATE_KEY, node[NAME]))
            else:
                node[ANSIBLE_PRIVATE_KEY] = model[CLUSTER][INVENTORY][
                    DEFAULTS][ANSIBLE_PRIVATE_KEY]
        node[ANSIBLE_PRIVATE_KEY] = appendPath(model[DATA][SOURCE_FILE_DIR],
                                               node[ANSIBLE_PRIVATE_KEY])
        if not os.path.isfile(node[ANSIBLE_PRIVATE_KEY]) or not os.access(
                node[ANSIBLE_PRIVATE_KEY], os.R_OK):
            ERROR("Node '{}': Invalid private key path:'{}'".format(
                node[NAME], node[ANSIBLE_PRIVATE_KEY]))
    model["data"]["buildScript"] = appendPath(model["data"]["targetFolder"],
                                              "build.sh")
    return True  # Always enabled
Example #11
def groom(plugin, model):
    if ANSIBLE in model[CLUSTER]:
        setDefaultInMap(model[CLUSTER][ANSIBLE], DISABLED, False)
        if model[CLUSTER][ANSIBLE][DISABLED]:
            return False
        if PLAYBOOKS in model[CLUSTER][ANSIBLE]:
            for idx in range(0, len(model[CLUSTER][ANSIBLE][PLAYBOOKS])):
                model[CLUSTER][ANSIBLE][PLAYBOOKS][idx][FILE] = appendPath(
                    model[DATA][SOURCE_FILE_DIR],
                    model[CLUSTER][ANSIBLE][PLAYBOOKS][idx][FILE])

        if ROLES_PATHS in model[CLUSTER][ANSIBLE]:
            for rp in model[CLUSTER][ANSIBLE][ROLES_PATHS]:
                model[DATA]["rolePaths"].add(
                    appendPath(model[DATA][SOURCE_FILE_DIR], rp))
        if ROLES_PATHS_FROM_PLUGINS in model[CLUSTER][ANSIBLE]:
            for pluginName in model[CLUSTER][ANSIBLE][
                    ROLES_PATHS_FROM_PLUGINS]:
                plugin = lookupPlugin(pluginName,
                                      model[CONFIG]["plugins_paths"])
                if plugin != None:
                    rolesPath = appendPath(plugin.path, "roles")
                    if os.path.exists(rolesPath):
                        model['data']["rolePaths"].add(rolesPath)
                    else:
                        ERROR(
                            "ansible.{}: There is no 'roles' folder in plugin '{}'"
                            .format(ROLES_PATHS_FROM_PLUGINS, pluginName))
                else:
                    ERROR("ansible.{}: plugin '{}' not found".format(
                        ROLES_PATHS_FROM_PLUGINS, pluginName))
        if ROLES in model[CLUSTER][ANSIBLE]:
            for role in model[CLUSTER][ANSIBLE][ROLES]:
                setDefaultInMap(role, SCOPE, "all")
    if ANSIBLE in model[CONFIG] and ROLES_PATHS in model[CONFIG][ANSIBLE]:
        for rp in model[CONFIG][ANSIBLE][ROLES_PATHS]:
            model[DATA]["rolePaths"].add(
                appendPath(os.path.dirname(model[DATA]["configFile"]), rp))
    return True
Example #12
def groom(plugin, model):
    setDefaultInMap(model["cluster"], "kubespray", {})
    setDefaultInMap(model["cluster"]["kubespray"], "disabled", False)
    if model["cluster"]["kubespray"]["disabled"]:
        return False
    else:
        if "kubespray" not in model[
                "config"] or "ansible_repo_folder" not in model["config"][
                    "kubespray"]:
            ERROR(
                "Missing 'kubespray.ansible_repo_folder' in configuration file"
            )
        ansible_repo_folder = appendPath(
            os.path.dirname(model["data"]["configFile"]),
            model["config"]["kubespray"]["ansible_repo_folder"])
        model["config"]["kubespray"][
            "ansible_repo_folder"] = ansible_repo_folder
        model["data"]["rolePaths"].add(appendPath(ansible_repo_folder,
                                                  "roles"))

        model["data"]["dnsNbrDots"] = model["cluster"]["domain"].count(".") + 1
        return True
Example #13
def groom(_plugin, model):
    setDefaultInMap(model[CLUSTER], HARBOR, {})
    setDefaultInMap(model[CLUSTER][HARBOR], DISABLED, False)
    if model[CLUSTER][HARBOR][DISABLED]:
        return False
    else:
        lookupRepository(model,
                         "harbor",
                         repoId=model[CLUSTER][HARBOR][REPO_ID])
        model[CLUSTER][HARBOR][SSL_CERT_SRC] = appendPath(
            model[DATA][SOURCE_FILE_DIR], model[CLUSTER][HARBOR][SSL_CERT_SRC])
        if not os.path.isfile(model[CLUSTER][HARBOR][SSL_CERT_SRC]):
            ERROR("Unable to find '{}'!".format(
                model[CLUSTER][HARBOR][SSL_CERT_SRC]))
        model[CLUSTER][HARBOR][SSL_KEY_SRC] = appendPath(
            model[DATA][SOURCE_FILE_DIR], model[CLUSTER][HARBOR][SSL_KEY_SRC])
        if not os.path.isfile(model[CLUSTER][HARBOR][SSL_KEY_SRC]):
            ERROR("Unable to find '{}'!".format(
                model[CLUSTER][HARBOR][SSL_KEY_SRC]))
        setDefaultInMap(model[CLUSTER][HARBOR], VALIDATE_API_CERT, False)
        setDefaultInMap(model[CLUSTER][HARBOR], HOSTNAME, "{{ ansible_fqdn }}")
        return True
Example #14
def groom(_plugin, model):
    setDefaultInMap(model[CLUSTER][REGISTER_CA], DISABLED, False)
    if model[CLUSTER][REGISTER_CA][DISABLED]:
        return False
    else:
        if FROM_PATHS in model[CLUSTER][REGISTER_CA]:
            for idx, p in enumerate(model[CLUSTER][REGISTER_CA][FROM_PATHS]):
                model[CLUSTER][REGISTER_CA][FROM_PATHS][idx][SRC] = appendPath(
                    model[DATA][SOURCE_FILE_DIR], p[SRC])
                if not os.path.isfile(
                        model[CLUSTER][REGISTER_CA][FROM_PATHS][idx][SRC]):
                    ERROR("Unable to find '{}'!".format(
                        model[CLUSTER][REGISTER_CA][FROM_PATHS][idx][SRC]))
        return True
Example #15
def groom(_plugin, model):
    setDefaultInMap(model[CLUSTER], FREEIPA, {})
    setDefaultInMap(model[CLUSTER][FREEIPA], DISABLED, False)
    if model[CLUSTER][FREEIPA][DISABLED]:
        return False
    else:
        model[DATA][FREEIPA] = {}
        lookupHelper(model,
                     FREEIPA,
                     helperId=model[CLUSTER][FREEIPA]["helper_id"])
        model[DATA][ROLE_PATHS].add(
            appendPath(model[DATA][HELPERS][FREEIPA][FOLDER], "roles"))
        if CERT_FILES in model[CLUSTER][FREEIPA] and len(
                model[CLUSTER][FREEIPA][CERT_FILES]) > 0:
            # NB: ipaserver_external_cert_files_from_controller does not work! (Missing basename in the copy.) We handle it ourselves beforehand.
            # In fact, we were unable to transfer the root authority from one install to another. A new CA is generated on each freeipa build.
            model[DATA][FREEIPA][FILES_TO_COPY] = []
            model[DATA][FREEIPA][EXTERNAL_CERT_FILES] = []
            for fn in model[CLUSTER][FREEIPA][CERT_FILES]:
                fc = {}
                fc[SRC] = appendPath(model[DATA][SOURCE_FILE_DIR], fn)
                if not os.path.isfile(fc[SRC]):
                    ERROR("Unable to find '{}'!".format(fc[SRC]))
                fc[DEST] = os.path.join("/root/", os.path.basename(fc[SRC]))
                model[DATA][FREEIPA][FILES_TO_COPY].append(fc)
                model[DATA][FREEIPA][EXTERNAL_CERT_FILES].append(fc[DEST])
        if USERS in model[CLUSTER][FREEIPA]:
            for user in model[CLUSTER][FREEIPA][USERS]:
                setDefaultInMap(user, UPDATE_PASSWORD, "on_create")
                # Better to provide some defaults here than let freeipa do it, both for control and for updates (freeipa does not modify them once set).
                n = "{} {}".format(user[FIRSTNAME], user[LASTNAME])
                setDefaultInMap(user, CN, n)
                setDefaultInMap(user, DISPLAYNAME, n)
                setDefaultInMap(user, INITALS, (user[FIRSTNAME][0] +
                                                user[LASTNAME][0]).upper())

        return True
Example #16
def groom(_plugin, model):
    repoInConfig = "repositories" in model["config"] and "vagrant" in model["config"]["repositories"]  and "yum_repo_base_url" in model["config"]["repositories"]["vagrant"]
    if model["cluster"]["vagrant"]["yum_repo"] == "local" and not repoInConfig:
        ERROR("'repositories.vagrant.repo_yum_base_url' is not defined in config file while 'vagrant.yum_repo' is set to 'local' in '{}'".format(model["data"]["sourceFileDir"]))
    if repoInConfig:
        # All plugins look up their repositories in model["data"]. So does the vagrant one.
        setDefaultInMap(model["data"], "repositories", {})
        setDefaultInMap(model["data"]["repositories"], "vagrant", {})
        model["data"]["repositories"]["vagrant"]["yum_repo_base_url"] = model["config"]["repositories"]["vagrant"]["yum_repo_base_url"]

    groomRoles(model)
    groomNodes(model)
        
    model["data"]["buildScript"] = appendPath(model["data"]["targetFolder"], "build.sh")
    return True # Always enabled
        
Example #17
def groom(_plugin, model):
    setDefaultInMap(model[CLUSTER], K8S, {})
    setDefaultInMap(model[CLUSTER][K8S], LOCAL_STATIC_PVS, {})
    setDefaultInMap(model[CLUSTER][K8S][LOCAL_STATIC_PVS], DISABLED, False)
    if model[CLUSTER][K8S][LOCAL_STATIC_PVS][DISABLED]:
        return False
    # Lookup interesting storage classes
    localStaticStorageClassByName = {}
    model[DATA][LOCAL_STATIC_STORAGE_CLASSES] = []
    for sc in model[CLUSTER][K8S][LOCAL_STATIC_PVS][STORAGE_CLASSES]:
        if nameCheckRegex.match(sc["name"]) is None:
            ERROR("Invalid storage_class name '{}': DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character".format(sc["name"]))
        if sc[TYPE] == SC_TYPE_LOCAL_STATIC:
            localStaticStorageClassByName[sc[NAME]] = sc
            model[DATA][LOCAL_STATIC_STORAGE_CLASSES].append(sc)
    
    model[DATA][PV_MOUNT_FOLDERS] = []
    
    for _, role in model[DATA][ROLE_BY_NAME].items():
        # Look up the interesting raw data disks
        dataDiskByRef = {}
        if DATA_DISKS in role:
            for ddisk in role[DATA_DISKS]:
                if REF in ddisk:
                    dataDiskByRef[ddisk[REF]] = ddisk
                    ddisk[SPLITS] = []

        
        storageClasses = set()
        hostDirs = set()
        role[LVM_SPLITTERS] = []
        role[BIND_MOUNTS] = []
        splitCount = 0
        if K8S in role and LOCAL_STATIC_PVS in role[K8S]:
            for lpv in role[K8S][LOCAL_STATIC_PVS]:
                if lpv[STORAGE_CLASS] not in localStaticStorageClassByName:
                    ERROR("role['{}'].k8s.local_static_pv.host_dir['{}']': Undefined storage_class '{}'".format(role[NAME], lpv[HOST_DIR], lpv[STORAGE_CLASS]))
                if lpv[STORAGE_CLASS] in storageClasses:
                    ERROR("role['{}'].k8s.local_static_pv: storage_class '{}' is used twice".format(role[NAME], lpv[STORAGE_CLASS]))
                storageClasses.add(lpv[STORAGE_CLASS])
                if lpv[HOST_DIR] in hostDirs:
                    ERROR("role['{}'].k8s.local_static_pv: host_dir '{}' is used twice".format(role[NAME], lpv[HOST_DIR]))
                hostDirs.add(lpv[HOST_DIR])
                    
                for index, source in enumerate(lpv[SOURCES]):
                    if FOLDER in source:
                        if DATA_DISK_REF in source or SPLITS in source or COUNT not in source:
                            invalidSourceError(role, lpv, index)
                        for _ in range(0, source[COUNT]):
                            mount = {}
                            role[BIND_MOUNTS].append(mount)
                            name = "vol{:03}".format(len(role[BIND_MOUNTS]))
                            mount["path"] = appendPath(lpv[HOST_DIR], name)
                            mount["src"] = appendPath(source[FOLDER], name)
                    elif DATA_DISK_REF in source:
                        if FOLDER in source or COUNT in source or SPLITS not in source:
                            invalidSourceError(role, lpv, index)
                        if source[DATA_DISK_REF] in dataDiskByRef:
                            ddisk = dataDiskByRef[source[DATA_DISK_REF]]
                            for size in source[SPLITS]:
                                split = {}
                                ddisk[SPLITS].append(split)
                                splitCount += 1
                                split[NAME] = "split{:03}".format(splitCount)
                                split["_size"] = size
                                split["mount"] = appendPath(lpv[HOST_DIR], split[NAME])
                        else:
                            ERROR("role['{}'].k8s.local_static_pv.host_dir['{}'][{}]': Undefined data_disk_ref '{}'".format(role[NAME], lpv[HOST_DIR], index, source[DATA_DISK_REF]))
                    else:      
                        invalidSourceError(role, lpv, index)
                pvMountFolder = {}
                pvMountFolder["className"] = lpv[STORAGE_CLASS]
                pvMountFolder["hostDir"] = lpv[HOST_DIR]
                model[DATA][PV_MOUNT_FOLDERS].append(pvMountFolder)
                
            # Now, loop on data_disk to build splitter
            for ref, ddisk in dataDiskByRef.items():
                if len(ddisk[SPLITS]) > 0:
                    requiredSize = 0
                    splitter = {}
                    role[LVM_SPLITTERS].append(splitter)
                    splitter["physical_volumes"] = [ (ddisk[DEVICE] if ddisk[DEVICE].startswith("/dev") else "/dev/{}".format(ddisk[DEVICE])) ]
                    splitter["vg_name"] =  "vg{}".format( ddisk[DEVICE][5:] if ddisk[DEVICE].startswith("/dev/")  else ddisk[DEVICE] ) 
                    splitter["logical_volumes"] = []
                    for split in ddisk[SPLITS]:
                        splitter["logical_volumes"].append(split)
                        split["size"] = "{}G".format(split["_size"])
                        split["fstype"] = "xfs"
                        split["mount_options"] = "defaults,noatime"
                        split["fsopts"] = ""
                        requiredSize += split["_size"]
                    if requiredSize > ddisk[SIZE]:
                        ERROR("role['{}'].data_disk.ref[{}]: Required size ({}G) exceed disk size ({}G)'".format(role[NAME], ref, requiredSize, ddisk[SIZE]))
    return True    
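
To make the splitting logic above concrete, here is an illustrative, purely hypothetical input and the structures groom() would derive from it, following the code as written (and assuming the LVM_SPLITTERS constant maps to a 'lvm_splitters' key):

# Hypothetical role fragment (illustrative only):
#   data_disks:
#     - { ref: disk1, device: sdb, size: 60 }
#   k8s:
#     local_static_pvs:
#       - storage_class: local-hdd
#         host_dir: /data/pv
#         sources:
#           - { data_disk_ref: disk1, splits: [20, 30] }
#
# groom() would append to role['lvm_splitters'] something like:
#   physical_volumes: ["/dev/sdb"]
#   vg_name: "vgsdb"
#   logical_volumes:
#     - { name: split001, size: "20G", fstype: xfs, mount: /data/pv/split001, ... }
#     - { name: split002, size: "30G", fstype: xfs, mount: /data/pv/split002, ... }
# and would reject the source if the requested 50G exceeded the 60G disk size.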
Example #18
def groom(plugin, model):
    setDefaultInMap(model[CLUSTER][HORTONWORKS], DISABLED, False)
    if model[CLUSTER][HORTONWORKS][DISABLED]:
        return False
    else:
        lookupRepository(model, HORTONWORKS)
        if HORTONWORKS not in model[CONFIG] or ANSIBLE_REPO_FOLDER not in model[
                CONFIG][HORTONWORKS]:
            ERROR(
                "Missing 'hortonworks.ansible_repo_folder' in configuration file"
            )
        ansible_repo_folder = appendPath(
            os.path.dirname(model[DATA][CONFIG_FILE]),
            model[CONFIG][HORTONWORKS][ANSIBLE_REPO_FOLDER])
        model[CONFIG][HORTONWORKS][ANSIBLE_REPO_FOLDER] = ansible_repo_folder
        model[DATA][ROLE_PATHS].add(appendPath(ansible_repo_folder, "roles"))
        # ------------- We need to define some groups for the intention of external tools.
        zookeepers = []
        kafka_brokers = []
        model[DATA][EXTRA_GROUP_BY_NAME] = {}
        for role in model[CLUSTER][ROLES]:
            if HW_SERVICES in role:
                if "ZOOKEEPER_SERVER" in role[HW_SERVICES]:
                    zookeepers.extend(map(lambda x: x[NAME], role[NODES]))
                if "KAFKA_BROKER" in role[HW_SERVICES]:
                    kafka_brokers.extend(map(lambda x: x[NAME], role[NODES]))
        if ZOOKEEPERS not in model[DATA][GROUP_BY_NAME]:
            model[DATA][EXTRA_GROUP_BY_NAME][ZOOKEEPERS] = zookeepers
        if KAFKA_BROKERS not in model[DATA][GROUP_BY_NAME]:
            model[DATA][EXTRA_GROUP_BY_NAME][KAFKA_BROKERS] = kafka_brokers
        # ---------------------------- Handle java
        #setDefaultInMap(model[CLUSTER][HORTONWORKS], JAVA, EMBEDDED)
        if model[CLUSTER][HORTONWORKS][JAVA] == ORACLEJDK:
            if ORACLEJDK_TARBALL_LOCATION not in model[DATA][REPOSITORIES][
                    HORTONWORKS]:
                ERROR(
                    "'hortonworks.java' is set to 'oraclejdk' while there is no 'repositories.hortonworks.oraclejdk_tarball_location' defined in configuration file!"
                )
            if ORACLEJDK_JCE_LOCATION not in model[DATA][REPOSITORIES][
                    HORTONWORKS]:
                ERROR(
                    "'hortonworks.java' is set to 'oraclejdk' while there is no 'repositories.hortonworks.oraclejdk_jce_location' defined in configuration file!"
                )
        # ---------------------------- Handle database
        if model[CLUSTER][HORTONWORKS][DATABASE][TYPE] != "embedded":
            if MODE not in model[CLUSTER][HORTONWORKS][DATABASE]:
                ERROR(
                    "hostonworks.database.mode must be defined if type != 'embedded'!"
                )
            if model[CLUSTER][HORTONWORKS][DATABASE][
                    MODE] != "included" and model[CLUSTER][HORTONWORKS][
                        DATABASE][TYPE] != 'postgres':
                ERROR(
                    "hostonworks.database: Only 'postgres' type is supported in 'internal' or 'external' mode "
                )
            if model[CLUSTER][HORTONWORKS][DATABASE][
                    MODE] == 'external' and SERVER not in model[CLUSTER][
                        HORTONWORKS][DATABASE]:
                ERROR(
                    "hostonworks.database.server must be defined in 'external' mode "
                )
            if model[CLUSTER][HORTONWORKS][DATABASE][
                    MODE] == 'internal' and SERVER in model[CLUSTER][
                        HORTONWORKS][DATABASE]:
                ERROR(
                    "hostonworks.database.server must NOT be defined in 'internal' mode "
                )
            setDefaultInMap(model[CLUSTER][HORTONWORKS][DATABASE], ADD_REPO,
                            True)
            if model[CLUSTER][HORTONWORKS][DATABASE][MODE] == 'internal':
                if not POSTGRESQL_SERVER in model[DATA][GROUP_BY_NAME]:
                    ERROR(
                        "hostonworks.database.mode == 'internal', but no group '{}' was defined"
                        .format(POSTGRESQL_SERVER))
                model[CLUSTER][HORTONWORKS][DATABASE][SERVER] = model[DATA][
                    GROUP_BY_NAME][POSTGRESQL_SERVER][0]
        # -------------------------------------------------Handle database
        # We need to create two layout.
        # - One to create databases and users on db server.
        # - One to provide info to group_vars/all
        setDefaultInMap(model[CLUSTER][HORTONWORKS], WEAK_PASSWORDS, False)
        setDefaultInMap(model[DATA], HORTONWORKS, {})
        setDefaultInMap(model[DATA][HORTONWORKS], DATABASES, {})
        setDefaultInMap(model, PASSWORDS, {})
        setDefaultInMap(model[PASSWORDS], DATABASES, {})
        tags = set()
        tags.add("ambari")
        for role in model[CLUSTER][ROLES]:
            if HW_SERVICES in role:
                if "HIVE_METASTORE" in role[HW_SERVICES]:
                    tags.add("hive")
                if "OOZIE_SERVER" in role[HW_SERVICES]:
                    tags.add("oozie")
                if "DRUID_BROKER" in role[
                        HW_SERVICES] or "DRUID_OVERLORD" in role[HW_SERVICES]:
                    tags.add("druid")
                if "SUPERSET" in role[HW_SERVICES]:
                    tags.add("superset")
                if "RANGER_ADMIN" in role[HW_SERVICES]:
                    tags.add("rangeradmin")
                if "RANGER_KMS_SERVER" in role[HW_SERVICES]:
                    tags.add("rangerkms")
                if "REGISTRY_SERVER" in role[HW_SERVICES]:
                    tags.add("registry")
                if "STREAMLINE_SERVER" in role[HW_SERVICES]:
                    tags.add("streamline")
        model[DATA][HORTONWORKS][DATABASES_TO_CREATE] = tags
        for tag in [
                "ambari", "hive", "oozie", "druid", "superset", "rangeradmin",
                "rangerkms", "registry", "streamline"
        ]:
            user = tag
            if model[CLUSTER][HORTONWORKS][WEAK_PASSWORDS]:
                password = user
            else:
                password = genaratePassword()
            md5Password = "******" + hashlib.md5(password + user).hexdigest()
            model[DATA][HORTONWORKS][DATABASES][tag] = {
                'user': user,
                'database': tag,
                'md5Password': md5Password
            }
            model[PASSWORDS][DATABASES][tag] = password
        return True
Example #19
def groom(_plugin, model):
    setDefaultInMap(model[CLUSTER], K8S, {})
    setDefaultInMap(model[CLUSTER][K8S], KOOMGR, {})
    setDefaultInMap(model[CLUSTER][K8S][KOOMGR], DISABLED, False)
    if model[CLUSTER][K8S][KOOMGR][DISABLED]:
        return False
    else:
        setDefaultInMap(model[CLUSTER][K8S][KOOMGR], LOG_LEVEL, 0)
        setDefaultInMap(model[CLUSTER][K8S][KOOMGR], ADMIN_GROUP, "kooadmin")

        setDefaultInMap(model[DATA], K8S, {})
        setDefaultInMap(model[DATA][K8S], KOOMGR, {})

        providerByName = {}
        if KOOMGR in model[CONFIG]:
            if STATIC_PROVIDERS in model[CONFIG][KOOMGR]:
                for prvd in model[CONFIG][KOOMGR][STATIC_PROVIDERS]:
                    prvd[TYPE] = "static"
                    if prvd[NAME] in providerByName:
                        ERROR("There is two providers of name '{}'".format(
                            prvd[NAME]))
                    providerByName[prvd[NAME]] = prvd
            if LDAP_PROVIDERS in model[CONFIG][KOOMGR]:
                for prvd in model[CONFIG][KOOMGR][LDAP_PROVIDERS]:
                    prvd[TYPE] = "ldap"
                    if prvd[NAME] in providerByName:
                        ERROR("There is two providers of name '{}'".format(
                            prvd[NAME]))
                    providerByName[prvd[NAME]] = prvd
                    if ROOTCA in prvd:
                        prvd[ROOTCA] = appendPath(
                            os.path.dirname(model[DATA][CONFIG_FILE]),
                            prvd[ROOTCA])
                        if not os.path.isfile(prvd[ROOTCA]):
                            ERROR("Unable to find '{}'!".format(prvd[ROOTCA]))
            if CRD_PROVIDERS in model[CONFIG][KOOMGR]:
                for prvd in model[CONFIG][KOOMGR][CRD_PROVIDERS]:
                    prvd[TYPE] = "crd"
                    if prvd[NAME] in providerByName:
                        ERROR("There is two providers of name '{}'".format(
                            prvd[NAME]))
                    providerByName[prvd[NAME]] = prvd
        if LOCAL_MANIFESTS in model[CLUSTER][K8S][KOOMGR]:
            model[CLUSTER][K8S][KOOMGR][LOCAL_MANIFESTS][CRD] = appendPath(
                model[DATA][SOURCE_FILE_DIR],
                model[CLUSTER][K8S][KOOMGR][LOCAL_MANIFESTS][CRD])
            if not os.path.isfile(
                    model[CLUSTER][K8S][KOOMGR][LOCAL_MANIFESTS][CRD]):
                ERROR("Unable to find '{}'!".format(
                    model[CLUSTER][K8S][KOOMGR][LOCAL_MANIFESTS][CRD]))

            model[CLUSTER][K8S][KOOMGR][LOCAL_MANIFESTS][DEPLOY] = appendPath(
                model[DATA][SOURCE_FILE_DIR],
                model[CLUSTER][K8S][KOOMGR][LOCAL_MANIFESTS][DEPLOY])
            if not os.path.isfile(
                    model[CLUSTER][K8S][KOOMGR][LOCAL_MANIFESTS][DEPLOY]):
                ERROR("Unable to find '{}'!".format(
                    model[CLUSTER][K8S][KOOMGR][LOCAL_MANIFESTS][DEPLOY]))

            model[CLUSTER][K8S][KOOMGR][LOCAL_MANIFESTS][RBAC] = appendPath(
                model[DATA][SOURCE_FILE_DIR],
                model[CLUSTER][K8S][KOOMGR][LOCAL_MANIFESTS][RBAC])
            if not os.path.isfile(
                    model[CLUSTER][K8S][KOOMGR][LOCAL_MANIFESTS][RBAC]):
                ERROR("Unable to find '{}'!".format(
                    model[CLUSTER][K8S][KOOMGR][LOCAL_MANIFESTS][RBAC]))

        model[DATA][K8S][KOOMGR][PROVIDERS] = []
        for pname in model[CLUSTER][K8S][KOOMGR][PROVIDERS]:
            if pname not in providerByName:
                ERROR(
                    "Provider '{}' does not exists in global configuration file!"
                    .format(pname))
            prvd = providerByName[pname]
            model[DATA][K8S][KOOMGR][PROVIDERS].append(prvd)

        return True
Example #20
def groom(plugin, model):
    
    setDefaultInMap(model[CLUSTER][ELASTICSEARCH], DISABLED, False)
    if model[CLUSTER][ELASTICSEARCH][DISABLED]:
        return False
    lookupRepository(model, ELASTICSEARCH)
    if ELASTICSEARCH not in model[CONFIG] or ANSIBLE_REPO_FOLDER not in model[CONFIG][ELASTICSEARCH]:
        ERROR("Missing 'elasticsearch.ansible_repo_folder' in configuration file")
    ansible_repo_folder = appendPath(os.path.dirname(model[DATA]["configFile"]),  model[CONFIG][ELASTICSEARCH][ANSIBLE_REPO_FOLDER]) 
    model[CONFIG][ELASTICSEARCH][ANSIBLE_REPO_FOLDER] = ansible_repo_folder
    model[DATA]["rolePaths"].add(ansible_repo_folder)
    
    f = os.path.join(plugin.path, "default.yml")
    if os.path.exists(f):
        base = yaml.load(open(f), Loader=yaml.SafeLoader)
    else:
        base = {}
        
    model[DATA][ESNODES] = []        
    """ 
    For each es_node, will merge elasticsearch vars from:
    - Plugin default configuration file
    - global from cluster definition file
    - parent role
    - es_node """
    for role in model[CLUSTER][ROLES]:
        if ELASTICSEARCH in role and NODES in role[ELASTICSEARCH]:
            index = -1
            for esnode in role[ELASTICSEARCH][NODES]:
                index += 1
                map = copy.deepcopy(base)
                # Add repository info. There are two reasons to use a package url:
                # - It will be faster if the repo is local
                # - yum install seems to be buggy in the current role:
                #     TASK [ansible-elasticsearch : RedHat - Install Elasticsearch] **************************************************************************************************
                #     fatal: [w2]: FAILED! => {"msg": "The conditional check 'redhat_elasticsearch_install_from_repo.rc == 0' failed. The error was: error while evaluating conditional (redhat_elasticsearch_install_from_repo.rc == 0): 'dict object' has no attribute 'rc'"}  
                map["es_custom_package_url"] = model[DATA][REPOSITORIES][ELASTICSEARCH]["elasticsearch_package_url"]
                map["es_use_repository"] = False
                # Add global value
                if ELASTICSEARCH in model[CLUSTER] and PLAYBOOK_VARS in model[CLUSTER][ELASTICSEARCH]:
                    if not isinstance(model[CLUSTER][ELASTICSEARCH][PLAYBOOK_VARS], dict):
                        ERROR("Invalid global '{}.{}' definition:  not a dictionary".format(ELASTICSEARCH, PLAYBOOK_VARS))
                    else:
                        map = schemaMerge(map, model[CLUSTER][ELASTICSEARCH][PLAYBOOK_VARS])
                # Add the role specific value
                if PLAYBOOK_VARS in role[ELASTICSEARCH]:
                    if not isinstance(role[ELASTICSEARCH][PLAYBOOK_VARS], dict):
                        ERROR("Invalid role definition ('{}'):  '{}.{}' is not a dictionary".format(role[NAME], ELASTICSEARCH,PLAYBOOK_VARS))
                    else:
                        map = schemaMerge(map, role[ELASTICSEARCH][PLAYBOOK_VARS])
                # And get the es_node specific value
                if not isinstance(esnode, dict):
                    ERROR("Invalid node definition in role '{}':  item#{} is not a dictionary".format(role[NAME], index))
                else:
                    map = schemaMerge(map, esnode)
                if not ES_CONFIG in map or not NODE_MASTER in map[ES_CONFIG]:
                    ERROR("Invalid es_node definition in role '{}, item#{}: es_config.'node.master' must be defined".format(role[NAME], index))
                if not ES_CONFIG in map or not NODE_DATA in map[ES_CONFIG]:
                    ERROR("Invalid es_node definition in role '{}, item#{}: es_config.'node.data' must be defined".format(role[NAME], index))
                if not ES_INSTANCE_NAME in map:
                    ERROR("Invalid es_node definition in role '{}, item#{}: es_instance_name must be defined".format(role[NAME], index))
                map[ES_VERSION] = model[DATA][REPOSITORIES][ELASTICSEARCH][VERSION]
                map[ES_MAJOR_VERSION] = map[ES_VERSION][:2] + "X"
                esn = {}
                esn[ROLE] = role[NAME]
                esn[VARS] = map
                model[DATA][ESNODES].append(esn)
    # We must arrange for master nodes to be deployed first.
    model[DATA][ESNODES].sort(key=keyFromEsNode, reverse=False)   
    # We need to define an ansible group "_elasticsearch_" hosting all nodes with elasticsearch installed
    elasticGroup = []
    for role in model[CLUSTER][ROLES]:
        if ELASTICSEARCH in role and NODES in role[ELASTICSEARCH]:
            for node in role[NODES]:
                elasticGroup.append(node[NAME])
    model[DATA][GROUP_BY_NAME][_ELASTICSEARCH_] = elasticGroup
    return True
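
keyFromEsNode is referenced but not defined in this excerpt. Given the comment that master nodes must be deployed first, a plausible sort key (an assumption, not the project's actual implementation, and assuming the VARS/ES_CONFIG/NODE_MASTER constants map to the literal keys used below) would be:

def keyFromEsNode(esn):
    # Assumption: es_config['node.master'] holds a boolean; master-eligible nodes sort first.
    return 0 if esn["vars"]["es_config"].get("node.master", False) else 1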
Example #21
def main():
    global vaultFactory

    mydir = os.path.dirname(os.path.realpath(__file__))

    parser = argparse.ArgumentParser()
    parser.add_argument('--src', required=True)
    parser.add_argument('--mark', choices=["none", "both", "start", "end"])
    parser.add_argument('--dump', action='store_true')
    parser.add_argument('--dumpPasswords', action='store_true')
    parser.add_argument('--out')  # Generate a file to set some variable

    param = parser.parse_args()

    loggingConfFile = os.path.join(mydir, "./logging.yml")
    logging.config.dictConfig(
        yaml.load(open(loggingConfFile), Loader=yaml.SafeLoader))

    sourceFile = os.path.normpath(os.path.abspath(param.src))
    if not os.path.isfile(sourceFile):
        ERROR("File '{}' does not exists".format(sourceFile))
    logger.info("Will handle '{}'".format(sourceFile))
    sourceFileDir = os.path.dirname(sourceFile)

    cluster = yaml.load(open(sourceFile), Loader=yaml.SafeLoader)
    targetFolder = misc.appendPath(
        sourceFileDir,
        cluster["build_folder"] if "build_folder" in cluster else "build")
    misc.ensureFolder(targetFolder)
    logger.info("Build folder: '{}'".format(targetFolder))

    if "config_file" in cluster:
        baseConfigFile = cluster["config_file"]
    else:
        baseConfigFile = "ezconfig.yml"
    config, configFile = buildConfig(sourceFileDir, baseConfigFile)

    plugins = []
    plugins.append(Plugin("core", misc.appendPath(mydir, "../plugins/core")))
    logger.debug("Plugins path:'{}'".format(config[PLUGINS_PATH]))
    appendPlugins(plugins, cluster, config[PLUGINS_PATH])

    schema = buildSchema(mydir, plugins)
    configSchema, safeConfigSchema = buildConfigSchema(mydir,
                                                       config[PLUGINS_PATH])

    if param.dump:
        dumper = Dumper(targetFolder, param.dumpPasswords)
        dumper.dump("schema.json", schema)
        dumper.dump("config-schema.json", configSchema)
        dumper.dump("safe-config-schema.json", safeConfigSchema)
    else:
        dumper = None

    k = kwalify(source_data=cluster, schema_data=schema)
    k.validate(raise_exception=False)
    if len(k.errors) != 0:
        ERROR("Problem {0}: {1}".format(sourceFile, k.errors))

    k = kwalify(source_data=config, schema_data=configSchema)
    k.validate(raise_exception=False)
    if len(k.errors) != 0:
        ERROR("Configuration problem {0}: {1}".format(configFile, k.errors))

    data = {}
    data['sourceFileDir'] = sourceFileDir
    data["targetFolder"] = targetFolder
    data['ezclusterHome'] = misc.appendPath(mydir, "..")
    data["rolePaths"] = set()
    data["configFile"] = configFile

    model = {}
    model['cluster'] = cluster
    model["config"] = config
    model['data'] = data

    initVault(model)

    if SAFE_CONFIG in model and safeConfigSchema != None:
        k = kwalify(source_data=model[SAFE_CONFIG],
                    schema_data=safeConfigSchema)
        k.validate(raise_exception=False)
        if len(k.errors) != 0:
            ERROR("Configuration problem {0}: {1}".format(
                model["data"][_SAFE_CONFIG_FILE_], k.errors))

    for plugin in plugins:
        plugin.groom(model)

    for plugin in plugins:
        plugin.groom2(model)

    targetFileByName = buildTargetFileByName(plugins)

    if param.dump:
        dumper.dump("cluster.json", model['cluster'])
        dumper.dump("data.json", model['data'])
        dumper.dump("targetFileByName.json", targetFileByName)
        dumper.dump("config.json", config)
        if SAFE_CONFIG in model and dumper.unsafe:
            dumper.dump("safeConfig.json", model[SAFE_CONFIG])

        for plugin in plugins:
            plugin.dump(model, dumper)

    generate(targetFileByName, targetFolder, model, param.mark, dumper)

    if "out" in param:
        f = open(param.out, "w+")
        f.write("# Generated by ezcluster:\n")
        if "buildScript" in model["data"]:
            f.write('BUILD_SCRIPT="{}"\n'.format(model["data"]["buildScript"]))
        f.close()