コード例 #1
0
def groom(_plugin, model):
    """Groom the 'kubespray' K8S deployment section of the cluster model.

    Fills in defaults, resolves the repositories, helper and HTTP proxies this
    plugin needs, computes the DNS 'ndots' value and builds the docker
    certificates list in model[DATA].

    :param _plugin: The plugin instance (unused here).
    :param model: The full cluster model (mutated in place).
    :return: False if the kubespray plugin is disabled, True otherwise.
    """
    setDefaultInMap(model[CLUSTER], K8S, {})
    setDefaultInMap(model[CLUSTER][K8S], KUBESPRAY, {})
    # Alias the deeply-nested config dict; setDefaultInMap mutates in place,
    # so the alias stays valid.
    kspray = model[CLUSTER][K8S][KUBESPRAY]
    setDefaultInMap(kspray, DISABLED, False)
    setDefaultInMap(kspray, METRICS_SERVER, True)
    setDefaultInMap(kspray, AUDIT, False)
    setDefaultInMap(kspray, POD_SECURITY_POLICIES, True)
    if kspray[DISABLED]:
        return False
    lookupRepository(model, None, "docker_yum", kspray['docker_yum_repo_id'])
    if K9S_REPO_ID in kspray:
        lookupRepository(model, "k9s", repoId=kspray[K9S_REPO_ID])
    if HELM_REPO_ID in kspray:
        lookupRepository(model, "helm", repoId=kspray[HELM_REPO_ID])
    lookupHelper(model, KUBESPRAY, helperId=kspray["helper_id"])
    # dict.get() yields None when the optional proxy id is absent, matching
    # the original explicit 'if key in ... else None' pattern.
    lookupHttpProxy(model, kspray.get("docker_proxy_id"), "docker")
    lookupHttpProxy(model, kspray.get("master_root_proxy_id"), "master_root")
    # BUGFIX: the original tested "yum_proxy_id" but then read "yumproxy_id",
    # raising a KeyError whenever a yum proxy was actually configured.
    lookupHttpProxy(model, kspray.get("yum_proxy_id"), "yum")
    if FILES_REPO_ID in kspray:
        lookupRepository(model, "kubespray_files", repoId=kspray[FILES_REPO_ID])
    model[DATA][ROLE_PATHS].add(appendPath(model[DATA][HELPERS][KUBESPRAY][FOLDER], "roles"))
    # Number of dots in the cluster name + 1 (used as the DNS 'ndots' value).
    model[DATA]["dnsNbrDots"] = kspray[CLUSTER_NAME].count(".") + 1
    certByName = {}
    if DOCKER_CERTIFICATES in model["config"]:
        for cert in model["config"][DOCKER_CERTIFICATES]:
            # Certificate paths are relative to the configuration file location.
            cert["path"] = appendPath(os.path.dirname(model[DATA][CONFIG_FILE]), cert["path"])
            if not os.path.isfile(cert["path"]) or not os.access(cert["path"], os.R_OK):
                ERROR("Configuration error: docker_certificates.{}: Invalid path '{}'".format(cert["name"],  cert["path"]))
            certByName[cert["name"]] = cert
    model[DATA][DOCKER_CERTIFICATES] = []
    if DOCKER_CERTIFICATES in kspray:
        for certName in kspray[DOCKER_CERTIFICATES]:
            if certName in certByName:
                cert = certByName[certName]
                if "port" in cert:
                    cert["endpoint"] = "{}:{}".format(cert["host"], cert['port'])
                else:
                    # BUGFIX: key was misspelled "endoint", so certificates
                    # without an explicit port never got a usable endpoint.
                    cert["endpoint"] = cert["host"]
                model[DATA][DOCKER_CERTIFICATES].append(cert)
            else:
                ERROR("docker_certificates '{}' is not defined in configuration file!".format(certName))
    return True
コード例 #2
0
def groom(_plugin, model):
    """Groom the 'freeipa' section of the cluster model.

    Resolves the freeipa helper, prepares external CA certificate files to be
    copied onto the target node, and fills in sensible defaults for every
    declared user.

    :param _plugin: The plugin instance (unused here).
    :param model: The full cluster model (mutated in place).
    :return: False if freeipa is disabled, True otherwise.
    """
    setDefaultInMap(model[CLUSTER], FREEIPA, {})
    setDefaultInMap(model[CLUSTER][FREEIPA], DISABLED, False)
    if model[CLUSTER][FREEIPA][DISABLED]:
        return False
    ipa = model[CLUSTER][FREEIPA]
    model[DATA][FREEIPA] = {}
    lookupHelper(model, FREEIPA, helperId=ipa["helper_id"])
    model[DATA][ROLE_PATHS].add(appendPath(model[DATA][HELPERS][FREEIPA][FOLDER], "roles"))
    if CERT_FILES in ipa and len(ipa[CERT_FILES]) > 0:
        # NB: ipaserver_external_cert_files_from_controller does not work
        # (missing basename in the copy), so the transfer is handled here.
        # Also, transferring the root authority from one install to another
        # failed: a new CA is generated on each freeipa build.
        model[DATA][FREEIPA][FILES_TO_COPY] = []
        model[DATA][FREEIPA][EXTERNAL_CERT_FILES] = []
        for fileName in ipa[CERT_FILES]:
            src = appendPath(model[DATA][SOURCE_FILE_DIR], fileName)
            if not os.path.isfile(src):
                ERROR("Unable to find '{}'!".format(src))
            dest = os.path.join("/root/", os.path.basename(src))
            model[DATA][FREEIPA][FILES_TO_COPY].append({SRC: src, DEST: dest})
            model[DATA][FREEIPA][EXTERNAL_CERT_FILES].append(dest)
    for user in ipa.get(USERS, []):
        setDefaultInMap(user, UPDATE_PASSWORD, "on_create")
        # Provide the defaults ourselves rather than letting freeipa compute
        # them: better control, and freeipa does not modify them once set.
        fullName = "{} {}".format(user[FIRSTNAME], user[LASTNAME])
        setDefaultInMap(user, CN, fullName)
        setDefaultInMap(user, DISPLAYNAME, fullName)
        setDefaultInMap(user, INITALS, (user[FIRSTNAME][0] + user[LASTNAME][0]).upper())
    return True
コード例 #3
0
ファイル: groomer.py プロジェクト: mlahouar/ezcplugins
def groom(plugin, model):
    """Groom the 'kibana' section of the cluster model.

    Resolves the kibana repository and helper, then, for every role hosting
    kibana, builds the effective playbook variables for each node by merging,
    in increasing priority order:
      - the plugin default configuration file (default.yml)
      - the global kibana settings from the cluster definition file
      - the parent role settings
      - the node specific settings

    :param plugin: The plugin instance (used to locate default.yml).
    :param model: The full cluster model (mutated in place).
    :return: False if kibana is disabled, True otherwise.
    """
    setDefaultInMap(model[CLUSTER][KIBANA], DISABLED, False)
    if model[CLUSTER][KIBANA][DISABLED]:
        return False
    lookupRepository(model, KIBANA)
    lookupHelper(model, KIBANA)
    model[DATA][ROLE_PATHS].add(model[DATA][HELPERS][KIBANA][FOLDER])
    f = os.path.join(plugin.path, "default.yml")
    base = {}
    if os.path.exists(f):
        # BUGFIX: was 'yaml.load(open(f))', which leaked the file handle,
        # relied on the unsafe/deprecated default Loader (an error on modern
        # PyYAML), and produced None instead of a dict on an empty file.
        with open(f) as defaultsFile:
            base = yaml.safe_load(defaultsFile) or {}

    model[DATA][KBNODES] = []
    for role in model[CLUSTER][ROLES]:
        if KIBANA not in role:
            continue
        # Global (cluster level) kibana playbook variables.
        global_conf = {}
        if KIBANA in model[CLUSTER] and PLAYBOOK_VARS in model[CLUSTER][KIBANA]:
            if not isinstance(model[CLUSTER][KIBANA][PLAYBOOK_VARS], dict):
                ERROR("Invalid global '{}.{}' definition:  not a dictionary".format(KIBANA, PLAYBOOK_VARS))
            else:
                global_conf = schemaMerge(global_conf, model[CLUSTER][KIBANA][PLAYBOOK_VARS])
        # Role specific playbook variables.
        role_conf = {}
        if PLAYBOOK_VARS in role[KIBANA]:
            if not isinstance(role[KIBANA][PLAYBOOK_VARS], dict):
                ERROR("Invalid role definition ('{}'):  '{}.{}' is not a dictionary".format(role[NAME], KIBANA, PLAYBOOK_VARS))
            else:
                role_conf = schemaMerge(role_conf, role[KIBANA][PLAYBOOK_VARS])

        for kb_node in role[NODES]:
            mymap = copy.deepcopy(base)
            # Use a package url rather than the repository, for two reasons:
            # - It will be faster if the repo is local.
            # - yum install seems bugged in the current role:
            #     TASK [ansible-elasticsearch : RedHat - Install Elasticsearch]
            #     fatal: [w2]: FAILED! => {"msg": "The conditional check 'redhat_elasticsearch_install_from_repo.rc == 0' failed. ..."}
            mymap["kibana_custom_package_url"] = model[DATA][REPOSITORIES][KIBANA]["kibana_package_url"]
            mymap["es_use_repository"] = False
            # Layer global then role configuration on top of the defaults.
            mymap = schemaMerge(mymap, global_conf)
            mymap = schemaMerge(mymap, role_conf)
            # Node specific playbook variables (highest priority).
            if KIBANA in kb_node and PLAYBOOK_VARS in kb_node[KIBANA]:
                if not isinstance(kb_node[KIBANA][PLAYBOOK_VARS], dict):
                    ERROR("Invalid role definition ('{}'):  '{}.{}.{}' is not a dictionary".format(role[NAME], kb_node[NAME], KIBANA, PLAYBOOK_VARS))
                else:
                    mymap = schemaMerge(mymap, kb_node[KIBANA][PLAYBOOK_VARS])
            mymap[ES_VERSION] = model[DATA][REPOSITORIES][KIBANA][VERSION]
            # e.g. "6.8.3" -> "6.X" (first two chars are "<major>.")
            mymap[ES_MAJOR_VERSION] = mymap[ES_VERSION][:2] + "X"
            model[DATA][NODE_BY_NAME][kb_node[NAME]][KIBANA_PLAYBOOK_VARS] = mymap

        kbn = {}
        kbn[ROLE] = role[NAME]
        kbn[VARS] = schemaMerge(global_conf, role_conf)
        model[DATA][KBNODES].append(kbn)
    return True
コード例 #4
0
ファイル: groomer.py プロジェクト: mehdibn/ezcplugins
def groom(plugin, model):
    """Groom the 'elasticsearch' section of the cluster model.

    For every es_node of every elasticsearch role, builds the effective
    playbook variables by merging, in increasing priority order:
      - the plugin default configuration file (default.yml)
      - the global elasticsearch settings from the cluster definition file
      - the parent role settings
      - the es_node definition itself
    Then sorts the resulting node list so master nodes deploy first and
    registers the '_elasticsearch_' ansible group.

    :param plugin: The plugin instance (used to locate default.yml).
    :param model: The full cluster model (mutated in place).
    :return: False if elasticsearch is disabled, True otherwise.
    """
    setDefaultInMap(model[CLUSTER][ELASTICSEARCH], DISABLED, False)
    if model[CLUSTER][ELASTICSEARCH][DISABLED]:
        return False
    lookupRepository(model, ELASTICSEARCH)
    lookupHelper(model, ELASTICSEARCH)
    model[DATA][ROLE_PATHS].add(model[DATA][HELPERS][ELASTICSEARCH][FOLDER])
    f = os.path.join(plugin.path, "default.yml")
    base = {}
    if os.path.exists(f):
        # BUGFIX: was 'yaml.load(open(f))', which leaked the file handle,
        # relied on the unsafe/deprecated default Loader (an error on modern
        # PyYAML), and produced None instead of a dict on an empty file.
        with open(f) as defaultsFile:
            base = yaml.safe_load(defaultsFile) or {}

    model[DATA][ESNODES] = []
    for role in model[CLUSTER][ROLES]:
        if ELASTICSEARCH in role and NODES in role[ELASTICSEARCH]:
            for index, esnode in enumerate(role[ELASTICSEARCH][NODES]):
                mymap = copy.deepcopy(base)
                # Use a package url rather than the repository, for two reasons:
                # - It will be faster if the repo is local.
                # - yum install seems bugged in the current role:
                #     TASK [ansible-elasticsearch : RedHat - Install Elasticsearch]
                #     fatal: [w2]: FAILED! => {"msg": "The conditional check 'redhat_elasticsearch_install_from_repo.rc == 0' failed. ..."}
                mymap["es_custom_package_url"] = model[DATA][REPOSITORIES][ELASTICSEARCH]["elasticsearch_package_url"]
                mymap["es_use_repository"] = False
                # Global (cluster level) elasticsearch playbook variables.
                if ELASTICSEARCH in model[CLUSTER] and PLAYBOOK_VARS in model[CLUSTER][ELASTICSEARCH]:
                    if not isinstance(model[CLUSTER][ELASTICSEARCH][PLAYBOOK_VARS], dict):
                        ERROR("Invalid global '{}.{}' definition:  not a dictionary".format(ELASTICSEARCH, PLAYBOOK_VARS))
                    else:
                        mymap = schemaMerge(mymap, model[CLUSTER][ELASTICSEARCH][PLAYBOOK_VARS])
                # Role specific playbook variables.
                if PLAYBOOK_VARS in role[ELASTICSEARCH]:
                    if not isinstance(role[ELASTICSEARCH][PLAYBOOK_VARS], dict):
                        ERROR("Invalid role definition ('{}'):  '{}.{}' is not a dictionary".format(role[NAME], ELASTICSEARCH, PLAYBOOK_VARS))
                    else:
                        mymap = schemaMerge(mymap, role[ELASTICSEARCH][PLAYBOOK_VARS])
                # es_node specific values (highest priority).
                if not isinstance(esnode, dict):
                    ERROR("Invalid node definition in role '{}':  item#{} is not a dictionary".format(role[NAME], index))
                else:
                    mymap = schemaMerge(mymap, esnode)
                # node.master, node.data and es_instance_name are mandatory.
                if ES_CONFIG not in mymap or NODE_MASTER not in mymap[ES_CONFIG]:
                    ERROR("Invalid es_node definition in role '{}, item#{}: es_config.'node.master' must be defined".format(role[NAME], index))
                if ES_CONFIG not in mymap or NODE_DATA not in mymap[ES_CONFIG]:
                    ERROR("Invalid es_node definition in role '{}, item#{}: es_config.'node.data' must be defined".format(role[NAME], index))
                if ES_INSTANCE_NAME not in mymap:
                    ERROR("Invalid es_node definition in role '{}, item#{}: es_instance_name must be defined".format(role[NAME], index))
                mymap[ES_VERSION] = model[DATA][REPOSITORIES][ELASTICSEARCH][VERSION]
                # e.g. "6.8.3" -> "6.X" (first two chars are "<major>.")
                mymap[ES_MAJOR_VERSION] = mymap[ES_VERSION][:2] + "X"
                esn = {}
                esn[ROLE] = role[NAME]
                esn[VARS] = mymap
                model[DATA][ESNODES].append(esn)
    # We must arrange for master nodes to be deployed first.
    model[DATA][ESNODES].sort(key=keyFromEsNode, reverse=False)
    # Ansible group "_elasticsearch_" hosts all nodes with elasticsearch installed.
    elasticGroup = []
    for role in model[CLUSTER][ROLES]:
        if ELASTICSEARCH in role and NODES in role[ELASTICSEARCH]:
            for node in role[NODES]:
                elasticGroup.append(node[NAME])
    model[DATA][GROUP_BY_NAME][_ELASTICSEARCH_] = elasticGroup
    return True