Example 1: test_get_instance_params_default, with an expanded layout of the expected list
 def test_get_instance_params_default(self):
     instance_configs = configs.get_instance_params(self.instance)
     expected = [
         {
             "hdfs-site": {
                 "dfs.datanode.data.dir":
                 "/data1/hdfs/data,/data2/hdfs/data",
                 "dfs.journalnode.edits.dir":
                 "/data1/hdfs/journalnode,/data2/hdfs/journalnode",
                 "dfs.namenode.checkpoint.dir":
                 "/data1/hdfs/namesecondary,/data2/hdfs/namesecondary",
                 "dfs.namenode.name.dir":
                 "/data1/hdfs/namenode,/data2/hdfs/namenode"
             }
         },
         {
             "yarn-site": {
                 "yarn.nodemanager.local-dirs":
                 "/data1/yarn/local,/data2/yarn/local",
                 "yarn.nodemanager.log-dirs":
                 "/data1/yarn/log,/data2/yarn/log",
                 "yarn.timeline-service.leveldb-timeline-store.path":
                 "/data1/yarn/timeline,/data2/yarn/timeline"
             }
         },
         {
             "oozie-site": {
                 "oozie.service.AuthorizationService.security.enabled":
                 "false"
             }
         }
     ]
     self.assertConfigEqual(expected, instance_configs)
Example 2: test_get_instance_params_default, with compact bracket placement in the expected list
 def test_get_instance_params_default(self):
     instance_configs = configs.get_instance_params(self.instance)
     expected = [{
         "hdfs-site": {
             "dfs.datanode.data.dir":
             "/data1/hdfs/data,/data2/hdfs/data",
             "dfs.journalnode.edits.dir":
             "/data1/hdfs/journalnode,/data2/hdfs/journalnode",
             "dfs.namenode.checkpoint.dir":
             "/data1/hdfs/namesecondary,/data2/hdfs/namesecondary",
             "dfs.namenode.name.dir":
             "/data1/hdfs/namenode,/data2/hdfs/namenode"
         }
     }, {
         "yarn-site": {
             "yarn.nodemanager.local-dirs":
             "/data1/yarn/local,/data2/yarn/local",
             "yarn.nodemanager.log-dirs":
             "/data1/yarn/log,/data2/yarn/log",
             "yarn.timeline-service.leveldb-timeline-store.path":
             "/data1/yarn/timeline,/data2/yarn/timeline"
         }
     }, {
         "oozie-site": {
             "oozie.service.AuthorizationService.security.enabled": "false"
         }
     }]
     self.assertConfigEqual(expected, instance_configs)
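Both variants above build the same expected structure; the comparison itself is delegated to assertConfigEqual, whose definition is not part of these examples. A minimal sketch of such a helper, assuming it treats the expected and actual values as unordered lists of single-key dictionaries, could look like this:

import unittest


class ConfigTestBase(unittest.TestCase):
    # Hypothetical base class, not taken from the examples above: it assumes
    # assertConfigEqual should ignore the order of the configuration entries.

    def assertConfigEqual(self, expected, actual):
        self.assertEqual(len(expected), len(actual))
        for config in expected:
            # Each entry is a single-key dict such as {"hdfs-site": {...}}.
            self.assertIn(config, actual)
        for config in actual:
            self.assertIn(config, expected)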
Example 3: create_blueprint, which builds the host groups and creates the blueprint through AmbariClient
def create_blueprint(cluster):
    _prepare_ranger(cluster)
    cluster = conductor.cluster_get(context.ctx(), cluster.id)
    host_groups = []
    for ng in cluster.node_groups:
        procs = p_common.get_ambari_proc_list(ng)
        procs.extend(p_common.get_clients(cluster))
        for instance in ng.instances:
            hg = {
                "name": instance.instance_name,
                "configurations": configs.get_instance_params(instance),
                "components": []
            }
            for proc in procs:
                hg["components"].append({"name": proc})
            host_groups.append(hg)
    bp = {
        "Blueprints": {
            "stack_name": "HDP",
            "stack_version": cluster.hadoop_version
        },
        "host_groups": host_groups,
        "configurations": configs.get_cluster_params(cluster)
    }
    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
    password = cluster.extra["ambari_password"]
    with ambari_client.AmbariClient(ambari, password=password) as client:
        client.create_blueprint(cluster.name, bp)
Example 4: create_blueprint, identical to Example 3
def create_blueprint(cluster):
    _prepare_ranger(cluster)
    cluster = conductor.cluster_get(context.ctx(), cluster.id)
    host_groups = []
    for ng in cluster.node_groups:
        procs = p_common.get_ambari_proc_list(ng)
        procs.extend(p_common.get_clients(cluster))
        for instance in ng.instances:
            hg = {
                "name": instance.instance_name,
                "configurations": configs.get_instance_params(instance),
                "components": []
            }
            for proc in procs:
                hg["components"].append({"name": proc})
            host_groups.append(hg)
    bp = {
        "Blueprints": {
            "stack_name": "HDP",
            "stack_version": cluster.hadoop_version
        },
        "host_groups": host_groups,
        "configurations": configs.get_cluster_params(cluster)
    }
    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
    password = cluster.extra["ambari_password"]
    with ambari_client.AmbariClient(ambari, password=password) as client:
        client.create_blueprint(cluster.name, bp)
Example 5: create_blueprint extended with Kerberos and HA handling, using the get_host_group_components and _get_ambari_client helpers
def create_blueprint(cluster):
    _prepare_ranger(cluster)
    cluster = conductor.cluster_get(context.ctx(), cluster.id)
    host_groups = []
    for ng in cluster.node_groups:
        procs = p_common.get_ambari_proc_list(ng)
        procs.extend(p_common.get_clients(cluster))
        for instance in ng.instances:
            hg = {
                "name": instance.instance_name,
                "configurations": configs.get_instance_params(instance),
                "components": get_host_group_components(cluster, procs)
            }
            host_groups.append(hg)
    bp = {
        "Blueprints": {
            "stack_name": "HDP",
            "stack_version": cluster.hadoop_version,
        },
        "host_groups": host_groups,
        "configurations": configs.get_cluster_params(cluster)
    }

    if kerberos.is_kerberos_security_enabled(cluster):
        bp['configurations'].extend([
            _serialize_mit_kdc_kerberos_env(cluster),
            _serialize_krb5_configs(cluster)
        ])
        bp['Blueprints']['security'] = {'type': 'KERBEROS'}

    general_configs = cluster.cluster_configs.get("general", {})
    if (general_configs.get(p_common.NAMENODE_HA)
            or general_configs.get(p_common.RESOURCEMANAGER_HA)
            or general_configs.get(p_common.HBASE_REGIONSERVER_HA)):
        bp = ha_helper.update_bp_ha_common(cluster, bp)

    if general_configs.get(p_common.NAMENODE_HA):
        bp = ha_helper.update_bp_for_namenode_ha(cluster, bp)

    if general_configs.get(p_common.RESOURCEMANAGER_HA):
        bp = ha_helper.update_bp_for_resourcemanager_ha(cluster, bp)

    if general_configs.get(p_common.HBASE_REGIONSERVER_HA):
        bp = ha_helper.update_bp_for_hbase_ha(cluster, bp)

    with _get_ambari_client(cluster) as client:
        return client.create_blueprint(cluster.name, bp)
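Example 5 hides the client construction behind a _get_ambari_client helper that the snippet does not define. Judging from the inline pattern in Examples 3 and 4, a plausible sketch (an assumption, not the actual helper) is:

def _get_ambari_client(cluster):
    # Mirrors the inline construction used in Examples 3 and 4; whether the
    # real helper does anything beyond this is an assumption. plugin_utils,
    # p_common and ambari_client are the module-level imports already used
    # by create_blueprint above.
    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
    password = cluster.extra["ambari_password"]
    return ambari_client.AmbariClient(ambari, password=password)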
Example 6: test_get_instance_params with user-supplied node_configs, expanded layout
    def test_get_instance_params(self):
        self.ng.node_configs = {
            "YARN": {
                "mapreduce.map.java.opts": "-Dk=v",
                "yarn.scheduler.minimum-allocation-mb": "256"
            }
        }
        instance_configs = configs.get_instance_params(self.instance)
        expected = [
            {
                "hdfs-site": {
                    "dfs.datanode.data.dir":
                    "/data1/hdfs/data,/data2/hdfs/data",
                    "dfs.journalnode.edits.dir":
                    "/data1/hdfs/journalnode,/data2/hdfs/journalnode",
                    "dfs.namenode.checkpoint.dir":
                    "/data1/hdfs/namesecondary,/data2/hdfs/namesecondary",
                    "dfs.namenode.name.dir":
                    "/data1/hdfs/namenode,/data2/hdfs/namenode"
                }
            },
            {
                "mapred-site": {
                    "mapreduce.map.java.opts": "-Dk=v"
                }
            },
            {
                "yarn-site": {
                    "yarn.nodemanager.local-dirs":
                    "/data1/yarn/local,/data2/yarn/local",
                    "yarn.nodemanager.log-dirs":
                    "/data1/yarn/log,/data2/yarn/log",
                    "yarn.scheduler.minimum-allocation-mb": "256",
                    "yarn.timeline-service.leveldb-timeline-store.path":
                    "/data1/yarn/timeline,/data2/yarn/timeline"
                }
            },
            {
                "oozie-site": {
                    "oozie.service.AuthorizationService.security.enabled":
                    "false"
                }
            }
        ]
        self.assertConfigEqual(expected, instance_configs)
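Example 6 (and Example 9 below) shows that node_configs registered under the "YARN" service are folded into the generated per-instance files: mapreduce.map.java.opts ends up in mapred-site and yarn.scheduler.minimum-allocation-mb in yarn-site, next to the storage-path defaults. The following is only a rough illustration of that merge step, not the actual get_instance_params implementation; the property-to-file mapping is inferred from the expected output above.

# Illustrative only: the property-to-file mapping is an assumption based on
# the expected output in Examples 6 and 9.
PROPERTY_TO_FILE = {
    "mapreduce.map.java.opts": "mapred-site",
    "yarn.scheduler.minimum-allocation-mb": "yarn-site",
}


def merge_node_configs(base_configs, node_configs):
    # Merge {service: {prop: value}} overrides into a list of single-key
    # dicts such as [{"yarn-site": {...}}, ...] without mutating the input.
    merged = [{name: dict(section)}
              for cfg in base_configs
              for name, section in cfg.items()]
    for service_props in node_configs.values():
        for prop, value in service_props.items():
            target = PROPERTY_TO_FILE.get(prop)
            if target is None:
                continue
            for cfg in merged:
                if target in cfg:
                    cfg[target][prop] = value
                    break
            else:
                merged.append({target: {prop: value}})
    return merged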
Example 7: create_blueprint with HA handling, with the boolean operators at the end of each wrapped condition line
def create_blueprint(cluster):
    _prepare_ranger(cluster)
    cluster = conductor.cluster_get(context.ctx(), cluster.id)
    host_groups = []
    for ng in cluster.node_groups:
        procs = p_common.get_ambari_proc_list(ng)
        procs.extend(p_common.get_clients(cluster))
        for instance in ng.instances:
            hg = {
                "name": instance.instance_name,
                "configurations": configs.get_instance_params(instance),
                "components": []
            }
            for proc in procs:
                hg["components"].append({"name": proc})
            host_groups.append(hg)
    bp = {
        "Blueprints": {
            "stack_name": "HDP",
            "stack_version": cluster.hadoop_version
        },
        "host_groups": host_groups,
        "configurations": configs.get_cluster_params(cluster)
    }
    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
    password = cluster.extra["ambari_password"]

    general_configs = cluster.cluster_configs.get("general", {})
    if (general_configs.get(p_common.NAMENODE_HA) or
            general_configs.get(p_common.RESOURCEMANAGER_HA) or
            general_configs.get(p_common.HBASE_REGIONSERVER_HA)):
        bp = ha_helper.update_bp_ha_common(cluster, bp)

    if general_configs.get(p_common.NAMENODE_HA):
        bp = ha_helper.update_bp_for_namenode_ha(cluster, bp)

    if general_configs.get(p_common.RESOURCEMANAGER_HA):
        bp = ha_helper.update_bp_for_resourcemanager_ha(cluster, bp)

    if general_configs.get(p_common.HBASE_REGIONSERVER_HA):
        bp = ha_helper.update_bp_for_hbase_ha(cluster, bp)

    with ambari_client.AmbariClient(ambari, password=password) as client:
        return client.create_blueprint(cluster.name, bp)
Example 8: create_blueprint with HA handling, with the boolean operators at the start of each wrapped condition line
def create_blueprint(cluster):
    _prepare_ranger(cluster)
    cluster = conductor.cluster_get(context.ctx(), cluster.id)
    host_groups = []
    for ng in cluster.node_groups:
        procs = p_common.get_ambari_proc_list(ng)
        procs.extend(p_common.get_clients(cluster))
        for instance in ng.instances:
            hg = {
                "name": instance.instance_name,
                "configurations": configs.get_instance_params(instance),
                "components": []
            }
            for proc in procs:
                hg["components"].append({"name": proc})
            host_groups.append(hg)
    bp = {
        "Blueprints": {
            "stack_name": "HDP",
            "stack_version": cluster.hadoop_version
        },
        "host_groups": host_groups,
        "configurations": configs.get_cluster_params(cluster)
    }
    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
    password = cluster.extra["ambari_password"]

    general_configs = cluster.cluster_configs.get("general", {})
    if (general_configs.get(p_common.NAMENODE_HA)
            or general_configs.get(p_common.RESOURCEMANAGER_HA)
            or general_configs.get(p_common.HBASE_REGIONSERVER_HA)):
        bp = ha_helper.update_bp_ha_common(cluster, bp)

    if general_configs.get(p_common.NAMENODE_HA):
        bp = ha_helper.update_bp_for_namenode_ha(cluster, bp)

    if general_configs.get(p_common.RESOURCEMANAGER_HA):
        bp = ha_helper.update_bp_for_resourcemanager_ha(cluster, bp)

    if general_configs.get(p_common.HBASE_REGIONSERVER_HA):
        bp = ha_helper.update_bp_for_hbase_ha(cluster, bp)

    with ambari_client.AmbariClient(ambari, password=password) as client:
        return client.create_blueprint(cluster.name, bp)
Example 9: test_get_instance_params with user-supplied node_configs, compact layout
 def test_get_instance_params(self):
     self.ng.node_configs = {
         "YARN": {
             "mapreduce.map.java.opts": "-Dk=v",
             "yarn.scheduler.minimum-allocation-mb": "256"
         }
     }
     instance_configs = configs.get_instance_params(self.instance)
     expected = [{
         "hdfs-site": {
             "dfs.datanode.data.dir":
             "/data1/hdfs/data,/data2/hdfs/data",
             "dfs.journalnode.edits.dir":
             "/data1/hdfs/journalnode,/data2/hdfs/journalnode",
             "dfs.namenode.checkpoint.dir":
             "/data1/hdfs/namesecondary,/data2/hdfs/namesecondary",
             "dfs.namenode.name.dir":
             "/data1/hdfs/namenode,/data2/hdfs/namenode"
         }
     }, {
         "mapred-site": {
             "mapreduce.map.java.opts": "-Dk=v"
         }
     }, {
         "yarn-site": {
             "yarn.nodemanager.local-dirs":
             "/data1/yarn/local,/data2/yarn/local",
             "yarn.nodemanager.log-dirs":
             "/data1/yarn/log,/data2/yarn/log",
             "yarn.scheduler.minimum-allocation-mb":
             "256",
             "yarn.timeline-service.leveldb-timeline-store.path":
             "/data1/yarn/timeline,/data2/yarn/timeline"
         }
     }, {
         "oozie-site": {
             "oozie.service.AuthorizationService.security.enabled": "false"
         }
     }]
     self.assertConfigEqual(expected, instance_configs)