Example #1
# Note: the listing dropped the mock.patch decorator that injects
# get_oozie_password; the patch target below is an assumed module path.
@mock.patch('sahara.plugins.vanilla.hadoop2.oozie_helper.'
            'get_oozie_password')
def test_get_oozie_mysql_configs(self, get_oozie_password):
    get_oozie_password.return_value = '123'
    configs = {
        'oozie.service.JPAService.jdbc.driver':
            'com.mysql.jdbc.Driver',
        'oozie.service.JPAService.jdbc.url':
            'jdbc:mysql://localhost:3306/oozie',
        # the listing masks credential strings; 'oozie' is an assumed
        # username, and the password must match the mocked return value
        # above for the assertion below to hold
        'oozie.service.JPAService.jdbc.username': 'oozie',
        'oozie.service.JPAService.jdbc.password': '123'
    }
    cluster = mock.Mock()
    ret = o_helper.get_oozie_mysql_configs(cluster)
    get_oozie_password.assert_called_once_with(cluster)
    self.assertEqual(ret, configs)
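
For context, a minimal sketch of the helper this test exercises, reconstructed from the assertions above; treat it as an approximation of the plugin's oozie_helper code, not the shipped implementation:

def get_oozie_mysql_configs(cluster):
    # get_oozie_password is the module-level helper patched in the test
    # above; the username is an assumption (masked in the listing)
    return {
        'oozie.service.JPAService.jdbc.driver':
            'com.mysql.jdbc.Driver',
        'oozie.service.JPAService.jdbc.url':
            'jdbc:mysql://localhost:3306/oozie',
        'oozie.service.JPAService.jdbc.username': 'oozie',
        'oozie.service.JPAService.jdbc.password':
            get_oozie_password(cluster)
    }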
Example #2
def start_oozie_process(pctx, instance):
    with context.set_current_instance_id(instance.instance_id):
        with instance.remote() as r:
            if c_helper.is_mysql_enabled(pctx, instance.cluster):
                _start_mysql(r)
                LOG.debug("Creating Oozie DB Schema")
                sql_script = files.get_file_text(
                    'plugins/vanilla/hadoop2/resources/create_oozie_db.sql')

                password = oozie_helper.get_oozie_mysql_configs(
                    instance.cluster)['oozie.service.JPAService.jdbc.password']
                sql_script = sql_script.replace("password", password)

                script_location = "create_oozie_db.sql"
                r.write_file_to(script_location, sql_script)
                r.execute_command('mysql -u root < %(script_location)s && '
                                  'rm %(script_location)s' %
                                  {"script_location": script_location})

            _oozie_share_lib(r)
            _start_oozie(r)
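
Two notes on the example above. First, the `replace("password", password)` call only works because the bundled create_oozie_db.sql is assumed to contain the literal placeholder `password`. Second, the helpers `_start_mysql`, `_oozie_share_lib` and `_start_oozie` are defined elsewhere in the module; the sketch below shows plausible shapes using only the `remote` API already visible above, with every path and command an assumption:

def _start_mysql(r):
    # assumption: the image provides a standard service script
    r.execute_command('sudo service mysql start')


def _oozie_share_lib(r):
    # assumption: sharelib installation is a one-shot setup script run
    # as the hadoop user; the HDFS URL mirrors the fs.defaultFS value
    # configured in the later examples
    r.execute_command('sudo su - -c "/opt/oozie/bin/oozie-setup.sh '
                      'sharelib create -fs hdfs://localhost:9000" hadoop')


def _start_oozie(r):
    r.execute_command('sudo su - -c "/opt/oozie/bin/oozied.sh start" hadoop')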
Example #3
def _get_hadoop_configs(pctx, instance):
    cluster = instance.node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(instance)
    confs = {
        "Hadoop": {"fs.defaultFS": "hdfs://%s:9000" % nn_hostname},
        "HDFS": {
            "dfs.namenode.name.dir": ",".join(dirs["hadoop_name_dirs"]),
            "dfs.datanode.data.dir": ",".join(dirs["hadoop_data_dirs"]),
            "dfs.hosts": "%s/dn-include" % HADOOP_CONF_DIR,
            "dfs.hosts.exclude": "%s/dn-exclude" % HADOOP_CONF_DIR,
        },
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs["YARN"] = {
            "yarn.nodemanager.aux-services": "mapreduce_shuffle",
            "yarn.resourcemanager.hostname": "%s" % res_hostname,
            "yarn.resourcemanager.nodes.include-path": "%s/nm-include" % (HADOOP_CONF_DIR),
            "yarn.resourcemanager.nodes.exclude-path": "%s/nm-exclude" % (HADOOP_CONF_DIR),
        }
        confs["MapReduce"] = {"mapreduce.framework.name": "yarn"}
        hs_hostname = vu.get_instance_hostname(vu.get_historyserver(cluster))
        if hs_hostname:
            confs["MapReduce"]["mapreduce.jobhistory.address"] = "%s:10020" % hs_hostname

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {"hadoop.proxyuser.hadoop.hosts": "*", "hadoop.proxyuser.hadoop.groups": "hadoop"}
        confs["Hadoop"].update(hadoop_cfg)

        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(pctx, cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs(cluster))

        confs["JobFlow"] = oozie_cfg

    if c_helper.is_swift_enabled(pctx, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config["name"]] = config["value"]

        confs["Hadoop"].update(swift_configs)

    if c_helper.is_data_locality_enabled(pctx, cluster):
        confs["Hadoop"].update(th.TOPOLOGY_CONFIG)
        confs["Hadoop"].update({"topology.script.file.name": HADOOP_CONF_DIR + "/topology.sh"})

    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))
    if hive_hostname:
        hive_cfg = {
            "hive.warehouse.subdir.inherit.perms": True,
            "javax.jdo.option.ConnectionURL": "jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true",
        }

        if c_helper.is_mysql_enabled(pctx, cluster):
            hive_cfg.update(
                {
                    "javax.jdo.option.ConnectionURL": "jdbc:mysql://%s/metastore" % hive_hostname,
                    "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver",
                    "javax.jdo.option.ConnectionUserName": "******",
                    "javax.jdo.option.ConnectionPassword": "******",
                    "datanucleus.autoCreateSchema": "false",
                    "datanucleus.fixedDatastore": "true",
                    "hive.metastore.uris": "thrift://%s:9083" % hive_hostname,
                }
            )

        proxy_configs = cluster.cluster_configs.get("proxy_configs")
        if proxy_configs and c_helper.is_swift_enabled(pctx, cluster):
            key = key_manager.API().get(context.current(), proxy_configs["proxy_password"])
            password = key.get_encoded()
            hive_cfg.update(
                {
                    swift.HADOOP_SWIFT_USERNAME: proxy_configs["proxy_username"],
                    swift.HADOOP_SWIFT_PASSWORD: password,
                    swift.HADOOP_SWIFT_TRUST_ID: proxy_configs["proxy_trust_id"],
                    swift.HADOOP_SWIFT_DOMAIN_NAME: CONF.proxy_user_domain_name,
                }
            )

        confs["Hive"] = hive_cfg

    return confs
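
Every `_get_hadoop_configs` variant in this listing builds its JobFlow section from `o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)`, which never appears here. A hedged sketch of a plausible shape, pointing Oozie's Hadoop accessor at the cluster's configuration directory (both keys are assumptions):

def get_oozie_required_xml_configs(hadoop_conf_dir):
    # assumed keys: wire Oozie to the Hadoop config dir and disable
    # the authorization service, as vanilla-plugin defaults suggest
    return {
        'oozie.service.HadoopAccessorService.hadoop.configurations':
            '*=%s' % hadoop_conf_dir,
        'oozie.service.AuthorizationService.security.enabled': 'false'
    }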
Example #4
def _get_hadoop_configs(pctx, node_group):
    cluster = node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(node_group)
    confs = {
        'Hadoop': {
            'fs.defaultFS': 'hdfs://%s:9000' % nn_hostname
        },
        'HDFS': {
            'dfs.namenode.name.dir': ','.join(dirs['hadoop_name_dirs']),
        'dfs.datanode.data.dir': ','.join(dirs['hadoop_data_dirs']),
            'dfs.hosts': '%s/dn-include' % HADOOP_CONF_DIR,
            'dfs.hosts.exclude': '%s/dn-exclude' % HADOOP_CONF_DIR
        }
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs['YARN'] = {
            'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
            'yarn.resourcemanager.hostname': '%s' % res_hostname,
            'yarn.resourcemanager.nodes.include-path': '%s/nm-include' % (
                HADOOP_CONF_DIR),
            'yarn.resourcemanager.nodes.exclude-path': '%s/nm-exclude' % (
                HADOOP_CONF_DIR)
        }
        confs['MapReduce'] = {
            'mapreduce.framework.name': 'yarn'
        }
        hs_hostname = vu.get_instance_hostname(vu.get_historyserver(cluster))
        if hs_hostname:
            confs['MapReduce']['mapreduce.jobhistory.address'] = (
                "%s:10020" % hs_hostname)

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {
            'hadoop.proxyuser.hadoop.hosts': '*',
            'hadoop.proxyuser.hadoop.groups': 'hadoop'
        }
        confs['Hadoop'].update(hadoop_cfg)

        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(pctx, cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs(cluster))

        confs['JobFlow'] = oozie_cfg

    if c_helper.is_swift_enabled(pctx, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config['name']] = config['value']

        confs['Hadoop'].update(swift_configs)

    if c_helper.is_data_locality_enabled(pctx, cluster):
        confs['Hadoop'].update(th.TOPOLOGY_CONFIG)
        confs['Hadoop'].update({"topology.script.file.name":
                                HADOOP_CONF_DIR + "/topology.sh"})

    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))
    if hive_hostname:
        hive_cfg = {
            'hive.warehouse.subdir.inherit.perms': True,
            'javax.jdo.option.ConnectionURL':
            'jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true'
        }

        if c_helper.is_mysql_enabled(pctx, cluster):
            hive_cfg.update({
                'javax.jdo.option.ConnectionURL':
                'jdbc:mysql://%s/metastore' % hive_hostname,
                'javax.jdo.option.ConnectionDriverName':
                'com.mysql.jdbc.Driver',
                # credential values are masked in the source listing
                'javax.jdo.option.ConnectionUserName': '******',
                'javax.jdo.option.ConnectionPassword': '******',
                'datanucleus.autoCreateSchema': 'false',
                'datanucleus.fixedDatastore': 'true',
                'hive.metastore.uris': 'thrift://%s:9083' % hive_hostname,
            })

        proxy_configs = cluster.cluster_configs.get('proxy_configs')
        if proxy_configs and c_helper.is_swift_enabled(pctx, cluster):
            hive_cfg.update({
                swift.HADOOP_SWIFT_USERNAME: proxy_configs['proxy_username'],
                swift.HADOOP_SWIFT_PASSWORD: proxy_configs['proxy_password'],
                swift.HADOOP_SWIFT_TRUST_ID: proxy_configs['proxy_trust_id'],
                swift.HADOOP_SWIFT_DOMAIN_NAME: CONF.proxy_user_domain_name
            })

        confs['Hive'] = hive_cfg

    return confs
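
The nested `confs` dict groups properties by service ('Hadoop', 'HDFS', 'YARN', and so on). As a self-contained illustration of how one group could be rendered into a Hadoop-style *-site.xml payload (the renderer is illustrative, not part of the plugin):

import xml.etree.ElementTree as ET


def render_hadoop_xml(props):
    # build the standard <configuration><property> document from one
    # group of the confs dict, e.g. confs['HDFS']
    root = ET.Element('configuration')
    for name, value in sorted(props.items()):
        prop = ET.SubElement(root, 'property')
        ET.SubElement(prop, 'name').text = name
        ET.SubElement(prop, 'value').text = str(value)
    return ET.tostring(root, encoding='unicode')


# e.g. print(render_hadoop_xml(confs['HDFS']))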
Example #5
def _get_hadoop_configs(pctx, instance):
    cluster = instance.node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(instance)
    confs = {
        'Hadoop': {
            'fs.defaultFS': 'hdfs://%s:9000' % nn_hostname
        },
        'HDFS': {
            'dfs.namenode.name.dir': ','.join(dirs['hadoop_name_dirs']),
            'dfs.datanode.data.dir': ','.join(dirs['hadoop_data_dirs']),
            'dfs.hosts': '%s/dn-include' % HADOOP_CONF_DIR,
            'dfs.hosts.exclude': '%s/dn-exclude' % HADOOP_CONF_DIR
        }
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs['YARN'] = {
            'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
            'yarn.resourcemanager.hostname': '%s' % res_hostname,
            'yarn.resourcemanager.nodes.include-path': '%s/nm-include' % (
                HADOOP_CONF_DIR),
            'yarn.resourcemanager.nodes.exclude-path': '%s/nm-exclude' % (
                HADOOP_CONF_DIR)
        }
        confs['MapReduce'] = {
            'mapreduce.framework.name': 'yarn'
        }
        hs_hostname = vu.get_instance_hostname(vu.get_historyserver(cluster))
        if hs_hostname:
            confs['MapReduce']['mapreduce.jobhistory.address'] = (
                "%s:10020" % hs_hostname)

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {
            'hadoop.proxyuser.hadoop.hosts': '*',
            'hadoop.proxyuser.hadoop.groups': 'hadoop'
        }
        confs['Hadoop'].update(hadoop_cfg)

        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(pctx, cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs(cluster))

        confs['JobFlow'] = oozie_cfg

    if c_helper.is_swift_enabled(pctx, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config['name']] = config['value']

        confs['Hadoop'].update(swift_configs)

    if c_helper.is_data_locality_enabled(pctx, cluster):
        confs['Hadoop'].update(th.TOPOLOGY_CONFIG)
        confs['Hadoop'].update({"topology.script.file.name":
                                HADOOP_CONF_DIR + "/topology.sh"})

    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))
    if hive_hostname:
        hive_cfg = {
            'hive.warehouse.subdir.inherit.perms': True,
            'javax.jdo.option.ConnectionURL':
            'jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true'
        }

        if c_helper.is_mysql_enabled(pctx, cluster):
            hive_cfg.update({
                'javax.jdo.option.ConnectionURL':
                'jdbc:mysql://%s/metastore' % hive_hostname,
                'javax.jdo.option.ConnectionDriverName':
                'com.mysql.jdbc.Driver',
                # credential values are masked in the source listing
                'javax.jdo.option.ConnectionUserName': '******',
                'javax.jdo.option.ConnectionPassword': '******',
                'datanucleus.autoCreateSchema': 'false',
                'datanucleus.fixedDatastore': 'true',
                'hive.metastore.uris': 'thrift://%s:9083' % hive_hostname,
            })

        proxy_configs = cluster.cluster_configs.get('proxy_configs')
        if proxy_configs and c_helper.is_swift_enabled(pctx, cluster):
            hive_cfg.update({
                swift.HADOOP_SWIFT_USERNAME: proxy_configs['proxy_username'],
                swift.HADOOP_SWIFT_PASSWORD: proxy_configs['proxy_password'],
                swift.HADOOP_SWIFT_TRUST_ID: proxy_configs['proxy_trust_id'],
                swift.HADOOP_SWIFT_DOMAIN_NAME: CONF.proxy_user_domain_name
            })

        confs['Hive'] = hive_cfg

    return confs
Example #6
def _get_hadoop_configs(pctx, node_group):
    cluster = node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(node_group)
    confs = {
        'Hadoop': {
            'fs.defaultFS': 'hdfs://%s:9000' % nn_hostname
        },
        'HDFS': {
            'dfs.namenode.name.dir': ','.join(dirs['hadoop_name_dirs']),
        'dfs.datanode.data.dir': ','.join(dirs['hadoop_data_dirs']),
            'dfs.hosts': '%s/dn-include' % HADOOP_CONF_DIR,
            'dfs.hosts.exclude': '%s/dn-exclude' % HADOOP_CONF_DIR
        }
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs['YARN'] = {
            'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
            'yarn.resourcemanager.hostname': '%s' % res_hostname,
            'yarn.resourcemanager.nodes.include-path': '%s/nm-include' % (
                HADOOP_CONF_DIR),
            'yarn.resourcemanager.nodes.exclude-path': '%s/nm-exclude' % (
                HADOOP_CONF_DIR)
        }
        confs['MapReduce'] = {
            'mapreduce.framework.name': 'yarn'
        }
        hs_hostname = vu.get_instance_hostname(vu.get_historyserver(cluster))
        if hs_hostname:
            confs['MapReduce']['mapreduce.jobhistory.address'] = (
                "%s:10020" % hs_hostname)

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {
            'hadoop.proxyuser.hadoop.hosts': '*',
            'hadoop.proxyuser.hadoop.groups': 'hadoop'
        }
        confs['Hadoop'].update(hadoop_cfg)

        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(pctx, cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs(cluster))

        confs['JobFlow'] = oozie_cfg

    if c_helper.is_swift_enabled(pctx, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config['name']] = config['value']

        confs['Hadoop'].update(swift_configs)

    if c_helper.is_data_locality_enabled(pctx, cluster):
        confs['Hadoop'].update(th.TOPOLOGY_CONFIG)
        confs['Hadoop'].update({"topology.script.file.name":
                                HADOOP_CONF_DIR + "/topology.sh"})

    return confs
Example #7
def _get_hadoop_configs(pctx, node_group):
    cluster = node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(node_group)
    confs = {
        'Hadoop': {
            'fs.defaultFS': 'hdfs://%s:9000' % nn_hostname
        },
        'HDFS': {
            'dfs.namenode.name.dir': ','.join(dirs['hadoop_name_dirs']),
        'dfs.datanode.data.dir': ','.join(dirs['hadoop_data_dirs']),
            'dfs.hosts': '%s/dn-include' % HADOOP_CONF_DIR,
            'dfs.hosts.exclude': '%s/dn-exclude' % HADOOP_CONF_DIR
        }
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs['YARN'] = {
            'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
            'yarn.resourcemanager.hostname': '%s' % res_hostname,
            'yarn.resourcemanager.nodes.include-path': '%s/nm-include' % (
                HADOOP_CONF_DIR),
            'yarn.resourcemanager.nodes.exclude-path': '%s/nm-exclude' % (
                HADOOP_CONF_DIR)
        }
        confs['MapReduce'] = {'mapreduce.framework.name': 'yarn'}
        hs_hostname = vu.get_instance_hostname(vu.get_historyserver(cluster))
        if hs_hostname:
            confs['MapReduce']['mapreduce.jobhistory.address'] = ("%s:10020" %
                                                                  hs_hostname)

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {
            'hadoop.proxyuser.hadoop.hosts': '*',
            'hadoop.proxyuser.hadoop.groups': 'hadoop'
        }
        confs['Hadoop'].update(hadoop_cfg)

        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(pctx, cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs(cluster))

        confs['JobFlow'] = oozie_cfg

    if c_helper.is_swift_enabled(pctx, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config['name']] = config['value']

        confs['Hadoop'].update(swift_configs)

    if c_helper.is_data_locality_enabled(pctx, cluster):
        confs['Hadoop'].update(th.TOPOLOGY_CONFIG)
        confs['Hadoop'].update(
            {"topology.script.file.name": HADOOP_CONF_DIR + "/topology.sh"})

    return confs
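
Finally, the dn-include/dn-exclude and nm-include/nm-exclude paths configured throughout these examples are plain host-list files. A hedged sketch of how such a file might be regenerated, reusing the `write_file_to` remote call from the `start_oozie_process` example (the function name is illustrative):

def write_dn_include(r, hostnames):
    # one hostname per line, the format dfs.hosts expects; the path
    # matches the 'dfs.hosts' value configured above
    r.write_file_to('%s/dn-include' % HADOOP_CONF_DIR,
                    '\n'.join(hostnames))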