Code Example #1
    # The mock arguments (cluster_update, store_secret, get_secret,
    # conductor) are injected by @mock.patch decorators that are elided
    # from this excerpt.
    def test_get_hive_password(self, cluster_update, store_secret, get_secret,
                               conductor):
        cluster = mock.MagicMock()
        cluster.extra.to_dict.return_value = {"hive_pass_id": "31415926"}

        conductor.return_value = cluster

        # Case 1: a secret id is already stored in cluster.extra, so the
        # password is fetched from the key manager by that id.
        get_secret.return_value = "hive_pass"
        result = u.get_hive_password(cluster)

        get_secret.assert_called_once_with("31415926")
        self.assertEqual('hive_pass', result)

        # Case 2: no secret id yet, so a new password is generated,
        # stored, and returned.
        cluster.extra.to_dict.return_value = {}

        store_secret.return_value = 'hive_pass'
        result = u.get_hive_password(cluster)
        self.assertEqual('hive_pass', result)
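
The two cases above pin down the shape of u.get_hive_password: reuse a stored secret id when cluster.extra has one, otherwise generate, store, and persist a new password. The following is a minimal self-contained sketch of that behaviour, inferred only from the test's assertions; the in-memory _secret_store and the store_secret/get_secret helpers are stand-ins for Sahara's key-manager integration, not its actual API.

import uuid

_secret_store = {}  # stand-in for the external key manager


def store_secret(value):
    # Store a secret and hand back an opaque id for later retrieval.
    secret_id = str(uuid.uuid4())
    _secret_store[secret_id] = value
    return secret_id


def get_secret(secret_id):
    # Resolve a previously stored secret by its id.
    return _secret_store[secret_id]


def get_hive_password(cluster):
    extra = cluster.extra.to_dict()
    if 'hive_pass_id' not in extra:
        # First use: generate a password, store it, and remember the
        # secret id on the cluster (the test mocks this persistence as
        # conductor.cluster_update).
        extra['hive_pass_id'] = store_secret(str(uuid.uuid4()))
    return get_secret(extra['hive_pass_id'])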
Code Example #2
File: run_scripts.py Project: lwlzpn84/sahara
def start_hiveserver_process(pctx, instance):
    with context.set_current_instance_id(instance.instance_id):
        with instance.remote() as r:
            _hive_create_warehouse_dir(r)
            _hive_copy_shared_conf(r, edp.get_hive_shared_conf_path('hadoop'))

            if c_helper.is_mysql_enabled(pctx, instance.cluster):
                # Start a local MySQL only if Oozie is absent or running
                # on another instance (otherwise Oozie's MySQL is reused).
                oozie = vu.get_oozie(instance.node_group.cluster)
                if not oozie or instance.hostname() != oozie.hostname():
                    _start_mysql(r)

                # Render the bootstrap script with the cluster's Hive
                # password, then create and start the metastore database.
                sql_script = files.get_file_text(
                    'plugins/vanilla/hadoop2/resources/create_hive_db.sql')

                sql_script = sql_script.replace(
                    '{{password}}', u.get_hive_password(instance.cluster))
                r.write_file_to('/tmp/create_hive_db.sql', sql_script)
                _hive_create_db(r)
                _hive_metastore_start(r)
                LOG.info("Hive Metastore server at {host} has been "
                         "started".format(host=instance.hostname()))
Code Example #3
def _get_hadoop_configs(pctx, instance):
    cluster = instance.node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(instance)

    # Base configuration: point clients at the namenode and set the HDFS
    # storage directories plus the datanode include/exclude host lists.
    confs = {
        'Hadoop': {
            'fs.defaultFS': 'hdfs://%s:9000' % nn_hostname
        },
        'HDFS': {
            'dfs.namenode.name.dir': ','.join(dirs['hadoop_name_dirs']),
            'dfs.datanode.data.dir': ','.join(dirs['hadoop_data_dirs']),
            'dfs.hosts': '%s/dn-include' % HADOOP_CONF_DIR,
            'dfs.hosts.exclude': '%s/dn-exclude' % HADOOP_CONF_DIR
        }
    }

    # YARN and MapReduce sections are emitted only when the cluster has a
    # resourcemanager; the jobhistory address only when a historyserver.
    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs['YARN'] = {
            'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
            'yarn.resourcemanager.hostname': '%s' % res_hostname,
            'yarn.resourcemanager.nodes.include-path':
                '%s/nm-include' % HADOOP_CONF_DIR,
            'yarn.resourcemanager.nodes.exclude-path':
                '%s/nm-exclude' % HADOOP_CONF_DIR
        }
        confs['MapReduce'] = {'mapreduce.framework.name': 'yarn'}
        hs_hostname = vu.get_instance_hostname(vu.get_historyserver(cluster))
        if hs_hostname:
            confs['MapReduce']['mapreduce.jobhistory.address'] = (
                "%s:10020" % hs_hostname)

    # Oozie submits jobs through the hadoop proxyuser, so allow it from
    # any host; Oozie's own configs go into the 'JobFlow' section and
    # switch to MySQL-backed storage when MySQL is enabled.
    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {
            'hadoop.proxyuser.hadoop.hosts': '*',
            'hadoop.proxyuser.hadoop.groups': 'hadoop'
        }
        confs['Hadoop'].update(hadoop_cfg)

        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(pctx, cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs(cluster))

        confs['JobFlow'] = oozie_cfg

    # Swift filesystem support and the data-locality topology script are
    # both optional and merged into the core Hadoop section.
    if c_helper.is_swift_enabled(pctx, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config['name']] = config['value']

        confs['Hadoop'].update(swift_configs)

    if c_helper.is_data_locality_enabled(pctx, cluster):
        confs['Hadoop'].update(th.TOPOLOGY_CONFIG)
        confs['Hadoop'].update(
            {"topology.script.file.name": HADOOP_CONF_DIR + "/topology.sh"})

    # Hive defaults to an embedded Derby metastore; the MySQL settings
    # below override the ConnectionURL when MySQL is enabled.
    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))
    if hive_hostname:
        hive_pass = u.get_hive_password(cluster)

        hive_cfg = {
            'hive.warehouse.subdir.inherit.perms': True,
            'javax.jdo.option.ConnectionURL':
                'jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true'
        }

        if c_helper.is_mysql_enabled(pctx, cluster):
            hive_cfg.update({
                'javax.jdo.option.ConnectionURL':
                    'jdbc:mysql://%s/metastore' % hive_hostname,
                'javax.jdo.option.ConnectionDriverName':
                    'com.mysql.jdbc.Driver',
                # The connection user name is redacted ('******') in this
                # excerpt.
                'javax.jdo.option.ConnectionUserName': '******',
                'javax.jdo.option.ConnectionPassword': hive_pass,
                'datanucleus.autoCreateSchema': 'false',
                'datanucleus.fixedDatastore': 'true',
                'hive.metastore.uris': 'thrift://%s:9083' % hive_hostname,
            })

        # When Swift access goes through a proxy user, hand the proxy
        # credentials to Hive; the password itself is resolved from the
        # key manager.
        proxy_configs = cluster.cluster_configs.get('proxy_configs')
        if proxy_configs and c_helper.is_swift_enabled(pctx, cluster):
            hive_cfg.update({
                swift.HADOOP_SWIFT_USERNAME: proxy_configs['proxy_username'],
                swift.HADOOP_SWIFT_PASSWORD: key_manager.get_secret(
                    proxy_configs['proxy_password']),
                swift.HADOOP_SWIFT_TRUST_ID: proxy_configs['proxy_trust_id'],
                swift.HADOOP_SWIFT_DOMAIN_NAME: CONF.proxy_user_domain_name
            })

        confs['Hive'] = hive_cfg

    return confs
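
_get_hadoop_configs only assembles a nested {service: {property: value}} mapping; writing the values out happens elsewhere in the plugin. As an illustration of what consuming one of these mappings can look like, here is a generic renderer for Hadoop-style *-site.xml content; it is a sketch, not Sahara's own serializer.

from xml.sax.saxutils import escape


def to_hadoop_xml(service_confs):
    # Turn one service's {property: value} mapping into the body of a
    # Hadoop-style configuration XML file.
    lines = ['<?xml version="1.0"?>', '<configuration>']
    for name, value in sorted(service_confs.items()):
        lines.append('  <property>')
        lines.append('    <name>%s</name>' % escape(name))
        lines.append('    <value>%s</value>' % escape(str(value)))
        lines.append('  </property>')
    lines.append('</configuration>')
    return '\n'.join(lines)

# e.g. to_hadoop_xml(confs['HDFS']) yields contents for hdfs-site.xml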