def test_get_oozie(self):
    cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.7.1',
                           [self.ng_manager, self.ng_oozie])
    self.assertEqual('ooz1', u.get_oozie(cl).instance_id)

    cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.7.1',
                           [self.ng_manager])
    self.assertIsNone(u.get_oozie(cl))
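# For context, a minimal sketch of the helper exercised by the test above.
# The body here is an assumption, not the plugin's actual implementation:
# get_oozie is taken to be a lookup of the single instance that runs the
# 'oozie' process, returning None when no node group carries it.
def get_oozie_sketch(cluster):
    # Hypothetical: scan node groups for the 'oozie' process and return
    # the first matching instance, or None if the cluster has no Oozie.
    for node_group in cluster.node_groups:
        if 'oozie' in node_group.node_processes:
            instances = node_group.instances
            return instances[0] if instances else None
    return None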
def _set_cluster_info(self, cluster):
    nn = vu.get_namenode(cluster)
    rm = vu.get_resourcemanager(cluster)
    hs = vu.get_historyserver(cluster)
    oo = vu.get_oozie(cluster)
    sp = vu.get_spark_history_server(cluster)
    info = {}

    if rm:
        info['YARN'] = {
            'Web UI': 'http://%s:%s' % (rm.get_ip_or_dns_name(), '8088'),
            'ResourceManager': 'http://%s:%s' % (
                rm.get_ip_or_dns_name(), '8032')
        }

    if nn:
        info['HDFS'] = {
            'Web UI': 'http://%s:%s' % (nn.get_ip_or_dns_name(), '50070'),
            'NameNode': 'hdfs://%s:%s' % (nn.hostname(), '9000')
        }

    if oo:
        info['JobFlow'] = {
            'Oozie': 'http://%s:%s' % (oo.get_ip_or_dns_name(), '11000')
        }

    if hs:
        info['MapReduce JobHistory Server'] = {
            'Web UI': 'http://%s:%s' % (hs.get_ip_or_dns_name(), '19888')
        }

    if sp:
        info['Apache Spark'] = {
            'Spark UI': 'http://%s:%s' % (sp.management_ip, '4040'),
            'Spark History Server UI': 'http://%s:%s' % (
                sp.management_ip, '18080')
        }

    ctx = context.ctx()
    conductor.cluster_update(ctx, cluster, {'info': info})
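# For a cluster that has only a ResourceManager and a NameNode, the 'info'
# mapping assembled above would take roughly this shape (hostnames are
# illustrative, not produced by the code):
EXAMPLE_CLUSTER_INFO = {
    'YARN': {
        'Web UI': 'http://master-1.example:8088',
        'ResourceManager': 'http://master-1.example:8032',
    },
    'HDFS': {
        'Web UI': 'http://master-1.example:50070',
        'NameNode': 'hdfs://master-1:9000',
    },
}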
def start_hiveserver_process(pctx, instance):
    with context.set_current_instance_id(instance.instance_id):
        with instance.remote() as r:
            _hive_create_warehouse_dir(r)
            _hive_copy_shared_conf(
                r, edp.get_hive_shared_conf_path('hadoop'))

            if config_helper.is_mysql_enabled(pctx, instance.cluster):
                oozie = vu.get_oozie(instance.node_group.cluster)
                if not oozie or instance.hostname() != oozie.hostname():
                    # Start MySQL only when Hive does not share a host
                    # with the Oozie server, whose startup brings up its
                    # own MySQL instance.
                    _start_mysql(r)

                version = instance.cluster.hadoop_version
                sql_script = utils.get_file_text(
                    'plugins/vanilla/v{}/resources/create_hive_db.sql'
                    .format(version.replace('.', '_')),
                    'sahara_plugin_vanilla')

                sql_script = sql_script.replace(
                    '{{password}}', u.get_hive_password(instance.cluster))

                r.write_file_to('/tmp/create_hive_db.sql', sql_script)
                _hive_create_db(r)
                _hive_metastore_start(r)
                LOG.info("Hive Metastore server at {host} has been "
                         "started".format(host=instance.hostname()))
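# The create_hive_db.sql resource uses a plain '{{password}}' placeholder,
# filled in with str.replace above. A standalone illustration of that
# templating step; the SQL text below is hypothetical, not the shipped
# resource file:
_EXAMPLE_HIVE_DB_TEMPLATE = (
    "CREATE DATABASE IF NOT EXISTS metastore;\n"
    "GRANT ALL PRIVILEGES ON metastore.* TO 'hive'@'localhost'\n"
    "    IDENTIFIED BY '{{password}}';"
)

def render_hive_db_script(password):
    # Same substitution strategy as start_hiveserver_process above.
    return _EXAMPLE_HIVE_DB_TEMPLATE.replace('{{password}}', password)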
def _get_hadoop_configs(pctx, instance):
    cluster = instance.node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(instance)
    confs = {
        'Hadoop': {
            'fs.defaultFS': 'hdfs://%s:9000' % nn_hostname
        },
        'HDFS': {
            'dfs.namenode.name.dir': ','.join(dirs['hadoop_name_dirs']),
            'dfs.datanode.data.dir': ','.join(dirs['hadoop_data_dirs']),
            'dfs.hosts': '%s/dn-include' % HADOOP_CONF_DIR,
            'dfs.hosts.exclude': '%s/dn-exclude' % HADOOP_CONF_DIR
        }
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs['YARN'] = {
            'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
            'yarn.resourcemanager.hostname': '%s' % res_hostname,
            'yarn.resourcemanager.nodes.include-path': '%s/nm-include' % (
                HADOOP_CONF_DIR),
            'yarn.resourcemanager.nodes.exclude-path': '%s/nm-exclude' % (
                HADOOP_CONF_DIR)
        }
        confs['MapReduce'] = {
            'mapreduce.framework.name': 'yarn'
        }
        hs_hostname = vu.get_instance_hostname(vu.get_historyserver(cluster))
        if hs_hostname:
            confs['MapReduce']['mapreduce.jobhistory.address'] = (
                "%s:10020" % hs_hostname)

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {
            'hadoop.proxyuser.hadoop.hosts': '*',
            'hadoop.proxyuser.hadoop.groups': 'hadoop'
        }
        confs['Hadoop'].update(hadoop_cfg)

        oozie_cfg = oozie_helper.get_oozie_required_xml_configs(
            HADOOP_CONF_DIR)
        if config_helper.is_mysql_enabled(pctx, cluster):
            oozie_cfg.update(oozie_helper.get_oozie_mysql_configs(cluster))

        confs['JobFlow'] = oozie_cfg

    if config_helper.is_swift_enabled(pctx, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config['name']] = config['value']

        confs['Hadoop'].update(swift_configs)

    if config_helper.is_data_locality_enabled(pctx, cluster):
        confs['Hadoop'].update(th.TOPOLOGY_CONFIG)
        confs['Hadoop'].update({"topology.script.file.name":
                                HADOOP_CONF_DIR + "/topology.sh"})

    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))
    if hive_hostname:
        hive_pass = u.get_hive_password(cluster)

        hive_cfg = {
            'hive.warehouse.subdir.inherit.perms': True,
            'javax.jdo.option.ConnectionURL':
            'jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true'
        }

        if config_helper.is_mysql_enabled(pctx, cluster):
            hive_cfg.update({
                'javax.jdo.option.ConnectionURL':
                'jdbc:mysql://%s/metastore' % hive_hostname,
                'javax.jdo.option.ConnectionDriverName':
                'com.mysql.jdbc.Driver',
                'javax.jdo.option.ConnectionUserName': '******',
                'javax.jdo.option.ConnectionPassword': hive_pass,
                'datanucleus.autoCreateSchema': 'false',
                'datanucleus.fixedDatastore': 'true',
                'hive.metastore.uris': 'thrift://%s:9083' % hive_hostname,
            })

        proxy_configs = cluster.cluster_configs.get('proxy_configs')
        if proxy_configs and config_helper.is_swift_enabled(pctx, cluster):
            hive_cfg.update({
                swift.HADOOP_SWIFT_USERNAME: proxy_configs['proxy_username'],
                swift.HADOOP_SWIFT_PASSWORD: key_manager.get_secret(
                    proxy_configs['proxy_password']),
                swift.HADOOP_SWIFT_TRUST_ID: proxy_configs['proxy_trust_id'],
                swift.HADOOP_SWIFT_DOMAIN_NAME: CONF.proxy_user_domain_name
            })

        confs['Hive'] = hive_cfg

    return confs
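# The nested 'confs' mapping returned above is eventually rendered into
# Hadoop *-site.xml files. A minimal sketch of that serialization for one
# section; generate_xml_configs is hypothetical and the plugin's real
# writer differs, but the <property> layout matches Hadoop's format:
from xml.sax.saxutils import escape

def generate_xml_configs(section):
    # Emit one <property> element per key/value pair, XML-escaped.
    props = ''.join(
        '  <property>\n'
        '    <name>%s</name>\n'
        '    <value>%s</value>\n'
        '  </property>\n' % (escape(str(name)), escape(str(value)))
        for name, value in sorted(section.items())
    )
    return '<configuration>\n%s</configuration>\n' % props

# e.g. generate_xml_configs(confs['HDFS']) yields hdfs-site.xml-style text.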
def get_oozie_server(self, cluster):
    return vu.get_oozie(cluster)
def start_oozie(pctx, cluster):
    oo = vu.get_oozie(cluster)
    if oo:
        run.start_oozie_process(pctx, oo)