from beaver.component.hadoop import Hadoop, YARN, HDFS
from beaver.config import Config
import json, logging
from beaver.machine import Machine
import time, requests, os
from beaver.component.ambari import Ambari
import json as jsonlib
from requests.auth import HTTPBasicAuth
import urllib2, base64, socket, uuid
from beaver import util
from beaver.component.hbase import HBase

logger = logging.getLogger(__name__)
hdfs_user = HDFS.getHDFSUser()
job_user = Config.getEnv('USER')


class FalconAmbariAPIUtil:
    def __init__(self):
        pass

    # The following functions are related to the Ambari REST API
    @classmethod
    def getConfig(cls, host, type, cluster=None):
        # Default to the cluster name used by the test framework.
        if cluster is None:
            cluster = 'cl1'
        weburl = cls.getweburl(host)
        url = "%s/api/v1/clusters/%s/configurations?type=%s" % (weburl, cluster, type)
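# A minimal illustrative continuation (not part of the original class), assuming the
# Ambari URL built above and default admin/admin credentials; it only shows how such a
# URL is typically fetched and parsed with the requests/HTTPBasicAuth imports already
# present in this module. The function name, credentials, and return shape are assumptions.
def _fetch_ambari_config_example(url, username='admin', password='admin'):
    # Issue the GET against the Ambari REST API with HTTP basic auth.
    response = requests.get(url, auth=HTTPBasicAuth(username, password), verify=False)
    response.raise_for_status()
    # Configuration versions usually arrive under an 'items' list; a caller would
    # normally pick the newest tag from this list and fetch its properties next.
    return response.json().get('items', [])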
import time, logging, re, os
from beaver.component.hadoop import Hadoop, MAPRED, YARN, HDFS, MAPRED2
from beaver.component.tez import Tez
from ..machine import Machine
from ..config import Config
from ..java import Java
from beaver.htmlparser import MRHTMLParser
from beaver import util

logger = logging.getLogger(__name__)
HDFS_USER = HDFS.getHDFSUser()


def balancerModifyConfig(duReservedValue):
    """
    Create /tmp/hadoopConf on all nodes with HDFS settings tuned for balancer tests.
    :param duReservedValue: bytes to reserve per DataNode volume (dfs.datanode.du.reserved)
    :return:
    """
    Hadoop.modifyConfig(
        {
            'hdfs-site.xml': {
                'dfs.datanode.du.reserved': duReservedValue,
                'dfs.replication': 1,
                'dfs.namenode.heartbeat.recheck-interval': 5000,
                'dfs.namenode.stale.datanode.interval': 1000,
                'dfs.namenode.replication.interval': 1
            },
        }, {'services': ['all']})
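# A minimal usage sketch, assuming this module is driven by the test framework; the
# reserved-space value (1 GB per volume) is illustrative, and the follow-up restart or
# refresh of HDFS services is left to the framework's usual helpers.
def example_balancer_config():
    # Reserve ~1 GB per DataNode volume and tighten the NameNode staleness/replication
    # intervals so balancer behaviour shows up quickly in the test.
    balancerModifyConfig(1024 * 1024 * 1024)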
# Relies on module-level helpers and constants defined elsewhere in this file:
# Storm, Maven, ruAssert, DATABASE_NAME, HIVE_TABLE_NAME, LOCAL_HIVE_WORK_DIR and the
# various *_MAVEN_PARAMETER / HADOOP_* / HIVE_* / CORE_FILE / HDFS_FILE values used below.
def setup_storm_hive_topology(cls, useStandaloneCmd):
    from beaver.component.hive import Hive

    storm_version = Storm.getVersion(useStandaloneCmd=True)
    hive_version = Hive.getVersion()

    # Declare the module-level globals before assigning to them.
    global HIVE_METASTORE_URI
    global HIVE_HOST
    global HIVE_PORT
    global HIVE_WAREHOUSE_DIR
    HIVE_METASTORE_URI = Hive.getConfigValue(
        "hive.metastore.uris", defaultValue="thrift://localhost:9083")
    HIVE_WAREHOUSE_DIR = Hive.getConfigValue(
        "hive.metastore.warehouse.dir", defaultValue="/apps/hive/warehouse")
    HIVE_HOST = Hive.getHiveHost()
    HIVE_PORT = Hive.getMetastoreThriftPort()

    if Storm.isDalorBeyond():
        JAVA_HIVE_SRC_DIR = os.path.join(
            Config.getEnv('WORKSPACE'), 'tests', 'rolling_upgrade', 'Storm', '2_3', 'storm-hive', 'java')
    else:
        JAVA_HIVE_SRC_DIR = os.path.join(
            Config.getEnv('WORKSPACE'), 'tests', 'rolling_upgrade', 'Storm', '2_2', 'storm-hive', 'java')

    # hive.txn.manager and hive.support.concurrency are set through Ambari as per bug-40500
    #logger.info("Restart Hive")
    #changes = {'hive-site.xml': {'hive.txn.manager': 'org.apache.hadoop.hive.ql.lockmgr.DbTxnManager',
    #                             'hive.support.concurrency': 'true'}}
    #Hive.modifyConfig(changes, services=['metastore'], restartService=True)

    logger.info("Create test database in Hive")
    exit_code, stdout = Hive.runQuery(
        cls.get_set_queue_cmd(useStandaloneCmd) + " drop database if exists stormdb cascade; \
                               create database stormdb;")
    ruAssert("Storm", exit_code == 0, "[StormHiveSetup] Failed to create test database" + stdout)
    HDFS.chmod(
        runasUser=HDFS.getHDFSUser(), perm=777, directory=HIVE_WAREHOUSE_DIR + "/" + DATABASE_NAME + ".db")

    # copy tests/storm/storm-hive/java to artifacts/storm-hive-tests
    logger.info("JAVA_SRC_DIR " + JAVA_HIVE_SRC_DIR)
    logger.info("LOCAL_WORK_DIR " + LOCAL_HIVE_WORK_DIR)
    Machine.copy(JAVA_HIVE_SRC_DIR, LOCAL_HIVE_WORK_DIR, user=None, passwd=None)

    # mvn package
    if Machine.isWindows():
        (_, _) = Maven.run(
            'package -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s' % (
                HADOOP_VERSION_MAVEN_PARAMETER, HADOOP_VERSION,
                STORM_VERSION_MAVEN_PARAMETER, storm_version,
                HIVE_VERSION_MAVEN_PARAMETER, hive_version,
                PUBLIC_REPO_MAVEN_PARAMETER, Maven.getPublicRepoUrl(),
                CORE_FILE_MAVEN_PARAMETER, CORE_FILE,
                HADOOP_CORE_MAVEN_PARAMETER, HADOOP_CONF,
                HIVE_CORE_MAVEN_PARAMETER, HIVE_CORE_DIR,
                HIVE_FILE_MAVEN_PARAMETER, HIVE_FILE),
            cwd=LOCAL_HIVE_WORK_DIR)
    else:
        (_, _) = Maven.run(
            'package',
            cwd=LOCAL_HIVE_WORK_DIR,
            env={
                HADOOP_VERSION_MAVEN_PARAMETER: HADOOP_VERSION,
                STORM_VERSION_MAVEN_PARAMETER: storm_version,
                HIVE_VERSION_MAVEN_PARAMETER: hive_version,
                PUBLIC_REPO_MAVEN_PARAMETER: Maven.getPublicRepoUrl(),
                CORE_FILE_MAVEN_PARAMETER: CORE_FILE,
                HADOOP_CONF_MAVEN_PARAMETER: HADOOP_CONF,
                HDFS_FILE_MAVEN_PARAMETER: HDFS_FILE,
                HADOOP_CORE_MAVEN_PARAMETER: HADOOP_CONF,
                HIVE_CORE_MAVEN_PARAMETER: HIVE_CORE_DIR,
                HIVE_FILE_MAVEN_PARAMETER: HIVE_FILE
            })

    # Create the transactional, bucketed ORC table the Storm-Hive topology writes into.
    create_table_q = "use %s; \
        drop table if exists %s; \
        create table %s (id int, name string, phone string, street string) \
        partitioned by (city string, state string) \
        clustered by (id) into %s buckets \
        stored as orc \
        tblproperties ('transactional'='true');" % (
        DATABASE_NAME, HIVE_TABLE_NAME, HIVE_TABLE_NAME, "5")

    exit_code, stdout = Hive.runQuery(cls.get_set_queue_cmd(useStandaloneCmd) + create_table_q)
    ruAssert(
        "Storm", exit_code == 0,
        "[StormHiveSetup] Failed to create test table userdata_partitioned")
    HDFS.chmod(
        runasUser=HDFS.getHDFSUser(),
        perm=777,
        directory=HIVE_WAREHOUSE_DIR + "/" + DATABASE_NAME + ".db/" + HIVE_TABLE_NAME)
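# A minimal follow-up sketch (not part of the original suite), assuming the same Hive
# helper and module constants used above; it only checks that the database and the
# transactional table created by the setup are visible. The function name and the
# verification query are assumptions.
def verify_storm_hive_setup(cls, useStandaloneCmd):
    from beaver.component.hive import Hive
    # List the tables in the test database and look for the table created above.
    exit_code, stdout = Hive.runQuery(
        cls.get_set_queue_cmd(useStandaloneCmd) +
        " use %s; show tables;" % DATABASE_NAME)
    return exit_code == 0 and HIVE_TABLE_NAME in stdout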