def setup_hbase(self):
    from beaver.component.hbase import HBase
    from beaver.component.rollingupgrade.RuAssert import ruAssert
    HBASE_PREUPGRADE_TABLE = "hbase_preUpgrade_table"
    # HBASE_CONF_DIR = HBase.getModifiedConfigPath()
    # propertyMap = {'mapred.job.queue.name': 'hbase'}
    # #stopping region servers and master
    # HBase.stopRegionServers(nodes=HBase.getRegionServers(), skip_slider_check=True)
    # HBase.stopHBaseMasterNode(HBase.getMasterNode(), wait=15)
    # HBase.modifyConfig(changes={'hbase-site.xml': propertyMap}, nodeSelection={'services': ['all']})
    # host = Machine.getfqdn(HBase.getMasterNode())
    # lhost = Machine.getfqdn()
    # if host != lhost:
    #     Machine.rm(None, None, HBASE_CONF_DIR, isdir=True)
    #     Machine.copyToLocal(None, host, HBASE_CONF_DIR, HBASE_CONF_DIR)
    # allnodes = HBase.getSelectedNodes({'services': ['all']})
    # allnodes.append(Machine.getfqdn())
    # util.copy_back_to_original_config(HBASE_CONF_DIR, Config.get('hbase', 'HBASE_CONF_DIR'),
    #                                   file_list=["hbase-site.xml"], node_list=allnodes)
    # #starting region servers and master
    # HBase.startRegionServers(nodes=HBase.getRegionServers(), skip_slider_check=True)
    # HBase.startHBaseMasterNode(host=HBase.getMasterNode(), masterStartUpWait=True, wait=15)
    # time.sleep(120)
    HBase.dropAndCreateTable(HBASE_PREUPGRADE_TABLE, "cf")
    exit_code, stdout = HBase.runShellCmds(
        ["put '%s','row1', 'cf:f1', 'holmes'" % HBASE_PREUPGRADE_TABLE])
    ruAssert("HBASE", exit_code == 0)
    exit_code, stdout = HBase.runShellCmds(
        ["put '%s','row2', 'cf:f2', 'watson'" % HBASE_PREUPGRADE_TABLE])
    ruAssert("HBASE", exit_code == 0)
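# --- Example (hypothetical, not part of the suite): the natural post-upgrade
# counterpart to setup_hbase() would re-read the pre-upgrade table and assert
# that the rows written above survived the upgrade. A minimal sketch, reusing
# only helpers that appear elsewhere in this module:
def example_verify_hbase_after_upgrade():
    from beaver.component.hbase import HBase
    from beaver.component.rollingupgrade.RuAssert import ruAssert
    exit_code, stdout = HBase.runShellCmds(["scan 'hbase_preUpgrade_table'"])
    ruAssert("HBASE", exit_code == 0)
    # Both cell values written before the upgrade should still be readable.
    ruAssert("HBASE", stdout.find('holmes') >= 0 and stdout.find('watson') >= 0)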
def setup_storm_hbase_topology(cls, useStandaloneCmd):
    from beaver.component.hbase import HBase
    from beaver.component.rollingupgrade.RuAssert import ruAssert

    storm_version = Storm.getVersion(useStandaloneCmd=True)
    Machine.copy(JAVA_HBASE_SRC_DIR, LOCAL_HBASE_WORK_DIR, user=None, passwd=None)

    if Machine.isWindows():
        (_, _) = Maven.run(
            'package -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s' % (
                HADOOP_VERSION_MAVEN_PARAMETER, HADOOP_VERSION,
                STORM_VERSION_MAVEN_PARAMETER, storm_version,
                HBASE_CONF_MAVEN_PARAMETER, HBASE_CONF,
                HBASE_FILE_MAVEN_PARAMETER, HBASE_FILE,
                HADOOP_CONF_MAVEN_PARAMETER, HADOOP_CONF,
                HDFS_FILE_MAVEN_PARAMETER, HDFS_FILE,
                CORE_FILE_MAVEN_PARAMETER, CORE_FILE,
                PUBLIC_REPO_MAVEN_PARAMETER, Maven.getPublicRepoUrl()),
            cwd=LOCAL_HBASE_WORK_DIR)
    else:
        (_, _) = Maven.run(
            'package',
            cwd=LOCAL_HBASE_WORK_DIR,
            env={
                HADOOP_VERSION_MAVEN_PARAMETER: HADOOP_VERSION,
                STORM_VERSION_MAVEN_PARAMETER: storm_version,
                HBASE_CONF_MAVEN_PARAMETER: HBASE_CONF,
                HBASE_FILE_MAVEN_PARAMETER: HBASE_FILE,
                HADOOP_CONF_MAVEN_PARAMETER: HADOOP_CONF,
                HDFS_FILE_MAVEN_PARAMETER: HDFS_FILE,
                CORE_FILE_MAVEN_PARAMETER: CORE_FILE,
                PUBLIC_REPO_MAVEN_PARAMETER: Maven.getPublicRepoUrl()
            })

    exit_code, stdout = HBase.createTable(HBASE_TABLE_NAME, "cf", True, None)
    ruAssert("Storm", exit_code == 0)
    grant_cmd = "grant '%s', 'RWCXA', '%s', 'cf'" % (Storm.getStormUser(), HBASE_TABLE_NAME)
    exit_code, stdout = HBase.runShellCmds([grant_cmd])
def getZooLogDir(cls, logoutput=False):
    '''
    Returns Zookeeper log directory (String).
    '''
    matchObjList = None
    if Machine.isHumboldt():
        try:
            from beaver.component.hbase import HBase
            # get some zookeeper node
            zkNode = None  # guard against an empty master list
            hmaster_nodes = HBase.getAllMasterNodes()
            if hmaster_nodes:
                zkNode = hmaster_nodes[0]
            if zkNode:
                Machine.copyToLocal(None, zkNode, ZOOKEEPER_ENV_FILE, Machine.getTempDir())
                REMOTE_ZOOKEEPER_ENV_FILE = os.path.join(Machine.getTempDir(), 'zookeeper-env.sh')
                matchObjList = util.findMatchingPatternInFile(
                    REMOTE_ZOOKEEPER_ENV_FILE, "export ZOO_LOG_DIR=(.*)", return0Or1=False)
        except Exception:
            pass
    if not matchObjList:
        # gateway should have the config file.
        matchObjList = util.findMatchingPatternInFile(
            ZOOKEEPER_ENV_FILE, "export ZOO_LOG_DIR=(.*)", return0Or1=False)
    returnValue = None
    if matchObjList:
        returnValue = matchObjList[0].group(1)
    if logoutput:
        logger.info("Zookeeper.getZooLogDir returns %s", returnValue)
    return returnValue
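# --- Example (hypothetical): the util.findMatchingPatternInFile() call above
# boils down to pulling the value out of a line such as
# "export ZOO_LOG_DIR=/grid/0/log/zookeeper". A stdlib-only sketch of the same
# extraction, shown for clarity:
def example_parse_zoo_log_dir(env_file_path):
    import re
    with open(env_file_path) as f:
        for line in f:
            match = re.search(r"export ZOO_LOG_DIR=(.*)", line)
            if match:
                return match.group(1).strip()
    return None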
def getZipFile(cls, version=HBase.getVersionFromBuild(), isRU=False):
    # download for linux, no download for windows
    HBASE_VER_BUILD = version
    if Machine.isWindows():
        zipFile = os.path.join(
            Config.get('slider', 'SLIDER_HOME'), "app-packages",
            "slider-hbase-app-win-package-%s.zip" % HBASE_VER_BUILD)
        return zipFile

    pkg_list = "pkg-list_qe.txt"
    path = os.path.join(Config.getEnv('ARTIFACTS_DIR'), pkg_list)
    #pkgUrl = Config.get('slider','APP_PKG_LIST')
    pkgUrl = Slider.getAppPackageBaseUrl(isRU) + "/slider-app-packages/" + pkg_list
    util.downloadUrl(pkgUrl, path)
    with open(path, 'r') as f:
        for line in f:
            if line.startswith("hbase_pkg_url="):
                url = line.strip()[len("hbase_pkg_url="):]
                break
    zipFile = os.path.join(os.getcwd(), "slider-hbase-app-package-%s.zip" % HBASE_VER_BUILD)
    logger.info("downloading " + url)
    util.downloadUrl(url, zipFile)
    return zipFile
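# --- Example (hypothetical): pkg-list_qe.txt is a plain key=value file, so the
# parsing loop above is equivalent to this stdlib-only lookup:
def example_lookup_pkg_url(pkg_list_path, key="hbase_pkg_url"):
    with open(pkg_list_path) as f:
        for line in f:
            if line.startswith(key + "="):
                return line.strip()[len(key) + 1:]
    return None  # callers should treat a missing key as a configuration error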
def getSliderHbaseVersion(cls):
    if not cls.HBASE_VER:
        # e.g. 0.98.4.2.2.0.0-2041-hadoop2
        HBASE_VER_BUILD = HBase.getVersionFromBuild()
        # e.g. 0.98.4.2.2.0.0
        cls.HBASE_VER = HBASE_VER_BUILD[0:14]
    return cls.HBASE_VER
def getHbaseMasterNode(cls):
    '''
    Return the host which is the HBase master node
    :return: String
    '''
    cls._hbase_master = HBase.getMasterNode()
    if not cls._hbase_master:
        logger.error('HBase is not installed on any of the hosts in the cluster')
    return cls._hbase_master
def getHbaseRegionServers(cls):
    '''
    Return the list of HBase regionserver nodes
    :return: List of hostnames
    '''
    cls._hbase_regionservers = HBase.getRegionServers()
    if not cls._hbase_regionservers:
        logger.error('HBase is not installed on any of the hosts in the cluster')
    return cls._hbase_regionservers
def HBase_getHBaseLogDir(cls, logoutput=True):
    try:
        from beaver.component.hbase import HBase
        return HBase.getHBaseLogDir(logoutput)
    except Exception:
        if logoutput:
            logger.error("Exception occurred during HBase_getHBaseLogDir() call")
            logger.error(traceback.format_exc())
        return None
def restart_mc_services(cls, host, cluster=None):
    services = ["HDFS", "MAPREDUCE2", "YARN", "OOZIE", "FALCON"]
    if HBase.isInstalled():
        services = ["HDFS", "MAPREDUCE2", "YARN", "OOZIE", "FALCON", "HBASE", "HIVE"]
    for service in services:
        cls.restart_service(service, host, cluster)
    return
def revokePermissionsToSystemTables(cls, schemaFunctionalityEnabled=False):
    if schemaFunctionalityEnabled:
        # With namespace mapping enabled, the Phoenix system tables live in the
        # HBase 'SYSTEM' namespace, so one namespace-level revoke suffices.
        REVOKE_CMDS = ["revoke '%s', '@SYSTEM'" % HADOOPQA_USER]
    else:
        REVOKE_CMDS = [
            "revoke '%s', 'SYSTEM.CATALOG'" % HADOOPQA_USER,
            "revoke '%s', 'SYSTEM.STATS'" % HADOOPQA_USER,
            "revoke '%s', 'SYSTEM.SEQUENCE'" % HADOOPQA_USER,
            "revoke '%s', 'SYSTEM.FUNCTION'" % HADOOPQA_USER
        ]
    return HBase.runShellCmds(REVOKE_CMDS, user=HBASE_USER)
def grantPermissionsToSystemTables(cls, schemaFunctionalityEnabled=False):
    if schemaFunctionalityEnabled:
        GRANT_CMDS = ["grant '%s','RWXCA'" % HADOOPQA_USER]
    else:
        GRANT_CMDS = [
            "grant '%s','RWXCA', 'SYSTEM.CATALOG'" % HADOOPQA_USER,
            "grant '%s','RWXCA', 'SYSTEM.STATS'" % HADOOPQA_USER,
            "grant '%s','RWXCA', 'SYSTEM.SEQUENCE'" % HADOOPQA_USER,
            "grant '%s','RWXCA', 'SYSTEM.FUNCTION'" % HADOOPQA_USER
        ]
    return HBase.runShellCmds(GRANT_CMDS, user=HBASE_USER)
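# --- Example (hypothetical): the HBase shell permission string is any subset
# of R (read), W (write), X (execute), C (create), A (admin). A narrower,
# read-only grant on a single system table would look like this sketch:
def example_grant_read_only():
    return HBase.runShellCmds(["grant '%s', 'R', 'SYSTEM.CATALOG'" % HADOOPQA_USER], user=HBASE_USER)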
def tear_down_hbase_topology(cls, topologyName, useStandaloneCmd):
    """
    Tear down the HBase topology.
    """
    from beaver.component.hbase import HBase
    from beaver.component.rollingupgrade.RuAssert import ruAssert

    Machine.rm(user=None, host="localhost", filepath=LOCAL_HBASE_WORK_DIR, isdir=True, passwd=None)
    Storm.killTopology(topologyName, logoutput=True, useStandaloneCmd=useStandaloneCmd)
    exit_code, stdout = HBase.dropTable(HBASE_TABLE_NAME, True, None)
    ruAssert("Storm", exit_code == 0)
def resetHBaseRestService(cls, action, user=None, host=None, config=None):
    from beaver.component.hbase import HBase
    if user is None:
        user = Config.get('hbase', 'HBASE_USER')
    if host is None:
        host = HBase.getMasterNode()
    service = "rest"
    if Machine.isWindows():
        service = "hbrest"
    if action == "start":
        # Make sure any stale instance is stopped before starting a fresh one.
        HBase.resetService(user, host, service, "stop", config=config)
    HBase.resetService(user, host, service, action, config=config)
    if action == "start":
        util.waitForPortToOpen(host, HBase.getRestServicePort())
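# --- Example (hypothetical): util.waitForPortToOpen() is assumed to poll a TCP
# connect until it succeeds; a stdlib-only equivalent with a timeout:
def example_wait_for_port(host, port, timeout_secs=60):
    import socket
    import time
    deadline = time.time() + timeout_secs
    while time.time() < deadline:
        try:
            socket.create_connection((host, port), timeout=5).close()
            return True
        except (OSError, socket.error):
            time.sleep(2)
    return False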
def verify_hbase_topology(cls, topologyName, lines, useStandaloneCmd):
    from beaver.component.hbase import HBase
    from beaver.component.rollingupgrade.RuAssert import ruAssert

    # Slider app is killed before long-running job verification, so topology
    # activation checks are disabled in that mode.
    if useStandaloneCmd:
        ruAssert(
            "Storm",
            Storm.getTopologyStatus(
                topologyName, logoutput=True, useStandaloneCmd=useStandaloneCmd) == 'ACTIVE')

    exit_code, stdout = HBase.runShellCmds(["scan 'WordCount'"])
    logger.info(exit_code)
    logger.info(stdout)
    for word in lines:
        ruAssert("Storm", stdout.find(word) >= 0,
                 "[StormHBaseVerify] %s not found in wordcount table" % word)
def modifyConfig(cls, changes, nodeSelection, isFirstUpdate=True, host=None, tmpConf=None):
    nodes = HBase.getSelectedNodes(nodeSelection, host)
    phoenix_conf = os.path.join(Config.get('phoenix', 'PHOENIX_HOME'), 'bin')
    if tmpConf is None:
        tmp_conf = os.path.join(Machine.getTempDir(), 'phoenixConf')
    else:
        tmp_conf = tmpConf
    # Open up the temporary conf dir so the modified files can be staged there.
    Machine.chmod('777', tmp_conf,
                  user=Machine.getAdminUser(), host=host, passwd=Machine.getAdminPasswd())
    configUtils.modifyConfig(changes, phoenix_conf, tmp_conf, nodes, isFirstUpdate)
def getTableRegions(cls, table, onlineOnly=False):
    # We locate all the regions of the table by scanning hbase:meta for its
    # info:regioninfo rows; the negative lookarounds in the regex keep 'foo'
    # from matching table names like 'my_foo_copy'.
    exit_code, stdout = HBase.runShellCmds([
        'import org.apache.hadoop.hbase.filter.CompareFilter',
        'import org.apache.hadoop.hbase.filter.SingleColumnValueFilter',
        'import org.apache.hadoop.hbase.filter.RegexStringComparator',
        'import org.apache.hadoop.hbase.util.Bytes',
        "scan 'hbase:meta',{COLUMNS => 'info:regioninfo',"
        "FILTER =>SingleColumnValueFilter.new(Bytes.toBytes('info'),"
        "Bytes.toBytes('regioninfo'),"
        "CompareFilter::CompareOp.valueOf('EQUAL'),"
        "RegexStringComparator.new('(?<!_)%s(?!_)'))}" % table
    ])
    assert exit_code == 0
    # Trim the shell banner/footer, then pull the encoded region id out of the
    # 'value={ENCODED => <id>, ...}' field of each region row.
    stdout = stdout[stdout.find(table):stdout.rfind('}')]
    lineArray = stdout.splitlines(True)
    regionArray = []
    for line in lineArray:
        if onlineOnly and ("OFFLINE => true" in line):
            continue
        regionArray.append(line.split(', ')[2].split('=>')[1].strip())
    return regionArray
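# --- Example (hypothetical): one use of the encoded region ids returned above
# is exercising region reassignment from a test. Usage sketch; HBaseHelper is a
# stand-in name for whichever class defines getTableRegions:
def example_unassign_first_region(table):
    regions = HBaseHelper.getTableRegions(table, onlineOnly=True)  # encoded region ids
    if regions:
        # 'unassign' is a standard HBase shell command taking an encoded region name
        HBase.runShellCmds(["unassign '%s'" % regions[0]])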
def verifySchemaFunctionality(cls):
    '''
    We verify that the system can operate with SCHEMA functionality.
    '''
    from beaver.component.phoenix import Phoenix
    from beaver.component.hbase import HBase
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode

    HBASE_HOME = Config.get('hbase', 'HBASE_HOME')
    HBASE_CONF_DIR = os.path.join(HBASE_HOME, "conf")

    # We verify the schema functionality.
    HBASE_CHANGES = {}
    HBASE_CHANGES['hbase-site.xml'] = {}
    HBASE_CHANGES['hbase-site.xml']['phoenix.schema.isNamespaceMappingEnabled'] = 'true'
    HBASE_CHANGES['hbase-site.xml']['phoenix.schema.mapSystemTablesToNamespace'] = 'true'

    TEST_TABLE_A = 'Table_A'
    SCHEMA_1 = 'SCHEMA_1'

    masterNodes = HBase.getAllMasterNodes()
    regionNodes = HBase.getRegionServers()
    hbase_allnodes = masterNodes + regionNodes
    gateway_node = Machine.getfqdn()
    if gateway_node not in hbase_allnodes:
        hbase_allnodes.append(gateway_node)

    HBase.stopHBaseCluster()
    HBase.modifyConfig(changes=HBASE_CHANGES, nodeSelection={'nodes': hbase_allnodes})
    util.copy_back_to_original_config(
        HBase.getModifiedConfigPath(),
        HBASE_CONF_DIR,
        file_list=["hbase-site.xml"],
        node_list=hbase_allnodes)
    HBase.startHBaseCluster(HBase.getModifiedConfigPath())

    # We grant permissions to all tables.
    Phoenix.grantPermissionsToSystemTables(schemaFunctionalityEnabled=True)

    # We check that we can still query the original table.
    cls.verifyBasicTable()

    # We check that we can create/query schemas.
    exit_code, stdout = Phoenix.runSQLLineCmds('CREATE SCHEMA IF NOT EXISTS %s;' % SCHEMA_1)
    if exit_code != 0:
        UpgradePerNode.reportProgress(
            "[FAILED][PHOENIX][Smoke] Creation of schema %s failed due to exitcode = %s "
            % (SCHEMA_1, exit_code))
    else:
        UpgradePerNode.reportProgress(
            "[PASSED][PHOENIX][Smoke] Schema creation %s succeeded." % SCHEMA_1)

    # We create tables inside that schema.
    primaryKey = {'name': 'ID', 'type': 'BIGINT'}
    columns = [{'name': 'FirstName', 'type': 'VARCHAR(30)'},
               {'name': 'SecondName', 'type': 'VARCHAR(30)'},
               {'name': 'City', 'type': 'VARCHAR(30)'}]
    exit_code, stdout = Phoenix.createTable(SCHEMA_1 + '.' + TEST_TABLE_A, primaryKey, columns)
    if exit_code != 0:
        UpgradePerNode.reportProgress(
            "[FAILED][PHOENIX][Smoke] Table creation %s on schema %s failed due to exitcode = %s "
            % (TEST_TABLE_A, SCHEMA_1, exit_code))
    else:
        UpgradePerNode.reportProgress(
            "[PASSED][PHOENIX][Smoke] Table creation %s on schema %s succeeded."
            % (TEST_TABLE_A, SCHEMA_1))

    # We insert some data into the table through upsert.
    for i in range(0, 5):
        exit_code, stdout = Phoenix.runSQLLineCmds(
            'UPSERT INTO %s VALUES (%s, "name_%s","secondName_%s","city_%s");'
            % (SCHEMA_1 + '.' + TEST_TABLE_A, str(i), str(i), str(i), str(i)))
        if exit_code != 0:
            UpgradePerNode.reportProgress(
                "[FAILED][PHOENIX][Smoke] Table UPSERT %s on schema %s failed due to exitcode = %s "
                % (TEST_TABLE_A, SCHEMA_1, exit_code))
        else:
            UpgradePerNode.reportProgress(
                "[PASSED][PHOENIX][Smoke] Table UPSERT %s on schema %s succeeded."
                % (TEST_TABLE_A, SCHEMA_1))

    # We verify that the data has been correctly inserted.
    exit_code, stdout = Phoenix.runSQLLineCmds(
        'SELECT * FROM %s WHERE ID=3;' % (SCHEMA_1 + '.' + TEST_TABLE_A))
    if exit_code != 0:
        UpgradePerNode.reportProgress(
            "[FAILED][PHOENIX][Smoke] Table SELECT %s on schema %s failed due to exitcode = %s "
            % (TEST_TABLE_A, SCHEMA_1, exit_code))
    else:
        UpgradePerNode.reportProgress(
            "[PASSED][PHOENIX][Smoke] Table SELECT %s on schema %s succeeded."
            % (TEST_TABLE_A, SCHEMA_1))

    if stdout.find('name_3') == -1 or stdout.find('secondName_3') == -1 or stdout.find('city_3') == -1:
        UpgradePerNode.reportProgress(
            "[FAILED][PHOENIX][Smoke] Table SELECT %s on schema %s returned the wrong results: %s"
            % (TEST_TABLE_A, SCHEMA_1, stdout))
    else:
        UpgradePerNode.reportProgress(
            "[PASSED][PHOENIX][Smoke] Table SELECT %s on schema %s succeeded."
            % (TEST_TABLE_A, SCHEMA_1))

    # We verify that we can drop a schema with tables in it.
    exit_code, stdout = Phoenix.runSQLLineCmds('DROP SCHEMA %s;' % SCHEMA_1)
    if exit_code != 0:
        UpgradePerNode.reportProgress(
            "[FAILED][PHOENIX][Smoke] Schema drop failed due to exitcode = %s " % exit_code)
    else:
        UpgradePerNode.reportProgress("[PASSED][PHOENIX][Smoke] Schema drop succeeded.")

    # We verify that the schema has been dropped.
    exit_code, stdout = Phoenix.runSQLLineCmds(
        'SELECT TABLE_NAME FROM SYSTEM.CATALOG WHERE SCHEMA = %s' % SCHEMA_1,
        outputFormat='xmlattr')
    if exit_code != 0:
        UpgradePerNode.reportProgress(
            "[FAILED][PHOENIX][Smoke] Schema drop verification failed due to exitcode = %s "
            % exit_code)
    else:
        UpgradePerNode.reportProgress("[PASSED][PHOENIX][Smoke] Schema drop succeeded.")
    # If the table name still shows up in the catalog, the schema drop did not
    # remove it.
    if stdout.find(TEST_TABLE_A) != -1:
        UpgradePerNode.reportProgress(
            "[FAILED][PHOENIX][Smoke] Table %s did not drop on drop schema command "
            % TEST_TABLE_A)
    else:
        UpgradePerNode.reportProgress(
            "[PASSED][PHOENIX][Smoke] Table %s successfully dropped." % TEST_TABLE_A)
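# --- Example (hypothetical): verifySchemaFunctionality() repeats the same
# PASSED/FAILED reporting pattern around every step; a small helper like this
# sketch would collapse each check to a single call:
def example_report_step(step_desc, exit_code):
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    if exit_code != 0:
        UpgradePerNode.reportProgress(
            "[FAILED][PHOENIX][Smoke] %s failed due to exitcode = %s" % (step_desc, exit_code))
    else:
        UpgradePerNode.reportProgress("[PASSED][PHOENIX][Smoke] %s succeeded." % step_desc)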
def grantPermissionsToSystemNamespace(cls):
    GRANT_CMDS = ["grant '%s','RWXCA', '@SYSTEM'" % HADOOPQA_USER]
    return HBase.runShellCmds(GRANT_CMDS, user=HBASE_USER)
def run_smoke_test(cls, smoketestnumber, config=None):
    '''
    Run smoke test for Slider
    :param smoketestnumber: Used for unique output log location
    '''
    global new_conf_path
    global buildNo
    from beaver.component.hbase import HBase
    # Run slider agent labels funtests
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress("### Slider smoke test started ####")

    exit_code, stdout = HBase.runShellCmds(
        cls.SCAN_TABLE_CMD, user=cls.HBASE_USER, configPath=new_conf_path)
    UpgradePerNode.reportProgress("### Slider smoke test scanned " + cls.TABLE_NAME)

    hbase_ver = HBase.getVersion(configPath=new_conf_path)
    if buildNo not in hbase_ver:
        UpgradePerNode.reportProgress("### smoke test failed: " + buildNo + " not found in " + hbase_ver)
    else:
        UpgradePerNode.reportProgress("### smoke test passed: " + buildNo + " found in " + hbase_ver)
    UpgradePerNode.reportProgress("scanned " + cls.TABLE_NAME)

    TABLE_NM = cls.id_generator(10)
    CREATE_TABLE_CMD = ["create '" + TABLE_NM + "', 'family1', 'family2', 'family3'"]
    exit_code, stdout = HBase.runShellCmds(
        CREATE_TABLE_CMD, user=cls.HBASE_USER, configPath=new_conf_path)
    if exit_code == 0:
        UpgradePerNode.reportProgress("created " + TABLE_NM)
    else:
        UpgradePerNode.reportProgress("creation of " + TABLE_NM + " failed")

    if not cls._LOCAL_WORK_DIR:
        UpgradePerNode.reportProgress(
            "[FAILED][Slider][Smoke] Slider smoke test failed due to Slider source code unavailability")
        return

    exit_code, stdout = Maven.run(
        "verify -Dslider.conf.dir=%s "
        "-Dhadoop.version=%s "
        "-Dprivate.repo.url=%s "
        "-Dit.test=AppsUpgradeIT" % (cls._LOCAL_CONF_DIR, Hadoop.getVersion(), Maven.getPublicRepoUrl()),
        cwd=os.path.join(cls._LOCAL_WORK_DIR, 'slider-funtest'))

    testresults = {}
    testResultFiles = []
    TEST_RESULT = {}
    # get a list of all the test result files
    for name, dirs, files in os.walk(cls._LOCAL_WORK_DIR):
        if os.path.basename(name) == 'target':
            # Add in each failsafe-report we find -- this test only runs failsafe reports
            testResultFiles.extend(
                util.findMatchingFiles(os.path.join(name, 'failsafe-reports'), 'TEST-*.xml'))
    for resultFile in testResultFiles:
        testresults.update(util.parseJUnitXMLResult(resultFile))
    for key, value in testresults.items():
        TEST_RESULT[key] = value

    logger.info("=======================")
    logger.info(TEST_RESULT)
    logger.info("=======================")

    TestCases = TEST_RESULT.keys()
    for testcase in TestCases:
        result = TEST_RESULT[testcase]['result']
        if result == "pass":
            UpgradePerNode.reportProgress("[PASSED][Slider][Smoke] Slider smoke test passed")
        else:
            UpgradePerNode.reportProgress("[FAILED][Slider][Smoke] Slider smoke test failed")
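# --- Example (hypothetical): an approximation of what util.parseJUnitXMLResult()
# is expected to return for one failsafe report, using only the stdlib. Each
# testcase maps to a dict with a 'result' key, matching how run_smoke_test()
# reads TEST_RESULT[testcase]['result']:
def example_parse_junit_xml(path):
    import xml.etree.ElementTree as ET
    results = {}
    for case in ET.parse(path).getroot().iter('testcase'):
        name = case.get('classname', '') + '.' + case.get('name', '')
        failed = case.find('failure') is not None or case.find('error') is not None
        results[name] = {'result': 'fail' if failed else 'pass'}
    return results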
# Module-level imports (paths assumed from the beaver test framework layout).
import logging
import os

from beaver import util
from beaver.config import Config
from beaver.machine import Machine
from beaver.component.hadoop import Hadoop
from beaver.component.hbase import HBase
from beaver.component.slider import Slider

logger = logging.getLogger(__name__)
CWD = os.path.dirname(os.path.realpath(__file__))
HADOOPQA_USER = Config.get('hadoop', 'HADOOPQA_USER')
ARTIFACTS_DIR = Config.getEnv("ARTIFACTS_DIR")
INPUT_FILE = 'inputFile'
PHOENIX_HOME = Config.get('phoenix', 'PHOENIX_HOME')
SQLLINE_SCRIPT = os.path.join('.', 'sqlline.py')
SQLLINE_THIN_SCRIPT = '/usr/bin/phoenix-sqlline-thin'
PSQL_SCRIPT = os.path.join('.', 'psql.py')
HBASE_LIB_DIR = os.path.join(Config.get('hbase', 'HBASE_HOME'), 'lib')

# Prefer the modified config dir when it exists; fall back to the stock one.
HBASE_CONF_DIR = HBase.getModifiedConfigPath()
if HBASE_CONF_DIR is None or HBASE_CONF_DIR == '' or not os.path.exists(HBASE_CONF_DIR):
    HBASE_CONF_DIR = Config.get('hbase', 'HBASE_CONF_DIR')

HADOOP_CMD = Config.get('hadoop', 'HADOOP_CMD')
HBASE_USER = None
if Hadoop.isSecure():
    HBASE_USER = Config.get('hbase', 'HBASE_USER')
HOST = Machine.getfqdn()
ZK_HOST = util.getPropertyValueFromConfigXMLFile(
    os.path.join(HBASE_CONF_DIR, 'hbase-site.xml'), "hbase.zookeeper.quorum").split(',')[0]
ZK_PORT = util.getPropertyValueFromConfigXMLFile(