def setup_hbase(self):
    """Create the HBase pre-upgrade test table and seed it with two rows.

    Drops and recreates ``hbase_preUpgrade_table`` with a single column
    family ``cf``, then inserts one cell per row through the HBase shell,
    asserting that each ``put`` exits cleanly.

    Note: a large block of commented-out config-push / restart logic
    (stop masters/region servers, copy modified hbase-site.xml, restart)
    was removed here as dead code; restore from VCS history if the
    config-modification flow is ever needed again.
    """
    from beaver.component.hbase import HBase
    from beaver.component.rollingupgrade.RuAssert import ruAssert

    HBASE_PREUPGRADE_TABLE = "hbase_preUpgrade_table"

    # Drop first so repeated setup runs always start from a clean table.
    HBase.dropAndCreateTable(HBASE_PREUPGRADE_TABLE, "cf")
    exit_code, stdout = HBase.runShellCmds(
        ["put '%s','row1', 'cf:f1', 'holmes'" % HBASE_PREUPGRADE_TABLE])
    ruAssert("HBASE", exit_code == 0)
    exit_code, stdout = HBase.runShellCmds(
        ["put '%s','row2', 'cf:f2', 'watson'" % HBASE_PREUPGRADE_TABLE])
    ruAssert("HBASE", exit_code == 0)
def revokePermissionsToSystemTables(cls, schemaFunctionalityEnabled=False):
    """Revoke HADOOPQA_USER's access to the Phoenix SYSTEM tables.

    With schema functionality enabled the SYSTEM tables live in the
    '@SYSTEM' namespace, so one namespace-level revoke suffices;
    otherwise each SYSTEM.* table is revoked individually.

    Returns the (exit_code, stdout) result of the HBase shell run,
    executed as HBASE_USER.
    """
    if schemaFunctionalityEnabled:
        targets = ['@SYSTEM']
    else:
        targets = ['SYSTEM.CATALOG', 'SYSTEM.STATS',
                   'SYSTEM.SEQUENCE', 'SYSTEM.FUNCTION']
    revoke_cmds = ["revoke '%s', '%s'" % (HADOOPQA_USER, target)
                   for target in targets]
    return HBase.runShellCmds(revoke_cmds, user=HBASE_USER)
def grantPermissionsToSystemTables(cls, schemaFunctionalityEnabled=False):
    """Grant HADOOPQA_USER full (RWXCA) rights on the Phoenix SYSTEM tables.

    With schema functionality enabled the SYSTEM tables live in the
    '@SYSTEM' namespace, so a single namespace-level grant is issued;
    otherwise each SYSTEM.* table is granted individually.

    Returns the (exit_code, stdout) result of the HBase shell run,
    executed as HBASE_USER.
    """
    if schemaFunctionalityEnabled:
        # Fix: the original command omitted the '@SYSTEM' target, which in
        # the HBase shell grants GLOBAL permissions rather than namespace
        # permissions. The revoke counterpart and
        # grantPermissionsToSystemNamespace both target '@SYSTEM'.
        GRANT_CMDS = ["grant '%s','RWXCA', '@SYSTEM'" % HADOOPQA_USER]
    else:
        GRANT_CMDS = [
            "grant '%s','RWXCA', 'SYSTEM.CATALOG'" % HADOOPQA_USER,
            "grant '%s','RWXCA', 'SYSTEM.STATS'" % HADOOPQA_USER,
            "grant '%s','RWXCA', 'SYSTEM.SEQUENCE'" % HADOOPQA_USER,
            "grant '%s','RWXCA', 'SYSTEM.FUNCTION'" % HADOOPQA_USER
        ]
    return HBase.runShellCmds(GRANT_CMDS, user=HBASE_USER)
def setup_storm_hbase_topology(cls, useStandaloneCmd):
    """Build the Storm-HBase example topology and create its target table.

    Copies the Java HBase topology sources into a local work dir, runs a
    Maven package build (parameters passed as -D properties on Windows,
    as environment variables elsewhere), creates HBASE_TABLE_NAME with
    column family 'cf', and grants the Storm user access to it.

    NOTE(review): the useStandaloneCmd parameter is ignored here —
    Storm.getVersion is always called with useStandaloneCmd=True; confirm
    whether the parameter should be forwarded.
    """
    from beaver.component.hbase import HBase
    storm_version = Storm.getVersion(useStandaloneCmd=True)
    # Stage the topology sources where Maven can build them.
    Machine.copy(JAVA_HBASE_SRC_DIR, LOCAL_HBASE_WORK_DIR, user=None, passwd=None)
    if Machine.isWindows():
        # Windows: pass every build parameter on the Maven command line.
        (_, _) = Maven.run(
            'package -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s' %
            (HADOOP_VERSION_MAVEN_PARAMETER, HADOOP_VERSION,
             STORM_VERSION_MAVEN_PARAMETER, storm_version,
             HBASE_CONF_MAVEN_PARAMETER, HBASE_CONF,
             HBASE_FILE_MAVEN_PARAMETER, HBASE_FILE,
             HADOOP_CONF_MAVEN_PARAMETER, HADOOP_CONF,
             HDFS_FILE_MAVEN_PARAMETER, HDFS_FILE,
             CORE_FILE_MAVEN_PARAMETER, CORE_FILE,
             PUBLIC_REPO_MAVEN_PARAMETER, Maven.getPublicRepoUrl()),
            cwd=LOCAL_HBASE_WORK_DIR)
    else:
        # Non-Windows: same parameters, supplied through the environment.
        (_, _) = Maven.run('package', cwd=LOCAL_HBASE_WORK_DIR,
                           env={
                               HADOOP_VERSION_MAVEN_PARAMETER: HADOOP_VERSION,
                               STORM_VERSION_MAVEN_PARAMETER: storm_version,
                               HBASE_CONF_MAVEN_PARAMETER: HBASE_CONF,
                               HBASE_FILE_MAVEN_PARAMETER: HBASE_FILE,
                               HADOOP_CONF_MAVEN_PARAMETER: HADOOP_CONF,
                               HDFS_FILE_MAVEN_PARAMETER: HDFS_FILE,
                               CORE_FILE_MAVEN_PARAMETER: CORE_FILE,
                               PUBLIC_REPO_MAVEN_PARAMETER: Maven.getPublicRepoUrl()
                           })
    exit_code, stdout = HBase.createTable(HBASE_TABLE_NAME, "cf", True, None)
    ruAssert("Storm", exit_code == 0)
    # Let the Storm user read/write the topology's table.
    grant_cmd = "grant '%s', 'RWCXA', '%s', 'cf'" % (Storm.getStormUser(), HBASE_TABLE_NAME)
    # NOTE(review): this grant's exit code is not asserted, unlike the
    # createTable above — confirm whether a failed grant should be fatal.
    exit_code, stdout = HBase.runShellCmds([grant_cmd])
def verify_hbase_topology(cls, topologyName, lines, useStandaloneCmd):
    """Verify the Storm-HBase topology: status check plus table contents.

    In standalone mode, asserts the topology is ACTIVE; in either mode,
    scans the 'WordCount' HBase table and asserts every word in *lines*
    appears in the scan output.
    """
    from beaver.component.hbase import HBase
    # Slider app is killed before long-running job verification, so the
    # topology-activation check is only done for the standalone path.
    if useStandaloneCmd == True:
        status = Storm.getTopologyStatus(
            topologyName, logoutput=True, useStandaloneCmd=useStandaloneCmd)
        ruAssert("Storm", status == 'ACTIVE')
    exit_code, scan_output = HBase.runShellCmds(["scan 'WordCount'"])
    logger.info(exit_code)
    logger.info(scan_output)
    for expected_word in lines:
        ruAssert("Storm",
                 scan_output.find(expected_word) >= 0,
                 "[StormHBaseVerify] %s not found in wordcount table" % expected_word)
def getTableRegions(cls, table, onlineOnly=False):
    """Return the region names of *table* parsed from 'hbase:meta'.

    Runs an hbase:meta scan filtered to rows whose info:regioninfo
    matches the table name (the regex guards against substring matches
    via underscore lookarounds). With onlineOnly=True, rows marked
    OFFLINE are skipped.
    """
    exit_code, stdout = HBase.runShellCmds([
        'import org.apache.hadoop.hbase.filter.CompareFilter',
        'import org.apache.hadoop.hbase.filter.SingleColumnValueFilter',
        'import org.apache.hadoop.hbase.filter.RegexStringComparator',
        'import org.apache.hadoop.hbase.util.Bytes',
        "scan 'hbase:meta',{COLUMNS => 'info:regioninfo',"
        "FILTER =>SingleColumnValueFilter.new(Bytes.toBytes('info'),"
        "Bytes.toBytes('regioninfo'),"
        "CompareFilter::CompareOp.valueOf('EQUAL'),"
        "RegexStringComparator.new('(?<!_)%s(?!_)'))}" % table
    ])
    assert exit_code == 0
    # Trim the shell banner/footer: keep from the first occurrence of the
    # table name through the closing brace of the last regioninfo entry.
    trimmed = stdout[stdout.find(table):stdout.rfind('}')]
    # Third comma-separated field of each row is "NAME => <region>".
    return [
        row.split(', ')[2].split('=>')[1].strip()
        for row in trimmed.splitlines(True)
        if not (onlineOnly and "OFFLINE => true" in row)
    ]
def run_smoke_test(cls, smoketestnumber, config=None):
    '''
    Run the Slider/HBase smoke test: scan the upgrade table, verify the
    HBase build number, create a throwaway table, then run the Slider
    funtest AppsUpgradeIT suite via Maven and report each JUnit result.
    :param smoketestnumber: Used for unique output log location
    :param config: unused here; kept for call-compatibility
    '''
    global new_conf_path
    global buildNo
    from beaver.component.hbase import HBase
    # Run slider agent labels funtests
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress("### Slider smoke test started ####")
    # Scan the pre-existing table against the new (upgraded) config.
    exit_code, stdout = HBase.runShellCmds(cls.SCAN_TABLE_CMD, user=cls.HBASE_USER, configPath=new_conf_path)
    UpgradePerNode.reportProgress("### Slider smoke test scanned " + cls.TABLE_NAME)
    # The reported HBase version string must contain the expected build
    # number, proving the upgraded bits are actually in use.
    hbase_ver = HBase.getVersion(configPath=new_conf_path)
    if buildNo not in hbase_ver:
        UpgradePerNode.reportProgress("### smoke test failed: " + buildNo + " not found in " + hbase_ver)
    else:
        UpgradePerNode.reportProgress("### smoke test passed: " + buildNo + " found in " + hbase_ver)
    UpgradePerNode.reportProgress("scanned " + cls.TABLE_NAME)
    # Create a randomly-named table to prove DDL works post-upgrade.
    TABLE_NM = cls.id_generator(10)
    CREATE_TABLE_CMD = ["create '" + TABLE_NM + "', 'family1', 'family2', 'family3'"]
    exit_code, stdout = HBase.runShellCmds(CREATE_TABLE_CMD, user=cls.HBASE_USER, configPath=new_conf_path)
    if exit_code == 0:
        UpgradePerNode.reportProgress("created " + TABLE_NM)
    else:
        UpgradePerNode.reportProgress("creation of " + TABLE_NM + "failed")
    # Without the Slider source checkout the Maven funtest phase cannot
    # run; report the failure and bail out.
    if not cls._LOCAL_WORK_DIR:
        UpgradePerNode.reportProgress(
            "[FAILED][Slider][Smoke] Slider smoke test failed due to Slider source code unavailability"
        )
        return
    # Run only the AppsUpgradeIT integration test from slider-funtest.
    exit_code, stdout = Maven.run(
        "verify -Dslider.conf.dir=%s "
        "-Dhadoop.version=%s "
        "-Dprivate.repo.url=%s "
        "-Dit.test=AppsUpgradeIT" % (cls._LOCAL_CONF_DIR, Hadoop.getVersion(), Maven.getPublicRepoUrl()),
        cwd=os.path.join(cls._LOCAL_WORK_DIR, 'slider-funtest')
    )
    testresults = {}
    testResultFiles = []
    TEST_RESULT = {}
    # get a list of all the test result files
    for name, dirs, files in os.walk(cls._LOCAL_WORK_DIR):
        if os.path.basename(name) == 'target':
            # Add in each failsafe-report we find -- this test only runs failsafe reports
            testResultFiles.extend(util.findMatchingFiles(os.path.join(name, 'failsafe-reports'), 'TEST-*.xml'))
    # Merge every JUnit XML into one result map keyed by test name.
    for resultFile in testResultFiles:
        testresults.update(util.parseJUnitXMLResult(resultFile))
    for key, value in testresults.items():
        TEST_RESULT[key] = value
    logger.info("=======================")
    logger.info(TEST_RESULT)
    logger.info("=======================")
    # Report one PASSED/FAILED progress line per parsed test case.
    TestCases = TEST_RESULT.keys()
    for testcase in TestCases:
        result = TEST_RESULT[testcase]['result']
        if result == "pass":
            UpgradePerNode.reportProgress("[PASSED][Slider][Smoke] Slider smoke test passed")
        else:
            UpgradePerNode.reportProgress("[FAILED][Slider][Smoke] Slider smoke test failed")
def grantPermissionsToSystemNamespace(cls):
    """Grant HADOOPQA_USER full (RWXCA) rights on the '@SYSTEM' namespace.

    Returns the (exit_code, stdout) result of the HBase shell run,
    executed as HBASE_USER.
    """
    grant_cmd = "grant '%s','RWXCA', '@SYSTEM'" % HADOOPQA_USER
    return HBase.runShellCmds([grant_cmd], user=HBASE_USER)