def verify_test_result(self, exit_code, test_class_name, current_dir):
    """Report stack traces for each failed test when the run exited non-zero.

    :param exit_code: exit status of the test run (0 means success, nothing logged)
    :param test_class_name: test class whose stack traces are collected
    :param current_dir: directory searched for the test output/reports
    """
    if exit_code == 0:
        # Clean run: nothing to report.
        return
    UpgradeLogger.reportProgress(
        "=====Error details for failed test(s)=====\n ", False)
    traces = UpgradeLogger.get_stack_trace(test_class_name, current_dir)
    for test_name, trace in traces.items():
        UpgradeLogger.reportProgress(
            "Test:%s AND Stacktrace: %s" % (str(test_name), trace), False)
def perform_post_upgrade_steps(self):
    """Run stack-specific steps after the express upgrade completes.

    On an HDP stack, when running the 'experiment' component with Hive
    installed, recursively re-opens the Hive warehouse directory
    permissions so post-upgrade tests can write to it. In every other
    case only a "nothing to do" message is logged.
    """
    if Config.getEnv("HDP_STACK_INSTALLED").lower() == "true":
        # Lazy imports so non-HDP runs never load the HDP components.
        # Fix: dropped the unused 'Hadoop' name from the original import.
        from beaver.component.hadoop import HDFS
        from beaver.component.hive import Hive
        COMPONENT = str(self.COMPONENT)
        HDFS_USER = Config.get('hadoop', 'HDFS_USER')
        if 'experiment' in COMPONENT and Hive.isInstalled():
            HIVE_WAREHOUSE_DIR = Hive.getConfigValue(
                "hive.metastore.warehouse.dir", defaultValue="/apps/hive/warehouse"
            )
            # NOTE(review): permission is passed as the int 777 — confirm
            # HDFS.chmod expects an int here rather than the string "777".
            HDFS.chmod(HDFS_USER, 777, HIVE_WAREHOUSE_DIR, True)
        else:
            UpgradeLogger.reportProgress("No additional post-upgrade steps defined for EU", True)
    else:
        logger.info("No additional post-upgrade steps defined for EU on HDF")
def prepare_and_start_long_running_jobs(self):
    """Prepare the cluster and start long-running background jobs for every
    component under test, then report how many jobs were started.

    Side effects: mutates module-level component lists in ruSetup, creates
    the HDFS user directory, saves pre-upgrade state, runs pre-upgrade smoke
    tests, and launches the background jobs that validate_lr_job() later
    stops and verifies.
    """
    ############################Prepare and start long running jobs
    self.find_components_to_test()
    import beaver.component.rollingupgrade.ruSetup as ruSetup
    # ruSetup exposes module-level lists consumed elsewhere in the RU
    # framework; point all of them at this run's component list.
    ruSetup.COMPONENTS_TO_FLIP = self.COMPONENTS_TO_TEST
    ruSetup.COMPONENTS_AFFECTED = self.COMPONENTS_TO_TEST
    ruSetup.COMPONENTS_TO_TEST = self.COMPONENTS_TO_TEST
    ruSetup.COMPONENTS_TO_IMPORT = self.COMPONENTS_TO_TEST
    from beaver.component.hadoop import HDFS
    if "slider" in self.COMPONENTS_TO_TEST:
        # Imported but not referenced here — presumably for import-time
        # side effects; TODO confirm before removing.
        from beaver.component.rollingupgrade.ruSlider import ruSlider
    from beaver.component.rollingupgrade.ruCommon import Rollingupgrade
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    HDFS.createUserDirWithGroup(
        '/user/' + Config.get('hadoop', 'HADOOPQA_USER'), 'hdfs',
        Config.get('hadoop', 'HADOOPQA_USER'), 'hdfs', diffGroup=True)
    logger.info(Config.get('hadoop', 'HADOOPQA_USER'))
    # Numbered divider lines below are progress breadcrumbs for debugging.
    logger.info(
        "1==========================================================================="
    )
    self.setup_ru_cluster()
    logger.info(
        "2==========================================================================="
    )
    logger.info(
        "3==========================================================================="
    )
    # NOTE(review): DN is never used afterwards; the call itself may still
    # matter as a liveness probe — confirm before removing.
    DN = HDFS.getDatanodes()
    # Find core components (HDFS, YARN, HBase) if exist.
    #core_components = UpgradePerNode.find_existing_core_components(COMPONENTS_TO_TEST)
    logger.info(
        "4==========================================================================="
    )
    #Prepare and save state before upgrade
    Rollingupgrade.ru_prepare_save_state_for_upgrade(
        self.COMPONENTS_TO_TEST)
    logger.info(
        "5==========================================================================="
    )
    # Run setup for background Jobs for all components
    Rollingupgrade.background_job_setup(self.COMPONENTS_TO_TEST, config=None)
    logger.info("Running smoke tests before upgrade")
    Rollingupgrade.run_smokeTests(self.COMPONENTS_TO_TEST)
    logger.info(
        "6==========================================================================="
    )
    # Starts Long running background Jobs for all components
    numBackgroundJobs = Rollingupgrade.run_longRunning_Application(
        self.COMPONENTS_TO_TEST, config=None)
    logger.info(
        "7==========================================================================="
    )
    logger.info(
        "Total number of long running background jobs before starting upgrade is %s"
        % numBackgroundJobs)
    # Report to both progress channels so either log shows the job count.
    UpgradePerNode.reportProgress(
        "### Just started %s background jobs ###" % numBackgroundJobs)
    UpgradeLogger.reportProgress(
        "### Just started %s background jobs ###" % numBackgroundJobs, True)
def perform_pre_upgrade_tests(self):
    """Start the long-running jobs that must survive the upgrade.

    NOTE(review): perform_pre_upgrade_tests is defined a second time later
    in this file; if both definitions live in the same class the later one
    shadows this one — confirm which is intended.
    """
    UpgradeLogger.reportProgress("=====Starting LR jobs=========", True)
    self.prepare_and_start_long_running_jobs()
def validate_lr_job(self):
    """Stop, wait for, and validate the long-running background jobs that
    were started before the upgrade, then tear them down.

    Flow: drop an HDFS flag file to signal the HDFS background job to stop,
    stop each component's long-running job, wait for YARN to report zero
    running apps, list any Failed/Killed apps, verify the jobs, kill any
    leftover applications, and run the background-job teardown.
    """
    #################################################Finsih long running jobs
    ### Need to stop HDFS Falcon,Yarn long runningJobs ####
    # create flagFile to kill HDFS background job
    from beaver.component.hadoop import HDFS, YARN
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    from beaver.component.rollingupgrade.ruCommon import Rollingupgrade
    TEST_USER = Config.get('hadoop', 'HADOOPQA_USER')
    # The flag file is polled by the HDFS background job as its stop signal.
    createCmd = "dfs -touchz " + UpgradePerNode._HDFS_FLAG_FILE
    exit_code, output = HDFS.runas(TEST_USER, createCmd)
    logger.info(
        "8==========================================================================="
    )
    if self.DO_DOWNGRADE:
        # Falcon is excluded from validation on downgrade runs.
        self.removeComponentFromTest("falcon")
    if "falcon" in self.COMPONENTS_TO_TEST:
        from beaver.component.rollingupgrade.ruFalcon import ruFalcon
        ruFalcon.stopFalconLongRunningJob()
    if "yarn" in self.COMPONENTS_TO_TEST:
        from beaver.component.rollingupgrade.ruYarn import ruYARN
        ruYARN.stopYarnLongRunningJob()
    # if "hive" in self.COMPONENTS_TO_TEST:
    #     from beaver.component.rollingupgrade.ruHive import ruHive
    #     ruHive.stopHiveLongRunningJob()
    if "slider" in self.COMPONENTS_TO_TEST:
        from beaver.component.rollingupgrade.ruSlider import ruSlider
        ruSlider.stopSliderLongRunningJob()
    if "storm-slider" in self.COMPONENTS_TO_TEST:
        from beaver.component.rollingupgrade.ruStorm import ruStorm
        ruStorm.teardown_storm_slider_app()
    logger.info(
        "9==========================================================================="
    )
    ## TODO - wait for long running jobs to finish
    isZero = YARN.waitForZeroRunningApps()
    logger.info(
        "10==========================================================================="
    )
    if isZero:
        UpgradePerNode.reportProgress("#### None apps are running. ####")
        UpgradeLogger.reportProgress("#### None apps are running. ####", True)
    else:
        UpgradePerNode.reportProgress(
            "#### Check Failed. some apps are running. ####")
        UpgradeLogger.reportProgress(
            "#### Check Failed. some apps are running. ####", False)
    #assert isZero, "all long running jobs are not finished"
    ### List down Failed/Killed applications ####
    Failed_Killed_apps = YARN.getFailedKilledAppList()
    UpgradePerNode.reportProgress(
        "### Listing Killed/Failed applications while performing upgrade ####"
    )
    UpgradeLogger.reportProgress(
        "### Listing Killed/Failed applications while performing upgrade ####",
        False)
    for app in Failed_Killed_apps:
        queue = YARN.getQueueForApp(app)
        logger.info(" %s running on %s queue Failed/Killed." % (app, queue))
        UpgradePerNode.reportProgress(
            "#### %s running on %s queue Failed/Killed. ####" % (app, queue))
        UpgradeLogger.reportProgress(
            "#### %s running on %s queue Failed/Killed. ####" % (app, queue),
            False)
    ## TODO - Validate long running jobs
    Rollingupgrade.verifyLongRunningJob(self.COMPONENTS_TO_TEST)
    ## KILL APPLICATIONS ####
    YARN.killAllApplications(useYarnUser=True)
    #logger.info("Running smoke tests after upgrade")
    #Rollingupgrade.run_smokeTests(COMPONENTS_TO_TEST)
    ## TODO - call Teardown for long running jobs
    Rollingupgrade.background_job_teardown(self.COMPONENTS_TO_TEST, None)
    UpgradePerNode.reportProgress("### Completed upgrade ")
    UpgradeLogger.reportProgress("### Completed upgrade ", True)
def perform_express_upgrade(self):
    """Run the Express Upgrade test suite for the current component/stack.

    Flow: stage the API test framework into the artifacts dir, populate its
    config, pick a .suite file based on STACK_TYPE/COMPONENT, run each test
    class of the chosen split via Maven, collect failures (aborting the rest
    once the upgrade itself has failed), then consolidate surefire reports.

    Side effects: copies/chmods files on disk, runs Maven and shell commands,
    and rebinds self.LOCAL_WORK_DIR.
    """
    COMPONENT = str(self.COMPONENT)
    STACK_TYPE = str(self.STACK_TYPE)
    env = {}
    # DISPLAY is forwarded to the Maven child process (UI tests need it).
    env['DISPLAY'] = self.DISPLAY
    # Update pom.xml of uifrm with Markers - applies for tests that involve
    # a combination of API and UI tests run within the same split
    ambariutils.update_pom_xml_with_markers(self.LOCAL_WORK_DIR)
    SRC_DIR = os.path.join(Config.getEnv('WORKSPACE'), 'apitestframework')
    print "SRC_DIR = ", SRC_DIR
    # Change the current directory for api test code
    LOCAL_WORK_DIR = os.path.join(Config.getEnv('ARTIFACTS_DIR'), COMPONENT, 'apitestframework')
    # Copy the ambari api test code to artifacts dir
    shutil.copytree(SRC_DIR, LOCAL_WORK_DIR)
    # Change the permission of openstack-keypairfile (owner read-only, as
    # SSH key material requires)
    os.chmod(os.path.join(LOCAL_WORK_DIR, 'src/resources/openstack-keypair'), 0400)
    # populate the config properties file for api test code
    configPropChanges = ambariutils.updateProperties()
    # Change the current directory for api test code
    LOCAL_WORK_DIR = os.path.join(
        Config.getEnv('ARTIFACTS_DIR'), COMPONENT, 'apitestframework', 'src', 'resources'
    )
    # Rewrite config.properties in place with the computed overrides.
    util.writePropertiesToFile(
        os.path.join(LOCAL_WORK_DIR, 'config.properties'),
        os.path.join(LOCAL_WORK_DIR, 'config.properties'), configPropChanges
    )
    # Change the current directory for api test code
    LOCAL_WORK_DIR = os.path.join(Config.getEnv('ARTIFACTS_DIR'), COMPONENT, 'apitestframework')
    testSuiteLocation = "src/test/suites"
    # Check what is the STACK_TYPE. Based on the stack type we will decide
    # which test suite to select
    logger.info(
        "STACK TYPE is " + STACK_TYPE + ". So going into " + STACK_TYPE + " if block for test suite "
        "selection"
    )
    if STACK_TYPE == "HDF":
        # Setup for HDF EU. We can now add an else method to handle ru
        # NOTE(review): if COMPONENT matches neither branch below,
        # testSuiteFile is unbound at the logger.info call — confirm all
        # HDF components contain 'ambarieu-hdf'.
        if 'ambarieu-hdf-downgrade' in COMPONENT:
            testSuiteFile = "ambarieu-hdf-downgrade.suite"
        elif 'ambarieu-hdf' in COMPONENT:
            testSuiteFile = "ambarieu-hdf.suite"
        logger.info("Setting HDF testsuite file to " + testSuiteFile)
    elif STACK_TYPE == "HDP" and "ambarieu-hdp-hdf" in COMPONENT:
        testSuiteFile = "ambarieu-hdp-hdf.suite"
    else:
        if 'e2e' not in COMPONENT:
            # These are system tests so use suite filename sent from json file
            testSuiteFile = COMPONENT + ".suite"
        else:
            # E2E runs: pick the suite from keywords embedded in COMPONENT.
            # Order matters — more specific substrings are matched first
            # (e.g. 'patchupgradeintg-ru' before 'patchupgradeintg').
            if 'dwngd' in COMPONENT:
                # For EU E2E downgrade runs
                testSuiteFile = "ambari-expressupgrade-downgrade-e2e.suite"
            elif 'mlevel' in COMPONENT:
                # For EU E2E multi-level paths runs
                testSuiteFile = "ambari-expressupgrade-mlevel-e2e.suite"
            elif 'denygpl' in COMPONENT:
                # For Deny GPL tests in EU E2E
                testSuiteFile = "ambarieu-denygpl.suite"
            elif 'wkflow' in COMPONENT:
                # For EU E2E workflow suite runs
                testSuiteFile = "ambari-expressupgrade-wkflow-e2e.suite"
            elif 'iopmigration' in COMPONENT:
                # Added for IOP migration E2E tests
                testSuiteFile = "ambari-iopmigration-e2e.suite"
            elif 'iopintg' in COMPONENT:
                # Added for IOP integration tests
                testSuiteFile = "ambarieu-iopintg-e2e.suite"
            elif 'patchupgradeintg-ru' in COMPONENT:
                # Added for RU PU integration tests
                testSuiteFile = "ambarieu-patchupgradeintg-ru-e2e.suite"
            elif 'patchupgradeintg-thirddigit' in COMPONENT:
                # Added for EU PU 3rd digit integration tests
                testSuiteFile = "ambarieu-patchupgradeintg-thirddigit-e2e.suite"
            elif 'patchupgradeintg-revert' in COMPONENT:
                # Added for EU PU integration tests with revert
                testSuiteFile = "ambarieu-patchupgradeintg-revert-e2e.suite"
            elif 'patchupgradeintg' in COMPONENT:
                # Added for EU PU integration tests
                testSuiteFile = "ambarieu-patchupgradeintg-e2e.suite"
            elif 'experiment' in COMPONENT:
                # Added for full EU integration tests
                testSuiteFile = "ambari-expressupgrade-experiment-e2e.suite"
            else:
                # Default ofr EU E2E runs
                testSuiteFile = "ambari-expressupgrade-upgrade-e2e.suite"
    logger.info("Opening test suite file : " + testSuiteFile + " for test execution")
    file = open(os.path.join(LOCAL_WORK_DIR, testSuiteLocation, testSuiteFile))
    testSuite = json.load(file)
    file.close()
    # magic word to use as key in suite file
    magic = "split" + self.splitNumStr
    print "magic key is : ", magic
    if 'experiment' in COMPONENT or 'patchupgradeintg' in COMPONENT or 'iopintg' in COMPONENT:
        # These runs always execute the first (only) split.
        magic = "split1"
        print "magic key for experiment EU/PU run is : ", magic
    # Update pom.xml for API framework with Markers info
    ambariutils.update_pom_xml_with_markers(LOCAL_WORK_DIR)
    upgrade_test_results = {}
    # Iterate over the list of all test classes in the split and execute them
    logger.info("=====Starting Express Upgrade tests=========")
    if testSuite.has_key(magic):
        for testName in testSuite[magic]:
            # If the class is not in the current framework dir, switch
            # between the API and UI framework working directories.
            if not ambariutils.isTestClassPresent(testName, LOCAL_WORK_DIR):
                LOCAL_WORK_DIR = ambariutils.switchDirectory(LOCAL_WORK_DIR, COMPONENT)
            logger.info('LOCAL_WORK_DIR %s ', LOCAL_WORK_DIR)
            logger.info('================Running %s with maven===============' % (testName))
            self.LOCAL_WORK_DIR = LOCAL_WORK_DIR
            exit_code, stdout = self.Maven2runas(
                ' -Dtest=%s -DfailIfNoTests=false test' % testName, cwd=self.LOCAL_WORK_DIR, env=env, user='******'
            )
            UpgradeLogger.reportProgress('================Finished %s ========================' % (testName), True)
            logger.info('Exit code of the test: %s ' % (exit_code))
            if exit_code != 0:
                upgrade_test_results.update(UpgradeLogger.get_stack_trace(testName, LOCAL_WORK_DIR))
                UpgradeLogger.reportProgress("Test failure encountered: %s" % (testName), False)
                # Do not run any further tests if Upgrade itself has failed
                if self.is_upgrade_executed():
                    if not self.is_stack_upgrade_success():
                        UpgradeLogger.reportProgress('Express Upgrade failed, aborting rest of the tests', False)
                        break
                else:
                    UpgradeLogger.reportProgress(
                        'Error(s) in steps before starting Upgrade, aborting rest of the tests', False
                    )
                    break
    else:
        print "================Not correct test suite format========================"
    # Summarize: list failed tests and their details, or report success.
    if len(upgrade_test_results) > 0:
        UpgradeLogger.reportProgress("=====List of failed test(s)=====\n", False)
        for key, value in upgrade_test_results.items():
            UpgradeLogger.reportProgress(key, False)
        UpgradeLogger.reportProgress("=====Error details for failed test(s)=====", False)
        for key, value in upgrade_test_results.items():
            UpgradeLogger.reportProgress("Test:%s AND Failure details:\n %s" % (key, value), False)
        UpgradeLogger.reportProgress("=======================================================", True)
    else:
        UpgradeLogger.reportProgress("=====Express Upgrade test(s) completed successfully=========", True)
    # Gather reports for tests executed from apitestframework dir
    LOCAL_WORK_DIR = os.path.join(Config.getEnv('ARTIFACTS_DIR'), COMPONENT, 'apitestframework')
    Machine.runas('root', 'chmod -R 777 ' + LOCAL_WORK_DIR)
    uifrmReportDirectory = os.path.join(LOCAL_WORK_DIR, '..', 'target/surefire-reports')
    if not os.path.exists(uifrmReportDirectory):
        Machine.runas('root', 'mkdir -p ' + uifrmReportDirectory)
        Machine.runas('root', 'chmod -R 777 ' + uifrmReportDirectory)
        logger.info('Created path for reporting')
    Machine.runas('root', 'chmod -R 777 ' + os.path.join(LOCAL_WORK_DIR, '..', 'target'))
    apiReportDirectory = os.path.join(LOCAL_WORK_DIR, 'target', 'surefire-reports', 'junitreports')
    if os.path.exists(apiReportDirectory):
        # Merge the API framework's junit reports into the uifrm report dir.
        files = os.listdir(os.path.join(LOCAL_WORK_DIR, 'target', 'surefire-reports', 'junitreports'))
        for file in files:
            shutil.copy(
                os.path.join(LOCAL_WORK_DIR, 'target', 'surefire-reports', 'junitreports', file),
                os.path.join(LOCAL_WORK_DIR, '..', 'target', 'surefire-reports')
            )
    # Switch back to uifrm dir for reporting purposes
    LOCAL_WORK_DIR = os.path.join(Config.getEnv('ARTIFACTS_DIR'), COMPONENT)
    self.LOCAL_WORK_DIR = LOCAL_WORK_DIR
def perform_pre_upgrade_tests(self):
    """Report that no pre-upgrade tests are defined for Express Upgrade.

    NOTE(review): this redefines perform_pre_upgrade_tests (an earlier
    definition in this file starts the LR jobs); if both are methods of
    the same class this later definition shadows the earlier one —
    confirm that is intended.
    """
    message = "No pre-upgrade tests defined for EU"
    UpgradeLogger.reportProgress(message, True)
def print_environment_details(self):
    """Write key cluster/environment settings to the upgrade progress log."""

    def _report(label, value):
        # All entries are informational lines (success flag True).
        UpgradeLogger.reportProgress(label + value, True)

    UpgradeLogger.reportProgress("=====Environment Details=====\n ", True)
    is_hdp = self.STACK_TYPE.lower() == "hdp"
    _report("Ambari URL : ", Ambari.getWebUrl(is_hdp=is_hdp))
    _report("Ambari OS : ", Ambari.getOS())
    _report("Stack Type : ", Config.get('ambari', 'STACK_TYPE'))
    _report("Ambari DB : ", Config.get('ambari', 'AMBARI_DB'))
    _report("Kerberos : ", Config.get('machine', 'IS_SECURE'))
    _report("HA : ", Config.get('ambari', 'IS_HA_TEST'))
    _report("Wire Encryption : ", Config.get('ambari', 'WIRE_ENCRYPTION'))
def update_result_as_failed(self, error_message):
    """Log *error_message* as a failure and return False.

    The constant False return lets callers write
    ``result = self.update_result_as_failed(msg)`` in one step.
    """
    UpgradeLogger.reportProgress(error_message, False)
    return False
def throw_exception(self, message, stacktrace):
    """Log *message* plus *stacktrace* as a failure, then raise.

    The raised Exception carries only *message*; the stacktrace text goes
    to the progress log alone.

    :raises Exception: always.
    """
    combined = message + stacktrace
    UpgradeLogger.reportProgress(combined, False)
    raise Exception(message)