def run_background_job(cls, runSmokeTestSetup=True, config=None):
    '''
    Runs background long running TestOrderedWordCount tez job
    :param runSmokeTestSetup: Runs smoke test setup if set to true
    :param config: expected configuration location
    :return: Total number of long running jobs started
    '''
    logger.info("*** Start background job for Tez ***")
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress("### Starting background job for Tez ####")
    # Sleep for 180 seconds between DAGs
    sleepInterval = 180
    cmdLineArgs = ""
    for i in range(4):
        cmdLineArgs += cls._hdfsInputList[i] + " " + cls._hdfsOutputList[i] + " "
    logger.info(cmdLineArgs)
    Tez.runTezExampleJar(
        "testorderedwordcount \"-DUSE_TEZ_SESSION=true\" \"-Dtez.queue.name=%s\" \"-DINTER_JOB_SLEEP_INTERVAL=%d\" \"-DRETAIN_STAGING_DIR=true\" %s " %
        (cls._queue, sleepInterval, cmdLineArgs),
        runInBackground=True
    )
    # Poll for up to 300 seconds until the application shows up as RUNNING
    interval = 300
    while (cls._background_job_appId == '' or cls._background_job_appId is None) and interval > 0:
        logger.info("Trying to get appID..")
        time.sleep(10)
        interval = interval - 10
        cls._background_job_appId = YARN.getAppIDFromAppName("OrderedWordCountSession", state="RUNNING")
    logger.info("*******************appID=%s" % cls._background_job_appId)
    logger.info("*** End background job for Tez ***")
    return 1
def init_test_database(cls, dbflavor):
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    if dbflavor == "mysql":
        from beaver.dbutil import MySQL
        MySQL.recreateDatabase(cls._test_db)
        MySQL.createUserWithAllPriveleges(
            cls._test_db_user,
            cls._test_db_passwd,
            hosts=['%', cls._test_db_host, 'localhost'],
            database=cls._test_db
        )
    elif dbflavor == "oracle":
        from beaver.dbutil import Oracle
        Oracle.dropUser(cls._test_db_user, database=cls._test_db_oracle)
        Oracle.createUser(cls._test_db_user, cls._test_db_passwd, database=cls._test_db_oracle)
    elif dbflavor.startswith("postgres"):
        from beaver.dbutil import Postgres
        Postgres.recreateDatabase(cls._test_db)
        Postgres.createUser(cls._test_db_user, cls._test_db_passwd)
        Postgres.grantAllPrivileges(cls._test_db_user, cls._test_db)
    else:
        UpgradePerNode.reportProgress("[FAILED][SQOOP][INIT] Invalid database flavor '%s' " % dbflavor)
def upgrade_master(cls, version, config=None):
    '''
    Upgrades Master services:
    :param version: Version to be upgraded to
    :param config: Config location
    '''
    from beaver.component.rollingupgrade.ruCommon import hdpSelect
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress("### Knox Upgrade started ####")
    # Stop Knox gateway and ApacheDS
    Knox.stopKnox()
    Knox.stopLdap()
    node = Config.get('knox', 'KNOX_HOST').split(',')[0]
    # Switch to the new version
    hdpSelect.changeVersion("knox-server", version, node)
    # Start ApacheDS and Knox gateway service
    Knox.startLdap()
    Knox.startKnox()
    time.sleep(10)
    UpgradePerNode.reportProgress("### Knox Upgrade Finished ####")
def run_background_job(cls, runSmokeTestSetup=True, config=None):
    '''
    Runs background long running Flume Job
    :param runSmokeTestSetup: Runs smoke test setup if set to true
    :param config: expected configuration location
    :return: Total number of long running jobs started
    '''
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    HDFS.createDirectory(cls._hdfs_test_dir, perm="777", force=True)
    UpgradePerNode.reportProgress("[INFO][FLUME][BGJob] Long running job for Flume component started")
    logger.info("Starting the Flume Agent Topology")
    addlParams = "-Dflume.log.dir=%s -Dflume.log.file=agent2.log" % cls._local_work_dir
    agent2.start("agent2", cls._flume_test_src, addlParams=addlParams, enableDebugLogOnConsole=False)
    logger.info("Sleeping for 10 seconds before starting the other Flume agent")
    time.sleep(10)
    addlParams = "-Dflume.log.dir=%s -Dflume.log.file=agent.log" % cls._local_work_dir
    agent1.start("agent", cls._flume_test_src, addlParams=addlParams, enableDebugLogOnConsole=False)
    time.sleep(5)
    return 1
def run_webhdfs_test(cls):
    '''
    Knox WebHDFS Smoke test
    '''
    cmd = "curl -I -k -u guest:guest-password -X GET 'https://%s:8443/gateway/default/webhdfs/v1/?op=LISTSTATUS'" % (KNOX_HOST)
    exit_code, stdout = Machine.runas(Machine.getAdminUser(), "%s" % (cmd))
    # Fix for intermittent 401 issue - QE-14368
    if "401 Unauthorized" in stdout:
        logger.info("LDAP taking its own time? Retrying WebHDFS smoke test via Knox")
        grep_ldap_cmd = "ps aux | grep ldap.jar"
        exit_code, cmdout = Machine.runas(Machine.getAdminUser(), "%s" % (grep_ldap_cmd))
        if "/usr/hdp/current/knox-server/bin/ldap.jar" in cmdout:
            exit_code, stdout = Machine.runas(Machine.getAdminUser(), "%s" % (cmd))
            logger.info("---Knox Demo LDAP is running. Retried command. %s --- %s ---" % (cmdout, stdout))
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    if "200 OK" in stdout:
        logger.info("WebHDFS smoke test via Knox Passed")
        UpgradePerNode.reportProgress("### [PASSED][Knox][WebHDFS] Knox Webhdfs Smoke Test Passed ####")
    else:
        logger.info("WebHDFS smoke test via Knox Failed")
        UpgradePerNode.reportProgress("### [FAILED][Knox][WebHDFS] Knox Webhdfs Smoke Test Failed ####")
def run_database_query(cls, query, dbflavor):
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    if dbflavor == "mysql":
        from beaver.dbutil import MySQL
        return MySQL.runCmd(
            query,
            host=cls._test_db_host,
            database=cls._test_db,
            user=cls._test_db_user,
            passwd=cls._test_db_passwd
        )
    elif dbflavor == "oracle":
        from beaver.dbutil import Oracle
        return Oracle.runCmd(query, database=cls._test_db_oracle, user=cls._test_db_user, passwd=cls._test_db_passwd)
    elif dbflavor.startswith("postgres"):
        from beaver.dbutil import Postgres
        return Postgres.runCmd(query, database=cls._test_db, user=cls._test_db_user, passwd=cls._test_db_passwd)
    else:
        UpgradePerNode.reportProgress("[FAILED][SQOOP][INIT] Invalid database flavor '%s' " % dbflavor)
def falcon_teardown(cls):
    '''
    Clean up entities from Falcon
    '''
    # Delete entities
    logger.info("Falcon - Begin function falcon_teardown")
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress("[INFO][Falcon][BGJobTeardown] Falcon background jobs cleanup started")
    cls.runCommand(cls._job_user, " entity -type process -delete -name rawEmailIngestProcess", ignoreError=True)
    cls.runCommand(cls._job_user, " entity -type feed -delete -name rawEmailFeed", ignoreError=True)
    cls.runCommand(cls._job_user, " entity -type cluster -delete -name primaryCluster", ignoreError=True)
    cls.runCommand(cls._job_user, " entity -type cluster -delete -name backupCluster", ignoreError=True)
    logger.info("Falcon - End function falcon_teardown")
def run_background_job(cls, runSmokeTestSetup=True, config=None):
    '''
    Runs background long running Sqoop Job
    :param runSmokeTestSetup: Runs smoke test setup if set to true
    :param config: expected configuration location
    :return: Total number of long running jobs started
    '''
    from beaver.component.sqoop import Sqoop
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress("[INFO][SQOOP][BGJob] Long running job for Sqoop component started")
    dbflavor = cls.getGenericDBFlavor()
    if dbflavor == "oracle":
        db = cls._test_db_oracle
    else:
        db = cls._test_db
    cmd = Sqoop.getJdbcOperationCmd(
        "export",
        dbflavor,
        db,
        cls._test_db_user,
        cls._test_db_passwd,
        cls._test_db_host,
        addlargs="--table %s --export-dir %s" % (cls._lrtest_tbl, cls._hdfs_lrtest_dir),
        options="-Dmapred.job.queue.name=%s" % cls._yarn_queue
    )
    Sqoop.runInBackground(cmd)
    if runSmokeTestSetup:
        logger.info("**** Running Sqoop Smoke Test Setup ****")
        cls.smoke_test_setup()
    return 1
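# Illustrative sketch, not part of the original suite: how a verification step could pair the
# long running Sqoop export above with run_database_query() to spot-check that rows actually
# landed in the export table. The COUNT(*) query, the helper name and the idea of inspecting
# the raw command output are assumptions for illustration only; _lrtest_tbl comes from the
# code above.
def example_verify_sqoop_export(cls, dbflavor):
    query = "SELECT COUNT(*) FROM %s" % cls._lrtest_tbl
    output = cls.run_database_query(query, dbflavor)
    logger.info("Row-count check for long running Sqoop export (%s): %s" % (dbflavor, output))
    # A real check would parse the count out of 'output' and assert it is greater than zero.
    return output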
def verifyLongRunningJob(cls): ''' Validate long running background job after end of all component upgrade ''' from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode logger.info("Stop the Flume agents before verification") open(cls._data_stop, 'a').close() time.sleep(60) agent1.stop() agent2.stop() time.sleep(60) logger.info("Verifying the sinked data from Flume agent") exit_code, stdout, stderr = Hadoop.runas(cls._hdfs_user, "dfs -cat %s/*" % cls._hdfs_test_dir, logoutput=False, stderr_as_stdout=False) if exit_code != 0: logger.error( "Following error during the HDFS cat while fetching Flume data: %s" % stderr) if not util.compareOutputToFileIgnoreDupsAndOrder( stdout, cls._data_file): UpgradePerNode.reportProgress( "[FAILED][FLUME][BGJob] Long running test for Flume failed while verifying data" ) else: UpgradePerNode.reportProgress( "### [PASSED][FLUME][BGJob] Long running test validation for Flume passed ####" )
def ru_prepare_save_state_for_upgrade(cls):
    '''
    Prepare Namenode to save State for Upgrade
    '''
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress("[INFO][HDFS][Prepare] Preparing state for HDFS upgrade")
    # BUG-26726: we need to be in safemode only in non HA cluster
    if not HDFS.isHAEnabled():
        exit_code, output = HDFS.runasAdmin("dfsadmin -safemode enter")
        ruAssert("HDFS", exit_code == 0, '[Preparation] enter safemode failed')
    exit_code, output = HDFS.runas(
        Config.get('hadoop', 'HDFS_USER'),
        "dfsadmin -Ddfs.client.test.drop.namenode.response.number=0 -rollingUpgrade prepare"
    )
    ruAssert("HDFS", exit_code == 0, '[Preparation] -rollingUpgrade prepare failed')
    if not HDFS.isHAEnabled():
        exit_code, output = HDFS.runasAdmin("dfsadmin -safemode leave")
        ruAssert("HDFS", exit_code == 0, '[Preparation] leave safemode failed')
    UpgradePerNode.reportProgress("[INFO][HDFS][Prepare] Preparing state for HDFS upgrade finished")
def get_policies_backup(cls, localTestWorkDir1):
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress('#### saving existing policies for backup ####')
    cwObject = XACWTestCase()
    baseUrl = 'http://' + xa_testenv.getEnv('XA_ADMIN_HOST', 'localhost') + ':' + str(xa_testenv.getEnvInt('XA_ADMIN_PORT', 6080))
    urlPoliciesUsingApi = baseUrl + '/service/public/api/policy'
    username_password = xa_testenv.getEnv('XA_ADMIN_USER', 'admin') + ':' + xa_testenv.getEnv('XA_ADMIN_PASS', 'admin')
    headerResponse, response = cwObject.callPyCurlRequest(
        url=urlPoliciesUsingApi, data='', method='get', usernamepassword=username_password
    )
    UpgradePerNode.reportProgress('#### get policy headerResponse is : %s ####' % headerResponse)
    if 'HTTP/1.1 200 OK' in headerResponse:
        file_for_policies = os.path.join(localTestWorkDir1, 'policies.bak')
        UpgradePerNode.reportProgress('#### saving policies to path = %s ####' % str(file_for_policies))
        openReposFile = open(file_for_policies, 'wb')
        openReposFile.write(response)
        openReposFile.close()
    else:
        UpgradePerNode.reportProgress('unable to save policies for backup')
        return False
    UpgradePerNode.reportProgress('#### policies back-up file saved successfully ####')
    return True
def run_client_smoketest(cls, config=None, env=None):
    '''
    Run wordcount Job passing env variables
    :param config: Configuration location
    :param env: Set Environment variables
    '''
    logger.info("**** Running HDFS CLI Test ****")
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress("[INFO][HDFS][ClientSmoke] CLI test for HDFS started")
    if not cls._SmokeInputDir:
        cls._SmokeInputDir = cls._base_hdfs_dir + "/smokeHdfsInput"
    SmokeOutputDir = cls._base_hdfs_dir + '/smokeHdfsOutput_cli'
    HDFS.deleteDirectory(SmokeOutputDir, Config.get('hadoop', 'HADOOPQA_USER'))
    jobCmd = 'jar %s wordcount \"-Dmapreduce.reduce.input.limit=-1\" \"-D%s=%s\" %s %s' % (
        Config.get('hadoop', 'HADOOP_EXAMPLES_JAR'),
        "mapred.job.queue.name",
        cls._queue,
        cls._SmokeInputDir,
        SmokeOutputDir
    )
    exit_code, stdout = Hadoop.run(jobCmd, env=env)
    ruAssert("HDFS", exit_code == 0, "[ClientSmoke] Hdfs smoketest failed")
    # Capture the cleanup exit code so the second assert checks the delete, not the job again
    exit_code, stdout = HDFS.deleteDirectory(SmokeOutputDir)
    ruAssert("HDFS", exit_code == 0, "[ClientSmoke] could not delete: " + SmokeOutputDir)
    UpgradePerNode.reportProgress("[INFO][HDFS][ClientSmoke] CLI test for HDFS Finished")
def wait4DNLive(cls, node):
    i = 1
    maxTries = 30  # i.e. 150 sec - note the delay in QE configs for initial BR is 120 sec
    logger.info('*** Waiting for DN %s to become live ****' % node)
    while i < maxTries:
        livenodes = HDFS.getDatanodesFromJmx()
        if node in livenodes:
            return True
        # Saw strange behaviour where the DNs were reported as IP addresses sometimes; convert and re-check
        livenodesIp = []
        for iNode in livenodes:
            # convert to ip addresses
            livenodesIp.append(util.getIpAddress(iNode))
        if node in livenodesIp:
            return True
        logger.info('*** Waiting for DN %s to become live ****' % node)
        logger.info('*** Live nodes list is: %s %s ****' % (livenodes, livenodesIp))
        time.sleep(5)
        i = i + 1
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress(
        "[WARNING][HDFS][XXX] Datanode %s did not become live after 150 secs of restart, continuing " % node
    )
    return False
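# Illustrative sketch, not part of the original suite: how an HDFS slave-upgrade loop might use
# wait4DNLive() above to gate progress after each DataNode restart. The node list argument and
# the placement of the restart step are assumptions for illustration only.
def example_restart_datanodes_one_by_one(cls, version, nodes):
    for node in nodes:
        # ... restart / version switch for the DataNode on 'node' would go here ...
        if not cls.wait4DNLive(node):
            logger.error("DataNode %s did not rejoin the live set after moving to version %s" % (node, version))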
def runCommand(cls, user, cmd, ignoreError=True):
    """
    Function to run falcon commands cmd as a certain user
    :param user:
    :param cmd:
    :return:
    """
    try:
        from beaver.component.falcon import Falcon
    except ImportError:
        # Import fails when Falcon is not installed on this machine. Nothing to do
        return 0, ""
    exit_code, output = Falcon.runas(user, cmd)
    if ignoreError:
        if exit_code != 0:
            # logger.info("Warning (ignoreError=True): Non-zero exit code when running command " + cmd + " as user " + user)
            from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
            UpgradePerNode.reportProgress(
                "[FAILED][Falcon] Warning (ignoreError=True): Non-zero exit code when running command %s as user %s " % (cmd, user)
            )
    else:
        ruAssert("Falcon", exit_code == 0, "[RunCommand] Could not run command " + cmd + " as user " + user)
    return exit_code, output
def change_master_version(cls, version, config=None, logText="Upgrade"): ''' Changes Master services: :param version: Version to be changed to :param config: Config location ''' logger.info("Falcon - Begin function change_master_version ") from beaver.component.rollingupgrade.ruCommon import hdpSelect from time import sleep try: from beaver.component.falcon import Falcon except ImportError: ## Import fails when Falcon is not installed on this machine. Nothing to do return node = Falcon.get_falcon_server() from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode UpgradePerNode.reportProgress( "[INFO][Falcon][%s] Falcon server for node %s to version %s started" % (logText, node, version)) Falcon.stop(cls._falcon_user, node) sleep(30) hdpSelect.changeVersion("falcon-server", version, node) hdpSelect.changeVersion("falcon-client", version, node) Falcon.start(cls._falcon_user, node) sleep(60) UpgradePerNode.reportProgress( "[INFO][Falcon][%s] Falcon server for node %s to version %s finished" % (logText, node, version)) return
def testAfterAllSlavesRestarted(cls):
    '''
    Function to test that the upgrade completed properly after all masters and slaves are upgraded
    :return:
    '''
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress("### Pig does not have any slaves, so nothing to do here ####")
def run_smoke_test(cls, smoketestnumber, config=None, components_to_test=None):
    '''
    Run smoke test for Argus
    :param smoketestnumber: Used for unique output log location
    '''
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress('#### ru-argus run smoke test number = %s ####' % str(smoketestnumber))
    ruArgusAdmin.run_smoke_test(components_to_test)
def run_background_job(cls, runSmokeTestSetup=True, config=None): ''' Runs background long running Yarn Job :param runSmokeTestSetup: Runs smoke test setup if set to true :param config: expected configuration location :return: Total number of long running jobs started ''' logger.info("Falcon - start running background job") from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode UpgradePerNode.reportProgress( "[PASSED][Falcon][BGJob] starting Falcon background jobs") BACKGROUND_JOB_DIR = os.path.join(cls._local_workspace, "tests", "rolling_upgrade", "falcon", "falconChurnDemo") # Add clusters clusterXml = os.path.join(cls._local_workspace, "primaryCluster.xml") logger.info("Content of cluster xml %s" % clusterXml) f = open(clusterXml, 'r') logger.info(f.read()) f.close() CMD = " entity -type cluster -submit -file " + clusterXml cls.runCommand(cls._job_user, CMD) clusterXml = os.path.join(cls._local_workspace, "backupCluster.xml") CMD = " entity -type cluster -submit -file " + clusterXml cls.runCommand(cls._job_user, CMD) # Add feed feedXml = os.path.join(BACKGROUND_JOB_DIR, "rawEmailFeed.xml") logger.info("Content of feed xml %s" % feedXml) f = open(feedXml, 'r') logger.info(f.read()) f.close() CMD = " entity -type feed -submit -file " + feedXml cls.runCommand(cls._job_user, CMD) # Add Process processXml = os.path.join(BACKGROUND_JOB_DIR, "emailIngestProcess.xml") logger.info("Content of process xml %s" % processXml) f = open(processXml, 'r') logger.info(f.read()) f.close() CMD = " entity -type process -submit -file " + processXml cls.runCommand(cls._job_user, CMD) # Schedule Feed and Process CMD = " entity -type feed -schedule -name rawEmailFeed" cls.runCommand(cls._job_user, CMD) CMD = " entity -type process -schedule -name rawEmailIngestProcess" cls.runCommand(cls._job_user, CMD) cls._job_start_time = datetime.datetime.utcnow().strftime( "%Y-%m-%dT%H:%MZ") logger.info("Falcon - completed running background job") return 1
def downgrade_slave(cls, version, node, config=None):
    '''
    Downgrade slave services
    :param version: version to be downgraded to
    :param config: Configuration location
    '''
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress("### Pig does not have any slaves, so no slaves to downgrade ####")
def verifyLongRunningJob(cls): ''' Validate long running background job after end of all component upgrade ''' logger.info("Verified when stopping Long Running Job ") from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode UpgradePerNode.reportProgress( "[INFO][Falcon][BGJobCheck] Verify Falcon Background Application") cls.runCommand(cls._falcon_user, "admin -version")
def downgrade_master(cls, version, config=None):
    '''
    Downgrade Yarn Master services
    :param version: Version to be downgraded to
    :param config: Configuration location
    '''
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress("[INFO][YARN][Downgrade] Job history server downgrade started ")
    logger.info("**** Begin history server downgrade ****")
    MAPRED.stopHistoryserver()
    historyNode = MAPRED.getHistoryserver()
    # TODO use hdp-select script to upgrade JHS
    from beaver.component.rollingupgrade.ruCommon import hdpSelect
    hdpSelect.changeVersion("hadoop-mapreduce-historyserver", version, historyNode)
    MAPRED.startHistoryserver(config=config)
    time.sleep(5)
    logger.info("**** End history server downgrade, begin timeline server downgrade ****")
    UpgradePerNode.reportProgress("[INFO][YARN][Downgrade] Job history server downgrade finished ")
    UpgradePerNode.reportProgress("[INFO][YARN][Downgrade] Timeline server downgrade started ")
    ytsNode = YARN.getATSHost()
    YARN.stopATSServer(ytsNode)
    hdpSelect.changeVersion("hadoop-yarn-timelineserver", version, ytsNode)
    YARN.startATSServer(config=config)
    time.sleep(5)
    logger.info("**** End timeline server downgrade, begin RM downgrade ****")
    UpgradePerNode.reportProgress("[INFO][YARN][Downgrade] Timeline server downgrade finished ")
    UpgradePerNode.reportProgress("[INFO][YARN][Downgrade] Resource manager downgrade started ")
    if YARN.isHAEnabled():
        nodes = []
        nodes.append(YARN.getRMHostByState('standby'))
        nodes.append(YARN.getRMHostByState('active'))
        for node in nodes:
            YARN.resetResourceManager('stop', config=config, host=node)
            time.sleep(2)
            hdpSelect.changeVersion("hadoop-yarn-resourcemanager", version, node)
            YARN.resetResourceManager('start', config=config, host=node)
            time.sleep(5)
    else:
        node = MAPRED.getJobtracker()
        MAPRED.stopJobtracker()
        time.sleep(2)
        # TODO use hdp-select script to change the RM version
        hdpSelect.changeVersion("hadoop-yarn-resourcemanager", version, node)
        MAPRED.startJobtracker(config=config)
        time.sleep(5)
    logger.info("*** Ending RM downgrade ****")
def downgrade_master(cls, version, config=None):
    '''
    Downgrade Master services
    :param version: Version to be downgraded to
    :param config: Configuration location
    '''
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress("### Pig does not have any master, so no master to downgrade ####")
def upgrade_master(cls, latestVersion, config=None, currVersion=None):
    '''
    Upgrades Master services:
    :param latestVersion: Version to be upgraded to
    :param config: Config location
    :param currVersion: Current version
    '''
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress("Upgrading Argus Admin and User-sync.")
    ruArgusAdmin.upgrade_master(latestVersion, config, currVersion)
    ruArgusUserSync.upgrade_master(latestVersion, config, currVersion)
def smoke_test_setup(cls):
    '''
    Setup required to run Smoke test
    '''
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress("### Running Pig Smoke Test Setup ####")
    exit_code, stdout = HDFS.copyFromLocal(cls._golden_src_file, cls._hdfs_smoke_input_path)
    ruAssert("Pig", exit_code == 0, '[SmokeSetup] Smoke Test Data Load failed')
def downgrade_master(cls, version, config=None, currVersion=None):
    '''
    Downgrade Master services
    :param version: Version to be downgraded to
    :param config: Configuration location
    '''
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress("Downgrading Argus Admin and User-sync.")
    ruArgusUserSync.downgrade_master(version, config, currVersion)
    ruArgusAdmin.downgrade_master(version, config, currVersion)
def smoke_test_setup(cls):
    '''
    Setup required to run Smoke test
    '''
    from tests.oozie.smoke.OozieSmokeTest import OozieSmokeTest
    cls.smokeObj = OozieSmokeTest()
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    function_name = inspect.stack()[0][3]
    UpgradePerNode.reportProgress("### Oozie %s has started ####" % function_name)
    cls.smokeObj.setup({'mapred_job_queue_name': 'oozie'})
def background_job_teardown(cls): ''' Cleanup for long running Yarn job ''' from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode UpgradePerNode.reportProgress("teardown for Argus Admin and User-Sync ") print( "**************************************** teardown for Argus Admin and User-Sync **************************************** " ) ruArgusAdmin.background_job_teardown() ruArgusUserSync.background_job_teardown()
def background_job_setup(cls, components, runSmokeTestSetup=True, config=None):
    '''
    Setup for background long running job
    :param runSmokeTestSetup: Runs smoke test setup if set to true
    '''
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress('#### call for background-job setup for argus admin and usersync started ####')
    ruArgusAdmin.background_job_setup(components, runSmokeTestSetup, config)
    ruArgusUserSync.background_job_setup(runSmokeTestSetup)
    UpgradePerNode.reportProgress('#### call background-job setup for argus admin and usersync done ####')
def background_job_teardown(cls):
    '''
    Cleanup for long running Oozie job
    '''
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    function_name = inspect.stack()[0][3]
    UpgradePerNode.reportProgress("### Oozie %s has started ####" % function_name)
    cls.longRunObj.teardown()
    UpgradePerNode.reportProgress("### Oozie %s has finished ####" % function_name)
def upgrade_slave(cls, version, node, config=None):
    '''
    Upgrades slave services:
    :param version: Version to be upgraded to
    :param node: Slave Node
    :param config: Config location
    :return:
    '''
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress("### Pig does not have any slaves, so no slaves to upgrade ####")