def change_master_version(cls, version, config=None, logText="Upgrade"):
    '''
    Switch the Falcon master (server + client packages) to another version.

    :param version: Version to be changed to
    :param config: Config location (currently unused here)
    :param logText: Label ("Upgrade"/"Downgrade") embedded in progress messages
    '''
    logger.info("Falcon - Begin function change_master_version ")
    from time import sleep
    from beaver.component.rollingupgrade.ruCommon import hdpSelect
    try:
        from beaver.component.falcon import Falcon
    except ImportError:
        ## Import fails when Falcon is not installed on this machine. Nothing to do
        return
    node = Falcon.get_falcon_server()
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress(
        "[INFO][Falcon][%s] Falcon server for node %s to version %s started" %
        (logText, node, version))
    # Stop the server, flip both server and client packages, then restart.
    Falcon.stop(cls._falcon_user, node)
    sleep(30)
    for package in ("falcon-server", "falcon-client"):
        hdpSelect.changeVersion(package, version, node)
    Falcon.start(cls._falcon_user, node)
    sleep(60)
    UpgradePerNode.reportProgress(
        "[INFO][Falcon][%s] Falcon server for node %s to version %s finished" %
        (logText, node, version))
def switch_master_version(cls, version):
    '''
    Switch the Flume agent service to the given version, restarting
    both agents one after the other.

    :param version: Version to be switched to
    '''
    from beaver.component.rollingupgrade.ruCommon import hdpSelect
    logger.info("Stop the second Flume Agent before upgrade")
    # Touch the stop-marker file; presumably it signals the agent to
    # drain before we stop it -- TODO confirm against the agent setup.
    open(cls._data_stop, 'a').close()
    time.sleep(10)
    agent1.stop()
    time.sleep(3)
    os.remove(cls._data_stop)
    hdpSelect.changeVersion("flume-server", version)
    logger.info("Restart the Flume agents with the new version")
    log_opts = "-Dflume.log.dir=%s -Dflume.log.file=agent.log" % cls._local_work_dir
    agent1.start(
        "agent", cls._flume_test_src, addlParams=log_opts, enableDebugLogOnConsole=False)
    time.sleep(20)
    # Second agent is flipped only after the first is back up.
    agent2.stop()
    time.sleep(10)
    log_opts = "-Dflume.log.dir=%s -Dflume.log.file=agent2.log" % cls._local_work_dir
    agent2.start(
        "agent2", cls._flume_test_src, addlParams=log_opts, enableDebugLogOnConsole=False)
    time.sleep(10)
def upgrade_slave(cls, version, node, config=None, logText="Upgrade"):
    '''
    Upgrade HDFS slave sevice: Datanode
    :param version: latestVersion.
    :param node: The node name where DN is running
    :param config: Optional config dir passed to startDatanodes
    :param logText: Label ("Upgrade"/"Downgrade") used in progress messages
    '''
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    # logText is used twice in the format string, producing e.g.
    # "Datanode Upgrade for node ... started".
    UpgradePerNode.reportProgress(
        "[INFO][HDFS][%s] Datanode %s for node %s to version %s started " %
        (logText, logText, node, version))
    ipcPort = HDFS.getDataNodeIPCPort()
    # Ask the DN to shut itself down via dfsadmin, in "upgrade" mode.
    cmd = "dfsadmin -shutdownDatanode %s:%s upgrade" % (node, ipcPort)
    exitcode, stdout = HDFS.runas(
        Config.get('hadoop', 'HDFS_USER'),
        cmd,
        env=None,
        logoutput=True,
        config=None,
        host=None,
        skipAuth=False)
    if exitcode != 0:
        # Failure of the shutdown command is reported but not fatal;
        # we still wait for the DN to go down below.
        UpgradePerNode.reportProgress(
            "[INFO][HDFS][%s] Datanode shutdownDatanode command failed for %s " % (logText, node))
    # Cap client connect retries so the wait returns quickly once the DN is down.
    HDFS.waitForDNDown(node, ipcPort, "ipc.client.connect.max.retries=1")
    from beaver.component.rollingupgrade.ruCommon import hdpSelect
    hdpSelect.changeVersion("hadoop-hdfs-datanode", version, node)
    HDFS.startDatanodes(config=config, nodes=[node])
    # Wait for the restarted DN to report live before declaring success.
    cls.wait4DNLive(node)
    UpgradePerNode.reportProgress(
        "[INFO][HDFS][%s] Datanode %s for node %s to version %s finished " %
        (logText, logText, node, version))
def upgrade_master(cls, version, config=None):
    '''
    Upgrade the Knox gateway (and its ApacheDS LDAP) to a new version.

    :param version: Version to be upgraded to
    :param config: Config location (currently unused here)
    '''
    from beaver.component.rollingupgrade.ruCommon import hdpSelect
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress("### Knox Upgrade started ####")
    # First host in the KNOX_HOST list is the one we operate on.
    knox_host = Config.get('knox', 'KNOX_HOST').split(',')[0]
    # Stop gateway and the ApacheDS LDAP backing it.
    Knox.stopKnox()
    Knox.stopLdap()
    # Flip the package version.
    hdpSelect.changeVersion("knox-server", version, knox_host)
    # Bring LDAP up first, then the gateway.
    Knox.startLdap()
    Knox.startKnox()
    time.sleep(10)
    UpgradePerNode.reportProgress("### Knox Upgrade Finished ####")
def downgrade_client(cls, version, node, config=None):
    '''
    Downgrade the slider client.

    :param version: Version to be downgraded to
    :param node: Slave Node (not used -- the client lives on the gateway)
    :param config: Config location (not used)
    :return:
    '''
    from beaver.component.rollingupgrade.ruCommon import hdpSelect
    # The slider client is always switched on the HDFS gateway host,
    # regardless of the node argument.
    gateway = HDFS.getGateway()
    hdpSelect.changeVersion("slider-client", version, gateway)
def downgrade_master(cls, version, config=None):
    '''
    Downgrade Master services
    :param version: Version to be downgraded to
    :param config: Configuration location (currently unused here)
    '''
    from beaver.component.rollingupgrade.ruCommon import hdpSelect
    # NOTE(review): the original also did
    #   from beaver.component.phoenix import Phoenix
    # here without ever using it; the unused import has been removed.
    # No host argument: phoenix-client is switched on the default host.
    hdpSelect.changeVersion("phoenix-client", version)
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress(
        "[INFO][PHOENIX][Smoke]Phoenix MasterNode Downgrade Finished")
def upgrade_master(cls, version, config=None):
    '''
    Upgrades Yarn Master services: RM and JHS
    :param version: Version to be upgraded to
    :param config: Config location
    '''
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    # --- Job History Server ---
    UpgradePerNode.reportProgress(
        "[INFO][YARN][Upgrade] Job history server upgrade started ")
    logger.info("**** Begin history server upgrade ****")
    MAPRED.stopHistoryserver()
    historyNode = MAPRED.getHistoryserver()
    # TODO use hdp-select script to upgrade JHS
    from beaver.component.rollingupgrade.ruCommon import hdpSelect
    hdpSelect.changeVersion("hadoop-mapreduce-historyserver", version, historyNode)
    MAPRED.startHistoryserver(config=config)
    time.sleep(5)
    logger.info(
        "**** End history server upgrade, begin timeline server upgrade ****"
    )
    UpgradePerNode.reportProgress(
        "[INFO][YARN][Upgrade]Job history server upgrade finished ")
    # --- Timeline Server (ATS) ---
    UpgradePerNode.reportProgress(
        "[INFO][YARN][Upgrade] Timeline server upgrade started ")
    ytsNode = YARN.getATSHost()
    YARN.stopATSServer(ytsNode)
    hdpSelect.changeVersion("hadoop-yarn-timelineserver", version, ytsNode)
    YARN.startATSServer(config=config)
    time.sleep(5)
    logger.info("**** End timeline server upgrade, begin rm upgrade ****")
    UpgradePerNode.reportProgress(
        "[INFO][YARN][Upgrade] Timeline server upgrade finished ")
    # --- Resource Manager ---
    UpgradePerNode.reportProgress(
        "[INFO][YARN][Upgrade] Resource manager upgrade started ")
    if YARN.isHAEnabled():
        # Standby is listed (and therefore flipped) before the active RM.
        nodes = []
        nodes.append(YARN.getRMHostByState('standby'))
        nodes.append(YARN.getRMHostByState('active'))
        for node in nodes:
            YARN.resetResourceManager('stop', config=config, host=node)
            time.sleep(2)
            hdpSelect.changeVersion("hadoop-yarn-resourcemanager", version, node)
            YARN.resetResourceManager('start', config=config, host=node)
            time.sleep(5)
    else:
        # Non-HA: single RM, addressed via the (legacy) jobtracker accessors.
        node = MAPRED.getJobtracker()
        MAPRED.stopJobtracker()
        time.sleep(2)
        # TODO use hdp-select script to upgrade JHs
        hdpSelect.changeVersion("hadoop-yarn-resourcemanager", version, node)
        MAPRED.startJobtracker(config=config)
        time.sleep(5)
    logger.info("*** Ending RM upgrade ****")
    UpgradePerNode.reportProgress(
        "[INFO][YARN][Upgrade] Resource manager Upgrade Finished")
def downgrade_master(cls, latestVersion, config, currVersion):
    '''
    Downgrade ranger-usersync: stop the service, flip the package via
    hdp-select, restart, and verify the startup banner in stdout.

    :param latestVersion: Version to switch ranger-usersync to
    :param config: Config location (not used)
    :param currVersion: Currently installed version (not used)
    '''
    from beaver.component.xa import Xa
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress(
        '#### ranger-usersync: downgrading to old version.. ####')
    admin_user = Machine.getAdminUser()
    admin_host = Xa.getPolicyAdminHost()
    xa_usersync_stop_cmd = 'sudo service ranger-usersync stop'
    xa_usersync_start_cmd = 'sudo service ranger-usersync start'
    # Stop the running usersync service.
    UpgradePerNode.reportProgress(
        '#### ranger-usersync: stopping with command %s ####' % xa_usersync_stop_cmd)
    exit_code, stdout = Machine.runas(
        admin_user, xa_usersync_stop_cmd, host=admin_host, logoutput=True)
    logger.info('****************** xa usersync stop exit_code = ' + str(exit_code))
    # Switch the package version.
    from beaver.component.rollingupgrade.ruCommon import hdpSelect
    hdpSelect.changeVersion("ranger-usersync", latestVersion, admin_host)
    logger.info('*************************** hdp-select to new version done = ')
    # Restart and check the service banner for a clean start.
    UpgradePerNode.reportProgress(
        '#### ranger-usersync: starting with command %s ####' % xa_usersync_start_cmd)
    exit_code, stdout = Machine.runas(
        admin_user, xa_usersync_start_cmd, host=admin_host, logoutput=True)
    logger.info('****************** xa usersync start exit_code = ' + str(exit_code))
    logger.info('****************** xa usersync start stdout = ' + str(stdout))
    started_ok = (
        exit_code == 0
        and 'UnixAuthenticationService has started successfully.' in stdout)
    if started_ok:
        UpgradePerNode.reportProgress(
            '#### ranger-usersync: downgrade successful ####')
    else:
        UpgradePerNode.reportProgress(
            '#### ranger-usersync: downgrade failed! startup error_code=%d ####' % exit_code)
def downgrade_argus_admin(cls, latestVersion, config, currVersion):
    '''
    Downgrade ranger-admin: stop the service, flip the package via
    hdp-select, and restart.

    :param latestVersion: Version to switch ranger-admin to
    :param config: Config location (not used)
    :param currVersion: Currently installed version (not used)
    '''
    # Import Xa locally, consistent with the usersync downgrade path;
    # the original used Xa here without a visible local import.
    from beaver.component.xa import Xa
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress(
        '#### ranger-admin: downgrading to old version.. ####')
    # 'knox.crt'
    # 'cacertswithknox'
    node = Xa.getPolicyAdminHost()
    user = Machine.getAdminUser()
    xa_admin_stop_cmd = 'sudo service ranger-admin stop'
    xa_admin_start_cmd = 'sudo service ranger-admin start'
    UpgradePerNode.reportProgress(
        '#### ranger-admin: stopping with command %s ####' % xa_admin_stop_cmd)
    exit_code, stdout = Machine.runas(user, xa_admin_stop_cmd, host=node, logoutput=True)
    logger.info('****************** xa admin stop exit_code = ' + str(exit_code))
    # (The original re-fetched the policy admin host here; removed as a
    # redundant duplicate of the assignment above.)
    from beaver.component.rollingupgrade.ruCommon import hdpSelect
    hdpSelect.changeVersion("ranger-admin", latestVersion, node)
    logger.info('*************************** hdp-select to new version done = ')
    UpgradePerNode.reportProgress(
        '#### ranger-admin: starting with command %s ####' % xa_admin_start_cmd)
    exit_code, stdout = Machine.runas(user, xa_admin_start_cmd, host=node, logoutput=True)
    logger.info('****************** xa admin start exit_code = ' + str(exit_code))
    logger.info('****************** xa admin start stdout = ' + str(stdout))
    if exit_code == 0:
        UpgradePerNode.reportProgress(
            '#### ranger-admin: downgrade successful ####')
    else:
        UpgradePerNode.reportProgress(
            '#### ranger-admin: downgrade failed! startup exit_code=%d ####' % exit_code)
def upgrade_slave(cls, version, node, config=None):
    '''
    Upgrades Yarn slave services : NM
    :param version: Version to be upgraded to
    :param node: Slave Node
    :param config: Config location
    :return:
    '''
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress(
        "[INFO][YARN][Upgrade] Nodemanager Upgrade for %s started " % node)
    logger.info("**** Beginning upgrade for nodemanager on %s ****" % node)
    # Stop the NM (via the tasktracker-named accessor), flip version, restart.
    MAPRED.stopTasktrackers(nodes=[node])
    from beaver.component.rollingupgrade.ruCommon import hdpSelect
    hdpSelect.changeVersion("hadoop-yarn-nodemanager", version, node)
    # BUG-27328
    # make sure we set the linux container executor permissions
    if Hadoop.isSecure() and not Machine.isWindows():
        container_exec_cfg = os.path.join(
            Config.get('hadoop', 'HADOOP_CONF'), 'container-executor.cfg')
        # YARN_HOME points at the client dir; swap to the nodemanager dir
        # to find its container-executor binary.
        container_exec = os.path.join(
            Config.get('hadoop', 'YARN_HOME').replace("client", "nodemanager"),
            'bin', 'container-executor')
        # root:hadoop ownership, cfg locked to 400, binary set to 6050
        # (setuid+setgid) as required on secure clusters.
        cmd="chown root:hadoop %s %s ; chmod 400 %s ; chmod 6050 %s" % \
            (container_exec_cfg, container_exec, container_exec_cfg, container_exec)
        Machine.runas(
            Machine.getAdminUser(),
            cmd,
            host=node,
            cwd=None,
            env=None,
            logoutput=True,
            passwd=Machine.getAdminPasswd())
    MAPRED.startTasktrackers(config, nodes=[node])
    logger.info("**** Ending upgrade for nodemanager on %s ****" % node)
    UpgradePerNode.reportProgress(
        "[INFO][YARN][Upgrade] Nodemanager Upgrade on %s Finished " % node)
def downgrade_master(cls, version, config=None):
    '''
    Downgrade the Oozie server(s), one server at a time.

    :param version: Version to be downgraded to
    :param config: Configuration location
    '''
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    from beaver.component.rollingupgrade.ruCommon import hdpSelect
    from beaver.component.oozie import Oozie
    # Resolve this function's own name for the progress messages.
    function_name = inspect.stack()[0][3]
    UpgradePerNode.reportProgress("### Oozie %s has started ####" % function_name)
    for server in Oozie.getOozieServers():
        UpgradePerNode.reportProgress("### Oozie %s has started %s ####" %
                                      (server, function_name))
        # Stop, switch version, restart -- with settle time around each step.
        Oozie.resetOozie('stop', host=server)
        time.sleep(60)
        hdpSelect.changeVersion("oozie-server", version, server)
        Oozie.resetOozie('start', configDir=config, host=server)
        time.sleep(60)
        UpgradePerNode.reportProgress("### Oozie %s has finished %s ####" %
                                      (server, function_name))
def upgrade_master(cls, version, config=None):
    '''
    Upgrade the Oozie server(s) one at a time while a background job runs,
    then verify the job survived the upgrade and clean it up.

    :param version: Version to be upgraded to
    :param config: Config location
    '''
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    from beaver.component.rollingupgrade.ruCommon import hdpSelect
    from beaver.component.oozie import Oozie
    # Resolve this function's own name for the progress messages.
    function_name = inspect.stack()[0][3]
    # Kick off a background workload that must survive the rolling restart.
    cls.background_job_when_master_upgrade()
    for server in Oozie.getOozieServers():
        UpgradePerNode.reportProgress("### Oozie %s has started %s ####" %
                                      (server, function_name))
        # Stop, switch version, restart -- with settle time around each step.
        Oozie.resetOozie('stop', host=server)
        time.sleep(60)
        hdpSelect.changeVersion("oozie-server", version, server)
        Oozie.resetOozie('start', configDir=config, host=server)
        time.sleep(60)
        UpgradePerNode.reportProgress("### Oozie %s has finished %s ####" %
                                      (server, function_name))
    cls.verify_background_job_when_master_upgrade()
    cls.background_job_teardown_when_master_upgrade()
def downgrade_master(cls, version, config=None):
    '''
    Downgrade HDFS Master services
    :param version: Version to be downgraded to
    :param config: Configuration location
    '''
    from beaver.component.rollingupgrade.ruCommon import hdpSelect
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    if HDFS.isHAEnabled():
        UpgradePerNode.reportProgress(
            "[INFO][HDFS][Downgrade] HA Namenode Downgrade Started ")
        # Standby NN is listed (and therefore flipped) before the active one.
        nodes = []
        nodes.append(HDFS.getNamenodeByState('standby'))
        nodes.append(HDFS.getNamenodeByState('active'))
        for node in nodes:
            HDFS.resetNamenode('stop', host=node)
            HDFS.resetZkfc('stop', hosts=node.split())
            # BUG-25534: There is no package for zkfc. So just updating the NN is enough.
            hdpSelect.changeVersion("hadoop-hdfs-namenode", version, node)
            HDFS.resetNamenode('start', config=config, host=node)
            HDFS.resetZkfc('start', hosts=node.split())
            # lets make sure the NN is out of safemode before we proceed to the next namenode
            HDFS.waitForNNOutOfSafemode(options='-fs hdfs://%s:8020' % node)
        UpgradePerNode.reportProgress(
            "[INFO][HDFS][Downgrade] HA Namenode Downgrade Finished ")
        jn_nodes = HDFS.getJournalNodes()
        if len(jn_nodes) < 3:
            # Skip the JN flip with fewer than three JNs -- presumably to
            # avoid losing quorum during the rolling restart; TODO confirm.
            UpgradePerNode.reportProgress(
                "[INFO][HDFS][Downgrade] Less than three Journal Nodes. Not going to do Downgrade "
            )
            return
        #Loop through all the JNs and stop flip start one at a time
        hdfs_user = Config.get('hadoop', 'HDFS_USER')
        # NOTE(review): message says "Upgrade" on the downgrade path; kept as-is.
        UpgradePerNode.reportProgress(
            "[INFO][HDFS][Downgrade] HA Journal Node Upgrade Started")
        cls.ensure_all_jns_are_up(jn_nodes)
        #Loop through all the JNs and stop flip start one at a time
        for node in jn_nodes:
            Hadoop.resetService(hdfs_user, node, "journalnode", 'stop', binFolder="sbin")
            hdpSelect.changeVersion("hadoop-hdfs-journalnode", version, node)
            Hadoop.resetService(hdfs_user, node, "journalnode", 'start', binFolder="sbin")
            time.sleep(5)
            # Re-check all JNs after each restart before touching the next one.
            cls.ensure_all_jns_are_up(jn_nodes)
        cls.ensure_nn_is_active()
        UpgradePerNode.reportProgress(
            "[INFO][HDFS][Downgrade] HA Journal Node Downgrade Finished ")
    else:
        ## TODO add code to upgrade SNN
        UpgradePerNode.reportProgress(
            "[INFO][HDFS][Downgrade] Namenode Downgrade Started ")
        node = HDFS.getNamenode()
        HDFS.stopNamenode()
        hdpSelect.changeVersion("hadoop-hdfs-namenode", version, node)
        HDFS.startNamenode(config=config)
        UpgradePerNode.reportProgress(
            "[INFO][HDFS][Downgrade] Namenode Downgrade Finished ")
        # upgrade SNN
        UpgradePerNode.reportProgress(
            "[INFO][HDFS][Downgrade]Secondary Namenode Downgrade Started ")
        node = HDFS.getSecondaryNamenode()
        HDFS.stopSecondaryNamenode()
        hdpSelect.changeVersion("hadoop-hdfs-secondarynamenode", version, node)
        HDFS.startSecondaryNamenode(config=config)
        UpgradePerNode.reportProgress(
            "[INFO][HDFS][Downgrade] Secondary Namenode Downgrade Finished"
        )
def upgrade_argus_admin(cls, latestVersion, config, currVersion):
    '''
    Upgrade the ranger-admin (argus policy admin) service.

    :param latestVersion: Version to upgrade ranger-admin to
    :param config: Config location (not used here)
    :param currVersion: Currently installed version -- source of the
        install.properties and Knox cert files carried into the new install
    '''
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    UpgradePerNode.reportProgress('#### install argus admin. ####')
    # 'knox.crt'
    # 'cacertswithknox'
    # NOTE(review): Xa is used below but not imported locally; assumed to
    # be available at module level -- verify.
    node = Xa.getPolicyAdminHost()
    user = Machine.getAdminUser()
    xa_admin_stop_cmd = 'sudo service ranger-admin stop'
    xa_admin_start_cmd = 'sudo service ranger-admin start'
    ranger_old_version = '/usr/hdp/' + currVersion + '/ranger-admin/'
    ranger_new_version = '/usr/hdp/' + latestVersion + '/ranger-admin/'
    localTestWorkDir1 = os.path.join(Config.getEnv('ARTIFACTS_DIR'))
    # Save the Knox certificate files out of the old install; they are
    # copied back into the new install after setup.sh succeeds.
    knox_cert_files = '{knox.crt,cacertswithknox}'
    source_files_to_copy = os.path.join(ranger_old_version, knox_cert_files)
    command_copy_knoxcerts = 'cp -f %s %s' % (source_files_to_copy, localTestWorkDir1)
    exit_code, stdout = Machine.runas(user, command_copy_knoxcerts, host=node, logoutput=True)
    logger.info(
        '*************************** admin copy command_copy_knoxcerts exit_code = '
        + str(exit_code))
    logger.info(
        '*************************** admin copy command_copy_knoxcerts stdout = '
        + str(stdout))
    # Stop the old ranger-admin service.
    exit_code, stdout = Machine.runas(user, xa_admin_stop_cmd, host=node, logoutput=True)
    logger.info('****************** xa admin stop exit_code = ' + str(exit_code))
    # Carry the old install.properties into the new version before setup.
    source_properties_file = os.path.join(ranger_old_version, 'install.properties')
    destination_properties_file = os.path.join(ranger_new_version, 'install.properties')
    command = 'cp -f %s %s' % (source_properties_file, destination_properties_file)
    exit_code, stdout = Machine.runas(user, command, host=node, logoutput=True)
    logger.info(
        '*************************** admin copy command exit_code = ' + str(exit_code))
    logger.info(
        '*************************** admin copy command stdout = ' + str(stdout))
    # Run the new version's setup.sh with JAVA_HOME exported.
    command = '(cd %s && export JAVA_HOME=%s && ./setup.sh)' % (
        ranger_new_version, Machine.getJavaHome())
    UpgradePerNode.reportProgress(
        '#### ranger-admin: installing new version with command %s ###' % command)
    exit_code, stdout = Machine.runas(user, command, host=node, logoutput=True)
    # Success requires both a zero exit code and the completion banner in stdout.
    if exit_code == 0 and 'Installation of XASecure PolicyManager Web Application is completed.' in stdout:
        UpgradePerNode.reportProgress(
            '#### ranger-admin: installation successful ###')
        from beaver.component.rollingupgrade.ruCommon import hdpSelect
        hdpSelect.changeVersion("ranger-admin", latestVersion, node)
        logger.info(
            '*************************** ranger-admin: hdp-select to new version done = '
        )
        # Restore the saved Knox cert files into the new install.
        source_files_to_copy = os.path.join(localTestWorkDir1, knox_cert_files)
        command_copy_knoxcerts = 'cp -f %s %s' % (source_files_to_copy, ranger_new_version)
        exit_code, stdout = Machine.runas(user, command_copy_knoxcerts, host=node, logoutput=True)
        logger.info(
            '*************************** admin copy command_copy_knoxcerts back exit_code = '
            + str(exit_code))
        logger.info(
            '*************************** admin copy command_copy_knoxcerts back stdout = '
            + str(stdout))
        UpgradePerNode.reportProgress(
            '#### ranger-admin: starting new version with command %s ###' % xa_admin_start_cmd)
        exit_code, stdout = Machine.runas(user, xa_admin_start_cmd, host=node, logoutput=True)
        logger.info('****************** xa admin start exit_code = ' + str(exit_code))
        logger.info('****************** xa admin start stdout = ' + str(stdout))
        if exit_code == 0:
            UpgradePerNode.reportProgress(
                '#### ranger-admin: new version started successfully ####')
        else:
            UpgradePerNode.reportProgress(
                '#### ranger-admin: failed to start new version! exit_code=%d ####' % exit_code)
    else:
        logger.info(
            '****************** setup.sh script failed for admin ******* ')
        UpgradePerNode.reportProgress(
            '#### ranger-admin: installation of new version failed! exit_code=%d ###' % exit_code)
def restartOneNode(cls, node, version, upgrade=True):
    '''
    Stop and restart one node with selected version
    :param node: ZooKeeper server host to flip
    :param version: Version to switch zookeeper-server to
    :param upgrade: True for Upgrade, False for Downgrade
    :return:
    '''
    from beaver.component.zookeeper import Zookeeper
    from beaver.component.rollingupgrade.ruCommon import hdpSelect
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    version_change = "Upgrade"
    if not upgrade:
        version_change = "Downgrade"
    UpgradePerNode.reportProgress(
        "[INFO][Zookeeper][Upgrade] Zookeeper %s on %s started " %
        (version_change, node))
    logger.info("Stopping %s" % node)
    Zookeeper.runZKServer("stop", node)
    logger.info("Changing version on %s" % node)
    hdpSelect.changeVersion("zookeeper-server", version, node)
    logger.info("Starting %s" % node)
    Zookeeper.runZKServer("start", node)
    # Now make sure ZK is usable after restart: create, ls, then delete a
    # smoke-test znode, retrying while the server comes up.
    retry_limit = 100
    retry = 0
    while retry < retry_limit:
        exit_code, stdout = Zookeeper.runZKCli(
            "create /zk_smoketest_during_restart", server=node)
        m = re.search('exists', stdout)
        if exit_code == 0:
            break
        elif m is not None:
            # BUGFIX: delete the znode this loop actually creates
            # (/zk_smoketest_during_restart). The original deleted
            # /zk_smoketest_during_upgrade, so a stale "node exists"
            # condition could never be cleared and the loop would
            # exhaust its retries.
            Zookeeper.runZKCli("delete /zk_smoketest_during_restart",
                               server=node)
        logger.info(
            "Couldn't create a znode after restart. Will retry. Sleeping..."
        )
        time.sleep(1)
        retry += 1
    if retry == retry_limit:
        UpgradePerNode.reportProgress(
            "[FAILED][Zookeeper][Upgrade] Zookeeper Upgrade on %s Failed " % node)
        return
    retry_limit = 100
    retry = 0
    while retry < retry_limit:
        exit_code, stdout = Zookeeper.runZKCli(
            "ls /zk_smoketest_during_restart", server=node)
        if exit_code == 0:
            break
        logger.info("Couldn't ls a znode after restart. Will retry. Sleeping...")
        time.sleep(1)
        retry += 1
    if retry == retry_limit:
        UpgradePerNode.reportProgress(
            "[FAILED][Zookeeper][Upgrade] Zookeeper Upgrade on %s Failed " % node)
        return
    # Clean up the smoke-test znode; failure to delete counts as a failure.
    exit_code, stdout = Zookeeper.runZKCli(
        "delete /zk_smoketest_during_restart", server=node)
    if exit_code != 0:
        UpgradePerNode.reportProgress(
            "[FAILED][Zookeeper][Upgrade] Zookeeper Upgrade on %s Failed " % node)
        return
    logger.info("**** Ending upgrade for Zookeeper on %s ****" % node)
    UpgradePerNode.reportProgress(
        "[INFO][Zookeeper][Upgrade] Zookeeper Upgrade on %s Finished " % node)
def switch_master_version(cls, action, version, config=None):
    '''
    Switches Hive master services' version
    :param action: Whether to "upgrade" or "downgrade"
    :param version: Version to be switched to
    :param config: Configuration location
    '''
    from beaver.component.rollingupgrade.ruCommon import hdpSelect
    from beaver.component.hive import Hive
    # Remember the pre-switch version so the old HS2 can be deregistered later.
    currentHiveVersion = Hive.getVersion()
    if action == 'upgrade':
        # Backup the database used by the Hive Metastore
        logger.info(
            "Performing backup of the Hive Metastore DB before starting the upgrade"
        )
        Hive.backupMetastoreDB(cls._metastore_backup_file)
    node = Hive.getHiveHost()
    # Stop the old Hive Metastore
    logger.info("Stopping the Hive Metastore")
    Hive.stopService(services=["metastore"])
    # Upgrade Hive Metastore servers to new version
    hdpSelect.changeVersion("hive-metastore", version, node)
    if action == 'upgrade':
        # Schema migration only applies on upgrade, not downgrade.
        logger.info("Upgrading the Hive metastore schema")
        Hive.upgradeSchema()
    # Restart Hive Metastore servers one at a time
    logger.info("Restarting the Hive Metastore")
    Hive.startService(services=["metastore"])
    # Start new Hive Server 2 instance
    confHS2Port = Hive.getHiveserver2ThriftPort()
    # Pick a free port so the new HS2 can run alongside the old one.
    hs2port = util.getNextAvailablePort(node, confHS2Port)
    hdpSelect.changeVersion("hive-server2", version, node)
    Hive.modifyConfig(config, services=['hiveserver2'], restartService=False)
    logger.info(
        "Starting a new HiveServer2 at port '%d' for assisting rolling-upgrade"
        % hs2port)
    if hs2port != confHS2Port:
        # Point the new HS2 at the alternate port via hive-site.xml.
        changes = {'hive-site.xml': {'hive.server2.thrift.port': hs2port}}
        Hive.modifyConfig(changes, services=["hiveserver2"], restartService=False)
    Hive.startService(services=["hiveserver2"])
    # Track both ports as live HS2 endpoints during the transition.
    cls._hs2_live_ports = [Hive.getHiveserver2ThriftPort(), hs2port]
    # Deregister the old Hive Server 2 instances
    logger.info("Deregistering the HiveServer2 on version '%s'" %
                currentHiveVersion)
    Hive.deregisterHiveServer2(version=currentHiveVersion)
    from beaver.component.hcatalog import Hcatalog
    # Stop the old WebHCat server
    logger.info("Stopping the WebHCat server")
    node = Config.get('templeton', 'TEMPLETON_HOST', default=Machine.getfqdn())
    # NOTE(review): webhcatPort is read but never used below.
    webhcatPort = Config.get('templeton', 'TEMPLETON_PORT', default="50111")
    # Stop the old WebHCat server
    logger.info("Stop the WebHCat server")
    Hcatalog.stop(node)
    # Upgrade WebHCat to the new version
    hdpSelect.changeVersion("hive-webhcat", version, node)
    # Start the WebHCat server
    logger.info("Restarting the WebHCat server")
    newConfDir = os.path.join(Config.getEnv('ARTIFACTS_DIR'), 'localWebhcatConf')
    if os.path.exists(newConfDir):
        # Use the locally-staged WebHCat conf dir when one exists.
        Hcatalog.start(node, hcat_confdir=newConfDir)
    else:
        Hcatalog.start(node)
def upgrade_master(cls, latestVersion, config, currVersion):
    '''
    Upgrade the ranger-usersync service to a new version.

    :param latestVersion: Version to upgrade ranger-usersync to
    :param config: Config location (not used here)
    :param currVersion: Currently installed version -- source of the
        install.properties carried into the new install
    '''
    from beaver.component.xa import Xa
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    # NOTE(review): message says "argus admin" but this routine handles usersync.
    UpgradePerNode.reportProgress('#### install argus admin. ####')
    # 'knox.crt'
    # 'cacertswithknox'
    user = Machine.getAdminUser()
    xa_usersync_stop_cmd = 'sudo service ranger-usersync stop'
    xa_usersync_start_cmd = 'sudo service ranger-usersync start'
    ranger_old_version = '/usr/hdp/' + currVersion + '/ranger-usersync/'
    ranger_new_version = '/usr/hdp/' + latestVersion + '/ranger-usersync/'
    # Stop the old usersync service.
    exit_code, stdout = Machine.runas(
        user, xa_usersync_stop_cmd, host=Xa.getPolicyAdminHost(), logoutput=True)
    logger.info('****************** xa usersync stop exit_code = ' + str(exit_code))
    # Carry the old install.properties into the new version before setup.
    source_properties_file = os.path.join(ranger_old_version, 'install.properties')
    destination_properties_file = os.path.join(ranger_new_version,
                                               'install.properties')
    command = 'cp -f %s %s' % (source_properties_file, destination_properties_file)
    exit_code, stdout = Machine.runas(
        user, command, host=Xa.getPolicyAdminHost(), logoutput=True)
    logger.info(
        '*************************** usersync copy command exit_code = '
        + str(exit_code))
    logger.info(
        '*************************** usersync copy command stdout = '
        + str(stdout))
    # Run the new version's setup.sh with JAVA_HOME exported.
    command = '(cd %s && export JAVA_HOME=%s && ./setup.sh)' % (
        ranger_new_version, Machine.getJavaHome())
    UpgradePerNode.reportProgress(
        '#### ranger-usersync: installing new version with command %s ###' % command)
    exit_code, stdout = Machine.runas(
        user, command, host=Xa.getPolicyAdminHost(), logoutput=True)
    if exit_code == 0:
        UpgradePerNode.reportProgress(
            '#### ranger-usersync: installation successful ###')
        node = Xa.getPolicyAdminHost()
        from beaver.component.rollingupgrade.ruCommon import hdpSelect
        hdpSelect.changeVersion("ranger-usersync", latestVersion, node)
        logger.info(
            '*************************** hdp-select to new version done = '
        )
        # Restart and verify the startup banner appears in stdout.
        UpgradePerNode.reportProgress(
            '#### ranger-usersync: starting new version with command %s ###'
            % xa_usersync_start_cmd)
        exit_code, stdout = Machine.runas(
            user, xa_usersync_start_cmd, host=node, logoutput=True)
        logger.info('****************** xa usersync start exit_code = ' + str(exit_code))
        logger.info('****************** xa usersync start stdout = ' + str(stdout))
        if exit_code == 0 and 'UnixAuthenticationService has started successfully.' in stdout:
            UpgradePerNode.reportProgress(
                '#### ranger-usersync: new version started successfully ###'
            )
        else:
            UpgradePerNode.reportProgress(
                '#### ranger-usersync: new version failed to start! exit_code=%d ###'
                % exit_code)
    else:
        logger.info(
            '****************** setup.sh script failed for usersync ******* '
        )
        UpgradePerNode.reportProgress(
            '#### ranger-usersync: installation of new version failed! exit_code=%d ###'
            % exit_code)