def setup_capacity_scheduler(cls, components):
    """
    Setup yarn capacity scheduler based on components.

    This API is not called during setup_module. Splits the root queue
    capacity evenly across the given components (plus an optional
    "default" queue when RuSetup._defaultQueue is set, minus any queues
    listed in RuSetup._skipQueue).

    :param components: list of components
    :type components: list of str
    :return: path to the modified capacity-scheduler.xml
    """
    # Work on a local copy so the caller's list is never mutated.
    # The previous version appended "default" to the caller's list and
    # relied on a trailing remove() to undo it; when _skipQueue was set
    # the local name was rebound to a new list first, so the remove()
    # operated on the wrong list and "default" leaked into the caller's
    # list (and remove() could raise ValueError if "default" was skipped).
    queues = list(components)
    if RuSetup._defaultQueue:
        queues.append("default")
    logger.info("*** setup_capacity_scheduler ***")
    if RuSetup._skipQueue is not None:
        logger.info("Components do not have a queue: " + str(RuSetup._skipQueue))
        queues = list(set(queues) - RuSetup._skipQueue)
    logger.info("components = %s" % queues)
    numComponents = len(queues)
    # Even split of the root queue; two decimals to keep the XML tidy.
    percentPerQueue = 100.0 / numComponents
    percentPerQueueStr = "{0:0.2f}".format(percentPerQueue)
    xmlDict = {}
    xmlDict["yarn.scheduler.capacity.root.queues"] = ",".join(queues)
    for component in queues:
        xmlDict["yarn.scheduler.capacity.root.%s.capacity" % component] = percentPerQueueStr
        xmlDict["yarn.scheduler.capacity.root.%s.user-limit-factor" % component] = 1
        xmlDict["yarn.scheduler.capacity.root.%s.maximum-capacity" % component] = percentPerQueueStr
        xmlDict["yarn.scheduler.capacity.root.%s.state" % component] = "RUNNING"
        xmlDict["yarn.scheduler.capacity.root.%s.acl_submit_jobs" % component] = "*"
        xmlDict["yarn.scheduler.capacity.root.%s.acl_administer_jobs" % component] = "*"
    util.dumpTextString(xmlDict, "====== PLANNED QUEUES ======", "==================")
    master_capacity_file = os.path.join(
        Config.getEnv("WORKSPACE"), "tests", "rolling_upgrade", "yarn", "data", "capacity-scheduler.xml"
    )
    modified_capacity_file = os.path.join(Config.getEnv("ARTIFACTS_DIR"), "capacity-scheduler.xml")
    # Start from the master template, then overlay the planned queues.
    Machine.copy(master_capacity_file, modified_capacity_file)
    util.writePropertiesToConfigXMLFile(modified_capacity_file, modified_capacity_file, xmlDict)
    #util.dumpText(modified_capacity_file, "====== capacity-scheduler.xml ======", "==================")
    return modified_capacity_file
def write_hbase_site(cls, hbase_site):
    """
    Write the given hbase-site.xml content to the live HBase conf dir.

    Writes *hbase_site* to a temp file in the artifacts dir, overlays
    the hbase.tmp.dir property, then copies the result into
    HBASE_CONF_DIR as the admin user.

    :param hbase_site: full hbase-site.xml file content
    :type hbase_site: str
    :return: path of the temp copy kept in the artifacts dir
    """
    tmpHBaseConfFile = os.path.join(Config.getEnv('ARTIFACTS_DIR'), "hbase-site.xml")
    # 'with' guarantees the handle is closed even if write() raises
    # (the previous open/write/close leaked the handle on failure).
    with open(tmpHBaseConfFile, "w") as fout:
        fout.write(hbase_site)
    propertyMap = {'hbase.tmp.dir': '/tmp/hbase-tmp'}
    util.writePropertiesToConfigXMLFile(tmpHBaseConfFile, tmpHBaseConfFile, propertyMap)
    hbase_conf = Config.get('hbase', 'HBASE_CONF_DIR')
    hbaseConfFile = os.path.join(hbase_conf, "hbase-site.xml")
    # Installing into the real conf dir needs admin credentials.
    Machine.copy(tmpHBaseConfFile, hbaseConfFile, user=Machine.getAdminUser(), passwd=Machine.getAdminPasswd())
    return tmpHBaseConfFile
def write_hbase_site(cls, config=None):
    '''
    Obtain hbase-site.xml from Slider HBase and write to modified config path
    '''
    global new_conf_path
    from beaver.component.sliderhbase import SliderHBase
    from beaver.component.slider import Slider
    tmpHBaseConfFile = os.path.join(Machine.getTempDir(), "hbase-site.xml")
    # Common registry arguments; secure clusters additionally pass the
    # user flag so the call runs under the HBase user's identity.
    registry_kwargs = {
        "flags": "--getconf hbase-site --out " + tmpHBaseConfFile,
        "format": "xml",
        "user": cls.HBASE_USER
    }
    if Hadoop.isSecure():
        registry_kwargs["userFlag"] = cls.HBASE_USER
    hbasesite = Slider.registry("sliderhbase", **registry_kwargs)
    propertyMap = {'hbase.tmp.dir': '/tmp/hbase-tmp'}
    generated_hbase_conf = os.path.join(new_conf_path, "hbase-site.xml")
    # QE-3108
    # if the hbase app is created successfully, a /tmp/hbase-site.xml will be
    # generated from the app. Otherwise if the /tmp/hbase-site.xml is missing
    # it means the slider app creation fails
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    if not os.path.isfile(generated_hbase_conf):
        UpgradePerNode.reportProgress(
            "[FAILED][Slider][background] Slider hbase background setup failed due to hbase-site.xml not generated"
        )
    else:
        util.writePropertiesToConfigXMLFile(tmpHBaseConfFile, generated_hbase_conf, propertyMap)
def smoke_test_setup(cls):
    '''
    Setup required to run Smoke test.

    Kinits as the headless user on secure clusters, downloads the Slider
    source, stages a local conf dir (log4j.properties, slider-env.sh,
    slider-client.xml), then runs a maven "clean install" so isolated
    modules can be tested afterwards. Progress and failures are reported
    via UpgradePerNode.reportProgress.

    :return: None
    '''
    from beaver.component.slider import Slider
    from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
    HADOOPQA_USER = Config.get('hadoop', 'HADOOPQA_USER')
    UpgradePerNode.reportProgress("### Starting set up for Slider smoke test ####")
    if Hadoop.isSecure():
        keytabFile = Machine.getHeadlessUserKeytab(HADOOPQA_USER)
        kinitloc = Machine.which("kinit", "root")
        cmd = "%s -k -t %s %s" % (kinitloc, keytabFile, Machine.get_user_principal(HADOOPQA_USER))
        exit_code, stdout = Machine.run(cmd)
        if exit_code != 0:
            # NOTE(review): setup continues after a kinit failure, matching
            # the original behavior; only a progress report is emitted.
            UpgradePerNode.reportProgress("### smoke test setup for Slider failed due to kinit failed ####")
    # Local directory in artifacts that we'll run tests from
    # it is possible the download_source() will fail
    try:
        cls._LOCAL_WORK_DIR = Slider.download_source(useHDPBaseRepoFile=False, isRUcluster=True)
        logger.info("Local work dir = %s" % cls._LOCAL_WORK_DIR)
    except TypeError:
        UpgradePerNode.reportProgress("[FAILED][Slider][Smoke] Slider Source download fail.")
        return
    if not cls._LOCAL_WORK_DIR:
        UpgradePerNode.reportProgress("[FAILED][Slider][Smoke] Slider Source Code missing.")
        return
    # Local conf directory with modified conf for tests
    if not os.path.exists(cls._LOCAL_CONF_DIR):
        os.makedirs(cls._LOCAL_CONF_DIR)
    slider_conf = os.path.join(Slider.getSliderHome(), 'conf')
    logger.info("slider_conf = %s" % slider_conf)
    logger.info("os path exist slider_conf = %s" % os.path.exists(slider_conf))
    if os.path.exists(slider_conf):
        Machine.copy(
            os.path.join(slider_conf, 'log4j.properties'), os.path.join(cls._LOCAL_CONF_DIR, 'log4j.properties')
        )
        Machine.copy(
            os.path.join(slider_conf, 'slider-env.sh'), os.path.join(cls._LOCAL_CONF_DIR, 'slider-env.sh')
        )
    else:
        UpgradePerNode.reportProgress("[FAILED][Slider][Smoke] Slider Conf %s missing" % slider_conf)
        return
    # Build the slider-client.xml properties once; the secure case just
    # adds the keytab/principal entries on top of the common set.
    # (Previously the whole writePropertiesToConfigXMLFile call was
    # duplicated per branch with near-identical dicts.)
    client_props = {
        "slider.funtest.enabled": "true",
        "slider.test.agent.enabled": "true",
        "HADOOP_CONF_DIR": Config.get('hadoop', 'HADOOP_CONF')
    }
    if Hadoop.isSecure():
        client_props["slider.am.keytab.local.path"] = Machine.getHeadlessUserKeytab(HADOOPQA_USER)
        client_props["slider.keytab.principal.name"] = Machine.get_user_principal(HADOOPQA_USER)
    util.writePropertiesToConfigXMLFile(
        os.path.join(Slider.getSliderHome(), 'conf', 'slider-client.xml'),
        os.path.join(cls._LOCAL_CONF_DIR, 'slider-client.xml'), client_props
    )
    logger.info("Local work dir = %s" % cls._LOCAL_WORK_DIR)
    # Check that the precondition is met and the source is available
    if not os.path.exists(cls._LOCAL_WORK_DIR) or not os.path.exists(os.path.join(cls._LOCAL_WORK_DIR, 'pom.xml')):
        logger.info('Slider source does not appear to exist at %s' % (cls._LOCAL_WORK_DIR))
        UpgradePerNode.reportProgress(
            "### Slider source does not appear to exist at %s ####" % (cls._LOCAL_WORK_DIR)
        )
    logger.info("Local work dir = %s" % cls._LOCAL_WORK_DIR)
    if cls._LOCAL_WORK_DIR is None:
        logger.info("ERROR: cls._LOCAL_WORK_DIR is None")
    # Install first so isolated modules can be tested
    exit_code, stdout = Maven.run(
        "clean install -DskipTests "
        "-Dhadoop.version=%s "
        "-Dprivate.repo.url=%s " % (Hadoop.getVersion(), Maven.getPublicRepoUrl()),
        cwd=cls._LOCAL_WORK_DIR
    )
    if exit_code != 0:
        UpgradePerNode.reportProgress("### Error installing Slider source : %d: %s ####" % (exit_code, stdout))
    else:
        UpgradePerNode.reportProgress("### Slider source install passed ####")
def modifyConfig(  # pylint: disable=redefined-builtin
        changes, confDir, updatedConfDir, nodes, isFirstUpdate=True, makeCurrConfBackupInWindows=True, id=None):
    '''
    Modifies hadoop config or config with similar structure.
    Returns None.

    Linux:
    1. Create tmpModifyConfDir_<time> in artifacts dir based on source config directory in gateway
    2. Modify contents in created directory.
    3. Copy the directory to /tmp/hadoopConf in target machines

    Windows:
    1. If makeCurrConfBackupInWindows is True, backup current config first.
       Copy current config to artifacts/HDPStackBackupConfig
    2. Create tmpModifyConfDir_<time> in gateway.
    3. Modify contents in created directory.
    4. Copy the directory to target machines. Replace config in default locations in remote machines.

    Calling modifyConfig twice, changes will be cumulative.
    '''
    backuploc = getBackupConfigLocation(id=id)
    onWindows = Machine.type() == 'Windows'
    if onWindows and makeCurrConfBackupInWindows:
        # clean up the backup
        Machine.rm(None, Machine.getfqdn(), backuploc, isdir=True, passwd=None)
        util.copyReadableFilesFromDir(confDir, backuploc)
    if isFirstUpdate:
        # Fresh staging dir keyed by millisecond timestamp; remembered in
        # Config so later (cumulative) calls reuse the same directory.
        tmpConfDir = os.path.join(ARTIFACTS_DIR, 'tmpModifyConfDir_' + str(int(round(time.time() * 1000))))
        Config.set(PYTHON_CONFIG_NAME, TMP_CONF_DIR_VAR, tmpConfDir, overwrite=True)
    tmpConfDir = Config.get(PYTHON_CONFIG_NAME, TMP_CONF_DIR_VAR)
    if isFirstUpdate:
        util.copyReadableFilesFromDir(confDir, tmpConfDir)
    for filename, values in changes.items():
        filepath = os.path.join(tmpConfDir, filename)
        if not os.path.isfile(filepath):
            continue
        logger.info("Modifying file: %s", filepath)
        _fname, fext = os.path.splitext(filepath)
        # Dispatch on the file extension to the matching writer.
        if fext == ".xml":
            util.writePropertiesToConfigXMLFile(filepath, filepath, values)
        elif fext == ".json":
            util.writePropertiesToConfigJSONFile(filepath, filepath, values, ["global"], "site.hbase-site.")
        elif fext in (".properties", ".cfg"):
            util.writePropertiesToFile(filepath, filepath, values)
        elif fext == ".conf":
            util.writePropertiesToConfFile(filepath, filepath, values)
        elif fext == ".ini":
            # 'shiro.ini : {'section:prop' : 'val}
            util.writePropertiesToIniFile(filepath, filepath, values)
        elif fext in (".sh", ".cmd"):
            # each appended line is preceded by a newline
            util.writeToFile("".join("\n" + value for value in values), filepath, isAppend=True)
        elif fext == ".yaml":
            util.writeToFile("".join(k + " : " + v for k, v in values.iteritems()), filepath, isAppend=True)
        elif fext is None or fext == "" or fext == ".include":
            util.writeToFile("\n".join(values), filepath, isAppend=True)
    # in windows world copy the configs back to the src location
    if onWindows:
        for node in nodes:
            for filename in changes.keys():
                Machine.copyFromLocal(
                    None, node, os.path.join(tmpConfDir, filename), os.path.join(confDir, filename), passwd=None)
    else:
        for node in nodes:
            Machine.rm(
                user=Machine.getAdminUser(), host=node, filepath=updatedConfDir, isdir=True,
                passwd=Machine.getAdminPasswd())
            Machine.copyFromLocal(None, node, tmpConfDir, updatedConfDir)
def modifyConfigRemote(changes, OriginalConfDir, ConfDir, nodes, id=None):  # pylint: disable=redefined-builtin
    '''
    Modifies hadoop config or config with similar structure.
    Returns None.

    Linux:
    1. Create tmpModifyConfDir_<time> in artifacts dir based on source config directory in gateway
    2. Modify contents in created directory.
    3. Copy the directory to /tmp/hadoopConf in target machines
    '''
    _backuploc = getBackupConfigLocation(id=id)
    # Fresh staging dir keyed by millisecond timestamp, remembered in Config.
    tmpConfDir = os.path.join(ARTIFACTS_DIR, 'tmpModifyConfDir_' + str(int(round(time.time() * 1000))))
    Config.set(PYTHON_CONFIG_NAME, TMP_CONF_DIR_VAR, tmpConfDir, overwrite=True)
    tmpConfDir = Config.get(PYTHON_CONFIG_NAME, TMP_CONF_DIR_VAR)
    for node in nodes:
        # Per node: pull the original config locally, patch it, push it back.
        Machine.rm(Machine.getAdminUser(), node, ConfDir, isdir=True)
        Machine.rm(Machine.getAdminUser(), Machine.getfqdn(), tmpConfDir, isdir=True)
        logger.info("*** COPY ORIGINAL CONFIGS FROM REMOTE TO LOCAL ***")
        Machine.copyToLocal(None, node, OriginalConfDir, tmpConfDir)
        #if node == Machine.getfqdn():
        #    Machine.copy(OriginalConfDir,tmpConfDir)
        for filename, values in changes.items():
            filepath = os.path.join(tmpConfDir, filename)
            if not os.path.isfile(filepath):
                continue
            logger.info("Modifying file locally: %s", filepath)
            _fname, fext = os.path.splitext(filepath)
            # Dispatch on the file extension to the matching writer.
            if fext == ".xml":
                util.writePropertiesToConfigXMLFile(filepath, filepath, values)
            elif fext == ".json":
                util.writePropertiesToConfigJSONFile(filepath, filepath, values, ["global"], "site.hbase-site.")
            elif fext in (".properties", ".cfg"):
                util.writePropertiesToFile(filepath, filepath, values)
            elif fext == ".conf":
                util.writePropertiesToConfFile(filepath, filepath, values)
            elif fext in (".sh", ".cmd"):
                # each appended line is preceded by a newline
                util.writeToFile("".join("\n" + value for value in values), filepath, isAppend=True)
            elif fext == ".yaml":
                util.writeToFile("".join(k + " : " + v for k, v in values.iteritems()), filepath, isAppend=True)
            elif fext is None or fext == "" or fext == ".include":
                util.writeToFile("\n".join(values), filepath, isAppend=True)
        logger.info("****** Copy back the configs to remote ******")
        #if node!=Machine.getfqdn():
        Machine.copyFromLocal(None, node, tmpConfDir, ConfDir)
        Machine.chmod('777', ConfDir, recursive=True, host=node)