def preInstallOnHosts(self):
    """
    Run gs_preinstall on every new standby host and abort the whole
    process if any of them reports a failure status.
    """
    self.logger.debug("Start to preinstall database step.\n")
    # map the new back-ip list to host names for the ssh fan-out
    standbyNames = [self.context.backIpNameMap[ip]
                    for ip in self.context.newHostList]
    xmlFile = "%s/clusterconfig.xml" % self.tempFileDir
    preinstallCmd = ("%s/script/gs_preinstall -U %s -G %s "
                     "-X %s --sep-env-file=%s "
                     "--non-interactive 2>&1"
                     % (self.context.packagepath, self.user, self.group,
                        xmlFile, self.envFile))
    sshTool = SshTool(standbyNames)
    status, output = sshTool.getSshStatusOutput(preinstallCmd, [],
                                                self.envFile)
    # any single host failing preinstall is fatal
    if STATUS_FAIL in status.values():
        GaussLog.exitWithError(output)
    self.logger.debug("End to preinstall database step.\n")
def checkAllStandbyState(self):
    """
    check all standby state whether switchover is happening

    For every reachable host whose instances are being dropped, verify no
    switchover/failover is in progress and then stop each instance; for
    every remaining host, only verify the state.  Exits the process with
    GAUSS_35808 if any instance reports an unsafe state.
    """
    # hosts scheduled for deletion: check state, then stop the instances
    for hostNameLoop in self.context.hostMapForDel.keys():
        # unreachable hosts cannot be checked or stopped; skip them
        if hostNameLoop not in self.context.failureHosts:
            sshtool_host = SshTool([hostNameLoop])
            for i in self.context.hostMapForDel[hostNameLoop]['datadir']:
                # check whether switchover/failover is happening
                if not self.commonOper.checkStandbyState(
                        hostNameLoop, i, sshtool_host,
                        self.userProfile, True):
                    GaussLog.exitWithError(
                        ErrorCode.GAUSS_358["GAUSS_35808"] % hostNameLoop)
                self.commonOper.stopInstance(hostNameLoop, sshtool_host, i,
                                             self.userProfile)
            self.cleanSshToolFile(sshtool_host)
    # hosts that stay in the cluster: state check only, no stop
    for hostNameLoop in self.context.hostMapForExist.keys():
        sshtool_host = SshTool([hostNameLoop])
        for i in self.context.hostMapForExist[hostNameLoop]['datadir']:
            # check whether switchover/failover is happening
            if not self.commonOper.checkStandbyState(
                    hostNameLoop, i, sshtool_host, self.userProfile):
                GaussLog.exitWithError(ErrorCode.GAUSS_358["GAUSS_35808"]
                                       % hostNameLoop)
        self.cleanSshToolFile(sshtool_host)
def validNodeInStandbyList(self):
    """
    Split the requested new hosts into "really new" and "already in the
    cluster" by parsing 'gs_om -t status --detail' output; keeps only the
    new ones in context.newHostList and exits when nothing is left.
    """
    self.logger.debug("Start to check if the nodes in standby list\n")
    localName = socket.gethostname()
    statusCmd = "su - %s -c 'source %s;gs_om -t status --detail'" % \
        (self.user, self.envFile)
    sshTool = SshTool([localName])
    resultMap, outputCollect = sshTool.getSshStatusOutput(
        statusCmd, [localName], self.envFile)
    self.logger.debug(outputCollect)

    pending = self.context.newHostList
    standbyHosts = []
    existHosts = []
    # drain the list from the tail, exactly like the original pop() loop
    while pending:
        hostIp = pending.pop()
        nodeName = self.context.backIpNameMap[hostIp]
        # lookup kept for its KeyError on unknown nodes; value unused here
        nodeInfo = self.context.clusterInfoDict[nodeName]
        dataNode = nodeInfo["dataNode"]
        pattern = r"(.*)%s[\s]*%s(.*)" % (nodeName, hostIp)
        if re.search(pattern, outputCollect):
            existHosts.append(hostIp)
        else:
            standbyHosts.append(hostIp)
    self.context.newHostList = standbyHosts

    if existHosts:
        self.logger.log("The nodes [%s] are already in the cluster. Skip expand these nodes." \
            % ",".join(existHosts))
    self.cleanSshToolFile(sshTool)
    if not standbyHosts:
        self.logger.log("There is no node can be expanded.")
        sys.exit(0)
def addTrustOnExistNodes(self):
    """
    Append a pg_hba 'trust' entry for every new node's back-ip on each
    node that already belongs to the cluster (via gs_guc).
    """
    self.logger.debug("Start to set host trust on existing node.")
    newNodeNames = []
    trustEntries = []
    for ip in self.context.newHostList:
        newNodeNames.append(self.context.backIpNameMap[ip])
        trustEntries.append('host all all %s/32 trust' % ip)
    # existing nodes = all cluster nodes minus the ones being added
    existNodes = list(set(self.context.nodeNameList)
                      .difference(set(newNodeNames)))
    for node in existNodes:
        dataNode = self.context.clusterInfoDict[node]["dataNode"]
        # one gs_guc invocation per trust entry, chained in a single cmd
        cmd = "".join("source %s; gs_guc set -D %s -h '%s';" %
                      (self.envFile, dataNode, entry)
                      for entry in trustEntries)
        sshTool = SshTool([node])
        resultMap, outputCollect = sshTool.getSshStatusOutput(
            cmd, [node], self.envFile)
        self.cleanSshToolFile(sshTool)
    self.logger.debug("End to set host trust on existing node.")
def initSshTool(self, nodeNames, timeout=0):
    """
    function: Init ssh tool
    input : nodeNames - list of host names the tool will talk to
            timeout   - ssh timeout in seconds; 0 means the SshTool default
    output: NA

    Stores the tool on self.sshTool, reusing the logger's log file so ssh
    output ends up in the same log as the rest of the operation.
    """
    self.sshTool = SshTool(nodeNames, self.logger.logFile, timeout)
def doStartCluster(self):
    """
    function: do start cluster
    input: NA
    output: NA

    Starts either the whole cluster or a single node (when
    g_opts.nodeName is set), then, for a full cluster start, polls
    'gs_om -t status' until cluster_state is Normal or the timeout
    elapses.  Raises Exception on any node failing to start or on
    timeout.
    """
    self.logger.debug("Operating: Starting.")
    # Specifies the stop node
    # Gets the specified node id
    startType = "node" if self.context.g_opts.nodeName != "" else "cluster"
    # Perform a start operation
    self.logger.log("Starting %s." % startType)
    self.logger.log("=========================================")
    hostName = DefaultValue.GetHostIpOrName()
    #get the newest dynaminc config and send to other node
    self.clusterInfo.checkClusterDynamicConfig(self.context.user, hostName)
    if self.context.g_opts.nodeName == "":
        hostList = self.clusterInfo.getClusterNodeNames()
    else:
        hostList = []
        hostList.append(self.context.g_opts.nodeName)
    # ssh tool always spans the whole cluster even for a one-node start
    self.sshTool = SshTool(self.clusterInfo.getClusterNodeNames(), None,
                           DefaultValue.TIMEOUT_CLUSTER_START)
    if self.time_out is None:
        time_out = DefaultValue.TIMEOUT_CLUSTER_START
    else:
        time_out = self.time_out
    cmd = "source %s; %s -U %s -R %s -t %s --security-mode=%s" % (
        self.context.g_opts.mpprcFile,
        OMCommand.getLocalScript("Local_StartInstance"),
        self.context.user, self.context.clusterInfo.appPath,
        time_out, self.context.g_opts.security_mode)
    if self.dataDir != "":
        cmd += " -D %s" % self.dataDir
    starttime = time.time()
    (statusMap, output) = self.sshTool.getSshStatusOutput(cmd, hostList)
    # every targeted node must report Success, otherwise fail fast
    for nodeName in hostList:
        if statusMap[nodeName] != 'Success':
            raise Exception(
                ErrorCode.GAUSS_536["GAUSS_53600"] % (cmd, output))
    if re.search("another server might be running", output):
        self.logger.log(output)
    if startType == "cluster":
        # poll until the cluster reports Normal or the timeout is used up
        cmd = "source %s; gs_om -t status|grep cluster_state|grep Normal" \
            % self.context.g_opts.mpprcFile
        while time.time() <= time_out + starttime:
            status = subprocess.getstatusoutput(cmd)[0]
            if status != 0:
                self.logger.log("Waiting for check cluster state...")
                time.sleep(5)
            else:
                break
        if time.time() > time_out + starttime:
            raise Exception(ErrorCode.GAUSS_516["GAUSS_51610"] % "cluster"
                            + "Start timeout, please check the process"
                              " status manually")
    self.logger.log("=========================================")
    self.logger.log("Successfully started.")
    self.logger.debug("Operation succeeded: Start.")
def buildInstance(self, host, datanode, mode, env):
    """
    Run 'gs_ctl build' for one datanode on the given host over ssh.
    Output is only logged; the caller is expected to verify the result.
    """
    buildCmd = "source %s ; gs_ctl build -D %s -M %s" % (env, datanode, mode)
    self.logger.debug(buildCmd)
    tool = SshTool([host])
    resultMap, outputCollect = tool.getSshStatusOutput(buildCmd, [host], env)
    self.logger.debug(host)
    self.logger.debug(outputCollect)
def checkUserAndGroupExists(self):
    """
    Check that the install user and group exist locally, that the user's
    primary group matches the given group, and that both the user and the
    group also exist on every new standby host.  Exits with GAUSS_35704
    on any missing entry.
    """
    localHost = socket.gethostname()

    # local user lookup: record the primary gid when found
    user_group_id = ""
    isUserExits = False
    for pwEntry in pwd.getpwall():
        if pwEntry.pw_name == self.user:
            user_group_id = pwEntry.pw_gid
            isUserExits = True
            break
    if not isUserExits:
        GaussLog.exitWithError(ErrorCode.GAUSS_357["GAUSS_35704"] \
            % ("User", self.user, localHost))

    # local group lookup
    group_id = ""
    isGroupExits = False
    for grEntry in grp.getgrall():
        if grEntry.gr_name == self.group:
            group_id = grEntry.gr_gid
            isGroupExits = True
    if not isGroupExits:
        GaussLog.exitWithError(ErrorCode.GAUSS_357["GAUSS_35704"] \
            % ("Group", self.group, localHost))
    if user_group_id != group_id:
        GaussLog.exitWithError("User [%s] is not in the group [%s]."\
            % (self.user, self.group))

    # remote checks: user and group must exist on every new standby host
    sshTool = SshTool(self.context.newHostList)
    remoteChecks = (
        ("cat /etc/passwd | grep -w %s" % self.user, "User", self.user),
        ("cat /etc/group | grep -w %s" % self.group, "Group", self.group),
    )
    for checkCmd, kind, name in remoteChecks:
        resultMap, outputCollect = sshTool.getSshStatusOutput(
            checkCmd, [], self.envFile)
        for hostKey in resultMap:
            if resultMap[hostKey] == STATUS_FAIL:
                self.cleanSshToolFile(sshTool)
                GaussLog.exitWithError(ErrorCode.GAUSS_357["GAUSS_35704"] \
                    % (kind, name, hostKey))
    self.cleanSshToolFile(sshTool)
def modifyStaticConf(self):
    """
    Modify the cluster static conf and save it

    Backs up the current static config, removes the dropped nodes from
    the in-memory cluster info, regenerates a per-node static config for
    every remaining node, and (unless only the primary is left) refreshes
    the dynamic config and scps each node's file into place.
    """
    self.logger.log(
        "[gs_dropnode]Start to modify the cluster static conf.")
    staticConfigPath = "%s/bin/cluster_static_config" % self.appPath
    # first backup, only need to be done on primary node
    tmpDir = DefaultValue.getEnvironmentParameterValue(
        "PGHOST", self.user, self.userProfile)
    cmd = "cp %s %s/%s_BACKUP" % (staticConfigPath, tmpDir,
                                  'cluster_static_config')
    (status, output) = subprocess.getstatusoutput(cmd)
    if status:
        # backup failure is logged but not fatal
        self.logger.debug(
            "[gs_dropnode]Backup cluster_static_config failed" + output)
    # reverse lookup: back-ip -> node name via parallel key/value lists
    backIpDict = self.context.backIpNameMap
    backIpDict_values = list(backIpDict.values())
    backIpDict_keys = list(backIpDict.keys())
    # drop the deleted nodes from the in-memory cluster topology
    for ipLoop in self.context.hostIpListForDel:
        nameLoop = backIpDict_keys[backIpDict_values.index(ipLoop)]
        dnLoop = self.context.clusterInfo.getDbNodeByName(nameLoop)
        self.context.clusterInfo.dbNodes.remove(dnLoop)
    # write the local node's config in place; other nodes get a staging
    # file named per node, scp'd below
    for dbNode in self.context.clusterInfo.dbNodes:
        if dbNode.name == self.localhostname:
            self.context.clusterInfo.saveToStaticConfig(
                staticConfigPath, dbNode.id)
            continue
        staticConfigPath_dn = "%s/cluster_static_config_%s" % (tmpDir,
                                                               dbNode.name)
        self.context.clusterInfo.saveToStaticConfig(
            staticConfigPath_dn, dbNode.id)
    self.logger.debug(
        "[gs_dropnode]Start to scp the cluster static conf to any other node."
    )
    if not self.context.flagOnlyPrimary:
        sshtool = SshTool(self.context.clusterInfo.getClusterNodeNames())
        # refreshconf runs locally; it distributes the dynamic config
        cmd = "%s/script/gs_om -t refreshconf" % self.gphomepath
        (status, output) = subprocess.getstatusoutput(cmd)
        self.logger.debug(
            "[gs_dropnode]Output of refresh dynamic conf :%s."
            % output)
        for hostName in self.context.hostMapForExist.keys():
            hostSsh = SshTool([hostName])
            if hostName != self.localhostname:
                staticConfigPath_name = "%s/cluster_static_config_%s" % (
                    tmpDir, hostName)
                hostSsh.scpFiles(staticConfigPath_name, staticConfigPath,
                                 [hostName], self.envFile)
                # staging file is no longer needed once copied
                try:
                    os.unlink(staticConfigPath_name)
                except FileNotFoundError:
                    pass
            self.cleanSshToolFile(hostSsh)
    self.logger.log("[gs_dropnode]End of modify the cluster static conf.")
def stopInstance(self, host, datanode, env):
    """
    Stop one datanode instance on the given host via 'gs_ctl stop'.
    The command output is logged; failures are not raised here.
    """
    stopCmd = "source %s ; gs_ctl stop -D %s" % (env, datanode)
    tool = SshTool([host])
    resultMap, outputCollect = tool.getSshStatusOutput(
        stopCmd, [host], env)
    self.logger.debug(host)
    self.logger.debug(outputCollect)
    self.cleanSshToolTmpFile(tool)
def startOmCluster(self, host, env):
    """
    om tool start cluster

    Runs 'gs_om -t start' on the given host; output is only logged.
    """
    startCmd = "source %s ; gs_om -t start" % env
    self.logger.debug(startCmd)
    tool = SshTool([host])
    resultMap, outputCollect = tool.getSshStatusOutput(startCmd,
                                                       [host], env)
    self.logger.debug(host)
    self.logger.debug(outputCollect)
def checkLocalModeOnStandbyHosts(self):
    """
    expansion the installed standby node. check standby database.
    1. if the database is normal
    2. if the databases version are same before existing and new

    Exits with GAUSS_35703 when an instance is in an unexpected mode and
    with GAUSS_35705 when a host's gaussdb version differs from the
    local one.
    """
    standbyHosts = self.context.newHostList
    envfile = self.envFile
    self.logger.log("Checking the database with locale mode.")
    for host in standbyHosts:
        hostName = self.context.backIpNameMap[host]
        dataNode = self.context.clusterInfoDict[hostName]["dataNode"]
        insType, dbStat = self.commonGsCtl.queryInstanceStatus(
            hostName, dataNode, self.envFile)
        if insType not in (MODE_PRIMARY, MODE_STANDBY, MODE_NORMAL):
            GaussLog.exitWithError(
                ErrorCode.GAUSS_357["GAUSS_35703"] %
                (hostName, self.user, dataNode, dataNode))

    allHostIp = [self.context.localIp] + list(standbyHosts)
    sshTool = SshTool(allHostIp)
    # collect the binary version from every host in one ssh fan-out
    getversioncmd = "gaussdb --version"
    resultMap, outputCollect = sshTool.getSshStatusOutput(
        getversioncmd, [], envfile)
    self.cleanSshToolFile(sshTool)

    # parse "[SUCCESS] <ip>:" headers followed by version lines
    versionDic = {}
    ipKey = None
    for verline in outputCollect.splitlines():
        if verline[0:9] == '[SUCCESS]':
            ipKey = verline[10:-1]
        elif ipKey is not None:
            # BUG FIX: skip lines before the first [SUCCESS] header and
            # lines too short to carry a version token (was IndexError).
            preVersion = verline.split(' ')
            if len(preVersion) > 4:
                versionDic[ipKey] = preVersion[4][:-2]

    # BUG FIX: the original compared against 'versionCompare', which was
    # only assigned when the local ip happened to be iterated first and
    # raised NameError otherwise.  Compare everything against the local
    # host's version explicitly instead.
    localVersion = versionDic.get(self.context.localIp, "")
    for hostip, version in versionDic.items():
        if hostip == self.context.localIp:
            continue
        if version != localVersion:
            GaussLog.exitWithError(ErrorCode.GAUSS_357["GAUSS_35705"] \
                %(hostip, version))
    self.logger.log("Successfully checked the database with locale mode.")
def clearTmpFile(self):
    """
    clear temporary file after expansion success

    Removes the expansion temp directory from every cluster node in one
    ssh fan-out; output is only logged.
    """
    self.logger.debug("start to delete temporary file")
    nodeList = self.context.nodeNameList
    tool = SshTool(nodeList)
    rmCmd = "source %s ; rm -rf %s" % (self.envFile, self.tempFileDir)
    result, output = tool.getSshStatusOutput(rmCmd, nodeList,
                                             self.envFile)
    self.logger.debug(output)
def reloadPrimaryConf(self):
    """
    Reload the configuration of the primary datanode via 'gs_ctl reload'
    so pg_hba/postgresql changes take effect without a restart.
    """
    primaryHost = self.getPrimaryHostName()
    dataNode = self.context.clusterInfoDict[primaryHost]["dataNode"]
    reloadCmd = "gs_ctl reload -D %s " % dataNode
    self.logger.debug(reloadCmd)
    tool = SshTool([primaryHost])
    resultMap, outputCollect = tool.getSshStatusOutput(
        reloadCmd, [primaryHost], self.envFile)
    self.logger.debug(outputCollect)
def queryOmCluster(self, host, env):
    """
    query om cluster detail with command:
    gs_om -t status --detail

    Returns the raw command output; the caller parses it.
    """
    statusCmd = "source %s ; gs_om -t status --detail" % env
    tool = SshTool([host])
    resultMap, outputCollect = tool.getSshStatusOutput(statusCmd,
                                                       [host], env)
    self.logger.debug(host)
    self.logger.debug(outputCollect)
    return outputCollect
def sendSoftToHosts(self):
    """
    create software dir and send it on each nodes

    Creates the package directory on every new standby host, scps the
    installation package there, and opens execute permission on the
    target directory for all users.
    """
    self.logger.debug("Start to send soft to each standby nodes.\n")
    hostList = self.context.newHostList
    sshTool = SshTool(hostList)
    srcFile = self.context.packagepath
    targetDir = os.path.realpath(os.path.join(srcFile, "../"))
    ## mkdir package dir and send package to remote nodes.
    sshTool.executeCommand("mkdir -p %s" % srcFile, "",
                           DefaultValue.SUCCESS, hostList)
    sshTool.scpFiles(srcFile, targetDir, hostList)
    ## change mode of package dir to set privileges for users
    # BUG FIX: os.path.split() always returns a 2-tuple, so the former
    # "len(tPathList) > 2" branch could never fire (and joining its two
    # elements reproduces targetDir anyway); chmod targetDir directly.
    # Also dropped the unused user/group kwargs from the format call.
    changeModCmd = "chmod -R a+x {srcFile}".format(srcFile=targetDir)
    sshTool.executeCommand(changeModCmd, "",
                           DefaultValue.SUCCESS, hostList)
    self.logger.debug("End to send soft to each standby nodes.\n")
    self.cleanSshToolFile(sshTool)
def checkTmpDir(self, hostName):
    """
    if the tmp dir id not exist, create it.

    Probes the PGHOST tmp directory on the given host over ssh and
    creates it when the probe fails.
    """
    tmpDir = os.path.realpath(DefaultValue.getTmpDirFromEnv())
    checkCmd = 'if [ ! -d "%s" ]; then exit 1;fi;' % (tmpDir)
    sshTool = SshTool([hostName])
    resultMap, outputCollect = sshTool.getSshStatusOutput(
        checkCmd, [hostName], self.envFile)
    ret = resultMap[hostName]
    if ret == STATUS_FAIL:
        # BUG FIX: the original debug call never supplied an argument for
        # its %s placeholder, so the host name was never logged.
        self.logger.debug(
            "Node [%s] does not have tmp dir. need to fix." % hostName)
        fixCmd = "mkdir -p %s" % (tmpDir)
        sshTool.getSshStatusOutput(fixCmd, [hostName], self.envFile)
    self.cleanSshToolFile(sshTool)
def restartInstance(self):
    """
    When only the primary node is left after dropping, remove the stale
    dynamic config and interactively offer to restart the primary's
    instances.  Exits with GAUSS_35805 if the user declines.  Does
    nothing when other nodes remain.
    """
    if self.context.flagOnlyPrimary:
        self.logger.log("[gs_dropnode]Remove the dynamic conf.")
        dynamicConfigPath = "%s/bin/cluster_dynamic_config" % self.appPath
        # best-effort removal; the file may already be gone
        try:
            os.unlink(dynamicConfigPath)
        except FileNotFoundError:
            pass
        flag = input(
            "Only one primary node is left."
            "It is recommended to restart the node."
            "\nDo you want to restart the primary node now (yes/no)? ")
        # allow at most two retries for an invalid answer
        count_f = 2
        while count_f:
            if (flag.upper() != "YES"
                    and flag.upper() != "NO"
                    and flag.upper() != "Y" and flag.upper() != "N"):
                count_f -= 1
                flag = input("Please type 'yes' or 'no': ")
                continue
            break
        if flag.upper() != "YES" and flag.upper() != "Y":
            GaussLog.exitWithError(
                ErrorCode.GAUSS_358["GAUSS_35805"] % flag.upper())
        sshTool = SshTool([self.localhostname])
        # stop then start each local datanode instance in turn
        for i in self.context.hostMapForExist[
                self.localhostname]['datadir']:
            self.commonOper.stopInstance(self.localhostname, sshTool, i,
                                         self.userProfile)
            self.commonOper.startInstance(i, self.userProfile)
        self.cleanSshToolFile(sshTool)
    else:
        pass
def queryOmCluster(self, host, env):
    """
    query om cluster detail with command:
    gs_om -t status --detail

    Exits the process when the query fails; otherwise returns the raw
    command output for the caller to parse.
    """
    statusCmd = "source %s ; gs_om -t status --detail" % env
    tool = SshTool([host])
    resultMap, outputCollect = tool.getSshStatusOutput(statusCmd,
                                                       [host], env)
    self.logger.debug(host)
    self.logger.debug(outputCollect)
    if resultMap[host] == STATUS_FAIL:
        GaussLog.exitWithError("Query cluster failed. Please check "
                               "the cluster status or "
                               "source the environmental variables of user [%s]."
                               % self.user)
    self.cleanSshToolTmpFile(tool)
    return outputCollect
def addStandbyIpInPrimaryConf(self):
    """
    add standby hosts ip in primary node pg_hba.conf

    Chains one gs_guc call per new standby ip and runs the whole chain on
    the primary host over ssh.
    """
    primaryHost = self.getPrimaryHostName()
    command = ''
    for host in self.context.newHostList:
        hostName = self.context.backIpNameMap[host]
        # NOTE(review): this uses the standby's own dataNode path while
        # executing on the primary host, as the original did — presumably
        # all nodes share the same data directory layout; confirm.
        dataNode = self.context.clusterInfoDict[hostName]["dataNode"]
        # BUG FIX: the original string-literal line continuation embedded
        # the source indentation inside the hba entry ("/32 <spaces>
        # trust"); use the single-space form, consistent with
        # addTrustOnExistNodes.
        command += "gs_guc set -D %s -h 'host all all %s/32 trust';" % \
            (dataNode, host)
    self.logger.debug(command)
    sshTool = SshTool([primaryHost])
    resultMap, outputCollect = sshTool.getSshStatusOutput(
        command, [primaryHost], self.envFile)
    self.logger.debug(outputCollect)
def clearTmpFile(self):
    """
    clear temporary file after expansion success

    Removes the temp directory from all cluster nodes.  Best-effort:
    failures are logged at debug level, never raised.
    """
    self.logger.debug("start to delete temporary file %s" %
                      self.tempFileDir)
    clearCmd = "if [ -d '%s' ];then rm -rf %s;fi" % \
        (self.tempFileDir, self.tempFileDir)
    hostNames = self.context.nodeNameList
    # BUG FIX: the original wrapped this in "for host in hostNames" while
    # still targeting every host per iteration, running the same all-host
    # command N times.  One fan-out covers all nodes.
    try:
        sshTool = SshTool(hostNames)
        try:
            result, output = sshTool.getSshStatusOutput(
                clearCmd, hostNames, self.envFile)
            self.logger.debug(output)
        finally:
            # always release the ssh tool's temp files
            self.cleanSshToolFile(sshTool)
    except Exception as e:
        # deliberate best-effort cleanup: log and continue
        self.logger.debug(str(e))
def generateClusterStaticFile(self):
    """
    generate static_config_files and send to all hosts

    Runs 'gs_om -t generateconf --distribute' on the primary, then scps
    each node's generated static config into its bin directory.  For a
    single-node instance, additionally restarts the primary (if it is
    not in primary mode), brings up any non-normal standby, and starts
    the om cluster.
    """
    self.logger.debug("Start to generate and send cluster static file.\n")
    primaryHosts = self.getPrimaryHostName()
    command = "gs_om -t generateconf -X %s --distribute" % \
        self.context.xmlFile
    sshTool = SshTool([primaryHosts])
    resultMap, outputCollect = sshTool.getSshStatusOutput(
        command, [primaryHosts], self.envFile)
    self.logger.debug(outputCollect)
    self.cleanSshToolFile(sshTool)
    nodeNameList = self.context.nodeNameList
    # copy each node's generated static config into <appPath>/bin
    for hostName in nodeNameList:
        hostSsh = SshTool([hostName])
        toolPath = self.context.clusterInfoDict["toolPath"]
        appPath = self.context.clusterInfoDict["appPath"]
        srcFile = "%s/script/static_config_files/cluster_static_config_%s" \
            % (toolPath, hostName)
        if not os.path.exists(srcFile):
            GaussLog.exitWithError("Generate static file [%s] not found." \
                % srcFile)
        targetFile = "%s/bin/cluster_static_config" % appPath
        hostSsh.scpFiles(srcFile, targetFile, [hostName], self.envFile)
        self.cleanSshToolFile(hostSsh)
    self.logger.debug("End to generate and send cluster static file.\n")
    # give the distributed config time to settle before querying status
    time.sleep(10)
    # Single-node database need start cluster after expansion
    if self.isSingleNodeInstance:
        self.logger.debug("Single-Node instance need restart.\n")
        self.commonGsCtl.queryOmCluster(primaryHosts, self.envFile)
        # if primary database not normal, restart it
        primaryHost = self.getPrimaryHostName()
        dataNode = self.context.clusterInfoDict[primaryHost]["dataNode"]
        insType, dbStat = self.commonGsCtl.queryInstanceStatus(
            primaryHost, dataNode, self.envFile)
        if insType != MODE_PRIMARY:
            self.commonGsCtl.startInstanceWithMode(
                primaryHost, dataNode, MODE_PRIMARY, self.envFile)
        # if stat if not normal,rebuild standby database
        standbyHosts = self.context.newHostList
        for host in standbyHosts:
            hostName = self.context.backIpNameMap[host]
            dataNode = self.context.clusterInfoDict[hostName]["dataNode"]
            insType, dbStat = self.commonGsCtl.queryInstanceStatus(
                hostName, dataNode, self.envFile)
            if dbStat != STAT_NORMAL:
                self.commonGsCtl.startInstanceWithMode(
                    hostName, dataNode, MODE_STANDBY, self.envFile)
        self.commonGsCtl.startOmCluster(primaryHosts, self.envFile)
def generateAndSendXmlFile(self):
    """
    Generate a single-node deploy XML for each new standby host and scp
    it to the same path on that host.
    """
    self.logger.debug("Start to generateAndSend XML file.\n")
    tempXmlFile = "%s/clusterconfig.xml" % self.tempFileDir
    cmd = "mkdir -p %s; touch %s; cat /dev/null > %s" % \
        (self.tempFileDir, tempXmlFile, tempXmlFile)
    (status, output) = subprocess.getstatusoutput(cmd)
    cmd = "chown -R %s:%s %s" % (self.user, self.group, self.tempFileDir)
    (status, output) = subprocess.getstatusoutput(cmd)

    for host in self.context.newHostList:
        # create single deploy xml file for each standby node
        xmlContent = self.__generateXml(host)
        # BUG FIX: added os.O_TRUNC — without it, a shorter XML written
        # on a later loop iteration left trailing bytes from the
        # previous host's (longer) file.  Redundant fo.close() inside
        # the 'with' block removed.
        with os.fdopen(
                os.open("%s" % tempXmlFile,
                        os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
                        stat.S_IWUSR | stat.S_IRUSR), 'w') as fo:
            fo.write(xmlContent)
        # send single deploy xml file to each standby node
        # BUG FIX: SshTool expects a list of hosts; the bare string made
        # it iterate over the characters of the ip address.
        sshTool = SshTool([host])
        retmap, output = sshTool.getSshStatusOutput(
            "mkdir -p %s" % self.tempFileDir, [host], self.envFile)
        retmap, output = sshTool.getSshStatusOutput(
            "chown %s:%s %s" % (self.user, self.group, self.tempFileDir),
            [host], self.envFile)
        sshTool.scpFiles("%s" % tempXmlFile, "%s" % tempXmlFile, [host],
                         self.envFile)
        self.cleanSshToolFile(sshTool)
    self.logger.debug("End to generateAndSend XML file.\n")
def checkAllStandbyState(self):
    """
    check all standby state whether switchover is happening

    Checks every remaining host's instances, then, for each reachable
    host being dropped, checks and stops its instances and deletes its
    GRPC ssl certificates (failures there are only logged with a manual
    cleanup hint).  Unreachable hosts get the manual-cleanup hint only.
    """
    # hosts that remain in the cluster: state check only
    for hostNameLoop in self.context.hostMapForExist.keys():
        sshtool_host = SshTool([hostNameLoop])
        for i in self.context.hostMapForExist[hostNameLoop]['datadir']:
            # check whether switchover/failover is happening
            self.commonOper.checkStandbyState(hostNameLoop, i,
                                              sshtool_host,
                                              self.userProfile)
        self.cleanSshToolFile(sshtool_host)
    # hosts being dropped: check, stop, then remove GRPC certs
    for hostNameLoop in self.context.hostMapForDel.keys():
        if hostNameLoop not in self.context.failureHosts:
            sshtool_host = SshTool([hostNameLoop])
            for i in self.context.hostMapForDel[hostNameLoop]['datadir']:
                # check whether switchover/failover is happening
                self.commonOper.checkStandbyState(hostNameLoop, i,
                                                  sshtool_host,
                                                  self.userProfile, True)
                self.commonOper.stopInstance(hostNameLoop, sshtool_host,
                                             i, self.userProfile)
            # remove GRPC sslcert, keeping openssl.cnf in place
            cmdDelCert = "ls %s/share/sslcert/grpc/* | " \
                         "grep -v openssl.cnf | xargs rm -rf" % self.appPath
            result, output = sshtool_host.getSshStatusOutput(
                cmdDelCert, [hostNameLoop], self.userProfile)
            # cert deletion failure is not fatal; ask for manual cleanup
            if result[hostNameLoop] != 'Success':
                self.logger.debug(output)
                self.logger.log("[gs_dropnode]Failed to delete the GRPC "
                                "sslcert of %s." % hostNameLoop)
                self.logger.log("[gs_dropnode]Please check and delete the "
                                "GRPC sslcert of %s manually."
                                % hostNameLoop)
            self.cleanSshToolFile(sshtool_host)
        else:
            self.logger.log("[gs_dropnode]Cannot connect %s. Please check "
                            "and delete the GRPC sslcert of %s manually."
                            % (hostNameLoop, hostNameLoop))
def dropNodeOnAllHosts(self):
    """
    drop the target node on the other host

    For each remaining host: back up its config, parse the replication
    settings that reference the dropped nodes, apply the new
    postgresql/replication settings per data directory, and roll back
    from the backup if applying them fails.
    """
    for hostNameLoop in self.context.hostMapForExist.keys():
        sshtool_host = SshTool([hostNameLoop])
        # backup
        backupfile = self.commonOper.backupConf(
            self.gphomepath, self.user,
            hostNameLoop, self.userProfile, sshtool_host, self.pghostPath)
        self.logger.log(
            "[gs_dropnode]The backup file of " + hostNameLoop + " is " +
            backupfile)
        # remember the primary's backup for later rollback elsewhere
        if hostNameLoop == self.localhostname:
            self.backupFilePrimary = backupfile
        indexForuse = 0
        for i in self.context.hostMapForExist[hostNameLoop]['datadir']:
            # parse
            resultDict = self.commonOper.parseConfigFile(
                hostNameLoop, i, self.dnIdForDel,
                self.context.hostIpListForDel, sshtool_host, self.envFile)
            resultDictForRollback = self.commonOper.parseBackupFile(
                hostNameLoop, backupfile,
                self.context.hostMapForExist[hostNameLoop]['dn_id']
                [indexForuse],
                resultDict['replStr'], sshtool_host, self.envFile)
            if hostNameLoop == self.localhostname:
                self.resultDictOfPrimary.append(resultDict)
            # try set
            try:
                self.commonOper.SetPgsqlConf(
                    resultDict['replStr'], hostNameLoop, i,
                    resultDict['syncStandbyStr'], sshtool_host,
                    self.userProfile,
                    self.context.hostMapForExist[hostNameLoop]['port']
                    [indexForuse], '', self.context.flagOnlyPrimary)
            except ValueError:
                # apply failed: restore the settings parsed from backup
                self.logger.log("[gs_dropnode]Rollback pgsql process.")
                self.commonOper.SetPgsqlConf(
                    resultDict['replStr'], hostNameLoop, i,
                    resultDict['syncStandbyStr'], sshtool_host,
                    self.userProfile,
                    self.context.hostMapForExist[hostNameLoop]['port']
                    [indexForuse],
                    resultDictForRollback['rollbackReplStr'])
            indexForuse += 1
        self.cleanSshToolFile(sshtool_host)
def operationOnlyOnPrimary(self):
    """
    operation only need to be executed on primary node

    Applies the primary's pg_hba changes on each remaining host and
    rolls the change back on that host if applying it fails.
    """
    pghbaStr = self.resultDictOfPrimary[0]['pghbaStr']
    for nodeName in self.context.hostMapForExist.keys():
        nodeSsh = SshTool([nodeName])
        try:
            self.commonOper.SetPghbaConf(self.userProfile, nodeName,
                                         pghbaStr, False)
        except ValueError:
            self.logger.log("[gs_dropnode]Rollback pghba conf.")
            self.commonOper.SetPghbaConf(self.userProfile, nodeName,
                                         pghbaStr, True)
        self.cleanSshToolFile(nodeSsh)