def startCluster(self):
    """
    function: start cluster
    input : NA
    output: NA
    """
    # Starting the cluster and building the standby run the same
    # Local_Install script; only the action code and messages differ.
    run_local = self.context.isSingle or self.context.localMode
    steps = ((ACTION_START_CLUSTER, "Start cluster",
              "Command for start cluster: %s"),
             (ACTION_BUILD_STANDBY, "Build standby",
              "Command for build standby: %s"))
    for action, desc, debug_fmt in steps:
        cmd = "source %s;" % self.context.mpprcFile
        cmd += "%s -t %s -U %s -X %s -R %s -c %s -l %s %s" % (
            OMCommand.getLocalScript("Local_Install"), action,
            self.context.user + ":" + self.context.group,
            self.context.xmlFile, self.context.clusterInfo.appPath,
            self.context.clusterInfo.name, self.context.localLog,
            self.getCommandOptions())
        self.context.logger.debug(debug_fmt % cmd)
        DefaultValue.execCommandWithMode(
            cmd, desc, self.context.sshTool, run_local,
            self.context.mpprcFile)
    self.context.logger.log("Successfully started cluster.")
def stopCluster(self):
    """
    function: Stop cluster; if the normal stop does not finish within
              the timeout, retry once in immediate mode.
    input : NA
    output: NA
    raises: Exception when the immediate-mode stop also fails
    """
    self.logger.log("Stopping the cluster.")
    # Stop cluster in 300 seconds
    cmd = "source %s; %s -t %d" % (self.context.g_opts.mpprcFile,
                                   OMCommand.getLocalScript("Gs_Stop"),
                                   DefaultValue.TIMEOUT_CLUSTER_STOP)
    (status, output) = subprocess.getstatusoutput(cmd)
    if (status != 0):
        # BUG FIX: the two adjacent literals used to concatenate into
        # "...300 seconds,stopping cluster..." -- add the missing space.
        self.logger.log(
            "Warning: Failed to stop cluster within 300 seconds, "
            "stopping cluster again at immediate mode.")
        cmd = "source %s; %s -m immediate -t %d" % (
            self.context.g_opts.mpprcFile,
            OMCommand.getLocalScript("Gs_Stop"),
            DefaultValue.TIMEOUT_CLUSTER_STOP)
        (status, output) = subprocess.getstatusoutput(cmd)
        if (status != 0):
            self.logger.log("The cmd is %s " % cmd)
            raise Exception(ErrorCode.GAUSS_516["GAUSS_51610"]
                            % "the cluster at immediate mode"
                            + " Error: \n%s" % output)
    self.logger.log("Successfully stopped the cluster.")
def doKerberos(self):
    """
    function: operation kerberos
    input : NA
    output: NA
    """
    try:
        mode = self.context.g_opts.kerberosMode
        # build the command for the requested operation; any other
        # mode value is a no-op, matching the original branch layout
        if mode == "install":
            self.logger.log("Starting install Kerberos.", "addStep")
            cmd = "%s -m %s -U %s --%s" % \
                  (OMCommand.getLocalScript("Local_Kerberos"),
                   "install", self.context.g_opts.clusterUser,
                   self.context.g_opts.kerberosType)
        elif mode == "uninstall":
            self.logger.log("Starting uninstall Kerberos.", "addStep")
            cmd = "%s -m %s -U %s" % \
                  (OMCommand.getLocalScript("Local_Kerberos"),
                   "uninstall", self.context.g_opts.clusterUser)
        else:
            return
        # local mode
        (status, output) = subprocess.getstatusoutput(cmd)
        if status != 0:
            raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"]
                            % "Command: %s. Error:\n%s" % (cmd, output))
        self.logger.log("Successfully %s Kerberos." % mode)
    except Exception as e:
        raise Exception(str(e))
def cleanRemoteOsUser(self):
    """
    function: Clean remote os user
    input : NA
    output: NA
    """
    # check if local mode
    if (self.localMode):
        return
    if (not self.deleteUser):
        # clean static config file only; keep the user
        cmd = "rm -rf '%s'" % self.clusterInfo.appPath
        DefaultValue.execCommandWithMode(cmd, "delete install directory",
                                         self.sshTool, self.localMode,
                                         self.mpprcFile)
        return
    group = grp.getgrgid(pwd.getpwnam(self.user).pw_gid).gr_name
    # get other nodes
    # BUG FIX: the original removed items from otherNodes while
    # iterating over it, which skips the element following each
    # removal; build a filtered copy instead.
    hostName = DefaultValue.GetHostIpOrName()
    otherNodes = [node for node in self.clusterInfo.getClusterNodeNames()
                  if node != hostName]
    # clean remote user
    self.logger.log("Deleting remote OS user.")
    cmd = "%s -U %s -l %s" % (OMCommand.getLocalScript(
        "Local_Clean_OsUser"), self.user, self.localLog)
    self.logger.debug("Command for deleting remote OS user: %s" % cmd)
    DefaultValue.execCommandWithMode(cmd, "delete OS user",
                                     self.sshTool, self.localMode,
                                     self.mpprcFile, otherNodes)
    self.logger.log("Successfully deleted remote OS user.")
    if (self.deleteGroup):
        # clean remote group
        self.logger.debug("Deleting remote OS group.")
        cmd = "%s -t %s -u %s -l '%s' -X '%s'" % (
            OMCommand.getLocalScript("Local_UnPreInstall"),
            ACTION_DELETE_GROUP, group, self.localLog, self.xmlFile)
        self.logger.debug("Command for deleting remote OS group: %s"
                          % cmd)
        status = self.sshTool.getSshStatusOutput(cmd, otherNodes,
                                                 self.mpprcFile)[0]
        outputMap = self.sshTool.parseSshOutput(otherNodes)
        # group deletion failure is non-fatal: just surface the output
        for node in status.keys():
            if (status[node] != DefaultValue.SUCCESS):
                self.logger.log((outputMap[node]).strip("\n"))
        self.logger.debug("Deleting remote group is completed.")
def checkNodeInstall(self):
    """
    function: check node install
    input: NA
    output: NA
    """
    self.context.logger.debug("Checking node's installation.",
                              "constant")
    # Checking node's installation
    self.context.logger.log(
        "Checking the installation environment on all nodes.",
        "constant")
    # Checking the installation environment
    check_cmd = "source %s;" % self.context.mpprcFile
    check_cmd += "%s -U %s -R %s -l %s -X %s" % (
        OMCommand.getLocalScript("Local_Check_Install"),
        "%s:%s" % (self.context.user, self.context.group),
        self.context.clusterInfo.appPath,
        self.context.localLog,
        self.context.xmlFile)
    self.context.logger.debug("Command for checking installation: %s."
                              % check_cmd)
    check_cmd = self.singleCmd(check_cmd)
    DefaultValue.execCommandWithMode(check_cmd,
                                     "check installation environment",
                                     self.context.sshTool,
                                     self.context.isSingle)
    self.context.logger.debug(
        "Successfully checked node's installation.", "constant")
def checkNodeConfig(self):
    """
    function: Check node config on all nodes
    input : NA
    output: NA
    """
    self.context.logger.log("Checking node configuration on all nodes.")
    # Check node config on all nodes; each GUC value is passed as an
    # escaped -D option
    param_opts = "".join(" -D \\\"%s\\\"" % param
                         for param in self.context.dataGucParam)
    cmd = "source %s;" % self.context.mpprcFile
    cmd += "%s -U %s -l %s %s" % (
        OMCommand.getLocalScript("Local_Check_Config"),
        self.context.user, self.context.localLog, param_opts)
    self.context.logger.debug(
        "Command for checking node configuration: %s." % cmd)
    cmd = self.singleCmd(cmd)
    DefaultValue.execCommandWithMode(cmd, "check node configuration",
                                     self.context.sshTool,
                                     self.context.isSingle)
    self.context.logger.debug("Successfully checked node configuration.")
def doRemoteRestore(self):
    """
    function: Get user and group
    input : NA
    output: NA
    """
    self.context.logger.log("Performing remote restoration.")
    restore_cmd = "%s -U %s -l %s --ingore_miss" % (
        OMCommand.getLocalScript("Local_Restore"),
        self.context.user, self.context.localLog)
    if self.context.backupDir != "":
        restore_cmd += " -P %s" % self.context.backupDir
    if self.context.isParameter:
        restore_cmd += " -p"
    if self.context.isBinary:
        restore_cmd += " -b"
    self.context.logger.debug("Remote restoration command: %s."
                              % restore_cmd)
    try:
        (status, output) = \
            self.context.sshTool.getSshStatusOutput(restore_cmd)
        # a failure on any node aborts the restoration
        failed_nodes = [node for node in status.keys()
                        if status[node] != DefaultValue.SUCCESS]
        if failed_nodes:
            raise Exception(output)
        self.__cleanTmpTar()
        self.context.logger.log("Successfully restored cluster files.")
    except Exception as e:
        # always remove the temp tarball, even on failure
        self.__cleanTmpTar()
        raise Exception(str(e))
def checkNodeInstall(self, nodes=None, checkParams=None,
                     strictUserCheck=True):
    """
    function: Check node install
    input : nodes, checkParams, strictUserCheck
    output: NA
    """
    nodes = [] if nodes is None else nodes
    checkParams = [] if checkParams is None else checkParams
    # only these GUCs are forwarded to the checker
    validParam = ["shared_buffers", "max_connections"]
    guc_opts = ""
    for param in checkParams:
        kv = param.split("=")
        if len(kv) != 2:
            raise Exception(ErrorCode.GAUSS_500["GAUSS_50009"])
        if kv[0].strip() in validParam:
            guc_opts += " -C \\\"%s\\\"" % param
    self.logger.log("Checking installation environment on all nodes.")
    cmd = "%s -U %s:%s -R %s %s -l %s -X '%s'" % (
        OMCommand.getLocalScript("Local_Check_Install"),
        self.user, self.group, self.clusterInfo.appPath,
        guc_opts, self.localLog, self.xmlFile)
    if not strictUserCheck:
        cmd += " -O"
    self.logger.debug("Checking the install command: %s." % cmd)
    DefaultValue.execCommandWithMode(cmd,
                                     "check installation environment",
                                     self.sshTool,
                                     self.localMode or self.isSingle,
                                     "", nodes)
def initNodeInstance(self):
    """
    function: init instance applications
    input : NA
    output: NA
    """
    self.context.logger.log("Initializing instances on all nodes.")
    # get the --gsinit-parameter parameter values
    init_opts = "".join(" -P \\\"%s\\\"" % param
                        for param in self.context.dbInitParam)
    # init instances on all nodes
    cmd = "source %s;" % self.context.mpprcFile
    cmd += "%s -U %s %s -l %s" % (
        OMCommand.getLocalScript("Local_Init_Instance"),
        self.context.user, init_opts, self.context.localLog)
    self.context.logger.debug("Command for initializing instances: %s"
                              % cmd)
    cmd = self.singleCmd(cmd)
    DefaultValue.execCommandWithMode(cmd, "initialize instances",
                                     self.context.sshTool,
                                     self.context.isSingle)
    self.context.logger.debug("Successfully initialized node instance.")
def fixServerPackageOwner(self):
    """
    function: fix server package. when distribute server package,
              the os user has not been created, so we should fix
              server package Owner here after user create.
    input: NA
    output: NA
    """
    self.context.logger.log("Fixing server package owner.", "addStep")
    try:
        # fix server package owner for oltp
        cmd = "%s -t %s -u %s -g %s -X %s -Q %s -l %s" % (
            OMCommand.getLocalScript("Local_PreInstall"),
            ACTION_FIX_SERVER_PACKAGE_OWNER,
            self.context.user,
            self.context.group,
            self.context.xmlFile,
            self.context.clusterToolPath,
            self.context.localLog)
        # check the env file
        if self.context.mpprcFile != "":
            cmd = "%s -s %s" % (cmd, self.context.mpprcFile)
        self.context.logger.debug("Fix server pkg cmd: %s" % cmd)
        # exec the cmd
        DefaultValue.execCommandWithMode(cmd,
                                         "fix server package owner",
                                         self.context.sshTool,
                                         self.context.localMode,
                                         self.context.mpprcFile)
        self.del_remote_pkgpath()
    except Exception as e:
        raise Exception(str(e))
def cleanSyslogConfig(self):
    """
    function: clean syslog config
    input : NA
    output: NA
    """
    try:
        # only suse11/suse12 can support it
        if g_Platform.dist()[0].upper() != "SUSE":
            return
        # clean syslog-ng/rsyslog config on every cluster node
        cmd = "%s -t %s -u %s -l '%s' -X '%s'" % (
            OMCommand.getLocalScript("Local_UnPreInstall"),
            ACTION_CLEAN_SYSLOG_CONFIG, self.user,
            self.localLog, self.xmlFile)
        self.logger.debug(
            "Command for clean syslog-ng/rsyslog config: %s" % cmd)
        DefaultValue.execCommandWithMode(
            cmd, "clean syslog-ng/rsyslog config", self.sshTool,
            self.localMode, self.mpprcFile,
            self.clusterInfo.getClusterNodeNames())
    except Exception as e:
        self.logger.logExit(str(e))
def doRemoteRestore(self):
    """
    function: Get user and group
    input : NA
    output: NA
    """
    self.context.logger.log("Performing remote restoration.")
    cmd = "%s -U %s -l %s " % (
        OMCommand.getLocalScript("Local_Restore"),
        self.context.user, self.context.localLog)
    if self.context.backupDir != "":
        cmd += " -P %s" % self.context.backupDir
    # append one flag per enabled restore option
    for enabled, flag in ((self.context.isParameter, " -p"),
                          (self.context.isBinary, " -b"),
                          (self.context.isForce, " -f")):
        if enabled:
            cmd += flag
    self.context.logger.debug("Remote restoration command: %s." % cmd)
    try:
        self._runCmd(cmd)
        self.__cleanTmpTar()
        self.context.logger.log("Successfully restored cluster files.")
    except Exception as e:
        # always remove the temp tarball, even on failure
        self.__cleanTmpTar()
        raise Exception(str(e))
def StopCluster(self):
    """
    function: Stopping the cluster
    input : NA
    output: NA
    """
    self.logger.log("Stopping the cluster.", "addStep")
    # get the static config
    static_config = \
        "%s/bin/cluster_static_config" % self.clusterInfo.appPath
    static_config_bak = \
        "%s/bin/cluster_static_config_bak" % self.clusterInfo.appPath
    # if cluster_static_config_bak exists
    # and static_config does not exists, mv it to static_config
    if (not os.path.exists(static_config)
            and os.path.exists(static_config_bak)):
        cmd = "mv %s %s" % (static_config_bak, static_config)
        (status, output) = subprocess.getstatusoutput(cmd)
        if (status != 0):
            # restore failure is logged but not fatal: the existence
            # check below simply finds no static config and returns
            self.logger.debug("The cmd is %s " % cmd)
            self.logger.error("rename cluster_static_config_bak failed")
            self.logger.debug("Error:\n%s" % output)
    # if path not exits, can not stop cluster
    if (not os.path.exists(static_config)):
        self.logger.debug("Failed to stop the cluster.", "constant")
        return
    # Stop cluster applications
    cmd = "source %s; %s -U %s -R %s -l %s" % (
        self.mpprcFile, OMCommand.getLocalScript("Local_StopInstance"),
        self.user, self.clusterInfo.appPath, self.localLog)
    self.logger.debug("Command for stop cluster: %s" % cmd)
    DefaultValue.execCommandWithMode(cmd, "Stop cluster", self.sshTool,
                                     self.localMode, self.mpprcFile)
    self.logger.log("Successfully stopped cluster.")
def setArmOptimization(self):
    """
    function: setting ARM Optimization
    input: NA
    output: NA
    """
    self.context.logger.log("Set ARM Optimization.", "addStep")
    cmd = "python3 -c 'import platform;print(platform.machine())'"
    (status, output) = subprocess.getstatusoutput(cmd)
    if status != 0:
        self.context.logger.logExit("Command for set platform ARM:"
                                    "%s" % cmd + " Error: \n%s" % output)
    # nothing to do on non-ARM machines
    if str(output) != "aarch64":
        self.context.logger.log("No need to set ARM Optimization.",
                                "constant")
        return
    try:
        # exec cmd for set platform ARM
        cmd = "%s -t %s -u %s -l %s -Q %s" % (
            OMCommand.getLocalScript("Local_PreInstall"),
            ACTION_SET_ARM_OPTIMIZATION,
            self.context.user,
            self.context.localLog,
            self.context.clusterToolPath)
        self.context.logger.debug("Command for set platform ARM: %s"
                                  % cmd)
        DefaultValue.execCommandWithMode(
            cmd, "set platform ARM", self.context.sshTool,
            self.context.localMode or self.context.isSingle,
            self.context.mpprcFile)
    except Exception as e:
        raise Exception(str(e))
    # Successfully set ARM Optimization
    self.context.logger.log("Successfully set ARM Optimization.",
                            "constant")
def startCluster(self):
    """
    function: Start cluster
    input: NA
    output: NA
    """
    self.logger.log("Starting the cluster.", "addStep")
    # Delete cluster dynamic config if it is exist on all nodes
    dynamic_conf = ("%s/bin/cluster_dynamic_config"
                    % self.oldClusterInfo.appPath)
    cmd = g_file.SHELL_CMD_DICT["deleteFile"] % (dynamic_conf,
                                                 dynamic_conf)
    self.logger.debug(
        "Command for removing the cluster dynamic configuration: %s."
        % cmd)
    self.sshTool.executeCommand(cmd, "remove dynamic configuration")
    # Start cluster in 300 seconds
    cmd = "source %s; %s -t %s" % (self.context.g_opts.mpprcFile,
                                   OMCommand.getLocalScript("Gs_Start"),
                                   DefaultValue.TIMEOUT_CLUSTER_START)
    (status, output) = subprocess.getstatusoutput(cmd)
    if status != 0:
        self.logger.debug("The cmd is %s " % cmd)
        raise Exception(ErrorCode.GAUSS_516["GAUSS_51607"]
                        % "the cluster" + " Error: \n%s" % output)
    self.logger.log("Successfully started the cluster.", "constant")
def rollbackInstall(self): """ function: Rollback install input : NA output: NA 0 succeed 1 failed 2 rollback succeed 3 rollback failed """ # Rollback install self.context.logger.log("Rolling back.") try: self.deleteTempFileForUninstall() # Rollback install cmd = "source %s;" % self.context.mpprcFile cmd += "%s -U %s -R '%s' -l '%s' -T" % ( OMCommand.getLocalScript("Local_Uninstall"), self.context.user, os.path.realpath( self.context.clusterInfo.appPath), self.context.localLog) self.context.logger.debug("Command for rolling back: %s." % cmd) # exec the cmd for rollback (status, output) = self.context.sshTool.getSshStatusOutput(cmd) for ret in list(status.values()): if (ret != DefaultValue.SUCCESS): raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd + "Error:\n%s" % str(output)) self.context.logger.debug(output) except Exception as e: # failed to roll back self.context.logger.error(str(e)) sys.exit(ROLLBACK_FAILED) # Rollback succeeded self.context.logger.log("Rollback succeeded.")
def CleanInstance(self):
    """
    function: clean instance
    input : NA
    output : NA
    """
    self.logger.debug("Deleting instance.", "addStep")
    # check if need delete instance
    if not self.cleanInstance:
        self.logger.debug("No need to delete data.", "constant")
        return
    # Clean instance data
    clean_cmd = "%s -U %s -l %s" % (
        OMCommand.getLocalScript("Local_Clean_Instance"),
        self.user, self.localLog)
    self.logger.debug("Command for deleting instance: %s" % clean_cmd)
    DefaultValue.execCommandWithMode(clean_cmd, "delete instances data.",
                                     self.sshTool, self.localMode,
                                     self.mpprcFile)
    # clean upgrade temp backup path
    upgrade_bak_dir = DefaultValue.getBackupDir(self.user, "upgrade")
    bak_cmd = g_file.SHELL_CMD_DICT["cleanDir"] % (
        upgrade_bak_dir, upgrade_bak_dir, upgrade_bak_dir)
    DefaultValue.execCommandWithMode(
        bak_cmd, "delete backup directory for upgrade",
        self.sshTool, self.localMode, self.mpprcFile)
    self.logger.log("Successfully deleted instances.", "constant")
def doStartCluster(self):
    """
    function: do start cluster
    input: NA
    output: NA
    """
    self.logger.debug("Operating: Starting.")
    # Specifies the stop node
    # Gets the specified node id
    startType = "node" if self.context.g_opts.nodeName != "" \
        else "cluster"
    # Perform a start operation
    self.logger.log("Starting %s." % startType)
    self.logger.log("=========================================")
    hostName = DefaultValue.GetHostIpOrName()
    #get the newest dynaminc config and send to other node
    self.clusterInfo.checkClusterDynamicConfig(self.context.user,
                                               hostName)
    # start either the single named node or every node in the cluster
    if self.context.g_opts.nodeName == "":
        hostList = self.clusterInfo.getClusterNodeNames()
    else:
        hostList = []
        hostList.append(self.context.g_opts.nodeName)
    self.sshTool = SshTool(self.clusterInfo.getClusterNodeNames(), None,
                           DefaultValue.TIMEOUT_CLUSTER_START)
    # fall back to the default start timeout when none was given
    if self.time_out is None:
        time_out = DefaultValue.TIMEOUT_CLUSTER_START
    else:
        time_out = self.time_out
    cmd = "source %s; %s -U %s -R %s -t %s --security-mode=%s" % (
        self.context.g_opts.mpprcFile,
        OMCommand.getLocalScript("Local_StartInstance"),
        self.context.user, self.context.clusterInfo.appPath, time_out,
        self.context.g_opts.security_mode)
    if self.dataDir != "":
        cmd += " -D %s" % self.dataDir
    starttime = time.time()
    (statusMap, output) = self.sshTool.getSshStatusOutput(cmd, hostList)
    # any node reporting a non-Success status aborts the start
    for nodeName in hostList:
        if statusMap[nodeName] != 'Success':
            raise Exception(
                ErrorCode.GAUSS_536["GAUSS_53600"] % (cmd, output))
    if re.search("another server might be running", output):
        self.logger.log(output)
    if startType == "cluster":
        # poll until cluster_state is Normal or the timeout elapses
        cmd = "source %s; gs_om -t status|grep cluster_state|" \
              "grep Normal" % self.context.g_opts.mpprcFile
        while time.time() <= time_out + starttime:
            status = subprocess.getstatusoutput(cmd)[0]
            if status != 0:
                self.logger.log("Waiting for check cluster state...")
                time.sleep(5)
            else:
                break
        if time.time() > time_out + starttime:
            raise Exception(ErrorCode.GAUSS_516["GAUSS_51610"]
                            % "cluster"
                            + "Start timeout, please check the process"
                              " status manually")
    self.logger.log("=========================================")
    self.logger.log("Successfully started.")
    self.logger.debug("Operation succeeded: Start.")
def cleanOtherNodesEnvSoftware(self):
    """
    function: clean other nodes environment software and variable
    input : NA
    output: NA
    """
    # check if local mode
    if self.localMode:
        return
    self.logger.log("Deleting software packages "
                    "and environmental variables of other nodes.")
    try:
        # get other nodes
        # BUG FIX: the original removed items from otherNodes while
        # iterating over it, which skips the element following each
        # removal; build a filtered copy instead.
        hostName = DefaultValue.GetHostIpOrName()
        otherNodes = [node for node in
                      self.clusterInfo.getClusterNodeNames()
                      if node != hostName]
        self.logger.debug(
            "Deleting environmental variables of nodes: %s."
            % otherNodes)
        # clean $GAUSS_ENV
        if (not self.deleteUser):
            cmd = "%s -t %s -u %s -l '%s' -X '%s'" % (
                OMCommand.getLocalScript("Local_UnPreInstall"),
                ACTION_CLEAN_GAUSS_ENV, self.user,
                self.localLog, self.xmlFile)
            self.logger.debug("Command for deleting $GAUSS_ENV: %s"
                              % cmd)
            DefaultValue.execCommandWithMode(cmd, "delete $GAUSS_ENV",
                                             self.sshTool,
                                             self.localMode,
                                             self.mpprcFile, otherNodes)
        cmd = "%s -t %s -u %s -l '%s' -X '%s'" % (
            OMCommand.getLocalScript("Local_UnPreInstall"),
            ACTION_CLEAN_TOOL_ENV, self.user,
            self.localLog, self.xmlFile)
        self.logger.debug(
            "Command for deleting environmental variables: %s" % cmd)
        DefaultValue.execCommandWithMode(cmd,
                                         "delete environment variables",
                                         self.sshTool, self.localMode,
                                         self.mpprcFile, otherNodes)
    except Exception as e:
        self.logger.logExit(str(e))
    self.logger.log("Successfully deleted software packages "
                    "and environmental variables of other nodes.")
def cleanScript(self):
    """
    clean script directory
    """
    self.logger.debug("Clean script path")
    cmd = "%s -t %s -u %s -Q %s" % (
        OMCommand.getLocalScript("Local_UnPreInstall"),
        ACTION_CLEAN_DEPENDENCY, self.user,
        self.clusterToolPath)
    # also remove the user home when the OS user is being deleted
    if self.deleteUser:
        cmd = "%s -P %s" % (cmd, self.userHome)
    DefaultValue.execCommandWithMode(cmd, "clean script",
                                     self.sshTool, self.localMode,
                                     self.mpprcFile)
    self.logger.debug("Clean script path successfully.")
def cleanLocalOsUser(self):
    """
    function: Clean local os user
    input : NA
    output: NA
    """
    if (not self.deleteUser):
        # NOTE(review): the install directory is removed here only in
        # local mode; presumably remote nodes are handled elsewhere
        # (e.g. cleanRemoteOsUser) -- confirm this split is intentional
        if (self.localMode):
            cmd = "rm -rf '%s'" % self.clusterInfo.appPath
            DefaultValue.execCommandWithMode(cmd,
                                             "delete install directory",
                                             self.sshTool,
                                             self.localMode,
                                             self.mpprcFile)
        return
    group = grp.getgrgid(pwd.getpwnam(self.user).pw_gid).gr_name
    # clean local user
    self.logger.log("Deleting local OS user.")
    cmd = "%s -U %s -l %s" % (OMCommand.getLocalScript(
        "Local_Clean_OsUser"), self.user, self.localLog)
    self.logger.debug("Command for deleting local OS user: %s" % cmd)
    (status, output) = subprocess.getstatusoutput(cmd)
    if (status != 0):
        # user deletion failure is fatal
        self.logger.logExit(output)
    self.logger.log("Successfully deleted local OS user.")
    if (self.deleteGroup):
        # clean local user group
        self.logger.debug("Deleting local OS group.")
        cmd = "%s -t %s -u %s -l '%s' -X '%s'" % (
            OMCommand.getLocalScript("Local_UnPreInstall"),
            ACTION_DELETE_GROUP, group, self.localLog, self.xmlFile)
        self.logger.debug("Command for deleting local OS group: %s"
                          % cmd)
        (status, output) = subprocess.getstatusoutput(cmd)
        if (status != 0):
            # group deletion failure is non-fatal; just log the output
            self.logger.log(output.strip())
        self.logger.debug("Deleting local group is completed.")
def UninstallApp(self):
    """
    function: Uninstall application
    input : NA
    output: NA
    """
    self.logger.log("Uninstalling application.", "addStep")
    uninstall_cmd = "%s -R '%s' -U %s -l %s -T" % (
        OMCommand.getLocalScript("Local_Uninstall"),
        self.clusterInfo.appPath, self.user, self.localLog)
    self.logger.debug("Command for Uninstalling: %s" % uninstall_cmd)
    # clean application
    DefaultValue.execCommandWithMode(uninstall_cmd,
                                     "uninstall application",
                                     self.sshTool, self.localMode,
                                     self.mpprcFile)
    self.logger.log("Successfully uninstalled application.", "constant")
def checkDiskSpace(self):
    """
    function: delete step tmp file
    input : NA
    output: NA
    """
    try:
        cmd = "%s -t %s -u %s -l %s -R %s" % (
            OMCommand.getLocalScript("Local_PreInstall"),
            ACTION_CHECK_DISK_SPACE,
            self.context.user,
            self.context.localLog,
            self.context.clusterInfo.appPath)
        # pass the env separation file through when one is configured
        if self.context.mpprcFile != "":
            cmd = "%s -s '%s'" % (cmd, self.context.mpprcFile)
        self.context.sshTool.executeCommand(cmd, "check disk space")
    except Exception as e:
        raise Exception(str(e))
def checkPreInstall(self, user, flag, nodes=None):
    """
    function: check if have done preinstall on given nodes
    input : user, flag, nodes
    output: NA
    """
    nodes = [] if nodes is None else nodes
    try:
        check_cmd = "%s -U %s -t %s" % (
            OMCommand.getLocalScript("Local_Check_PreInstall"),
            user, flag)
        DefaultValue.execCommandWithMode(check_cmd, "check preinstall",
                                         self.sshTool,
                                         self.localMode or self.isSingle,
                                         "", nodes)
    except Exception as e:
        raise Exception(str(e))
def doRemoteBackup(self):
    """
    function: Get user and group
    input : NA
    output: NA
    """
    self.context.logger.log("Performing remote backup.")
    localHostName = DefaultValue.GetHostIpOrName()
    # per-process temp dir so concurrent backups do not collide
    tmp_backupDir = "%s/backupTemp_%d" % (
        DefaultValue.getTmpDirFromEnv(), os.getpid())
    # NOTE(review): "--ingore_miss" looks like a typo of
    # "--ignore_miss", but it must match the flag accepted by the
    # Local_Backup script -- do not change one side only
    cmd = "%s -U %s --nodeName %s -P %s -B %s -l %s --ingore_miss" % \
          (OMCommand.getLocalScript("Local_Backup"), self.context.user,
           localHostName, tmp_backupDir, self.context.backupDir,
           self.context.localLog)
    if self.context.isParameter:
        cmd += " -p"
    if self.context.isBinary:
        cmd += " -b"
    self.context.logger.debug("Remote backup command is %s." % cmd)
    try:
        if (not os.path.exists(tmp_backupDir)):
            os.makedirs(tmp_backupDir,
                        DefaultValue.KEY_DIRECTORY_PERMISSION)
        (status, output) = self.context.sshTool.getSshStatusOutput(cmd)
        # a failure on any node aborts the backup
        for node in status.keys():
            if (status[node] != DefaultValue.SUCCESS):
                raise Exception(output)
        if self.context.isParameter:
            self.__distributeBackupFile(tmp_backupDir, "parameter")
        if self.context.isBinary:
            self.__distributeBackupFile(tmp_backupDir, "binary")
        DefaultValue.cleanFileDir(tmp_backupDir, self.context.sshTool)
        self.context.logger.log("Remote backup succeeded.")
        self.context.logger.log("Successfully backed up cluster files.")
    except Exception as e:
        # best-effort cleanup of the temp dir before re-raising
        DefaultValue.cleanFileDir(tmp_backupDir, self.context.sshTool)
        raise Exception(str(e))
def startInstance(self, dirDn, env):
    """
    Start the target node's instance and surface warnings
    found in the start output.
    """
    self.logger.log("[gs_dropnode]Start to start the target node.")
    start_cmd = "source %s ; %s -U %s -D %s" % (
        env, OMCommand.getLocalScript("Local_StartInstance"),
        self.user, dirDn)
    (status, output) = subprocess.getstatusoutput(start_cmd)
    self.logger.debug(output)
    if status:
        # non-zero status: report and terminate via GaussLog
        self.logger.debug("[gs_dropnode]Failed to start the node.")
        GaussLog.exitWithError(ErrorCode.GAUSS_358["GAUSS_35809"])
    elif re.search("another server might be running", output):
        self.logger.log(output)
    elif re.search("] WARNING:", output):
        # collect and surface all WARNING lines from the output
        warn_lines = re.findall(".*] WARNING:.*", output)
        self.logger.log('\n'.join(warn_lines))
    self.logger.debug("[gs_dropnode]End to start the node.")
def updateHbaConfig(self):
    """
    function: config Hba instance
    input : NA
    output: NA
    """
    self.context.logger.log("Configuring pg_hba on all nodes.")
    # Configuring pg_hba
    hba_cmd = "source %s;" % self.context.mpprcFile
    hba_cmd += "%s -U %s -X '%s' -l '%s' " % (
        OMCommand.getLocalScript("Local_Config_Hba"),
        self.context.user, self.context.xmlFile,
        self.context.localLog)
    self.context.logger.debug("Command for configuring Hba instance: %s"
                              % hba_cmd)
    DefaultValue.execCommandWithMode(hba_cmd, "config Hba instance",
                                     self.context.sshTool,
                                     self.context.isSingle)
    self.context.logger.debug("Successfully configured HBA.")
def doStopCluster(self):
    """
    function: do stop cluster
    input: NA
    output: NA
    """
    self.logger.debug("Operating: Stopping.")
    # Specifies the stop node
    # Gets the specified node id
    stopType = "node" if self.context.g_opts.nodeName != "" \
        else "cluster"
    # Perform a stop operation
    self.logger.log("Stopping %s." % stopType)
    self.logger.log("=========================================")
    # stop either the single named node or every node in the cluster
    if self.context.g_opts.nodeName == "":
        hostList = self.clusterInfo.getClusterNodeNames()
    else:
        hostList = []
        hostList.append(self.context.g_opts.nodeName)
    self.sshTool = SshTool(self.clusterInfo.getClusterNodeNames(), None,
                           DefaultValue.TIMEOUT_CLUSTER_START)
    # fall back to the default stop timeout when none was given
    if self.time_out is None:
        time_out = DefaultValue.TIMEOUT_CLUSTER_STOP
    else:
        time_out = self.time_out
    cmd = "source %s; %s -U %s -R %s -t %s" % (
        self.context.g_opts.mpprcFile,
        OMCommand.getLocalScript("Local_StopInstance"),
        self.context.user, self.context.clusterInfo.appPath, time_out)
    if self.dataDir != "":
        cmd += " -D %s" % self.dataDir
    # optional stop mode forwarded to the local stop script
    if self.mode != "":
        cmd += " -m %s" % self.mode
    (statusMap, output) = self.sshTool.getSshStatusOutput(cmd, hostList)
    # any node reporting a non-Success status aborts the stop
    for nodeName in hostList:
        if statusMap[nodeName] != 'Success':
            raise Exception(ErrorCode.GAUSS_536["GAUSS_53606"]
                            % (cmd, output))
    self.logger.log("Successfully stopped %s." % stopType)
    self.logger.log("=========================================")
    self.logger.log("End stop %s." % stopType)
    self.logger.debug("Operation succeeded: Stop.")
def checkUninstall(self):
    """
    function: Check uninstall
    input : NA
    output: NA
    """
    # Checking uninstallation
    self.logger.log("Checking uninstallation.", "addStep")
    # use check uninstall to check every nodes
    check_cmd = "%s -R '%s' -U %s -l %s" % (
        OMCommand.getLocalScript("Local_Check_Uninstall"),
        self.clusterInfo.appPath, self.user, self.localLog)
    # check if need to clean instance
    if self.cleanInstance:
        check_cmd += " -d"
    self.logger.debug("Command for checking uninstallation: "
                      + check_cmd)
    DefaultValue.execCommandWithMode(check_cmd, "check uninstallation.",
                                     self.sshTool, self.localMode,
                                     self.mpprcFile)
    self.logger.log("Successfully checked uninstallation.", "constant")
def cleanNodeConfig(self, nodes=None, datadirs=None):
    """
    function: Clean instance
    input : nodes, datadirs
    output: NA
    """
    self.logger.log("Deleting instances from all nodes.")
    nodes = [] if nodes is None else nodes
    datadirs = [] if datadirs is None else datadirs
    # one -D option per data directory to remove
    dir_opts = "".join(" -D %s " % datadir for datadir in datadirs)
    cmd = "%s -U %s %s -l %s" % (
        OMCommand.getLocalScript("Local_Clean_Instance"),
        self.user, dir_opts, self.localLog)
    DefaultValue.execCommandWithMode(cmd, "clean instance", self.sshTool,
                                     self.localMode or self.isSingle,
                                     "", nodes)
    self.logger.log("Successfully deleted instances from all nodes.")