def queryInstanceStatus(self, host, datanode, env):
        """
        Query the local_role and db_state of a datanode instance.

        Runs ``gs_ctl query`` over SSH on the given host and parses the
        two fields out of the collected output.

        :param host:     name/IP of the host to run the query on
        :param datanode: data directory of the instance
        :param env:      separated env file to source before gs_ctl
        :return: tuple ``(insType, dbStatus)``, both stripped and
                 lower-cased; empty strings when a field is absent
        """
        command = "source %s ; gs_ctl query -D %s" % (env, datanode)
        # Fix: the SSH tool must be initialized with the target host;
        # the original passed the data-directory path here.
        sshTool = SshTool([host])
        resultMap, outputCollect = sshTool.getSshStatusOutput(command,
                                                              [host], env)
        self.logger.debug(outputCollect)
        localRole = re.findall(r"local_role.*: (.*?)\n", outputCollect)
        db_state = re.findall(r"db_state.*: (.*?)\n", outputCollect)

        # Fall back to "" when gs_ctl output does not contain the field.
        insType = localRole[0] if localRole else ""
        dbStatus = db_state[0] if db_state else ""

        return insType.strip().lower(), dbStatus.strip().lower()
    def preInstallOnHosts(self):
        """
        Run gs_preinstall on every new standby host and abort with the
        collected output if any host reports failure.
        """
        self.logger.debug("Start to preinstall database step.\n")

        # Translate the new back IPs into host names for the SSH tool.
        newHostNames = [self.context.backIpNameMap[ip]
                        for ip in self.context.newHostList]
        envfile = self.envFile
        tempXmlFile = "%s/clusterconfig.xml" % self.tempFileDir

        preinstallCmd = "{softpath}/script/gs_preinstall -U {user} -G {group} \
            -X {xmlfile} --sep-env-file={envfile} \
                --non-interactive 2>&1\
                    ".format(softpath=self.context.packagepath,
                             user=self.user, group=self.group,
                             xmlfile=tempXmlFile, envfile=envfile)

        sshTool = SshTool(newHostNames)
        status, output = sshTool.getSshStatusOutput(preinstallCmd, [], envfile)
        if STATUS_FAIL in status.values():
            GaussLog.exitWithError(output)

        self.logger.debug("End to preinstall database step.\n")
Esempio n. 3
0
    def validNodeInStandbyList(self):
        """
        Check whether nodes in the new-host list are already installed
        in the cluster.

        Queries the cluster status once and matches every new back IP
        against it.  IPs already present are reported and skipped; the
        remaining ones replace ``self.context.newHostList``.  Exits when
        nothing is left to expand.
        """
        self.logger.debug("Start to check if the nodes in standby list\n")

        curHostName = socket.gethostname()
        command = "su - %s -c 'source %s;gs_om -t status --detail'" % \
            (self.user, self.envFile)
        sshTool = SshTool([curHostName])
        resultMap, outputCollect = sshTool.getSshStatusOutput(
            command, [curHostName], self.envFile)
        self.logger.debug(outputCollect)

        newHosts = self.context.newHostList
        standbyHosts = []
        existHosts = []
        # Consume the list from the tail (preserves the original
        # ordering of the result lists).  Removed the unused
        # nodeInfo/dataNode lookups the original performed per host.
        while len(newHosts) > 0:
            hostIp = newHosts.pop()
            nodeName = self.context.backIpNameMap[hostIp]
            # A status line containing "<nodeName> ... <hostIp>" means
            # the node is already part of the cluster.
            exist_reg = r"(.*)%s[\s]*%s(.*)" % (nodeName, hostIp)
            if not re.search(exist_reg, outputCollect):
                standbyHosts.append(hostIp)
            else:
                existHosts.append(hostIp)
        self.context.newHostList = standbyHosts
        if len(existHosts) > 0:
            self.logger.log("The nodes [%s] are already in the cluster. Skip expand these nodes." \
                % ",".join(existHosts))
        self.cleanSshToolFile(sshTool)
        if len(standbyHosts) == 0:
            self.logger.log("There is no node can be expanded.")
            sys.exit(0)
Esempio n. 4
0
 def restartInstance(self):
     """
     Optionally restart the remaining primary node after a dropnode run.

     Only acts when ``flagOnlyPrimary`` is set; otherwise a no-op.
     Removes the stale dynamic config, asks the operator for
     confirmation, and exits with GAUSS_35805 when the restart is
     declined.
     """
     if self.context.flagOnlyPrimary:
         self.logger.log("[gs_dropnode]Remove the dynamic conf.")
         dynamicConfigPath = "%s/bin/cluster_dynamic_config" % self.appPath
         try:
             os.unlink(dynamicConfigPath)
         except FileNotFoundError:
             # Already absent -- nothing to remove.
             pass
         flag = input(
             "Only one primary node is left."
             "It is recommended to restart the node."
             "\nDo you want to restart the primary node now (yes/no)? ")
         # Allow at most two invalid answers before treating the last
         # answer as final (a still-invalid answer fails the YES check
         # below and aborts).
         count_f = 2
         while count_f:
             if (flag.upper() != "YES" and flag.upper() != "NO"
                     and flag.upper() != "Y" and flag.upper() != "N"):
                 count_f -= 1
                 flag = input("Please type 'yes' or 'no': ")
                 continue
             break
         if flag.upper() != "YES" and flag.upper() != "Y":
             GaussLog.exitWithError(ErrorCode.GAUSS_358["GAUSS_35805"] %
                                    flag.upper())
         sshTool = SshTool([self.localhostname])
         # Stop and start every local data directory in turn.
         for i in self.context.hostMapForExist[
                 self.localhostname]['datadir']:
             self.commonOper.stopInstance(self.localhostname, sshTool, i,
                                          self.userProfile)
             self.commonOper.startInstance(i, self.userProfile)
         self.cleanSshToolFile(sshTool)
     else:
         pass
    def generateAndSendXmlFile(self):
        """
        Generate a single-node deploy XML for every new standby host and
        copy it to that host.

        Prepares (and chowns) the local temp file, writes the XML
        produced by ``__generateXml`` per host, creates the remote temp
        dir and scp's the file over.
        """
        self.logger.debug("Start to generateAndSend XML file.\n")

        tempXmlFile = "%s/clusterconfig.xml" % self.tempFileDir
        cmd = "mkdir -p %s; touch %s; cat /dev/null > %s" % \
        (self.tempFileDir, tempXmlFile, tempXmlFile)
        (status, output) = subprocess.getstatusoutput(cmd)

        cmd = "chown -R %s:%s %s" % (self.user, self.group, self.tempFileDir)
        (status, output) = subprocess.getstatusoutput(cmd)

        newHosts = self.context.newHostList
        for host in newHosts:
            # create single deploy xml file for each standby node
            xmlContent = self.__generateXml(host)
            with os.fdopen(
                    os.open("%s" % tempXmlFile, os.O_WRONLY | os.O_CREAT,
                            stat.S_IWUSR | stat.S_IRUSR), 'w') as fo:
                fo.write(xmlContent)
                # Fix: dropped the redundant fo.close() -- the with-block
                # already closes the file.
            # send single deploy xml file to each standby node
            # Fix: SshTool expects a list of hosts, not a bare string.
            sshTool = SshTool([host])
            retmap, output = sshTool.getSshStatusOutput(
                "mkdir -p %s" % self.tempFileDir, [host], self.envFile)
            retmap, output = sshTool.getSshStatusOutput(
                "chown %s:%s %s" % (self.user, self.group, self.tempFileDir),
                [host], self.envFile)
            sshTool.scpFiles("%s" % tempXmlFile, "%s" % tempXmlFile, [host],
                             self.envFile)
            self.cleanSshToolFile(sshTool)

        self.logger.debug("End to generateAndSend XML file.\n")
 def addTrustOnExistNodes(self):
     """
     Add pg_hba.conf trust entries for every new node's back IP on each
     existing standby node via gs_guc.
     """
     self.logger.debug("Start to set host trust on existing node.")
     newNodeNames = []
     trustCmd = []
     for ip in self.context.newHostList:
         newNodeNames.append(self.context.backIpNameMap[ip])
         trustCmd.append('host    all    all    %s/32    trust' % ip)
     # Existing nodes = all cluster nodes minus the ones being added.
     existNodes = list(
         set(self.context.nodeNameList).difference(set(newNodeNames)))
     for node in existNodes:
         dataNode = self.context.clusterInfoDict[node]["dataNode"]
         cmd = "".join(
             "source %s; gs_guc set -D %s -h '%s';" %
             (self.envFile, dataNode, trust) for trust in trustCmd)
         sshTool = SshTool([node])
         resultMap, outputCollect = sshTool.getSshStatusOutput(
             cmd, [node], self.envFile)
         self.cleanSshToolFile(sshTool)
     self.logger.debug("End to set host trust on existing node.")
    def sendSoftToHosts(self):
        """
        Create the package directory on each new host, send the
        installation package there, and make the package tree executable.
        """
        self.logger.debug("Start to send soft to each standby nodes.\n")
        hostList = self.context.newHostList

        sshTool = SshTool(hostList)

        srcFile = self.context.packagepath
        targetDir = os.path.realpath(os.path.join(srcFile, "../"))

        ## mkdir package dir and send package to remote nodes.
        sshTool.executeCommand("mkdir -p %s" % srcFile, "",
                               DefaultValue.SUCCESS, hostList)
        sshTool.scpFiles(srcFile, targetDir, hostList)

        ## change mode of package dir to set privileges for users
        # Fix: os.path.split always returns a 2-tuple, so the original
        # "len(tPathList) > 2" branch was dead code; chmod targetDir
        # directly.  Also dropped the unused user/group format args.
        changeModCmd = "chmod -R a+x {srcFile}".format(srcFile=targetDir)
        sshTool.executeCommand(changeModCmd, "", DefaultValue.SUCCESS,
                               hostList)
        self.logger.debug("End to send soft to each standby nodes.\n")
        self.cleanSshToolFile(sshTool)
 def initSshTool(self, nodeNames, timeout=0):
     """
     function: Init ssh tool
     input : nodeNames - list of node names the tool will manage
             timeout   - SSH timeout in seconds; 0 uses the SshTool default
     output: NA
     """
     self.sshTool = SshTool(nodeNames, self.logger.logFile, timeout)
Esempio n. 9
0
 def doStartCluster(self):
     """
     function: do start cluster
     input: NA
     output: NA

     Starts either the whole cluster or a single node (when
     g_opts.nodeName is set); for a full cluster start, polls gs_om
     until cluster_state is Normal or the timeout elapses.
     """
     self.logger.debug("Operating: Starting.")
     # Specifies the stop node
     # Gets the specified node id
     startType = "node" if self.context.g_opts.nodeName != "" else "cluster"
     # Perform a start operation
     self.logger.log("Starting %s." % startType)
     self.logger.log("=========================================")
     hostName = DefaultValue.GetHostIpOrName()
     # get the newest dynamic config and send to other node
     self.clusterInfo.checkClusterDynamicConfig(self.context.user, hostName)
     if self.context.g_opts.nodeName == "":
         hostList = self.clusterInfo.getClusterNodeNames()
     else:
         hostList = []
         hostList.append(self.context.g_opts.nodeName)
     # The SSH tool spans ALL cluster nodes even for a single-node
     # start; the start command itself is only dispatched to hostList.
     self.sshTool = SshTool(self.clusterInfo.getClusterNodeNames(), None,
                            DefaultValue.TIMEOUT_CLUSTER_START)
     if self.time_out is None:
         time_out = DefaultValue.TIMEOUT_CLUSTER_START
     else:
         time_out = self.time_out
     cmd = "source %s; %s -U %s -R %s -t %s --security-mode=%s" % (
     self.context.g_opts.mpprcFile,
     OMCommand.getLocalScript("Local_StartInstance"),
     self.context.user, self.context.clusterInfo.appPath, time_out,
     self.context.g_opts.security_mode)
     if self.dataDir != "":
         cmd += " -D %s" % self.dataDir
     starttime = time.time()
     (statusMap, output) = self.sshTool.getSshStatusOutput(cmd, hostList)
     for nodeName in hostList:
         if statusMap[nodeName] != 'Success':
             raise Exception(
                 ErrorCode.GAUSS_536["GAUSS_53600"] % (cmd, output))
     # Benign message emitted when an instance was already running.
     if re.search("another server might be running", output):
         self.logger.log(output)
     if startType == "cluster":
         # Poll the cluster state until Normal or until time_out elapses.
         cmd = "source %s; gs_om -t status|grep cluster_state|grep Normal" \
               % self.context.g_opts.mpprcFile
         while time.time() <= time_out + starttime:
             status = subprocess.getstatusoutput(cmd)[0]
             if status != 0:
                 self.logger.log("Waiting for check cluster state...")
                 time.sleep(5)
             else:
                 break
         if time.time() > time_out + starttime:
             raise Exception(ErrorCode.GAUSS_516["GAUSS_51610"] % "cluster"
                             + "Start timeout, please check the process"
                               " status manually")
     self.logger.log("=========================================")
     self.logger.log("Successfully started.")
     self.logger.debug("Operation succeeded: Start.")
Esempio n. 10
0
 def buildInstance(self, host, datanode, mode, env):
     """
     Run ``gs_ctl build`` for a datanode on the given host in the
     requested build mode.
     """
     command = "source %s ; gs_ctl build -D %s -M %s" % (env, datanode, mode)
     self.logger.debug(command)
     sshTool = SshTool([host])
     resultMap, outputCollect = sshTool.getSshStatusOutput(
         command, [host], env)
     self.logger.debug(host)
     self.logger.debug(outputCollect)
    def checkUserAndGroupExists(self):
        """
        Check that the OS user and group exist and match on the local
        node and on every new standby node.

        Exits with GAUSS_35704 when the user/group is missing anywhere,
        or with a dedicated error when the local user is not in the
        expected group.
        """
        inputUser = self.user
        inputGroup = self.group

        user_group_id = ""
        isUserExits = False
        localHost = socket.gethostname()
        for user in pwd.getpwall():
            if user.pw_name == self.user:
                user_group_id = user.pw_gid
                isUserExits = True
                break
        if not isUserExits:
            GaussLog.exitWithError(ErrorCode.GAUSS_357["GAUSS_35704"] \
                % ("User", self.user, localHost))

        isGroupExits = False
        group_id = ""
        for group in grp.getgrall():
            if group.gr_name == self.group:
                group_id = group.gr_gid
                isGroupExits = True
                # Fix: stop at the first match, consistent with the
                # user loop above.
                break
        if not isGroupExits:
            GaussLog.exitWithError(ErrorCode.GAUSS_357["GAUSS_35704"] \
                % ("Group", self.group, localHost))
        if user_group_id != group_id:
            GaussLog.exitWithError("User [%s] is not in the group [%s]."\
                 % (self.user, self.group))

        hostNames = self.context.newHostList
        envfile = self.envFile
        sshTool = SshTool(hostNames)

        # get username in the other standby nodes
        getUserNameCmd = "cat /etc/passwd | grep -w %s" % inputUser
        resultMap, outputCollect = sshTool.getSshStatusOutput(
            getUserNameCmd, [], envfile)

        for hostKey in resultMap:
            if resultMap[hostKey] == STATUS_FAIL:
                self.cleanSshToolFile(sshTool)
                GaussLog.exitWithError(ErrorCode.GAUSS_357["GAUSS_35704"] \
                       % ("User", self.user, hostKey))

        # get groupname in the other standby nodes
        getGroupNameCmd = "cat /etc/group | grep -w %s" % inputGroup
        resultMap, outputCollect = sshTool.getSshStatusOutput(
            getGroupNameCmd, [], envfile)
        for hostKey in resultMap:
            if resultMap[hostKey] == STATUS_FAIL:
                self.cleanSshToolFile(sshTool)
                GaussLog.exitWithError(ErrorCode.GAUSS_357["GAUSS_35704"] \
                       % ("Group", self.group, hostKey))
        self.cleanSshToolFile(sshTool)
 def stopInstance(self, host, datanode, env):
     """
     Stop a datanode instance on the given host via ``gs_ctl stop``.
     """
     command = "source %s ; gs_ctl stop -D %s" % (env, datanode)
     sshTool = SshTool([host])
     resultMap, outputCollect = sshTool.getSshStatusOutput(command,
                                                           [host], env)
     self.logger.debug(host)
     self.logger.debug(outputCollect)
     self.cleanSshToolTmpFile(sshTool)
Esempio n. 13
0
 def clearTmpFile(self):
     """
     Remove the expansion temporary directory on every cluster node
     after a successful expansion.
     """
     self.logger.debug("start to delete temporary file")
     nodes = self.context.nodeNameList
     sshTool = SshTool(nodes)
     clearCmd = "source %s ; rm -rf %s" % (self.envFile, self.tempFileDir)
     result, output = sshTool.getSshStatusOutput(clearCmd, nodes,
                                                 self.envFile)
     self.logger.debug(output)
Esempio n. 14
0
 def reloadPrimaryConf(self):
     """
     Reload the configuration of the primary datanode via gs_ctl.
     """
     primaryHost = self.getPrimaryHostName()
     dataNode = self.context.clusterInfoDict[primaryHost]["dataNode"]
     command = "gs_ctl reload -D %s " % dataNode
     sshTool = SshTool([primaryHost])
     self.logger.debug(command)
     resultMap, outputCollect = sshTool.getSshStatusOutput(
         command, [primaryHost], self.envFile)
     self.logger.debug(outputCollect)
Esempio n. 15
0
 def startOmCluster(self, host, env):
     """
     Start the whole cluster via ``gs_om -t start`` on the given host.
     """
     command = "source %s ; gs_om -t start" % env
     self.logger.debug(command)
     sshTool = SshTool([host])
     resultMap, outputCollect = sshTool.getSshStatusOutput(
         command, [host], env)
     self.logger.debug(host)
     self.logger.debug(outputCollect)
    def checkLocalModeOnStandbyHosts(self):
        """
        expansion the installed standby node. check standby database.
        1. if the database is normal
        2. if the databases version are same before existing and new

        Exits with GAUSS_35703 for a bad instance role and GAUSS_35705
        for a version mismatch.
        """
        standbyHosts = self.context.newHostList
        envfile = self.envFile

        self.logger.log("Checking the database with locale mode.")
        for host in standbyHosts:
            hostName = self.context.backIpNameMap[host]
            dataNode = self.context.clusterInfoDict[hostName]["dataNode"]
            insType, dbStat = self.commonGsCtl.queryInstanceStatus(
                hostName, dataNode, self.envFile)
            if insType not in (MODE_PRIMARY, MODE_STANDBY, MODE_NORMAL):
                GaussLog.exitWithError(
                    ErrorCode.GAUSS_357["GAUSS_35703"] %
                    (hostName, self.user, dataNode, dataNode))

        allHostIp = [self.context.localIp] + list(standbyHosts)
        sshTool = SshTool(allHostIp)
        # get version in the nodes
        getversioncmd = "gaussdb --version"
        resultMap, outputCollect = sshTool.getSshStatusOutput(
            getversioncmd, [], envfile)
        self.cleanSshToolFile(sshTool)

        # Parse blocks of "[SUCCESS] <ip>:" markers followed by version
        # lines.  Fix: ignore any output before the first marker instead
        # of raising NameError on an unset ipKey.
        versionDic = {}
        ipKey = None
        for verline in outputCollect.splitlines():
            if verline.startswith('[SUCCESS]'):
                ipKey = verline[10:-1]
            elif ipKey is not None:
                preVersion = verline.split(' ')
                versionInfo = preVersion[4]
                versionDic[ipKey] = versionInfo[:-2]

        # Fix: compare against the local node's version explicitly
        # instead of relying on dict iteration order putting localIp
        # first.
        versionCompare = versionDic.get(self.context.localIp, "")
        for hostip in versionDic:
            if hostip == self.context.localIp:
                continue
            if versionDic[hostip] != versionCompare:
                GaussLog.exitWithError(ErrorCode.GAUSS_357["GAUSS_35705"] \
                   %(hostip, versionDic[hostip]))

        self.logger.log("Successfully checked the database with locale mode.")
Esempio n. 17
0
 def queryOmCluster(self, host, env):
     """
     Return the detailed cluster status reported by
     ``gs_om -t status --detail`` on the given host.
     """
     command = "source %s ; gs_om -t status --detail" % env
     sshTool = SshTool([host])
     resultMap, outputCollect = sshTool.getSshStatusOutput(
         command, [host], env)
     self.logger.debug(host)
     self.logger.debug(outputCollect)
     return outputCollect
Esempio n. 18
0
 def doQuery(self):
     """
     Query and display cluster information, optionally writing it to
     the configured output file.
     """
     nodeNames = self.context.clusterInfo.getClusterNodeNames()
     sshtool = SshTool(nodeNames)
     localName = DefaultValue.GetHostIpOrName()
     cmd = queryCmd()
     outFile = self.context.g_opts.outFile
     if outFile != "":
         cmd.outputFile = outFile
     self.context.clusterInfo.queryClsInfo(localName, sshtool,
                                           self.context.mpprcFile, cmd)
Esempio n. 19
0
    def checkAllStandbyState(self):
        """
        check all standby state whether switchover is happening

        For nodes being dropped (and still reachable) it additionally
        stops their instances and removes their GRPC ssl certificates.
        """
        # Existing nodes: only verify that no switchover/failover runs.
        for hostNameLoop in self.context.hostMapForExist.keys():
            sshtool_host = SshTool([hostNameLoop])
            for i in self.context.hostMapForExist[hostNameLoop]['datadir']:
                # check whether switchover/failover is happening
                self.commonOper.checkStandbyState(hostNameLoop, i,
                                                  sshtool_host,
                                                  self.userProfile)
            self.cleanSshToolFile(sshtool_host)

        for hostNameLoop in self.context.hostMapForDel.keys():
            if hostNameLoop not in self.context.failureHosts:
                sshtool_host = SshTool([hostNameLoop])
                for i in self.context.hostMapForDel[hostNameLoop]['datadir']:
                    # check whether switchover/failover is happening
                    self.commonOper.checkStandbyState(hostNameLoop, i,
                                                      sshtool_host,
                                                      self.userProfile, True)
                    self.commonOper.stopInstance(hostNameLoop, sshtool_host, i,
                                                 self.userProfile)
                # Delete GRPC ssl certificates (keeping openssl.cnf) on
                # the dropped node; a failure is logged but not fatal.
                cmdDelCert = "ls %s/share/sslcert/grpc/* | " \
                    "grep -v openssl.cnf | xargs rm -rf" % self.appPath
                result, output = sshtool_host.getSshStatusOutput(
                    cmdDelCert, [hostNameLoop], self.userProfile)
                if result[hostNameLoop] != 'Success':
                    self.logger.debug(output)
                    self.logger.log("[gs_dropnode]Failed to delete the GRPC "
                                    "sslcert of %s." % hostNameLoop)
                    self.logger.log("[gs_dropnode]Please check and delete the "
                                    "GRPC sslcert of %s manually." %
                                    hostNameLoop)
                self.cleanSshToolFile(sshtool_host)
            else:
                # Unreachable node: the operator must clean up manually.
                self.logger.log("[gs_dropnode]Cannot connect %s. Please check "
                                "and delete the GRPC sslcert of %s manually." %
                                (hostNameLoop, hostNameLoop))
 def checkTmpDir(self, hostName):
     """
     Ensure the tmp directory (taken from the environment) exists on the
     given host, creating it when missing.
     """
     tmpDir = os.path.realpath(DefaultValue.getTmpDirFromEnv())
     checkCmd = 'if [ ! -d "%s" ]; then exit 1;fi;' % (tmpDir)
     sshTool = SshTool([hostName])
     resultMap, outputCollect = sshTool.getSshStatusOutput(
         checkCmd, [hostName], self.envFile)
     if resultMap[hostName] == STATUS_FAIL:
         # Fix: the original message had a %s placeholder but no
         # argument, so the host name was never interpolated.
         self.logger.debug("Node [%s] does not have tmp dir. need to fix."
                           % hostName)
         fixCmd = "mkdir -p %s" % (tmpDir)
         sshTool.getSshStatusOutput(fixCmd, [hostName], self.envFile)
     self.cleanSshToolFile(sshTool)
Esempio n. 21
0
    def checkAllStandbyState(self):
        """
        Verify that no switchover/failover is in progress on any node,
        then stop the instances on the nodes being dropped.
        """
        # Existing nodes: only verify their state.
        for hostName in self.context.hostMapForExist.keys():
            tool = SshTool([hostName])
            for dataDir in self.context.hostMapForExist[hostName]['datadir']:
                # check whether switchover/failover is happening
                self.commonOper.checkStandbyState(hostName, dataDir, tool,
                                                  self.userProfile)
            self.cleanSshToolFile(tool)

        # Nodes to be dropped: verify state, then stop their instances.
        for hostName in self.context.hostMapForDel.keys():
            if hostName in self.context.failureHosts:
                continue
            tool = SshTool([hostName])
            for dataDir in self.context.hostMapForDel[hostName]['datadir']:
                # check whether switchover/failover is happening
                self.commonOper.checkStandbyState(hostName, dataDir, tool,
                                                  self.userProfile, True)
                self.commonOper.stopInstance(hostName, tool, dataDir,
                                             self.userProfile)
            self.cleanSshToolFile(tool)
Esempio n. 22
0
 def dropNodeOnAllHosts(self):
     """
     drop the target node on the other host

     For every remaining host: back up its configuration, parse the
     replication settings that reference the dropped node(s), and apply
     the new pgsql conf -- rolling back from the backup when a set step
     fails.
     """
     for hostNameLoop in self.context.hostMapForExist.keys():
         sshtool_host = SshTool([hostNameLoop])
         # backup
         backupfile = self.commonOper.backupConf(self.gphomepath, self.user,
                                                 hostNameLoop,
                                                 self.userProfile,
                                                 sshtool_host,
                                                 self.pghostPath)
         self.logger.log("[gs_dropnode]The backup file of " + hostNameLoop +
                         " is " + backupfile)
         if hostNameLoop == self.localhostname:
             # Remember the primary's backup for later restore paths.
             self.backupFilePrimary = backupfile
         # Index into the parallel 'dn_id'/'port' lists for this host.
         indexForuse = 0
         for i in self.context.hostMapForExist[hostNameLoop]['datadir']:
             # parse
             resultDict = self.commonOper.parseConfigFile(
                 hostNameLoop, i, self.dnIdForDel,
                 self.context.hostIpListForDel, sshtool_host, self.envFile)
             resultDictForRollback = self.commonOper.parseBackupFile(
                 hostNameLoop, backupfile,
                 self.context.hostMapForExist[hostNameLoop]['dn_id']
                 [indexForuse], resultDict['replStr'], sshtool_host,
                 self.envFile)
             if hostNameLoop == self.localhostname:
                 self.resultDictOfPrimary.append(resultDict)
             # try set
             try:
                 self.commonOper.SetPgsqlConf(
                     resultDict['replStr'], hostNameLoop, i,
                     resultDict['syncStandbyStr'], sshtool_host,
                     self.userProfile,
                     self.context.hostMapForExist[hostNameLoop]['port']
                     [indexForuse], '', self.context.flagOnlyPrimary)
             except ValueError:
                 # Setting failed: restore the replication string taken
                 # from the backup file.
                 self.logger.log("[gs_dropnode]Rollback pgsql process.")
                 self.commonOper.SetPgsqlConf(
                     resultDict['replStr'], hostNameLoop, i,
                     resultDict['syncStandbyStr'], sshtool_host,
                     self.userProfile,
                     self.context.hostMapForExist[hostNameLoop]['port']
                     [indexForuse],
                     resultDictForRollback['rollbackReplStr'])
             indexForuse += 1
         self.cleanSshToolFile(sshtool_host)
Esempio n. 23
0
 def doRefreshConf(self):
     """
     Generate the dynamic configuration file on all cluster nodes;
     skipped entirely for single-node clusters.
     """
     if self.context.clusterInfo.isSingleNode():
         self.logger.log(
             "No need to generate dynamic configuration file for one node.")
         return
     self.logger.log("Generating dynamic configuration file for all nodes.")
     localName = DefaultValue.GetHostIpOrName()
     tool = SshTool(self.context.clusterInfo.getClusterNodeNames())
     self.context.clusterInfo.createDynamicConfig(self.context.user,
                                                  localName, tool)
     self.logger.log("Successfully generated dynamic configuration file.")
Esempio n. 24
0
 def operationOnlyOnPrimary(self):
     """
     Apply (or roll back) the pg_hba.conf changes recorded from the
     primary node.

     NOTE(review): despite the name, this iterates over every existing
     host, and the per-host SshTool is created but never passed to
     SetPghbaConf -- only cleaned up afterwards.  Confirm whether the
     per-host loop and the unused tool are intended.
     """
     for hostNameLoop in self.context.hostMapForExist.keys():
         sshtool_host = SshTool([hostNameLoop])
         try:
             self.commonOper.SetPghbaConf(
                 self.userProfile, hostNameLoop,
                 self.resultDictOfPrimary[0]['pghbaStr'], False)
         except ValueError:
             # First attempt failed: re-apply with the rollback flag.
             self.logger.log("[gs_dropnode]Rollback pghba conf.")
             self.commonOper.SetPghbaConf(
                 self.userProfile, hostNameLoop,
                 self.resultDictOfPrimary[0]['pghbaStr'], True)
         self.cleanSshToolFile(sshtool_host)
 def queryOmCluster(self, host, env):
     """
     Return the detailed cluster status from ``gs_om -t status --detail``,
     exiting with an error message when the query fails on the host.
     """
     command = "source %s ; gs_om -t status --detail" % env
     sshTool = SshTool([host])
     resultMap, outputCollect = sshTool.getSshStatusOutput(command,
                                                           [host], env)
     self.logger.debug(host)
     self.logger.debug(outputCollect)
     if resultMap[host] == STATUS_FAIL:
         GaussLog.exitWithError("Query cluster failed. Please check "
                                "the cluster status or "
                                "source the environmental variables of "
                                "user [%s]." % self.user)
     self.cleanSshToolTmpFile(sshTool)
     return outputCollect
Esempio n. 26
0
 def doStatus(self):
     """
     Show the status of the whole cluster or of a single node,
     optionally with details, writing to the configured output file.
     """
     hostName = DefaultValue.GetHostIpOrName()
     sshtool = SshTool(self.context.clusterInfo.getClusterNodeNames())
     # Resolve the node name (if given) to its node id.
     nodeId = 0
     nodeName = self.context.g_opts.nodeName
     if nodeName != "":
         for dbnode in self.context.clusterInfo.dbNodes:
             if dbnode.name == nodeName:
                 nodeId = dbnode.id
         if nodeId == 0:
             raise Exception(ErrorCode.GAUSS_516["GAUSS_51619"] % nodeName)
     cmd = queryCmd()
     outFile = self.context.g_opts.outFile
     cmd.outputFile = outFile if outFile != "" else self.logger.logFile
     if self.context.g_opts.show_detail:
         cmd.dataPathQuery = True
         if (self.context.clusterInfo.clusterType ==
                 DefaultValue.CLUSTER_TYPE_SINGLE_PRIMARY_MULTI_STANDBY):
             cmd.azNameQuery = True
     else:
         if nodeId > 0:
             self.context.clusterInfo.queryNodeInfo(sshtool, hostName,
                                                    nodeId, cmd.outputFile)
             return
         az_name = self.context.g_opts.azName
         if az_name:
             self.context.clusterInfo.queryNodeInfo(sshtool, hostName,
                                                    nodeId, cmd.outputFile,
                                                    az_name)
             return
         if self.context.g_opts.showAll:
             self.context.clusterInfo.queryNodeInfo(sshtool, hostName,
                                                    nodeId, cmd.outputFile)
             return
         cmd.clusterStateQuery = True
     self.context.clusterInfo.queryClsInfo(hostName, sshtool,
                                           self.context.mpprcFile, cmd)
    def setGUCOnClusterHosts(self, hostNames=None):
        """
        Apply the GUC configuration commands on the given hosts (all
        cluster nodes when *hostNames* is omitted).

        For each host a guc.sh script is generated locally, copied over
        via scp, and executed through SSH.

        :param hostNames: list of host names; defaults to
                          ``self.context.nodeNameList``.  The default
                          was changed from a mutable ``[]`` to ``None``
                          to avoid the shared-default-argument pitfall.
        """
        gucDict = self.getGUCConfig()

        tempShFile = "%s/guc.sh" % self.tempFileDir

        if not hostNames:
            hostNames = self.context.nodeNameList

        for host in hostNames:
            # GUC commands must run with the cluster env sourced.
            command = "source %s ; " % self.envFile + gucDict[host]
            self.logger.debug(command)

            sshTool = SshTool([host])

            # create temporary dir to save guc command bashfile.
            mkdirCmd = "mkdir -m a+x -p %s; chown %s:%s %s" % \
                (self.tempFileDir, self.user, self.group, self.tempFileDir)
            retmap, output = sshTool.getSshStatusOutput(
                mkdirCmd, [host], self.envFile)

            subprocess.getstatusoutput(
                "mkdir -m a+x -p %s; touch %s; "
                "                cat /dev/null > %s" %
                (self.tempFileDir, tempShFile, tempShFile))
            with os.fdopen(
                    os.open("%s" % tempShFile, os.O_WRONLY | os.O_CREAT,
                            stat.S_IWUSR | stat.S_IRUSR), 'w') as fo:
                fo.write("#bash\n")
                fo.write(command)
                # Fix: dropped the redundant fo.close() -- the
                # with-block already closes the file.

            # send guc command bashfile to each host and execute it.
            sshTool.scpFiles("%s" % tempShFile, "%s" % tempShFile, [host],
                             self.envFile)

            resultMap, outputCollect = sshTool.getSshStatusOutput(
                "sh %s" % tempShFile, [host], self.envFile)

            self.logger.debug(outputCollect)
            self.cleanSshToolFile(sshTool)
Esempio n. 28
0
 def operationOnlyOnPrimary(self):
     """
     Apply the recorded pg_hba.conf change on the local (primary) node,
     rolling it back when the first attempt raises ValueError.
     """
     localName = self.localhostname
     tool = SshTool([localName])
     pghbaStr = self.resultDictOfPrimary[0]['pghbaStr']
     try:
         self.commonOper.SetPghbaConf(self.userProfile, localName,
                                      pghbaStr, False,
                                      self.context.flagOnlyPrimary)
     except ValueError:
         self.logger.log("[gs_dropnode]Rollback pghba conf.")
         self.commonOper.SetPghbaConf(self.userProfile, localName,
                                      pghbaStr, True,
                                      self.context.flagOnlyPrimary)
     self.cleanSshToolFile(tool)
Esempio n. 29
0
    def addStandbyIpInPrimaryConf(self):
        """
        add standby hosts ip in primary node pg_hba.conf

        Builds one ``gs_guc set -h`` call per new standby back IP and
        runs them all on the primary node over SSH.
        """

        standbyHosts = self.context.newHostList
        primaryHost = self.getPrimaryHostName()
        command = ''
        for host in standbyHosts:
            hostName = self.context.backIpNameMap[host]
            dataNode = self.context.clusterInfoDict[hostName]["dataNode"]
            # NOTE(review): the backslash continuation below embeds the
            # next line's leading spaces inside the pg_hba entry;
            # pg_hba.conf tolerates extra whitespace, so it is harmless,
            # but the literal should not be re-wrapped carelessly.
            command += "gs_guc set -D %s -h 'host    all    all    %s/32   \
                 trust';" % (dataNode, host)
        self.logger.debug(command)
        sshTool = SshTool([primaryHost])
        resultMap, outputCollect = sshTool.getSshStatusOutput(command, 
        [primaryHost], self.envFile)
        self.logger.debug(outputCollect)
 def clearTmpFile(self):
     """
     Delete the expansion temporary directory on every cluster node.

     Fix: the original looped over the node list and re-ran the same
     cluster-wide command once per node (O(n^2) SSH work); a single
     pass over all nodes achieves the same end state.
     """
     self.logger.debug("start to delete temporary file %s" %
                       self.tempFileDir)
     clearCmd = "if [ -d '%s' ];then rm -rf %s;fi" % \
         (self.tempFileDir, self.tempFileDir)
     hostNames = self.context.nodeNameList
     sshTool = None
     try:
         sshTool = SshTool(hostNames)
         result, output = sshTool.getSshStatusOutput(
             clearCmd, hostNames, self.envFile)
         self.logger.debug(output)
     except Exception as e:
         # Best-effort cleanup: log and continue.
         self.logger.debug(str(e))
     finally:
         if sshTool is not None:
             self.cleanSshToolFile(sshTool)