def doCheck(self): parRes = "" # generate hostfile file, server node name self.genhostfile(self.nodes) # shell name shellName = "getClusterInfo.sh" # the path of script shellName = os.path.join(SHELLPATH, shellName) # judge permission self.checkFilePermission(shellName) g_file.replaceFileLineContent('omm', self.user, shellName) g_file.replaceFileLineContent( '\/opt\/huawei\/Bigdata\/mppdb\/.mppdbgs_profile', self.mpprcFile.replace('/', '\/'), shellName) # the shell command exectueCmd = "cd %s && sh %s -p %s" % (SHELLPATH, shellName, self.port) self.result.raw = exectueCmd # Call the shell script SharedFuncs.runShellCmd(exectueCmd, self.user, self.mpprcFile) self.result.rst = ResultStatus.OK pacakageName = os.path.join(self.outPath, "checkcollector_%s" % self.context.checkID) # crate tar package g_file.compressZipFiles(pacakageName, os.path.join(SHELLPATH, 'out')) # Check the result information parRes += "The inspection(checkcollector) has been completed!\n" parRes += "Please perform decompression firstly." \ " The log is saved in '%s.zip'" % (pacakageName) self.result.val = parRes
def doSet(self):
    """
    Remove om_monitor entries from the crontab.

    Reads the current crontab, strips 'om_monitor' lines via a temp
    file, and re-installs it; the temp file is cleaned up on all paths.
    """
    # Bug fix: os.getuid is a function object; 'os.getuid == 0' was
    # always False, so root never used the '-u user' form.
    if os.getuid() == 0:
        cmd = "crontab -l -u '%s'" % self.crontabUser
    else:
        cmd = "crontab -l"
    (status, output) = subprocess.getstatusoutput(cmd)
    if status != 0 or output.find('om_monitor') < 0:
        self.result.val = "No gauss process in crontab.\n"
        return
    tmpCrondFileName = "gauss_crond_tmp"
    tmpCrondFile = os.path.join(self.tmpPath, tmpCrondFileName)
    try:
        # snapshot the crontab, then delete the om_monitor lines in place
        SharedFuncs.createFile(tmpCrondFile, self.tmpPath)
        SharedFuncs.writeFile(tmpCrondFile, output, self.tmpPath)
        cmd = "sed -i '/om_monitor/d' %s" % tmpCrondFile
        SharedFuncs.runShellCmd(cmd)
        cmd = "crontab %s " % tmpCrondFile
        # Bug fix: same os.getuid() call bug; also "su - user 'cmd'"
        # lacked -c, so the command string was not executed by su.
        if os.getuid() == 0:
            cmd = "su - %s -c '%s'" % (self.crontabUser, cmd)
        (status, output) = subprocess.getstatusoutput(cmd)
        if status != 0:
            self.result.val = "Failed to cleaned om_monitor in crontab." \
                              " Error: %s\n" % output + "The cmd is %s " \
                              % cmd
        else:
            self.result.val = "Successfully to cleaned om_monitor " \
                              "in crontab.\n"
        SharedFuncs.cleanFile(tmpCrondFile)
    except Exception as e:
        # best-effort cleanup, then surface the original failure
        if os.path.exists(tmpCrondFile):
            SharedFuncs.cleanFile(tmpCrondFile)
        raise Exception(str(e))
def doCheck(self):
    """
    Check oom_adj of key cluster processes; report NG when any process
    has a negative value (i.e. is shielded from the OOM killer).
    """
    negativeAdj = {}
    detail = ""
    processNames = ['om_monitor', 'cm_agent', 'gaussdb', 'cm_server',
                    'gtm', 'etcd']
    gaussdbBin = os.path.join(self.cluster.appPath, "bin/gaussdb")
    for name in processNames:
        # match gaussdb by its full binary path to avoid false hits
        target = gaussdbBin if name == 'gaussdb' else name
        pidCmd = "ps ux| grep '%s'|grep -v 'grep'|awk '{print " \
                 "$2}'" % target
        for pid in SharedFuncs.runShellCmd(pidCmd).splitlines():
            adjValue = SharedFuncs.runShellCmd("cat /proc/%s/oom_adj" % pid)
            if int(adjValue) < 0:
                negativeAdj["%s_%s" % (name, pid)] = adjValue
    if negativeAdj:
        self.result.rst = ResultStatus.NG
        for key, value in negativeAdj.items():
            detail += "%s : %s \n" % (key, value)
        self.result.val = "There are processes omm_adj value " \
                          "less than 0 \n%s" % (detail)
    else:
        self.result.rst = ResultStatus.OK
        self.result.val = "All key processes omm_adj value" \
                          " are not less than 0"
def getLocalPrimaryDNid(self, nodeInfo):
    """
    function: Get local primary DNid
    input: NA
    output: NA
    """
    statusFile = os.path.join(self.tmpPath, "gauss_dn_status.dat")
    try:
        # Query the current node's instance status via cm_ctl
        queryCmd = ClusterCommand.getQueryStatusCmd(self.user,
                                                    nodeInfo.name,
                                                    statusFile)
        SharedFuncs.runShellCmd(queryCmd, self.user, self.mpprcFile)
        # Parse the status file, then drop it
        clusterStatus = DbClusterStatus()
        clusterStatus.initFromFile(statusFile)
        if os.path.exists(statusFile):
            os.remove(statusFile)
        # Collect the ids of all Primary datanode instances
        return [dn.instanceId
                for node in clusterStatus.dbNodes
                for dn in node.datanodes
                if dn.status == 'Primary']
    except Exception as e:
        if os.path.exists(statusFile):
            os.remove(statusFile)
        raise Exception(str(e))
def getTestFile(self):
    """
    Stage the architecture-specific speed_test binary by copying the
    matching variant over the generic name; raises on unknown machines.
    """
    arch = platform.machine()
    testSpeedFile = "%s/lib/checknetspeed/speed_test" \
                    % self.context.basePath
    if arch == "x86_64":
        copyCmd = "cp -p %s_x86 %s" % (testSpeedFile, testSpeedFile)
    # debian: deepin Maipo: NOE Kylin
    elif arch == "aarch64":
        copyCmd = "cp -p %s_arm %s" % (testSpeedFile, testSpeedFile)
    else:
        raise Exception(ErrorCode.GAUSS_530["GAUSS_53017"] % arch)
    SharedFuncs.runShellCmd(copyCmd)
def SetIOSchedulers(self, devname, expectedScheduler):
    """
    function : Set IO Schedulers
    input : String
    output : NA
    """
    (THPFile, initFile) = SharedFuncs.getTHPandOSInitFile()
    # apply the scheduler now ...
    applyNow = " echo %s >> /sys/block/%s/queue/scheduler" % (
        expectedScheduler, devname)
    # ... and append the same command to the OS init file so it persists
    persist = " && echo \"echo %s >> /sys/block/%s/queue/scheduler\" >> %s" \
              % (expectedScheduler, devname, initFile)
    SharedFuncs.runShellCmd(applyNow + persist)
def checkSingleSysTable(self, Instance):
    """
    Collect size, row count and column size for key system tables on a
    single CN/DN instance via gsql.

    Returns: dict mapping table name -> [instanceName, size, line, width]
    Raises: re-raises any failure after removing the temp SQL/out files.
    """
    tablelist = [
        "pg_attribute", "pg_class", "pg_constraint", "pg_partition",
        "pgxc_class", "pg_index", "pg_stats"
    ]
    localPath = os.path.dirname(os.path.realpath(__file__))
    resultMap = {}
    try:
        for i in tablelist:
            # per-table temp files, unique per instance
            sqlFile = "%s/sqlFile_%s_%s.sql" % (self.tmpPath, i,
                                                Instance.instanceId)
            resFile = "%s/resFile_%s_%s.out" % (self.tmpPath, i,
                                                Instance.instanceId)
            g_file.createFile(sqlFile, True, DefaultValue.SQL_FILE_MODE)
            g_file.createFile(resFile, True, DefaultValue.SQL_FILE_MODE)
            g_file.changeOwner(self.user, sqlFile)
            g_file.changeOwner(self.user, resFile)
            # three statements -> three output lines: size, count, width
            sql = "select * from pg_table_size('%s');" % i
            sql += "select count(*) from %s;" % i
            sql += "select * from pg_column_size('%s');" % i
            g_file.writeFile(sqlFile, [sql])
            # -t -A -X: tuples only, unaligned, no psqlrc
            cmd = "gsql -d %s -p %s -f %s --output %s -t -A -X" % (
                self.database, Instance.port, sqlFile, resFile)
            if (self.mpprcFile != "" and self.mpprcFile is not None):
                cmd = "source '%s' && %s" % (self.mpprcFile, cmd)
            SharedFuncs.runShellCmd(cmd, self.user)
            restule = g_file.readFile(resFile)
            g_file.removeFile(sqlFile)
            g_file.removeFile(resFile)
            size = restule[0].strip()
            line = restule[1].strip()
            width = restule[2].strip()
            Role = ""
            if (Instance.instanceRole == INSTANCE_ROLE_COODINATOR):
                Role = "CN"
            elif (Instance.instanceRole == INSTANCE_ROLE_DATANODE):
                Role = "DN"
            instanceName = "%s_%s" % (Role, Instance.instanceId)
            resultMap[i] = [instanceName, size, line, width]
        return resultMap
    except Exception as e:
        # clean up whichever temp files were created before the failure
        if os.path.exists(sqlFile):
            g_file.removeFile(sqlFile)
        if os.path.exists(resFile):
            g_file.removeFile(resFile)
        raise Exception(str(e))
def getDisk(self):
    """
    Map each physical disk name to the list of its partition device
    paths; disks with no partitions map to their own '/dev/<name>' path.
    """
    diskDic = {}
    listCmd = "fdisk -l 2>/dev/null " \
              "| grep 'Disk /dev/' | grep -v '/dev/mapper/' " \
              "| awk '{ print $2 }'| awk -F'/' '{ print $NF }'| sed s/:$//g"
    for disk in SharedFuncs.runShellCmd(listCmd).splitlines():
        partCmd = "fdisk -l 2>/dev/null | grep '%s'" \
                  "| grep -v '/dev/mapper/'| grep -v 'Disk /dev/'" \
                  "| awk -F ' ' ' {print $1}'" % disk
        partitions = SharedFuncs.runShellCmd(partCmd)
        if partitions:
            diskDic[disk] = partitions.splitlines()
        else:
            diskDic[disk] = "/dev/" + disk
    return diskDic
def getDevices(self):
    """
    Return the physical disks backing the cluster data directories
    (or the /data* directories when no cluster is configured).
    """
    devices = []
    diskDic = self.getDisk()
    if self.cluster:
        pathList = self.obtainDataDir(
            self.cluster.getDbNodeByName(self.host))
    else:
        pathList = self.obtainDiskDir()
    for path in pathList:
        # df error text propagated as a "path" means nothing to check
        if path.find('No such file or directory') >= 0 or path.find(
                'no file systems processed') >= 0:
            self.result.rst = ResultStatus.ERROR
            self.result.val += \
                "There are no cluster and no /data* directory."
            return
        output = SharedFuncs.runShellCmd("df -P -i %s" % path)
        # Filesystem Inodes IUsed IFree IUse% Mounted on
        # /dev/xvda2 2363904 233962 2129942 10% /
        partition = output.split('\n')[-1].split()[0]
        for disk in diskDic.keys():
            if partition in diskDic[disk] and disk not in devices:
                devices.append(disk)
    return devices
def doCheck(self):
    """
    Sample per-device svctm via iostat and flag slow disks.

    A disk is reported slow when its worst sample exceeds self.max AND
    its 10th-worst sample exceeds self.high.
    """
    # Perform 30-pass disk data collection (one iostat sample per second)
    dic = {}
    slowDiskList = []
    cmd = "for varible1 in {1..30}; do iostat -d -x -k 1 1 " \
          "| grep -E -v \"Linux|Device\"|awk 'NF'" \
          "|awk '{print $1,$(NF-1)}'; " \
          "sleep 1;done"
    output = SharedFuncs.runShellCmd(cmd)
    # group the svctm samples by device name
    for line in output.splitlines():
        diskname = line.split()[0]
        svctmValue = line.split()[1]
        if (diskname in dic.keys()):
            diskList = dic[diskname]
            diskList.append(float(svctmValue))
            dic[diskname] = diskList
        else:
            dic[diskname] = [float(svctmValue)]
    for diskname, svctmValues in dic.items():
        diskList = sorted(svctmValues)
        # NOTE(review): diskList[-10] assumes at least 10 samples per
        # disk (30 are collected above) — confirm this holds for devices
        # that appear mid-run.
        if (diskList[-1] > self.max and diskList[-10] > self.high):
            slowDiskList.append(diskname)
    if (slowDiskList):
        self.result.rst = ResultStatus.NG
        self.result.val = "Slow Disk Found:\n%s" % (
            "\n".join(slowDiskList))
    else:
        self.result.rst = ResultStatus.OK
        self.result.val = "No Slow Disk Found"
def doCheck(self):
    """
    Verify that every local primary DN accepts connections via gsql;
    NG when the cluster is abnormal or any instance refuses connections.
    """
    cmd = "gs_om -t status"
    output = SharedFuncs.runShellCmd(cmd, self.user, self.mpprcFile)
    if output.find("Normal") < 0 and output.find("Degraded") < 0:
        self.result.rst = ResultStatus.NG
        self.result.val = "The database can not be connected."
        return
    instanceList = []
    AbnormalInst = []
    # only local primary DNs are probed
    primaryDnList = SharedFuncs.getMasterDnNum(self.user, self.mpprcFile)
    nodeInfo = self.cluster.getDbNodeByName(self.host)
    for dn in nodeInfo.datanodes:
        if dn.instanceId in primaryDnList:
            instanceList.append(dn)
    sqlcmd = "select pg_sleep(1);"
    for instance in instanceList:
        cmd = "gsql -m -d postgres -p %s -c '%s'" % (instance.port, sqlcmd)
        if self.mpprcFile:
            cmd = "source '%s' && %s" % (self.mpprcFile, cmd)
        if os.getuid() == 0:
            cmd = "su - %s -c \"%s\" " % (self.user, cmd)
        self.result.raw += "\n%s" % cmd
        (status, output) = subprocess.getstatusoutput(cmd)
        # Bug fix: use >= 0 — str.find() returns 0 when the error text
        # starts at the beginning of the output, which '> 0' missed.
        if status != 0 or output.find("connect to server failed") >= 0:
            AbnormalInst.append(instance.instanceId)
            # Bug fix: message said "install"; it reports an instance.
            self.result.val += "The instance %s can not be connected.\n" \
                               % instance.instanceId
            self.result.raw += "\nError: %s" % output
    if AbnormalInst:
        self.result.rst = ResultStatus.NG
    else:
        self.result.rst = ResultStatus.OK
        self.result.val = "The database connection is normal."
def checkSar(self, ethName):
    """
    Measure the average RX rate of a NIC with 'sar' and append a
    warning to the shared errorMsg list when it is below
    DEFINE_SPEED_WARNING; on success, record the rate in speedMsg.

    Returns: the (possibly updated) global errorMsg list.
    """
    global errorMsg
    global serviceIP
    global speedMsg
    # column $6 of 'sar -n DEV' Average line is rxkB/s
    cmd = "sar -n DEV 1 10|grep %s|grep Average|awk '{print $6}'" \
          % ethName
    output = SharedFuncs.runShellCmd(cmd)
    if (output.strip() != ""):
        try:
            average = float(output.strip())
        except Exception as e:
            # non-numeric output: report it verbatim
            errorMsg.append(output.strip())
            return errorMsg
    else:
        errorMsg.append("get %s RX average failed. commands: %s"
                        % (serviceIP, cmd))
        return errorMsg
    # %d truncates the float average for display
    string = "%s RX average is %dkB/s" % (serviceIP, average)
    if (average < DEFINE_SPEED_WARNING):
        # errorMsg is shared across checker threads; guard the append
        g_lock.acquire()
        errorMsg.append(string)
        g_lock.release()
    else:
        speedMsg = string
    return errorMsg
def doCheck(self):
    """
    Warn when gaussdb process start times on this node spread over more
    than 5 minutes (suggesting instances were restarted separately).
    """
    self.result.rst = ResultStatus.OK
    startTimes = []
    gaussPro = "gaussdb"
    cmd = "ps -C %s -o lstart,args | grep -v grep | grep -v 'om_monitor'" \
          " 2>/dev/null" % gaussPro
    output = SharedFuncs.runShellCmd(cmd, self.user, self.mpprcFile)
    # lstart format: <weekday> <month> <day> <HH:MM:SS> <year> ...
    for line in output.splitlines()[1:]:
        fields = line.split()
        stamp = "%s-%s-%s %s" % (fields[4], monthdic[fields[1]],
                                 fields[2], fields[3])
        startTimes.append(datetime.strptime(stamp, '%Y-%m-%d %H:%M:%S'))
    if startTimes:
        mintime = min(startTimes)
        maxtime = max(startTimes)
    else:
        mintime = None
        maxtime = None
    if maxtime and mintime:
        spread = maxtime - mintime
        if int(spread.days) > 0 or int(spread.seconds) > 300:
            self.result.rst = ResultStatus.WARNING
            self.result.val = output
        else:
            self.result.rst = ResultStatus.OK
            self.result.val = output
def doCheck(self):
    """
    Count local IP/mask pairs that fall in the business network
    segment; warn when more than one interface shares that segment.
    """
    localIpMasks = []
    routingBinary = self.getBinaryRouting(self.routing)
    # ifconfig output format differs between EulerOS/RHEL7 and older OSes
    if g_Platform.isPlatFormEulerOSOrRHEL7X():
        cmd = "/sbin/ifconfig -a |grep -E '\<inet\>'| awk '{print $2}'"
    else:
        cmd = "/sbin/ifconfig -a |grep 'inet addr'|" \
              " awk '{print $2}'| awk -F ':' '{print $2}'"
    output = SharedFuncs.runShellCmd(cmd)
    for line in output.split('\n'):
        if SharedFuncs.validate_ipv4(line):
            mask = SharedFuncs.getMaskByIP(line)
            localIpMasks.append("%s:%s" % (line, mask))
    self.result.raw = "Routing: %s [bit]%s\nlocalIP:\n%s" % (
        self.routing, routingBinary, "\n".join(localIpMasks))
    matched = [ipMask for ipMask in localIpMasks
               if self.getBinaryRouting(ipMask) == routingBinary]
    if len(matched) > 1:
        self.result.rst = ResultStatus.WARNING
    else:
        self.result.rst = ResultStatus.OK
    self.result.val = "Business network segment IP: " + ", ".join(matched)
def collectIORequest(self):
    """
    function : Collector ioRequest
    input : NA
    output : Dict
    """
    if self.cluster:
        pathList = self.obtainDataDir(
            self.cluster.getDbNodeByName(self.host))
    else:
        pathList = self.obtainDiskDir()
    diskDict = self.obtainDisk()
    # resolve each data path to its backing partition (deduplicated)
    partitions = []
    for path in pathList:
        output = SharedFuncs.runShellCmd("df -h %s" % path)
        partitionName = output.split('\n')[-1].split()[0]
        if partitionName not in partitions:
            partitions.append(partitionName)
    # read nr_requests for every disk that backs one of the partitions
    result = {}
    for partition in partitions:
        for diskName, diskParts in diskDict.items():
            if partition in diskParts:
                request = g_file.readFile(
                    "/sys/block/%s/queue/nr_requests" % diskName)[0]
                result[diskName] = request.strip()
    return result
def doCheck(self):
    """
    Check that max_process_memory of the local CN and primary-DN
    instances does not exceed a recommendation derived from physical
    memory and vm.min_free_kbytes.
    """
    global g_gucDist
    global RecommendedMaxMem
    DNidList = []
    nodeInfo = self.cluster.getDbNodeByName(self.host)
    CN = nodeInfo.coordinators
    # only primary DNs on this node are considered
    for DnInstance in nodeInfo.datanodes:
        if (self.checkMaster(DnInstance.instanceId)):
            DNidList.append(DnInstance)
    if (len(CN) < 1 and len(DNidList) < 1):
        self.result.rst = ResultStatus.NA
        self.result.val = "NA"
        return
    # test database Connection and collect each instance's GUC value
    for Instance in (CN + DNidList):
        if not Instance:
            continue
        sqlcmd = "select pg_sleep(1);"
        output = SharedFuncs.runSqlCmd(sqlcmd, self.user, "",
                                       Instance.port, self.tmpPath,
                                       'postgres', self.mpprcFile)
        self.checkInstanceGucValue(Instance)
    cmd = "/sbin/sysctl -a |grep vm.min_free_kbytes|awk '{print $3}'"
    min_free_kbytes = int(SharedFuncs.runShellCmd(cmd).splitlines()[-1])
    cmd = "free -k | grep 'Mem'| grep -v 'grep'|awk '{print $2}'"
    raw = int(SharedFuncs.runShellCmd(cmd))
    # if vm.min_free_kbytes exceeds 5% of memory, keep a larger reserve
    # (70% instead of 80%), split evenly across instances
    if (min_free_kbytes * 100 > raw * 5):
        RecommendedMaxMem = int((raw * 0.7) // (len(DNidList) + 1))
    else:
        RecommendedMaxMem = int((raw * 0.8) // (len(DNidList) + 1))
    self.result.rst = ResultStatus.OK
    result = "RecommendedMaxMem is %s\n" % RecommendedMaxMem
    # g_gucDist holds instance -> max_process_memory collected above
    for key, value in g_gucDist.items():
        if (int(value) > RecommendedMaxMem):
            self.result.rst = ResultStatus.NG
            result += "%s : %s\n" % (key, value)
    if (self.result.rst == ResultStatus.OK):
        self.result.val = "parameter max_process_memory setting is ok"
    else:
        self.result.val = "parameter max_process_memory " \
                          "setting should not be bigger than " \
                          "recommended(kb):%s:\n%s" % (
                              RecommendedMaxMem, result)
def doCheck(self):
    """
    Validate /etc/ssh/sshd_config entries against self.sshdThreshold.

    ClientAliveInterval is accepted when unset, 0, or >= threshold;
    other items must equal the expected value (case-insensitive).
    Items in WarningItem downgrade mismatches to warnings.
    """
    global setItem
    flag = "Normal"
    resultStr = ""
    self.result.raw = ""
    # mismatches on these items produce warnings rather than errors
    WarningItem = ['PasswordAuthentication', 'UseDNS']
    for item in self.sshdThreshold.keys():
        cmd = "cat /etc/ssh/sshd_config | grep -E %s | grep -v '^#' | " \
              "awk '{print $1,$2}'" % item
        output = SharedFuncs.runShellCmd(cmd)
        self.result.raw += "\n%s" % output
        if (item == "ClientAliveInterval"):
            if (output == ""):
                # unset: no client keep-alive timeout enforced — OK
                continue
            else:
                timeout = int(output.split()[-1])
                # 0 means "never disconnect", which is acceptable
                if (timeout != 0 and timeout < int(
                        self.sshdThreshold[item])):
                    flag = "Abnormal"
                    resultStr += "\nAbnormal reason: %s; expected: %s" % (
                        output, self.sshdThreshold[item])
                    setItem.append(output.split()[0])
        else:
            if (output != ""):
                # compare "Key value" case-insensitively with expectation
                if (str(output.strip()).lower() != str('%s %s' % (
                        item, self.sshdThreshold[item])).lower()):
                    if (item in WarningItem):
                        flag = "Warning"
                        resultStr += "\nWarning reason: %s; expected: %s" \
                                     % (
                                         output, self.sshdThreshold[item])
                    else:
                        flag = "Abnormal"
                        resultStr += "\nAbnormal reason: %s; expected: " \
                                     "%s" \
                                     % (
                                         output, self.sshdThreshold[item])
                    setItem.append(output.split()[0])
            else:
                if (item in WarningItem):
                    flag = "Warning"
                    resultStr += "\nWarning reason: " \
                                 "%s parameter is not set; expected: %s" \
                                 % (
                                     item, self.sshdThreshold[item])
                else:
                    flag = "Abnormal"
                    resultStr += "\nAbnormal reason: " \
                                 "%s parameter is not set; expected: %s" \
                                 % (
                                     item, self.sshdThreshold[item])
                    # NOTE(review): output is "" in this branch, so
                    # output.split()[0] raises IndexError if reached —
                    # confirm intent (likely meant setItem.append(item)).
                    setItem.append(output.split()[0])
    self.result.val = resultStr
    if (flag == "Normal"):
        self.result.rst = ResultStatus.OK
    elif (flag == "Warning" and len(setItem) == 0):
        self.result.rst = ResultStatus.WARNING
    else:
        self.result.rst = ResultStatus.NG
def obtainDiskDir(self):
    """
    Return the mount points backing /data*; falls back to ['/'] when
    no /data* directory exists.
    """
    cmd = "df -h -P /data* | grep -v 'Mounted' | awk '{print $6}'"
    output = SharedFuncs.runShellCmd(cmd)
    if output.lower().find("no such") >= 0:
        return ["/"]
    return output.split('\n')
def doCheck(self):
    """Verify that LANG is set in the login environment."""
    cmd = "source /etc/profile 2>/dev/null; locale | grep '^LANG='"
    output = SharedFuncs.runShellCmd(cmd)
    self.result.raw = cmd
    self.result.rst = ResultStatus.OK if output != "" else ResultStatus.NG
    self.result.val = output
def getDevices(self):
    """Return the names of all physical (non device-mapper) disks."""
    cmd = "fdisk -l 2>/dev/null | grep \"Disk /dev/\"" \
          " | grep -v \"/dev/mapper/\" | awk '{ print $2 }' " \
          "| awk -F'/' '{ print $NF }' | sed s/:$//g"
    return SharedFuncs.runShellCmd(cmd).split('\n')
def doCheck(self):
    """
    Collect driver version and PCI model information for the NIC(s)
    serving the node's back IP; NG when either cannot be obtained.
    """
    if (self.cluster):
        LocalNodeInfo = self.cluster.getDbNodeByName(self.host)
        backIP = LocalNodeInfo.backIps[0]
    elif (self.ipAddr):
        backIP = self.ipAddr
    else:
        backIP = SharedFuncs.getIpByHostName(self.host)
    networkCardNumList = SharedFuncs.CheckNetWorkBonding(backIP)
    if networkCardNumList == "Shell command faild":
        return
    networkCardNums = []
    # bonded NIC: first element is the bond itself, inspect the slaves
    if (len(networkCardNumList) != 1):
        networkCardNums = networkCardNumList[1:]
    else:
        networkCardNums.append(networkCardNumList[0])
    flag = True
    for networkCardNum in networkCardNums:
        cmd = "/sbin/ethtool -i %s" % networkCardNum
        output = SharedFuncs.runShellCmd(cmd)
        self.result.raw += "[%s]\n%s\n" % (networkCardNum, output)
        NICVer = ""
        PCIAddr = ""
        for eachLine in output.split("\n"):
            if (eachLine.startswith("version:")):
                NICVer = eachLine
            if (eachLine.startswith('bus-info:')):
                # e.g. "bus-info: 0000:02:00.0" splits into 4 fields;
                # keep the last two as the lspci search key
                if (len(eachLine.split(':')) == 4):
                    PCIAddr = eachLine.split(':')[2] + ':' + \
                              eachLine.split(':')[3]
        if (NICVer):
            self.result.val += "%s\n" % (NICVer)
        else:
            self.result.val += "Failed to get NIC %s 'version' info\n" \
                               % networkCardNum
            flag = False
        if (PCIAddr):
            cmd = "lspci |grep %s" % PCIAddr
            (status, output) = subprocess.getstatusoutput(cmd)
            self.result.raw += "%s\n" % (output)
            if status == 0 and len(output.split(':')) >= 3:
                # model text after the second ':', trimmed at '('
                modelInfo = ':'.join(output.split(':')[2:]).split('(')[0]
                self.result.val += "model: %s\n" % (modelInfo.strip())
            else:
                self.result.val += "Failed to get NIC %s model" \
                                   " 'bus-info' info\n" % networkCardNum
                self.result.val += "The cmd is %s " % cmd
                flag = False
        else:
            self.result.val += "Failed to get NIC %s model" \
                               " 'bus-info' info\n" % networkCardNum
            flag = False
    if (flag):
        self.result.rst = ResultStatus.OK
    else:
        self.result.rst = ResultStatus.NG
def doCheck(self):
    """Record the kernel release reported by 'uname -r'."""
    cmd = "uname -r"
    output = SharedFuncs.runShellCmd(cmd)
    gotVersion = output != ""
    self.result.rst = ResultStatus.OK if gotVersion else ResultStatus.NG
    self.result.val = output if gotVersion \
        else "Failed to get kernel version."
    self.result.raw = cmd
def doSet(self):
    """Set NIC rx/tx ring buffer sizes to EXPECTED_RXTX via ethtool."""
    if self.cluster:
        backIP = self.cluster.getDbNodeByName(self.host).backIps[0]
    elif self.ipAddr:
        backIP = self.ipAddr
    else:
        backIP = SharedFuncs.getIpByHostName(self.host)
    cardList = SharedFuncs.CheckNetWorkBonding(backIP)
    # bonded NIC: first element is the bond itself, configure the slaves
    networkCardNums = cardList[1:] if len(cardList) != 1 else cardList
    for card in networkCardNums:
        cmd = "/sbin/ethtool -G %s %s %d" % (card, "rx", EXPECTED_RXTX)
        cmd += ";/sbin/ethtool -G %s %s %d" % (card, "tx", EXPECTED_RXTX)
        SharedFuncs.runShellCmd(cmd)
def getTcpUsedPort(self):
    """
    Count local addresses on the service IP that appear exactly once
    in the netstat output.
    """
    if self.ipAddr:
        serviceIP = self.ipAddr
    else:
        serviceIP = SharedFuncs.getIpByHostName(self.host)
    cmd = "netstat -ano|awk '{print $4}'|grep '%s'|sort|uniq -c|" \
          "grep ' 1 '|wc -l" % serviceIP
    return int(SharedFuncs.runShellCmd(cmd))
def obtainDisk(self):
    """
    function: get disk name by partition
    input: partition list
    return: disk dict
    """
    devices = {}
    listDiskCmd = "fdisk -l 2>/dev/null | grep \"Disk /dev/\" " \
                  "| grep -v \"/dev/mapper/\" | awk '{ print $2 }' " \
                  "| awk -F'/' '{ print $NF }' | sed s/:$//g"
    for disk in SharedFuncs.runShellCmd(listDiskCmd).splitlines():
        partCmd = "fdisk -l 2>/dev/null | grep \"%s\" " \
                  "| grep -v \"Disk\" | grep -v \"/dev/mapper/\" " \
                  "| awk '{ print $1 }'" % disk
        partitions = SharedFuncs.runShellCmd(partCmd)
        # unpartitioned disk maps to its own device path
        devices[disk] = partitions.splitlines() if partitions \
            else "/dev/" + disk
    return devices
def checkMaster(self, instanceId):
    """
    Return True when 'gs_om -t query' reports the given instance as
    Primary (state is two fields after the instance id), else False.
    """
    cmd = "gs_om -t query |grep %s" % (instanceId)
    output = SharedFuncs.runShellCmd(cmd, self.user, self.mpprcFile)
    fields = output.splitlines()[0].split()
    for idx, field in enumerate(fields):
        if field == str(instanceId):
            return fields[idx + 2] == "Primary"
    return False
def doCheck(self):
    """
    Compare the gaussdb and gsql version strings; NG when they differ
    or the gaussdb version cannot be obtained.
    """
    # Get the gaussdb version (fields 4 and 6 of 'gaussdb -V')
    cmd = "gaussdb -V | awk '{print $4\"_\"$6}'"
    self.result.raw = cmd + "\n"
    gaussdbVersion = SharedFuncs.runShellCmd(cmd, "", self.mpprcFile)
    # Robustness fix: guard the indexing — when the binary is missing
    # the command output is empty and '[-1]' raised IndexError.
    if gaussdbVersion and gaussdbVersion[-1] == ")":
        gaussdbVersion = gaussdbVersion[:-1]
    # Get the gsql version
    cmd = "gsql -V | awk '{print $4\"_\"$6}'"
    self.result.raw += cmd
    gsqlVersion = SharedFuncs.runShellCmd(cmd, "", self.mpprcFile)
    if gsqlVersion and gsqlVersion[-1] == ")":
        gsqlVersion = gsqlVersion[:-1]
    # Compare the two version numbers are the same
    if gaussdbVersion and gaussdbVersion == gsqlVersion:
        self.result.rst = ResultStatus.OK
    else:
        self.result.rst = ResultStatus.NG
    self.result.val = "gaussdb Version: %s \ngsql Version: %s" % (
        gaussdbVersion, gsqlVersion)
def doCheck(self):
    """NG when om_monitor entries remain in any crontab."""
    issue = ""
    cmd = g_Platform.getAllCrontabCmd()
    allCrontab = SharedFuncs.runShellCmd(cmd, self.user)
    for entry in allCrontab.split('\n'):
        if entry.find('om_monitor') >= 0:
            issue = "Gauss process om_monitor remains in crontab. " \
                    "please delete this gauss info."
            self.result.raw += "%s\n" % entry
    if issue:
        self.result.rst = ResultStatus.NG
        self.result.val = issue
    else:
        self.result.rst = ResultStatus.OK
def doCheck(self):
    """
    Check whether any gaussdb process owned by self.user is executing
    CatchupMain (data catch-up), judged from its gstack output.
    """
    # NOTE(review): the pattern '^<%s\>' matches a literal '<' before
    # the user name — presumably '\<%s\>' (word boundary) was intended;
    # confirm against real 'ps -ef' output.
    cmd = "ps -ef |grep '^<%s\>' | grep '\<gaussdb\>' | grep -v grep |" \
          " awk '{print $2}' |(while read arg; do gstack $arg |" \
          " grep CatchupMain; done) 2>/dev/null" % self.user
    output = SharedFuncs.runShellCmd(cmd)
    # any output means at least one stack contained CatchupMain
    if (output != ""):
        self.result.rst = ResultStatus.NG
        self.result.val = "The gatchdb process stack contains the" \
                          " CatchupMain function."
    else:
        self.result.rst = ResultStatus.OK
        self.result.val = "The gatchdb process stack not contains" \
                          " the CatchupMain function."
    self.result.raw = cmd
def doCheck(self):
    """
    Report mounted filesystems (device, size, mount point) sorted by
    device name; always OK — this check is informational.
    """
    diskInfoDict = {}
    resultStr = ""
    cmd = "df -h -P | awk '{print $1,$2,$6}'"
    output = SharedFuncs.runShellCmd(cmd)
    # skip the header line; key each entry by its device name
    for disk in output.split('\n')[1:]:
        diskInfo = disk.split()
        diskInfoDict[diskInfo[0]] = disk
    # Bug fix: the original called sorted(keys) and discarded the
    # returned list, so the report order was arbitrary dict order.
    for diskName in sorted(diskInfoDict.keys()):
        resultStr += "%s\n" % diskInfoDict[diskName]
    self.result.val = resultStr
    self.result.rst = ResultStatus.OK