def _findBrickName(volInfo, brickPath):
    # Return the full brick name ("<host>:<path>") of the brick on this
    # host whose path matches the given brick path.
    hostUuid = glustercli.hostUUIDGet()
    for volumeName, volumeInfo in volInfo.iteritems():
        for brick in volumeInfo['bricksInfo']:
            if brick.get('hostUuid') == hostUuid \
                    and brick['name'].split(':')[1] == brickPath:
                return brick['name']


if __name__ == '__main__':
    args = parse_input()
    status, msg = check_proc_util.getGlusterdStatus()
    if status == utils.PluginStatusCode.OK:
        if args.type == _NFS:
            status, msg = check_proc_util.getNfsStatus(
                glustercli.volumeInfo())
        elif args.type == _SMB:
            status, msg = check_proc_util.getSmbStatus(
                glustercli.volumeInfo())
        elif args.type == _SHD:
            status, msg = check_proc_util.getShdStatus(
                glustercli.volumeInfo())
        elif args.type == _QUOTA:
            status, msg = check_proc_util.getQuotadStatus(
                glustercli.volumeInfo())
        elif args.type == _CTDB:
            volInfo = glustercli.volumeInfo()
            nfsStatus, nfsMsg = check_proc_util.getNfsStatus(volInfo)
            smbStatus, smbMsg = check_proc_util.getSmbStatus(volInfo)
            status, msg = check_proc_util.getCtdbStatus(smbStatus, nfsStatus)
        elif args.type == _BRICK:
            volInfo = glustercli.volumeInfo(args.volume)
            brickName = _findBrickName(volInfo, args.brickPath)
            if brickName:
                status, msg = check_proc_util.getBrickStatus(args.volume,
                                                             brickName)
def run(self):
    """Poll the gluster related services periodically and push a passive
    check result to the Nagios server over NSCA whenever the status of a
    monitored service changes."""
    hostName = nscautils.getCurrentHostNameInNagiosServer()
    sleepTime = int(nscautils.getProcessMonitorSleepTime())
    glusterdStatus = Status()
    nfsStatus = Status()
    smbStatus = Status()
    shdStatus = Status()
    quotaStatus = Status()
    ctdbStatus = Status()
    brickStatus = {}
    while True:
        if not hostName:
            hostName = nscautils.getCurrentHostNameInNagiosServer()
            if not hostName:
                logger.warn("'hostname_in_nagios' is not configured "
                            "in %s" % nagios_server_conf_path)
                time.sleep(sleepTime)
                continue

        status, msg = check_proc_util.getGlusterdStatus()
        if glusterdStatus.isStatusChanged(status, msg):
            nscautils.send_to_nsca(hostName, _glusterdService, status, msg)

        # Get the volume status only if glusterd is running to avoid
        # unusual delay
        if status != utils.PluginStatusCode.OK:
            logger.warn("Glusterd is not running")
            time.sleep(sleepTime)
            continue

        try:
            volInfo = glustercli.volumeInfo()
        except glusternagios.glustercli.GlusterCmdFailedException:
            logger.error("failed to find volume info")
            time.sleep(sleepTime)
            continue

        status, msg = check_proc_util.getNfsStatus(volInfo)
        if nfsStatus.isStatusChanged(status, msg):
            nscautils.send_to_nsca(hostName, _nfsService, status, msg)

        status, msg = check_proc_util.getSmbStatus(volInfo)
        if smbStatus.isStatusChanged(status, msg):
            nscautils.send_to_nsca(hostName, _smbService, status, msg)

        status, msg = check_proc_util.getCtdbStatus(smbStatus.code,
                                                    nfsStatus.code)
        if ctdbStatus.isStatusChanged(status, msg):
            nscautils.send_to_nsca(hostName, _ctdbdService, status, msg)

        status, msg = check_proc_util.getShdStatus(volInfo)
        if shdStatus.isStatusChanged(status, msg):
            nscautils.send_to_nsca(hostName, _shdService, status, msg)

        status, msg = check_proc_util.getQuotadStatus(volInfo)
        if quotaStatus.isStatusChanged(status, msg):
            nscautils.send_to_nsca(hostName, _quotadService, status, msg)

        brick = getBrickStatus(volInfo)
        # brickInfo contains the status code and the message; resend when
        # either changes and always resend CRITICAL results
        for brickService, brickInfo in brick.iteritems():
            if brickInfo != brickStatus.get(brickService, [None]) \
                    or brickInfo[0] == utils.PluginStatusCode.CRITICAL:
                brickStatus[brickService] = brickInfo
                nscautils.send_to_nsca(hostName, brickService,
                                       brickInfo[0], brickInfo[1])

        time.sleep(sleepTime)
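# A minimal sketch of the Status helper used throughout run() above, kept
# here only for illustration. The real class is defined elsewhere in this
# module; the attribute names (code, msg) and the isStatusChanged()
# contract are inferred from how run() uses them, not copied from the
# original definition.
class Status(object):
    def __init__(self):
        # Last reported plugin status code and message; None until the
        # first check result has been seen.
        self.code = None
        self.msg = None

    def isStatusChanged(self, code, msg):
        # Record the new state and report a change whenever either the
        # status code or the message differs from the last seen values.
        if code != self.code or msg != self.msg:
            self.code = code
            self.msg = msg
            return True
        return False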