def check(c):
    ''' Sends a [json] file with key/values as single/multiple events (array) to metrology servers '''
    jsonfile = c.conf.get('jsonfile', "")

    # raw file content ; decoded below into a list of event dicts
    raw = []
    try:
        with open(jsonfile, "r") as fh:
            raw = fh.read()
    except Exception as e:
        debug("ERROR - Persist() : couldn't read file {} - {}".format(
            jsonfile, e))

    # decode JSON ; on failure, events stays empty and the check reports 0 lines
    events = []
    try:
        events = json.loads(raw)
    except Exception as e:
        debug("ERROR - Persist() : couldn't decode data - {}".format(e))

    c.multievent = events
    nb_events = len(events)

    c.add_item(CheckItem("sendfile_name", jsonfile, datapoint=False))
    c.add_item(CheckItem("sendfile_lines", nb_events))
    c.add_message("{} - {} lines/events".format(jsonfile, nb_events))
    return c
def check(c):
    '''Collect memory percent, used, free, available'''
    limit = c.conf.get('threshold', 101)

    # psutil sample :
    # svmem(total=2749374464, available=1501151232, percent=45.4, used=979968000,
    #       free=736043008, active=1145720832, inactive=590102528, buffers=107663360,
    #       cached=925700096, shared=86171648)
    vmem = psutil.virtual_memory()

    c.add_item(CheckItem('memory_percent', vmem.percent,
                         "Memory used (percent)", unit='%'))

    item_used = CheckItem('memory_used', vmem.used,
                          "Memory used (bytes)", unit='bytes')
    c.add_item(item_used)

    item_avail = CheckItem('memory_available', vmem.available,
                           "Memory available (bytes)", unit='bytes')
    c.add_item(item_avail)

    item_total = CheckItem('memory_total', vmem.total,
                           "Memory total (bytes)", unit='bytes')
    c.add_item(item_total)

    # alerts ?
    if float(vmem.percent) > float(limit):
        c.severity = cmt.SEVERITY_CRITICAL
        c.add_message("memory above threshold : {} % > {} %".format(
            vmem.percent, limit))
        return c

    # OK
    c.add_message("mem used {} % - used {} - avail {} - total {}".format(
        vmem.percent, item_used.human(), item_avail.human(),
        item_total.human()))
    return c
def check(c):
    '''Checks mount points OUTPUT - cmt_mount '''
    parts = psutil.disk_partitions(all=True)

    # --available option : just list mountpoints and leave
    if cmt.ARGS["available"]:
        sep = "-" * 25
        print(sep)
        print("Available mountpoints :")
        print(sep)
        for entry in parts:
            print(entry)
        print(sep)
        return c

    # psutil sample :
    # sdiskpart(device='/dev/sda1', mountpoint='/', fstype='ext4',
    #           opts='rw,relatime,errors=remount-ro,data=ordered')
    path = c.conf['path']
    c.add_item(CheckItem('mount', path, datapoint=False))

    if any(entry.mountpoint == path for entry in parts):
        c.add_message("mount {} found".format(path))
        return c

    c.severity = cmt.SEVERITY_CRITICAL
    c.add_message("mount {} not found".format(path))
    return c
def check(c):
    '''Ping a remote host and return availability

    Output:
    - cmt_ping
    '''
    host = c.conf['host']
    c.add_item(CheckItem('ping', host, datapoint=False))

    # BUG FIX: the argument list previously contained "-W" "2" (adjacent
    # string literals), which Python concatenates into the single argument
    # "-W2" ; pass the option and its value as two distinct arguments.
    # subprocess.run (list form, no shell) waits for completion and
    # replaces the Popen + wait() pair.
    proc = subprocess.run(
        ["ping", "-c", "1", "-W", "2", host],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )

    if proc.returncode == 0:
        c.add_message("ping {} ok".format(host))
    else:
        c.severity = cmt.SEVERITY_CRITICAL
        c.add_message("ping {} not responding".format(host))
    return c
def check(c):
    '''Collect swap percent / used / total ; alert above configured threshold.'''
    limit = c.conf.get('threshold', 101)

    # psutil sample :
    # sswap(total=2147479552, used=0, free=2147479552, percent=0.0, sin=0, sout=0)
    sw = psutil.swap_memory()

    c.add_item(CheckItem('swap_percent', sw.percent,
                         "Swap used (percent)", unit='%'))

    item_used = CheckItem('swap_used', sw.used, 'Swap used (bytes)',
                          unit='bytes')
    c.add_item(item_used)

    item_total = CheckItem('swap_total', sw.total, 'Swap total (bytes)',
                           unit='bytes')
    c.add_item(item_total)

    # alerts ?
    if float(sw.percent) > float(limit):
        c.severity = cmt.SEVERITY_CRITICAL
        c.add_message("swap above threshold : {} % > {} %".format(
            sw.percent, limit))
        return c

    # OK
    c.add_message("swap used: {} % / {} - total {}".format(
        sw.percent, item_used.human(), item_total.human()))
    return c
def add_tags(self):
    ''' Add checkitems tags from global or check config

    Tags are read as space-separated words from the global config section
    and from this check's own config. Each word is either "key=value" or a
    bare "key" (stored with value 1). Every tag produces a CheckItem named
    tag_<key>.
    '''
    # global
    tags = cmt.CONF['global'].get("tags", "").split()
    # self.conf
    tags += self.conf.get("tags", "").split()

    # parse tags / split if value provided / add check item
    for tag in tags:
        if '=' in tag:
            # BUG FIX: split only on the first '=' so a value containing
            # '=' (e.g. env=a=b) no longer raises ValueError.
            (k, v) = tag.split('=', 1)
        else:
            k = tag
            v = 1
        ci = 'tag_' + k
        self.add_item(CheckItem(ci, v))
def check(c):
    ''' Get data from CLI / pipe and send ; single check mode only '''
    # stdin is consumed by this check : it cannot run in cron mode or in a
    # multi module/checks run.
    single_mode = c.opt["single_module_run"] and c.opt["specific_checkname_run"]
    if not single_mode:
        c.result_info = "must be run as single check w/ stdin piped command"
        c.result = "skip"
        return c

    attribute = c.conf['attribute']
    unit = c.conf.get("unit", "")
    comment = c.conf.get("comment", "")

    value = get_value()
    c.add_item(CheckItem(attribute, value, comment, unit))
    c.add_message("{} = {} - (unit = {} - {})".format(attribute, value, unit,
                                                      comment))
    return c
def check(c):
    '''Check for various folder attributes

    Scans a file or directory (optionally recursive), collects counts /
    sizes / ages, then evaluates the configured 'target' constraints
    (files_min/max, size_min/max, age_min/max, has_recent, has_old,
    has_files, permission, uid, gid). First failing target sets
    SEVERITY_CRITICAL and returns.
    '''
    # s_dirs is shared with the scan helpers (scanCommon) through a global
    global s_dirs
    s_dirs = 0

    path = c.conf['path']
    name = c.check
    recursive = c.conf.get("recursive", False) is True
    no_store = c.conf.get("no_store", False) is True
    send_content = c.conf.get("send_content", False) is True
    send_list = c.conf.get("send_list", False) is True

    # optional filters, shared with filter helpers through globals
    global conf_filter_extension
    conf_filter_extension = c.conf.get("filter_extension", "").split()
    has_filter_extension = len(conf_filter_extension) > 0

    global conf_filter_regexp
    if "filter_regexp" in c.conf:
        conf_filter_regexp = re.compile(c.conf.get("filter_regexp"))
        has_filter_regexp = True
    else:
        has_filter_regexp = False

    targets = []
    if 'target' in c.conf:
        targets = c.conf['target']

    c.add_item(CheckItem('folder_path', path, datapoint=False))
    c.add_item(CheckItem('folder_name', name, datapoint=False))

    if not os.path.exists(path):
        c.severity = cmt.SEVERITY_CRITICAL
        c.add_message("folder {} missing".format(path))
        return c

    # scan
    # ----
    s_count = 0          # total file count
    s_size = 0           # total size sum
    s_mintime = -1       # oldest file (minimal unix timestamp)
    s_maxtime = 0        # most recent file (maximal unix timestamp)
    s_files = []
    s_files_detail = {}

    # single file
    if os.path.isfile(path):
        statinfo = os.stat(path)
        s_size = statinfo.st_size
        s_count = 1
        s_mintime = statinfo.st_mtime
        s_maxtime = statinfo.st_mtime
        s_files.append(path)
        s_files_detail[path] = {
            "size": s_size,
            "mtime": statinfo.st_mtime,
            "uid": statinfo.st_uid,
            "gid": statinfo.st_gid,
            "mode": stat.filemode(statinfo.st_mode),
        }
        # option : send_content
        if send_content:
            fico = get_file_content(path)
            ci = CheckItem('file_content', fico, "file content",
                           multiline=True)
            c.add_item(ci)

    # directory
    elif os.path.isdir(path):
        for entry in scanCommon(path, recursive=recursive):
            if has_filter_extension:
                if not filter_extension(entry):
                    continue
            if has_filter_regexp:
                if not filter_regexp(entry):
                    continue
            s_count += 1
            statinfo = os.stat(entry.path)
            s_size += statinfo.st_size
            if statinfo.st_mtime > s_maxtime:
                s_maxtime = statinfo.st_mtime
            if statinfo.st_mtime < s_mintime or s_mintime == -1:
                s_mintime = statinfo.st_mtime
            if not no_store:
                s_files.append(entry.name)
                s_files_detail[entry.path] = {
                    # BUG FIX: was s_size (the running folder total), so
                    # every entry reported a cumulative size ; store this
                    # file's own size, as the single-file branch does.
                    "size": statinfo.st_size,
                    "mtime": statinfo.st_mtime,
                    "uid": statinfo.st_uid,
                    "gid": statinfo.st_gid,
                    "mode": stat.filemode(statinfo.st_mode),
                }
    else:
        c.severity = cmt.SEVERITY_WARNING
        c.add_message("folder {} ({}) is not a dir / nor a file".format(
            name, path))
        return c

    # file count
    ci = CheckItem('folder_files', s_count,
                   "Number of files in folder " + name, unit="files")
    c.add_item(ci)

    # dirs count
    ci = CheckItem('folder_dirs', s_dirs,
                   "Number of dirs/subdirs in folder " + name, unit="dirs")
    c.add_item(ci)

    # size
    ci = CheckItem('folder_size', s_size, "Total Size (bytes)", unit="bytes")
    h_size = ci.human()
    c.add_item(ci)

    # age
    now = time.time()
    if s_maxtime > 0:
        ci = CheckItem('folder_youngest', "", "most recent file (seconds)",
                       unit="sec")
        ci.value = int(now - s_maxtime)
        c.add_item(ci)
    if s_mintime != -1:
        ci = CheckItem('folder_oldest', "", "oldest file (seconds)",
                       unit="sec")
        ci.value = int(now - s_mintime)
        c.add_item(ci)

    # send list
    if send_list:
        r = ""
        for f in s_files_detail:
            delta_time = str(
                datetime.timedelta(seconds=int(now -
                                               s_files_detail[f]["mtime"])))
            r = r + "{} - {} bytes - {} sec - id {}/{} - perm {}\n".format(
                f,
                s_files_detail[f]["size"],
                delta_time,
                s_files_detail[f]["uid"],
                s_files_detail[f]["gid"],
                s_files_detail[f]["mode"],
            )
        ci = CheckItem('file_list', r, multiline=True)
        c.add_item(ci)

    # Target checks
    # --------------
    tgcount = 0
    tgtotal = len(targets)

    # check valid target name
    for t in targets:
        if not t in VALID_TARGET_LIST:
            c.severity = cmt.SEVERITY_WARNING
            c.add_message("{} {} : unknown target {}".format(name, path, t))
            return c

    # target : files_min: 4
    if 'files_min' in targets:
        tgcount += 1
        if s_count < targets['files_min']:
            c.severity = cmt.SEVERITY_CRITICAL
            c.add_message("{} {} : too few files ({})".format(
                name, path, s_count))
            return c

    # target : files_max: 23
    if 'files_max' in targets:
        tgcount += 1
        if s_count > targets['files_max']:
            c.severity = cmt.SEVERITY_CRITICAL
            c.add_message("{} {} : too many files ({})".format(
                name, path, s_count))
            return c

    # target : size_max (folder max bytes)
    if 'size_max' in targets:
        tgcount += 1
        if s_size > targets['size_max']:
            c.severity = cmt.SEVERITY_CRITICAL
            c.add_message("{} {} : too big ({})".format(name, path, s_size))
            return c

    # target : size_min (folder min bytes)
    if 'size_min' in targets:
        tgcount += 1
        if s_size < targets['size_min']:
            c.severity = cmt.SEVERITY_CRITICAL
            c.add_message("{} {} : too small ({})".format(name, path, s_size))
            return c

    # target : age_max:
    # all files must be more recent than age_max seconds
    if 'age_max' in targets:
        tgcount += 1
        if s_mintime != -1:
            if int(now - s_mintime) > targets['age_max']:
                c.severity = cmt.SEVERITY_CRITICAL
                c.add_message("{} {} : some files are too old ({} sec)".format(
                    name, path, int(now - s_mintime)))
                return c

    # target : age_min:
    # all files must be older than age_min
    if 'age_min' in targets:
        tgcount += 1
        if s_maxtime != 0:
            if int(now - s_maxtime) < targets['age_min']:
                c.alert += 1
                c.severity = cmt.SEVERITY_CRITICAL
                c.add_message(
                    "{} {} : some files are too young ({} sec)".format(
                        name, path, int(now - s_maxtime)))
                return c

    # target : has_recent:
    # some files must be recent (more than has_recent)
    if 'has_recent' in targets:
        tgcount += 1
        if s_maxtime != 0:
            if int(now - s_maxtime) > targets['has_recent']:
                c.alert += 1
                c.severity = cmt.SEVERITY_CRITICAL
                c.add_message("{} {} : missing young file (min {} sec)".format(
                    name, path, int(now - s_maxtime)))
                return c

    # target : has_old:
    # some files must be older than has_old
    if 'has_old' in targets:
        tgcount += 1
        if s_mintime != -1:
            if int(now - s_mintime) < targets['has_old']:
                c.severity = cmt.SEVERITY_CRITICAL
                c.add_message("{} {} : missing old file (max {} sec)".format(
                    name, path, int(now - s_mintime)))
                return c

    # remaining targets need the file list stored at scan time
    if no_store:
        c.add_message(
            "{} {} OK - {} files, {} dirs, {} bytes [{}] - targets {}/{}".
            format(name, path, s_count, s_dirs, s_size, h_size, tgcount,
                   tgtotal))
        return c

    # NEED flist to be stored at scan time
    # target : has_file: filename
    if 'has_files' in targets:
        tgcount += 1
        flist = targets['has_files']
        for f in flist:
            if f not in s_files:
                c.severity = cmt.SEVERITY_CRITICAL
                c.add_message(
                    "folder {} : expected file not found ({})".format(path, f))
                return c

    if 'permission' in targets:
        tgcount += 1
        target_perm = targets['permission']
        for f in s_files_detail:
            fperm = s_files_detail[f]["mode"]
            if fperm != target_perm:
                c.severity = cmt.SEVERITY_CRITICAL
                c.add_message(
                    "folder {} : incorrect permission for {}: found {} , expected {}"
                    .format(path, f, fperm, target_perm))
                return c

    if 'uid' in targets:
        tgcount += 1
        target_uid = targets['uid']
        for f in s_files_detail:
            fuid = s_files_detail[f]["uid"]
            if fuid != target_uid:
                c.severity = cmt.SEVERITY_CRITICAL
                c.add_message(
                    "folder {} : incorrect uid for {}: found {} , expected {}".
                    format(path, f, fuid, target_uid))
                return c

    if 'gid' in targets:
        tgcount += 1
        target_gid = targets['gid']
        for f in s_files_detail:
            fgid = s_files_detail[f]["gid"]
            if fgid != target_gid:
                c.severity = cmt.SEVERITY_CRITICAL
                c.add_message(
                    "folder {} : incorrect gid for {}: found {} , expected {}".
                    format(path, f, fgid, target_gid))
                return c

    c.add_message(
        "{} {} OK - {} files, {} dirs, {} bytes - targets {}/{}".format(
            name, path, s_count, s_dirs, s_size, tgcount, tgtotal))
    return c
def check(c):
    '''Check a remote TLS certificate : validity window and days before expiry.

    Connects to hostname:port, fetches the peer certificate presented for
    the expected subject `name`, emits certificate_* items, and sets
    severity from the expiration thresholds.
    '''
    hostname = c.conf.get("hostname", "localhost")  # remote network target
    port = c.conf.get("port", 443)                  # remote network port
    name = c.conf.get("name", hostname)             # expected subject name in cert

    # for previous configuration format backward compatibility
    # BUG FIX: bare except replaced by targeted exceptions (a bare except
    # also swallows SystemExit / KeyboardInterrupt).
    try:
        threshold_warning = int(c.conf.get("warning_in", DEFAULT_CERT_WARNING))
    except (TypeError, ValueError):
        threshold_warning = DEFAULT_CERT_WARNING
    try:
        threshold_notice = int(c.conf.get("notice_in", DEFAULT_CERT_NOTICE))
    except (TypeError, ValueError):
        threshold_notice = DEFAULT_CERT_NOTICE

    hostdisplay = "{}:{}".format(hostname, port)
    c.add_item(CheckItem("certificate_host", hostdisplay, "", datapoint=False))
    c.add_item(CheckItem("certificate_name", name, unit="", datapoint=False))

    context = ssl.create_default_context()
    try:
        with socket.create_connection((hostname, port),
                                      CERT_SOCKET_TIMEOUT) as sock:
            with context.wrap_socket(sock, server_hostname=name) as ssock:
                cert = ssock.getpeercert()
    except ConnectionRefusedError:
        c.severity = cmt.SEVERITY_CRITICAL
        c.add_message("no connection to {}".format(hostdisplay))
        return c
    except ssl.SSLError:
        c.severity = cmt.SEVERITY_CRITICAL
        c.add_message("no ssl connection to {}".format(hostdisplay))
        return c
    except socket.timeout:
        c.severity = cmt.SEVERITY_CRITICAL
        c.add_message("timeout to {}".format(hostdisplay))
        return c
    except Exception:
        # BUG FIX: was a bare except
        c.severity = cmt.SEVERITY_CRITICAL
        c.add_message("couldn't get peer cert from {}".format(hostdisplay))
        return c

    if cert is None:
        c.severity = cmt.SEVERITY_CRITICAL
        c.add_message("no certificate found for {}".format(hostdisplay))
        return c

    def todatetime(x):
        # cert dates ("notAfter"/"notBefore") are GMT strings ; convert to
        # timezone-aware UTC datetimes
        return datetime.datetime.fromtimestamp(ssl.cert_time_to_seconds(x),
                                               tz=datetime.timezone.utc)

    cert_infos = {
        "notAfter": todatetime(cert["notAfter"]),
        "notBefore": todatetime(cert["notBefore"]),
        "issuer": get_source_infos(cert["issuer"]),
        "subject": get_source_infos(cert["subject"]),
    }

    # certificate dates are in utc. Just reading the warning from the
    # documentation, it seems like Python's datetime modules is pretty bad
    # when it comes to managing timezones correctly. So, keep everything in
    # terms of UTC.
    now = datetime.datetime.now(tz=datetime.timezone.utc)
    if now > cert_infos["notAfter"]:
        c.severity = cmt.SEVERITY_CRITICAL
        c.add_message("cert for {} on host {} - certificate expired by {}".format(
            name, hostdisplay, now - cert_infos["notAfter"]))
    elif now < cert_infos["notBefore"]:
        c.severity = cmt.SEVERITY_CRITICAL
        c.add_message("cert for {} host {} - certificate not yet valid (will be valid in {})".format(
            name, hostdisplay, cert_infos["notBefore"] - now))

    expires_in = cert_infos["notAfter"] - now
    expires_sec = int(round(expires_in.total_seconds()))
    expires_day = int(expires_sec / 86400)

    c.add_item(CheckItem("certificate_days", expires_day, unit="days"))

    # BUG FIX: the threshold logic used to run unconditionally, overwriting
    # the CRITICAL severity set above for an expired / not-yet-valid
    # certificate (negative expires_day < threshold_warning -> WARNING).
    if c.severity != cmt.SEVERITY_CRITICAL:
        if expires_day < threshold_warning:
            c.severity = cmt.SEVERITY_WARNING
        elif expires_day < threshold_notice:
            c.severity = cmt.SEVERITY_NOTICE
        else:
            c.severity = cmt.SEVERITY_NONE

    c.add_item(CheckItem("certificate_issuer",
                         cert_infos["issuer"]["organizationName"]))
    c.add_item(CheckItem("certificate_subject",
                         cert_infos["subject"]["commonName"]))
    c.add_message("{} day(s) left for SSL certificate {} on {} ".format(
        expires_day, name, hostdisplay))
    return c
def check(c):
    '''Check that a process is running, matched by name and optional cmdline arg.

    With --available, lists visible processes and returns. Otherwise emits
    process_memory / process_cpu items for the first matching process, or
    sets SEVERITY_CRITICAL when none matches.
    '''
    # --available option ?
    if cmt.ARGS["available"]:
        print("-" * 25)
        print("Process available :")
        print("-" * 25)
        for p in psutil.process_iter():
            try:
                # Get process name & pid from process object.
                print(p.name())
                print(p.cmdline())
            except (psutil.NoSuchProcess, psutil.AccessDenied,
                    psutil.ZombieProcess):
                pass
        print("-" * 25)
        return c

    # real check
    name = c.check
    psname = c.conf['psname']
    search_arg = c.conf.get('search_arg', None)

    c.add_item(CheckItem('process_name', psname, "", datapoint=False))

    for proc in psutil.process_iter():
        try:
            processName = proc.name()
        except (psutil.NoSuchProcess, psutil.AccessDenied,
                psutil.ZombieProcess):
            # BUG FIX: was 'pass', which fell through to the comparison
            # below with processName unbound (UnboundLocalError on the
            # first iteration) or stale from a previous iteration.
            continue

        if processName == psname:
            ok = False
            # search args needed (an expected arg is in conf)
            if search_arg:
                try:
                    pargs = proc.cmdline()
                except Exception:
                    pargs = []
                for p in pargs:
                    if p == search_arg:
                        ok = True
                        break
            else:
                # process name is enough, no need to search args
                ok = True

            if ok:
                mem = proc.memory_info().rss
                ci = CheckItem('process_memory', mem, "rss", unit="bytes")
                h_mem = ci.human()
                c.add_item(ci)

                cpu = proc.cpu_times().user
                ci = CheckItem('process_cpu', cpu, "cpu time, user",
                               unit='seconds')
                c.add_item(ci)

                c.add_message(
                    "process {} found ({}, {}) - memory rss {} - cpu {} sec.".
                    format(name, psname, search_arg, h_mem, cpu))
                return c

    c.severity = cmt.SEVERITY_CRITICAL
    c.add_message("process {} missing ({}, {})".format(name, psname,
                                                       search_arg))
    return c