class CloudDatabaseAnalyzer(Analyzer):
    """
    Analyze logread output for failures sending events to the cloud database.
    """
    categories = ['reports', 'cloud']
    collector = LogreadCollector

    # Matches the Go HTTP client error logged when an event POST to the
    # cloud database is canceled; group(1) is the cancellation reason.
    error_re = re.compile(
        r"Error calling client.Do: Post https://database.untangle.com/v1/put\?source=.+&type=db&queueName=mfw_events: net/http: request canceled \((.+)\)"
    )

    heading = "Cloud Database"

    results = {
        "client_timeout":
        AnalyzerResult(
            severity=AnalyzerResultSeverityWarn,
            summary=
            "Unable to send {instances} message to https://database.untangle.com",
            detail="This could be an issue with the client or the server"),
        "pass":
        AnalyzerResult(
            severity=AnalyzerResultSeverityPass,
            summary="No issues detected with cloud communication"),
    }

    def analyze(self, collector_results):
        """
        Count cloud-database send failures by reason and return one result
        per reason seen, or a single "pass" result when none were found.
        """
        results = []
        data_results = {}
        for collector_result in collector_results:
            for line in collector_result.output:
                match = self.error_re.search(line)
                if match is None:
                    continue
                reason = None
                if "Client.Timeout exceeded" in match.group(1):
                    reason = "client_timeout"
                if reason is None:
                    # Unrecognized cancellation reason: there is no matching
                    # template in CloudDatabaseAnalyzer.results, so counting
                    # it would raise KeyError in the reporting loop below.
                    continue
                if reason not in data_results:
                    data_results[reason] = {'instances': 0}
                data_results[reason]['instances'] += 1

        for key in data_results:
            result = copy.deepcopy(CloudDatabaseAnalyzer.results[key])
            result.analyzer = self
            result.format(data_results[key])
            results.append(result)

        if len(results) == 0:
            result = copy.deepcopy(CloudDatabaseAnalyzer.results["pass"])
            result.analyzer = self
            results.append(result)
        return results
class SpamBlockerAnalyzer(Analyzer):
    """
    Analyze spam logs for spamd processing timeouts.
    """
    categories = ["mail"]
    collector = {
        'collector': FilesCollector,
        'arguments': {
            'id': 'mail_log',
            'path': '/var/log/mail.*'
        }
    }

    # spamd log line: "spamd: result: <flag> <score> - <test1,test2,...>";
    # raw string so \s is a regex class, not an invalid string escape.
    spam_result_re = re.compile(r'spamd: result: ([^\s]+) ([^\s]+) - ([^\s]+)')
    spam_test_time_limit_exceeded = "TIME_LIMIT_EXCEEDED"

    heading = "Spam Blocker Mail Log"
    results = {
        "timeout":
        AnalyzerResult(
            severity=AnalyzerResultSeverityWarn,
            summary="Spam processing timeouts detected",
            detail=[
                '{timeouts} msgs (of {total} msgs) matched TIME_LIMIT_EXCEEDED result.',
                'This may point to a DNS resolver that limits queries which can cause incomplete spam analysis.'
            ],
            recommendation=
            "Consider changing WAN resolver to a public resolver like 8.8.8.8"
        )
    }

    def analyze(self, collector_results):
        """
        Count spamd result lines and how many hit TIME_LIMIT_EXCEEDED;
        emit a single warning result when any timeouts were seen.
        """
        results = []
        timeouts = 0
        total = 0
        for collector_result in collector_results:
            for line in collector_result.output:
                match = self.spam_result_re.search(line)
                if match is None:
                    continue
                total += 1
                tests = match.group(3).split(',')
                if self.spam_test_time_limit_exceeded in tests:
                    timeouts += 1
        if timeouts > 0:
            result = SpamBlockerAnalyzer.results["timeout"].copy()
            result.collector_result = collector_result
            result.analyzer = self
            result.format({
                "timeouts": timeouts,
                "total": total,
            })
            results.append(result)
        return results
def analyze(self, collector_results):
    """
    Build one pass-level result per filesystem entry in the "entries"
    collector output, reporting its type (file or directory) and a
    human-readable size.
    """
    results = []
    for collector_result in collector_results:
        if collector_result.source != "entries":
            continue
        # output maps path -> size in bytes.
        for entry in collector_result.output:
            result = AnalyzerResult(
                severity=AnalyzerResultSeverityPass,
                other_results={
                    "{severity}": '{type:<12}{entry:<20} {size}'
                })
            format_fields = {
                'entry': entry,
                'size':
                support_diagnostics.utilities.SizeConversion.to_human(
                    collector_result.output[entry]),
                # Classify the path itself on the local filesystem.
                'type': "file" if os.path.isfile(entry) else "directory",
            }
            result.collector_result = collector_result
            result.analyzer = self
            result.format(format_fields)
            results.append(result)
    return results
def analyze(self, collector_results):
    """
    Report the top CPU-consuming processes, sorted by %cpu descending and
    limited to ProcessCpuAnalyzer.max_records entries.
    """
    results = []
    candidates = [
        record for record in collector_results
        if record.source == "process" and '%cpu' in record.output['top']
    ]
    candidates.sort(key=lambda record: record.output['top']['%cpu'],
                    reverse=True)

    for proc in candidates[:ProcessCpuAnalyzer.max_records]:
        output = proc.output
        # Prefer the full command line; fall back to the short process name.
        if 'cmdline' in output and output['cmdline'] != '':
            command = output['cmdline']
        else:
            command = output['status']['name']
        # Truncate long command lines to 50 chars with an ellipsis.
        display = command[:50] + ('...' if len(command) > 50 else '')

        entry = AnalyzerResult(
            severity=AnalyzerResultSeverityPass,
            other_results={"{severity}": '{cmdline:<55} {percent}%'})
        entry.collector_result = proc
        entry.analyzer = self
        entry.format({
            'cmdline': display,
            'percent': output['top']['%cpu'],
        })
        results.append(entry)
    return results
def analyze(self, collector_results):
    """
    Report the top memory-consuming processes by resident set size (vmrss),
    limited to ProcessMemoryAnalyzer.max_records entries, with each
    process's share of total memory.
    """
    results = []
    memory_total = None
    for result in filter(lambda r: r.source == "memory", collector_results):
        if "memtotal" in result.output:
            memory_total = result.output["memtotal"]

    memory_sorted_process_results = sorted(
        filter(
            lambda r: r.source == "process" and 'vmrss' in r.output['status'],
            collector_results),
        key=lambda d: d.output['status']['vmrss'],
        reverse=True)

    for process_result in memory_sorted_process_results[:
                                                        ProcessMemoryAnalyzer
                                                        .max_records]:
        result = AnalyzerResult(
            severity=AnalyzerResultSeverityPass,
            other_results={
                "{severity}": '{cmdline:<55} {size:<11} {percent:<.2f}%'
            })
        vmrss = process_result.output['status']['vmrss']
        cmdline = process_result.output['cmdline']
        if memory_total:
            percent = round(vmrss / memory_total * 100, 2)
        else:
            # memtotal missing or zero: report 0% rather than crashing the
            # whole analysis on a divide error.
            percent = 0.0
        format_fields = {
            'cmdline':
            '{process}{elide}'.format(
                process=cmdline[:50],
                elide='...' if len(cmdline) > 50 else ''),
            'size':
            support_diagnostics.utilities.SizeConversion.to_human(vmrss),
            'percent': percent
        }
        result.collector_result = process_result
        result.analyzer = self
        result.format(format_fields)
        results.append(result)
    return results
class SystemAnalyzer(Analyzer):
    """
    Report basic system information (hostname, version, uid, serial, model)
    and flag CPU architectures that do not receive software updates.
    """
    order = 0
    heading = "System Information"
    categories = ["system"]
    collector = SystemCollector
    results = {
        "unsupported_arch":
        AnalyzerResult(
            severity=AnalyzerResultSeverityWarn,
            summary="Unsupported architecture",
            detail="Software updates are not supported for this architecture",
            recommendation="Reinstall using 64 bit",
            other_results={'architecture': '{arch}'})
    }

    def analyze(self, collector_results):
        """
        Emit one display result per simple system field, plus an
        architecture result that warns for anything other than x86_64.
        """
        results = []
        format_fields = {}
        for collector_result in collector_results:
            if collector_result.source in [
                    "hostname", 'version', 'uid', 'serial', 'model'
            ]:
                # Template like {'uid': '{uid}'}, filled from the first
                # output line of the collector.
                result_fields = {
                    collector_result.source:
                    '{{{source}}}'.format(source=collector_result.source)
                }
                format_fields[
                    collector_result.source] = collector_result.output[0]
                result = AnalyzerResult(severity=None,
                                        other_results=result_fields)
                result.collector_result = collector_result
                result.analyzer = self
                result.format(format_fields)
                results.append(result)
            elif collector_result.source == "arch":
                arch_result_fields = {'architecture': '{arch}'}
                system_arch = collector_result.output[0]
                arch = None
                if system_arch == "x86_64":
                    arch = "64 bit x86"
                    result = AnalyzerResult(other_results=arch_result_fields)
                else:
                    # Anything other than x86_64 cannot receive updates.
                    result = self.results['unsupported_arch'].copy()
                    if "86" in system_arch:
                        arch = "32 bit x86"
                    elif "arm" in system_arch:
                        arch = "ARM"
                if arch is None:
                    # Completely unrecognized: show the raw uname value
                    # instead of the literal string "None".
                    arch = system_arch
                format_fields['arch'] = arch
                result.collector_result = collector_result
                result.analyzer = self
                result.format(format_fields)
                results.append(result)
        return results
def analyze(self, collector_results):
    """
    Translate system collector results into display results: one result per
    simple field (hostname/version/uid/serial/model) plus an architecture
    result that warns when the architecture is not x86_64.
    """
    results = []
    format_fields = {}
    simple_sources = ["hostname", 'version', 'uid', 'serial', 'model']
    for collected in collector_results:
        source = collected.source
        if source in simple_sources:
            # Template like {'uid': '{uid}'}, filled from the first line
            # of the collector's output.
            template = {source: '{{{source}}}'.format(source=source)}
            format_fields[source] = collected.output[0]
            entry = AnalyzerResult(severity=None, other_results=template)
            entry.collector_result = collected
            entry.analyzer = self
            entry.format(format_fields)
            results.append(entry)
        elif source == "arch":
            system_arch = collected.output[0]
            arch = None
            if system_arch == "x86_64":
                arch = "64 bit x86"
                entry = AnalyzerResult(
                    other_results={'architecture': '{arch}'})
            else:
                # Non-x86_64 architectures get the unsupported warning.
                entry = self.results['unsupported_arch'].copy()
                if "86" in system_arch:
                    arch = "32 bit x86"
                elif "arm" in system_arch:
                    arch = "ARM"
            format_fields['arch'] = arch
            entry.collector_result = collected
            entry.analyzer = self
            entry.format(format_fields)
            results.append(entry)
    return results
class PartitionsAnalyzer(Analyzer):
    """
    Report partition usage and flag filesystems that are critically full,
    near full, not mounted, or of a non-typical type.
    """
    order = 0
    heading = "Partition Usage"
    categories = ["os"]
    collector = FilesystemCollector
    results = {
        "critical":
        AnalyzerResult(
            severity=AnalyzerResultSeverityFail,
            summary='Disk usage critical',
            detail=None,
            recommendation=None,
            other_results={
                '{severity}':
                '{type:<10} {partition:<15}-> {mount:<10} {percent_used:<10} {summary}'
            }),
        "near-critical":
        AnalyzerResult(
            severity=AnalyzerResultSeverityWarn,
            summary='Disk usage near critical',
            detail=None,
            recommendation=None,
            other_results={
                '{severity}':
                '{type:<10} {partition:<15}-> {mount:<10} {percent_used} {summary}'
            }),
        "non-critical":
        AnalyzerResult(
            severity=AnalyzerResultSeverityPass,
            summary='',
            detail=None,
            recommendation=None,
            other_results={
                '{severity}':
                '{type:<10} {partition:<15}-> {mount:<10} {percent_used} {summary}'
            }),
    }

    def analyze(self, collector_results):
        """
        Classify each partition into critical (>=95%), near-critical
        (>=85%, unmounted, vfat, or unknown usage) or non-critical.
        """
        results = []
        for collector_result in collector_results:
            if collector_result.source != "partition":
                continue
            for partition in collector_result.output:
                info = collector_result.output[partition]
                format_fields = {
                    'partition': partition,
                    'mount': 'unknown',
                    'percent_used': 'unknown',
                    'type': 'unknown',
                }
                percent_used = None
                # Guard both keys: 'used' may be present without 'size'.
                if 'used' in info and 'size' in info and info['size'] > 0:
                    percent_used = int(
                        round(info['used'] / info['size'] * 100, 0))
                    format_fields['percent_used'] = '{percent_used}%'.format(
                        percent_used=percent_used)
                if 'mount' in info:
                    format_fields['mount'] = info['mount']
                if 'type' in info:
                    format_fields['type'] = info['type']

                # Use the defaulted value so a missing 'type' key falls into
                # the "unknown" (not mounted) branch instead of KeyError.
                partition_type = format_fields['type']
                if partition_type == 'swap':
                    result = PartitionsAnalyzer.results['non-critical'].copy()
                    format_fields['percent_used'] = '-'
                    format_fields['mount'] = '-'
                elif partition_type in ["unknown", "cdrom", "usb"]:
                    # Partition exists, not mounted
                    result = PartitionsAnalyzer.results['near-critical'].copy()
                    result.results['summary'] = 'not mounted'
                elif partition_type in ["vfat"]:
                    # Partition exists, mounted, but non-typical
                    result = PartitionsAnalyzer.results['near-critical'].copy()
                    result.results['summary'] = 'non-typical type'
                elif percent_used is None:
                    result = PartitionsAnalyzer.results['near-critical'].copy()
                    result.results['summary'] = ''
                elif percent_used >= 95:
                    result = PartitionsAnalyzer.results['critical'].copy()
                elif percent_used >= 85:
                    result = PartitionsAnalyzer.results['near-critical'].copy()
                else:
                    result = PartitionsAnalyzer.results['non-critical'].copy()

                result.collector_result = collector_result
                result.analyzer = self
                result.format(format_fields)
                results.append(result)
        return results
class UvmExceptionsAnalyzer(Analyzer):
    """
    Process uvm logs for exceptions, deduplicated by the Untangle source
    location (class:line) where the exception surfaced.
    """
    categories = ["uvm"]
    collector = {
        'collector': FilesCollector,
        'arguments': {
            'id': 'uvm_log',
            'path': '/var/log/uvm/*',
            'ignore': 'packages.log'
        }
    }

    # "[timestamp] <> logger Exception in method" marks an uncaught exception.
    exception_begin_re = re.compile(
        r'\[([^]]+)\] <> ([^\s]+)\s+Exception in ([^\s]+)')
    # Any com./java./org. class reference; used to capture the error text.
    exception_any_re = re.compile(r'((com\.|java.*\.|org\.).+)')
    # Untangle stack frame: com.untangle...(File.java:line).
    exception_untangle_re = re.compile(r'(com\.untangle\..+)\(([^:]+):(\d+)\)')
    null_pointer_exception = "java.lang.NullPointerException"
    general_exception = "Exception:"
    heading = "untangle-vm exceptions"
    results = {
        "unknown_exception":
        AnalyzerResult(
            severity=AnalyzerResultSeverityFail,
            summary="untangle-vm unknown exception detected",
            detail=[
                'The following exception was found:',
                '{path} at line {line_number}',
                'the following exception was encountered {instances} times:',
                '{last_error}'
            ],
            recommendation="Send this information to Untangle engineering."),
        "known_exception":
        AnalyzerResult(
            severity=AnalyzerResultSeverityWarn,
            summary="untangle-vm known exception detected",
            detail=[
                'The following exception was found:',
                '{path} at line {line_number}',
                'the following exception was encountered {instances} times:',
                '{last_error}'
            ],
            recommendation=
            "Untangle engineering is likely aware of this but should review.")
    }

    def analyze(self, collector_results):
        """
        Scan log lines for exception markers, record the first error line
        and the first Untangle stack frame for each, and report occurrence
        counts sorted descending.
        """
        results = []
        exceptions = {}
        for collector_result in collector_results:
            exception = None
            for line in collector_result.output:
                match = UvmExceptionsAnalyzer.exception_begin_re.search(line)
                if match is not None:
                    # We've found an uncaught exception; details follow.
                    exception = {
                        'last_error': None,
                        'result':
                        UvmExceptionsAnalyzer.results["unknown_exception"]
                    }
                    continue
                if exception is None:
                    if UvmExceptionsAnalyzer.null_pointer_exception in line:
                        # We want to know about null pointer exceptions.
                        exception = {
                            'last_error': None,
                            'result':
                            UvmExceptionsAnalyzer.results["unknown_exception"]
                        }
                        # Fall through: this line is also the error text.
                    elif UvmExceptionsAnalyzer.general_exception in line:
                        # A caught exception.  Not as severe.
                        exception = {
                            'last_error': None,
                            'result':
                            UvmExceptionsAnalyzer.results["known_exception"]
                        }
                        # Fall through: this line is also the error text.
                if exception is not None:
                    if exception['last_error'] is None:
                        # Next line is the error.
                        match = UvmExceptionsAnalyzer.exception_any_re.search(
                            line)
                        if match is not None:
                            exception['last_error'] = match.group(1)
                        else:
                            exception['last_error'] = line
                    match = UvmExceptionsAnalyzer.exception_untangle_re.search(
                        line)
                    if match is not None:
                        # Everything between the error and our code is likely
                        # just noise; we want to know where we used it.
                        exception['path'] = match.group(1)
                        exception['file'] = match.group(2)
                        exception['line_number'] = match.group(3)
                        exception['instances'] = 0
                        exception_path = "{path}:{line_number}".format(
                            path=exception['path'],
                            line_number=exception['line_number'])
                        exception['path_key'] = exception_path
                        if exception_path not in exceptions:
                            exceptions[exception_path] = exception
                        exceptions[exception_path]['instances'] += 1
                        exception = None

        # Sort by instances, reverse order. Unlike other stats, we want all of these!
        for exception in sorted(exceptions.values(),
                                key=lambda d: d['instances'],
                                reverse=True):
            result = exception['result'].copy()
            result.collector_result = collector_result
            result.analyzer = self
            result.format(exception)
            results.append(result)
        return results
class BitdefenderAnalyzer(Analyzer):
    """
    Analyze BitDefender logs for anti-malware database update failures.
    """
    categories = ["mail", "virus"]
    collector = {
        'collector': FilesCollector,
        'arguments': {
            'id': 'bitdefender_log',
            'path': '/var/log/bdamserver.*'
        }
    }
    # group(1) is the numeric/opaque update error code from bdamserver.
    update_error_result_re = re.compile(
        r'ERROR: The anti-malware database update failed, error Unknown error \((.+)\)'
    )
    heading = "BitDefender Log"
    results = {
        "update_error":
        AnalyzerResult(
            severity=AnalyzerResultSeverityFail,
            summary="BitDefender update errors detected",
            detail=[
                'The update error code {update_error_code} was detected {total} times.',
                'Many of these may indicate the following issues:',
                'A web proxy is in front of the system and preventing updates from occuring properly.'
            ],
            recommendation=
            "Perform a tcpdump with -A to the host bd.untangle.com to watch for traffic and look for possibility of web proxy."
        )
    }

    def analyze(self, collector_results):
        """
        Count update failures per error code and report one failure result
        per distinct code.
        """
        results = []
        update_error_codes = {}
        for collector_result in collector_results:
            for line in collector_result.output:
                match = BitdefenderAnalyzer.update_error_result_re.search(
                    line)
                if match is not None:
                    update_error_code = match.group(1)
                    if update_error_code not in update_error_codes:
                        update_error_codes[update_error_code] = 0
                    update_error_codes[update_error_code] += 1

        for key, count in update_error_codes.items():
            result = BitdefenderAnalyzer.results["update_error"].copy()
            result.collector_result = collector_result
            result.analyzer = self
            result.format({
                # Report each code with its own count; previously the last
                # matched code was reported for every key and the count was
                # an uninitialized "total" that always stayed 0.
                "update_error_code": key,
                "total": count,
            })
            results.append(result)
        return results
class UpdateSourceAnalyzer(Analyzer):
    """
    Analyze Debian apt sources to verify which package server is in use.
    """
    categories = ["updates"]
    collector = AptSourcesCollector

    # Matches "deb[-src] [options] url distribution ..." lines; raw string
    # so \s and \[ are regex escapes, not invalid string escapes.
    deb_re = re.compile(
        r'^(?P<type>deb[^\s]*)\s+(\[.+\]\s+|)(?P<url>[^\s+]+)\s+(?P<distribution>[^\s+]+)(.*)'
    )
    heading = "Debian apt sources"
    results = {
        "public":
        AnalyzerResult(
            severity=AnalyzerResultSeverityPass,
            summary="Pointing to production package server '{host}'",
            detail="All customer units should be using this package server."),
        "internal":
        AnalyzerResult(
            severity=AnalyzerResultSeverityWarn,
            summary="Pointing to development package server '{host}'",
            detail=
            "For internal Untangle corporate units this is acceptable, but not for customer units.",
            recommendation=
            "If this is a customer facing system, change the host to updates.untangle.com"
        ),
        "unknown":
        AnalyzerResult(
            severity=AnalyzerResultSeverityFail,
            summary="Pointing to unknown server '{host}'",
            detail=
            "No customer or corporate units should be pointing to an unknown package server",
            recommendation=
            "From file:\n\t\t{collector_result_source}:\n\t\tdelete entry:\n\t\t{entry}"
        )
    }

    def analyze(self, collector_results):
        """
        Classify each active apt source line by its hostname: production
        (pass), internal development (warn), or unknown (fail).
        """
        results = []
        for collector_result in collector_results:
            for line in collector_result.output:
                if line.startswith('#') or len(line) == 0:
                    # Ignore comments, blank lines.
                    continue
                match = self.deb_re.search(line)
                if match is None:
                    continue
                url = match.group("url")
                parsed_url = urllib.parse.urlsplit(url)
                if parsed_url.hostname == 'updates.untangle.com':
                    result = copy.deepcopy(
                        UpdateSourceAnalyzer.results["public"])
                elif parsed_url.hostname == 'package-server.untangle.int':
                    result = copy.deepcopy(
                        UpdateSourceAnalyzer.results["internal"])
                else:
                    result = copy.deepcopy(
                        UpdateSourceAnalyzer.results["unknown"])
                result.collector_result = collector_result
                result.analyzer = self
                result.format({"entry": line, "host": parsed_url.hostname})
                results.append(result)
        return results
class UpdateSourceAnalyzer(Analyzer):
    """
    Analyze apt sources against the expected production package server and
    the distribution expected for this unit's public version.
    """
    categories = ["updates"]
    collector = [
        SystemCollector, {
            'collector': FilesCollector,
            'arguments': {
                'id': 'apt_sources',
                'path': ['/etc/apt/*.list', '/etc/apt/sources.list.d/*.list']
            }
        }
    ]
    # Matches "deb[-src] [options] url distribution ..." lines; raw string
    # so \s and \[ are regex escapes, not invalid string escapes.
    deb_re = re.compile(
        r'^(?P<type>deb[^\s]*)\s+(\[.+\]\s+|)(?P<url>[^\s+]+)\s+(?P<distribution>[^\s+]+)(.*)'
    )
    distribution_prefix = "stable-"
    heading = "Debian apt sources"
    results = {
        "public":
        AnalyzerResult(
            severity=AnalyzerResultSeverityPass,
            summary="Pointing to production package server '{host}'",
            detail="All customer units should be using this package server."),
        "internal":
        AnalyzerResult(
            severity=AnalyzerResultSeverityWarn,
            summary="Pointing to development package server '{host}'",
            detail=
            "For internal Untangle corporate units this is acceptable, but not for customer units.",
            recommendation=
            "If this is a customer facing system, change the host to updates.untangle.com"
        ),
        "unknown":
        AnalyzerResult(
            severity=AnalyzerResultSeverityFail,
            summary="Pointing to unknown server '{host}'",
            detail=
            "No customer or corporate units should be pointing to an unknown package server",
            recommendation=[
                'From file:', '{collector_result_source}:', 'delete entry:',
                '{entry}'
            ]),
        "bad_distribution":
        AnalyzerResult(
            severity=AnalyzerResultSeverityFail,
            summary="Distribution is incorrect '{distribution}'",
            detail="Incorrect distribution affects downloading correct updates",
            recommendation=[
                'In file:', '{collector_result_source}:', 'modify entry:',
                '{entry}', 'to:',
                'deb http://{uid}:[email protected]/public/buster {distribution_prefix}{pubversion} main non-free'
            ]),
        "none_found":
        AnalyzerResult(
            severity=AnalyzerResultSeverityFail,
            summary="No active sources found",
            detail="This unit is in an un-upgradable state",
            recommendation=[
                'In file:', '/etc/apt/sources.list.d/untangle.list:',
                'Add entry:',
                'deb http://{uid}:[email protected]/public/buster {distribution_prefix}{pubversion} main non-free'
            ])
    }

    def analyze(self, collector_results):
        """
        Validate every active apt source entry (host and distribution) and
        produce one result per entry, or a "none_found" failure when no
        active entries exist.
        """
        uid = "0000-0000-0000-0000"
        pubversion = "0"
        results = []

        # Get uid and public version first; they feed the recommendation
        # templates below.
        for collector_result in collector_results:
            if collector_result.collector.id == "system":
                if collector_result.source == 'uid':
                    uid = collector_result.output[0]
                elif collector_result.source == 'pubversion':
                    # e.g. "16.2" -> "162", to build "stable-162".
                    pubversion = collector_result.output[0].replace('.', '')

        # Expected distribution is loop-invariant; build it once.
        expected_distribution = '{distribution_prefix}{pubversion}'.format(
            distribution_prefix=UpdateSourceAnalyzer.distribution_prefix,
            pubversion=pubversion)

        for collector_result in collector_results:
            if collector_result.collector.id != "apt_sources":
                continue
            for line in collector_result.output:
                if line.startswith('#') or len(line) == 0:
                    # Ignore comments, blank lines.
                    continue
                match = self.deb_re.search(line)
                if match is None:
                    continue
                url = match.group("url")
                parsed_url = urllib.parse.urlsplit(url)
                if parsed_url.hostname == 'updates.untangle.com':
                    # Correct target; how does everything else look?
                    if match.group("distribution") != expected_distribution:
                        result = UpdateSourceAnalyzer.results[
                            "bad_distribution"].copy()
                    else:
                        # All good!
                        result = UpdateSourceAnalyzer.results["public"].copy()
                elif parsed_url.hostname == 'package-server.untangle.int':
                    # Legit for internal, almost certainly not for customers.
                    result = UpdateSourceAnalyzer.results["internal"].copy()
                else:
                    # Not legit at all.
                    result = UpdateSourceAnalyzer.results["unknown"].copy()
                result.collector_result = collector_result
                result.analyzer = self
                result.format({
                    "entry": line,
                    "host": parsed_url.hostname,
                    "distribution": match.group("distribution"),
                    'uid': uid,
                    "distribution_prefix":
                    UpdateSourceAnalyzer.distribution_prefix,
                    'pubversion': pubversion
                })
                results.append(result)

        if len(results) == 0:
            # Inexplicably, no active entries found.
            result = UpdateSourceAnalyzer.results["none_found"].copy()
            result.analyzer = self
            result.format({
                'uid': uid,
                "distribution_prefix":
                UpdateSourceAnalyzer.distribution_prefix,
                'pubversion': pubversion
            })
            results.append(result)
        return results