def get_freeze_locations(scan_log_filename, scan):
    """
    [Wed Nov 1 23:43:16 2017 - debug] ...
    [Wed Nov 1 23:43:19 2017 - debug] ...

    Three seconds without writing anything to the log, as in the example
    above? That might be a freeze! This function reports all gaps greater
    than 5 seconds between two consecutive log lines.

    :param scan: The file handler for the scan log
    :return: A KeyValueOutput listing the log lines that end each freeze
    """
    scan.seek(0)

    freezes = []

    previous_line_time = get_line_epoch(scan.readline())

    for line in scan:
        try:
            current_line_epoch = get_line_epoch(line)
        except InvalidTimeStamp:
            # Not all log lines start with a timestamp, ignore those
            continue

        time_spent = current_line_epoch - previous_line_time

        if time_spent > 5:
            freezes.append(line.strip())

        previous_line_time = current_line_epoch

    return KeyValueOutput('debug_log_freeze',
                          'Delays greater than 5 seconds between two log lines',
                          freezes)


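# get_line_epoch() and InvalidTimeStamp are defined elsewhere in this module.
# A minimal sketch of the contract get_freeze_locations() relies on, assuming
# the '[Wed Nov 1 23:43:16 2017 - debug]' timestamp format shown in the
# docstring above:
#
#     import time
#
#     class InvalidTimeStamp(Exception):
#         pass
#
#     def get_line_epoch(scan_line):
#         # 'Wed Nov 1 23:43:16 2017' lives between '[' and ' - '
#         timestamp = scan_line[1:scan_line.find('-')].strip()
#         try:
#             parsed = time.strptime(timestamp, '%a %b %d %H:%M:%S %Y')
#         except ValueError:
#             raise InvalidTimeStamp('Invalid timestamp: "%s"' % scan_line)
#         return int(time.mktime(parsed))

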
def get_errors(scan_log_filename, scan):
    scan.seek(0)

    errors = []

    for line in scan:
        for error_re in ERRORS_RE:
            match = error_re.search(line)
            if match:
                errors.append(line.strip())
                # Don't add the same line twice when more than one
                # pattern matches it
                break

    scan.seek(0)

    for line in scan:
        if POOL_INTERNAL not in line:
            continue

        # Pool internal status lines which contain 'True' three times are
        # healthy; anything else is reported as an error
        if line.count('True') == 3:
            continue

        errors.append(line.strip())

    output = KeyValueOutput('errors',
                            'errors and exceptions',
                            {'count': len(errors),
                             'errors': errors})
    return output


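# ERRORS_RE and POOL_INTERNAL are module-level constants defined elsewhere.
# A sketch of the shape the loops above assume; the exact patterns and
# strings are assumptions, not taken from the source:
#
#     import re
#
#     ERRORS_RE = [re.compile('Unhandled exception'),
#                  re.compile('traceback', re.IGNORECASE)]
#
#     POOL_INTERNAL = 'Worker pool internal'

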
def get_parser_errors_summary(scan_log_filename, scan):
    (timeout_count, _, _,
     memory_count, _, _) = get_parser_errors_data(scan_log_filename, scan)

    return KeyValueOutput('parser_error_summary',
                          'Parser errors',
                          {'Timeout errors': timeout_count,
                           'Memory errors': memory_count})


def show_consumer_join_times(scan_log_filename, scan):
    scan.seek(0)

    join_times = []

    for line in scan:
        if 'seconds to join' not in line:
            continue

        match = JOIN_TIMES.search(line)
        if match:
            join_times.append(match.group(0))

    if not join_times:
        return KeyValueOutput('consumer_join_times',
                              'The scan log has no calls to join()')

    return KeyValueOutput('consumer_join_times',
                          'These consumers have been join()ed',
                          join_times)


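# JOIN_TIMES is a module-level regular expression defined elsewhere. Given
# the 'seconds to join' guard above it should look roughly like this; the
# exact pattern is an assumption:
#
#     import re
#
#     JOIN_TIMES = re.compile(r'\w+ took .*? seconds to join\(\)')

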
def get_error_rate_summary(scan_log_filename, scan):
    error_rate, _ = get_error_rate_data(scan_log_filename, scan)

    max_error_rate = max(error_rate) if error_rate else 0

    return KeyValueOutput('error_rate_summary',
                          'Error rate summary',
                          {'Error rate exceeded 10%': max_error_rate > 10,
                           'Error rate exceeded 20%': max_error_rate > 20})


def get_http_errors(scan_log_filename, scan):
    scan.seek(0)

    error_count = 0

    for line in scan:
        for error in HTTP_ERRORS:
            if error in line:
                error_count += 1

    return KeyValueOutput('http_errors', 'HTTP errors', error_count)


def get_not_found_requests(scan_log_filename, scan):
    scan.seek(0)

    not_found_count = 0

    for line in scan:
        for error in NOT_FOUND_RESPONSE:
            if error in line:
                not_found_count += 1

    return KeyValueOutput('not_found_requests',
                          '404 requests sent by is_404()',
                          not_found_count)


def get_dbms_queue_size_exceeded(scan_log_filename, scan):
    scan.seek(0)

    error_count = 0

    for line in scan:
        if SQLITE_MAX_REACHED in line:
            error_count += 1

    return KeyValueOutput('sqlite_limit_reached',
                          'SQLite queue limit reached',
                          error_count)


def get_scan_finished_in(scan_log_filename, scan):
    scan.seek(0)

    first_timestamp = get_first_timestamp(scan)

    for line in scan:
        match = SCAN_FINISHED_IN.search(line)
        if match:
            return KeyValueOutput('scan_time',
                                  'Scan time and state',
                                  {'finished': True,
                                   'scan_time': match.group(1)})

    # The scan did not finish; estimate the run time from the timestamps of
    # the first and last log lines
    last_timestamp = get_last_timestamp(scan)
    scan_run_time = last_timestamp - first_timestamp

    return KeyValueOutput('scan_time',
                          'Scan time and state',
                          {'finished': False,
                           'scan_time': epoch_to_string(scan_run_time)})


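# get_first_timestamp() and get_last_timestamp() are defined elsewhere in
# this module. A minimal sketch on top of get_line_epoch(), assuming they
# parse the first / last line that carries a valid timestamp:
#
#     def get_first_timestamp(scan):
#         scan.seek(0)
#         first_line = scan.readline()
#         scan.seek(0)
#         return get_line_epoch(first_line)
#
#     def get_last_timestamp(scan):
#         scan.seek(0)
#         for line in reversed(list(scan)):
#             try:
#                 return get_line_epoch(line)
#             except InvalidTimeStamp:
#                 continue

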
def get_file_sizes(scan_log_filename, scan):
    stat_info = os.stat(scan_log_filename)

    # Rewind the handler: a previous analyzer has most likely consumed it
    scan.seek(0)

    latest_xml_size = None

    for line in scan:
        match = XML_OUTPUT_SIZE.search(line)
        if match:
            latest_xml_size = match.group(1)

    data = {'debug_log': stat_info.st_size,
            'xml_output': latest_xml_size}

    return KeyValueOutput('file_sizes', 'output file sizes (bytes)', data)


def get_known_problems(scan_log_filename, scan):
    """
    This will query the log for some known issues and, if those appear,
    show alerts in the output.

    :param scan: The file handler for the scan log
    :return: A KeyValueOutput describing any known problem that was found
    """
    scan.seek(0)

    #
    # Identify a problem I rarely see: grep plugin finishes and other plugins
    # are still running. This seems to be an issue in the teardown process.
    #
    found_grep_teardown = None

    grep_teardown = 'Finished Grep consumer _teardown'
    discover_call = '.discover(uri='

    for line in scan:
        if grep_teardown in line:
            found_grep_teardown = line.strip()
            continue

        if discover_call in line and found_grep_teardown:
            data = ('The grep consumer was finished at:\n'
                    ' %s\n'
                    'But calls to discover were found after:\n'
                    ' %s' % (found_grep_teardown, line.strip()))
            return KeyValueOutput('known_problems',
                                  'Known scanner race condition found!',
                                  data)

    return KeyValueOutput('known_problems', 'No known problems found', [])


def show_generic_spent_time(scan, name, must_have):
    scan.seek(0)

    spent_time = 0.0

    for line in scan:
        if must_have not in line:
            continue

        match = SCAN_TOOK_RE.search(line)
        if match:
            spent_time += float(match.group(1))

    return KeyValueOutput('%s_spent_time' % name,
                          'Time spent running %s plugins' % name,
                          {'human': epoch_to_string(spent_time),
                           'seconds': spent_time})


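# SCAN_TOOK_RE is defined elsewhere; given the float(match.group(1)) above it
# should capture the number of seconds, roughly like this (the exact pattern
# is an assumption):
#
#     import re
#
#     SCAN_TOOK_RE = re.compile(r'took (\d+\.\d+)s to run')
#
# show_generic_spent_time() is meant to be wrapped once per plugin type.
# Hypothetical wrappers, assuming '.audit(' and '.discover(' are the log
# markers for those plugin calls:
#
#     def show_audit_time(scan_log_filename, scan):
#         return show_generic_spent_time(scan, 'audit', '.audit(')
#
#     def show_discover_time(scan_log_filename, scan):
#         return show_generic_spent_time(scan, 'crawl', '.discover(')

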
def show_errors(scan_log_filename, scan):
    scan.seek(0)

    errors = []

    for line in scan:
        for error_re in ERRORS_RE:
            match = error_re.search(line)
            if match:
                errors.append(line)

    output = KeyValueOutput('errors',
                            'errors and exceptions',
                            {'count': len(errors),
                             'errors': errors})
    return output


def get_bruteforce_data(scan_log_filename, scan):
    scan.seek(0)

    times = []

    for line in scan:
        if 'brute' not in line:
            continue

        for finished_re in FINISHED_BRUTEFORCE:
            match = finished_re.search(line)
            if match:
                took = match.group(1)
                times.append(took)

    output = KeyValueOutput('bruteforce_performance',
                            'Time spent brute-forcing',
                            {'count': len(times),
                             'times': times})
    return output


def get_parser_process_memory_limit_summary(scan_log_filename, scan):
    memory_limit, _ = get_parser_process_memory_limit_data(scan_log_filename,
                                                           scan)

    # Guard against logs without any memory limit messages, otherwise the
    # [-1] below raises IndexError
    if not memory_limit:
        return KeyValueOutput('parser_process_memory_limit',
                              'Latest memory limit',
                              None)

    return KeyValueOutput('parser_process_memory_limit',
                          'Latest memory limit',
                          '%s MB' % memory_limit[-1])


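# KeyValueOutput is imported from elsewhere. These analyzers only assume a
# simple container with a machine-readable key, a human-readable title and an
# optional payload; a minimal sketch:
#
#     class KeyValueOutput(object):
#         def __init__(self, key, title, data=None):
#             self.key = key
#             self.title = title
#             self.data = data
#
# Hypothetical driver showing how the analyzers in this module are called:
# each one receives the file name plus an already-open file handler, and
# rewinds the handler itself with scan.seek(0):
#
#     def analyze(scan_log_filename):
#         analyzers = (get_errors, get_http_errors, get_file_sizes)
#         with open(scan_log_filename) as scan:
#             for analyzer in analyzers:
#                 output = analyzer(scan_log_filename, scan)
#                 print(output.title, output.data)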