def parse_findings(filename, scanner):
    """ Parse report JSON and register findings on the scanner.

    :param filename: path to the report JSON file
    :param scanner: scanner object; findings are appended to scanner.findings
    """
    # Load JSON (best-effort: a broken report is logged and skipped)
    try:
        with open(filename, "r") as file:
            data = json.load(file)
    except:  # pylint: disable=W0702
        log.exception("Failed to load report JSON")
        return
    # Load CWE ID -> human-readable title map shipped inside the package
    cwe_map = json.loads(
        pkg_resources.resource_string(
            "dusty",
            f"{'/'.join(__name__.split('.')[1:-1])}/data/cwe_map_v4.2.json"))
    # Validate report shape
    if not isinstance(data, dict) or "vulnerabilities" not in data:
        log.info("No data in report")
        return
    # Make finding instances
    for item in data["vulnerabilities"]:
        vuln_severity = cvss_to_severity(item.get("cvss", 0.0))
        vuln_cwe = item.get("cwe", "Vulnerability")
        # Resolve CWE title, falling back to the raw CWE value
        vuln_cwe_title = cwe_map[vuln_cwe] if vuln_cwe in cwe_map else vuln_cwe
        vuln_file_title = f" in {item.get('classMessage')}" if "classMessage" in item else ""
        vuln_title = f"{vuln_cwe_title}{vuln_file_title}"
        # Legacy file path: classMessage without the trailing " (...)" suffix
        vuln_file = item.get("classMessage", "").rsplit(" (", 1)[0]
        # Build description chunks
        vuln_info_chunks = list()
        if "longMessage" in item:
            vuln_info_chunks.append(
                markdown.markdown_escape(item["longMessage"]))
        if "shortMessage" in item:
            vuln_info_chunks.append(
                markdown.markdown_escape(item["shortMessage"]))
        # FIX: 'classMessage' and 'method' were accessed unconditionally here
        # (item['classMessage'], item['method']) and raised KeyError for items
        # lacking them, although the title code above already treats
        # 'classMessage' as optional — now guarded the same way
        if "classMessage" in item:
            vuln_info_chunks.append(
                f"**Class:** {markdown.markdown_escape(item['classMessage'])}")
        if "method" in item:
            vuln_info_chunks.append(
                f"**Method:** {markdown.markdown_escape(item['method'])}")
        if "affectedFiles" in item:
            vuln_info_chunks.append(
                f"**Files:** {markdown.markdown_escape(', '.join(item['affectedFiles']))}"
            )
        # Save finding with meta used by downstream processing
        finding = SastFinding(title=vuln_title,
                              description=["\n\n".join(vuln_info_chunks)])
        finding.set_meta("tool", scanner.get_name())
        finding.set_meta("severity", vuln_severity)
        finding.set_meta("legacy.file", vuln_file)
        endpoints = list()
        if vuln_file:
            endpoints.append(namedtuple("Endpoint", ["raw"])(raw=vuln_file))
        finding.set_meta("endpoints", endpoints)
        log.debug(f"Endpoints: {finding.get_meta('endpoints')}")
        scanner.findings.append(finding)
def parse_findings(data, scanner):
    """ Parse Brakeman report data and register findings on the scanner.

    :param data: raw brakeman report content
    :param scanner: scanner object; findings are appended to scanner.findings
    """
    # Run the legacy parser; on any failure fall back to an empty result,
    # since a parse error usually means the target is not a Rails app
    try:
        parsed_items = BrakemanParser(data).items
    except:  # pylint: disable=W0702
        log.exception("Failed to parse brakeman report")
        log.warning(
            "Possibly ruby code path is invalid or not Ruby-on-Rails application"
        )
        parsed_items = list()
    # Convert each parsed item into a SastFinding with legacy meta attached
    for entry in parsed_items:
        review_ref = (
            f"**File to review:** {markdown.markdown_escape(entry['file_path'])}"
            f":{entry['line']}"
        )
        description_text = "\n\n".join([
            markdown.markdown_escape(entry['description']),
            f"**References:** {markdown.markdown_escape(entry['references'])}",
            review_ref,
        ])
        finding = SastFinding(
            title=entry["title"],
            description=[description_text],
        )
        finding.set_meta("tool", scanner.get_name())
        finding.set_meta(
            "severity", constants.BRAKEMAN_SEVERITY_MAPPING[entry["severity"]])
        finding.set_meta("legacy.file", entry["file_path"])
        finding.set_meta("legacy.line", entry["line"])
        endpoint_type = namedtuple("Endpoint", ["raw"])
        finding.set_meta("endpoints", [endpoint_type(raw=entry["file_path"])])
        log.debug(f"Endpoints: {finding.get_meta('endpoints')}")
        scanner.findings.append(finding)
def parse_findings(result, scanner):
    """ Parse NodeJsScan results and register findings on the scanner.

    :param result: raw scan result passed to the legacy parser
    :param scanner: scanner object; findings are appended to scanner.findings
    """
    # Run the legacy parser over the raw result
    parsed_items = NodeJsScanParser(result).items
    # Convert each parsed item into a SastFinding with legacy meta attached
    for entry in parsed_items:
        file_ref = (
            f"**File to review:** {markdown.markdown_escape(entry['file_path'])}"
            f":{entry['line']}"
        )
        main_description = "\n\n".join([
            markdown.markdown_escape(entry['description']),
            file_ref,
        ])
        finding = SastFinding(
            title=entry["title"],
            description=[main_description] + entry["steps_to_reproduce"],
        )
        finding.set_meta("tool", scanner.get_name())
        finding.set_meta("severity", entry["severity"])
        finding.set_meta("legacy.file", entry["file_path"])
        finding.set_meta("legacy.line", entry["line"])
        # Endpoint meta is only attached when a file path is present
        endpoints = list()
        if entry["file_path"]:
            endpoint_type = namedtuple("Endpoint", ["raw"])
            endpoints.append(endpoint_type(raw=entry["file_path"]))
        finding.set_meta("endpoints", endpoints)
        log.debug(f"Endpoints: {finding.get_meta('endpoints')}")
        scanner.findings.append(finding)
def parse_findings(data, scanner):
    """ Parse tool text output and register findings on the scanner.

    :param data: raw tool output text
    :param scanner: scanner object; findings are appended to scanner.findings
    """
    log.debug("Parsing findings")
    # FIX: regex chunks are raw strings now; the original used plain strings
    # containing "\[", "\+", "\s" — invalid escape sequences that raise
    # SyntaxWarning on modern CPython. The compiled patterns are identical
    # (r"\n" is the regex newline escape, matching the literal newlines the
    # non-raw strings contained).
    item_regex = re.compile(
        "".join([
            r"^(\[\+\] New Finding!!!)$",
            r"\s*Name: (?P<name>.*)$",
            r"\s*Url: (?P<url>.*)$",
            r"\s*Description: (?P<description>[\s\S]*?)\n\n"
        ]), re.MULTILINE)
    for item in item_regex.finditer(data):
        # Make finding object
        description = list()
        description.append(markdown.markdown_escape(item.group("description")))
        description.append(
            f'\n**URL:** {markdown.markdown_escape(item.group("url"))}')
        description = "\n".join(description)
        finding = DastFinding(title=item.group("name"),
                              description=description)
        finding.set_meta("tool", scanner.get_name())
        finding.set_meta("severity", "Info")
        # Endpoints (for backwards compatibility)
        endpoints = list()
        endpoint = url.parse_url(item.group("url"))
        endpoints.append(endpoint)
        finding.set_meta("endpoints", endpoints)
        log.debug(f"Endpoints: {finding.get_meta('endpoints')}")
        # Done
        scanner.findings.append(finding)
def parse_findings(filename, scanner):
    """ Parse Dependency Check report and register findings on the scanner.

    :param filename: path to the report file consumed by the legacy parser
    :param scanner: scanner object; findings are appended to scanner.findings
    """
    # Run the legacy parser over the report file
    parsed_items = DependencyCheckParser(filename).items
    # Convert each parsed item into a SastFinding with legacy meta attached
    for entry in parsed_items:
        summary = "Vulnerable dependency was found. Please upgrade component or check that vulnerable functionality is not used."  # pylint: disable=C0301
        body = "\n\n".join([
            summary,
            markdown.markdown_escape(entry["description"]),
            f"**File to review:** {markdown.markdown_escape(entry['file_path'])}",
        ])
        finding = SastFinding(
            title=entry["title"],
            description=[body] + entry["steps_to_reproduce"],
        )
        finding.set_meta("tool", scanner.get_name())
        finding.set_meta("severity", entry["severity"])
        finding.set_meta("legacy.file", entry["file_path"])
        # Endpoint meta is only attached when a file path is present
        endpoints = list()
        if entry["file_path"]:
            endpoint_type = namedtuple("Endpoint", ["raw"])
            endpoints.append(endpoint_type(raw=entry["file_path"]))
        finding.set_meta("endpoints", endpoints)
        log.debug(f"Endpoints: {finding.get_meta('endpoints')}")
        scanner.findings.append(finding)
def parse_findings(output_file, scanner):  # pylint: disable=E,W,R,C
    """ Parse findings (code from dusty 1.0) """
    # Parse HTML report using legacy parser.
    # Statuses to filter may come from config as a comma-separated string
    # or as a ready-made list; normalize to a list either way.
    filtered_statuses = scanner.config.get(
        "filtered_statuses", constants.PTAI_DEFAULT_FILTERED_STATUSES)
    if isinstance(filtered_statuses, str):
        filtered_statuses = [
            item.strip() for item in filtered_statuses.split(",")
        ]
    findings = PTAIScanParser(output_file, filtered_statuses).items
    for item in findings:
        # Description: escaped description text plus file reference,
        # followed by HTML-escaped steps to reproduce.
        # NOTE(review): the replace() target below looks like a plain space
        # but may be a non-breaking space (U+00A0) stripped from PTAI HTML —
        # confirm against the original source before touching it.
        finding = SastFinding(
            title=item["title"],
            description=[
                html.escape(
                    markdown.markdown_escape(item["description"].replace(
                        " ", ""))) +
                f"\n\n**File to review:** {markdown.markdown_escape(item['file_path'])}"
            ] + [html.escape(data) for data in item["steps_to_reproduce"]])
        finding.set_meta("tool", scanner.get_name())
        # Map PTAI severity label to common severity
        finding.set_meta("severity",
                         constants.PTAI_SEVERITIES[item["severity"]])
        finding.set_meta("legacy.file", item["file_path"])
        # Endpoint meta (for backwards compatibility with dusty 1.0)
        finding.set_meta(
            "endpoints",
            [namedtuple("Endpoint", ["raw"])(raw=item["file_path"])])
        log.debug(f"Endpoints: {finding.get_meta('endpoints')}")
        scanner.findings.append(finding)
def parse_findings(data, scanner):
    """ Parse Safety scan output and register findings on the scanner.

    :param data: raw scan output passed to the legacy parser
    :param scanner: scanner object; findings are appended to scanner.findings
    """
    # Run the legacy parser over the raw data
    parsed_items = SafetyScanParser(data).items
    # Every Safety finding is reported with a fixed Medium severity
    for entry in parsed_items:
        finding = SastFinding(
            title=entry["title"],
            description=[markdown.markdown_escape(entry["description"])],
        )
        finding.set_meta("tool", scanner.get_name())
        finding.set_meta("severity", "Medium")
        scanner.findings.append(finding)
def parse_findings(output_file, scanner):
    """ Parse findings (code from dusty 1.0).

    :param output_file: path to the JSON report with scanned hosts/ports
    :param scanner: scanner object; findings are appended to scanner.findings
    """
    log.debug("Parsing findings")
    # Read the JSON report
    with open(output_file, "rb") as report_file:
        issues = json.load(report_file)
    # One informational finding per report entry (first listed port is used)
    for entry in issues:
        first_port = entry["ports"][0]["port"]
        summary = f'Open port {first_port} found on {entry["ip"]}'
        finding = DastFinding(
            title=summary,
            description=markdown.markdown_escape(summary),
        )
        finding.set_meta("tool", scanner.get_name())
        # Lowest severity in the project-wide SEVERITIES list
        finding.set_meta("severity", SEVERITIES[-1])
        scanner.findings.append(finding)
def parse_findings(data, scanner):
    """ Parse Bandit report and register findings on the scanner.

    :param data: raw bandit output passed to the legacy parser
    :param scanner: scanner object; findings are appended to scanner.findings
    """
    # Parse JSON using legacy parser
    findings = BanditParser(data).items
    # Bandit ID -> better title/description database.
    # FIX: hoisted out of the per-finding loop — the original re-opened and
    # re-parsed this packaged JSON resource once for every finding.
    database = json.load(
        pkg_resources.resource_stream(
            "dusty",
            f"{'/'.join(__name__.split('.')[1:-1])}/data/findings.json"))
    # Make finding instances
    for item in findings:
        finding = SastFinding(
            title=item["title"],
            description=[
                "\n\n".join([
                    f"```\n{item['description']}\n```",
                    f"**Mitigation:** {markdown.markdown_escape(item['mitigation'])}",
                    f"**Impact:** {markdown.markdown_escape(item['impact'])}",
                    f"**References:** {markdown.markdown_escape(item['references'])}",
                    f"**File to review:** {markdown.markdown_escape(item['file_path'])}"
                    f":{item['line']}"
                ])
            ]
        )
        # Better bandit finding titles/descriptions: rewrite title and
        # prepend curated description when the bandit ID is known
        if item["bandit_id"] in database:
            db_item = database[item["bandit_id"]]
            finding.set_meta("rewrite_title_to", db_item["title"])
            if db_item.get("description", None):
                finding.description[0] = "\n\n".join([
                    markdown.markdown_escape(db_item["description"]),
                    finding.description[0]
                ])
        # Other meta
        finding.set_meta("tool", scanner.get_name())
        finding.set_meta("severity",
                         constants.BANDIT_SEVERITIES[item["severity"]])
        finding.set_meta("legacy.file", item["file_path"])
        finding.set_meta("legacy.line", item["line"])
        finding.set_meta(
            "endpoints",
            [namedtuple("Endpoint", ["raw"])(raw=item["file_path"])])
        log.debug(f"Endpoints: {finding.get_meta('endpoints')}")
        scanner.findings.append(finding)
def parse_findings(filename, scanner):
    """ Parse findings """
    # Load JSON (best-effort: a broken report is logged and skipped)
    try:
        with open(filename, "r") as file:
            data = json.load(file)
    except:  # pylint: disable=W0702
        log.exception("Failed to load report JSON")
        return
    # Report severity label -> common severity mapping
    severity_mapping = {
        "UNKNOWN": "Info",
        "LOW": "Low",
        "MEDIUM": "Medium",
        "HIGH": "High",
        "CRITICAL": "Critical",
    }
    # Validate report shape: expected to be a non-empty list of data blocks
    if not isinstance(data, list) or not data:
        log.info("No data in report")
        return
    # Make finding instances
    for data_block in data:
        # Skip targets that reported no vulnerabilities
        if not data_block.get("Vulnerabilities", list()):
            log.info("Skipping empty data block: %s",
                     data_block.get("Target", data_block))
            continue
        for item in data_block.get("Vulnerabilities", list()):
            # Collect raw item fields (all optional in the report)
            vuln_id = item.get("VulnerabilityID", "")
            vuln_pkgname = item.get("PkgName", "")
            vuln_installed_version = item.get("InstalledVersion", "")
            vuln_fixed_version = item.get("FixedVersion", "")
            vuln_layer = item.get("Layer", dict()).get("DiffID", "")
            # Title: "<pkg>: <id>: <title>" (prefixes added when present)
            vuln_title = item.get("Title", "-")
            if vuln_id:
                vuln_title = f"{vuln_id}: {vuln_title}"
            if vuln_pkgname:
                vuln_title = f"{vuln_pkgname}: {vuln_title}"
            # Config-driven filters: TEMP IDs and description-less findings
            # are skipped unless explicitly allowed
            if not scanner.config.get("show_with_temp_id", False) and \
                    vuln_id.startswith("TEMP-"):
                log.info("Skipping finding with TEMP ID: %s", vuln_title)
                continue
            if not scanner.config.get("show_without_description", True) and \
                    "Description" not in item:
                log.info("Skipping finding without description: %s",
                         vuln_title)
                continue
            # Map severity; the image layer DiffID doubles as the legacy
            # "file" for endpoint/meta purposes
            vuln_severity = severity_mapping[item.get("Severity", "UNKNOWN")]
            vuln_file = vuln_layer
            # Build description chunks (only present fields are added)
            vuln_info_chunks = list()
            vuln_info_chunks.append(
                markdown.markdown_escape(item.get("Description", "-")))
            if vuln_id:
                vuln_info_chunks.append(
                    f"**VulnerabilityID:** {markdown.markdown_escape(vuln_id)}"
                )
            if vuln_pkgname:
                vuln_info_chunks.append(
                    f"**PkgName:** {markdown.markdown_escape(vuln_pkgname)}")
            if vuln_installed_version:
                vuln_info_chunks.append(
                    f"**InstalledVersion:** {markdown.markdown_escape(vuln_installed_version)}"
                )
            if vuln_fixed_version:
                vuln_info_chunks.append(
                    f"**FixedVersion:** {markdown.markdown_escape(vuln_fixed_version)}"
                )
            if vuln_layer:
                vuln_info_chunks.append(
                    f"**Layer DiffID:** {markdown.markdown_escape(vuln_layer)}"
                )
            # Append reference links, one chunk each
            vuln_refs = item.get("References", list())
            if vuln_refs:
                vuln_info_chunks.append("**References:**")
                for vuln_ref in vuln_refs:
                    vuln_info_chunks.append(markdown.markdown_escape(vuln_ref))
            # Save finding with meta used by downstream processing
            finding = SastFinding(title=vuln_title,
                                  description=["\n\n".join(vuln_info_chunks)])
            finding.set_meta("tool", scanner.get_name())
            finding.set_meta("severity", vuln_severity)
            finding.set_meta("legacy.file", vuln_file)
            endpoints = list()
            if vuln_file:
                endpoints.append(
                    namedtuple("Endpoint", ["raw"])(raw=vuln_file))
            finding.set_meta("endpoints", endpoints)
            log.debug(f"Endpoints: {finding.get_meta('endpoints')}")
            scanner.findings.append(finding)
def __init__(self, data, deps):
    """ Parse npm/yarn audit JSON; de-duplicated findings end up in self.items.

    :param data: raw audit JSON string
    :param deps: collection of module names to keep findings for
    """
    dupes = dict()
    find_date = None
    self.items = []
    data = json.loads(data)
    advisories = data.get('advisories')
    for action in data['actions']:
        module = action.get('module')
        # Only report modules that are actually among the project dependencies
        if module in deps:
            # Lower number == higher severity; used to keep the worst one
            EXTENDED_SEVERITIES = {
                'Info': 4,
                'Low': 3,
                'Moderate': 2,
                'High': 1,
                'Critical': 0
            }
            unique_ids = {}
            tmp_values = {
                'file_paths': {},
                'descriptions': [],
                'urls': [],
                'references_list': [],
                'cwes': []
            }
            severity = 'Info'
            format_str = ' \n*{}*: {}\n \n'
            for resolve in action.get('resolves'):
                id = resolve.get('id')
                # First time this advisory id is seen: collect its details
                if id not in unique_ids:
                    advisory = advisories.get(str(id))
                    unique_ids[id] = advisory.get('title')
                    tmp_values['file_paths'][unique_ids[id]] = []
                    current_severity = advisory.get('severity').title()
                    tmp_values['cwes'].append(advisory.get('cwe'))
                    # Keep the most severe level seen across advisories
                    if EXTENDED_SEVERITIES.get(current_severity) \
                            < EXTENDED_SEVERITIES.get(severity):
                        severity = current_severity
                    if advisory.get('url'):
                        tmp_values['urls'].append(
                            format_str.format(
                                unique_ids[id],
                                markdown.markdown_escape(
                                    advisory.get('url'))))
                    if advisory.get('references'):
                        tmp_values['references_list'].append(
                            format_str.format(
                                unique_ids[id],
                                markdown.markdown_escape(
                                    advisory.get('references'))))
                    tmp_values['descriptions'].append(
                        format_str.format(
                            unique_ids[id],
                            markdown.markdown_escape(
                                advisory.get('overview'))))
                # NOTE(review): 'file_paths' is keyed by advisory *title*
                # (unique_ids[id]), so this numeric-id membership test looks
                # always true — possibly meant to be
                # "unique_ids[id] not in ..."; confirm before changing.
                if id not in tmp_values['file_paths']:
                    tmp_values['file_paths'][unique_ids[id]].append(
                        '\n- {}'.format(resolve.get('path')))
            # Render the collected per-title path lists into one string
            file_path = ''
            for key in tmp_values['file_paths']:
                file_path = file_path + format_str.format(
                    key, markdown.markdown_escape(', '.join(
                        tmp_values['file_paths'][key])))
            rehearsal_str = '\n'
            url = rehearsal_str.join(tmp_values['urls'])
            references = rehearsal_str.join(tmp_values['references_list'])
            description = rehearsal_str.join(tmp_values['descriptions'])
            swe = rehearsal_str.join(tmp_values['cwes'])
            # Title is "<action> <module> <target>"; also the dedup key
            title = ' '.join([
                action.get('action', ''),
                action.get('module', ''),
                action.get('target', '')
            ])
            if title not in dupes:
                dupes[title] = {
                    "title": title,
                    "description": description,
                    "severity": severity,
                    "file_path": file_path,
                    "url": url,
                    "date": find_date,
                    "references": references,
                    "cwe": swe
                }
    self.items = dupes.values()
def __init__(self, filename):
    """ Parse SpotBugs XML report; de-duplicated findings end up in self.items.

    :param filename: path to the SpotBugs XML report
    """
    logging.debug("Spotbugs parser initialization")
    dupes = dict()
    find_date = None
    data = xml.etree.ElementTree.parse(filename).getroot()
    for item in data.findall('BugInstance'):
        title = item.find('ShortMessage').text
        description = markdown.markdown_escape(
            item.find('LongMessage').text)
        category = item.get('category')
        issue_type = item.get('type')
        severity = item.get('priority')
        classname = item.find('Class').get('classname')
        # NOTE: 'filename' is rebound here from the report path to the
        # source file name of the current bug instance
        filename = item.find('Class').find('SourceLine').get('sourcefile')
        file_path = item.find('Class').find('SourceLine').get('sourcepath')
        line = item.find('Class').find('SourceLine').find('Message').text
        steps_to_reproduce = ""
        details = data.find(f'.//BugPattern[@type="{issue_type}"]')
        for i, element in enumerate(item.findall('Method')):
            steps_to_reproduce += f"\n\nClassname: {classname}\n" \
                f"{element.find('Message').text}\n"
            # Best-effort: a Method may have no matching SourceLine entry
            try:
                steps_to_reproduce += \
                    f"{sanitize(item.findall('SourceLine')[i].find('Message').text)}"
            except:  # pylint: disable=W0702
                pass
        if details is not None:
            description += f'\n\n Details: {markdown.html_to_text(details.find("Details").text)}'
        # De-duplication key: title + type + category (+ path when known)
        dupe_key = hashlib.md5(f'{title} {issue_type} {category}'.encode(
            'utf-8')).hexdigest()
        if file_path:
            dupe_key += f' {file_path}'
        if filename:
            # FIX: original appended the literal text " in (unknown)" — an
            # f-string with no placeholder; append the actual source file
            # name resolved above instead
            title += f' in {filename}'
        if dupe_key not in dupes:
            dupes[dupe_key] = {
                "title": title,
                "category": category,
                "description": description,
                "severity": int(severity),
                "file_path": file_path if file_path else filename if filename else "",
                "line": line,
                "date": find_date,
                "steps_to_reproduce": list()
            }
            # First occurrence carries the issue type in its reproduce block
            dupes[dupe_key]['steps_to_reproduce'].append(
                f'<pre>{issue_type} issue {steps_to_reproduce}</pre>')
        else:
            dupes[dupe_key]['steps_to_reproduce'].append(
                f"<pre>{steps_to_reproduce}</pre>")
    self.items = dupes.values()
    logging.debug("Spotbugs output parsing done")
def __init__(self, filename,
             filtered_statuses=constants.PTAI_DEFAULT_FILTERED_STATUSES):
    """ Parse PTAI HTML report; de-duplicated findings end up in self.items.

    :param filename: path to the PTAI HTML report
    :param filtered_statuses: str with statuses, separated ', ' — findings
        whose status icon matches one of these are skipped
    """
    # Row labels (RU/EN) that mark the "vulnerable file" cell in info tables
    file_path_descriptions_list = ['Уязвимый файл', 'Vulnerable File']

    def trim_blank_lines(line):
        # Collapse runs of blank/whitespace-only lines into single newlines
        blank_lines_patterns = ['\n( *\n)', '\n+']
        for pattern in blank_lines_patterns:
            finds = re.findall(pattern, line)
            for find in finds:
                line = line.replace(find, '\n')
        return line

    def get_value_by_description(table_soap, descriptions):
        # Find the option-value cell matching the first option-description
        # cell whose text equals one of the given labels; '' when not found
        option_descriptions_soup = table_soap.select(
            'td[class*="option-description"]')
        option_descriptions = [
            item.text for item in option_descriptions_soup
        ]
        value_index = -1
        value = ''
        for description in descriptions:
            if description in option_descriptions:
                value_index = option_descriptions.index(description)
                break
        if value_index >= 0:
            option_values_soup = table_soap.select(
                'td[class*="option-value"]')
            value = option_values_soup[value_index].text
        return value

    dupes = dict()
    self.items = []
    if not os.path.exists(filename):
        return
    soup = BeautifulSoup(open(filename, encoding="utf8"), 'html.parser')
    # Glossary: anchor id -> vulnerability type description text
    vulnerabilities_info = {}
    vulnerabilities_info_soup = soup.find_all(
        'div', {'class': 'type-description'})
    for vulnerability_info_soup in vulnerabilities_info_soup:
        id = vulnerability_info_soup.find('a', {
            'class': 'glossary-anchor'
        }).attrs.get('id')
        vulnerabilities_info[id] = vulnerability_info_soup.text.replace(
            id, '')
    vulnerabilities_soup = soup.find_all('div', {'class': 'vulnerability'})
    for vulnerability_soup in vulnerabilities_soup:
        # Skip findings whose status icon matches a filtered status
        if filtered_statuses:
            skip_flag = False
            for filter_status in filtered_statuses:
                status = vulnerability_soup.find_all(
                    'i', {'class': '{}-icon'.format(filter_status)})
                if status:
                    skip_flag = True
                    break
            if skip_flag:
                continue
        severity_level_soup = vulnerability_soup.select(
            'div[class*="vulnerability-type-name-level-"]')
        title = ''
        file_path = ''
        short_file_path = ''
        # NOTE(review): 'severity' is only assigned inside this branch;
        # if the first vulnerability div has no level element, the
        # severity.title() call at the bottom would hit an unbound name —
        # presumably the report always provides it; confirm.
        if severity_level_soup:
            title = severity_level_soup[0].text
            # Get file path (strip line number if present)
            file_path = get_value_by_description(
                vulnerability_soup,
                file_path_descriptions_list).rsplit(' : ', 1)[0]
            if '\\' in file_path:
                short_file_path = ' in ...\\' + file_path.split('\\')[-1]
            # Severity is encoded in the CSS class name suffix
            severity_classes_soup = severity_level_soup[0].attrs.get(
                'class')
            for severity_class_soup in severity_classes_soup:
                if 'vulnerability-type-name-level-' in severity_class_soup:
                    severity = severity_class_soup.split('-')[-1]
        # Resolve the glossary text for this vulnerability type; the part
        # before (and including) the title is stripped off
        vulnerability_link_info_soup = vulnerability_soup.find_all(
            'a', {'class': 'vulnerability-description-link'})
        if vulnerability_link_info_soup:
            vulnerability_info_href = vulnerability_link_info_soup[
                0].attrs.get('href').replace('#', '')
            vulnerability_info = ''
            if vulnerability_info_href in vulnerabilities_info:
                vulnerability_info = vulnerabilities_info[
                    vulnerability_info_href][
                        vulnerabilities_info[vulnerability_info_href].
                        find(title) + len(title):]
        # First detail table: one key/value pair shared by all functions
        detail_info_soup = vulnerability_soup.find_all(
            'table', {'class': 'vulnerability-detail-info'})
        detail_info_values = {}
        if detail_info_soup:
            detail_info_soup_tds = detail_info_soup[0].find_all('td')
            detail_info_values[detail_info_soup_tds[0].
                               text] = detail_info_soup_tds[1].text
        functions = vulnerability_soup.find_all(
            'div', {'class': 'vulnerability-info'})
        function_blocks_strs = []
        for function in functions:
            # Collect the per-function parameter table
            function_info_values = {}
            for tr in function.find_all(
                    'table',
                {'class': 'vulnerability-detail-info'})[0].find_all('tr'):
                tds = tr.find_all('td')
                if tds:
                    param = tds[0].text
                    if param.startswith('\n'):
                        param = param[1:]
                    value = ' '
                    if len(tds) == 2:
                        value = tds[1].text
                        if value.startswith('\n'):
                            value = value[1:]
                        # CWE cells: pull the matching link text/href from
                        # the glossary text (or the <a> tag as fallback)
                        if 'CWE' in value:
                            link_str_list = vulnerability_info[
                                vulnerability_info.find(value.strip()
                                                        ):].split('\n')
                            link_info = [
                                x.strip() for x in link_str_list
                                if x.strip()
                            ]
                            if not link_info or link_info == ['.']:
                                a_soup = tds[1].find_all('a')
                                if a_soup:
                                    a_href = a_soup[0].attrs.get('href')
                                    a_text = a_soup[0].text
                                    if a_text.startswith('\n'):
                                        a_text = value[1:]
                                    link_info = [a_text.strip(), a_href]
                                else:
                                    link_info = [' ']
                            value = ': '.join(link_info)
                    function_info_values[param] = trim_blank_lines(value)
            # Collect data-flow code tables: file header, type, and lines
            tables_lines = []
            tables_soup = function.find_all(
                'div', {'class': 'data-flow-entry-root'})
            for table_soup in tables_soup:
                lines = {}
                header_file_name = table_soup.find_all(
                    'span',
                    {'class': 'data-flow-entry-header-file-name'})[0].text
                header_type = table_soup.find_all(
                    'span', {'class': 'data-flow-entry-header-type'})[0].text
                code_lines_soup = table_soup.find_all(
                    'div', {'class': 'data-flow-entry-code-line-root'})
                for code_line_soup in code_lines_soup:
                    line_number = code_line_soup.find_all(
                        'span', {
                            'class': 'data-flow-entry-code-line-number'
                        })[0].text
                    line_content = code_line_soup.find_all(
                        'pre',
                        {'class': 'data-flow-entry-code-line-content'})[0]
                    line_text = line_content.text
                    # Highlighted (entry-point/vulnerable) lines get an
                    # ASCII arrow marker appended
                    bold_text = line_content.find(
                        'span', {
                            'class': [
                                'code-line-part-EntryPoint',
                                'code-line-part-DataEntryPoint',
                                'code-line-part-DataOperation',
                                'code-line-part-VulnerableCode'
                            ]
                        })
                    if bold_text:
                        line_text = line_text + ' <------'
                    lines[line_number] = line_text
                tables_lines.append({
                    'lines': lines,
                    'header_file_name': header_file_name,
                    'header_type': header_type
                })
            # format strings: render each code table as a Jira {code} block
            srt_code_blocks = []
            for table_lines in tables_lines:
                table_markdown_str = '{{code:title={} - {}|borderStyle=solid}} \n{} \n{{code}}'
                code_lines = ''
                for key, value in table_lines['lines'].items():
                    code_lines += '{} {} \n'.format(key, value)
                srt_code_blocks.append(
                    table_markdown_str.format(
                        table_lines['header_file_name'],
                        table_lines['header_type'], code_lines))
            data_flow_panel_str = ''
            for str_code_block in srt_code_blocks:
                if data_flow_panel_str:
                    # add arrow (down-arrow glyph) between code blocks
                    data_flow_panel_str += ' \n \n|{}| \n \n'.format(
                        chr(129147))
                data_flow_panel_str += str_code_block
            # Render shared detail values not overridden per-function
            function_info_values_str = ''
            for param, value in detail_info_values.items():
                if param not in function_info_values:
                    # value = value.replace('\n ', ': ')\
                    # .replace('|', '| ').replace('{', '\{').replace('}', '\}')
                    value = markdown_escape(value).replace("\n", " ")
                    if value.isspace():
                        value = "-"
                    str_line = ' \n \n|| *{}* | *{}* |'.format(
                        param, value)
                    function_info_values_str = str_line
            # Render per-function parameter table rows
            for param, value in function_info_values.items():
                # value = value.replace('*', '\*').replace('|', '| ').replace('{', '\{')\
                # .replace('}', '\}')
                value = markdown_escape(value).replace("\n", " ")
                if value.isspace():
                    value = "-"
                str_line = '|| *{}* | {} |'.format(param, value)
                # NOTE(review): this strips what renders here as a single
                # space from the row — verify the intended target character
                # against the original source
                str_line = str_line.replace(' ', '')
                function_info_values_str += ' \n' + str_line
            function_full_info_str = function_info_values_str + '\n \n '
            if data_flow_panel_str:
                # Wrap the data flow blocks into a Jira {panel}
                function_full_info_str += ' \n {panel:title=Data Flow:|borderStyle=dashed|borderColor' \
                    '=#ccc|titleBGColor=#F7D6C1|bgColor=#FFFFCE} \n \n' + data_flow_panel_str \
                    + ' \n \n {panel} \n \n'
            function_blocks_strs.append(function_full_info_str)
        description = ' \n \n{}: \n \n{} \n \n'.format(
            title, vulnerability_info.strip())
        dup_key = title + ' in file: ' + file_path
        # Add finding data to de-duplication store
        if dup_key not in dupes:
            dupes[dup_key] = {
                "title": title + short_file_path,
                "description": description,
                "severity": severity.title(),
                "file_path": file_path,
                "steps_to_reproduce": function_blocks_strs
            }
        else:
            dupes[dup_key]["steps_to_reproduce"].extend(
                function_blocks_strs)
    self.items = dupes.values()
def parse_findings(output_file, scanner):
    """ Parse findings (code from dusty 1.0) """
    log.debug("Parsing findings")
    # Load JSON
    with open(output_file, "rb") as json_file:
        data = json.load(json_file)
    # SSLyze report has no severity. Set all to Medium
    severity = "Medium"
    # Walk results; a broad except guards against report schema changes
    try:
        # Process each scanned target result
        for target in data["server_scan_results"]:
            # Heartbleed
            if target["scan_commands_results"]["heartbleed"]["is_vulnerable_to_heartbleed"]:
                finding = DastFinding(
                    title="SSL: Server is vulnerable to HeartBleed",
                    description=markdown.markdown_escape(
                        f"Server is vulnerable to heartbleed"
                    )
                )
                finding.set_meta("tool", scanner.get_name())
                finding.set_meta("severity", severity)
                scanner.findings.append(finding)
            # CCS Injection
            if target[
                    "scan_commands_results"
            ]["openssl_ccs_injection"]["is_vulnerable_to_ccs_injection"]:
                finding = DastFinding(
                    title="SSL: Server is vulnerable to CCS Injection",
                    description=markdown.markdown_escape(
                        f"Server is vulnerable to CCS Injection"
                    )
                )
                finding.set_meta("tool", scanner.get_name())
                finding.set_meta("severity", severity)
                scanner.findings.append(finding)
            # Robot: anything other than a NOT_VULNERABLE_* result is reported
            if "NOT_VULNERABLE" not in target["scan_commands_results"]["robot"]["robot_result"]:
                finding = DastFinding(
                    title="SSL: Server is vulnerable to Robot",
                    description=markdown.markdown_escape(
                        f"SSL server is vulnerable to robot with "
                        f'{target["scan_commands_results"]["robot"]["robot_result"]}'
                    )
                )
                finding.set_meta("tool", scanner.get_name())
                finding.set_meta("severity", severity)
                scanner.findings.append(finding)
            # Certificate validation
            for deployment in target[
                    "scan_commands_results"
            ]["certificate_info"]["certificate_deployments"]:
                # Collect target chain info (leaf last, thanks to reversed())
                chain_info = ""
                for each in reversed(deployment["received_certificate_chain"]):
                    chain_info += f'{each["subject"]["rfc4514_string"]}\n\n'
                # Collect certificate chain validation info: one line per
                # trust store that could not verify the chain
                certificate_validation = []
                for validation_result in deployment["path_validation_results"]:
                    if validation_result["verified_certificate_chain"] is None:
                        certificate_validation.append(
                            f"- Is not trusted by "
                            f"{validation_result['trust_store']['name']} "
                            f"({validation_result['trust_store']['version']})"
                        )
                # Create finding object
                if certificate_validation:
                    descr = "\n\n".join(certificate_validation)
                    finding = DastFinding(
                        title="SSL: Certificate is not trusted",
                        description=markdown.markdown_escape(
                            f"Certificate chain: \n\n{chain_info}\n {descr}"
                        )
                    )
                    finding.set_meta("tool", scanner.get_name())
                    finding.set_meta("severity", severity)
                    scanner.findings.append(finding)
    except:  # pylint: disable=W0702
        log.exception("Failed to parse results")
def parse_findings(output_file, scanner):
    """ Parse findings (code from dusty 1.0, legacy SSLyze JSON schema).

    :param output_file: path to the SSLyze JSON report
    :param scanner: scanner object; findings are appended to scanner.findings
    """
    log.debug("Parsing findings")
    # Load JSON
    with open(output_file, "rb") as json_file:
        data = json.load(json_file)
    # SSLyze report has no severity. Set all to Medium
    severity = "Medium"
    # Walk results
    for target in data["accepted_targets"]:
        # Collect certificate chain subjects
        chain_info = ""
        for each in target["commands_results"]["certinfo"]["certificate_chain"]:
            chain_info += f'{each["subject"]}\n'
        # One line per trust store that failed to verify the chain
        certificate_validation = []
        for validation_result in \
                target["commands_results"]["certinfo"]["path_validation_result_list"]:
            if validation_result["verify_string"] != "ok":
                certificate_validation.append(
                    f"Certificate chain is not trusted by "
                    f"{validation_result['trust_store']['name']} "
                    f"trust_store version {validation_result['trust_store']['version']}"
                )
        # Create finding objects
        if certificate_validation:
            descr = "\n".join(certificate_validation)
            finding = DastFinding(
                title="Certificate is not trusted",
                description=markdown.markdown_escape(
                    f"Certificate chain: {chain_info}\n {descr}"
                )
            )
            finding.set_meta("tool", scanner.get_name())
            finding.set_meta("severity", severity)
            scanner.findings.append(finding)
        if target["commands_results"]["heartbleed"]["is_vulnerable_to_heartbleed"]:
            finding = DastFinding(
                # FIX: title typo — was "Heardbleed"
                title="Certificate is vulnerable to Heartbleed",
                description=markdown.markdown_escape(
                    f"Certificate chain: {chain_info}\n is vulnerable to heartbleed"
                )
            )
            finding.set_meta("tool", scanner.get_name())
            finding.set_meta("severity", severity)
            scanner.findings.append(finding)
        # Robot: anything other than a NOT_VULNERABLE_* result is reported
        if "NOT_VULNERABLE" not in target["commands_results"]["robot"]["robot_result_enum"]:
            finding = DastFinding(
                title="Certificate is vulnerable to Robot",
                description=markdown.markdown_escape(
                    f"Certificate chain: {chain_info}\n "
                    f"is vulnerable to robot with "
                    f'{target["commands_results"]["robot"]["robot_result_enum"]}'
                )
            )
            finding.set_meta("tool", scanner.get_name())
            finding.set_meta("severity", severity)
            scanner.findings.append(finding)
        if target["commands_results"]["openssl_ccs"]["is_vulnerable_to_ccs_injection"]:
            finding = DastFinding(
                title="Certificate is vulnerable to CCS Injection",
                description=markdown.markdown_escape(
                    f"Certificate chain: {chain_info}\n "
                    f"is vulnerable to CCS Injection"
                )
            )
            finding.set_meta("tool", scanner.get_name())
            finding.set_meta("severity", severity)
            scanner.findings.append(finding)
def parse_findings(output_file, scanner):
    """ Parse findings (code from dusty 1.0, Nikto XML report).

    :param output_file: path to the Nikto XML report
    :param scanner: scanner object; findings are appended to scanner.findings
    """
    log.debug("Parsing findings")
    dupes = dict()
    # Parse XML and locate the scan details node
    tree = ET.parse(output_file)
    root = tree.getroot()
    new_root = root.find("niktoscan")
    scan = new_root.find("scandetails")
    # Walk scan items, de-duplicating by description hash
    for item in scan.findall("item"):
        # Title
        titleText = None
        description = item.find("description").text
        # Cut the title down to the first sentence.
        # FIX: raw string — the original pattern was a plain string with
        # "\w", "\s" invalid escape sequences (SyntaxWarning on modern
        # Python); the compiled pattern is identical.
        sentences = re.split(
            r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', description)
        if sentences:
            titleText = sentences[0][:900]
        else:
            titleText = description[:900]
        # Url
        ip = item.find("iplink").text
        # Remove the port numbers for 80/443
        ip = ip.replace(":80", "")
        ip = ip.replace(":443", "")
        # Description
        description = "\nHost: " + ip + "\n" + item.find("description").text
        dupe_key = hashlib.md5(description.encode("utf-8")).hexdigest()
        if dupe_key in dupes:
            finding = dupes[dupe_key]
            # NOTE(review): 'description' already begins with a "Host:" line,
            # so this concatenation repeats it — kept as-is for parity with
            # dusty 1.0 output; confirm before changing.
            if finding["description"]:
                finding["description"] = \
                    finding["description"] + "\nHost:" + ip + "\n" + description
            finding["endpoints"].append(ip)
            dupes[dupe_key] = finding
        else:
            # FIX: removed dead store — original assigned
            # "dupes[dupe_key] = True" and immediately overwrote it with
            # the finding dict below
            finding = {
                "title": titleText,
                "description": description,
                "endpoints": list()
            }
            dupes[dupe_key] = finding
            finding["endpoints"].append(ip)
    # Create finding objects
    for item in dupes.values():
        finding = DastFinding(title=item["title"],
                              description=markdown.markdown_escape(
                                  item["description"]))
        finding.set_meta("tool", scanner.get_name())
        finding.set_meta("severity", SEVERITIES[-1])
        # Endpoints (for backwards compatibility)
        endpoints = list()
        for entry in item["endpoints"]:
            endpoint = url.parse_url(entry)
            if endpoint in endpoints:
                continue
            endpoints.append(endpoint)
        finding.set_meta("endpoints", endpoints)
        log.debug(f"Endpoints: {finding.get_meta('endpoints')}")
        # Done
        scanner.findings.append(finding)
def parse_findings(filename, scanner):
    """ Parse findings """
    # Read and decode the report file
    try:
        with open(filename, "r") as report_file:
            data = json.load(report_file)
    except:  # pylint: disable=W0702
        log.exception("Failed to load report JSON")
        return
    # Sanity-check the report structure
    if not isinstance(data, dict) or "results" not in data:
        log.info("No data in report")
        return
    # Build one SastFinding per result entry
    for result in data["results"]:
        extra = result.get("extra", dict())
        metadata = extra.get("metadata", dict())
        #
        rule_id = result["check_id"]
        file_path = result["path"]
        message = extra.get("message", "")
        severity = map_severity(extra.get("severity", ""))
        # Prefer CWE, then OWASP, then a generic label for the title
        issue_label = \
            metadata.get("cwe", "") or metadata.get("owasp", "") or "Vulnerability"
        title = f"{issue_label} in {file_path}"
        # Assemble the markdown description chunks
        chunks = list()
        if message:
            chunks.append(markdown.markdown_escape(message))
        chunks.append(f"**Rule:** {markdown.markdown_escape(rule_id)}")
        if "source-rule-url" in metadata:
            chunks.append(
                f"**Rule source:** {markdown.markdown_escape(metadata['source-rule-url'])}"
            )
        if "cwe" in metadata:
            chunks.append(f"**CWE:** {markdown.markdown_escape(metadata['cwe'])}")
        if "owasp" in metadata:
            chunks.append(f"**OWASP:** {markdown.markdown_escape(metadata['owasp'])}")
        chunks.append(f"**File:** {markdown.markdown_escape(file_path)}")
        if "start" in result and "line" in result["start"]:
            chunks.append(
                f"**Start line:** {markdown.markdown_escape(str(result['start']['line']))}"
            )
        if "end" in result and "line" in result["end"]:
            chunks.append(
                f"**End line:** {markdown.markdown_escape(str(result['end']['line']))}"
            )
        if "lines" in extra:
            chunks.append(f"**Lines:** {markdown.markdown_escape(extra['lines'])}")
        # Create finding instance and attach metadata
        finding = SastFinding(title=title, description=["\n\n".join(chunks)])
        finding.set_meta("tool", scanner.get_name())
        finding.set_meta("severity", severity)
        finding.set_meta("legacy.file", file_path)
        endpoints = \
            [namedtuple("Endpoint", ["raw"])(raw=file_path)] if file_path else list()
        finding.set_meta("endpoints", endpoints)
        log.debug(f"Endpoints: {finding.get_meta('endpoints')}")
        scanner.findings.append(finding)
def __init__(self, filename, deps):
    """
    Parse a retire.js JSON report (legacy dusty 1.0 parser).

    Builds self.items: one aggregated "Update <component>" entry per
    vulnerable dependency, enriched with version bounds and descriptions
    scraped live from NVD pages.

    :param filename: path to the retire.js JSON report
    :param deps: collection of project dependency names; only components
        present in it are reported
    """
    dupes = dict()
    find_date = None
    # Default to an empty result when the report file is missing
    self.items = []
    if not os.path.exists(filename):
        return
    # NOTE(review): the open() handle is never closed explicitly
    data = json.load(open(filename))['data']
    components_data = {}
    for file_results in data:
        file_path = file_results.get('file')
        for version_results in file_results.get('results'):
            component = version_results.get('component')
            if component in deps:
                # Lazily create the per-component aggregation record
                if component not in components_data:
                    components_data[component] = \
                        {'versions': set(), 'descriptions': {}, 'references': {},
                         'file_paths': {}, 'version_to_update': '0', 'severity': 'Info'}
                components_data[component]['versions'].add(
                    version_results.get('version'))
                for vulnerability in version_results.get(
                        'vulnerabilities', []):
                    summary = vulnerability.get('identifiers').get(
                        'summary')
                    # First occurrence of this vulnerability summary: create
                    # its file-path and reference sets
                    if summary not in components_data[component][
                            'file_paths']:
                        components_data[component]['file_paths'][
                            summary] = set()
                        components_data[component]['references'][
                            summary] = set()
                    components_data[component]['file_paths'][summary].add(
                        file_path)
                    for reference in vulnerability.get('info'):
                        # NOTE(review): this membership test checks the dict
                        # keyed by summaries, not references[summary]; it is
                        # almost always true, so the same reference URL may be
                        # fetched repeatedly - looks like a bug, confirm
                        # against callers before changing
                        if reference not in components_data[component][
                                'references']:
                            components_data[component]['references'][
                                summary].add(reference)
                            if NVD_URL in reference:
                                # Scrape the NVD page: one blocking HTTP
                                # request per NVD reference
                                url_text = requests.get(reference).text
                                soup = BeautifulSoup(
                                    url_text, 'html.parser')
                                recomendation = soup.find_all(
                                    'a', {'id': 'showCPERanges'})
                                if recomendation:
                                    # Extract the "up to (excluding) X"
                                    # version bound from the CPE range text
                                    ver_res = re.findall(
                                        'versions up to \(excluding\)(.*)',
                                        recomendation[0].
                                        attrs['data-range-description'])
                                    if ver_res:
                                        ver = ver_res[0].strip()
                                        # Keep the highest "update to" version seen
                                        if (LooseVersion(
                                                components_data[component]
                                                ['version_to_update']) <
                                                LooseVersion(ver)):
                                            components_data[component][
                                                'version_to_update'] = ver
                                description = soup.find_all(
                                    'p', {'data-testid': 'vuln-description'})
                                if description:
                                    components_data[component][
                                        'descriptions'][
                                            summary] = description[0].text
                    # Track the worst severity across all vulnerabilities of
                    # this component (lower SEVERITIES rank = more severe)
                    cur_severity = vulnerability.get('severity').title()
                    if SEVERITIES.get(components_data[component]['severity']) \
                            > SEVERITIES.get(cur_severity):
                        components_data[component][
                            'severity'] = cur_severity
    # Render one markdown-formatted entry per component
    format_str = ' \n**{}**: {}\n \n'
    for key, value in components_data.items():
        title = 'Update {}'.format(key)
        if value.get('version_to_update') != '0':
            title += ' to version {}'.format(
                value.get('version_to_update'))
        severity = value.get('severity')
        # NOTE(review): the comprehension's 'key' shadows the component name;
        # harmless in Python 3 (comprehension scope) but easy to misread
        description = ' \n'.join([
            format_str.format(markdown.markdown_escape(key),
                              markdown.markdown_escape(val))
            for key, val in value.get('descriptions').items()
        ])
        references = ''
        for ref_key, ref_val in value.get('references').items():
            _references = ','.join([' \n- {}'.format(x) for x in ref_val]) + ' \n'
            references += format_str.format(
                markdown.markdown_escape(ref_key),
                markdown.markdown_escape(_references))
        file_path = ''
        for path_key, path_val in value.get('file_paths').items():
            _paths = ','.join([' \n- {}'.format(x) for x in path_val]) + ' \n'
            file_path += format_str.format(
                markdown.markdown_escape(path_key),
                markdown.markdown_escape(_paths))
        dupes[title] = {
            "title": title,
            "description": description,
            "severity": severity,
            "file_path": file_path,
            # find_date is always None in this legacy parser
            "date": find_date,
            "references": references
        }
    self.items = dupes.values()
def parse_findings(output_file, scanner):
    """
    Parse an Nmap XML report into DastFinding objects (code from dusty 1.0).

    :param output_file: path to the Nmap XML report
    :param scanner: scanner object; findings go to scanner.findings,
        errors to scanner.errors
    """
    log.debug("Parsing findings")
    nscan = parse(output_file)
    root = nscan.getroot()
    # Check validity: a real Nmap report has <nmaprun> as its root tag
    if "nmaprun" not in root.tag:
        log.error(
            "Exception during Nmap findings processing: invalid XML file")
        error = Error(
            tool=scanner.get_name(),
            error="Exception during Nmap findings processing",
            details="Output file doesn't seem to be a valid Nmap xml file.")
        scanner.errors.append(error)
        return
    dupes = dict()
    for host in root.iter("host"):
        # Fix: host_info is reset per host; previously it accumulated across
        # hosts, leaking earlier hosts' data into later descriptions
        host_info = ""
        ip = host.find("address[@addrtype='ipv4']").attrib["addr"]
        fqdn = None
        if host.find("hostnames/hostname[@type='PTR']") is not None:
            fqdn = host.find("hostnames/hostname[@type='PTR']").attrib["name"]
        #
        # Fix: iterate OS matches of THIS host only (was root.iter("os"),
        # which mixed every host's OS data into each description) and avoid
        # shadowing the 'os' module with the loop variable
        for os_element in host.iter("os"):
            if ip is not None:
                host_info += "IP Address: %s\n" % ip
            if fqdn is not None:
                # Fix: the FQDN line was appended to the 'fqdn' variable
                # itself (with the IP as value) and never reached the
                # description
                host_info += "FQDN: %s\n" % fqdn
            for osv in os_element.iter("osmatch"):
                if "name" in osv.attrib:
                    host_info += "Host OS: %s\n" % osv.attrib["name"]
                if "accuracy" in osv.attrib:
                    host_info += "Accuracy: {0}%\n".format(
                        osv.attrib["accuracy"])
            host_info += "\n"
        # Select open ports; optionally also unfiltered ones
        xpath_port_selector = "ports/port[state/@state='open']"
        if scanner.config.get("include_unfiltered", False):
            xpath_port_selector = "ports/port[state/@state=('open','unfiltered')]"
        #
        for portelem in elementpath.select(host, xpath_port_selector):
            port = portelem.attrib["portid"]
            protocol = portelem.attrib["protocol"]
            #
            title = f"Open port: {ip}:{port}/{protocol}"
            description = host_info
            description += f"Port: {port}\n"
            serviceinfo = ""
            # Collect optional service details (single find() call hoisted)
            service = portelem.find("service")
            if service is not None:
                if "product" in service.attrib:
                    serviceinfo += "Product: %s\n" % service.attrib["product"]
                if "version" in service.attrib:
                    serviceinfo += "Version: %s\n" % service.attrib["version"]
                if "extrainfo" in service.attrib:
                    serviceinfo += "Extra Info: %s\n" % service.attrib["extrainfo"]
                description += serviceinfo
            description += "\n\n"
            # Deduplicate by port/protocol/ip; concatenate descriptions
            dupe_key = f"{port}_{protocol}_{ip}"
            if dupe_key in dupes:
                find = dupes[dupe_key]
                if description is not None:
                    find["description"] += description
            else:
                find = {
                    "title": title,
                    "description": description,
                    "endpoints": list()
                }
                find["endpoints"].append(f"{ip}:{port}/{protocol}")
                dupes[dupe_key] = find
    # Create finding objects
    for item in dupes.values():
        finding = DastFinding(
            title=item["title"],
            description=markdown.markdown_escape(item["description"]))
        finding.set_meta("tool", scanner.get_name())
        finding.set_meta("severity", SEVERITIES[-1])
        # Endpoints (for backwards compatibility)
        endpoints = list()
        for entry in item["endpoints"]:
            endpoint = url.parse_url(entry)
            if endpoint in endpoints:
                continue
            endpoints.append(endpoint)
        finding.set_meta("endpoints", endpoints)
        log.debug(f"Endpoints: {finding.get_meta('endpoints')}")
        # Done
        scanner.findings.append(finding)