Example #1
def parse_findings(data, scanner):
    """ Parse findings """
    log.debug("Parsing findings")
    item_regex = re.compile(
        "".join([
            r"^(\[\+\] New Finding!!!)$",
            r"\s*Name: (?P<name>.*)$",
            r"\s*Url: (?P<url>.*)$",
            r"\s*Description: (?P<description>[\s\S]*?)\n\n"
        ]), re.MULTILINE)
    for item in item_regex.finditer(data):
        # Make finding object
        description = list()
        description.append(markdown.markdown_escape(item.group("description")))
        description.append(
            f'\n**URL:** {markdown.markdown_escape(item.group("url"))}')
        description = "\n".join(description)
        finding = DastFinding(title=item.group("name"),
                              description=description)
        finding.set_meta("tool", scanner.get_name())
        finding.set_meta("severity", "Info")
        # Endpoints (for backwards compatibility)
        endpoints = list()
        endpoint = url.parse_url(item.group("url"))
        endpoints.append(endpoint)
        finding.set_meta("endpoints", endpoints)
        log.debug(f"Endpoints: {finding.get_meta('endpoints')}")
        # Done
        scanner.findings.append(finding)
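For context, a minimal sketch of the plain-text report format this regex expects. The sample content is invented; only the field labels come from the pattern itself:

import re

sample_report = (
    "[+] New Finding!!!\n"
    "Name: Sample finding\n"
    "Url: http://example.com/page\n"
    "Description: Something suspicious\n"
    "spanning several lines\n"
    "\n"
)
item_regex = re.compile(
    "".join([
        r"^(\[\+\] New Finding!!!)$",
        r"\s*Name: (?P<name>.*)$",
        r"\s*Url: (?P<url>.*)$",
        r"\s*Description: (?P<description>[\s\S]*?)\n\n"
    ]), re.MULTILINE)
for item in item_regex.finditer(sample_report):
    print(item.group("name"), item.group("url"))
# -> Sample finding http://example.com/page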
Example #2
 def execute(self):
     """ Run the scanner """
     # Prepare parameters
     target_url = url.parse_url(self.config.get("target"))
     nikto_parameters = shlex.split(self.config.get("nikto_parameters", ""))
     # Make temporary files
     output_file_fd, output_file = tempfile.mkstemp()
     log.debug("Output file: %s", output_file)
     os.close(output_file_fd)
     # Prepare -Save option if needed
     save_findings = list()
     if self.config.get("save_intermediates_to", None):
         base = os.path.join(self.config.get("save_intermediates_to"), __name__.split(".")[-2])
         try:
             os.makedirs(base, mode=0o755, exist_ok=True)
             save_findings.append("-Save")
             save_findings.append(base)
         except OSError as error:
             log.warning("Failed to create intermediates directory: %s", error)
     # Run scanner
     task = subprocess.run(["perl", "nikto.pl"] + nikto_parameters + [
         "-h", target_url.hostname, "-p", url.get_port(target_url),
         "-Format", "xml", "-output", output_file
     ] + save_findings, cwd="/opt/nikto/program", stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     log.log_subprocess_result(task)
     # Parse findings
     parse_findings(output_file, self)
     # Save intermediates
     self.save_intermediates(output_file, task)
     # Remove temporary files
     os.remove(output_file)
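A side note on the temporary-file pattern above (repeated in the examples that follow): tempfile.mkstemp() returns an already-open OS-level descriptor together with a path, so the descriptor is closed right away and only the path is handed to the external tool. A minimal sketch:

import os
import tempfile

fd, path = tempfile.mkstemp()
os.close(fd)  # close the descriptor immediately; only the path is needed
# ... the scanner subprocess writes its report to `path` ...
os.remove(path)  # clean up afterwards, mirroring execute() above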
Example #3
 def execute(self):
     """ Run the scanner """
     # Discover open ports
     include_ports = list()
     if self.config.get("include_ports", "0-65535"):
         include_ports.append(
             f'-p{self.config.get("include_ports", "0-65535")}')
     exclude_ports = list()
     if self.config.get("exclude_ports", None):
         exclude_ports.append("--exclude-ports")
         exclude_ports.append(f'{self.config.get("exclude_ports")}')
     target_url = url.parse_url(self.config.get("target"))
     task = subprocess.run(
         ["nmap", "-PN"] + include_ports + exclude_ports + [
             "--min-rate", "1000", "--max-retries", "0",
             "--max-rtt-timeout", "200ms", target_url.hostname
         ],
         stdout=subprocess.PIPE,
         stderr=subprocess.PIPE)
     log.log_subprocess_result(task)
     # Use discovered ports
     ports = list()
     tcp_ports = ""
     udp_ports = ""
     for each in re.findall(r'(\d+/(?:tcp|udp))', str(task.stdout)):
         if each.endswith("/tcp"):
             tcp_ports += f'{each.replace("/tcp", "")},'
         elif each.endswith("/udp"):
             udp_ports += f'{each.replace("/udp", "")},'
     if tcp_ports:
         ports.append(f"-pT:{tcp_ports[:-1]}")
     if udp_ports:
         ports.append(f"-pU:{udp_ports[:-1]}")
     if not ports:
         log.warning("No open ports found. Exiting")
         return
     # Make temporary files
     output_file_fd, output_file = tempfile.mkstemp()
     log.debug("Output file: %s", output_file)
     os.close(output_file_fd)
     # Scan target
     nmap_parameters = shlex.split(
         self.config.get("nmap_parameters", "-v -sVA"))
     nse_scripts = self.config.get(
         "nse_scripts",
         "ssl-date,http-mobileversion-checker,http-robots.txt,http-title,http-waf-detect,"
         "http-chrono,http-headers,http-comments-displayer,http-date")
     task = subprocess.run(["nmap"] + nmap_parameters + ports + [
         "--min-rate", "1000", "--max-retries", "0",
         f'--script={nse_scripts}', target_url.hostname, "-oX", output_file
     ],
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
     log.log_subprocess_result(task)
     # Parse findings
     parse_findings(output_file, self)
     # Save intermediates
     self.save_intermediates(output_file, task)
     # Remove temporary files
     os.remove(output_file)
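A quick sanity check for the fixed port-extraction regex above, run against invented nmap output:

import re

sample_stdout = "PORT    STATE SERVICE\n80/tcp  open  http\n53/udp  open  domain\n"
print(re.findall(r'(\d+/(?:tcp|udp))', sample_stdout))
# -> ['80/tcp', '53/udp']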
Example #4
 def execute(self):
     """ Run the scanner """
     # Get target host IP
     target_url = url.parse_url(self.config.get("target"))
     host = target_url.hostname
     if not url.find_ip(host):
         task = subprocess.run(["getent", "hosts", host],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
         log.log_subprocess_result(task)
         host = url.find_ip(task.stdout.decode("utf-8", errors="ignore"))
         if host:
             host = host[0].strip()
     if not host:
         log.warning("No target IP found. Exiting")
         return
     # Prepare config
     include_ports = list()
     if self.config.get("include_ports", "0-65535"):
         include_ports.append("-p")
         include_ports.append(
             f'{self.config.get("include_ports", "0-65535")}')
         include_ports.append(
             f'-pU:{self.config.get("include_ports", "0-65535")}')
     exclude_ports = list()
     if self.config.get("exclude_ports", None):
         exclude_ports.append("--exclude-ports")
         exclude_ports.append(f'{self.config.get("exclude_ports")}')
     # Make temporary files
     output_file_fd, output_file = tempfile.mkstemp()
     log.debug("Output file: %s", output_file)
     os.close(output_file_fd)
     # Scan target
     task = subprocess.run(["masscan", host] + include_ports + [
         "--rate",
         "1000",
         "-oJ",
         output_file,
     ] + exclude_ports,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
     log.log_subprocess_result(task)
     # Parse findings
     parse_findings(output_file, self)
     # Save intermediates
     self.save_intermediates(output_file, task)
     # Remove temporary files
     os.remove(output_file)
Example #5
 def execute(self):
     """ Run the scanner """
     # Prepare parameters
     target_url = url.parse_url(self.config.get("target"))
     # Make temporary files
     output_file_fd, output_file = tempfile.mkstemp()
     log.debug("Output file: %s", output_file)
     os.close(output_file_fd)
     # Scan target
     task = subprocess.run([
         "sslyze", "--regular", f"--json_out={output_file}", "--quiet",
         f"{target_url.hostname}:{url.get_port(target_url)}"
     ], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     log.log_subprocess_result(task)
     # Parse findings
     parse_findings(output_file, self)
     # Save intermediates
     self.save_intermediates(output_file, task)
     # Remove temporary files
     os.remove(output_file)
Example #6
def parse_findings(data, scanner):
    """ Parse findings """
    log.debug("Parsing findings")
    zap_json = json.loads(data)
    for site in zap_json["site"]:
        for alert in site["alerts"]:
            description = list()
            if "desc" in alert:
                description.append(markdown.html_to_text(alert["desc"]))
            if "solution" in alert:
                description.append(
                    f'\n**Solution:**\n {markdown.html_to_text(alert["solution"])}'
                )
            if "reference" in alert:
                description.append(
                    f'\n**Reference:**\n {markdown.html_to_text(alert["reference"])}'
                )
            if "otherinfo" in alert:
                description.append(
                    f'\n**Other information:**\n {markdown.html_to_text(alert["otherinfo"])}'
                )
            if alert["instances"]:
                description.append("\n**Instances:**\n")
                description.append(
                    "| URI | Method | Parameter | Attack | Evidence |")
                description.append(
                    "| --- | ------ | --------- | ------ | -------- |")
            # Prepare results
            finding_data = list()
            if scanner.config.get("split_by_endpoint", False):
                # Collect endpoints
                endpoints = list()
                for item in alert["instances"]:
                    if not item.get("uri", None):
                        continue
                    endpoint = url.parse_url(item.get("uri"))
                    if endpoint in endpoints:
                        continue
                    endpoints.append(endpoint)
                # Prepare data
                for endpoint in endpoints:
                    instance_rows = [
                        "| {} |".format(" | ".join([
                            html.escape(markdown.markdown_table_escape(item.get(key, "-")))
                            for key in ("uri", "method", "param", "attack", "evidence")
                        ]))
                        for item in alert["instances"]
                        if item.get("uri", None) == endpoint.raw
                    ]
                    finding_data.append({
                        "title": f'{alert["name"]} on {endpoint.raw}',
                        "description": "\n".join(description + instance_rows),
                        "tool": scanner.get_name(),
                        "severity": constants.ZAP_SEVERITIES[alert["riskcode"]],
                        "confidence": constants.ZAP_CONFIDENCES[alert["confidence"]],
                        "endpoints": [endpoint]
                    })
            # Make one finding object if needed/requested
            if not finding_data:
                # Extend description
                for item in alert["instances"]:
                    description.append("| {} |".format(" | ".join([
                        html.escape(markdown.markdown_table_escape(item.get(key, "-")))
                        for key in ("uri", "method", "param", "attack", "evidence")
                    ])))
                # Endpoints (for backwards compatibility)
                endpoints = list()
                for item in alert["instances"]:
                    if not item.get("uri", None):
                        continue
                    endpoint = url.parse_url(item.get("uri"))
                    if endpoint in endpoints:
                        continue
                    endpoints.append(endpoint)
                # Data
                finding_data.append({
                    "title": alert["name"],
                    "description": "\n".join(description),
                    "tool": scanner.get_name(),
                    "severity": constants.ZAP_SEVERITIES[alert["riskcode"]],
                    "confidence": constants.ZAP_CONFIDENCES[alert["confidence"]],
                    "endpoints": endpoints
                })
            # Make finding objects
            for object_data in finding_data:
                finding = DastFinding(title=object_data["title"],
                                      description=object_data["description"])
                finding.set_meta("tool", object_data["tool"])
                finding.set_meta("severity", object_data["severity"])
                finding.set_meta("confidence", object_data["confidence"])
                finding.set_meta("endpoints", object_data["endpoints"])
                log.debug(f"Endpoints: {finding.get_meta('endpoints')}")
                scanner.findings.append(finding)
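For reference, a minimal hand-made stub of the ZAP JSON report shape this parser walks; real reports carry many more fields, and the riskcode/confidence string values are assumed to match the keys of constants.ZAP_SEVERITIES and constants.ZAP_CONFIDENCES:

zap_json = {
    "site": [{
        "alerts": [{
            "name": "X-Content-Type-Options Header Missing",
            "riskcode": "1",    # looked up in constants.ZAP_SEVERITIES
            "confidence": "2",  # looked up in constants.ZAP_CONFIDENCES
            "desc": "<p>The header was not set.</p>",
            "instances": [
                {"uri": "http://example.com/", "method": "GET"}
            ]
        }]
    }]
}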
Example #7
def parse_findings(data, scanner):
    """ Parse findings """
    log.debug("Parsing findings")
    parser = etree.XMLParser(remove_blank_text=True,
                             no_network=True,
                             recover=True)
    obj = etree.fromstring(data, parser)
    qids = obj.xpath("/WAS_WEBAPP_REPORT/GLOSSARY/QID_LIST/QID")
    disabled_titles = constants.QUALYS_DISABLED_TITLES
    for qid in qids:
        qid_title = qid.findtext("TITLE")
        if qid_title not in disabled_titles:
            _qid = qid.findtext("QID")
            qid_solution = qid.findtext("SOLUTION")
            qid_description = qid.findtext("DESCRIPTION")
            qid_impact = qid.findtext("IMPACT")
            qid_category = qid.findtext("CATEGORY")
            qid_severity = "Info"
            owasp = qid.findtext("OWASP") or ""
            wasc = qid.findtext("WASC") or ""
            cwe = qid.findtext("CWE") or ""
            cvss_base = qid.findtext("CVSS_BASE") or ""
            if qid.xpath("SEVERITY"):
                qid_severity = constants.QUALYS_SEVERITIES[int(
                    qid.findtext("SEVERITY"))]
            references = []
            entrypoints = []
            if "Information Gathered" in qid_category:
                qid_severity = "Info"
                records = obj.xpath(
                    f'//INFORMATION_GATHERED_LIST/INFORMATION_GATHERED/QID[text()="{_qid}"]/..'
                )
                for record in records:
                    references.append(
                        html.escape(
                            base64.b64decode(record.findtext("DATA")).decode(
                                "utf-8", errors="ignore")))
            else:
                records = obj.xpath(
                    f'//VULNERABILITY_LIST/VULNERABILITY/QID[text()="{_qid}"]/..'
                )
                for record in records:
                    record_url = record.findtext('URL')
                    access_path = [
                        a.text for a in record.xpath('ACCESS_PATH/URL')
                    ]
                    method = record.findtext('PAYLOADS/PAYLOAD/REQUEST/METHOD')
                    if not method:
                        log.error("Bad record: %s", str(record))
                        method = ""
                    request = html.escape(
                        record.findtext('PAYLOADS/PAYLOAD/REQUEST/URL') or "")
                    response = record.findtext(
                        'PAYLOADS/PAYLOAD/RESPONSE/CONTENTS') or ""
                    response = html.escape(
                        base64.b64decode(response).decode("utf-8",
                                                          errors="ignore"))
                    entrypoints.append(record_url)
                    entrypoints.extend(access_path)
                    references.append(
                        f"{method.upper()}: {request}\n\nResponse: {response}\n\n"
                    )
            for reference in references:
                description = f"{markdown.html_to_text(qid_description)}\n\n"
                if qid_impact:
                    description += f"**Impact:**\n {markdown.html_to_text(qid_impact)}\n\n"
                if qid_solution:
                    description += f"**Mitigation:**\n {markdown.html_to_text(qid_solution)}\n\n"
                if reference:
                    description += f"**References:**\n {markdown.markdown_escape(reference)}\n\n"
                if cwe:
                    description += f"**CWE:** {markdown.markdown_escape(cwe)}\n\n"
                if owasp:
                    description += f"**OWASP:** {markdown.markdown_escape(owasp)}\n\n"
                if wasc:
                    description += f"**WASC:** {markdown.markdown_escape(wasc)}\n\n"
                if cvss_base:
                    description += f"**CVSS_BASE:** {markdown.markdown_escape(cvss_base)}\n\n"
                # Make finding object
                finding = DastFinding(title=f"{qid_title} - {qid_category}",
                                      description=description)
                finding.set_meta("tool", scanner.get_name())
                finding.set_meta("severity", qid_severity)
                # Endpoints (for backwards compatibility)
                endpoints = list()
                for item in entrypoints:
                    endpoint = url.parse_url(item)
                    if endpoint in endpoints:
                        continue
                    endpoints.append(endpoint)
                finding.set_meta("endpoints", endpoints)
                log.debug(f"Endpoints: {finding.get_meta('endpoints')}")
                # Done
                scanner.findings.append(finding)
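The parser above joins the glossary to the per-QID records via XPath; a self-contained sketch with a hand-made stub (not real Qualys output) showing just that cross-reference:

from lxml import etree

stub = b"""<WAS_WEBAPP_REPORT>
  <GLOSSARY><QID_LIST>
    <QID><QID>150001</QID><TITLE>Reflected XSS</TITLE><CATEGORY>Vulnerability</CATEGORY></QID>
  </QID_LIST></GLOSSARY>
  <VULNERABILITY_LIST>
    <VULNERABILITY><QID>150001</QID><URL>http://example.com/q</URL></VULNERABILITY>
  </VULNERABILITY_LIST>
</WAS_WEBAPP_REPORT>"""
obj = etree.fromstring(stub)
for qid in obj.xpath("/WAS_WEBAPP_REPORT/GLOSSARY/QID_LIST/QID"):
    _qid = qid.findtext("QID")
    records = obj.xpath(f'//VULNERABILITY_LIST/VULNERABILITY/QID[text()="{_qid}"]/..')
    print(qid.findtext("TITLE"), [r.findtext("URL") for r in records])
# -> Reflected XSS ['http://example.com/q']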
Example #8
def parse_findings(output_file, scanner):
    """ Parse findings (code from dusty 1.0) """
    log.debug("Parsing findings")
    nscan = parse(output_file)
    root = nscan.getroot()
    # Check validity
    if "nmaprun" not in root.tag:
        log.error(
            "Exception during Nmap findings processing: invalid XML file")
        error = Error(
            tool=scanner.get_name(),
            error="Exception during Nmap findings processing",
            details="Output file doesn't seem to be a valid Nmap XML file.")
        scanner.errors.append(error)
        return
    dupes = dict()
    for host in root.iter("host"):
        host_info = ""
        ip = host.find("address[@addrtype='ipv4']").attrib["addr"]
        fqdn = None
        if host.find("hostnames/hostname[@type='PTR']") is not None:
            fqdn = host.find("hostnames/hostname[@type='PTR']").attrib["name"]
        #
        # Iterate over this host's os elements ("os_element" avoids shadowing the os module)
        for os_element in host.iter("os"):
            if ip is not None:
                host_info += "IP Address: %s\n" % ip
            if fqdn is not None:
                host_info += "FQDN: %s\n" % fqdn
            for osv in os_element.iter("osmatch"):
                if "name" in osv.attrib:
                    host_info += "Host OS: %s\n" % osv.attrib["name"]
                if "accuracy" in osv.attrib:
                    host_info += "Accuracy: {0}%\n".format(
                        osv.attrib["accuracy"])
            host_info += "\n"
        #
        xpath_port_selector = "ports/port[state/@state='open']"
        if scanner.config.get("include_unfiltered", False):
            xpath_port_selector = "ports/port[state/@state=('open','unfiltered')]"
        #
        for portelem in elementpath.select(host, xpath_port_selector):
            port = portelem.attrib["portid"]
            protocol = portelem.attrib["protocol"]
            #
            title = f"Open port: {ip}:{port}/{protocol}"
            description = host_info
            description += f"Port: {port}\n"
            serviceinfo = ""
            #
            if portelem.find("service") is not None:
                if "product" in portelem.find("service").attrib:
                    serviceinfo += "Product: %s\n" % portelem.find(
                        "service").attrib["product"]
                #
                if "version" in portelem.find("service").attrib:
                    serviceinfo += "Version: %s\n" % portelem.find(
                        "service").attrib["version"]
                #
                if "extrainfo" in portelem.find("service").attrib:
                    serviceinfo += "Extra Info: %s\n" % portelem.find(
                        "service").attrib["extrainfo"]
                #
                description += serviceinfo
            #
            description += "\n\n"
            #
            dupe_key = f"{port}_{protocol}_{ip}"
            if dupe_key in dupes:
                find = dupes[dupe_key]
                if description is not None:
                    find["description"] += description
            else:
                find = {
                    "title": title,
                    "description": description,
                    "endpoints": list()
                }
                find["endpoints"].append(f"{ip}:{port}/{protocol}")
                dupes[dupe_key] = find
    # Create finding objects
    for item in dupes.values():
        finding = DastFinding(title=item["title"],
                              description=markdown.markdown_escape(
                                  item["description"]))
        finding.set_meta("tool", scanner.get_name())
        finding.set_meta("severity", SEVERITIES[-1])
        # Endpoints (for backwards compatibility)
        endpoints = list()
        for entry in item["endpoints"]:
            endpoint = url.parse_url(entry)
            if endpoint in endpoints:
                continue
            endpoints.append(endpoint)
        finding.set_meta("endpoints", endpoints)
        log.debug(f"Endpoints: {finding.get_meta('endpoints')}")
        # Done
        scanner.findings.append(finding)
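One detail worth noting: the ('open','unfiltered') sequence comparison is XPath 2.0 syntax, which ElementTree's built-in queries cannot evaluate; that is why elementpath.select() is used. A self-contained sketch (assuming elementpath's default XPath 2.0 parser):

import xml.etree.ElementTree as ET

import elementpath

host = ET.fromstring(
    "<host><ports>"
    "<port protocol='tcp' portid='80'><state state='open'/></port>"
    "<port protocol='tcp' portid='443'><state state='unfiltered'/></port>"
    "<port protocol='tcp' portid='8080'><state state='closed'/></port>"
    "</ports></host>")
selected = elementpath.select(
    host, "ports/port[state/@state=('open','unfiltered')]")
print([p.attrib["portid"] for p in selected])
# -> ['80', '443']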
Example #9
def parse_findings(output_file, scanner):
    """ Parse findings (code from dusty 1.0) """
    log.debug("Parsing findings")
    parser = etree.XMLParser(resolve_entities=False, huge_tree=True)
    w3scan = etree.parse(output_file, parser)
    root = w3scan.getroot()
    dupes = dict()
    for vulnerability in root.findall("vulnerability"):
        name = vulnerability.attrib["name"]
        severity = constants.W3AF_SEVERITIES[vulnerability.attrib["severity"]]
        description = "%s are:\n\n" % vulnerability.find(
            "description").text.split("are:")[0]
        transactions = vulnerability.find("http-transactions")
        if transactions is not None:
            transactions = transactions.findall("http-transaction")
        else:
            transactions = []  # guard: avoid iterating over None below
        for transaction in transactions:
            request = transaction.find("http-request")
            response = transaction.find("http-response")
            status = request.find("status").text.split(" ")
            response_code = response.find("status").text.split(" ")[1]
            http_method = status[0]
            request_url = status[1]
            data = ""
            for part in [request, response]:
                headers = [f"{h.attrib['field']} -> {h.attrib['content']}" \
                        for h in part.find("headers").findall("header")]
                headers = "\n".join(headers)
                request_body = part.find("body")
                if request_body.attrib["content-encoding"] == "base64":
                    if request_body.text:
                        request_body = base64.b64decode(
                            request_body.text).decode("utf-8", errors="ignore")
                    else:
                        request_body = ""
                else:
                    request_body = request_body.text if request_body.text else ""
                if not data:
                    data = f"Request: {request_url} {http_method} {response_code} \n\n"
                else:
                    data += "Response: \n"
                data += f"Headers: {headers}\n\nBody:{request_body}\n\n"
            dupe_url = urlparse(request_url)
            # Create dupe path (could use a smarter implementation)
            dupe_path = dupe_url.path
            for separator in ("%", "+", "."):
                if separator in dupe_path:
                    dupe_path = dupe_path[:dupe_path.index(separator)]
            if "/" in dupe_path:
                dupe_path = dupe_path[:dupe_path.rindex("/")]
            dupe_url = f"{dupe_url.scheme}://{dupe_url.netloc}{dupe_path}"
            dupe_code = f"{str(response_code)[0]}xx"
            dupe_key = hashlib.md5(
                f"{name} {dupe_url} {http_method} {dupe_code}".encode(
                    "utf-8")).hexdigest()
            # Create finding data dictionary
            if dupe_key not in dupes:
                dupes[dupe_key] = {
                    "title": f"{name} {dupe_url} {dupe_code}",
                    "description": description,
                    "severity": severity,
                    "references": data,
                    "endpoints": list()
                }
            elif data not in dupes[dupe_key]["references"]:
                dupes[dupe_key]["references"] += data
            if request_url not in dupes[dupe_key]["endpoints"]:
                dupes[dupe_key]["description"] += f"- {request_url}\n\n"
                dupes[dupe_key]["endpoints"].append(request_url)
    # Create finding objects
    for item in dupes.values():
        description = f"{markdown.markdown_escape(item['description'])}\n\n"
        description += f"**References:**\n {markdown.markdown_escape(item['references'])}\n\n"
        # Make finding object
        finding = DastFinding(title=item["title"], description=description)
        finding.set_meta("tool", scanner.get_name())
        finding.set_meta("severity", item["severity"])
        # Endpoints (for backwards compatibility)
        endpoints = list()
        for entry in item["endpoints"]:
            endpoint = url.parse_url(entry)
            if endpoint in endpoints:
                continue
            endpoints.append(endpoint)
        finding.set_meta("endpoints", endpoints)
        log.debug(f"Endpoints: {finding.get_meta('endpoints')}")
        # Done
        scanner.findings.append(finding)
Example #10
def parse_findings(output_file, scanner):
    """ Parse findings (code from dusty 1.0) """
    log.debug("Parsing findings")
    dupes = dict()
    #
    tree = ET.parse(output_file)
    root = tree.getroot()
    new_root = root.find("niktoscan")
    scan = new_root.find("scandetails")
    #
    for item in scan.findall("item"):
        # Title: cut the description down to its first sentence
        description = item.find("description").text
        sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s',
                             description)
        title_text = sentences[0][:900] if sentences else description[:900]
        #
        # Url
        ip = item.find("iplink").text
        # Remove the port numbers for 80/443
        ip = ip.replace(":80", "")
        ip = ip.replace(":443", "")
        #
        # Description
        description = "\nHost: " + ip + "\n" + description
        dupe_key = hashlib.md5(description.encode("utf-8")).hexdigest()
        #
        if dupe_key in dupes:
            finding = dupes[dupe_key]
            if finding["description"]:
                finding["description"] = \
                    finding["description"] + "\nHost:" + ip + "\n" + description
        else:
            finding = {
                "title": title_text,
                "description": description,
                "endpoints": list()
            }
            dupes[dupe_key] = finding
        finding["endpoints"].append(ip)
    # Create finding objects
    for item in dupes.values():
        finding = DastFinding(title=item["title"],
                              description=markdown.markdown_escape(
                                  item["description"]))
        finding.set_meta("tool", scanner.get_name())
        finding.set_meta("severity", SEVERITIES[-1])
        # Endpoints (for backwards compatibility)
        endpoints = list()
        for entry in item["endpoints"]:
            endpoint = url.parse_url(entry)
            if endpoint in endpoints:
                continue
            endpoints.append(endpoint)
        finding.set_meta("endpoints", endpoints)
        log.debug(f"Endpoints: {finding.get_meta('endpoints')}")
        # Done
        scanner.findings.append(finding)
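Finally, a quick check of the sentence-splitting regex used for the title, against an invented description:

import re

description = ("The anti-clickjacking X-Frame-Options header is not present. "
               "This may allow the page to be framed.")
sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', description)
print(sentences[0])
# -> The anti-clickjacking X-Frame-Options header is not present.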