Code Example #1
 def __init__(self, file, test):
     with open(file, "rb") as f:
         data = load(f)
     self.items = []
     severity = 'Medium'
     tool = 'sslyze'
     dynamic_finding = True
     scanner_confidence = 'certain'
     for target in data['accepted_targets']:
         chain_info = ""
         for each in target['commands_results']["certinfo"]['certificate_chain']:
             chain_info += f'{each["subject"]}\n'
         certificate_validation = []
         for validation_result in target['commands_results']['certinfo']['path_validation_result_list']:
             if validation_result['verify_string'] != 'ok':
                 certificate_validation.append(f"Certificate chain is not trusted by "
                                               f"{validation_result['trust_store']['name']} "
                                               f"trust_store version {validation_result['trust_store']['version']}")
         if certificate_validation:
             descr = "\n".join(certificate_validation)
             self.items.append(Finding(title="Certificate is not trusted",
                                       severity=severity,
                                       description=f'Certificate chain: {chain_info}\n {descr}',
                                       tool=tool,
                                       endpoint=[chain_info],
                                       dynamic_finding=dynamic_finding,
                                       scanner_confidence=scanner_confidence))
         if target['commands_results']['heartbleed']['is_vulnerable_to_heartbleed']:
             self.items.append(Finding(title="Certificate is vulnerable to Heardbleed",
                                       severity=severity,
                                       description=f'Certificate chain: {chain_info}\n is vulnerable to heartbleed',
                                       tool=tool,
                                       endpoint=[chain_info],
                                       dynamic_finding=dynamic_finding,
                                       scanner_confidence=scanner_confidence))
         if 'NOT_VULNERABLE' not in target['commands_results']['robot']['robot_result_enum']:
             self.items.append(Finding(title="Certificate is vulnerable to Robot",
                                       severity=severity,
                                       description=f'Certificate chain: {chain_info}\n '
                                                   f'is vulnerable to robot with '
                                                   f'{target["commands_results"]["robot"]["robot_result_enum"]}',
                                       tool=tool,
                                       endpoint=[chain_info],
                                       dynamic_finding=dynamic_finding,
                                       scanner_confidence=scanner_confidence))
         if target['commands_results']['openssl_ccs']['is_vulnerable_to_ccs_injection']:
             self.items.append(Finding(title="Certificate is vulnerable to CCS Injection",
                                       severity=severity,
                                       description=f'Certificate chain: {chain_info}\n '
                                                   f'is vulnerable to CCS Injection',
                                       tool=tool,
                                       endpoint=[chain_info],
                                       dynamic_finding=dynamic_finding,
                                       scanner_confidence=scanner_confidence))
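
The parser above walks sslyze JSON output. For reference, a minimal sketch of the input shape it expects; the field names are taken from the keys accessed above, while the values are illustrative only:

sample = {
    "accepted_targets": [{
        "commands_results": {
            "certinfo": {
                "certificate_chain": [{"subject": "CN=example.com"}],
                "path_validation_result_list": [{
                    "verify_string": "ok",
                    "trust_store": {"name": "Mozilla", "version": "2019-01-01"}
                }]
            },
            "heartbleed": {"is_vulnerable_to_heartbleed": False},
            "robot": {"robot_result_enum": "NOT_VULNERABLE_NO_ORACLE"},
            "openssl_ccs": {"is_vulnerable_to_ccs_injection": False}
        }
    }]
}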
Code Example #2
File: parser.py Project: LifeDJIK/dusty
 def __init__(self, aem_hacker_output):
     tool = "AEM Hacker"
     severity = "Info"
     item_regex = re.compile(
         "".join([
             r"^(\[\+\] New Finding!!!)$",
             r"\s*Name: (?P<name>.*)$",
             r"\s*Url: (?P<url>.*)$",
             r"\s*Description: (?P<description>[\s\S]*?)\n\n"
         ]), re.MULTILINE)
     # Populate items
     self.items = list()
     for item in item_regex.finditer(aem_hacker_output):
         finding = Finding(
             title=item.group("name"),
             url=item.group("url"),
             description=md(item.group("description")),
             tool=tool,
             test=tool,
             severity=severity,
             active=False,
             verified=False,
             dynamic_finding=True,
             numerical_severity=Finding.get_numerical_severity(severity))
         finding.unsaved_endpoints = [
             make_endpoint_from_url(item.group("url"))
         ]
         self.items.append(finding)
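
To see what the pattern captures, here is a small self-contained demo; the sample text is fabricated to match the expected aem-hacker output format, not taken from a real scan:

import re

item_regex = re.compile(
    "".join([
        r"^(\[\+\] New Finding!!!)$",
        r"\s*Name: (?P<name>.*)$",
        r"\s*Url: (?P<url>.*)$",
        r"\s*Description: (?P<description>[\s\S]*?)\n\n"
    ]), re.MULTILINE)

sample = (
    "[+] New Finding!!!\n"
    "Name: DefaultGetServlet\n"
    "Url: http://target/etc.json\n"
    "Description: Sensitive information might be exposed.\n\n"
)

for item in item_regex.finditer(sample):
    print(item.group("name"), item.group("url"))
# DefaultGetServlet http://target/etc.json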
Code Example #3
File: parser.py Project: LifeDJIK/dusty
 def __init__(self, zap_result, tool_name):
     zap_json = json.loads(zap_result)
     # Populate items
     self.items = list()
     for site in zap_json["site"]:
         for alert in site["alerts"]:
             description = list()
             if "desc" in alert:
                 description.append(md(alert["desc"]))
             if "solution" in alert:
                 description.append(f'**Solution**:\n {md(alert["solution"])}')
             if "reference" in alert:
                 description.append(f'**Reference**:\n {md(alert["reference"])}')
             if "otherinfo" in alert:
                 description.append(f'**Other information**:\n {md(alert["otherinfo"])}')
             description.append(f'**Confidence**: {md(c.ZAP_CONFIDENCES[alert["confidence"]])}')
             description = "\n".join(description)
             instances = list()
             if alert["instances"]:
                 instances.append("\n")
                 instances.append("| URI | Method | Parameter | Attack | Evidence |")
                 instances.append("| --- | ------ | --------- | ------ | -------- |")
             for item in alert["instances"]:
                 instances.append("| {} |".format(" | ".join([
                     html.escape(md_table_escape(item.get("uri", "-"))),
                     html.escape(md_table_escape(item.get("method", "-"))),
                     html.escape(md_table_escape(item.get("param", "-"))),
                     html.escape(md_table_escape(item.get("attack", "-"))),
                     html.escape(md_table_escape(item.get("evidence", "-")))
                 ])))
             finding = Finding(
                 title=alert["name"],
                 url=site["@name"],
                 description=description,
                 payload="\n".join(instances),
                 tool=tool_name,
                 test=tool_name,
                 severity=c.ZAP_SEVERITIES[alert["riskcode"]],
                 active=False,
                 verified=False,
                 dynamic_finding=True,
                 numerical_severity=Finding.get_numerical_severity(
                     c.ZAP_SEVERITIES[alert["riskcode"]]
                 )
             )
             finding.unsaved_endpoints = list()
             added_endpoints = set()
             for item in alert["instances"]:
                 if not item.get("uri", None):
                     continue
                 endpoint = make_endpoint_from_url(
                     item.get("uri"),
                     include_query=False, include_fragment=False
                 )
                 if str(endpoint) in added_endpoints:
                     continue
                 finding.unsaved_endpoints.append(endpoint)
                 added_endpoints.add(str(endpoint))
             self.items.append(finding)
Code Example #4
File: parser.py Project: LifeDJIK/dusty
 def __init__(self, file, test):
     self.items = []
     parser = etree.XMLParser(remove_blank_text=True, no_network=True, recover=True)
     d = etree.parse(file, parser)
     qids = d.xpath('/WAS_WEBAPP_REPORT/GLOSSARY/QID_LIST/QID')
     disabled_titles = ['Scan Diagnostics']
     for qid in qids:
         qid_title = qid.findtext('TITLE')
         if qid_title not in disabled_titles:
             _qid = qid.findtext('QID')
             qid_solution = qid.findtext('SOLUTION')
             qid_description = qid.findtext('DESCRIPTION')
             qid_impact = qid.findtext('IMPACT')
             qid_category = qid.findtext('CATEGORY')
             qid_severity = 'Info'
              owasp = qid.findtext('OWASP') or ''
              wasc = qid.findtext('WASC') or ''
              cwe = qid.findtext('CWE') or ''
              cvss_base = qid.findtext('CVSS_BASE') or ''
             if qid.xpath('SEVERITY'):
                 qid_severity = c.QUALYS_SEVERITIES[int(qid.findtext('SEVERITY'))]
             description = f'{qid_description}\n\n**OWASP**:{owasp}\n\n**WASC**:{wasc}\n\n**CVSS_BASE**:{cvss_base}\n\n'
             references = []
             entrypoints = []
             if 'Information Gathered' in qid_category:
                 qid_severity = 'Info'
                 records = d.xpath(f'//INFORMATION_GATHERED_LIST/INFORMATION_GATHERED/QID[text()="{_qid}"]/..')
                 for record in records:
                     references.append(html.escape(base64.b64decode(record.findtext('DATA')).decode("utf-8", errors="ignore")))
             else:
                 records = d.xpath(f'//VULNERABILITY_LIST/VULNERABILITY/QID[text()="{_qid}"]/..')
                 for record in records:
                     url = record.findtext('URL')
                      access_path = [a.text for a in record.xpath('ACCESS_PATH/URL')]
                     method = record.findtext('PAYLOADS/PAYLOAD/REQUEST/METHOD')
                     if not method:
                         logging.error("Bad record: %s", str(record))
                         method = ""
                     request = record.findtext('PAYLOADS/PAYLOAD/REQUEST/URL')
                     response = record.findtext('PAYLOADS/PAYLOAD/RESPONSE/CONTENTS')
                     response = html.escape(base64.b64decode(response).decode("utf-8", errors="ignore"))
                     entrypoints.append(url)
                      entrypoints.extend(access_path)
                     references.append(f"{method.upper()}: {request}\n\nResponse: {response}\n\n")
             for reference in references:
                 finding = Finding(title=f'{qid_title} - {qid_category}', tool="QualysWAS", cwe=cwe,
                                   description=description, test=test, severity=qid_severity,
                                   mitigation=qid_solution, references=reference,
                                   active=False, verified=False, false_p=False, duplicate=False,
                                   out_of_scope=False, mitigated=None, impact=qid_impact)
                 finding.unsaved_endpoints.extend(entrypoints)
                 self.items.append(finding)
Code Example #5
def main():
    parser = argparse.ArgumentParser(description='jira check')
    parser.add_argument('-d',
                        '--delete',
                        type=str,
                        help="EPMPRJ-1 or 'EPMPRJ-1, EPMPRJ-2' or 1:2")
    parser.add_argument('-u', '--user', type=str, help="")
    parser.add_argument('-p', '--password', type=str, help="")
    args, unknown = parser.parse_known_args()
    if args.delete:
        default_config, test_configs = config_from_yaml()
        project = default_config.get('jira_service').project
        url = default_config.get('jira_service').url
        user = args.user if args.user else default_config.get(
            'jira_service').user
        password = args.password if args.password else default_config.get(
            'jira_service').password
        j = JiraWrapper(url, user, password, project)
        j.connect()
        ids = []
        if ':' in args.delete:
            start, end = args.delete.split(':')
            ids = [
                f'{project}-{str(i)}' for i in range(int(start),
                                                     int(end) + 1)
            ]
        else:
            ids = [item.strip() for item in args.delete.split(",")]

        try:
            for issue_id in ids:
                j.client.issue(issue_id).delete()
                print(f'Issue {issue_id} was deleted.')
        finally:
            j.client.close()
    else:
        default_config, test_configs = config_from_yaml()
        title = 'Carrier test. Please remove this ticket. It was created for testing purposes only.'
        test = 'Carrier'
        description = 'Please remove this ticket. \nIt was created for testing purposes only.'
        severity = 'Info'
        item = Finding(title=title,
                       tool=test,
                       active=False,
                       verified=False,
                       description=description,
                       severity=severity)
        report_to_jira(default_config, [item])
        print(
            'Issue was created.\nTo delete created issue:\njira_check -s <test_name> -d <issue_key>\n'
            'Optional params:\nTo specify user with an ability to delete tickets:\n-u user_name -p password'
        )
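
The -d argument accepts either explicit issue keys ('EPMPRJ-1, EPMPRJ-2') or a start:end range. A quick illustration of the range-expansion logic, with an illustrative project key:

project = 'EPMPRJ'  # illustrative project key
delete_arg = '1:3'
if ':' in delete_arg:
    start, end = delete_arg.split(':')
    ids = [f'{project}-{i}' for i in range(int(start), int(end) + 1)]
else:
    ids = [item.strip() for item in delete_arg.split(',')]
print(ids)  # ['EPMPRJ-1', 'EPMPRJ-2', 'EPMPRJ-3']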
Code Example #6
File: parser.py Project: hunkom/dusty
    def __init__(self, filename, test):
        with open(filename, 'rb') as f:
            data = json.load(f)
        dupes = dict()
        find_date = None
        if "generated_at" in data:
            find_date = datetime.strptime(
                data["generated_at"],
                '%Y-%m-%dT%H:%M:%SZ').strftime("%Y-%m-%d %H:%M:%S")

        for item in data["results"]:
            impact = ''
            findingdetail = ''

            title = "Test Name: " + item["test_name"] + " Test ID: " + item[
                "test_id"]

            ###### Finding details information ######
            findingdetail += "Filename: " + item["filename"] + "\n"
            findingdetail += "Line number: " + str(item["line_number"]) + "\n"
            findingdetail += "Issue Confidence: " + item[
                "issue_confidence"] + "\n\n"
            findingdetail += "Code:\n"
            findingdetail += item["code"] + "\n"

            sev = item["issue_severity"]
            mitigation = item["issue_text"]
            references = item["test_id"]

            dupe_key = title + item["filename"] + str(item["line_number"])

            if dupe_key not in dupes:
                dupes[dupe_key] = Finding(
                    title=title,
                    tool="bandit",
                    active=False,
                    verified=False,
                    description=findingdetail,
                    severity=sev.title(),
                    numerical_severity=Finding.get_numerical_severity(sev.title()),
                    mitigation=mitigation,
                    impact=impact,
                    references=references,
                    file_path=item["filename"],
                    line=item["line_number"],
                    url='N/A',
                    date=find_date,
                    static_finding=True)
        self.items = dupes.values()
Code Example #7
File: parser.py Project: hunkom/dusty
 def __init__(self, file, test):
     with open(file, "rb") as f:
         data = load(f)
     self.items = []
     for issue in data:
         title = f'Open port {issue["ports"][0]["port"]} found on {issue["ip"]}'
         self.items.append(
             Finding(
                 title=title,
                 tool="masscan",
                 active=False,
                 verified=False,
                 description=title,
                 severity="Info",
                 endpoints=[f'{issue["ip"]}:{issue["ports"][0]["port"]}']))
Code Example #8
    def get_items(self, tree, test):
        """
        @return items A list of Host instances
        """

        items = list()
        for node in tree.findall('site'):
            site = Site(node)
            main_host = Endpoint(
                host=site.ip + (":" + site.port if site.port is not None else ""))
            for item in site.items:
                severity = item.riskdesc.split(' ', 1)[0]
                references = ''
                for ref in item.ref:
                    references += ref + "\n"

                find = Finding(
                    title=item.name,
                    tool="ZAP",
                    cwe=item.cwe,
                    description=md(item.desc),
                    test=test,
                    severity=severity,
                    mitigation=md(item.resolution),
                    references=references,
                    active=False,
                    verified=False,
                    false_p=False,
                    duplicate=False,
                    out_of_scope=False,
                    mitigated=None,
                    impact="No impact provided",
                    numerical_severity=Finding.get_numerical_severity(
                        severity))

                find.unsaved_endpoints = [main_host]
                for i in item.items:
                    parts = urlparse(i['uri'])
                    find.unsaved_endpoints.append(
                        Endpoint(protocol=parts.scheme,
                                 host=parts.netloc,
                                 path=parts.path,
                                 query=parts.query,
                                 fragment=parts.fragment))
                items.append(find)
        return items
Code Example #9
File: parser.py Project: hunkom/dusty
    def __init__(self, filename, test):
        dupes = dict()
        find_date = None

        data = xml.etree.ElementTree.parse(filename).getroot()

        for item in data.findall('BugInstance'):
            title = item.find('ShortMessage').text
            description = item.find('LongMessage').text
            category = item.get('category')
            issue_type = item.get('type')
            severity = item.get('priority')
            path = item.find('Class').find('SourceLine').get('sourcefile')
            line = item.find('Class').find('SourceLine').find('Message').text

            messages = ''
            for element in item.findall('SourceLine'):
                messages += (element.find('Message').text + "\n\n")

            dupe_key = title + ' ' + issue_type + ' ' + category

            severity_level = SEVERITY_TYPE.get(int(severity), "")

            if dupe_key not in dupes:
                dupes[dupe_key] = Finding(title=title,
                                          tool="spotbugs",
                                          active=False,
                                          verified=False,
                                          description=description,
                                          severity=severity_level,
                                          numerical_severity=severity,
                                          mitigation=False,
                                          impact=False,
                                          references=False,
                                          file_path=path,
                                          line=line,
                                          url='N/A',
                                          date=find_date,
                                          steps_to_reproduce=messages,
                                          static_finding=True)
        self.items = dupes.values()
Code Example #10
File: parser.py Project: hunkom/dusty
 def __init__(self, filename, test):
     dupes = dict()
     find_date = None
     self.items = []
     if not os.path.exists(filename):
         return
      with open(filename) as f:
          data = json.load(f)
     for item in ['good_finding', 'sec_issues', 'missing_sec_header']:
         for key, value in data[item].items():
             for sub_value in value:
                 title = sub_value['title']
                 description = sub_value['description']
                 file_path = sub_value.get('path', None)
                 line = sub_value.get('line', None)
                 steps_to_reproduce = f'<pre>{sub_value.get("lines", "")}</pre>\n\n'
                  dupe_key = f"{key}: {sub_value['title']} with file {sub_value.get('filename', '')}"
                 if dupe_key not in dupes:
                     dupes[dupe_key] = Finding(title=title,
                                               tool=test,
                                               active=False,
                                               verified=False,
                                               description=description,
                                               severity='Medium',
                                               file_path=file_path,
                                               line=line,
                                               url='N/A',
                                               date=find_date,
                                               steps_to_reproduce=re.sub(
                                                   r'[^\x00-\x7f]', r'',
                                                   steps_to_reproduce),
                                               static_finding=True)
                 else:
                     dupes[dupe_key].finding['steps_to_reproduce'] += "\n\n"
                     dupes[dupe_key].finding[
                         'steps_to_reproduce'] += re.sub(
                             r'[^\x00-\x7f]', r'', steps_to_reproduce)
     self.items = dupes.values()
Code Example #11
 def __init__(self, output, _):
     self.items = list()
     # Parse JSON from gosec stdout
     data = json.loads(output[0].decode("utf-8"))
     # Populate findings
     all_items = OrderedDict()
     for item in data["Issues"]:
         # Prepare finding item
         title = f"{item['details']} - in {item['file']}"
         if title not in all_items:
             tool = "gosec"
             severity = GosecOutputParser.GOSEC_SEVERITY_MAPPING[item["severity"]]
             file_path = item["file"]
             description = \
                 f"{item['details']}\n" \
                 f"**Rule ID**: {item['rule_id']}\n" \
                 f"**Confidence**: {item['confidence']}"
             steps_to_reproduce = list()
             all_items[title] = Finding(
                 tool=tool, title=title,
                 severity=severity, numerical_severity=Finding.get_numerical_severity(severity),
                 file_path=file_path, description=description,
                 steps_to_reproduce=steps_to_reproduce,
                 active=False, url="N/A",
                 static_finding=True, verified=False,
                 mitigation=False, impact=False, references=False
             )
         # Fill steps to reproduce
         finding = all_items[title]
         finding.finding['steps_to_reproduce'].append(
             f"<pre>" \
             f"Location: {item['file']}:{item['line']}\n" \
             f"Code:\n{item['code']}" \
             f"</pre>"
         )
     # Populate items
     for key in all_items:
         self.items.append(all_items[key])
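
A minimal sketch of the gosec JSON consumed here; the keys mirror the accesses above, the values are illustrative:

sample = {
    "Issues": [{
        "severity": "MEDIUM",
        "confidence": "HIGH",
        "rule_id": "G104",
        "details": "Errors unhandled.",
        "file": "main.go",
        "line": "42",
        "code": "foo()"
    }]
}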
Code Example #12
File: parser.py Project: hunkom/dusty
    def __init__(self, filename, test):
        with open(filename, 'rb') as f:
            data = json.load(f)
        dupes = dict()
        find_date = data['scan_info']['start_time']

        for item in data["warnings"]:
            dupe_key = f"{item['warning_type']} in {item['file']}"

            if dupe_key not in dupes:
                dupes[dupe_key] = Finding(title=dupe_key,
                                          tool="brakeman",
                                          active=False,
                                          verified=False,
                                          description=item['message'],
                                          scanner_confidence=item['confidence'],
                                          severity=item['confidence'],
                                          references=item['link'],
                                          file_path=item["file"],
                                          line=item["line"],
                                          url='N/A',
                                          date=find_date,
                                          static_finding=True)
        self.items = dupes.values()
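
A minimal sketch of the Brakeman JSON report this parser reads; the keys mirror the accesses above, the values are illustrative:

sample = {
    "scan_info": {"start_time": "2020-01-01 00:00:00 +0000"},
    "warnings": [{
        "warning_type": "SQL Injection",
        "message": "Possible SQL injection",
        "confidence": "High",
        "link": "https://brakemanscanner.org/docs/warning_types/sql_injection/",
        "file": "app/models/user.rb",
        "line": 42
    }]
}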
Code Example #13
 def __init__(self, filename, test):
     dupes = dict()
     find_date = None
     self.items = []
     if not os.path.exists(filename):
         return
      with open(filename) as f:
          data = json.load(f)
     for vulnerability in data:
         package = vulnerability[0]
         affected = vulnerability[1]
         installed = vulnerability[2]
         description = vulnerability[3]
         title = 'Update {} {}'.format(package, installed)
         version_to_update = affected.split(',')[-1].replace('<', '')
         fixed_version = 'latest' if '=' in version_to_update else version_to_update
         title += ' to {} version'.format(fixed_version)
         if package not in dupes:
             dupes[package] = Finding(title=title,
                                      tool=test,
                                      active=False,
                                      verified=False,
                                      description=description,
                                      severity='Medium',
                                      date=find_date,
                                      static_finding=True)
         else:
             prev_version = re.findall('to (.+) version',
                                       dupes[package].finding['title'])[0]
             if fixed_version != prev_version:
                 if version.parse(fixed_version) > version.parse(
                         prev_version):
                     dupes[package].finding['title'] = title.replace(
                         prev_version, fixed_version)
                     dupes[package].finding[
                         'description'] += '  \n  \n' + description
     self.items = dupes.values()
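
The fixed-version logic above can be exercised in isolation. A small demo, assuming version is packaging.version as used in the comparison:

from packaging import version

affected = '>=1.0,<2.3.1'
version_to_update = affected.split(',')[-1].replace('<', '')
fixed_version = 'latest' if '=' in version_to_update else version_to_update
print(fixed_version)  # 2.3.1 (a '<=2.3.1' upper bound would yield 'latest')
print(version.parse('2.3.1') > version.parse('2.0.0'))  # True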
Code Example #14
File: parser.py Project: LifeDJIK/dusty
    def __init__(self, filename, filtered_statuses=constants.PTAI_DEFAULT_FILTERED_STATUSES):
        """
        :param filename:
        :param filtered_statuses: str with statuses, separated ', '
        """
        file_path_descriptions_list = ['Уязвимый файл', 'Vulnerable File']

        def trim_blank_lines(line):
            blank_lines_patterns = ['\n( *\n)', '\n+']
            for pattern in blank_lines_patterns:
                finds = re.findall(pattern, line)
                for find in finds:
                    line = line.replace(find, '\n')
            return line

        def get_value_by_description(table_soup, descriptions):
            option_descriptions_soup = table_soup.select('td[class*="option-description"]')
            option_descriptions = [item.text for item in option_descriptions_soup]
            value_index = -1
            value = ''
            for description in descriptions:
                if description in option_descriptions:
                    value_index = option_descriptions.index(description)
                    break
            if value_index >= 0:
                option_values_soup = table_soap.select('td[class*="option-value"]')
                value = option_values_soup[value_index].text
            return value

        dupes = dict()
        self.items = []
        if not os.path.exists(filename):
            return
        soup = BeautifulSoup(open(filename, encoding="utf8"), 'html.parser')
        vulnerabilities_info = {}
        vulnerabilities_info_soup = soup.find_all('div', {'class': 'type-description'})
        for vulnerability_info_soup in vulnerabilities_info_soup:
            id = vulnerability_info_soup.find('a', {'class': 'glossary-anchor'}).attrs.get('id')
            vulnerabilities_info[id] = vulnerability_info_soup.text.replace(id, '')
        vulnerabilities_soup = soup.find_all('div', {'class': 'vulnerability'})
        for vulnerability_soup in vulnerabilities_soup:
            if filtered_statuses:
                skip_flag = False
                for filter_status in filtered_statuses:
                    status = vulnerability_soup.find_all('i', {'class': '{}-icon'.format(filter_status)})
                    if status:
                        skip_flag = True
                        break
                if skip_flag:
                    continue
            severity_level_soup = vulnerability_soup.select('div[class*="vulnerability-type-name-level-"]')
            title = ''
            file_path = ''
            short_file_path = ''
            if severity_level_soup:
                title = severity_level_soup[0].text
                # Get file path (strip line number if present)
                file_path = get_value_by_description(vulnerability_soup, file_path_descriptions_list).rsplit(' : ', 1)[0]
                if '\\' in file_path:
                    short_file_path = ' in ...\\' + file_path.split('\\')[-1]
                severity_classes_soup = severity_level_soup[0].attrs.get('class')
                for severity_class_soup in severity_classes_soup:
                    if 'vulnerability-type-name-level-' in severity_class_soup:
                        severity = severity_class_soup.split('-')[-1]
            vulnerability_link_info_soup = vulnerability_soup.find_all('a', {'class': 'vulnerability-description-link'})
            if vulnerability_link_info_soup:
                vulnerability_info_href = vulnerability_link_info_soup[0].attrs.get('href').replace('#', '')
                vulnerability_info = ''
                if vulnerability_info_href in vulnerabilities_info:
                    vulnerability_info = vulnerabilities_info[
                                             vulnerability_info_href][
                                         vulnerabilities_info[vulnerability_info_href].find(title) + len(title):]
            detail_info_soup = vulnerability_soup.find_all('table', {'class': 'vulnerability-detail-info'})
            detail_info_values = {}
            if detail_info_soup:
                detail_info_soup_tds = detail_info_soup[0].find_all('td')
                detail_info_values[detail_info_soup_tds[0].text] = detail_info_soup_tds[1].text
            functions = vulnerability_soup.find_all('div', {'class': 'vulnerability-info'})
            function_blocks_strs = []
            for function in functions:
                function_info_values = {}
                for tr in function.find_all('table', {'class': 'vulnerability-detail-info'})[0].find_all('tr'):
                    tds = tr.find_all('td')
                    if tds:
                        param = tds[0].text
                        if param.startswith('\n'):
                            param = param[1:]
                        value = ' '
                        if len(tds) == 2:
                            value = tds[1].text
                            if value.startswith('\n'):
                                value = value[1:]
                            if 'CWE' in value:
                                link_str_list = vulnerability_info[vulnerability_info.find(value.strip()):].split('\n')
                                link_info = [x.strip() for x in link_str_list if x.strip()]
                                if not link_info or link_info == ['.']:
                                    a_soup = tds[1].find_all('a')
                                    if a_soup:
                                        a_href = a_soup[0].attrs.get('href')
                                        a_text = a_soup[0].text
                                        if a_text.startswith('\n'):
                                            a_text = a_text[1:]
                                        link_info = [a_text.strip(), a_href]
                                    else:
                                        link_info = [' ']
                                value = ': '.join(link_info)
                        function_info_values[param] = trim_blank_lines(value)
                tables_lines = []
                tables_soup = function.find_all('div', {'class': 'data-flow-entry-root'})
                for table_soup in tables_soup:
                    lines = {}
                    header_file_name = table_soup.find_all('span', {'class': 'data-flow-entry-header-file-name'})[0].text
                    header_type = table_soup.find_all('span', {'class': 'data-flow-entry-header-type'})[0].text
                    code_lines_soup = table_soup.find_all('div', {'class': 'data-flow-entry-code-line-root'})
                    for code_line_soup in code_lines_soup:
                        line_number = code_line_soup.find_all('span', {'class': 'data-flow-entry-code-line-number'})[0].text
                        line_content = code_line_soup.find_all('pre', {'class': 'data-flow-entry-code-line-content'})[0]
                        line_text = line_content.text
                        bold_text = line_content.find('span', {'class': ['code-line-part-EntryPoint',
                                                                         'code-line-part-DataEntryPoint',
                                                                         'code-line-part-DataOperation',
                                                                         'code-line-part-VulnerableCode']})
                        if bold_text:
                            line_text = line_text + '      <------'
                        lines[line_number] = line_text
                    tables_lines.append({'lines': lines,
                                        'header_file_name': header_file_name,
                                        'header_type': header_type})
                #  format strings
                str_code_blocks = []
                for table_lines in tables_lines:
                    table_markdown_str = '{{code:title={} - {}|borderStyle=solid}}  \n{}  \n{{code}}'
                    code_lines = ''
                    for key, value in table_lines['lines'].items():
                        code_lines += '{} {}  \n'.format(key, value)
                    str_code_blocks.append(table_markdown_str.format(table_lines['header_file_name'],
                                                                table_lines['header_type'],
                                                                code_lines))
                data_flow_panel_str = ''
                for str_code_block in str_code_blocks:
                    if data_flow_panel_str:
                        # add arrow
                        data_flow_panel_str += '  \n  \n|{}|  \n  \n'.format(chr(129147))
                    data_flow_panel_str += str_code_block
                function_info_values_str = ''
                for param, value in detail_info_values.items():
                    if param not in function_info_values:
                        value = value.replace('\n                       ', ': ')\
                                .replace('|', '&#124; ').replace('{', '\{').replace('}', '\}')
                        str_line = '  \n  \n|| *{}* | *{}* |'.format(param, value)
                        function_info_values_str = str_line
                for param, value in function_info_values.items():
                    value = value.replace('*', '\*').replace('|', '&#124; ').replace('{', '\{')\
                        .replace('}', '\}')
                    str_line = '|| *{}* | {} |'.format(param, value)
                    str_line = str_line.replace('  ', '')
                    function_info_values_str += '  \n' + str_line
                function_full_info_str = function_info_values_str + '\n  \n '
                if data_flow_panel_str:
                    function_full_info_str += '  \n {panel:title=Data Flow:|borderStyle=dashed|borderColor' \
                                             '=#ccc|titleBGColor=#F7D6C1|bgColor=#FFFFCE}  \n  \n' + data_flow_panel_str \
                                             + '  \n  \n {panel}  \n  \n'
                function_blocks_strs.append(function_full_info_str)
            description = ' \n \n{}:  \n  \n{}  \n  \n'.format(title, vulnerability_info.strip())
            dup_key = title + ' in file: ' + file_path
            # Add finding data to de-duplication store
            if dup_key not in dupes:
                dupes[dup_key] = Finding(
                    title=title + short_file_path,
                    description=description,
                    tool='PTAI',
                    active=False,
                    verified=False,
                    severity=severity.title(),
                    file_path=file_path,
                    steps_to_reproduce=function_blocks_strs,
                    static_finding=True
                )
            else:
                dupes[dup_key].finding["steps_to_reproduce"].extend(function_blocks_strs)
        self.items = dupes.values()
Code Example #15
    def __init__(self, file, test):
        parser = le.XMLParser(resolve_entities=False, huge_tree=True)
        nscan = le.parse(file, parser)
        root = nscan.getroot()

        if 'nmaprun' not in root.tag:
            raise NamespaceErr(
                "This doesn't seem to be a valid Nmap xml file.")
        dupes = {}

        for host in root.iter("host"):
            ip = host.find("address[@addrtype='ipv4']").attrib['addr']
            fqdn = None
            if host.find("hostnames/hostname[@type='PTR']") is not None:
                fqdn = host.find(
                    "hostnames/hostname[@type='PTR']").attrib['name']

            for os in root.iter("os"):
                if ip is not None:
                    hostInfo += "IP Address: %s\n" % ip
                if fqdn is not None:
                    hostInfo += "FQDN: %s\n" % fqdn
                for osv in os_elem.iter('osmatch'):
                    if 'name' in osv.attrib:
                        hostInfo += "Host OS: %s\n" % osv.attrib['name']
                    if 'accuracy' in osv.attrib:
                        hostInfo += "Accuracy: {0}%\n".format(
                            osv.attrib['accuracy'])
                hostInfo += "\n"
            for portelem in host.xpath("ports/port[state/@state='open']"):
                port = portelem.attrib['portid']
                protocol = portelem.attrib['protocol']

                title = f"Open port: {ip}:{port}/{protocol}"
                description = hostInfo
                description += f"Port: {port}\n"
                serviceinfo = ""

                if portelem.find('service') is not None:
                    if 'product' in portelem.find('service').attrib:
                        serviceinfo += "Product: %s\n" % portelem.find(
                            'service').attrib['product']

                    if 'version' in portelem.find('service').attrib:
                        serviceinfo += "Version: %s\n" % portelem.find(
                            'service').attrib['version']

                    if 'extrainfo' in portelem.find('service').attrib:
                        serviceinfo += "Extra Info: %s\n" % portelem.find(
                            'service').attrib['extrainfo']

                    description += serviceinfo

                description += '\n\n'

                severity = "Info"

                dupe_key = f'{port}_{protocol}_{ip}'
                if dupe_key in dupes:
                    find = dupes[dupe_key]
                    if description is not None:
                        find.description += description
                else:
                    find = Finding(
                        title=title,
                        tool="NMAP",
                        test=test,
                        active=False,
                        verified=False,
                        description=description,
                        severity=severity,
                        numerical_severity=Finding.get_numerical_severity(
                            severity))
                    find.unsaved_endpoints.append(f'{ip}:{port}/{protocol}')
                    dupes[dupe_key] = find
        self.items = dupes.values()
Code Example #16
File: parser.py Project: hunkom/dusty
 def __init__(self, filename, test, devdeps):
     dupes = dict()
     find_date = None
     self.items = []
     if not os.path.exists(filename):
         return
      with open(filename) as f:
          data = json.load(f)['data']
     components_data = {}
     for file_results in data:
         file_path = file_results.get('file')
         for version_results in file_results.get('results'):
             component = version_results.get('component')
             if component not in devdeps:
                 if component not in components_data:
                     components_data[component] = \
                         {'versions': set(), 'descriptions': {}, 'references': {},
                          'file_paths': {}, 'version_to_update': '0', 'severity': 'Info'}
                 components_data[component]['versions'].add(version_results.get('version'))
                 for vulnerability in version_results.get('vulnerabilities'):
                     summary = vulnerability.get('identifiers').get('summary')
                     if summary not in components_data[component]['file_paths']:
                         components_data[component]['file_paths'][summary] = set()
                         components_data[component]['references'][summary] = set()
                     components_data[component]['file_paths'][summary].add(file_path)
                     for reference in vulnerability.get('info'):
                          if reference not in components_data[component]['references'][summary]:
                             components_data[component]['references'][summary].add(reference)
                             if constants.NVD_URL in reference:
                                 url_text = requests.get(reference).text
                                 soup = BeautifulSoup(url_text, 'html.parser')
                                  recommendation = soup.find_all('a', {'id': 'showCPERanges'})
                                  if recommendation:
                                      ver_res = re.findall(r'versions up to \(excluding\)(.*)',
                                                           recommendation[0].attrs['data-range-description'])
                                     if ver_res:
                                         ver = ver_res[0].strip()
                                         if (LooseVersion(components_data[component]['version_to_update'])
                                                 < LooseVersion(ver)):
                                             components_data[component]['version_to_update'] = ver
                                 description = soup.find_all('p', {'data-testid': 'vuln-description'})
                                 if description:
                                     components_data[component]['descriptions'][summary] = description[0].text
                     cur_severity = vulnerability.get('severity').title()
                     if constants.SEVERITIES.get(components_data[component]['severity']) \
                             > constants.SEVERITIES.get(cur_severity):
                         components_data[component]['severity'] = cur_severity
     format_str = '  \n**{}**:  {}\n  \n'
     for key, value in components_data.items():
         title = 'Update {}'.format(key)
         if value.get('version_to_update') != '0':
             title += ' to version {}'.format(value.get('version_to_update'))
         severity = value.get('severity')
         description = '  \n'.join([format_str.format(key, val)
                                           for key, val in value.get('descriptions').items()])
         references = ''
         for ref_key, ref_val in value.get('references').items():
             _references = ','.join(['  \n- {}'.format(x) for x in ref_val]) + '  \n'
             references += format_str.format(ref_key, _references)
         file_path = ''
         for path_key, path_val in value.get('file_paths').items():
             _paths = ','.join(['  \n- {}'.format(x) for x in path_val]) + '  \n'
             file_path += format_str.format(path_key, _paths)
         dupes[title] = Finding(title=title,
                                   tool=test,
                                   active=False,
                                   verified=False,
                                   description=description,
                                   severity=severity,
                                   file_path=file_path,
                                   date=find_date,
                                   references=references,
                                   static_finding=True)
     self.items = dupes.values()
Code Example #17
File: parser.py Project: hunkom/dusty
    def __init__(self, filename, test):
        dupes = dict()
        self.items = ()

        if filename is None:
            return

        tree = ET.parse(filename)
        root = tree.getroot()
        new_root = root.find('niktoscan')
        scan = new_root.find('scandetails')

        for item in scan.findall('item'):
            # Title
            titleText = None
            description = item.find("description").text
            # Cut the title down to the first sentence
            sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s',
                                 description)
            if len(sentences) > 0:
                titleText = sentences[0][:900]
            else:
                titleText = description[:900]

            # Url
            ip = item.find("iplink").text
            # Remove the port numbers for 80/443
            ip = ip.replace(":80", "")
            ip = ip.replace(":443", "")

            # Severity
            severity = "Info"  # Nikto doesn't assign severity, default to Info

            # Description
            description = "\nHost: " + ip + "\n" + \
                item.find("description").text
            mitigation = "N/A"
            impact = "N/A"
            references = "N/A"
            print(f"Description: {description}")
            dupe_key = hashlib.md5(description.encode('utf-8')).hexdigest()

            if dupe_key in dupes:
                finding = dupes[dupe_key]
                if finding.description:
                    finding.description = finding.description + "\nHost:" + ip + "\n" + description
                self.process_endpoints(finding, ip)
                dupes[dupe_key] = finding
            else:

                finding = Finding(
                    title=titleText,
                    tool='Nikto',
                    test=test,
                    active=False,
                    verified=False,
                    description=description,
                    severity=severity,
                    numerical_severity=Finding.get_numerical_severity(
                        severity),
                    mitigation=mitigation,
                    impact=impact,
                    references=references,
                    url='N/A',
                    dynamic_finding=True)

                dupes[dupe_key] = finding
                self.process_endpoints(finding, ip)

        self.items = dupes.values()
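
The title is cut down to the first sentence with a regex that tries not to split on abbreviations. A quick demo on a fabricated description:

import re

description = ("The X-XSS-Protection header is not defined. "
               "This could allow some forms of XSS.")
sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', description)
print(sentences[0][:900])  # The X-XSS-Protection header is not defined.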
Code Example #18
 def __init__(self, filename, test, deps):
     dupes = dict()
     find_date = None
     self.items = []
     if not os.path.exists(filename):
         return
      with open(filename) as f:
          data = json.load(f)
     advisories = data.get('advisories')
     for action in data['actions']:
         module = action.get('module')
         if module in deps:
             EXTENDED_SEVERITIES = constants.SEVERITIES.copy()
             EXTENDED_SEVERITIES['Moderate'] = 2
             unique_ids = {}
             tmp_values = {
                 'file_paths': {},
                 'descriptions': [],
                 'urls': [],
                 'references_list': [],
                 'cwes': []
             }
             severity = 'Info'
             format_str = '  \n**{}**:  {}\n  \n'
             for resolve in action.get('resolves'):
                 id = resolve.get('id')
                 if id not in unique_ids:
                     advisory = advisories.get(str(id))
                     unique_ids[id] = advisory.get('title')
                     tmp_values['file_paths'][unique_ids[id]] = []
                     current_severity = advisory.get('severity').title()
                     tmp_values['cwes'].append(advisory.get('cwe'))
                     if EXTENDED_SEVERITIES.get(current_severity) \
                             < EXTENDED_SEVERITIES.get(severity):
                         severity = current_severity
                     if advisory.get('url'):
                         tmp_values['urls'].append(
                             format_str.format(unique_ids[id],
                                               advisory.get('url')))
                     if advisory.get('references'):
                         tmp_values['references_list'].append(
                             format_str.format(unique_ids[id],
                                               advisory.get('references')))
                         tmp_values['descriptions'].append(
                             format_str.format(unique_ids[id],
                                               advisory.get('overview')))
                 if id not in tmp_values['file_paths']:
                     tmp_values['file_paths'][unique_ids[id]].append(
                         '\n- {}'.format(resolve.get('path')))
             file_path = ''
             for key in tmp_values['file_paths']:
                 file_path = file_path + format_str.format(
                     key, ',  '.join(tmp_values['file_paths'][key]))
             rehearsal_str = ',  \n'
             url = rehearsal_str.join(tmp_values['urls'])
             references = rehearsal_str.join(tmp_values['references_list'])
             description = rehearsal_str.join(tmp_values['descriptions'])
              cwe = rehearsal_str.join(tmp_values['cwes'])
             title = ' '.join([
                 action.get('action', ''),
                 action.get('module', ''),
                 action.get('target', '')
             ])
             if title not in dupes:
                 dupes[title] = Finding(title=title,
                                        tool=test,
                                        active=False,
                                        verified=False,
                                        description=description,
                                        severity=severity,
                                        file_path=file_path,
                                        url=url,
                                        date=find_date,
                                        references=references,
                                         cwe=cwe,
                                        static_finding=True)
     self.items = dupes.values()
Code Example #19
File: parser.py Project: hunkom/dusty
    def __init__(self, file, test=None):
        parser = le.XMLParser(resolve_entities=False, huge_tree=True)
        w3scan = le.parse(file, parser)
        root = w3scan.getroot()
        dupes = {}
        for vulnerability in root.findall("vulnerability"):
            name = vulnerability.attrib["name"]
            severity = vulnerability.attrib["severity"]
            description = "%s are:\n\n" % vulnerability.find(
                "description").text.split("are:")[0]
            transactions = vulnerability.find("http-transactions")
            if transactions is not None:
                transactions = transactions.findall("http-transaction")
            for transaction in transactions:
                request = transaction.find("http-request")
                response = transaction.find("http-response")
                status = request.find("status").text.split(" ")
                response_code = response.find("status").text.split(" ")[1]
                http_method = status[0]
                request_url = status[1]
                data = ""
                for part in [request, response]:
                    headers = [
                        f"{h.attrib['field']} -> {h.attrib['content']}"
                        for h in part.find("headers").findall("header")
                    ]
                    headers = "\n".join(headers)
                    request_body = part.find("body")
                    if request_body.attrib['content-encoding'] == "base64":
                        if request_body.text:
                            request_body = base64.b64decode(
                                request_body.text).decode("utf-8",
                                                          errors="ignore")
                        else:
                            request_body = ""
                    else:
                        request_body = request_body.text if request_body.text else ""
                    if not data:
                        data = f"Request: {request_url} {http_method} {response_code} \n\n"
                    else:
                        data += "Response: \n"
                    data += f"Headers: {headers}\n\nBody:{request_body}\n\n"
                dupe_url = urlparse(request_url)
                # Creating the dupe path; need to think of a more intelligent implementation
                dupe_path = dupe_url.path[:dupe_url.path.index(
                    "%")] if "%" in dupe_url.path else dupe_url.path
                dupe_path = dupe_path[:dupe_path.index(
                    "+")] if "+" in dupe_path else dupe_path
                dupe_path = dupe_path[:dupe_path.index(
                    ".")] if "." in dupe_path else dupe_path
                dupe_path = dupe_path[:dupe_path.rindex(
                    "/")] if "/" in dupe_path else dupe_path
                dupe_url = f"{dupe_url.scheme}://{dupe_url.netloc}{dupe_path}"
                dupe_code = f"{str(response_code)[0]}xx"
                dupe_key = hashlib.md5(
                    f"{name} {dupe_url} {http_method} {dupe_code}".encode(
                        'utf-8')).hexdigest()
                if dupe_key not in dupes:
                    dupes[dupe_key] = Finding(
                        title=f"{name} {dupe_url} {dupe_code}",
                        tool='W3AF',
                        test=test,
                        description=description,
                        severity=severity,
                        numerical_severity=Finding.get_numerical_severity(
                            severity),
                        references=data,
                        dynamic_finding=True)
                elif data not in dupes[dupe_key].finding['references']:
                    dupes[dupe_key].finding['references'] += data
                if request_url not in dupes[dupe_key].unsaved_endpoints:
                    dupes[dupe_key].finding[
                        'description'] += f"- {request_url}\n\n"

                    dupes[dupe_key].unsaved_endpoints.append(request_url)
        self.items = dupes.values()
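
The dupe path is derived by successively truncating the URL path at the first '%', '+' and '.', then at the last '/'. For example:

from urllib.parse import urlparse

dupe_url = urlparse("http://host/app/items/detail.php?id=1%27")
dupe_path = dupe_url.path  # /app/items/detail.php
dupe_path = dupe_path[:dupe_path.index("%")] if "%" in dupe_path else dupe_path
dupe_path = dupe_path[:dupe_path.index("+")] if "+" in dupe_path else dupe_path
dupe_path = dupe_path[:dupe_path.index(".")] if "." in dupe_path else dupe_path
dupe_path = dupe_path[:dupe_path.rindex("/")] if "/" in dupe_path else dupe_path
print(f"{dupe_url.scheme}://{dupe_url.netloc}{dupe_path}")  # http://host/app/items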
Code Example #20
File: parser.py Project: LifeDJIK/dusty
    def __init__(self, spotbugs_xml, test):
        self.spotbugs_xml = spotbugs_xml

        dupes = dict()
        find_date = None

        logging.debug("Spotbugs parser initialization")

        bugs_details = self.extract_bugs_details()

        context = etree.iterparse(self.spotbugs_xml,
                                  events=('end', ),
                                  tag='BugInstance')

        for _, item in context:
            title = item.findtext('ShortMessage')
            description = item.findtext('LongMessage')
            category = item.get('category')
            issue_type = item.get('type')
            severity = item.get('priority')
            classname = item.find('Class').get('classname')
            filename = item.find('Class').find('SourceLine').get('sourcefile')
            file_path = item.find('Class').find('SourceLine').get('sourcepath')
            line = item.find('Class').find('SourceLine').findtext('Message')
            steps_to_reproduce = '\n' * 2

            # TODO: rewrite this to avoid <IndexError: list index out of range> errors
            for i, element in enumerate(item.findall('Method')):
                steps_to_reproduce += f"Classname: {classname}\t{element.findtext('Message')}\t"
                try:
                    steps_to_reproduce += f"{sanitize(item.findall('SourceLine')[i].findtext('Message'))}"
                except IndexError:
                    pass

            details = bugs_details.get(issue_type)

            if details:
                description += f'\n\n Details: {md(details)}'

            severity_level = SEVERITY_TYPE.get(int(severity), "")

            dupe_key = hashlib.md5(f'{title} {issue_type} {category}'.encode(
                'utf-8')).hexdigest()

            if file_path:
                dupe_key += f' {file_path}'

            if filename:
                title += f' in {filename}'

            if dupe_key not in dupes:
                dupes[dupe_key] = Finding(
                    title=title,
                    tool=category.lower().replace(" ", "_"),
                    active=False,
                    verified=False,
                    description=description,
                    severity=severity_level,
                    numerical_severity=severity,
                    mitigation=False,
                    impact=False,
                    references=False,
                    file_path=file_path,
                    line=line,
                    url='N/A',
                    date=find_date,
                    steps_to_reproduce=f'<pre>{issue_type} issue {steps_to_reproduce}</pre>',
                    static_finding=True)
            else:
                dupes[dupe_key].finding['steps_to_reproduce'] += \
                    f"<pre>{steps_to_reproduce}</pre>"

            item.clear()
            while item.getprevious() is not None:
                del item.getparent()[0]

        del context

        self.items = dupes.values()

        logging.debug("Spotbugs output parsing done")