Example #1
 def gen_result(self):
     """
     Generate results
     """
     logger.log('DEBUG', 'Generating final results')
     if not self.subdomains:  # this module found no subdomains at all
         logger.log('DEBUG', f'{self.source} module result is empty')
         result = {
             'id': None,
             'alive': None,
             'request': None,
             'resolve': None,
             'url': None,
             'subdomain': None,
             'port': None,
             'level': None,
             'cname': None,
             'ip': None,
             'public': None,
             'cdn': None,
             'status': None,
             'reason': None,
             'title': None,
             'banner': None,
             'header': None,
             'history': None,
             'response': None,
             'ip_times': None,
             'cname_times': None,
             'ttl': None,
             'cidr': None,
             'asn': None,
             'org': None,
             'addr': None,
             'isp': None,
             'resolver': None,
             'module': self.module,
             'source': self.source,
             'elapse': self.elapse,
             'find': None
         }
         self.results.append(result)
     else:
         for subdomain in self.subdomains:
             url = 'http://' + subdomain
             level = subdomain.count('.') - self.domain.count('.')
             info = self.infos.get(subdomain)
             if info is None:
                 info = dict()
             cname = info.get('cname')
             ip = info.get('ip')
             ip_times = info.get('ip_times')
             cname_times = info.get('cname_times')
             ttl = info.get('ttl')
             if isinstance(cname, list):
                 cname = ','.join(cname)
                 ip = ','.join(ip)
                 ip_times = ','.join([str(num) for num in ip_times])
                 cname_times = ','.join([str(num) for num in cname_times])
                 ttl = ','.join([str(num) for num in ttl])
             result = {
                 'id': None,
                 'alive': info.get('alive'),
                 'request': info.get('request'),
                 'resolve': info.get('resolve'),
                 'url': url,
                 'subdomain': subdomain,
                 'port': 80,
                 'level': level,
                 'cname': cname,
                 'ip': ip,
                 'public': info.get('public'),
                 'cdn': info.get('cdn'),
                 'status': None,
                 'reason': info.get('reason'),
                 'title': None,
                 'banner': None,
                 'header': None,
                 'history': None,
                 'response': None,
                 'ip_times': ip_times,
                 'cname_times': cname_times,
                 'ttl': ttl,
                 'cidr': info.get('cidr'),
                 'asn': info.get('asn'),
                 'org': info.get('org'),
                 'addr': info.get('addr'),
                 'isp': info.get('isp'),
                 'resolver': info.get('resolver'),
                 'module': self.module,
                 'source': self.source,
                 'elapse': self.elapse,
                 'find': len(self.subdomains)
             }
             self.results.append(result)
Example #2
def match_subdomains(domain, text):
    subdomains = utils.match_subdomains(domain, text, fuzzy=False)
    logger.log('DEBUG', f'matched subdomains: {subdomains}')
    return subdomains
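
A minimal self-contained sketch of the same idea (the regex here is an illustrative assumption, not OneForAll's actual utils.match_subdomains implementation, which also supports a fuzzy mode):

import re

def match_subdomains_sketch(domain, text):
    # Hypothetical pattern: one or more DNS labels followed by the
    # registered domain (assumption for illustration only).
    pattern = re.compile(
        r'(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+'
        + re.escape(domain))
    return set(pattern.findall(text))

# Usage:
print(sorted(match_subdomains_sketch(
    'example.com', 'see www.example.com and api.dev.example.com')))
# ['api.dev.example.com', 'www.example.com']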
Example #3
 def get_resp_by_url(self, table_name, url):
     table_name = table_name.replace('.', '_')
     sql = f'select response from "{table_name}" where url = "{url}"'
     logger.log('TRACE', f'Get response data from {url}')
     return self.query(sql).scalar()
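
Note that both table_name and url are interpolated directly into the SQL string above. A hedged sketch of a parameterized variant using the standard sqlite3 module (an assumption; the project's own self.query helper is not shown here):

import sqlite3

def get_resp_by_url_sketch(conn: sqlite3.Connection, table_name: str, url: str):
    table_name = table_name.replace('.', '_')
    # Identifiers cannot be bound as parameters, so whitelist their characters;
    # the url value is passed as a bound parameter instead of an f-string.
    if not table_name.replace('_', '').isalnum():
        raise ValueError(f'suspicious table name: {table_name}')
    sql = f'select response from "{table_name}" where url = ?'
    row = conn.execute(sql, (url,)).fetchone()
    return row[0] if row else None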
Example #4
def deal_output(output_path):
    logger.log('INFOR', 'Processing resolved results')
    records = dict()  # holds all domain resolution records
    ip_asn = IPAsnInfo()
    ip_geo = IpGeoInfo()
    db_path = settings.data_storage_dir.joinpath('ip2region.db')
    ip_reg = IpRegInfo(db_path)
    with open(output_path) as fd:
        for line in fd:
            line = line.strip()
            try:
                items = json.loads(line)
            except Exception as e:
                logger.log('ERROR', e.args)
                logger.log('ERROR',
                           f'Error parsing line {line}, skipping it')
                continue
            record = dict()
            record['resolver'] = items.get('resolver')
            qname = items.get('name')[:-1]  # strip the trailing dot
            status = items.get('status')
            if status != 'NOERROR':
                record['alive'] = 0
                record['resolve'] = 0
                record['reason'] = status
                records[qname] = record
                continue
            data = items.get('data')
            if 'answers' not in data:
                record['alive'] = 0
                record['resolve'] = 0
                record['reason'] = 'NOANSWER'
                records[qname] = record
                continue
            flag = False
            cname = list()
            ips = list()
            public = list()
            ttls = list()
            cidrs = list()
            asns = list()
            orgs = list()
            locs = list()
            regs = list()
            answers = data.get('answers')
            for answer in answers:
                if answer.get('type') == 'A':
                    flag = True
                    cname.append(answer.get('name')[:-1])  # strip the trailing dot
                    ip = answer.get('data')
                    ips.append(ip)
                    ttl = answer.get('ttl')
                    ttls.append(str(ttl))
                    is_public = utils.ip_is_public(ip)
                    public.append(str(is_public))
                    asn_info = ip_asn.find(ip)
                    cidrs.append(asn_info.get('cidr'))
                    asns.append(asn_info.get('asn'))
                    orgs.append(asn_info.get('org'))
                    loc = f'{ip_geo.get_country_long(ip)} ' \
                          f'{ip_geo.get_region(ip)} ' \
                          f'{ip_geo.get_city(ip)}'
                    locs.append(loc)
                    reg = ip_reg.memory_search(ip).get('region').decode(
                        'utf-8')
                    regs.append(reg)
                    record['resolve'] = 1
                    record['reason'] = status
                    record['cname'] = ','.join(cname)
                    record['content'] = ','.join(ips)
                    record['public'] = ','.join(public)
                    record['ttl'] = ','.join(ttls)
                    record['cidr'] = ','.join(cidrs)
                    record['asn'] = ','.join(asns)
                    record['org'] = ','.join(orgs)
                    record['ip2location'] = ','.join(locs)
                    record['ip2region'] = ','.join(regs)
                    records[qname] = record
            if not flag:
                record['alive'] = 0
                record['resolve'] = 0
                record['reason'] = 'NOARECORD'
                records[qname] = record
    return records
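
deal_output expects newline-delimited JSON in the shape massdns emits: one object per query with name, status and data.answers fields. A small demo of that parsing path on a fabricated record (the concrete values are made up for illustration):

import json

sample = ('{"name": "www.example.com.", "status": "NOERROR", '
          '"data": {"answers": [{"type": "A", "name": "www.example.com.", '
          '"data": "93.184.216.34", "ttl": 300}]}}')
items = json.loads(sample)
qname = items.get('name')[:-1]  # strip the trailing dot, as above
answers = items.get('data', {}).get('answers', [])
ips = [a.get('data') for a in answers if a.get('type') == 'A']
print(qname, ips)  # www.example.com ['93.184.216.34']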
Example #5
    def run(self):
        start = time.time()
        logger.log('INFOR', f'Start running {self.source} module')
        if isinstance(self.targets, set):
            self.subdomains = self.targets
        else:
            self.subdomains = utils.get_domains(self.target, self.targets)
        self.format = utils.check_format(self.format, len(self.subdomains))
        timestamp = utils.get_timestamp()
        name = f'takeover_check_result_{timestamp}'
        self.path = utils.check_path(self.path, name, self.format)
        if self.subdomains:
            logger.log('INFOR', 'Checking subdomain takeover')
            self.fingerprints = get_fingerprint()
            self.results.headers = ['subdomain', 'cname']
            # build the queue of subdomains to check
            for domain in self.subdomains:
                self.subdomainq.put(domain)
            # checker threads
            for _ in range(self.thread):
                check_thread = Thread(target=self.check, daemon=True)
                check_thread.start()
            # progress thread
            progress_thread = Thread(target=self.progress, daemon=True)
            progress_thread.start()

            self.subdomainq.join()
            self.save()
        else:
            logger.log('FATAL', 'Failed to obtain domain')
        end = time.time()
        elapse = round(end - start, 1)
        logger.log('ALERT', f'{self.source} module took {elapse} seconds, '
                            f'found {len(self.results)} subdomains vulnerable to takeover')
        logger.log('INFOR', f'Subdomain takeover results: {self.path}')
        logger.log('INFOR', f'Finished {self.source} module')
Example #6
def save_db(name, data):
    logger.log('INFOR', 'Saving cdn check results')
    utils.save_db(name, data, 'cdn')
Example #7
def save_subdomains(save_path, subdomain_list):
    logger.log('DEBUG', 'Saving resolved subdomains')
    subdomain_data = '\n'.join(subdomain_list)
    if not utils.save_data(save_path, subdomain_data):
        logger.log('FATAL', 'Error saving resolved subdomains')
        exit(1)
Example #8
    def _init_rules(self):
        self.text_to_find = []
        self.regex_to_find = []
        self.text_to_exclude = []
        self.regex_to_exclude = []
        self.rules_set = set()
        self.rules_set_root_only = set()

        p_tag = re.compile('{tag="(.*?)"}')
        p_status = re.compile(r'{status=(\d{3})}')
        p_content_type = re.compile('{type="(.*?)"}')
        p_content_type_no = re.compile('{type_no="(.*?)"}')

        _files = self.args.rule_files or glob.glob('rules/*.txt')
        # read the rule files
        for rule_file in _files:
            with open(rule_file, 'r', encoding='utf-8') as infile:
                vul_type = os.path.basename(rule_file)[:-4]
                for url in infile.readlines():
                    url = url.strip()
                    if url.startswith('/'):
                        _ = p_tag.search(url)
                        tag = _.group(1) if _ else ''  # empty when no tag directive

                        _ = p_status.search(url)
                        status = int(_.group(1)) if _ else 0

                        _ = p_content_type.search(url)
                        content_type = _.group(1) if _ else ''

                        _ = p_content_type_no.search(url)
                        content_type_no = _.group(1) if _ else ''

                        root_only = '{root_only}' in url
                        rule = (url.split()[0], tag, status, content_type,
                                content_type_no, root_only, vul_type)

                        if root_only:
                            if rule not in self.rules_set_root_only:
                                self.rules_set_root_only.add(rule)
                            else:
                                logger.log(
                                    'ERROR',
                                    f'Duplicated root only rule: {rule}')
                        else:
                            if rule not in self.rules_set:
                                self.rules_set.add(rule)
                            else:
                                logger.log('ERROR', f'Duplicated rule: {rule}')

        # read the text-matching white/black lists
        re_text = re.compile('{text="(.*)"}')
        re_regex_text = re.compile('{regex_text="(.*)"}')
        file_path = 'rules/white.list'
        if not os.path.exists(file_path):
            logger.log('ERROR', f'File does not exist: {file_path}')
            return
        for _line in open(file_path, 'r', encoding='utf-8'):
            _line = _line.strip()
            if not _line or _line.startswith('#'):
                continue
            _m = re_text.search(_line)
            if _m:
                self.text_to_find.append(_m.group(1))
            else:
                _m = re_regex_text.search(_line)
                if _m:
                    self.regex_to_find.append(re.compile(_m.group(1)))

        file_path = 'rules/black.list'
        if not os.path.exists(file_path):
            logger.log('ERROR', f'File does not exist: {file_path}')
            return
        for _line in open(file_path, 'r', encoding='utf-8'):
            _line = _line.strip()
            if not _line or _line.startswith('#'):
                continue
            _m = re_text.search(_line)
            if _m:
                self.text_to_exclude.append(_m.group(1))
            else:
                _m = re_regex_text.search(_line)
                if _m:
                    self.regex_to_exclude.append(re.compile(_m.group(1)))
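
The rule files parsed above carry inline {...} directives on each line. A self-contained demo of how one line decomposes under the same regular expressions (the sample line is illustrative, not taken from the shipped rule files):

import re

p_tag = re.compile('{tag="(.*?)"}')
p_status = re.compile(r'{status=(\d{3})}')

line = '/console/login/LoginForm.jsp {tag="WebLogic"} {status=200} {root_only}'
path = line.split()[0]
m = p_tag.search(line)
tag = m.group(1) if m else ''
m = p_status.search(line)
status = int(m.group(1)) if m else 0
root_only = '{root_only}' in line
print(path, tag, status, root_only)
# /console/login/LoginForm.jsp WebLogic 200 True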
Example #9
    def scan_worker(self, item):
        if not self.flag and time.time() - self.start_time > self.timeout:
            self.flag = True
            self.url_list.clear()
            logger.log('ALERT',
                       '[ERROR] Timed out task: %s' % self.base_url)
            return
        url = url_description = tag = status_to_match = None
        content_type = content_type_no = root_only = vul_type = prefix = None

        try:
            if len(item) == 2:  # Script Scan
                check_func = getattr(item[0], 'do_check')
                check_func(self, item[1])
            else:
                # ({'prefix': '', 'full_url': '/trace'}, 'Spring boot serverProperties', 200, '', '', True, 'springboot')
                url_description, tag, status_to_match, content_type, content_type_no, root_only, vul_type = item
                prefix = url_description['prefix']

                # TODO: this part is still a bit problematic
                url = url_description['full_url']
                '''
                {sub} is set in the rule files and is expanded using the
                current domain: e.g. for the rule {sub}.sql on domain
                baidu.com, the probed path becomes baidu.sql
                '''
                if url.find('{sub}') >= 0:
                    if not self.domain_sub:
                        return
                    url = url.replace('{sub}', self.domain_sub)

        except Exception as e:
            logger.log('ERROR', '[scan_worker.1][%s  %s]' % (item[0], item[1]))
            logger.log('ERROR', traceback.format_exc())
            return
        if not item or not url:
            return

        # start rule-based path probing
        try:

            status, headers, html_doc = self.http_request(url)
            cur_content_type = headers.get('content-type', '')
            cur_content_length = headers.get('content-length', len(html_doc))

            if self.find_exclude_text(html_doc):  # excluded by blacklist rules
                return
            if 0 <= int(cur_content_length) <= 10:  # text too short
                return
            if cur_content_type.find('image/') >= 0:  # exclude image
                return

            # a non-json content_type is specified but the response is json
            if content_type and content_type != 'json' and cur_content_type.find(
                    'json') >= 0:
                return
            # content type mismatch
            if (content_type and cur_content_type.find(content_type) < 0) or (
                    content_type_no
                    and cur_content_type.find(content_type_no) >= 0):
                return
            if tag and html_doc.find(tag) < 0:
                return  # tag mismatch

            # match the whitelist text rules against the page
            if self.find_text(html_doc):
                valid_item = True
            else:
                # status code check
                if status_to_match == 206 and status != 206:
                    return
                if status_to_match in (200, 206) and status in (200, 206):
                    valid_item = True
                elif status_to_match and status != status_to_match:
                    return
                elif status in (403, 404) and status != status_to_match:
                    return
                else:
                    valid_item = True

                if status == self._404_status and url != '/':
                    len_doc = len(html_doc)
                    len_sum = self.len_404_doc + len_doc
                    if len_sum == 0 or (0.4 <= float(len_doc) / len_sum <=
                                        0.6):
                        return

            if valid_item:
                m = re.search('<title>(.*?)</title>', html_doc)
                title = m.group(1) if m else ''
                if prefix not in self.results:
                    self.results[prefix] = []
                _ = {
                    'status': status,
                    'url': '%s%s' % (self.base_url, url),
                    'title': title,
                    'vul_type': vul_type
                }
                if _ not in self.results[prefix]:
                    self.results[prefix].append(_)
        except Exception as e:
            logger.log('ERROR', '[scan_worker.2][%s%s]' % (self.base_url, url))
            logger.log('ERROR', traceback.format_exc())
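
The final length check above is a soft-404 heuristic: when a response carries the same status as the recorded 404 page, it is discarded if its body length is close to the 404 baseline. Restated in isolation (a direct transcription of the ratio test, not new behavior):

def looks_like_known_404(len_doc, len_404_doc):
    # If len_doc / (len_doc + len_404_doc) falls in [0.4, 0.6], the two
    # bodies are within roughly 0.67x-1.5x of each other in size.
    len_sum = len_404_doc + len_doc
    return len_sum == 0 or 0.4 <= float(len_doc) / len_sum <= 0.6

print(looks_like_known_404(1000, 1100))  # True: close to the 404 baseline
print(looks_like_known_404(1000, 5000))  # False: clearly different content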
Example #10
    def http_request(self, url, timeout=20):
        try:
            if not url:
                url = '/'
            if not self.session:
                return -1, {}, ''

            resp = self.session.get(self.base_url + url,
                                    allow_redirects=False,
                                    headers=setting.default_headers,
                                    timeout=timeout,
                                    verify=False)

            headers = resp.headers
            status = resp.status_code

            # 403/401 bypass test
            if status in (403, 401):
                self.bypass_403(resp)

            # more than three 502s: give up on this site
            if status == 502:
                self.status_502_count += 1
                if self.status_502_count > 3:
                    self.url_list.clear()
                    try:
                        if self.session:
                            self.session.close()
                    except Exception as e:
                        pass
                    self.session = None
                    # logger.log('ALERT', 'Website 502: %s' % self.base_url)
            # on 301 permanent redirect, fetch the response again
            if status == 301:
                target = headers.get('Location')
                if not target.startswith('/file:'):
                    try:
                        resp = self.session.get(
                            URL(target, encoded=True),
                            headers=setting.default_headers,
                            allow_redirects=False,
                            timeout=timeout,
                            verify=False)
                        headers = resp.headers
                    except Exception as e:
                        logger.log('ERROR',
                                   f'{e},  {target}  {self.base_url + url}')

            # redirects are disabled above, but sometimes content only
            # appears after following the redirect
            if status == 302:
                new_url = headers["Location"]

                if new_url not in self._302_url:
                    resp = self.session.get(URL(new_url, encoded=True),
                                            headers=setting.default_headers,
                                            timeout=timeout,
                                            verify=False)
                    headers = resp.headers
                    self._302_url.add(new_url)

            html_doc = get_html(headers, resp)
            logger.log('DEBUG', f'--> {url}  {status, headers}')
            return status, headers, html_doc
        except requests.exceptions.RetryError as e:
            # logger.log('ERROR', repr(e))
            return -1, {}, ''
        except requests.exceptions.ReadTimeout:
            # logger.log('ERROR', 'request timed out')
            return -1, {}, ''
        except requests.exceptions.ConnectionError as e:
            # logger.log('ERROR', f'IP may be blocked  {repr(e)}    {self.base_url + url}')
            return -1, {}, ''
        except TypeError as e:
            # logger.log('ERROR', repr(e))
            return -1, {}, ''
        except Exception as e:
            # logger.log('ERROR', f'{repr(e)}   {self.base_url + url}')
            return -1, {}, ''
Example #11
    def enqueue(self, url):
        try:
            url = str(url)
        except Exception as e:
            return False
        try:
            # replace digits in the url with {num}: test1.baidu.com >> test{num}.baidu.com
            # this collapses URLs that differ only in numbers into one pattern
            url_pattern = re.sub(r'\d+', '{num}', url)

            if url_pattern in self.urls_processed or len(
                    self.urls_processed) >= self.links_limit:
                return False

            self.urls_processed.add(url_pattern)
            # logger.log('INFOR', 'Entered Queue: %s' % url_pattern)
            if self.args.crawl:  # crawl the site's <a> tags
                self.crawl(url)
            else:
                self.index_status, self.index_headers, self.index_html_doc = self.http_request(
                    '/')
                # hash the index page; comparing md5 values tells whether two pages are identical
                self.index_md5 = hashlib.md5(
                    self.index_html_doc.encode('utf-8')).hexdigest()

            if self._404_status != -1:  # valid web service
                # the web root is scanned with all rules (normal and root_only);
                # other directories are scanned with the normal rules only
                rule_set_to_process = [
                    self.rules_set, self.rules_set_root_only
                ] if url == '/' else [self.rules_set]
                # load rules
                for rule_set in rule_set_to_process:
                    for _ in rule_set:
                        # _  ('/scripts/samples', 'IIS', 200, '', '', True, 'iis')
                        try:
                            full_url = url.rstrip('/') + _[0]
                        except Exception as e:
                            logger.log('ERROR', f'{str(e)}')
                            continue
                        if full_url in self.urls_enqueued:
                            continue
                        url_description = {
                            'prefix': url.rstrip('/'),
                            'full_url': full_url
                        }
                        item = (url_description, _[1], _[2], _[3], _[4], _[5],
                                _[6])
                        self.url_list.append(item)
                        self.urls_enqueued.add(full_url)

            # if only a deep link like /asdd/asd/ was found, also enqueue
            # the parent directory /asdd/ for processing
            if url.count('/') >= 2:
                self.enqueue('/'.join(url.split('/')[:-2]) +
                             '/')  # sub folder enqueue

            if url != '/' and not self.no_scripts:
                for script in self.user_scripts:
                    self.url_list.append((script, url))

            return True
        except Exception as e:
            logger.log('ERROR', '[_enqueue.exception] %s' % str(e))
            return False
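
The digit-masking step at the top of enqueue() is what keeps the crawler from re-processing URLs that differ only in a number. A tiny standalone demo:

import re

seen = set()
for url in ('/news/1.html', '/news/2.html', '/about/'):
    # test1.baidu.com >> test{num}.baidu.com: digits collapse to one pattern
    pattern = re.sub(r'\d+', '{num}', url)
    if pattern in seen:
        print('skip   ', url)
        continue
    seen.add(pattern)
    print('enqueue', url)
# enqueue /news/1.html
# skip    /news/2.html
# enqueue /about/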
Example #12
    def run(self):
        """
        OneForAll running entrance

        :return: All subdomain results
        :rtype: list
        """
        print(oneforall_banner)
        dt = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        print(f'[*] Starting OneForAll @ {dt}\n')
        logger.log('DEBUG', 'Python ' + utils.python_version())
        logger.log('DEBUG', 'OneForAll ' + version)
        utils.check_dep()
        self.access_internet, self.in_china = utils.get_net_env()
        if self.access_internet and settings.enable_check_version:
            utils.check_version(version)
        logger.log('INFOR', 'Start running OneForAll')
        self.config_param()
        self.check_param()
        self.domains = utils.get_domains(self.target, self.targets)
        count = len(self.domains)
        logger.log('INFOR', f'Got {count} domains')
        if not count:
            logger.log('FATAL', 'Failed to obtain domain')
            exit(1)
        for domain in self.domains:
            self.domain = utils.get_main_domain(domain)
            self.main()
        if count > 1:
            utils.export_all(self.alive, self.fmt, self.path, self.datas)
        logger.log('INFOR', 'Finished OneForAll')
Example #13
    def main(self):
        """
        OneForAll main process

        :return: subdomain results
        :rtype: list
        """
        utils.init_table(self.domain)

        if not self.access_internet:
            logger.log(
                'ALERT', 'Cannot access the Internet, so OneForAll '
                'will skip the subdomain collection module!')
        else:
            self.enable_wildcard = wildcard.detect_wildcard(self.domain)

            collect = Collect(self.domain)
            collect.run()

        srv = BruteSRV(self.domain)
        srv.run()

        if self.brute:
            # Brute forcing generates a large number of DNS resolution
            # requests, which may cause errors in other network tasks
            brute = Brute(self.domain, word=True, export=False)
            brute.enable_wildcard = self.enable_wildcard
            brute.in_china = self.in_china
            brute.quite = True
            brute.run()

        utils.deal_data(self.domain)
        # Export results without resolve
        if not self.dns:
            self.data = self.export_data()
            self.datas.extend(self.data)
            return self.data

        self.data = utils.get_data(self.domain)

        # Resolve subdomains
        utils.clear_data(self.domain)
        self.data = resolve.run_resolve(self.domain, self.data)
        # Save resolve results
        resolve.save_db(self.domain, self.data)

        # Export results without HTTP request
        if not self.req:
            self.data = self.export_data()
            self.datas.extend(self.data)
            return self.data

        if self.enable_wildcard:
            # deal wildcard
            self.data = wildcard.deal_wildcard(self.data)

        # HTTP request
        utils.clear_data(self.domain)
        request.run_request(self.domain, self.data, self.port)

        # Finder module
        if settings.enable_finder_module:
            finder = Finder()
            finder.run(self.domain, self.data, self.port)

        # altdns module
        if settings.enable_altdns_module:
            altdns = Altdns(self.domain)
            altdns.run(self.data, self.port)

        # Information enrichment module
        if settings.enable_enrich_module:
            enrich = Enrich(self.domain)
            enrich.run()

        self.data = self.export_data()
        self.datas.extend(self.data)

        # Scan subdomain takeover
        if self.takeover:
            subdomains = utils.get_subdomains(self.data)
            takeover = Takeover(targets=subdomains)
            takeover.run()
        return self.data
Example #14
 def gen_result(self, find=0, brute=None, valid=0):
     """
     Generate results
     """
     logger.log('DEBUG', 'Generating final results')
     if not self.subdomains:  # this module found no subdomains at all
         logger.log('DEBUG', f'{self.source} module result is empty')
         result = {
             'id': None,
             'type': self.type,
             'alive': None,
             'request': None,
             'resolve': None,
             'new': None,
             'url': None,
             'subdomain': None,
             'level': None,
             'cname': None,
             'content': None,
             'public': None,
             'port': None,
             'status': None,
             'reason': None,
             'title': None,
             'banner': None,
             'header': None,
             'response': None,
             'times': None,
             'ttl': None,
             'resolver': None,
             'module': self.module,
             'source': self.source,
             'elapse': self.elapse,
             'find': find,
             'brute': brute,
             'valid': valid
         }
         self.results.append(result)
     else:
         for subdomain in self.subdomains:
             url = 'http://' + subdomain
             level = subdomain.count('.') - self.domain.count('.')
             record = self.records.get(subdomain)
             if record is None:
                 record = dict()
             resolve = record.get('resolve')
             request = record.get('request')
             alive = record.get('alive')
             if self.type != 'A':  # subdomains not found via A-record queries are treated as valid by default
                 resolve = 1
                 request = 1
                 alive = 1
             reason = record.get('reason')
             resolver = record.get('resolver')
             cname = record.get('cname')
             content = record.get('content')
             times = record.get('times')
             ttl = record.get('ttl')
             public = record.get('public')
             if isinstance(cname, list):
                 cname = ','.join(cname)
                 content = ','.join(content)
                 times = ','.join([str(num) for num in times])
                 ttl = ','.join([str(num) for num in ttl])
                 public = ','.join([str(num) for num in public])
             result = {
                 'id': None,
                 'type': self.type,
                 'alive': alive,
                 'request': request,
                 'resolve': resolve,
                 'new': None,
                 'url': url,
                 'subdomain': subdomain,
                 'level': level,
                 'cname': cname,
                 'content': content,
                 'public': public,
                 'port': 80,
                 'status': None,
                 'reason': reason,
                 'title': None,
                 'banner': None,
                 'header': None,
                 'response': None,
                 'times': times,
                 'ttl': ttl,
                 'resolver': resolver,
                 'module': self.module,
                 'source': self.source,
                 'elapse': self.elapse,
                 'find': find,
                 'brute': brute,
                 'valid': valid
             }
             self.results.append(result)
Example #15
def deal_output(output_paths, ip_times, wildcard_ips, wildcard_ttl):
    logger.log('INFOR', 'Processing results')
    records = dict()  # holds all domain resolution records
    subdomains = list()  # holds all subdomains that passed validity checks
    for output_path in output_paths:
        logger.log('DEBUG', f'Processing {output_path}')
        with open(output_path) as fd:
            for line in fd:
                line = line.strip()
                try:
                    items = json.loads(line)
                except Exception as e:
                    logger.log('ERROR', e.args)
                    logger.log('ERROR', f'Error parsing line {line}, skipping it')
                    continue
                qname = items.get('name')[:-1]  # strip the trailing dot
                status = items.get('status')
                if status != 'NOERROR':
                    logger.log('TRACE', f'Found {qname}\'s result {status} '
                                        f'while processing {line}')
                    continue
                data = items.get('data')
                if 'answers' not in data:
                    logger.log('TRACE', f'Processing {line}, {qname} no response')
                    continue
                records, subdomains = gen_records(items, records, subdomains,
                                                  ip_times, wildcard_ips,
                                                  wildcard_ttl)
    return records, subdomains
Example #16
    def main(self, domain):
        start = time.time()
        logger.log('INFOR', f'Blasting {domain} ')
        massdns_dir = settings.third_party_dir.joinpath('massdns')
        result_dir = settings.result_save_dir
        temp_dir = result_dir.joinpath('temp')
        utils.check_dir(temp_dir)
        massdns_path = utils.get_massdns_path(massdns_dir)
        timestring = utils.get_timestring()

        wildcard_ips = list()  # list of wildcard resolution IPs
        wildcard_ttl = int()  # wildcard resolution TTL value
        ns_list = query_domain_ns(self.domain)
        ns_ip_list = query_domain_ns_a(ns_list)  # A records of the authoritative name servers
        self.enable_wildcard = detect_wildcard(domain, ns_ip_list)

        if self.enable_wildcard:
            wildcard_ips, wildcard_ttl = collect_wildcard_record(
                domain, ns_ip_list)
        ns_path = get_nameservers_path(self.enable_wildcard, ns_ip_list)

        dict_set = self.gen_brute_dict(domain)
        dict_len = len(dict_set)

        dict_name = f'generated_subdomains_{domain}_{timestring}.txt'
        dict_path = temp_dir.joinpath(dict_name)
        save_brute_dict(dict_path, dict_set)
        del dict_set
        gc.collect()

        output_name = f'resolved_result_{domain}_{timestring}.json'
        output_path = temp_dir.joinpath(output_name)
        log_path = result_dir.joinpath('massdns.log')
        check_dict()
        logger.log('INFOR', 'Running massdns to brute subdomains')
        utils.call_massdns(massdns_path,
                           dict_path,
                           ns_path,
                           output_path,
                           log_path,
                           quiet_mode=self.quite,
                           process_num=self.process_num,
                           concurrent_num=self.concurrent_num)
        output_paths = []
        if self.process_num == 1:
            output_paths.append(output_path)
        else:
            for i in range(self.process_num):
                output_name = f'resolved_result_{domain}_{timestring}.json{i}'
                output_path = temp_dir.joinpath(output_name)
                output_paths.append(output_path)
        ip_times = stat_ip_times(output_paths)
        self.records, self.subdomains = deal_output(output_paths, ip_times,
                                                    wildcard_ips, wildcard_ttl)
        delete_file(dict_path, output_paths)
        end = time.time()
        self.elapse = round(end - start, 1)
        logger.log(
            'INFOR', f'{self.source} module takes {self.elapse} seconds, '
            f'found {len(self.subdomains)} subdomains of {domain}')
        logger.log(
            'DEBUG', f'{self.source} module found subdomains of {domain}:\n'
            f'{self.subdomains}')
        self.gen_result(brute=dict_len, valid=len(self.subdomains))
        self.save_db()
        return self.subdomains
Example #17
def save_brute_dict(dict_path, dict_set):
    dict_data = '\n'.join(dict_set)
    if not utils.save_data(dict_path, dict_data):
        logger.log('FATAL', 'Saving dictionary error')
        exit(1)
Example #18
def collect_wildcard_record(domain, authoritative_ns):
    logger.log('INFOR', f'Collecting wildcard dns record for {domain}')
    if not authoritative_ns:
        return list(), int()
    resolver = utils.dns_resolver()
    resolver.nameservers = authoritative_ns  # use the authoritative name servers
    resolver.rotate = True  # rotate name servers randomly
    resolver.cache = None  # do not use the DNS cache
    ips = set()
    ttl = int()
    ttls_check = list()
    ips_stat = dict()
    ips_check = list()
    while True:
        token = secrets.token_hex(4)
        random_subdomain = f'{token}.{domain}'
        try:
            ip, ttl = get_wildcard_record(random_subdomain, resolver)
        except Exception as e:
            logger.log('DEBUG', e.args)
            logger.log(
                'ALERT', 'Multiple query errors, '
                'trying a new random subdomain')
            continue
        # every 5 queries, check the result list; stop if none returned anything
        ips_check.append(ip)
        ttls_check.append(ttl)
        if len(ips_check) == 5:
            if not any(ips_check):
                logger.log(
                    'ALERT', 'The query ends because there are '
                    'no results for 5 consecutive queries.')
                break
            ips_check = list()
        if len(ttls_check) == 5 and len(set(ttls_check)) == 5:
            logger.log(
                'ALERT', 'The query ends because there are '
                '5 different TTL results for 5 consecutive queries.')
            ips, ttl = set(), int()
            break
        if ip is None:
            continue
        ips.update(ip)
        # count how many times each wildcard IP appears
        for addr in ip:
            ips_stat[addr] = ips_stat.get(addr, 0) + 1
        # select IP addresses seen at least twice
        addrs = list()
        for addr, times in ips_stat.items():
            if times >= 2:
                addrs.append(addr)
        # stop collecting wildcard records once most IPs (>= 80%) have appeared more than once
        if len(addrs) / len(ips) >= 0.8:
            break
    logger.log('DEBUG',
               f'Collected the wildcard dns record of {domain}\n{ips}\n{ttl}')
    return ips, ttl
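
The probe names queried above are built from a random hex token so that each lookup bypasses resolver caches and is virtually guaranteed not to exist in the real zone. A minimal sketch of that name generation:

import secrets

domain = 'example.com'  # illustrative
for _ in range(3):
    # Mirrors the loop above: 4 random bytes -> 8 hex chars per probe label.
    print(f'{secrets.token_hex(4)}.{domain}')
# e.g. 1f9a02bc.example.com (different every run)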