Exemplo n.º 1
0
    def head(self, url, params=None, check=True, **kwargs):
        """
        Issue a HEAD request using this client's cookie/header/proxy settings.

        :param str  url: request url
        :param dict params: request parameters
        :param bool check: check response
        :param kwargs: other params
        :return: response object, or None on error / failed check
        """
        sess = requests.Session()
        sess.trust_env = False  # ignore proxy settings from the environment
        try:
            response = sess.head(url, params=params, cookies=self.cookie,
                                 headers=self.header, proxies=self.proxy,
                                 timeout=self.timeout, verify=self.verify,
                                 **kwargs)
        except Exception as error:
            logger.log('ERROR', error.args)
            return None
        if not check or utils.check_response('HEAD', response):
            return response
        return None
Exemplo n.º 2
0
    def save_db(self, table_name, results, module_name=None):
        """
        Save the results of each module in the database

        :param str table_name: table name (dots are normalized to underscores)
        :param list results: list of result dicts whose keys must match the
                             named :placeholders in the insert statement below
        :param str module_name: name of the module that produced the results
        """
        logger.log(
            'TRACE', f'Saving the subdomain results of {table_name} '
            f'found by module {module_name} into database')
        # Dots in domain-based table names are replaced so the quoted
        # identifier stays a single valid table name
        table_name = table_name.replace('.', '_')
        if results:
            try:
                # bulk_query binds each dict in `results` against the named
                # placeholders, inserting all rows in one call
                self.conn.bulk_query(
                    f'insert into "{table_name}" '
                    f'(id, alive, resolve, request, new, url, subdomain, port, level,'
                    f'cname, ip, public, cdn, status, reason, title, banner, header,'
                    f'history, response, times, ttl, cidr, asn, org, addr, isp, resolver,'
                    f'module, source, elapse, find) '
                    f'values (:id, :alive, :resolve, :request, :new, :url,'
                    f':subdomain, :port, :level, :cname, :ip, :public, :cdn,'
                    f':status, :reason, :title, :banner, :header, :history, :response,'
                    f':times, :ttl, :cidr, :asn, :org, :addr, :isp, :resolver, :module,'
                    f':source, :elapse, :find)', results)
            except Exception as e:
                # Best-effort save: log and continue rather than abort the run
                logger.log('ERROR', e)
Exemplo n.º 3
0
 def search(self):
     """
     Query the interface page by page and match subdomains in each response.
     """
     page = 1
     while True:
         time.sleep(self.delay)
         query = {'pageno': page, 'q': self.domain, 'type': 'code'}
         try:
             resp = self.get(self.addr, params=query)
         except Exception as error:
             logger.log('ERROR', error.args)
             return
         if not resp:
             return
         if resp.status_code != 200:
             logger.log('ERROR', f'{self.source} module query failed')
             return
         if 'class="empty-box"' in resp.text:  # no results marker
             return
         soup = BeautifulSoup(resp.text, 'html.parser')
         found = self.match_subdomains(soup, fuzzy=False)
         if not self.check_subdomains(found):
             return
         self.subdomains.update(found)
         # Pagination widget disabled => last page reached
         if '<li class="disabled"><a href="###">' in resp.text:
             return
         page += 1
         if page >= 100:  # hard page cap
             return
Exemplo n.º 4
0
def check_cdn(data):
    """
    Mark each record's 'cdn' field (1/0) using cname/header/ip/asn checks.

    :param list data: list of record dicts, mutated in place
    :return: the same data list with 'cdn' set on every record
    """
    logger.log('DEBUG', f'Start cdn check module')
    for record in data:
        cname = record.get('cname')
        if cname and check_cname_keyword(cname):
            record['cdn'] = 1
            continue
        header = record.get('header')
        if header and check_header_key(json.loads(header)):
            record['cdn'] = 1
            continue
        ip = record.get('ip')
        if ip and check_cdn_cidr(ip):
            record['cdn'] = 1
            continue
        asn = record.get('asn')
        if asn and check_cdn_asn(asn[2:]):  # strip the leading 'AS' prefix
            record['cdn'] = 1
            continue
        record['cdn'] = 0
    return data
Exemplo n.º 5
0
 def check_param(self):
     """
     Validate run parameters.

     Requires at least one of self.target / self.targets; logs a FATAL
     message and terminates the process otherwise.
     """
     if self.target is None and self.targets is None:
         logger.log('FATAL', 'You must provide either target or targets parameter')
         # Fix: use sys.exit instead of the site-injected `exit` builtin,
         # which is not guaranteed to exist in all interpreters
         import sys
         sys.exit(1)
Exemplo n.º 6
0
    def run(self):
        """
        OneForAll running entrance

        :return: All subdomain results (self.datas), or None when no
                 domain could be obtained
        :rtype: list
        """
        print(oneforall_banner)
        dt = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        print(f'[*] Starting OneForAll @ {dt}\n')
        utils.check_env()
        utils.auto_select_nameserver()
        if settings.enable_check_version:
            utils.check_version(version)
        logger.log('DEBUG', 'Python ' + utils.python_version())
        logger.log('DEBUG', 'OneForAll ' + version)
        logger.log('INFOR', 'Start running OneForAll')
        self.config_param()
        self.check_param()
        self.domains = utils.get_domains(self.target, self.targets)
        if not self.domains:
            logger.log('FATAL', 'Failed to obtain domain')
            return None
        for domain in self.domains:
            self.domain = utils.get_main_domain(domain)
            self.main()
        # Fix: this final log was previously dead code placed after the
        # return statement and could never execute
        logger.log('INFOR', 'Finished OneForAll')
        return self.datas
Exemplo n.º 7
0
def gen_fuzz_subdomains(expression, rule, fuzzlist):
    """
    Generate subdomains based on fuzz mode

    :param  str  expression: expression containing a '*' placeholder
    :param  str  rule: regexp rule expanded into candidate strings
    :param  str  fuzzlist: path of the fuzz wordlist
    :return set  subdomains: generated subdomains
    """
    subdomains = set()
    if fuzzlist:
        subdomains.update(gen_subdomains(expression, fuzzlist))
    if rule:
        total = exrex.count(rule)
        if total > 10000000:
            # NOTE: only alerts; generation still proceeds below
            logger.log(
                'ALERT',
                f'The dictionary generated by this rule is too large: '
                f'{total} > 10000000')
        for candidate in exrex.generate(rule):
            candidate = candidate.lower()
            # Only keep purely alphanumeric expansions
            if candidate.isalnum():
                subdomains.add(expression.replace('*', candidate))
        utils.check_random_subdomain(subdomains)
    logger.log('DEBUG',
               f'Dictionary size based on fuzz mode: {len(subdomains)}')
    return subdomains
Exemplo n.º 8
0
def gen_subdomains(expression, path):
    """
    Build a subdomain set by substituting each wordlist entry into *expression*.

    :param  str  expression: expression containing a '*' placeholder
    :param  str  path: path of wordlist
    :return set  subdomains: generated subdomains
    """
    subdomains = set()
    with open(path, encoding='utf-8', errors='ignore') as wordlist:
        for raw in wordlist:
            word = raw.strip().lower()
            # Skip blank lines and entries that are not valid subdomain labels
            if not word or not utils.is_subname(word):
                continue
            if word.startswith('.'):
                word = word[1:]
            if word.endswith('.'):
                word = word[:-1]
            subdomains.add(expression.replace('*', word))
    size = len(subdomains)
    logger.log('DEBUG',
               f'The size of the dictionary generated by {path} is {size}')
    if size == 0:
        logger.log('ALERT', 'Please check the dictionary content!')
    else:
        utils.check_random_subdomain(subdomains)
    return subdomains
Exemplo n.º 9
0
def is_enable_wildcard(domain):
    """
    Detect and log whether *domain* has wildcard DNS resolution enabled.

    :param domain: domain to check
    :return: truthy when wildcard resolution is enabled
    """
    enabled = detect_wildcard(domain)
    message = (f'The domain {domain} enables wildcard' if enabled
               else f'The domain {domain} disables wildcard')
    logger.log('ALERT', message)
    return enabled
Exemplo n.º 10
0
 def save(self):
     """
     Serialize the results in the configured format and write them to disk.
     """
     logger.log('DEBUG', 'Saving results')
     payload = (str(self.results) if self.format == 'txt'
                else self.results.export(self.format))
     utils.save_data(self.path, payload)
Exemplo n.º 11
0
    def axfr(self, server):
        """
        Attempt a DNS zone transfer (AXFR) of self.domain against *server*.

        :param server: name server to query
        """
        logger.log('DEBUG', f'Trying to perform domain transfer in {server} '
                            f'of {self.domain}')
        try:
            transfer = dns.query.xfr(where=server, zone=self.domain,
                                     timeout=5.0, lifetime=10.0)
            zone = dns.zone.from_xfr(transfer)
        except Exception as error:
            logger.log('DEBUG', error.args)
            logger.log('DEBUG', f'Domain transfer to server {server} of '
                                f'{self.domain} failed')
            return
        for node_name in zone.nodes.keys():
            full_domain = str(node_name) + '.' + self.domain
            self.subdomains.update(self.match_subdomains(full_domain))
            self.results.append(zone[node_name].to_text(node_name))
        if self.results:
            logger.log('DEBUG', f'Found the domain transfer record of '
                                f'{self.domain} on {server}')
            logger.log('DEBUG', '\n'.join(self.results))
            self.results = []  # reset so later calls start clean
Exemplo n.º 12
0
    def save_json(self):
        """
        Save the results of each module as a json file

        :return bool: whether saved successfully
        """
        if not settings.save_module_result:
            return False
        logger.log(
            'TRACE', f'Save the subdomain results found by '
            f'{self.source} module as a json file')
        path = settings.result_save_dir.joinpath(self.domain, self.module)
        path.mkdir(parents=True, exist_ok=True)
        name = self.source + '.json'
        path = path.joinpath(name)
        # Fix: write as UTF-8 explicitly -- json.dump with ensure_ascii=False
        # emits raw non-ASCII characters, which errors='ignore' would silently
        # drop under a non-UTF-8 platform default encoding
        with open(path, mode='w', encoding='utf-8', errors='ignore') as file:
            result = {
                'domain': self.domain,
                'name': self.module,
                'source': self.source,
                'elapse': self.elapse,
                'find': len(self.subdomains),
                'subdomains': list(self.subdomains),
                'infos': self.infos
            }
            json.dump(result, file, ensure_ascii=False, indent=4)
        return True
Exemplo n.º 13
0
def match_subdomains(domain, html, distinct=True, fuzzy=True):
    """
    Use regexp to match subdomains

    :param  str domain: main domain
    :param  str html: response html text
    :param  bool distinct: deduplicate results or not (default True)
    :param  bool fuzzy: fuzzy match subdomain or not (default True)
    :return set/list: result set or list
    """
    logger.log('TRACE', f'Use regexp to match subdomains in the response body')
    escaped = domain.replace('.', r'\.')
    if fuzzy:
        pattern = r'(?:[a-z0-9](?:[a-z0-9\-]{0,61}[a-z0-9])?\.){0,}' + escaped
        matches = re.findall(pattern, html, re.I)
        if not matches:
            return set()
        lowered = map(lambda s: s.lower(), matches)
        return set(lowered) if distinct else list(lowered)
    # Strict mode: require a delimiter (and optional scheme) before the match
    pattern = (r'(?:\>|\"|\'|\=|\,)(?:http\:\/\/|https\:\/\/)?'
               r'(?:[a-z0-9](?:[a-z0-9\-]{0,61}[a-z0-9])?\.){0,}'
               + escaped)
    matches = re.findall(pattern, html, re.I)
    if not matches:
        return set()
    # Drop the leading delimiter char and any http(s):// scheme prefix
    scheme = r'(?:http://|https://)'
    cleaned = map(lambda s: re.sub(scheme, '', s[1:].lower()), matches)
    return set(cleaned) if distinct else list(cleaned)
Exemplo n.º 14
0
def bulk_request(urls):
    """
    Request many urls concurrently with daemon worker threads.

    :param urls: urls to request
    :return: list of collected responses
    """
    logger.log('INFOR', 'Requesting urls in bulk')
    responses = list()
    pending = Queue()
    for target in urls:
        pending.put(target)
    total = len(urls)
    session = get_session()
    workers = req_thread_count()
    bar = get_progress_bar(total)

    # Progress reporter runs alongside the request workers
    Thread(target=progress,
           name='ProgressThread',
           args=(bar, total, pending),
           daemon=True).start()

    for index in range(workers):
        Thread(target=request,
               name=f'RequestThread-{index}',
               args=(pending, responses, session),
               daemon=True).start()

    pending.join()  # block until every queued url has been processed

    return responses
Exemplo n.º 15
0
    def load_rules(self):
        """
        Load fingerprint rule files (.json) grouped under per-type directories.

        :return: tuple of (rule count, rules dict, rule type set)
        """
        rules = {}
        rule_types = set()
        for type_name in os.listdir(self.rule_dir):
            type_dir = os.path.join(self.rule_dir, type_name)
            if not os.path.isdir(type_dir):
                continue
            rule_types.add(type_name)
            for filename in os.listdir(type_dir):
                if not filename.endswith('.json'):
                    continue

                with open(os.path.join(type_dir, filename), encoding='utf-8') as fh:
                    try:
                        rule = json.load(fh)
                        for match in rule['matches']:
                            # Regexps are case-insensitive by default and a
                            # missing certainty defaults to 100%
                            if 'regexp' in match:
                                match['regexp'] = re.compile(
                                    match['regexp'], re.I)
                            if 'certainty' not in match:
                                match['certainty'] = 100

                        rule['origin'] = type_name
                        key = '%s_%s' % (type_name, rule['name'])
                        rules[key] = rule
                    except Exception as e:
                        logger.log('ERROR', f'Parse {filename} failed, error: {e}')

        return len(rules), rules, rule_types
Exemplo n.º 16
0
    def run(self):
        """
        Class entrance
        """
        logger.log('INFOR', f'Start collecting subdomains of {self.domain}')
        self.get_mod()
        self.import_func()

        # One daemon thread per collection function
        workers = [threading.Thread(target=func,
                                    name=name,
                                    args=(self.domain, ),
                                    daemon=True)
                   for func, name in self.collect_funcs]
        for worker in workers:
            worker.start()
        for worker in workers:
            # Join each thread with a timeout: worst-case blocking time of the
            # main thread = thread count * module_thread_timeout. Timed-out
            # threads are daemons, so they die with the main thread.
            worker.join(settings.module_thread_timeout)

        for worker in workers:
            if worker.is_alive():
                logger.log('ALERT', f'{worker.name} module thread timed out')
Exemplo n.º 17
0
def update_data(data, infos):
    """
    Merge resolved DNS info into the data records.

    :param list data: records to update (mutated in place)
    :param dict infos: resolved info keyed by subdomain
    :return: the updated data list
    """
    logger.log('DEBUG', f'Updating resolved results')
    if not infos:
        logger.log('ALERT', f'No valid resolved result')
        return data
    for item in data:
        if item.get('ip'):  # already resolved, leave untouched
            continue
        record = infos.get(item.get('subdomain'))
        if record:
            item.update(record)
        else:
            item['resolve'] = 0
            item['alive'] = 0
            item['reason'] = 'NoResult'
    return data
Exemplo n.º 18
0
 def query(self):
     """
     Query the interface page by page and match subdomains in the responses.
     """
     self.header = self.get_header()
     self.proxy = self.get_proxy(self.source)
     payload = {
         'query': f'parsed.names: {self.domain}',
         'page': 1,
         'fields': ['parsed.subject_dn', 'parsed.names'],
         'flatten': True
     }
     resp = self.post(self.addr, json=payload, auth=(self.id, self.secret))
     if not resp:
         return
     resp_json = resp.json()  # avoid shadowing the json module name
     status = resp_json.get('status')
     if status != 'ok':
         logger.log('ALERT', f'{self.source} module {status}')
         return
     self.subdomains.update(self.match_subdomains(resp.text))
     pages = resp_json.get('metadata').get('pages')
     for page in range(2, pages + 1):
         payload['page'] = page
         resp = self.post(self.addr, json=payload, auth=(self.id, self.secret))
         self.subdomains = self.collect_subdomains(resp)
Exemplo n.º 19
0
 def query(self):
     """
     Query the PhoneBook interface and match subdomains in the responses.
     """
     self.header = self.get_header()
     self.proxy = self.get_proxy(self.source)
     self.header.update({
         'Referer': 'https://phonebook.cz/',
         'Origin': 'https://phonebook.cz'
     })
     addr = 'https://public.intelx.io/phonebook/search'
     key = 'd7d1ed06-f0c5-49d4-a9ca-a167e6d2ffab'
     payload = {
         "term": self.domain,
         "maxresults": 10000,
         "media": 0,
         "target": 1,
         "terminate": [],
         "timeout": 20
     }
     resp = self.post(f'{addr}?k={key}', json=payload)
     if not resp:
         return
     search_id = resp.json().get('id')
     if not search_id:
         logger.log('ALERT', f'Get PhoneBook id fail')
         return
     # Second request fetches the result set for the returned search id
     result_url = f'{addr}/result?k={key}&id={search_id}&limit=10000'
     resp = self.get(result_url)
     self.subdomains = self.collect_subdomains(resp)
Exemplo n.º 20
0
    def query(self):
        """
        Query the interface page by page and match subdomains.

        Stops when a request fails, a page yields no subdomains, or the
        page cap is reached.
        """

        base_addr = 'http://114.55.181.28/check_web/' \
                    'databaseInfo_mainSearch.action'
        page_num = 1
        while True:
            time.sleep(self.delay)
            self.header = self.get_header()
            self.proxy = self.get_proxy(self.source)
            params = {
                'isSearch': 'true',
                'searchType': 'url',
                'term': self.domain,
                'pageNo': page_num
            }
            try:
                resp = self.get(base_addr, params)
            except Exception as e:
                logger.log('ERROR', e.args)
                break
            if not resp:
                break
            subdomains = self.match_subdomains(resp.text)
            if not subdomains:  # stop querying when a page finds nothing
                break
            self.subdomains.update(subdomains)
            # Fix: removed a duplicate `if not subdomains: break` that was
            # unreachable after the break above
            if page_num > 10:
                break
            page_num += 1
Exemplo n.º 21
0
 def begin(self):
     """
     Log that this collection module has started.
     """
     message = (f'Start {self.source} module to '
                f'collect subdomains of {self.domain}')
     logger.log('DEBUG', message)
Exemplo n.º 22
0
 def query(self, sql):
     """
     Execute a SQL query against the connection.

     :param sql: SQL statement to run
     :return: query results, or None on error
     """
     try:
         return self.conn.query(sql)
     except Exception as error:
         logger.log('ERROR', error.args)
         return None
Exemplo n.º 23
0
def check_path(path, name, format):
    """
    Check and normalize the result output path.

    :param path: requested save path (falls back to the default directory)
    :param name: export file name stem
    :param format: save format, used as the file extension
    :return: final save path as a Path
    """
    filename = f'{name}.{format}'
    default_path = settings.result_save_dir.joinpath(filename)
    if isinstance(path, str):
        # Normalize backslashes to forward slashes via repr, then strip
        # the quote characters repr adds
        normalized = repr(path).replace('\\', '/').replace('\'', '')
        result = Path(normalized)
    else:
        result = Path(default_path)
    if not result.suffix:  # a directory was given; append the file name
        result = result.joinpath(filename)
    parent_dir = result.parent
    if not parent_dir.exists():
        logger.log('ALERT', f'{parent_dir} does not exist, directory will be created')
        parent_dir.mkdir(parents=True, exist_ok=True)
    if result.exists():
        logger.log('ALERT', f'The {result} exists and will be overwritten')
    return result
Exemplo n.º 24
0
def check_random_subdomain(subdomains):
    """
    Alert on the generated dictionary: warn if empty, otherwise surface
    one sample entry for the user to sanity-check.
    """
    if not subdomains:
        logger.log('ALERT', f'The generated dictionary is empty')
        return
    sample = next((entry for entry in subdomains if entry), None)
    if sample:
        logger.log('ALERT', f'Please check whether {sample} is correct or not')
Exemplo n.º 25
0
def match_subdomains(domain, text):
    """
    Strictly match subdomains of *domain* inside *text*.

    :param domain: main domain
    :param text: text to scan; non-str input yields an empty set
    :return: set of matched subdomains
    """
    if not isinstance(text, str):
        logger.log('DEBUG', f'abnormal object: {type(text)}')
        subdomains = set()
    else:
        subdomains = utils.match_subdomains(domain, text, fuzzy=False)
    logger.log('TRACE', f'matched subdomains: {subdomains}')
    return subdomains
Exemplo n.º 26
0
def req_thread_count():
    """
    Return the number of request threads to use.

    Uses the configured value (floored at 16) when it is an int,
    otherwise falls back to the computed request count.
    """
    configured = settings.request_thread_count
    if isinstance(configured, int):
        count = max(16, configured)
    else:
        count = utils.get_request_count()
    logger.log('DEBUG', f'Number of request threads {count}')
    return count
Exemplo n.º 27
0
    def drop_table(self, table_name):
        """
        Delete table

        :param str table_name: table name (dots normalized to underscores)
        """
        safe_name = table_name.replace('.', '_')
        logger.log('TRACE', f'Deleting {safe_name} table')
        self.query(f'drop table if exists "{safe_name}"')
Exemplo n.º 28
0
def save_db(name, data):
    """
    Persist request results to the database under table *name*.

    :param str  name: table name
    :param list data: data to be saved
    """
    logger.log('INFOR', 'Saving requested results')
    utils.save_db(name, data, 'request')
Exemplo n.º 29
0
    def get_data(self, table_name):
        """
        Get all the data in the table

        :param str table_name: table name (dots normalized to underscores)
        :return: all rows of the table
        """
        safe_name = table_name.replace('.', '_')
        logger.log('TRACE', f'Get all the data from {safe_name} table')
        return self.query(f'select * from "{safe_name}"')
Exemplo n.º 30
0
def ip_to_int(ip):
    """
    Convert an IPv4 address to its integer form.

    :param ip: IPv4 address string or an already-converted int
    :return: integer value of the address, or 0 if conversion fails
    """
    if isinstance(ip, int):
        return ip  # already numeric, pass through
    try:
        return int(IPv4Address(ip))
    except Exception as error:
        logger.log('ERROR', error.args)
        return 0