def output(target):
    '''
    name: HTTP Request
    priority: 1
    version: 0.2
    '''
    req = urlopen(target.geturl())
    if req is None:
        raise PluginWarning('target %s connection refused' % target.host)
    log.debug('encoding: %s' % req.encoding)
    target.data = {
        'headers': dict(req.headers.lower_items()),
        'cookies': req.cookies.get_dict(),
        'content': req.text,
        'robots': urlopen(target.geturl('robots.txt'), attr=('text', '')),
    }
    log.debug('Headers: %s' % str(target.data['headers']))
    log.debug('Cookies: %s' % str(target.data['cookies']))
    if 'server' in req.headers:
        cprint('Server: %s' % req.headers['server'], '+')
    target.raw_build = urlopen('http://builtwith.com/%s' % target.host,
                               attr=('text', ''))
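# urlopen is the shared HTTP wrapper that every plugin below relies on
# but which is not shown in this section. A minimal sketch, assuming it
# wraps requests.get() and returns None on network errors; when
# attr=(name, default) is given it returns that attribute of the
# response instead, falling back to the default on any failure (the
# timeout value here is an assumption, not the tool's actual setting):
import requests

def urlopen(url, attr=None, **kwargs):
    try:
        req = requests.get(url, timeout=10, **kwargs)
    except requests.RequestException:
        req = None
    if attr is None:
        return req
    return getattr(req, attr[0], attr[1]) if req is not None else attr[1]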
def output(target):
    '''
    name: WhatCMS Guess
    depends: request
    version: 0.3
    '''
    if not getattr(target, 'data', None):
        return
    if option_input() != 'y':
        return
    cms = []
    patterns = ['md5', 'type', 'status']
    queue = Queue.PriorityQueue()
    files = glob.glob(os.path.join('plugins/whatcms', '*.json'))
    for patt in map(json_dict, files):
        if not patt:
            continue  # skip fingerprint files that failed to parse
        count = counter(target.data, patt['keyword'])  # count keyword matches
        if count is True:
            target.cms = patt['name']
            break
        elif count > 0:
            cms.append(patt['name'])
        for banner in patt['path']:
            url = target.geturl(banner.pop('url'))
            # Priority = position in `patterns` + (2 - match count);
            # the lower the value, the higher the priority.
            priority = patterns.index(banner.keys()[0]) + (2 - count)
            queue.put((priority, patt['name'], url, banner))
    if not getattr(target, 'cms', None):
        # Sample the 404 behaviour with a random, non-existent path
        rand_uuid = str(uuid.uuid4())
        status_file = urlopen(target.geturl(rand_uuid + '.js'), attr=('headers', {}))
        status_dir = urlopen(target.geturl(rand_uuid + '/'), attr=('headers', {}))
        # Store the 404 page lengths on urlcheck so it can ignore them
        urlcheck.status_404 = (status_file.get('content-length', 0),
                               status_dir.get('content-length', 0))
        pool = ThreadPool(processes=3)
        async_result = pool.apply_async(urlcheck, (queue, ))
        val = async_result.get()
        # If the URL checks found nothing, fall back to the candidate list
        if val:
            target.cms = val
        elif cms:
            target.cms = ','.join(cms)
    if getattr(target, 'cms', None):
        cprint(target.cms, '+')
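# json_dict is referenced above but not defined in this section. A
# minimal sketch, assuming it simply loads one fingerprint file and
# returns None on malformed JSON so the caller's `if not patt` can skip
# it. The fingerprint shape in the docstring is inferred from how the
# loop and urlcheck() consume it; the concrete values are hypothetical.
import json

def json_dict(path):
    '''
    Expected fingerprint shape (hypothetical example):
    {
        "name": "WordPress",
        "keyword": ["wp-content", "wp-includes"],
        "path": [
            {"url": "wp-login.php", "status": 200},
            {"url": "wp-includes/js/jquery/jquery.js",
             "md5": "<hypothetical digest>"}
        ]
    }
    Each "path" entry carries a "url" plus exactly one check key
    ('md5', 'type' or 'status') that urlcheck() later evaluates.
    '''
    try:
        with open(path) as fp:
            return json.load(fp)
    except (IOError, ValueError):
        return None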
def output(target):
    '''
    name: WebServer Parser
    priority: 8
    depends: request
    '''
    if not getattr(target, 'data', None):
        return
    server = target.data['headers'].get('server', '')
    if 'nginx' in server:
        cprint('testing nginx parsing vulnerability...')
        path = urlsrc(target.data['content'], target.host)
        url = target.geturl(path)
        log.debug('URL: %s' % url)
        for suffix in ['/.php', '/1.php', '%00.php']:
            req = urlopen(url + suffix)
            if req and not req.history and \
                    'text/html' in req.headers.get('content-type', ''):
                cprint('Nginx Parser Vulnerability', '+')
                break
    elif 'iis' in server:
        pass
    elif 'apache' in server:
        pass
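# urlsrc is assumed to pick a real static-resource path out of the
# fetched page, so the parser test above appends its ".php" suffixes to
# a URL that actually exists on the target. A minimal sketch; the regex
# and the '/index.html' fallback are assumptions, not the tool's actual
# implementation (`host` is kept only to match the caller's signature):
import re

def urlsrc(content, host):
    m = re.search(r'src=["\'](/[^"\']+\.(?:jpg|png|gif|css|js))', content)
    return m.group(1) if m else '/index.html'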
def worker(queue):
    while True:
        task = queue.get()
        if task is None:
            break
        req = urlopen(urlparse.urljoin(*task))
        # status 200 and no redirect history
        if req and not req.history:
            cprint('%s status %d' % (req.url, req.status_code), '+')
def producer(task, queue):
    ''' producer: probe one host:port and enqueue its paths '''
    req = urlopen('http://{}:{}'.format(*task))
    if req is None:
        return
    # parse the response body
    server, title, banner = parse(req)
    cprint('%s %s %s [%s]' % (req.url, server, title, banner))
    for uri in paths:
        queue.put((req.url, uri))
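# A hypothetical wiring of producer() and worker(): the producer fills
# the queue per (host, port) task, a small worker pool drains it, and
# None sentinels shut the workers down. The pool size, task list and
# scan() name are assumptions for illustration only.
import threading
import Queue

def scan(tasks, num_workers=3):
    queue = Queue.Queue()
    workers = [threading.Thread(target=worker, args=(queue,))
               for _ in range(num_workers)]
    for t in workers:
        t.start()
    for task in tasks:          # e.g. [('example.com', 80)]
        producer(task, queue)
    for _ in workers:
        queue.put(None)         # sentinel: one per worker
    for t in workers:
        t.join()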
def output(target):
    '''
    name: SameIP Finder
    depends: cdn
    '''
    if getattr(target, 'cdn', False):
        return
    sameip = []
    for url, regex in apis:
        content = urlopen(url % target.host, attr=('text', ''))
        sameip.extend(re.findall(regex, content))
    target.raw_sameip = set(sameip[:50])
    print target.raw_sameip
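# `apis` is assumed to be a module-level list of (url_template, regex)
# pairs, where the template takes the target host and the regex pulls
# domain names sharing the same IP out of the API response (the
# Sub-Domain Finder later filters raw_sameip by TLD). The entry below
# is purely hypothetical, shown only to document the expected shape.
apis = [
    ('http://api.example.com/reverse-ip?host=%s',
     r'[\w.-]+\.[a-z]{2,}'),
]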
def producer(task, queue):
    req = urlopen('http://{}:{}'.format(*task))
    if req is None:
        return
    match = re.search(r'<title[^>]*>([^<]*)</title>', req.text, re.I)
    title = (match.group(1) if match else '').strip()
    server = req.headers.get('server', '')
    banner = 'unknown'
    if 'servlet' in req.headers.get('x-powered-by', ''):
        banner = 'servlet'
    cprint('%s %s %s [%s]' % (req.url, server, title, banner))
    for uri in uris:
        queue.put((req.url, uri))
def output(target):
    '''
    name: Reverse Email Finder
    depends: whois
    '''
    if not getattr(target, 'domain', None):
        return
    domains = []
    for url, regex in apis:
        content = urlopen(url % target.domain[0], attr=('text', ''))
        domains.extend(re.findall(regex, content))
    target.email_domains = set(domains[:50])
    log.debug('DOMAINS: %s' % ', '.join(target.email_domains))
    print target.email_domains
def urlcheck(queue):
    ''' concurrently check the queued URL rules '''
    while not queue.empty():
        _, name, url, pattern = queue.get()
        req = urlopen(url)
        if not req:
            continue
        # skip responses whose length matches the sampled 404 pages
        if req.headers.get('content-length', 0) in urlcheck.status_404:
            continue
        # match the MD5 digest of the response body
        if pattern.get('md5', '') == hashlib.md5(req.content).hexdigest():
            return name
        if req.history:
            continue  # ignore redirected URLs
        # match the Content-Type header
        if pattern.get('type', 'unknown') in req.headers.get('content-type', ''):
            return name
        if pattern.get('status', 0) == req.status_code:
            return name
    return None
def output(target):
    '''
    name: Sub-Domain Finder
    depends: request,axfr,sameip
    version: 0.2
    '''
    def valid_tld(domain):
        # normalise unicode before the byte-string comparison
        if type(domain) == unicode:
            domain = domain.encode('utf-8')
        return domain.endswith(target.tld)

    target.domains = set()
    # collect sub-domains from the DNS zone
    for val in getattr(target, 'zone', {}).values():
        domains = filter(valid_tld, map(itemgetter(0), val))
        target.domains.update(domains)
    # if a zone transfer (AXFR) succeeded, the zone already covers everything
    if getattr(target, 'axfr', False):
        return
    # extract sub-domains from the page content
    regex = re.compile(r'(?<=//)([\w\-]+\.)*?%s' % re.escape(target.tld))
    for m in regex.finditer(target.data['content']):
        target.domains.add(m.group())
    # extract sub-domains from hosts sharing the same IP
    domains = filter(valid_tld, getattr(target, 'raw_sameip', []))
    target.domains.update(domains)
    # query the sub-domain APIs
    for url, param, regex in apis_url:
        text = urlopen(url, attr=('text', ''),
                       params=param % target.tld if param else None)
        target.domains.update(re.findall(regex, text))
    # extract sub-domains from the SSL certificate
    if target.scheme == 'https':
        certs = ssl_cert(target.host, target.port)
        # keep only names under the target TLD
        for _, domain in certs.get('subjectAltName', []):
            if not valid_tld(domain):
                continue
            target.domains.add(domain.replace('*.', ''))
    print target.domains
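# ssl_cert is assumed to return the parsed peer certificate as a dict,
# so certs.get('subjectAltName') works above. A minimal stdlib sketch;
# note that getpeercert() only yields parsed fields when the certificate
# verifies against the default CA bundle, so self-signed hosts fall back
# to {} here (the 5-second timeout is an assumption):
import socket
import ssl

def ssl_cert(host, port):
    try:
        ctx = ssl.create_default_context()
        sock = ctx.wrap_socket(socket.create_connection((host, port), timeout=5),
                               server_hostname=host)
        try:
            return sock.getpeercert() or {}
        finally:
            sock.close()
    except (socket.error, ssl.SSLError, ssl.CertificateError):
        return {}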
def output(target):
    '''
    name: Hosting History
    priority: 8
    depends: cdn
    version: 0.1
    '''
    if not getattr(target, 'cdn', False):
        return
    ipdate = set()
    for url, regex, fmt in apis_url:
        content = urlopen(url % target.host, attr=('text', ''))
        for m in re.finditer(regex, content):
            date = datetime.strptime(m.group('date'), fmt)
            ipdate.add((m.group('ip'), date))
    for ip, date in sorted(ipdate, key=itemgetter(1)):
        cprint('%s %s' % (ip, date.strftime('%Y-%m-%d')), '+')