def multi_target_test(options, target):
    # remove leftover temp files from previous runs
    tmp_root = os.path.join(os.getcwd(), 'tmp')
    if os.path.exists(tmp_root):
        shutil.rmtree(tmp_root)
    tmp_dir = os.path.join(tmp_root, '%s_%s' % (target, int(time.time())))
    if not os.path.exists(tmp_dir):
        # create the working directory
        os.makedirs(tmp_dir)
    multiprocessing.freeze_support()
    all_process = []
    dns_servers = load_dns_servers()  # load DNS servers
    next_subs = load_next_sub(options)
    scan_count = multiprocessing.Value('i', 0)
    found_count = multiprocessing.Value('i', 0)
    queue_size_list = multiprocessing.Array('i', options.process)
    try:
        print '[+] Init %s scan process.' % options.process
        for process_num in range(options.process):
            p = multiprocessing.Process(
                target=run_process,
                args=(target, options, process_num, dns_servers, next_subs,
                      scan_count, found_count, queue_size_list, tmp_dir))
            all_process.append(p)
            p.start()

        while all_process:
            for p in all_process:
                if not p.is_alive():
                    all_process.remove(p)  # drop finished processes
            groups_count = 0
            for c in queue_size_list:
                groups_count += c
            msg = '[*] %s found, %s scanned in %.1f seconds, %s groups left' % (
                found_count.value, scan_count.value, time.time() - start_time, groups_count)
            print_msg(msg)
            time.sleep(1.0)
    except KeyboardInterrupt as e:
        for p in all_process:
            p.terminate()
        print '[ERROR] User aborted the scan!'
    except Exception as e:
        print e

    msg = '[+] All Done. %s found, %s scanned in %.1f seconds.' % (
        found_count.value, scan_count.value, time.time() - start_time)
    print_msg(msg, line_feed=True)
    out_file_name = get_out_file_name(target, options)  # output file path
    with open(out_file_name, 'w') as f:
        for _file in glob.glob(tmp_dir + '/*.txt'):
            with open(_file, 'r') as tmp_f:
                content = tmp_f.read()
                f.write(content)
    # remove temp files
    shutil.rmtree(tmp_root)
    print '[+] The output file is %s' % out_file_name
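# A minimal, self-contained sketch of the progress-reporting pattern used above:
# worker processes update shared multiprocessing.Value counters while the parent
# polls them. The names worker/n_procs are illustrative, not from the original code.
import multiprocessing
import time

def worker(scan_count, found_count):
    for i in range(100):
        with scan_count.get_lock():       # a Value carries its own lock
            scan_count.value += 1
        if i % 10 == 0:
            with found_count.get_lock():
                found_count.value += 1
        time.sleep(0.01)

if __name__ == '__main__':
    n_procs = 4
    scan_count = multiprocessing.Value('i', 0)
    found_count = multiprocessing.Value('i', 0)
    procs = [multiprocessing.Process(target=worker, args=(scan_count, found_count))
             for _ in range(n_procs)]
    for p in procs:
        p.start()
    while any(p.is_alive() for p in procs):
        print('[*] %s found, %s scanned' % (found_count.value, scan_count.value))
        time.sleep(0.5)
    for p in procs:
        p.join()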
def run(self):
    threads = [gevent.spawn(self._scan, i) for i in range(self.threads)]
    gevent.joinall(threads)
    msg = '[+] All Done. Success:%d Saved in:%s' % (self.success, self.filename)
    print_msg(msg, line_feed=True)
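# A minimal sketch of the gevent worker pattern run() uses: spawn N greenlets
# over a shared queue and wait for all of them. The monkey.patch_all() call and
# the drain() worker are illustrative assumptions, not the original code.
from gevent import monkey; monkey.patch_all()
import gevent
from gevent.queue import Queue, Empty

tasks = Queue()
for i in range(20):
    tasks.put_nowait('item-%d' % i)

def drain(worker_id):
    while True:
        try:
            item = tasks.get_nowait()
        except Empty:
            return
        gevent.sleep(0)  # stand-in for real network I/O, which would yield here
        print('[worker %d] %s' % (worker_id, item))

threads = [gevent.spawn(drain, i) for i in range(4)]
gevent.joinall(threads)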
def get_sub_file_path():
    if options.full_scan and options.file == 'subnames.txt':
        path = 'dict/subnames_full.txt'
    else:
        if os.path.exists(options.file):
            path = options.file
        elif os.path.exists('subDomainsBrute/dict/%s' % options.file):
            # return the path that was actually found, not a different one
            path = 'subDomainsBrute/dict/%s' % options.file
        else:
            print_msg('[ERROR] Names file not found: %s' % options.file)
            exit(-1)
    return path
def init_final(self):
    try:
        self.conn_pool.close()
    except:
        pass
    default_port = 443 if self.schema.lower() == 'https' else 80
    self.host, self.port = \
        self.host.split(':') if self.host.find(':') > 0 else (self.host, default_port)
    self.port = int(self.port)
    if (self.schema == 'http' and self.port == 80) or \
            (self.schema == 'https' and self.port == 443):
        self.base_url = '%s://%s' % (self.schema, self.host)
    else:
        self.base_url = '%s://%s:%s' % (self.schema, self.host, self.port)
    is_port_open = self.is_port_open()
    if is_port_open:
        if self.schema == 'https':
            self.conn_pool = HTTPSConnPool(self.host, port=self.port,
                                           maxsize=self.args.t * 2, headers=headers)
        else:
            self.conn_pool = HTTPConnPool(self.host, port=self.port,
                                          maxsize=self.args.t * 2, headers=headers)
    if self.args.scripts_only or (not is_port_open and not self.args.no_scripts):
        for _ in self.user_scripts:
            self.url_queue.put((_, '/'))
        self.lock.acquire()
        print_msg('Scan with user scripts: %s' % self.host)
        self.lock.release()
        return
    if not is_port_open:
        return
    self.max_depth = cal_depth(self, self.path)[1] + 5
    if self.args.no_check404:
        self._404_status = 404
        self.has_404 = True
    else:
        self.check_404()  # check existence of HTTP 404
    if not self.has_404:
        print_msg('[Warning] %s has no HTTP 404.' % self.host)
    _path, _depth = cal_depth(self, self.path)
    self._enqueue('/')
    self._enqueue(_path)
    if not self.args.no_crawl and not self.log_file:
        self.crawl_index(_path)
def get_sub_file_path():
    path = None
    if options.full_scan and options.file == "subnames.txt":
        path = "dict/subnames_full.txt"
    else:
        if os.path.exists(options.file):
            path = options.file
        elif os.path.exists("dict/%s" % options.file):
            path = "dict/%s" % options.file
        else:
            print_msg("[ERROR] Names file not found: %s" % options.file)
            exit(-1)
    return path
def _scan_worker(self):
    while self.url_queue.qsize() > 0:
        if time.time() - self.START_TIME > self.TIME_OUT:
            self.url_queue.queue.clear()
            print_msg('[ERROR] Timed out task: %s' % self.host)
            return
        try:
            item = self.url_queue.get(timeout=0.1)
        except Exception as e:
            print e
            return
        try:
            if len(item) == 2:  # user script
                check_func = getattr(item[0], 'do_check')
                check_func(self, item[1])
                continue
        except Exception as e:
            logging.error('[_scan_worker Exception] [1] %s' % str(e))
            traceback.print_exc()
            continue
        url_description, tag, status_to_match, content_type, content_type_no, \
            root_only, lang, rewrite = item
        prefix = url_description['prefix']
        url = url_description['full_url']
        valid_item, status, headers, html_doc = self.apply_rules(item)
        try:
            if valid_item:
                m = re.search('<title>(.*?)</title>', html_doc)
                title = m.group(1) if m else ''
                self.lock.acquire()
                # print '[+] [Prefix:%s] [%s] %s' % (prefix, status, 'http://' + self.host + url)
                if prefix not in self.results:
                    self.results[prefix] = []
                _ = {'status': status, 'url': '%s%s' % (self.base_url, url), 'title': title}
                if _ not in self.results[prefix]:
                    self.results[prefix].append(_)
                self.lock.release()
                if len(self.results) >= 10:
                    print '[Warning] Over 10 vulnerabilities found [%s], seems to be false positives.' % prefix
                    self.url_queue.queue.clear()
        except Exception as e:
            logging.error('[_scan_worker.Exception][2][%s] %s' % (url, str(e)))
            traceback.print_exc()
def _load_sub_names(self):
    if self.options.full_scan and self.options.file == 'subnames.txt':
        _file = 'dict/subnames_full.txt'
    else:
        if os.path.exists(self.options.file):
            _file = self.options.file
        elif os.path.exists('dict/%s' % self.options.file):
            _file = 'dict/%s' % self.options.file
        else:
            print_msg('[ERROR] Names file not found: %s' % self.options.file)
            exit(-1)

    normal_lines = []
    wildcard_lines = []
    wildcard_list = []
    regex_list = []
    lines = set()
    with open(_file) as f:
        for line in f.xreadlines():
            sub = line.strip()
            if not sub or sub in lines:
                continue
            lines.add(sub)
            # names containing placeholders are kept aside and also turned into regexes
            if sub.find('{alphnum}') >= 0 or sub.find('{alpha}') >= 0 or sub.find('{num}') >= 0:
                wildcard_lines.append(sub)
                sub = sub.replace('{alphnum}', '[a-z0-9]')
                sub = sub.replace('{alpha}', '[a-z]')
                sub = sub.replace('{num}', '[0-9]')
                if sub not in wildcard_list:
                    wildcard_list.append(sub)
                    regex_list.append('^' + sub + '$')
            else:
                normal_lines.append(sub)

    if regex_list:
        # drop plain names already covered by a wildcard pattern
        pattern = '|'.join(regex_list)
        _regex = re.compile(pattern)
        for line in normal_lines[:]:
            if _regex.search(line):
                normal_lines.remove(line)

    # distribute names across processes by slicing with a stride
    for item in normal_lines[self.process_num::self.options.process]:
        self.priority += 1
        self.queue.put((self.priority, item))
    for item in wildcard_lines[self.process_num::self.options.process]:
        self.queue.put((88888888, item))
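# A small standalone illustration of the wildcard handling in _load_sub_names():
# placeholders become character classes, and plain names already matched by a
# wildcard pattern are dropped so they are not scanned twice. Sample data only.
import re

names = ['www', 'mx1', 'mx{num}', 'api', 'a{alphnum}']
normal, regex_list = [], []
for sub in names:
    if '{alphnum}' in sub or '{alpha}' in sub or '{num}' in sub:
        pattern = (sub.replace('{alphnum}', '[a-z0-9]')
                      .replace('{alpha}', '[a-z]')
                      .replace('{num}', '[0-9]'))
        regex_list.append('^' + pattern + '$')
    else:
        normal.append(sub)

_regex = re.compile('|'.join(regex_list))
normal = [n for n in normal if not _regex.search(n)]
print(normal)  # ['www', 'api'] -- 'mx1' is already covered by mx{num}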
def _scan(self, j):
    while not self.queue.empty():
        try:
            item = self.queue.get(timeout=3.0)
            host, domain, port = item, self.host, 80
            html = httpServer((host, domain, port), self.timeout)
            if html is not None and self.keyword in html:
                self.outfile.write(item + '\n')
                self.outfile.flush()
                self.success += 1
        except:
            pass
        finally:
            self.i += 1
            msg = '[*] %s found, %s scanned, %s groups left' % (
                self.success, self.i, self.num - self.i)
            print_msg(msg)
            time.sleep(1.0)
def is_port_open(self):
    # create the socket outside the try block so the finally clause
    # can always reference it safely
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(5.0)
    try:
        if s.connect_ex((self.host, int(self.port))) == 0:
            self.lock.acquire()
            print_msg('Scan web: %s' % self.base_url)
            self.lock.release()
            return True
        else:
            print_msg('[Warning] Fail to connect to %s:%s' % (self.host, self.port))
            return False
    except Exception as e:
        return False
    finally:
        # SO_LINGER with zero timeout: close immediately instead of lingering in TIME_WAIT
        s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0))
        s.close()
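# A minimal sketch of the connect_ex() probe used by is_port_open(): connect_ex
# returns 0 on success instead of raising, so one integer check covers both the
# open and closed cases. The host and port below are placeholders.
import socket
import struct

def port_is_open(host, port, timeout=5.0):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(timeout)
    try:
        return s.connect_ex((host, port)) == 0
    finally:
        # SO_LINGER(on=1, linger=0): close with RST, skipping TIME_WAIT --
        # useful when a scanner opens thousands of short-lived sockets
        s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0))
        s.close()

print(port_is_open('127.0.0.1', 80))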
def domain_lookup():
    r = Resolver()
    r.timeout = r.lifetime = 10.0
    # r.nameservers = ['182.254.116.116', '223.5.5.5'] + r.nameservers
    while True:
        try:
            host = queue_hosts.get(timeout=0.1)
        except:
            break
        _schema, _host, _path = parse_url(host)
        try:
            m = re.search(r'\d+\.\d+\.\d+\.\d+', _host.split(':')[0])
            if m:
                # target is already an IP literal, no DNS query needed
                q_targets.put({'file': '', 'url': host})
                ips_to_scan.append(m.group(0))
            else:
                answers = r.query(_host.split(':')[0])
                if answers:
                    q_targets.put({'file': '', 'url': host})
                    for _ in answers:
                        ips_to_scan.append(_.address)
        except Exception as e:
            print_msg('Invalid domain: %s' % host)
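# domain_lookup() mixes a literal-IP regex with a dnspython Resolver query. A
# minimal sketch of that resolution step, assuming the dnspython 1.x API
# (query() rather than the newer resolve()):
import re
from dns.resolver import Resolver

def to_ips(host):
    if re.match(r'^\d+\.\d+\.\d+\.\d+$', host):
        return [host]                      # already an IP literal
    r = Resolver()
    r.timeout = r.lifetime = 10.0
    return [a.address for a in r.query(host)]

print(to_ips('example.com'))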
def save_report_thread(q_results, file):
    start_time = time.time()
    a_template = template['markdown'] if args.md else template['html']
    t_general = Template(a_template['general'])
    t_host = Template(a_template['host'])
    t_list_item = Template(a_template['list_item'])
    output_file_suffix = a_template['suffix']
    all_results = []
    report_name = os.path.basename(file).lower().replace('.txt', '') + '_' + \
        time.strftime('%Y%m%d_%H%M%S', time.localtime()) + output_file_suffix
    global STOP_ME
    try:
        while not STOP_ME:
            if q_results.qsize() == 0:
                time.sleep(0.1)
                continue
            html_doc = ""
            while q_results.qsize() > 0:
                all_results.append(q_results.get())
            for item in all_results:
                host, results = item
                _str = ""
                for key in results.keys():
                    for _ in results[key]:
                        _str += t_list_item.substitute(
                            {'status': _['status'], 'url': _['url'], 'title': _['title']})
                _str = t_host.substitute({'host': host, 'list': _str})
                html_doc += _str
            cost_time = time.time() - start_time
            cost_min = int(cost_time / 60)
            cost_seconds = '%.2f' % (cost_time % 60)
            html_doc = t_general.substitute(
                {'cost_min': cost_min, 'cost_seconds': cost_seconds, 'content': html_doc})
            with codecs.open('report/%s' % report_name, 'w', encoding='utf-8') as outFile:
                outFile.write(html_doc)

        if all_results:
            print_msg('Scan report saved to report/%s' % report_name)
            if not args.no_browser:
                webbrowser.open_new_tab(os.path.abspath('report/%s' % report_name))
        else:
            lock.acquire()
            print_msg('No vulnerabilities found on sites in %s.' % file)
            lock.release()
    except Exception as e:
        print_msg('[save_report_thread Exception] %s %s' % (type(e), str(e)))
        sys.exit(-1)
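# save_report_thread() builds its report with string.Template rather than '%'
# formatting. A tiny demo of the same substitute() call, with made-up templates:
from string import Template

t_list_item = Template('<li>[$status] <a href="$url">$url</a> $title</li>')
t_host = Template('<h2>$host</h2><ul>$list</ul>')

row = t_list_item.substitute({'status': 200, 'url': 'http://example.com/admin/',
                              'title': 'Admin'})
print(t_host.substitute({'host': 'example.com', 'list': row}))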
    input_files = glob.glob(args.d + '/*.txt')
elif args.crawler:
    input_files = ['crawler']
elif args.host:
    input_files = ['hosts']  # several hosts from the command line

ips_to_scan = []  # all IPs to scan during the current run
for file in input_files:
    if args.host:
        lines = args.host
    elif args.f or args.d:
        with open(file) as inFile:
            lines = inFile.readlines()
    try:
        print_msg('Batch web scan start.')
        q_results = multiprocessing.Manager().Queue()
        q_targets = multiprocessing.Manager().Queue()
        lock = multiprocessing.Manager().Lock()
        STOP_ME = False
        threading.Thread(target=save_report_thread, args=(q_results, file)).start()
        print_msg('Report thread created, prepare target Queue...')
        if args.crawler:
            _input_files = glob.glob(args.crawler + '/*.log')
            for _file in _input_files:
                q_targets.put({'file': _file, 'url': ''})
        else:
def _scan(self, j):
    self.resolvers[j].nameservers = [self.dns_servers[j % 2]]
    while not self.queue.empty():
        try:
            item = self.queue.get(timeout=3.0)[1]
            self.scan_count_local += 1
            if time.time() - self.local_time > 3.0:
                # flush the local counter into the shared one every few seconds
                self.scan_count.value += self.scan_count_local
                self.scan_count_local = 0
                self.queue_size_list[self.process_num] = self.queue.qsize()
        except Exception as e:
            break
        try:
            # expand one placeholder per pass and re-queue the partial results
            if item.find('{alphnum}') >= 0:
                for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                    self.put_item(item.replace('{alphnum}', _letter, 1))
                continue
            elif item.find('{alpha}') >= 0:
                for _letter in 'abcdefghijklmnopqrstuvwxyz':
                    self.put_item(item.replace('{alpha}', _letter, 1))
                continue
            elif item.find('{num}') >= 0:
                for _letter in '0123456789':
                    self.put_item(item.replace('{num}', _letter, 1))
                continue
            elif item.find('{next_sub}') >= 0:
                for _ in self.next_subs:
                    self.queue.put((0, item.replace('{next_sub}', _, 1)))
                continue
            else:
                sub = item

                if sub in self.found_subs:
                    continue

                cur_sub_domain = sub + '.' + self.target
                _sub = sub.split('.')[-1]
                try:
                    answers = self.resolvers[j].query(cur_sub_domain)
                except dns.resolver.NoAnswer as e:
                    answers = self.ex_resolver.query(cur_sub_domain)

                if answers:
                    self.found_subs.add(sub)
                    ips = ', '.join(sorted([answer.address for answer in answers]))
                    try:
                        self.scan_count_local += 1
                        answers = self.resolvers[j].query(cur_sub_domain, 'cname')
                        cname = answers[0].target.to_unicode().rstrip('.')
                        if cname.endswith(self.target) and cname not in self.found_subs:
                            self.found_subs.add(cname)
                            cname_sub = cname[:len(cname) - len(self.target) - 1]  # new sub found via CNAME
                            self.queue.put((0, cname_sub))
                    except:
                        pass

                    # suppress wildcard-style records: many subs resolving to the same IPs
                    if (_sub, ips) not in self.ip_dict:
                        self.ip_dict[(_sub, ips)] = 1
                    else:
                        self.ip_dict[(_sub, ips)] += 1
                        if self.ip_dict[(_sub, ips)] > 30:
                            continue

                    self.found_count_local += 1
                    if time.time() - self.local_time > 3.0:
                        self.found_count.value += self.found_count_local
                        self.found_count_local = 0
                        self.queue_size_list[self.process_num] = self.queue.qsize()
                        self.local_time = time.time()

                    msg = cur_sub_domain.ljust(30) + ips
                    print_msg(msg, line_feed=True)
                    self.outfile.write(cur_sub_domain.ljust(30) + '\t' + ips + '\n')
                    self.outfile.flush()
                    try:
                        # probe a name that should not exist; NXDOMAIN means no
                        # wildcard DNS, so the next level is worth brute-forcing
                        self.resolvers[j].query('lijiejietest.' + cur_sub_domain)
                    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as e:
                        self.queue.put((999999999, '{next_sub}.' + sub))
                    except:
                        pass
        except Exception as e:
            print(e)
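# The {alphnum}/{alpha}/{num} expansion above replaces one placeholder per pass
# (replace(..., 1)) and re-queues the result, so names with several placeholders
# expand recursively. A standalone sketch of that idea using a plain list as queue:
ALPHNUM = 'abcdefghijklmnopqrstuvwxyz0123456789'

def expand(name):
    queue = [name]
    out = []
    while queue:
        item = queue.pop()
        if '{alphnum}' in item:
            for ch in ALPHNUM:
                queue.append(item.replace('{alphnum}', ch, 1))
        else:
            out.append(item)
    return out

print(len(expand('x{alphnum}{alphnum}')))  # 36 * 36 = 1296 candidates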
def _scan_worker(self):
    while self.url_queue.qsize() > 0:
        if time.time() - self.START_TIME > self.TIME_OUT:
            self.url_queue.queue.clear()
            print_msg('[ERROR] Timed out task: %s' % self.host)
            return
        try:
            item = self.url_queue.get(timeout=0.1)
        except:
            return
        try:
            if len(item) == 2:  # user script
                check_func = getattr(item[0], 'do_check')
                check_func(self, item[1])
                continue
            else:
                url_description, tag, status_to_match, content_type, content_type_no, root_only = item
                prefix = url_description['prefix']
                url = url_description['full_url']
                # print url
                url = url.replace('{sub}', self.domain_sub)
                if url.find('{hostname_or_folder}') >= 0:
                    _url = url[:url.find('{hostname_or_folder}')]
                    folders = _url.split('/')
                    for _folder in reversed(folders):
                        if _folder not in ['', '.', '..']:
                            url = url.replace('{hostname_or_folder}', _folder)
                            break
                url = url.replace('{hostname_or_folder}', self.domain_sub)
                url = url.replace('{hostname}', self.domain_sub)
        except Exception as e:
            logging.error('[_scan_worker Exception] [1] %s' % str(e))
            traceback.print_exc()
            continue
        if not item or not url:
            break
        # print '[%s]' % url.strip()
        try:
            status, headers, html_doc = self._http_request(url)
            cur_content_type = headers.get('content-type', '')

            if self.find_exclude_text(html_doc):  # excluded text found
                continue
            if ('html' in cur_content_type or 'text' in cur_content_type) and \
                    0 <= len(html_doc) <= 10:  # text too short
                continue
            if cur_content_type.find('image/') >= 0:  # exclude images
                continue

            valid_item = False
            if self.find_text(html_doc):
                valid_item = True
            else:
                if cur_content_type.find('application/json') >= 0 and \
                        not url.endswith('.json'):  # no json
                    continue
                if status != status_to_match and status != 206:
                    # status in [301, 302, 400, 404, 501, 502, 503, 505]
                    continue
                if tag:
                    if html_doc.find(tag) >= 0:
                        valid_item = True
                    else:
                        continue  # tag mismatch
                if content_type and cur_content_type.find(content_type) < 0 \
                        or content_type_no and cur_content_type.find(content_type_no) >= 0:
                    continue  # content type mismatch
                if self.has_404 or status != self._404_status:
                    if status_to_match in (200, 206) and status == 206:
                        valid_item = True
                    elif status_to_match and status != status_to_match:  # status mismatch
                        continue
                    elif status_to_match != 403 and status == 403:
                        continue
                    else:
                        valid_item = True
                if not self.has_404 and status in (200, 206) and url != '/' and not tag:
                    _len = len(html_doc)
                    _min = min(_len, self.len_404_doc)
                    if _min == 0:
                        _min = 10.0
                    if float(_len - self.len_404_doc) / _min > 0.3:
                        valid_item = True
                if status == 206 and tag == '' and \
                        cur_content_type.find('text') < 0 and cur_content_type.find('html') < 0:
                    valid_item = True

            if valid_item:
                m = re.search('<title>(.*?)</title>', html_doc)
                title = m.group(1) if m else ''
                self.lock.acquire()
                # print '[+] [Prefix:%s] [%s] %s' % (prefix, status, 'http://' + self.host + url)
                if prefix not in self.results:
                    self.results[prefix] = []
                _ = {'status': status, 'url': '%s%s' % (self.base_url, url), 'title': title}
                if _ not in self.results[prefix]:
                    self.results[prefix].append(_)
                self.lock.release()
            if len(self.results) >= 10:
                print '[Warning] Over 10 vulnerabilities found [%s], seems to be false positives.' % prefix
                self.url_queue.queue.clear()
        except Exception as e:
            logging.error('[_scan_worker.Exception][2][%s] %s' % (url, str(e)))
            traceback.print_exc()
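# One of the filters in _scan_worker() above: when a site has no real 404 page,
# compare the response length to the captured 404-page length and treat a >30%
# difference as a distinct page. A standalone arithmetic sketch of that check:
def differs_from_404(doc_len, len_404_doc):
    _min = min(doc_len, len_404_doc) or 10.0   # guard against division by zero
    return float(doc_len - len_404_doc) / _min > 0.3

print(differs_from_404(5200, 3100))  # True: ~67% longer than the 404 baseline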
                          scan_count, found_count, queue_size_list, tmp_dir, cdns))
            all_process.append(p)
            p.start()

        while all_process:
            for p in all_process:
                if not p.is_alive():
                    all_process.remove(p)
            groups_count = 0
            for c in queue_size_list:
                groups_count += c
            msg = '[*] %s found, %s scanned in %.1f seconds, %s groups left' % (
                found_count.value, scan_count.value, time.time() - start_time, groups_count)
            print_msg(msg)
            time.sleep(1.0)
    except KeyboardInterrupt as e:
        for p in all_process:
            p.terminate()
        print '[ERROR] User aborted the scan!'
    except Exception as e:
        print e

    msg = '[+] All Done. %s found, %s scanned in %.1f seconds.' % (
        found_count.value, scan_count.value, time.time() - start_time)
    print_msg(msg, line_feed=True)
    out_file_name = get_out_file_name(args[0], options)
    with open(out_file_name, 'w') as f:
        for _file in glob.glob(tmp_dir + '/*.txt'):
            with open(_file, 'r') as tmp_f: