def run(self):
    """Run every processing step registered in the Processor chain.

    Each name returned by Processor.get_process_chains() is looked up as a
    method on this instance and invoked.  A step that exceeds its time
    budget raises Func_timeout_error, which is logged together with the
    request URL; the remaining steps still run.
    """
    for step_name in Processor.get_process_chains():
        step = getattr(self, step_name)
        try:
            step()
        except Func_timeout_error as e:
            # One slow step must not abort the whole chain.
            LOGGER.warn(str(e) + self.request.url)
def get_domain_from_url(url):
    """Return the host part of *url*, or '' when none can be extracted.

    e.g. 'http://a.b.com/ads/asds' -> 'a.b.com'.  Only URLs shaped like
    scheme://host/path are handled here.
    """
    domain = ''
    if re.search(r'://.*?/', url):
        try:
            domain = url.split('//', 1)[1].split('/', 1)[0]
        except IndexError as e:
            LOGGER.warn('Get domain error,%s,%s' % (url, e))
    # Fixed: the original fell off the end and always returned None.
    return domain
def check_install():
    """Probe which WebDriver browsers can be started.

    Tries Chrome first, then PhantomJS; warns when neither works.
    Successfully started probe browsers are shut down again so the
    check does not leak browser processes.
    """
    try:
        br = webdriver.Chrome()
    except Exception as e:
        LOGGER.info(e)
        try:
            br = webdriver.PhantomJS()
        except Exception as e:
            LOGGER.info(e)
            LOGGER.warn('No browser is installed correctly!')
        else:
            br.quit()
    else:
        br.quit()
def clear(id):
    """Delete all traffic files belonging to task *id* from TRAFFIC_DIR.

    Matches file names like '<id>.traffic', '<id>.traffic1', ...
    Removal is best effort: failures are logged, not raised.
    """
    # re.escape: the id must match literally; previously the '.' separator
    # (and any regex metacharacter inside id) acted as a wildcard.
    pattern = re.compile(re.escape(id) + r'\.traffic\d*')
    traffic_path = [
        os.path.join(TRAFFIC_DIR, name)
        for name in os.listdir(TRAFFIC_DIR)
        if pattern.search(name)
    ]
    for path in traffic_path:
        try:
            os.remove(path)
        except Exception as e:
            LOGGER.warn(e)
def detect_param(request):
    """Extract request parameters as a dict.

    :param request: HttpRequest exposing .method, .url, .body, .get_header
    :return: dict mapping parameter name -> value (possibly empty)
    """
    param_dict = {}
    method, url, body = request.method, request.url, request.body
    if method == 'GET':
        url_parsed = urlparse.urlparse(url)
        param_dict = dict([(k, v[0])
                           for k, v in urlparse.parse_qs(
                               url_parsed.query).items()])
    elif method == 'POST':
        if body == '':
            return param_dict
        # whole body is JSON: {a:1}
        if re.search(r'^{.*}$', body):
            param_dict = Detector.detect_json(body)
        # body of the form name={...}: keep only the JSON part
        elif re.search(r'^.*?={.*?}$', body):
            # Fixed: group(1) is the JSON object; group() kept the
            # 'name=' prefix and made detect_json() always fail.
            body = re.search(r'^.*?=({.*?})$', body).group(1)
            param_dict = Detector.detect_json(body)
        # multipart bodies are ignored here
        elif request.get_header('Content-Type') and \
                'multipart/form-data; boundary=' in \
                request.get_header('Content-Type'):
            pass
        elif '&' not in body:
            param_dict = Detector.parse_by_token(body)
            if param_dict:
                return param_dict
        # classic form body: a=1&b=2
        else:
            for pair in body.split('&'):
                parts = pair.split('=')
                try:
                    param, value = parts[0], parts[1]
                except IndexError:
                    # token without '=': skip it
                    pass
                else:
                    # first occurrence of a parameter wins
                    if param not in param_dict:
                        param_dict[param] = value
    return param_dict
def get_cookie_ip(ip):
    """Return the stored cookie string for *ip*, or None.

    Cookie file layout (written elsewhere): line 0 is the cookie text,
    line 2 the expiry as a unix timestamp.  Returns None when the file is
    missing, malformed, or the cookie has expired.
    """
    domain_scope = ip
    cookie_file_path = os.path.join(COOKIE_DIR,
                                    '_'.join([domain_scope, 'cookie']))
    if not os.path.exists(cookie_file_path):
        return None
    with open(cookie_file_path, "r") as cookie_file:
        cookie_file_list = cookie_file.readlines()
    # Robustness: a truncated cookie file used to raise IndexError here.
    if len(cookie_file_list) < 3:
        LOGGER.warn('Cookie file of %s is malformed!' % domain_scope)
        return None
    expire = cookie_file_list[2]
    if int(time.time()) < int(expire):
        return cookie_file_list[0].strip()
    LOGGER.warn('Cookie of %s is expired!!!' % domain_scope)
class Traffic_generator(Process):
    """Process that replays a list of URLs to generate HTTP traffic."""

    # NOTE(review): class-level dict shared by every instance;
    # gen_traffic() mutates it (Cookie/Referer), so header values persist
    # across requests and instances — confirm this is intended.
    DEFAULT_HEADER = {
        'User-Agent':
            'Mozilla/2.0 (X11; Linux x86_64) AppleWebKit/237.36 '
            '(KHTML, like Gecko) Chrome/62.0.3322.146 Safari/237.36',
    }

    def __init__(self, id, url_list, coroutine):
        Process.__init__(self)
        self.id = id                  # task identifier
        self.url_list = url_list      # URLs to replay
        self.coroutine = coroutine    # gevent pool size

    def gen_traffic(self, url):
        """Request *url* once (10s budget) with cookie/referer headers set."""
        domain = get_domain_from_url(url)
        # add cookie to DEFAULT_HEADER
        self.DEFAULT_HEADER['Cookie'] = get_cookie(domain)
        # Fixed: original scheme was the typo 'https"//'.
        self.DEFAULT_HEADER['Referer'] = 'https://' + domain + '/'
        request = HttpRequest(method='GET', url=url,
                              headers=self.DEFAULT_HEADER, body='')
        req = urllib2.Request(url=url, headers=self.DEFAULT_HEADER)
        # Timeout(10, False): on expiry nothing is raised, the request is
        # simply abandoned.
        with gevent.Timeout(10, False):
            try:
                resp = urllib2.urlopen(req)
            except urllib2.URLError as e:
                REQUEST_ERROR.append(('gen_traffic()', url, e.reason))
            except CertificateError:
                REQUEST_ERROR.append(
                    ('gen_traffic()', url, 'ssl.CertificateError'))
            except (ValueError, BadStatusLine, SocketError, InvalidURL) as e:
                LOGGER.warn(e)
def run(self):
    """Consume traffic objects from traffic_queue until a None sentinel.

    Each traffic object is run through a Processor; when the processor
    reports reflection, rfxss() may extend the shared case_list.
    """
    while True:
        try:
            traffic_obj = traffic_queue.get(timeout=3)
        except Empty:
            LOGGER.warn('traffic_queue is empty!')
            time.sleep(1)
            continue
        # None is the producer's shutdown sentinel.
        if traffic_obj is None:
            break
        processor = Processor(traffic_obj)
        processor.run()
        if processor.reflect:
            rtn = self.rfxss(processor)
            if rtn and isinstance(rtn, list):
                case_list.extend(rtn)
def save(result, id):
    """Persist scan *result* for task *id* as JSON under RESULT_DIR.

    :param result: iterable of (vul, location, poc) tuples
    :param id: task identifier, used in the output file name
    """
    result_dict = {}
    if result:
        for vul, location, poc in result:
            LOGGER.warn('%s found in: %s\n' % (vul, location))
            # setdefault replaces the membership-test-then-create dance.
            result_dict.setdefault(vul, []).append((location, poc))
        print_result_table(result)
    result_file = os.path.join(
        RESULT_DIR,
        id + '-' + datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
        + '.json')
    with open(result_file, 'w') as json_f:
        json.dump(result_dict, json_f)
    LOGGER.info('The result of %s has been saved to %s' % (id, result_file))
def print_result_table(result):
    """Pretty-print scan results as an ID/VUL/URL/POC table.

    :param result: iterable of (vul, url, poc) tuples
    """
    table = PrettyTable(['ID', 'VUL', 'URL', 'POC'])
    table.align = 'l'
    table.sortby = 'ID'
    if result:
        # enumerate replaces the hand-rolled counter (which also shadowed
        # the builtin `id`).
        for row_id, (vul, url, poc) in enumerate(result, 1):
            table.add_row([row_id, vul, url, poc])
    try:
        # print() form is valid on both Python 2 and 3.
        print(table)
    except UnicodeDecodeError as e:
        LOGGER.warn(e)
def get_cookie(target_domain):
    """Return the stored cookie for *target_domain*'s registered domain.

    The cookie scope is the last two labels ('.example.com' for
    'a.example.com').  Returns None when no cookie file exists, the file
    is malformed, or the cookie has expired.

    NOTE(review): two-label truncation is wrong for suffixes like
    'co.uk' — confirm against how cookie files are written.
    """
    if '.' not in target_domain:
        return None
    labels = target_domain.split('.')
    domain_scope = '.' + labels[-2] + '.' + labels[-1]
    cookie_file_path = os.path.join(COOKIE_DIR,
                                    '_'.join([domain_scope, 'cookie']))
    # cookie not exists
    if not os.path.exists(cookie_file_path):
        return None
    with open(cookie_file_path, "r") as cookie_file:
        cookie_file_list = cookie_file.readlines()
    # Robustness: a truncated cookie file used to raise IndexError here.
    if len(cookie_file_list) < 3:
        LOGGER.warn('Cookie file of %s is malformed!' % domain_scope)
        return None
    expire = cookie_file_list[2]
    if int(time.time()) < int(expire):
        return cookie_file_list[0].strip()
    LOGGER.warn('Cookie of %s is expired!!!' % domain_scope)
def _retry(self, f):
    """Call *f* until it succeeds or retries are exhausted.

    Operational errors (memory allocation, unexpected disconnect) are
    retried up to self._max_retries times, sleeping self._retry_period
    between attempts; any other database error is logged and re-raised
    immediately.
    """
    count = 0
    while True:
        try:
            return f()
        # http://initd.org/psycopg/docs/module.html#psycopg2.DatabaseError
        # handle operational error - memory allocation, unexpected disconnect
        except psycopg2.OperationalError as oe:
            count += 1
            if count < self._max_retries:
                LOGGER.warn("Transient Error Received %s ", oe)
                time.sleep(self._retry_period)
            else:
                LOGGER.error("Unrecoverable Error %s", oe)
                # bare raise preserves the original traceback
                raise
        # other database errors - integrity, internal, programming error etc
        except psycopg2.DatabaseError as de:
            LOGGER.error("Database Error %s", de)
            raise
def run(self):
    """Open every GET case in a browser, skipping already-blocked paths.

    Picks the browser backend from self.browser, seeds cookies from
    case_list[0].url's top-domain, then visits each case URL.  Timeouts
    and broken responses mark the path as blocked; on timeout the
    browser may be replaced via handle_block().
    """
    blocked_urls = []
    if self.browser == 'chrome':
        browser = chrome()
    elif self.browser == 'chrome-headless':
        browser = chrome(headless=True)
    else:
        browser = phantomjs()
    # add cookie, the scope is case_list[0].url's top-domain
    add_cookie(browser, case_list[0].url)
    for case in self.case_list:
        if case.method == 'POST':
            continue
        url = case.url
        # NOTE(review): '/'.join(url.split('/', 3)) reproduces the full
        # URL, so blocking is effectively per-URL; 'splited[:3]'
        # (scheme+host) may have been intended — confirm before changing.
        path = '/'.join(url.split('/', 3))
        if path in blocked_urls:
            continue
        try:
            browser.get(url)
        except TimeoutException as e:
            LOGGER.warn(e)
            # mark if browser get() exception
            REQUEST_ERROR.append(('Openner get()', url, 'timeout'))
            # browser blocked sometimes.
            rtn = self.handle_block(browser)
            if rtn is not None:
                browser = rtn
            blocked_urls.append(path)
        except BadStatusLine as e:
            LOGGER.warn(e)
            REQUEST_ERROR.append(('Render get()', url, 'BadStatusLine'))
            blocked_urls.append(path)
def detect_json(json_str):
    """Parse a JSON-ish body into a dict of str -> str.

    :param json_str: JSON-like string, e.g. "{'a':'x','b':'y'}"
    :return: dict with string values kept and int values coerced to str;
             other value types are dropped.  Empty dict on parse failure.
    """
    result_dict = {}
    # Fixed: the replace() result was discarded (str is immutable), so
    # single-quoted bodies always failed to parse.
    json_str = json_str.replace('\'', '\"')
    try:
        json_dict = json.loads(json_str)
    except ValueError:
        LOGGER.warn('Error in detect_json():%s' % json_str)
    else:
        # coerce values to str
        for k, v in json_dict.items():
            if isinstance(v, str):
                # Fixed: update(k=v) stored everything under the literal
                # key 'k'; store under the real key instead.
                result_dict[k] = v
            elif isinstance(v, int):
                result_dict[k] = str(v)
    return result_dict
def run(self):
    """Open every URL in self.url_list in a browser, skipping blocked paths.

    Picks the browser backend from self.browser, seeds cookies from
    url_list[0]'s top-domain, then visits each URL.  Timeouts and broken
    responses mark the path as blocked; on timeout the browser may be
    replaced via handle_block().
    """
    blocked_urls = []
    if self.browser == 'chrome':
        browser = chrome()
    elif self.browser == 'chrome-headless':
        browser = chrome(headless=True)
    else:
        browser = phantomjs()
    # add cookie, the scope is url_list[0]'s top-domain
    add_cookie(browser, self.url_list[0])
    for url in self.url_list:
        path = '/'.join(url.split('/', 3))
        # if not block
        if path in blocked_urls:
            continue
        try:
            browser.get(url)
        except TimeoutException as e:
            LOGGER.warn(e)
            # save if browser get() exception
            REQUEST_ERROR.append(('Render get()', url, 'timeout'))
            # browser blocks sometimes.
            rtn = self.handle_block(browser)
            if rtn is not None:
                browser = rtn
            blocked_urls.append(path)
        except BadStatusLine as e:
            LOGGER.warn(e)
            REQUEST_ERROR.append(('Render get()', url, 'BadStatusLine'))
            blocked_urls.append(path)
        except UnicodeDecodeError:
            pass
from xml.etree import cElementTree
from selenium.common.exceptions import TimeoutException, UnexpectedAlertPresentException
from config import TRAFFIC_DIR, REQUEST_ERROR, REDIRECT, MULTIPART
from cookie import get_cookie
from model import Case, HttpRequest, HttpResponse
from util import functimeout, Func_timeout_error, change_by_param, list2dict, chrome, phantomjs, \
    getResponseHeaders, check_type, add_cookie, \
    get_domain_from_url, divide_list, make_request, gen_poc, get_api
import gevent
from gevent import pool
from socket import error as SocketError
from httplib import InvalidURL
try:
    from bs4 import BeautifulSoup
except ImportError as e:
    # bs4 is optional; features that need it will fail later if missing.
    LOGGER.warn(e)

# File extensions / query patterns of static resources the scanner ignores.
static_reg = re.compile(
    r'\.html$|\.htm$|\.shtml$|\.css$|\.png$|\.js$|\.dpg$|\.jpg$|\.svg$|\.jpeg$|'
    r'\.gif$|\.webp$|\.ico$|\.woff$|\.ttf$|css\?|js\?|jpg\?|png\?|woff\?v='
    r'|woff2\?v=|ttf\?|woff\?|woff2$|html\?v=|ico$')

burp_traffic = []
manager = Manager()
# Cross-process shared lists (multiprocessing.Manager proxies).
case_list = manager.list()
openner_result = manager.list()  # for deduplicate
def put_burp_to_trafficqueue(self):
    """Parse a burpsuite XML export and enqueue its request/response pairs.

    GET items need a query string and POST items a body; static resources
    are filtered out, and multipart POSTs are saved to MULTIPART instead
    of the queue.
    """
    if not os.path.exists(self.burp):
        return
    import base64
    from xml.etree import cElementTree as ET
    from model import HttpRequest, HttpResponse
    with open(self.burp) as f:
        xmlstr = f.read()
    try:
        root = ET.fromstring(xmlstr)
    except cElementTree.ParseError as e:
        LOGGER.error('Parse burpsuite data error: ' + str(e))
        exit(0)
    for child in root:
        if child.tag != 'item':
            continue
        req_headers = {}
        resp_headers = {}
        code = ''
        request, response = '', ''
        for child2 in child:
            if child2.tag == 'method':
                method = child2.text
            if child2.tag == 'url':
                url = child2.text
                # static url in burp
                if static_reg.search(url):
                    break
            if child2.tag == 'status':
                code = child2.text
            if child2.tag == 'request':
                # base64 decode
                req_text = base64.b64decode(child2.text)
                headers_list = req_text.split(
                    '\r\n\r\n', 1)[0].split('\r\n')[1:]
                for header in headers_list:
                    try:
                        # Fixed: split once — header values may themselves
                        # contain ': ' and were previously truncated.
                        header_key, header_value = header.split(': ', 1)
                        if header_key not in req_headers:
                            req_headers[header_key] = header_value
                    # split header error
                    except ValueError as e:
                        LOGGER.warn(e)
                body = req_text.split('\r\n\r\n', 1)[1]
                request = HttpRequest(method, url, req_headers, body)
            if child2.tag == 'response':
                resp_text = child2.text
                # if response is not None
                if resp_text:
                    # base64 decode
                    resp_text = base64.b64decode(resp_text)
                    # NOTE(review): this is the whole status line, not just
                    # the reason phrase.
                    reason = resp_text.split('\r\n')[0]
                    headers_list = resp_text.split(
                        '\r\n\r\n', 1)[0].split('\r\n')[1:]
                    for header in headers_list:
                        try:
                            header_key, header_value = header.split(': ', 1)
                            if header_key not in resp_headers:
                                resp_headers[header_key] = header_value
                        except ValueError as e:
                            # Robustness: a malformed response header used
                            # to crash the whole import.
                            LOGGER.warn(e)
                    data = resp_text.split('\r\n\r\n', 1)[1]
                    response = HttpResponse(code, reason, resp_headers, data)
        if request and response:
            if request.method == 'GET' and '?' in request.url:
                # filter static URL
                if not static_reg.search(url):
                    burp_traffic.append((request, response))
                    traffic_queue.put((request, response))
            elif request.method == 'POST' and request.body:
                content_type = request.get_header('Content-Type')
                # save multipart
                if content_type and \
                        'multipart/form-data; boundary=' in content_type:
                    MULTIPART.append((request, response))
                else:
                    burp_traffic.append((request, response))
                    traffic_queue.put((request, response))
def check_url(url):
    """Abort the program when *url* is unreachable within 20 seconds."""
    try:
        urllib2.urlopen(url, timeout=20)
    except Exception as e:
        LOGGER.warn('Check url error: ' + str(e))
        # NOTE(review): exits with status 0 even on failure — confirm
        # whether a non-zero exit code was intended.
        exit(0)
def get_domain_from_url(url):
    """get domain from url"""
    domain = ''
    # url is http://a.b.com/ads/asds
    if re.search(r'://.*?/', url):
        try:
            domain = url.split('//', 1)[1].split('/', 1)[0]
        except IndexError, e:
            LOGGER.warn('Get domain error,%s,%s' % (url, e))
    # http://a.b.com?a=adsd
    elif re.search(r'://.*?\?', url):
        try:
            domain = url.split('//', 1)[1].split('?', 1)[0]
        except IndexError, e:
            LOGGER.warn('Get domain error,%s,%s' % (url, e))
    # NOTE(review): '.*?' can match empty, so this branch catches any
    # remaining '://' URL (no path, no query).
    elif re.search(r'://.*?', url):
        try:
            domain = url.split('//', 1)[1].split('/', 1)[0]
        except IndexError, e:
            LOGGER.warn('Get domain error,%s,%s' % (url, e))
    # url is a.b.com/a/b/c, a.b.com, /a/b/c,
    elif re.search(r'/', url):
        # schemeless URL: candidate host is everything before the first '/'
        value = url.split('/', 1)[0]
        if value == '':
            pass
        elif value == '.':
            pass
        elif '.' not in value:
            pass
        # NOTE(review): source appears truncated here — this 'elif' has no
        # body and no 'return' statement is visible; recover the rest of
        # this function from version control before modifying it.
        elif domain == '..':
def add_cookie(browser, url):
    """Visit *url* once so the browser has a domain context for cookies.

    Any failure on this first visit is logged only; the caller proceeds.
    """
    try:
        browser.get(url)
    except Exception as e:
        LOGGER.warn('First visit Error:%s' % e)