def process_uri(self, uri):
	sql_driver = MySQLDriver(self.db_name)
	output_store = OutputStore(self.db_name)
	phantom_driver = PhantomDriver('--ignore-ssl-errors=true --ssl-protocol=any', 'wbxr_logger.js')

	# this can be higher or lower depending on network load
	# generally, 90 seems to be fine, so keep with it
	try:
		phantom_output = phantom_driver.execute(uri, 90)
	except:
		print("\t\t%-50s Phantomjs Did Not Return." % uri[:50])
		sql_driver.log_error(uri, "FAIL: Phantomjs Did Not Return.")
		return

	if re.match('^FAIL.+', phantom_output):
		print("\t\t%-50s Phantom Error\n\t%s" % (uri[:50], phantom_output))
		sql_driver.log_error(uri, phantom_output)
	else:
		print("\t\t%-50s %s" % (uri[:50], output_store.store(uri, phantom_output)))

	# closes our db connections
	sql_driver.close()
	output_store.close()
	return
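# --- editor's illustrative sketch (not part of webXray) ---
# process_uri() above relies on a simple convention: phantom_driver.execute()
# returns either a string starting with 'FAIL' or a blob containing JSON.
# the stub below only exists to show that convention in isolation;
# StubPhantomDriver and its canned output are hypothetical.
import re

class StubPhantomDriver:
	def execute(self, uri, timeout):
		# a real driver shells out to phantomjs; here we fake a timeout failure
		return 'FAIL: no response after %s seconds for %s' % (timeout, uri)

if __name__ == '__main__':
	phantom_output = StubPhantomDriver().execute('http://example.com', 90)
	if re.match('^FAIL.+', phantom_output):
		# in process_uri() this is where sql_driver.log_error() would be called
		print('would log error: %s' % phantom_output)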
class OutputStore:
	def __init__(self, dbname):
		self.uri_parser = ParseURI()
		self.sql_driver = MySQLDriver(dbname)
	# end init

	def store(self, uri, phantom_output):
		# parse out the json from our phantom_output
		# sometimes phantom prints out errors before the json (despite us turning
		# it off!), so we match inside of the {}s to get json only
		try:
			data = json.loads(re.search('(\{.+\})', phantom_output).group(1))
		except Exception as e:
			self.sql_driver.log_error(uri, "Could Not Load JSON: %s" % e)
			return 'Could Not Load JSON'

		# we need to parse the domain to determine if requests are local or 3rd party
		# we need pubsuffix and tld for later analysis so store them now
		origin_domain_pubsuffix_tld = self.uri_parser.get_domain_pubsuffix_tld(uri)
		origin_domain = origin_domain_pubsuffix_tld[0]
		origin_pubsuffix = origin_domain_pubsuffix_tld[1]
		origin_tld = origin_domain_pubsuffix_tld[2]

		if re.match('^Exception.+', origin_domain):
			self.sql_driver.log_error(uri, 'Could not parse TLD for %s' % uri)
			return 'Could not parse TLD for %s' % uri

		page_domain_id = self.sql_driver.add_domain(origin_domain, origin_pubsuffix, origin_tld)

		# newFollowLogger is giving us the following JSON data:
		# note source is null for now, but store anyway
		# big_out = {
		#	final_uri: final_uri,
		#	title: page.title,
		#	meta_desc : meta_desc,
		#	requested_uris: JSON.stringify(requested_uris),
		#	received_uris: JSON.stringify(received_uris),
		#	cookies: phantom.cookies,
		#	source: 'NULL',
		# };

		# we are now storing uris with and without args and saving args
		# we must unquote the uri to get back to its original state so we can parse it
		start_uri_original = urllib.parse.unquote(uri)

		try:
			start_uri_no_args = re.search('^(.+?)\?.+$', start_uri_original).group(1) # start uri no args
		except:
			start_uri_no_args = uri

		try:
			start_uri_args = re.search('^.+(\?.+)$', start_uri_original).group(1) # start uri args
		except:
			start_uri_args = 'NULL'

		# same for the final uri (this is where we are after potential redirects)
		final_uri = re.sub('\"', '', json.dumps(data["final_uri"]))
		final_uri_original = urllib.parse.unquote(final_uri)

		try:
			final_uri_no_args = re.search('^(.+?)\?.+$', final_uri_original).group(1) # final uri no args
		except:
			final_uri_no_args = final_uri

		try:
			final_uri_args = re.search('^.+(\?.+)$', final_uri_original).group(1) # final uri args
		except:
			final_uri_args = 'NULL'

		# add page
		# json.dumps to make sure strings go out ok for db
		page_id = self.sql_driver.add_page(
			str(re.sub('\"', '', json.dumps(data["title"]))),
			str(re.sub('\"', '', json.dumps(data["meta_desc"]))),
			uri,
			start_uri_no_args,
			start_uri_args,
			final_uri,
			final_uri_no_args,
			final_uri_args,
			str(re.sub('\"', '', json.dumps(data["source"]))),
			str(re.sub('\"', '', json.dumps(data["requested_uris"]))),
			str(re.sub('\"', '', json.dumps(data["received_uris"]))),
			page_domain_id)

		for cookie in data["cookies"]:
			# store external cookies, uri_parser fails on non-http, we should fix this
			# right now a lame hack is to prepend http://
			cookie_domain_pubsuffix_tld = self.uri_parser.get_domain_pubsuffix_tld("http://"+cookie["domain"])
			cookie_domain = cookie_domain_pubsuffix_tld[0]
			cookie_pubsuffix = cookie_domain_pubsuffix_tld[1]
			cookie_tld = cookie_domain_pubsuffix_tld[2]

			# something went wrong, but carry on...
			if re.match('^Exception.+', cookie_domain):
				self.sql_driver.log_error(uri, 'Error parsing cookie: '+cookie_domain)
				continue

			# this is a 3party cookie
			if origin_domain != cookie_domain:
				cookie_domain_id = self.sql_driver.add_domain(cookie_domain, cookie_pubsuffix, cookie_tld)

				# name and domain are required, so if they fail we just continue
				try: name = cookie["name"]
				except: continue

				try: domain = cookie_domain
				except: continue

				# these are optional, keep going with "N/A" vals
				try: secure = cookie["secure"]
				except: secure = "N/A"

				try: path = cookie["path"]
				except: path = "N/A"

				try: expires = cookie["expires"]
				except: expires = "N/A"

				try: httponly = cookie["httponly"]
				except: httponly = "N/A"

				try: expiry = cookie["expiry"]
				except: expiry = "N/A"

				try: value = cookie["value"]
				except: value = "N/A"

				cookie_id = self.sql_driver.add_cookie(
					name, secure, path, domain,
					expires, httponly, expiry, value,
					cookie_domain_id)
				self.sql_driver.add_cookie_to_page(cookie_id, page_id)

		for request in data["requested_uris"]:
			# if the request starts with "data" we can't parse tld anyway, so skip
			if re.match('^(data|about|chrome).+', request):
				continue

			# get domain, pubsuffix, and tld from request
			requested_domain_pubsuffix_tld = self.uri_parser.get_domain_pubsuffix_tld(request)
			requested_domain = requested_domain_pubsuffix_tld[0]
			requested_pubsuffix = requested_domain_pubsuffix_tld[1]
			requested_tld = requested_domain_pubsuffix_tld[2]

			# see if we got back what we requested, if not a few things may have happened
			# 	* malformed uri
			# 	* resource is gone or never existed
			# 	* network latency (ie it didn't arrive in window specified)
			# 	* we could be behind a firewall or censorship mechanism (eg gfw, golden shield)
			# 	* our IP is blacklisted b/c we are totally a bot X-D
			# the point being, interpret this value with an open mind
			if request in data['received_uris']:
				received = '1'
			else:
				received = '0'

			# catch exceptions
			if re.match('^Exception.+', requested_domain):
				self.sql_driver.log_error(uri, 'Error parsing element request: '+request)
				continue

			# store new elements
			if origin_domain != requested_domain:
				full_uri = request
				try:
					element_uri = re.search('^(.+?)\?.+$', full_uri).group(1) # element uri no args
				except:
					element_uri = full_uri

				# attempt to parse off the extension
				try:
					element_extension = re.search('\.([0-9A-Za-z]+)$', element_uri).group(1).lower()
				except:
					element_extension = "NULL"

				# figure out what type of element it is
				if element_extension in ('png', 'jpg', 'jpgx', 'jpeg', 'gif', 'svg', 'bmp', 'tif', 'tiff', 'webp', 'srf'):
					element_type = 'image'
				elif element_extension in ('js', 'javascript'):
					element_type = 'javascript'
				elif element_extension in ('json', 'jsonp', 'xml'):
					element_type = 'data_structured'
				elif element_extension == 'css':
					element_type = 'style_sheet'
				elif element_extension in ('woff', 'ttf', 'otf'):
					element_type = 'font'
				elif element_extension in ('htm', 'html', 'shtml'):
					element_type = 'page_static'
				elif element_extension in ('php', 'asp', 'jsp', 'aspx', 'ashx', 'pl', 'cgi', 'fcgi'):
					element_type = 'page_dynamic'
				elif element_extension in ('swf', 'fla'):
					element_type = 'Shockwave Flash'
				elif element_extension == 'NULL':
					element_type = 'NULL'
				else:
					element_type = 'unknown'

				try:
					args = re.search('^.+(\?.+)$', full_uri).group(1) # element uri args
				except:
					args = 'NULL'

				element_domain_id = self.sql_driver.add_domain(requested_domain, requested_pubsuffix, requested_tld)
				element_id = self.sql_driver.add_element(
					"NULL", full_uri, element_uri, received,
					element_extension, element_type, args,
					element_domain_id)
				self.sql_driver.add_element_to_page(element_id, page_id)

		return 'Successfully Added to DB'
	# end store()

	def close(self):
		# close mysql connections
		self.uri_parser.close()
		self.sql_driver.close()
		return
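# --- editor's illustrative sketch (not part of webXray) ---
# store() above repeatedly splits a uri into its path and query-string args
# with the two regexes shown; here is that split as a standalone, runnable
# helper so the behavior is easy to check. split_uri_args is a hypothetical
# name; the patterns and the 'NULL' fallback mirror the class above.
import re

def split_uri_args(uri):
	try:
		uri_no_args = re.search('^(.+?)\?.+$', uri).group(1)
	except:
		uri_no_args = uri
	try:
		uri_args = re.search('^.+(\?.+)$', uri).group(1)
	except:
		uri_args = 'NULL'
	return (uri_no_args, uri_args)

if __name__ == '__main__':
	# expected: ('http://example.com/page', '?id=1&ref=2')
	print(split_uri_args('http://example.com/page?id=1&ref=2'))
	# expected: ('http://example.com/page', 'NULL')
	print(split_uri_args('http://example.com/page'))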
class ParseURI:
	def __init__(self):
		# load up the tld list now as we only hit it once this way
		self.pubsuffix_list = self.get_pubsuffix_list()

		# this is to speed up tld lookups with a hash table
		# we can share among all runs over time
		self.sql_driver = MySQLDriver()

		# check if sub_domain_tld exists, if not create one
		# this should only really happen once
		#
		# bug?
		# when using pool this can be done several times in tandem
		# resulting in some lost entries - e.g. two threads see db not exist, the first
		# one to break the tie will create a new blank db and add a new entry
		# it is possible the slightly slower second thread will also create a blank db
		# erasing the entry of the first thread, however, after the first round the db
		# should not be re-created again
		#
		# it should be emphasized this db is ONLY used to speed up parsing the tld
		# (partially because my regex method is slow and should be refactored!)
		# the data in this db is NOT used for analysis - thus deleting a few records
		# will have a minor impact on speed for the first few entries, and then be
		# wholly irrelevant
		#
		# still...it irks me that this entire class could be smarter, so I will
		# fix it another day
		if self.sql_driver.check_db_exist('sub_domain_tld') == False:
			self.sql_driver.create_sub_domain_tld_db()
		else:
			self.sql_driver.db_switch('sub_domain_tld')
	# end __init__

	def get_pubsuffix_list(self):
		# get the file from the local dir
		pubsuffix_raw_list = open(os.path.join(os.path.dirname(__file__), './resources/pubsuffix/patchedPublicSuffixList-20150514.txt'), 'r')
		pubsuffix_list = []

		for line in pubsuffix_raw_list:
			# the last part of the list is private domains we don't care about, so stop reading
			if re.match("^// ===BEGIN PRIVATE DOMAINS===", line):
				break

			# skip lines that are comments or blank, add others to list
			# also remove leading ., !, and * as they break the regex later
			if not re.match("^//.+$|^$", line):
				pubsuffix_list.append(re.sub('^[\!\*]\.?', '', line.strip()))

		# we sort long->short so we can match deeper TLD first (ie, ac.uk *before* .uk)
		pubsuffix_list.sort(key=len, reverse=True)

		# to speed things up we move the most common TLDs to the front of the list
		# that said, if it's not one of these we take a MASSIVE performance hit
		popular_pubsuffixs = ['gov', 'co.uk', 'mil', 'org', 'net', 'edu', 'com']
		for popular_pubsuffix in popular_pubsuffixs:
			pubsuffix_list.remove(popular_pubsuffix)
			pubsuffix_list.insert(0, popular_pubsuffix)

		return pubsuffix_list
	# end get_pubsuffix_list

	def get_domain_pubsuffix_tld(self, uri):
		# pull out the first chunk of the domain, possibly including subdomains
		# if this fails, the domain is malformed (or not http(s) at least), so bail
		# only handles if the domain is followed by $, ?, \, or / - other cases will break...
		# adding to handle a port at the end, ie blah.com:8080, so rule is '|\:[0-9].+
		# adding \.? at the top to handle a leading '.'
		try:
			sub_domain = re.search('^https?:\/\/\.?(.*?)(\:[0-9].+)?($|\/|\?|\\\\|=)', uri).group(1)
		except:
			return ('Exception: Unable to parse: '+uri[:50],
					'Exception: Unable to parse: '+uri[:50],
					'Exception: Unable to parse: '+uri[:50])

		# strip off leading or trailing '.', as it breaks things
		sub_domain = re.sub('(^\.|\.$)', '', sub_domain)

		# see if it is an ip address, if so return it
		# this is only the ipv4 pattern though, maybe we should think about ipv6?
		try:
			re.search('(^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$)', sub_domain).group(1)
			return (sub_domain, 'None', 'None')
		except:
			pass

		# first, we will see if it is in the db already
		record = self.sql_driver.sub_domain_exists(sub_domain)

		if record:
			return record

		# the pubsuffix list is sorted large -> small,
		# so the first match is the one we want
		# after we find it, break out and continue
		for pubsuffix_try in self.pubsuffix_list:
			if re.match(".+\."+pubsuffix_try+"$", sub_domain):
				pubsuffix = pubsuffix_try
				break

		# if we didn't find the pubsuffix we fail
		try:
			pubsuffix
		except:
			return ('Exception: Unable to parse: '+uri[:50],
					'Exception: Unable to parse: '+uri[:50],
					'Exception: Unable to parse: '+uri[:50])

		# if we have sub.domain.tld_match, we just want domain.tld_match
		# there is no reason this should fail if we get this far, but try/except to be safe
		try:
			domain = re.search('(.*\.)?\.?(.*\.'+pubsuffix+')$', sub_domain).group(2)
		except:
			return ('Exception: Unable to parse: '+uri[:50],
					'Exception: Unable to parse: '+uri[:50],
					'Exception: Unable to parse: '+uri[:50])

		# grab the tld off of the pubsuffix
		# if the regex fails the tld and pubsuffix are the same
		try:
			tld = re.search('\.([0-9A-Za-z]+)$', pubsuffix).group(1).lower()
		except:
			tld = pubsuffix

		self.sql_driver.add_sub_domain_pubsuffix_tld(sub_domain, domain, pubsuffix, tld)

		return (domain, pubsuffix, tld)
	# end get_domain_pubsuffix_tld

	def close(self):
		# close mysql connection
		self.sql_driver.close()
		return
	# end close
# end ParseURI
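# --- editor's illustrative sketch (not part of webXray) ---
# the first step of get_domain_pubsuffix_tld() is the sub-domain regex; since
# the full method needs a MySQL connection, here is just that regex step pulled
# out so it can be run standalone. extract_sub_domain is a hypothetical name;
# the pattern is copied verbatim from the method above.
import re

def extract_sub_domain(uri):
	try:
		return re.search('^https?:\/\/\.?(.*?)(\:[0-9].+)?($|\/|\?|\\\\|=)', uri).group(1)
	except:
		return None

if __name__ == '__main__':
	# expected: 'www.example.co.uk'
	print(extract_sub_domain('https://www.example.co.uk/some/page?x=1'))
	# expected: 'blah.com' (the port is absorbed by the optional group)
	print(extract_sub_domain('http://blah.com:8080/index.html'))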
def process_url(self, url):
	"""
	this function takes a specified url, loads it in the browser (phantomjs or chrome)
	and returns json-formatted output with relevant request data, etc.

	the output_store class then puts this data in the db for later analysis
	"""

	# set up sql connection used to log errors and do timeseries checks
	if self.db_engine == 'mysql':
		from webxray.MySQLDriver import MySQLDriver
		sql_driver = MySQLDriver(self.db_name)
	elif self.db_engine == 'postgres':
		from webxray.PostgreSQLDriver import PostgreSQLDriver
		sql_driver = PostgreSQLDriver(self.db_name)
	elif self.db_engine == 'sqlite':
		from webxray.SQLiteDriver import SQLiteDriver
		sql_driver = SQLiteDriver(self.db_name)

	# output store does the heavy lifting of analyzing browser output and storing to db
	output_store = OutputStore(self.db_engine, self.db_name)

	# support for loading same page with multiple browsers - purposefully undocumented
	for browser_type in self.browser_types:
		# import and set up specified browser driver
		# note we need to set up a new browser each time to get a fresh profile
		if browser_type == 'phantomjs':
			browser_driver = PhantomDriver()
		elif browser_type == 'chrome':
			browser_driver = ChromeDriver(ua=self.chrome_ua)

		# support for timeseries collections - purposefully undocumented
		if self.allow_timeseries:
			page_last_accessed_browser_type = sql_driver.get_page_last_accessed_by_browser_type(url, browser_type)
			if page_last_accessed_browser_type:
				time_diff = datetime.now() - page_last_accessed_browser_type[0]
				if time_diff < timedelta(minutes=self.interval_minutes) and page_last_accessed_browser_type[1] == browser_type:
					print("\t\t%-50s Scanned too recently with %s" % (url[:50], browser_type))
					continue

		# attempt to load the page, fail gracefully
		try:
			browser_output = browser_driver.get_webxray_scan_data(url, self.browser_wait)
		except:
			print('\t\t%-50s Browser %s Did Not Return' % (url[:50], browser_type))
			sql_driver.log_error(url, 'Unable to load page')
			sql_driver.close()
			return

		# if there was a problem we log the error
		if browser_output['success'] == False:
			print('\t\t%-50s Browser %s Error: %s' % (url[:50], browser_type, browser_output['result']))
			sql_driver.log_error(url, 'Unable to load page')
			sql_driver.close()
			return
		else:
			# no error, treat result as browser output
			browser_output = browser_output['result']

		# attempt to store the output
		if output_store.store(url, browser_output):
			print('\t\t%-50s Success with %s' % (url[:50], browser_type))
		else:
			print('\t\t%-50s Fail with %s' % (url[:50], browser_type))
			sql_driver.log_error(url, 'Unable to load page')

	sql_driver.close()
	return
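# --- editor's illustrative sketch (not part of webXray) ---
# process_url() above expects get_webxray_scan_data() to hand back a dict with
# a 'success' flag and a 'result' payload; on success the payload is the
# browser output that OutputStore.store() consumes. the sample values below are
# hypothetical, only the key names are taken from the code in this section.
failed_scan = {
	'success': False,
	'result': 'Unable to connect to host',  # human-readable reason, printed in the error message
}

successful_scan = {
	'success': True,
	'result': {
		# subset of the fields store() reads later in this section
		'browser_type': 'chrome',
		'browser_version': '67.0',
		'browser_wait': 30,
		'title': 'Example Domain',
		'meta_desc': '',
		'final_url': 'https://example.com/',
		'load_time': 1200,
		'source': '<html></html>',
		'all_links': [],
		'cookies': [],
		'processed_requests': {},
	},
}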
def run(self, pool_size):
	"""
	this function manages the parallel processing of the url list using the python Pool class

	the function first reads the list of urls out of the page_lists directory, cleans it
	for known issues (eg common binary files), and issues with idna encoding (tricky!)

	then the page list is mapped to the process_url function and executed in parallel

	pool_size is defined in the run_webxray.py file, see details there
	"""

	# the list of urls MUST be in the page_lists directory!
	try:
		url_list = open(os.path.dirname(os.path.abspath(__file__)) + '/../page_lists/' + self.pages_file_name, 'r', encoding='utf-8')
	except:
		print('File "%s" does not exist, file must be in ./page_lists directory. Exiting.' % self.pages_file_name)
		exit()

	# set up sql connection used to determine if items are already in the db
	if self.db_engine == 'mysql':
		from webxray.MySQLDriver import MySQLDriver
		sql_driver = MySQLDriver(self.db_name)
	elif self.db_engine == 'postgres':
		from webxray.PostgreSQLDriver import PostgreSQLDriver
		sql_driver = PostgreSQLDriver(self.db_name)
	elif self.db_engine == 'sqlite':
		from webxray.SQLiteDriver import SQLiteDriver
		sql_driver = SQLiteDriver(self.db_name)

	# this set gets mapped to the Pool, very important!
	urls_to_process = set()

	# simple counter used solely for updates to CLI
	count = 0

	print('\t------------------------')
	print('\t Building List of Pages ')
	print('\t------------------------')

	for url in url_list:
		# skip lines that are comments
		if "#" in url[0]: continue

		count += 1

		# only do lines starting with https?://
		if not (re.match('^https?://.+', url)):
			print("\t\t%s | %-50s Not a valid address, Skipping." % (count, url[:50]))
			continue

		# non-ascii domains will crash phantomjs, so we need to convert them to
		# idna/ascii/utf-8
		# this requires splitting apart the url, converting the domain to idna,
		# and pasting it all back together
		split_url = urlsplit(url.strip())
		idna_fixed_netloc = split_url.netloc.encode('idna').decode('utf-8')
		url = urlunsplit((split_url.scheme, idna_fixed_netloc, split_url.path, split_url.query, split_url.fragment))

		# if it is a m$ office or other doc, skip
		if re.match('.+(pdf|ppt|pptx|doc|docx|txt|rtf|xls|xlsx)$', url):
			print("\t\t%s | %-50s Not an HTML document, Skipping." % (count, url[:50]))
			continue

		# skip if in db already unless we are doing a timeseries
		if self.allow_timeseries == False:
			if sql_driver.page_exists(url):
				print("\t\t%s | %-50s Exists in DB, Skipping." % (count, url[:50]))
				continue

		# only add if not in the set already
		if url not in urls_to_process:
			print("\t\t%s | %-50s Adding." % (count, url[:50]))
			urls_to_process.add(url)
		else:
			print("\t\t%s | %-50s Already queued, Skipping." % (count, url[:50]))

	# close the db connection
	sql_driver.close()

	print('\t----------------------------------')
	print('\t%s addresses will now be webXray\'d' % len(urls_to_process))
	print('\t\tBrowser(s) are %s' % self.browser_types)
	print('\t\tBrowser wait time is %s seconds' % self.browser_wait)
	print('\t\t...you can go take a walk. ;-)')
	print('\t----------------------------------')

	# for macOS (darwin) we must specify start method as 'forkserver'
	# this is essentially voodoo to ward off evil spirits which
	# appear when large pool sizes are used on macOS
	# get_start_method must be called with allow_none=True, otherwise upon
	# checking the method it gets set (!) - and if we then get/set again
	# we get an error
	if sys.platform == 'darwin' and multiprocessing.get_start_method(allow_none=True) != 'forkserver':
		multiprocessing.set_start_method('forkserver')
	myPool = multiprocessing.Pool(pool_size)
	myPool.map(self.process_url, urls_to_process)

	# FYI
	self.print_runtime()
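# --- editor's illustrative sketch (not part of webXray) ---
# the idna fix-up run() applies to non-ascii domains, shown standalone; the
# example url is hypothetical, the split/encode/unsplit steps are the ones
# used in the loop above.
from urllib.parse import urlsplit, urlunsplit

url = 'http://münchen.example/path?q=1'
split_url = urlsplit(url.strip())
idna_fixed_netloc = split_url.netloc.encode('idna').decode('utf-8')
url = urlunsplit((split_url.scheme, idna_fixed_netloc, split_url.path, split_url.query, split_url.fragment))

# expected: http://xn--mnchen-3ya.example/path?q=1
print(url)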
def store(self, url, browser_output, store_source=False, store_1p=True, get_file_hashes=False, hash_3p_only=False):
	"""
	this is the primary function of this class,

	it takes the url of the given page and the request and cookie data generated
	by the browser

	data is cleaned up with some minor analysis (eg file types) and stored
	for later in-depth analysis.

	there is an option to store first party requests as well as third, turned on
	by default; to save disk space turn off store_1p

	there is also an option to get file hashes, this introduces serious overhead
	and is turned off by default
	"""

	# open up a sql connection
	if self.db_engine == 'mysql':
		from webxray.MySQLDriver import MySQLDriver
		sql_driver = MySQLDriver(self.db_name)
	elif self.db_engine == 'sqlite':
		from webxray.SQLiteDriver import SQLiteDriver
		sql_driver = SQLiteDriver(self.db_name)
	elif self.db_engine == 'postgres':
		from webxray.PostgreSQLDriver import PostgreSQLDriver
		sql_driver = PostgreSQLDriver(self.db_name)
	else:
		print('INVALID DB ENGINE FOR %s, QUITTING!' % self.db_engine)
		exit()

	# get the ip, fqdn, domain, pubsuffix, and tld
	# we need the domain to figure out if cookies/elements are third-party
	origin_ip_fqdn_domain_pubsuffix_tld = self.url_parser.get_ip_fqdn_domain_pubsuffix_tld(url)

	# if we can't get page domain info we fail gracefully
	if origin_ip_fqdn_domain_pubsuffix_tld is None:
		sql_driver.log_error(url, 'Could not parse TLD for %s' % url)
		return False

	origin_ip = origin_ip_fqdn_domain_pubsuffix_tld[0]
	origin_fqdn = origin_ip_fqdn_domain_pubsuffix_tld[1]
	origin_domain = origin_ip_fqdn_domain_pubsuffix_tld[2]
	origin_pubsuffix = origin_ip_fqdn_domain_pubsuffix_tld[3]
	origin_tld = origin_ip_fqdn_domain_pubsuffix_tld[4]

	# sql_driver.add_domain both stores the new domain and returns its db row id
	# if it is already in db just return the existing id
	page_domain_id = sql_driver.add_domain(origin_ip, origin_fqdn, origin_domain, origin_pubsuffix, origin_tld)

	# figure out the privacy policy url and text, starts null
	priv_policy_url = None
	priv_policy_url_text = None

	# read in our list of privacy link terms from the json file in webxray/resources/policyxray
	privacy_policy_term_list = self.utilities.get_privacy_policy_term_list()

	# we reverse the links returned from the browser to check footer links first
	# as that is where policy links tend to be
	all_links = browser_output['all_links']
	all_links.reverse()

	# if we have links search for privacy policy
	if len(all_links) > 0:
		# links are tuples of (link_text, link_url)
		for link_text, link_url in all_links:
			# makes sure we have text, skip links without
			if link_text:
				# need lower for string matching
				link_text = link_text.lower().strip()
				# not a link we can use
				if 'javascript' in link_text: continue
				# see if the link_text is in our term list
				if link_text in privacy_policy_term_list:
					# if the link_url is relative this will convert to absolute
					priv_policy_url = self.utilities.get_absolute_url_from_page_link(url, link_url)
					priv_policy_url_text = link_text
					break

	# if the final page is https (often after a redirect), mark it appropriately
	if browser_output['final_url'][:5] == 'https':
		page_is_ssl = True
	else:
		page_is_ssl = False

	if store_source:
		# handles issue where postgres will crash on inserting null character
		source = browser_output['source'].replace('\x00', ' ')
	else:
		source = None

	# add page
	page_id = sql_driver.add_page(
		browser_output['browser_type'],
		browser_output['browser_version'],
		browser_output['browser_wait'],
		browser_output['title'],
		browser_output['meta_desc'],
		url,
		browser_output['final_url'],
		priv_policy_url,
		priv_policy_url_text,
		page_is_ssl,
		source,
		browser_output['load_time'],
		page_domain_id
	)

	# store cookies
	for cookie in browser_output['cookies']:
		# get the ip, fqdn, domain, pubsuffix, and tld
		# we need the domain to figure out if cookies/elements are third-party
		# note:
		#	url_parser fails on non-http, we should fix this, right now a lame hack is to prepend http://
		cookie_ip_fqdn_domain_pubsuffix_tld = self.url_parser.get_ip_fqdn_domain_pubsuffix_tld('http://'+cookie['domain'])

		# something went wrong, log and fail gracefully
		if cookie_ip_fqdn_domain_pubsuffix_tld is None:
			sql_driver.log_error(url, 'Error parsing cookie with domain: '+cookie['domain'])
			continue

		# otherwise, everything went fine
		cookie_ip = cookie_ip_fqdn_domain_pubsuffix_tld[0]
		cookie_fqdn = cookie_ip_fqdn_domain_pubsuffix_tld[1]
		cookie_domain = cookie_ip_fqdn_domain_pubsuffix_tld[2]
		cookie_pubsuffix = cookie_ip_fqdn_domain_pubsuffix_tld[3]
		cookie_tld = cookie_ip_fqdn_domain_pubsuffix_tld[4]

		# mark third-party cookies
		if origin_domain != cookie_domain:
			is_3p_cookie = True
		else:
			is_3p_cookie = False

		# this is a first party cookie, see if we want to store it
		if is_3p_cookie is False and store_1p is False:
			continue

		# sql_driver.add_domain both stores the new domain and returns its id
		cookie_domain_id = sql_driver.add_domain(cookie_ip, cookie_fqdn, cookie_domain, cookie_pubsuffix, cookie_tld)

		# name and domain are required, so if they fail we just continue
		try: name = cookie['name']
		except: continue

		try: domain = cookie_domain
		except: continue

		# these are optional, fill with null values if fail
		try: secure = cookie['secure']
		except: secure = None

		try: path = cookie['path']
		except: path = None

		try: httponly = cookie['httponly']
		except: httponly = None

		try: expiry = cookie['expiry']
		except: expiry = None

		try: value = cookie['value']
		except: value = None

		# all done with this cookie
		sql_driver.add_cookie(
			page_id,
			name, secure, path, domain,
			httponly, expiry, value,
			is_3p_cookie, cookie_domain_id
		)

	# process requests now
	for request in browser_output['processed_requests']:
		# if the request starts with the following we can't parse anyway, so skip
		if re.match('^(data|about|chrome|blob).+', request):
			continue

		# get the ip, fqdn, domain, pubsuffix, and tld
		# we need the domain to figure out if cookies/elements are third-party
		element_ip_fqdn_domain_pubsuffix_tld = self.url_parser.get_ip_fqdn_domain_pubsuffix_tld(request)

		# problem with this request, log and fail gracefully
		if element_ip_fqdn_domain_pubsuffix_tld is None:
			sql_driver.log_error(url, 'Error parsing element request: '+request)
			continue

		element_ip = element_ip_fqdn_domain_pubsuffix_tld[0]
		element_fqdn = element_ip_fqdn_domain_pubsuffix_tld[1]
		element_domain = element_ip_fqdn_domain_pubsuffix_tld[2]
		element_pubsuffix = element_ip_fqdn_domain_pubsuffix_tld[3]
		element_tld = element_ip_fqdn_domain_pubsuffix_tld[4]

		# sql_driver.add_domain both stores the new domain and returns its db row id
		element_domain_id = sql_driver.add_domain(element_ip, element_fqdn, element_domain, element_pubsuffix, element_tld)

		# mark third-party elements based on domain
		if origin_domain != element_domain:
			is_3p_element = True
		else:
			is_3p_element = False

		# if we are not storing 1p elements continue
		if is_3p_element is False and store_1p is False:
			continue

		if request[:5] == 'https' or request[:3] == 'wss':
			element_is_ssl = True
		else:
			element_is_ssl = False

		try:
			received = browser_output['processed_requests'][request]['received']
		except:
			received = None

		# get domain of referer and determine if page leaked by referer
		try:
			referer = browser_output['processed_requests'][request]['referer']
		except:
			referer = None

		if referer and len(referer) != 0:
			referer_ip_fqdn_domain_pubsuffix_tld = self.url_parser.get_ip_fqdn_domain_pubsuffix_tld(referer)
			if referer_ip_fqdn_domain_pubsuffix_tld:
				if referer_ip_fqdn_domain_pubsuffix_tld[2] == origin_domain:
					page_domain_in_referer = True
				else:
					page_domain_in_referer = False
			else:
				page_domain_in_referer = None
				sql_driver.log_error(url, 'Error parsing referer header: '+referer)
		else:
			page_domain_in_referer = None

		try:
			start_time_offset = browser_output['processed_requests'][request]['start_time_offset']
		except:
			start_time_offset = None

		try:
			load_time = browser_output['processed_requests'][request]['load_time']
		except:
			load_time = None

		try:
			status = browser_output['processed_requests'][request]['status']
		except:
			status = None

		try:
			status_text = browser_output['processed_requests'][request]['status_text']
		except:
			status_text = None

		try:
			content_type = browser_output['processed_requests'][request]['content_type']
		except:
			content_type = None

		try:
			body_size = browser_output['processed_requests'][request]['body_size']
		except:
			body_size = None

		try:
			request_headers = str(browser_output['processed_requests'][request]['request_headers'])
		except:
			request_headers = None

		try:
			response_headers = str(browser_output['processed_requests'][request]['response_headers'])
		except:
			response_headers = None

		# consider anything before the "?" to be the element_url
		try:
			element_url = re.search('^(.+?)\?.+$', request).group(1)
		except:
			element_url = request

		# consider anything after the "?" to be the args
		try:
			element_args = re.search('^.+(\?.+)$', request).group(1) # element url args
		except:
			element_args = None

		# attempt to parse off the extension
		try:
			element_extension = re.search('\.([0-9A-Za-z]+)$', element_url).group(1).lower()
		except:
			element_extension = None

		# lists of common extensions, can be expanded
		image_extensions = ['png', 'jpg', 'jpgx', 'jpeg', 'gif', 'svg', 'bmp', 'tif', 'tiff', 'webp', 'srf']
		script_extensions = ['js', 'javascript']
		data_extensions = ['json', 'jsonp', 'xml']
		font_extensions = ['woff', 'ttf', 'otf']
		static_extensions = ['html', 'htm', 'shtml']
		dynamic_extensions = ['php', 'asp', 'jsp', 'aspx', 'ashx', 'pl', 'cgi', 'fcgi']

		# figure out what type of element it is
		if element_extension in image_extensions:
			element_type = 'image'
		elif element_extension in script_extensions:
			element_type = 'javascript'
		elif element_extension in data_extensions:
			element_type = 'data_structured'
		elif element_extension == 'css':
			element_type = 'style_sheet'
		elif element_extension in font_extensions:
			element_type = 'font'
		elif element_extension in static_extensions:
			element_type = 'page_static'
		elif element_extension in dynamic_extensions:
			element_type = 'page_dynamic'
		elif element_extension == 'swf' or element_extension == 'fla':
			element_type = 'Shockwave Flash'
		else:
			element_type = None

		# file hashing has non-trivial overhead and is off by default
		#
		# what this does is use the same ua/referer as the actual request
		# so we are just replaying the last one to get a similar response
		# note that we aren't sending the same cookies so that could be an issue
		# otherwise it is equivalent to a page refresh in theory

		# option to hash only 3p elements observed here
		if (get_file_hashes and hash_3p_only and is_3p_element) or (get_file_hashes and hash_3p_only == False):
			replay_element_request = urllib.request.Request(
				request,
				headers = {
					'User-Agent'	: browser_output['processed_requests'][request]['user_agent'],
					'Referer'		: referer,
					'Accept'		: '*/*'
				}
			)
			try:
				file_md5 = hashlib.md5(urllib.request.urlopen(replay_element_request, timeout=10).read()).hexdigest()
			except:
				file_md5 = None
		else:
			file_md5 = None

		# final task is to truncate the request if it is over 2k characters
		# as it is likely binary data and may cause problems inserting
		# into TEXT fields in database
		#
		# TODO:
		#	better handle binary data in general
		if len(request) >= 2000: request = request[:2000]
		if len(element_url) >= 2000: element_url = element_url[:2000]

		# store request
		sql_driver.add_element(
			page_id,
			request, element_url,
			is_3p_element, element_is_ssl,
			received,
			referer,
			page_domain_in_referer,
			start_time_offset,
			load_time,
			status,
			status_text,
			content_type,
			body_size,
			request_headers,
			response_headers,
			file_md5,
			element_extension,
			element_type,
			element_args,
			element_domain_id
		)

	# close db connection
	sql_driver.close()

	return True
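# --- editor's illustrative sketch (not part of webXray) ---
# the extension-to-type mapping used by store() above, pulled out as a
# standalone helper so it can be run and tested on its own; classify_element_type
# is a hypothetical name, the extension lists and type labels mirror the code.
def classify_element_type(element_extension):
	image_extensions = ['png', 'jpg', 'jpgx', 'jpeg', 'gif', 'svg', 'bmp', 'tif', 'tiff', 'webp', 'srf']
	script_extensions = ['js', 'javascript']
	data_extensions = ['json', 'jsonp', 'xml']
	font_extensions = ['woff', 'ttf', 'otf']
	static_extensions = ['html', 'htm', 'shtml']
	dynamic_extensions = ['php', 'asp', 'jsp', 'aspx', 'ashx', 'pl', 'cgi', 'fcgi']

	if element_extension in image_extensions:
		return 'image'
	elif element_extension in script_extensions:
		return 'javascript'
	elif element_extension in data_extensions:
		return 'data_structured'
	elif element_extension == 'css':
		return 'style_sheet'
	elif element_extension in font_extensions:
		return 'font'
	elif element_extension in static_extensions:
		return 'page_static'
	elif element_extension in dynamic_extensions:
		return 'page_dynamic'
	elif element_extension == 'swf' or element_extension == 'fla':
		return 'Shockwave Flash'
	else:
		return None

if __name__ == '__main__':
	# expected: javascript / image / None
	print(classify_element_type('js'))
	print(classify_element_type('webp'))
	print(classify_element_type('wasm'))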