def process_uri(self, uri):
    sql_driver     = MySQLDriver(self.db_name)
    output_store   = OutputStore(self.db_name)
    phantom_driver = PhantomDriver('--ignore-ssl-errors=true --ssl-protocol=any', 'wbxr_logger.js')

    # this can be higher or lower depending on network load
    # generally, 90 seems to be fine, so keep with it
    try:
        phantom_output = phantom_driver.execute(uri, 90)
    except:
        print("\t\t%-50s Phantomjs Did Not Return." % uri[:50])
        sql_driver.log_error(uri, "FAIL: Phantomjs Did Not Return.")
        return

    if re.match('^FAIL.+', phantom_output):
        print("\t\t%-50s Phantom Error\n\t%s" % (uri[:50], phantom_output))
        sql_driver.log_error(uri, phantom_output)
    else:
        print("\t\t%-50s %s" % (uri[:50], output_store.store(uri, phantom_output)))

    # closes our db connections
    sql_driver.close()
    output_store.close()
    return
def process_url(self, url):
    """
    this function takes a specified url, loads it in the browser
    and returns json-formatted output with relevant request data, etc.

    the output_store class then puts this data in the db for later analysis
    """

    # set up sql connection used to log errors and do checks
    if self.db_engine == 'sqlite':
        from webxray.SQLiteDriver import SQLiteDriver
        sql_driver = SQLiteDriver(self.db_name)

    # output store does the heavy lifting of analyzing browser output and storing to db
    output_store = OutputStore(self.db_engine, self.db_name)

    # support for loading same page with multiple browsers - purposefully undocumented
    for browser_type in self.browser_types:
        # import and set up specified browser driver
        #   note we need to set up a new browser each time to
        #   get a fresh profile
        if browser_type == 'chrome':
            browser_driver = ChromeDriver(ua=self.chrome_ua)

        # attempt to load the page, fail gracefully
        try:
            browser_output = browser_driver.get_webxray_scan_data(url, self.browser_wait)
        except:
            print('\t\t%-50s Browser %s Did Not Return' % (url[:50], browser_type))
            sql_driver.log_error(url, 'Unable to load page')
            sql_driver.close()
            return

        # if there was a problem we log the error
        if browser_output['success'] == False:
            print('\t\t%-50s Browser %s Error: %s' % (url[:50], browser_type, browser_output['result']))
            sql_driver.log_error(url, 'Unable to load page')
            sql_driver.close()
            return
        else:
            # no error, treat result as browser output
            browser_output = browser_output['result']

        # attempt to store the output
        if output_store.store(url, browser_output):
            print('\t\t%-50s Success with %s' % (url[:50], browser_type))
        else:
            print('\t\t%-50s Fail with %s' % (url[:50], browser_type))
            sql_driver.log_error(url, 'Unable to load page')

    sql_driver.close()
    return
def store_result(self, params):
    """
    Handles storing task_result and removing jobs from the task_queue.
    """

    # unpack params
    target      = params['target']
    task        = params['task']
    task_result = params['task_result']
    client_id   = params['client_id']

    # client_ip is optional
    if 'client_ip' in params:
        client_ip = params['client_ip']
    else:
        client_ip = None

    # if db_name is specified we are running in server mode and we
    #   connect to the db which corresponds to the result being
    #   processed.  otherwise, we use the global db_name as we are
    #   running in non-server mode.
    if 'db_name' in params:
        if self.db_engine == 'sqlite':
            from webxray.SQLiteDriver import SQLiteDriver
            sql_driver = SQLiteDriver(params['db_name'])
        elif self.db_engine == 'postgres':
            from webxray.PostgreSQLDriver import PostgreSQLDriver
            sql_driver = PostgreSQLDriver(params['db_name'])
        else:
            print('INVALID DB ENGINE FOR %s, QUITTING!' % self.db_engine)
            quit()
        output_store = OutputStore(params['db_name'], self.db_engine)
    else:
        if self.db_engine == 'sqlite':
            from webxray.SQLiteDriver import SQLiteDriver
            sql_driver = SQLiteDriver(self.db_name)
        elif self.db_engine == 'postgres':
            from webxray.PostgreSQLDriver import PostgreSQLDriver
            sql_driver = PostgreSQLDriver(self.db_name)
        else:
            print('INVALID DB ENGINE FOR %s, QUITTING!' % self.db_engine)
            quit()
        output_store = OutputStore(self.db_name, self.db_engine)

    if task == 'get_policy':
        store_result = output_store.store_policy(task_result, client_id, client_ip=client_ip)

        # we never retry policies
        sql_driver.remove_task_from_queue(target, task)

        if store_result['success']:
            result = {'success': True}
        else:
            # log error
            sql_driver.log_error({
                'client_id' : client_id,
                'task'      : task,
                'target'    : target,
                'msg'       : 'output_store fail on ' + store_result['result']
            })
            result = {'success': False, 'result': store_result['result']}
    # elif task == 'get_crawl' or task == 'get_random_crawl':
    else:
        all_crawls_ok = True

        # We want to be able to re-run random crawls, and to do so we make sure
        #   the crawl_id will match
        if task == 'get_crawl' or task == 'get_scan':
            crawl_id = target
        elif task == 'get_random_crawl':
            crawl_id = []
            for result in task_result:
                crawl_id.append(result['start_url'])
            crawl_id = json.dumps(crawl_id)

        # tweak to account for differences between scans/crawls
        if task == 'get_scan':
            task_result = [task_result]

        # keep track of domains
        all_3p_cookie_domains      = set()
        all_3p_dom_storage_domains = set()
        all_3p_request_domains     = set()
        all_3p_response_domains    = set()
        all_3p_websocket_domains   = set()

        # When we store a crawl we add optional fields in the page table
        #   that allow us to connect the page loads into a single crawl.
        #   the crawl_id is a hash of the target (which is a json string
        #   derived from the url_list), and the crawl_timestamp which is the
        #   first accessed time from the crawl.
        for crawl_sequence, result in enumerate(task_result):
            store_result = output_store.store_scan({
                'browser_output' : result,
                'client_id'      : client_id,
                'crawl_id'       : crawl_id,
                'crawl_timestamp': task_result[0]['accessed'],
                'crawl_sequence' : crawl_sequence,
                'client_ip'      : client_ip
            })

            if store_result['success'] != True:
                all_crawls_ok = False
            else:
                # we are successful, create entries in page_lookup table
                page_lookup_table = self.build_lookup_table('page', store_result['page_id'], {
                    'requests'   : store_result['page_3p_request_domains'],
                    'responses'  : store_result['page_3p_response_domains'],
                    'websockets' : store_result['page_3p_websocket_domains'],
                    'dom_storage': store_result['page_3p_dom_storage_domains'],
                    'cookies'    : store_result['page_3p_dom_storage_domains']
                })

                for lookup_item in page_lookup_table:
                    sql_driver.add_page_id_domain_lookup_item(page_lookup_table[lookup_item])

                # we are also making a lookup table for the crawl, keep joining the
                #   sets as we go along
                all_3p_request_domains.update(store_result['page_3p_request_domains'])
                all_3p_response_domains.update(store_result['page_3p_response_domains'])
                all_3p_websocket_domains.update(store_result['page_3p_websocket_domains'])
                all_3p_dom_storage_domains.update(store_result['page_3p_dom_storage_domains'])
                all_3p_cookie_domains.update(store_result['page_3p_dom_storage_domains'])

        if all_crawls_ok:
            sql_driver.remove_task_from_queue(target, task)
            result = {'success': True}

            # build crawl lookup table
            crawl_lookup_table = self.build_lookup_table('crawl', crawl_id, {
                'requests'   : all_3p_request_domains,
                'responses'  : all_3p_response_domains,
                'websockets' : all_3p_websocket_domains,
                'dom_storage': all_3p_dom_storage_domains,
                'cookies'    : all_3p_cookie_domains
            })

            # patch lookup table
            for lookup_item in crawl_lookup_table:
                sql_driver.add_crawl_id_domain_lookup_item(crawl_lookup_table[lookup_item])
        else:
            sql_driver.unlock_task_in_queue(target, task)
            # log error
            sql_driver.log_error({
                'client_id' : client_id,
                'task'      : task,
                'target'    : target,
                'msg'       : 'output_store fail to store all scans for crawl_id_target ' + target
            })
            result = {'success': False, 'result': 'unable to store all crawl loads'}

    # tidy up
    output_store.close()
    sql_driver.close()

    # done
    return result
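# store_result() above relies on self.build_lookup_table, which is not shown in this
#   section.  The sketch below is a hypothetical reconstruction for illustration only,
#   assuming it returns a dict keyed by domain where each entry records which element
#   types (requests, cookies, etc.) that third-party domain appeared in; the field
#   names and row layout are assumptions, and the real webXray implementation may differ.
def build_lookup_table(self, lookup_type, lookup_id, domain_sets):
    """
    Collapse per-type sets of third-party domains into one row per domain,
    flagging which element types each domain was observed in.
    """
    lookup_table = {}
    for element_type, domains in domain_sets.items():
        for domain in domains:
            if domain not in lookup_table:
                lookup_table[domain] = {
                    'lookup_type'   : lookup_type,   # 'page' or 'crawl'
                    'lookup_id'     : lookup_id,     # page_id or crawl_id
                    'domain'        : domain,
                    'is_request'    : False,
                    'is_response'   : False,
                    'is_websocket'  : False,
                    'is_dom_storage': False,
                    'is_cookie'     : False,
                }
            # mark that this domain was seen for this element type
            flag = {
                'requests'   : 'is_request',
                'responses'  : 'is_response',
                'websockets' : 'is_websocket',
                'dom_storage': 'is_dom_storage',
                'cookies'    : 'is_cookie',
            }[element_type]
            lookup_table[domain][flag] = True
    return lookup_table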
def process_url(self, url):
    """
    this function takes a specified url, loads it in the browser (currently phantomjs)
    and returns json-formatted output with relevant request data, etc.

    the output_store class then puts this data in the db for later analysis
    """

    # set up sql connection used to log errors and do timeseries checks
    if self.db_engine == 'mysql':
        from webxray.MySQLDriver import MySQLDriver
        sql_driver = MySQLDriver(self.db_name)
    elif self.db_engine == 'postgres':
        from webxray.PostgreSQLDriver import PostgreSQLDriver
        sql_driver = PostgreSQLDriver(self.db_name)
    elif self.db_engine == 'sqlite':
        from webxray.SQLiteDriver import SQLiteDriver
        sql_driver = SQLiteDriver(self.db_name)

    # output store does the heavy lifting of analyzing browser output and storing to db
    output_store = OutputStore(self.db_engine, self.db_name)

    # support for loading same page with multiple browsers - purposefully undocumented
    for browser_type in self.browser_types:
        # import and set up specified browser driver
        #   note we need to set up a new browser each time to
        #   get a fresh profile
        if browser_type == 'phantomjs':
            browser_driver = PhantomDriver()
        elif browser_type == 'chrome':
            browser_driver = ChromeDriver(ua=self.chrome_ua)

        # support for timeseries collections - purposefully undocumented
        if self.allow_timeseries:
            page_last_accessed_browser_type = sql_driver.get_page_last_accessed_by_browser_type(url, browser_type)
            if page_last_accessed_browser_type:
                time_diff = datetime.now() - page_last_accessed_browser_type[0]
                if time_diff < timedelta(minutes=self.interval_minutes) and page_last_accessed_browser_type[1] == browser_type:
                    print("\t\t%-50s Scanned too recently with %s" % (url[:50], browser_type))
                    continue

        # attempt to load the page, fail gracefully
        try:
            browser_output = browser_driver.get_webxray_scan_data(url, self.browser_wait)
        except:
            print('\t\t%-50s Browser %s Did Not Return' % (url[:50], browser_type))
            sql_driver.log_error(url, 'Unable to load page')
            sql_driver.close()
            return

        # if there was a problem we log the error
        if browser_output['success'] == False:
            print('\t\t%-50s Browser %s Error: %s' % (url[:50], browser_type, browser_output['result']))
            sql_driver.log_error(url, 'Unable to load page')
            sql_driver.close()
            return
        else:
            # no error, treat result as browser output
            browser_output = browser_output['result']

        # attempt to store the output
        if output_store.store(url, browser_output):
            print('\t\t%-50s Success with %s' % (url[:50], browser_type))
        else:
            print('\t\t%-50s Fail with %s' % (url[:50], browser_type))
            sql_driver.log_error(url, 'Unable to load page')

    sql_driver.close()
    return
def process_url(self, url):
    """
    this function takes a specified url, loads it in the browser (currently phantomjs)
    and returns json-formatted output with relevant request data, etc.

    the output_store class then puts this data in the db for later analysis
    """

    # set up sql connection used to log errors and do timeseries checks
    if self.db_engine == 'mysql':
        from webxray.MySQLDriver import MySQLDriver
        sql_driver = MySQLDriver(self.db_name)
    elif self.db_engine == 'postgres':
        from webxray.PostgreSQLDriver import PostgreSQLDriver
        sql_driver = PostgreSQLDriver(self.db_name)
    elif self.db_engine == 'sqlite':
        from webxray.SQLiteDriver import SQLiteDriver
        sql_driver = SQLiteDriver(self.db_name)

    # output store does the heavy lifting of analyzing browser output and storing to db
    output_store = OutputStore(self.db_engine, self.db_name)

    # support for loading same page with multiple browsers - purposefully undocumented
    for browser_type in self.browser_types:
        # import and set up specified browser driver
        #   note we need to set up a new browser each time to
        #   get a fresh profile
        if browser_type == 'phantomjs':
            browser_driver = PhantomDriver()
        elif browser_type == 'chrome':
            browser_driver = ChromeDriver(ua=self.chrome_ua)

        # support for timeseries collections - purposefully undocumented
        if self.allow_timeseries:
            page_last_accessed_browser_type = sql_driver.get_page_last_accessed_by_browser_type(url, browser_type)
            if page_last_accessed_browser_type:
                time_diff = datetime.now() - page_last_accessed_browser_type[0]
                if time_diff < timedelta(minutes=self.interval_minutes) and page_last_accessed_browser_type[1] == browser_type:
                    print("\t\t%-50s Scanned too recently with %s" % (url[:50], browser_type))
                    continue

        # attempt to load the page, fail gracefully
        try:
            browser_output = browser_driver.get_webxray_scan_data(url, self.browser_wait)
        except:
            print('\t\t%-50s Browser %s Did Not Return' % (url[:50], browser_type))
            sql_driver.log_error(url, 'Unable to load page')
            sql_driver.close()
            return

        # if there was a problem browser_output will be None
        if browser_output == None:
            print('\t\t%-50s Browser %s Did Not Return' % (url[:50], browser_type))
            sql_driver.log_error(url, 'Unable to load page')
            sql_driver.close()
            return

        # attempt to store the output
        if output_store.store(url, browser_output):
            print('\t\t%-50s Success with %s' % (url[:50], browser_type))
        else:
            print('\t\t%-50s Fail with %s' % (url[:50], browser_type))
            sql_driver.log_error(url, 'Unable to load page')

    sql_driver.close()
    return
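# A minimal sketch of how process_url might be driven over a list of urls.  The real
#   webXray Collector has its own entry point; the run() name, pool_size default, and
#   url_list argument below are assumptions for illustration only.  Running each scan
#   in a worker process means a hung browser only stalls one worker, not the whole run.
from multiprocessing import Pool

def run(self, url_list, pool_size=4):
    """
    Scan each url in url_list in a separate worker process.
    """
    with Pool(pool_size) as pool:
        pool.map(self.process_url, url_list)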