def save_report(self, text):
    """Write a generated report into the ``reports`` directory.

    Args:
        text: the report content; a string for text/html reports, or a
            document object exposing ``write_pdf`` for pdf reports.

    Returns:
        None
    """
    # map the report type to a file extension, defaulting to plain text
    extension_map = {'text': '.txt', 'html': '.html', 'pdf': '.pdf'}
    ext = extension_map.get(self.type, '.txt')

    # use dashes rather than colons in the time portion so the filename
    # is valid on Windows as well as POSIX systems
    date = datetime.datetime.now().strftime("%d-%m-%Y-%H-%M-%S")
    filename = f'WAVS Report - {date}'

    # BUG FIX: the original built ``filename`` but never used it, so every
    # report was written to the same literal path
    path = f'reports/{filename}{ext}'

    if self.type == 'pdf':
        # pdf report objects serialise themselves
        text.write_pdf(path)
    else:
        try:
            with open(path, 'w') as f:
                f.write(text)
        except IOError:
            warning(f'Could not save report: {path}')
            exit()

    info(f'Saved report to: {path}')
def run_module(self):
    """
    The entrypoint into the module. Takes any found webpages and uses
    worker processes to extract params from the html.
    """
    info('Parsing HTML...')

    # get the list of found pages
    found_pages = self._get_previous_results('FileScanner')

    # if there are no found pages, there's no need to run this module
    if not found_pages:
        return

    # pass the found pages to worker processes
    with concurrent.futures.ProcessPoolExecutor() as executor:
        results = list(executor.map(self._run_thread, found_pages))

    # IDIOM FIX: flatten the per-page result lists with a plain loop
    # instead of the original side-effecting list comprehension
    final = []
    for page_result in results:
        if page_result:
            final.extend(page_result)
    final = list(filter(None, final))

    # remove duplicate found parameters (the entries are dicts, hence
    # unhashable, so a membership scan is used instead of a set)
    final = [i for n, i in enumerate(final) if i not in final[n + 1:]]

    if self.main.options['verbose']:
        for params in final:
            success(f'Found params: {params["action"]} '
                    f'({" ".join(params["params"])})',
                    prepend=' ')
    self._save_scan_results(final, update_count=False)
def run_module(self):
    """
    Loads the attack strings from the database, and runs multiple
    processes

    Args:
        None
    Returns:
        None
    """
    info("Searching for cross site scripting (stored)...")

    # pull the payload list out of the wordlist database
    self.attack_strings = self.main.db.get_wordlist(
        self.info['wordlist_name'])

    # the search strings will be the attack strings themselves
    # because python will not interpret any javascript
    self.re_search_strings = self.attack_strings

    injectable_params = self._get_previous_results('HTMLParser')

    # fan the injectable parameters out across worker processes and
    # flatten the per-parameter result lists into a single list
    with concurrent.futures.ProcessPoolExecutor() as executor:
        per_param = executor.map(self._run_thread, injectable_params)
        final = [finding for result in per_param for finding in result]

    # save the results
    self._save_scan_results(final)
def run_module(self):
    """
    method that loads in a file wordlist and uses thread to search for
    the files

    Args:
        None
    Returns:
        None
    """
    info('Searching for CSRF...')

    # token field names to look for inside discovered forms
    self.csrf_fields = self.main.db.get_wordlist(
        self.info['wordlist_name'])

    forms_discovered = self._get_previous_results('HTMLParser')

    # check every discovered form in its own worker process
    with concurrent.futures.ProcessPoolExecutor() as executor:
        scan_results = list(
            executor.map(self._run_thread, forms_discovered))

    # drop the forms that produced no result before saving
    self._save_scan_results([r for r in scan_results if r],
                            update_count=False)
def _run_text_generator(self):
    """
    Runs text generation system, which creates new payloads.

    This method uses a 'TextGenerator' instance to create payloads based
    upon payloads in the 'wordlist' database. Once text has been
    generated it saves the text in the 'wordlist' database.

    Args:
        None
    Returns:
        None
    """
    info('Starting text generation...')

    # wait 2 seconds so user can see the above message
    sleep(2)

    # suppress debugging output from tensorflow; must be set BEFORE the
    # TextGenerator import below pulls tensorflow in
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    # start text generation and save it in database
    # (imported lazily so tensorflow is only loaded when needed)
    from utils.TextGenerator import TextGenerator
    self.text_generator = TextGenerator(self)
    # NOTE(review): this calls self.run_text_generation() rather than a
    # method on the TextGenerator instance created above -- confirm that
    # is intended and that the instance is used elsewhere
    self.run_text_generation()

    # clear the screen because tensorflow creates a lot of output
    clear_screen()
    info('Completed text generation')
def _database_reset(self):
    """
    Resets the wordlist and/or scans database back to defaults.

    Method will reset the 'count' column of the wordlist database back
    to 0, and/or it will delete everything in the scans database.

    Args:
        None
    Returns:
        None
    """
    info('Resetting internal databases...')

    args = self.cmd_args

    # --reset wipes both databases; the narrower flags wipe just one
    if args.reset:
        self.db.reset_wordlist()
        self.db.reset_scans()
    elif args.reset_scans:
        self.db.reset_scans()
    elif args.reset_wordlist:
        self.db.reset_wordlist()
def run_module(self):
    """
    Loads the attack strings from the database, and runs multiple
    processes

    Args:
        None
    Returns:
        None
    """
    info('Searching for SQL injections...')

    # parameters discovered by the HTML parser are the injection points
    params = self._get_previous_results('HTMLParser')

    # payloads to inject, and the error patterns that reveal a hit
    self.attack_strings = self.main.db.get_wordlist(
        self.info['wordlist_name'])
    self.re_search_strings = self.main.db.get_detect_wordlist('sql')

    # inject each parameter in its own worker process
    with concurrent.futures.ProcessPoolExecutor() as executor:
        per_param = list(executor.map(self._run_thread, params))

    # flatten the per-parameter lists into a single result list
    final = [finding for result in per_param for finding in result]

    self._save_scan_results(final)
def run_module(self):
    """Run the initial reconnaissance checks against the target."""
    info('Running initial scans...')

    # nothing more to do if the target isn't reachable
    if not self._is_server_up():
        exit()

    # probe how the server responds to junk requests, then mine robots.txt
    self._check_fuzzing()
    self._parse_robots()
def show_previous_scans(self):
    """
    Displays a list of past scans and allows user to select one.

    This method prints out a list of all the past scans saved in the
    'scans' database, along with their scan IDs. It prompts the user to
    enter the scan ID of the scan to be selected.

    Args:
        None
    Returns:
        the scan ID of the selected scan
    """
    info('Previous scans:')

    # store an instance of the 'scans' database
    scans_table = self.db.get_scan_db().table('scans')

    # get all the saved scans
    scans = scans_table.all()

    # print the details of each saved scan to stdout.
    # BUG FIX: the original mixed adjacent-string concatenation with
    # commas, so ID/Host were fused into one argument while Port and
    # Time were passed as stray extra positional arguments
    for scan in scans:
        success(
            f'ID: {scan.doc_id}, '
            f'Host: {scan["host"]}, '
            f'Port: {scan["port"]}, '
            f'Time: {scan["timestamp"]}',
            prepend=' ')

    # prompt the user to enter the scan ID of the scan to be selected
    # it will reject scan IDs which dont exist, and any input which is
    # not an int
    check = False
    while not check:
        try:
            choice = int(input('> '))
            if not self.db.scan_exists(choice):
                warning(f'Scan ID: {choice} does not exist')
            else:
                check = True
        except ValueError:
            # non-integer input: silently re-prompt
            pass

    return choice
def run_module(self):
    """
    Performs the actual scanning of the target application.

    Loads in the payloads, patterns and calls the _run_thread method
    to inject payloads into parameters.

    Args:
        None
    Returns:
        None
    """
    info("Searching for OS command injection...")

    # expand every base payload into the three shell-chaining forms
    base_payloads = self.main.db.get_wordlist(self.info['wordlist_name'])
    prefixes = ('test;', 'test && ', 'test || ')
    self.attack_strings = [
        prefix + payload
        for payload in base_payloads
        for prefix in prefixes
    ]

    # the patterns to search the final page for
    self.re_search_strings = [
        'www-data', 'root', 'nt-authority', 'os_command_found'
    ]

    # the parameters to inject payloads into
    injectable_params = self._get_previous_results('HTMLParser')

    # use multiple processes to inject payloads into parameters,
    # flattening the per-parameter result lists as they come back
    with concurrent.futures.ProcessPoolExecutor() as executor:
        per_param = executor.map(self._run_thread, injectable_params)
        final = [finding for result in per_param for finding in result]

    # save the results
    self._save_scan_results(final)
def run_module(self):
    """
    Performs the actual scanning of the target application.

    Either automatically crawls the target application for links, or sets
    up a proxy so that user can manually crawl the target.

    Args:
        None
    Returns:
        None
    """
    info('Crawling links...')

    # start from previously found pages, falling back to the site root
    self.found_pages = self._get_previous_results('FileScanner') or ['/']

    if self.main.options['manual_crawl']:
        # manual mode: run an intercepting proxy and record whatever the
        # user browses to, then continue with the automatic crawl below
        self.manual_found_pages = self.found_pages
        proxy_port = self.main.options['proxy_port']
        highlight(f'Proxy server started on http://127.0.0.1:{proxy_port}',
                  prepend=' ')
        highlight('Use browser to crawl target. CTRL+C to exit.',
                  prepend=' ')

        # set up the intercepting proxy and start it
        proxy = InterceptingProxy(self.main.host, proxy_port, self)
        proxy.start()

        self._save_scan_results(self.manual_found_pages,
                                update_count=False)

    # the automatic crawl runs in both modes (the original called it in
    # each branch of the if/else)
    self.auto_crawl()
def _run_report_generator(self):
    """
    Creates a report for a past scan.

    Method will display a list of past scans along with their scan id.
    Prompts the user to select a past scan, then generates a report for
    that scan.

    Args:
        None
    Returns:
        None
    """
    # plain string: the original used an f-string with no placeholders
    info('Generating report...')

    # display a list of past scans with scan ids and let the user choose
    scan_id = self.show_previous_scans()

    # generate a report for the selected scan
    self.report_gen.generate_report(scan_id)
def _parse_robots(self):
    """Fetch and parse the target's robots.txt for paths to seed scans.

    Allow/Disallow entries are split into directory paths (trailing
    slash) and file paths, then saved into the scan database tables
    'directories_discovered' and 'files_discovered' respectively.
    """
    # construct url for robots.txt
    url = f'{self.main.get_host_url_base()}/robots.txt'
    resp = http_get_request(url, self.main.cookies)

    dir_paths = []
    file_paths = []

    # checking if robots.txt exists
    if resp.status_code == 200:
        success('robots.txt found', prepend=' ')
        info('parsing robots.txt', prepend=' ')
        lines = resp.text.split('\n')

        # if there are no lines then theres nothing to do
        if not lines:
            return

        # loop through every line in robots.txt
        for line in lines:
            if line.startswith('Allow:') or line.startswith('Disallow:'):
                # BUG FIX: split on the first ':' only and strip, so
                # entries without a space after the colon (e.g.
                # 'Disallow:/admin') no longer raise IndexError
                path = line.split(':', 1)[1].strip()
                if not path:
                    continue
                success(f'Found path: {path}', prepend=' ')

                # BUG FIX: the original tested path[:-1] == '/' (all but
                # the last character), misclassifying nearly every entry;
                # a trailing slash marks a directory
                if path.endswith('/'):
                    dir_paths.append(path)
                else:
                    file_paths.append(path)

    if dir_paths:
        table = self.main.db.get_scan_db().table('directories_discovered')
        table.insert({"scan_id": self.main.id, "results": dir_paths})

    if file_paths:
        table = self.main.db.get_scan_db().table('files_discovered')
        table.insert({"scan_id": self.main.id, "results": file_paths})
def run_module(self):
    """
    method that loads in a file wordlist and uses thread to search for
    the files

    :return:
    """
    info('Searching for files...')

    # full candidate list built from the wordlist database
    candidate_files = self.generate_full_wordlist()

    # probe every candidate path in a pool of worker processes
    with concurrent.futures.ProcessPoolExecutor() as executor:
        thread_results = list(
            executor.map(self._run_thread, candidate_files))

    # drop empty results, then flatten the per-candidate lists
    found = [path
             for sublist in filter(None, thread_results)
             for path in sublist]

    self._save_scan_results(found, update_count=False)
def run_module(self):
    """Search discovered parameters for local file inclusion flaws."""
    info("Searching for local file inclusions...")

    # payloads to inject, and the patterns that indicate a successful LFI
    self.attack_strings = self.main.db.get_wordlist(
        self.info['wordlist_name'])
    self.re_search_strings = self.main.db.get_detect_wordlist('lfi')

    # parameters discovered by the HTML parser are the injection points
    injectable_params = self._get_previous_results('HTMLParser')

    # inject each parameter in its own worker process
    with concurrent.futures.ProcessPoolExecutor() as executor:
        per_param = list(
            executor.map(self._run_thread, injectable_params))

    # flatten the per-parameter result lists before saving
    final = [finding for result in per_param for finding in result]
    self._save_scan_results(final)
def run_module(self):
    """
    Performs the actual scanning of the target application.

    Loads in the payloads, patterns and calls the _run_thread method
    to inject payloads into parameters.

    Args:
        None
    Returns:
        None
    """
    info("Searching for cross site scripting (reflected)...")

    # hard-coded probe payloads (the database wordlist is deliberately
    # not loaded here; dead commented-out loader removed).
    # NOTE(review): 'srx' and the quote placement in the second payload
    # look like typos for '<img src="x" onerror="alert(1)">' -- left
    # byte-identical because the same strings double as the reflection
    # search patterns below; confirm before changing
    self.attack_strings = [
        '<script>alert(1)</script>',
        '<img srx="x" onerror="alert(1)>"'
    ]

    # the search strings will be the attack strings themselves
    # because python will not interpret any javascript
    self.re_search_strings = self.attack_strings

    injectable_params = self._get_previous_results('HTMLParser')

    # inject each parameter in its own worker process
    with concurrent.futures.ProcessPoolExecutor() as executor:
        results = executor.map(self._run_thread, injectable_params)
        final = []
        for r in results:
            final.extend(r)

    # save the results
    self._save_scan_results(final)
def run_module(self):
    """Search the target for common directories using the wordlist."""
    info('Searching for directories...')

    # load in the wordlist from database
    word_list = self.main.db.get_wordlist(self.info['wordlist_name'])

    # probe each candidate directory in its own worker process
    with concurrent.futures.ProcessPoolExecutor() as executor:
        scan_results = list(executor.map(self._run_thread, word_list))

    # keep only the hits (workers return None for misses; empty-string
    # results must be preserved, so test identity against None)
    found = [result for result in scan_results if result is not None]

    # save the directories found to the database
    self._save_scan_results(found)
def run_module(self):
    """
    method that loads in a file wordlist and uses thread to search for
    the files

    :return:
    """
    info('Searching for information disclosure...')

    # file extensions to combine with discovered filenames
    self.extension_list = self.main.db.get_wordlist(
        self.info['wordlist_name'])

    # BUG FIX: a leftover debug stub replaced the wordlist with an empty
    # list (the real call was commented out), so the module scanned
    # nothing; restore the generated wordlist
    wordlist = self.generate_full_wordlist()

    files_found = []
    with concurrent.futures.ProcessPoolExecutor() as executor:
        files_found += list(executor.map(self._run_thread, wordlist))

    # remove None results and flatten the per-candidate lists
    files_found = list(filter(None, files_found))
    files_found = [file for sublist in files_found for file in sublist]

    self._save_scan_results(files_found, update_count=False)