class PlaintextWithcount(InputParser):
    '''
    Parser for files containing a password counter and the according plaintext
    password per line.

    :requires: Counter and password must be divided by a single whitespace
        character: ' 4238 mypassword!'.
    '''

    def __init__(self, pw_file):
        '''
        Constructor.

        :param pw_file: Path to the password leak file to parse.
        '''
        # Initiate logger
        self.logger = Logger()
        self.logger.basicConfig('DEBUG')  # set logger level to DEBUG
        self.pw_file = pw_file
        self.pw_counter = 0     # counter for the amount of passwords in the leak
        self.error_counter = 0  # counter for the errors occurring during the file-parsing
        # Dict to store the passwords from the file including an
        # occurrence-counter for each password
        self.pws_multi = {}

    def get_filetype(self):
        '''
        Return the input type indicator to run the according analysis module
        and the execution module correctly.
        '''
        return 'plaintext'

    def parse_pw_file(self):
        '''
        Parses the passwords (plaintext) from the password leak and analyzes
        the occurrences of the passwords.

        :requires: One password per line in the file.
        :return: (Dict{'occ', 'lookups'}, Int, Int): Dict containing the parsed
            passwords and a dict of their occurrences in the leak ('occ') as
            well as a counter for the amount of lookups ('lookups') which is
            used to count the amount of duplicate candidates that might be
            generated by a guesser. The integers are a password counter and a
            parsing-error counter.
        '''
        self.logger.debug("Start parsing the password file ...")
        # '  4238 mypassword' -> counter part / prefix up to the password.
        counter_re = re.compile(r'^\s*[0-9]*')
        pw_re = re.compile(r'^\s*[0-9]*\s')
        # open() never returns None; it raises IOError on failure, so the
        # failure path must be an except clause, not a None check.
        try:
            f = open(self.pw_file, 'r')  # open the password file
        except IOError:
            self.logger.debug(
                "The pw file could not be opened!\nAnalysis closed!")
            exit(-1)  # TODO: check if this is correct/smartest solution?!
        with f:  # guarantees the file is closed even on unexpected errors
            for line in f:
                try:
                    self.pw_counter += 1  # increment counter
                    # Slice off the leading '<ws><counter><ws>' prefix instead
                    # of str.replace(), which would also mangle passwords that
                    # happen to contain the prefix text a second time.
                    prefix = pw_re.match(line)
                    pw = line[prefix.end():].rstrip('\n')
                    occ = int(counter_re.findall(line)[0].replace(' ', ''))
                    # store the parsed occurrence count and a lookup counter
                    # initialized to 0 for this password
                    self.pws_multi[pw] = {'occ': occ, 'lookups': 0}
                except Exception:
                    # count malformed lines (missing counter, non-numeric
                    # counter, ...) but keep parsing
                    self.error_counter += 1
        self.logger.debug("Parsing done!")
        return self.pws_multi, self.pw_counter, self.error_counter
# NOTE(review): this class is defined twice in this file; the later
# definition shadows the earlier one at import time — consider removing
# one of the two copies.
class PlaintextWithcount(InputParser):
    '''
    Parser for files containing a password counter and the according plaintext
    password per line.

    :requires: Counter and password must be divided by a single whitespace
        character: ' 4238 mypassword!'.
    '''

    def __init__(self, pw_file):
        '''
        Constructor.

        :param pw_file: Path to the password leak file to parse.
        '''
        # Initiate logger
        self.logger = Logger()
        self.logger.basicConfig('DEBUG')  # set logger level to DEBUG
        self.pw_file = pw_file
        self.pw_counter = 0     # counter for the amount of passwords in the leak
        self.error_counter = 0  # counter for the errors occurring during the file-parsing
        # Dict to store the passwords from the file including an
        # occurrence-counter for each password
        self.pws_multi = {}

    def get_filetype(self):
        '''
        Return the input type indicator to run the according analysis module
        and the execution module correctly.
        '''
        return 'plaintext'

    def parse_pw_file(self):
        '''
        Parses the passwords (plaintext) from the password leak and analyzes
        the occurrences of the passwords.

        :requires: One password per line in the file.
        :return: (Dict{'occ', 'lookups'}, Int, Int): Dict containing the parsed
            passwords and a dict of their occurrences in the leak ('occ') as
            well as a counter for the amount of lookups ('lookups') which is
            used to count the amount of duplicate candidates that might be
            generated by a guesser. The integers are a password counter and a
            parsing-error counter.
        '''
        self.logger.debug("Start parsing the password file ...")
        # '  4238 mypassword' -> counter part / prefix up to the password.
        counter_re = re.compile(r'^\s*[0-9]*')
        pw_re = re.compile(r'^\s*[0-9]*\s')
        # open() never returns None; it raises IOError on failure, so the
        # failure path must be an except clause, not a None check.
        try:
            f = open(self.pw_file, 'r')  # open the password file
        except IOError:
            self.logger.debug(
                "The pw file could not be opened!\nAnalysis closed!")
            exit(-1)  # TODO: check if this is correct/smartest solution?!
        with f:  # guarantees the file is closed even on unexpected errors
            for line in f:
                try:
                    self.pw_counter += 1  # increment counter
                    # Slice off the leading '<ws><counter><ws>' prefix instead
                    # of str.replace(), which would also mangle passwords that
                    # happen to contain the prefix text a second time.
                    prefix = pw_re.match(line)
                    pw = line[prefix.end():].rstrip('\n')
                    occ = int(counter_re.findall(line)[0].replace(' ', ''))
                    # store the parsed occurrence count and a lookup counter
                    # initialized to 0 for this password
                    self.pws_multi[pw] = {'occ': occ, 'lookups': 0}
                except Exception:
                    # count malformed lines (missing counter, non-numeric
                    # counter, ...) but keep parsing
                    self.error_counter += 1
        self.logger.debug("Parsing done!")
        return self.pws_multi, self.pw_counter, self.error_counter
class HashPure(InputParser):
    '''
    Parser for files containing one hash value per line.
    '''

    def __init__(self, pw_file):
        '''
        Constructor.

        :param pw_file: Path to the hash leak file to parse.
        '''
        # Initiate logger
        self.logger = Logger()
        self.logger.basicConfig('DEBUG')  # set logger level to DEBUG
        self.pw_file = pw_file
        self.hash_counter = 0   # counter for the amount of hashes in the leak
        self.error_counter = 0  # counter for the errors occurring during the file-parsing
        # Dict to store the hashes from the file including an
        # occurrence-counter for each hash
        self.hashes_multi = {}

    def get_filetype(self):
        '''
        Return the input type indicator to run the according analysis module
        and the execution module correctly.
        '''
        return 'hashvalues'

    def parse_pw_file(self):
        '''
        Parses the hash values from the password leak and analyzes the
        occurrences of the hashes.

        :requires: One hash per line in the file.
        :return: (Dict{'occ', 'lookups'}, Int, Int): Dict containing the parsed
            hashes and a dict of their occurrences in the leak ('occ') as well
            as a counter for the amount of lookups ('lookups') which is used to
            count the amount of duplicate candidates that might be generated by
            a guesser. The integers are a password counter and a parsing-error
            counter.
        '''
        self.logger.debug("Start parsing the password file ...")
        # open() never returns None; it raises IOError on failure, so the
        # failure path must be an except clause, not a None check.
        try:
            f = open(self.pw_file, 'r')  # open the password file
        except IOError:
            self.logger.debug(
                "The pw file could not be opened!\nAnalysis closed!")
            exit(-1)  # TODO: check if this is correct/smartest solution?!
        with f:  # guarantees the file is closed even on unexpected errors
            for hashvalue in f:
                try:
                    self.hash_counter += 1  # increment counter
                    # Strip only a trailing newline; the old [:len(h)-1] slice
                    # chopped the last hash character when the final line had
                    # no terminating '\n'.
                    hashvalue = hashvalue.rstrip('\n')
                    if hashvalue not in self.hashes_multi:
                        # first sighting: occurrence 1, lookup counter 0
                        self.hashes_multi[hashvalue] = {'occ': 1, 'lookups': 0}
                    else:
                        # increment occurrence counter for the current hash
                        self.hashes_multi[hashvalue]['occ'] += 1
                except Exception:
                    # count malformed lines but keep parsing
                    self.error_counter += 1
        self.logger.debug("Parsing done!")
        return self.hashes_multi, self.hash_counter, self.error_counter
class Analysis():
    '''
    Class to generate an analysis scheme depending on the input format.

    :param label: Job label.
    :param pw_format: Format indicator for the input file (e.g. 'plaintext_pure').
    :param pw_file: Path to the file containing the leaked passwords/hashes.
    :param pid: Process ID of the guesser.
    :param analysis_interval: Interval in which the progress is written to the file.
    :param terminate_guessing: Maximum amount of candidates to be generated by the
        guesser before it is killed by the framework (for those guessers that do
        not support a 'maximum' parameter).
    :param jtr_pot_file: '.pot' file of JtR to parse for cracked candidates.
    :param output_file: Path of the output file.
    :param progress_file: Path of the progress file.
    :param plot_file: Path of the plot file.
    '''

    def __init__(self, label, pw_format, pw_file, pid, analysis_interval,
                 terminate_guessing, jtr_pot_file, output_file, progress_file,
                 plot_file):
        '''
        Constructor: wires up the input parser, parses the leak file and builds
        the matching analysis scheme.
        '''
        # Initiate logger
        self.logger = Logger()
        self.logger.basicConfig('DEBUG')  # set logger level to DEBUG
        self.label = label
        self.pw_format = pw_format
        self.pw_file = pw_file
        # pid / analysis_interval / terminate_guessing presumably arrive as
        # strings from the caller; literal_eval turns them into int or None
        # (the code below checks terminate_guessing against None).
        self.guesser_pid = ast.literal_eval(pid)
        self.analysis_interval = ast.literal_eval(analysis_interval)
        self.terminate_guessing = ast.literal_eval(terminate_guessing)
        self.jtr_pot_file = jtr_pot_file
        self.output_file = output_file
        self.progress_file = progress_file
        self.plot_file = plot_file
        # generate inputhandler depending on input format
        self.generate_inputhandler()
        # get filetype ('plaintext' or 'hashvalues')
        self.filetype = self.inputhandler.get_filetype()
        # parse password file
        self.pws_multi, self.pw_counter, self.error_counter = self.inputhandler.parse_pw_file()
        # generate analysis scheme (which will do the actual analysis of cracked passwords)
        self.generate_analysisscheme()

    def generate_inputhandler(self):
        '''
        Sets the parser for the input file depending on its content.

        :raises AttributeError: for unsupported values of ``pw_format``.
        '''
        if self.pw_format == 'plaintext_pure':
            self.inputhandler = PlaintextPure(self.pw_file)
        elif self.pw_format == 'hash_pure':
            self.inputhandler = HashPure(self.pw_file)
        elif self.pw_format == 'plaintext_withcount':
            self.inputhandler = PlaintextWithcount(self.pw_file)
        else:
            raise AttributeError('Unsupported file type <%s>! "plaintext_pure", "hash_pure".' % self.pw_format)

    def generate_analysisscheme(self):
        '''
        Generate the analysisscheme object depending on file type of the
        provided password file. The file type is 'plaintext' for input files
        with the format 'plaintext_pure' or 'plaintext_colon' and 'hashvalues'
        for input files with the format 'hash_pure'.

        :raises AttributeError: for unsupported file types.
        '''
        self.analysisscheme = None  # init analysisscheme
        if self.filetype == 'hashvalues':
            # without a termination bound, JtR-driven guessing may never stop
            if self.terminate_guessing is None:
                self.logger.warning("The guesser might run in endless mode as at least one of the job parameters 'terminate_guessing' is 'None'!\n")
            self.analysisscheme = HashAnalysis(self.label, self.pws_multi, self.pw_counter, self.error_counter, self.jtr_pot_file, self.output_file, self.progress_file, self.plot_file, self.analysis_interval)
        elif self.filetype == 'plaintext':
            self.analysisscheme = PlaintextAnalysis(self.label, self.pws_multi, self.pw_counter, self.error_counter, self.output_file, self.progress_file, self.plot_file)
        else:
            raise AttributeError('Unsupported execution strategy!')

    def execute(self):
        '''
        Start processing the generated pw candidates for plaintext input or
        parsing the JtR logfile for hash input.

        Reads from stdin until the guesser stops (or is killed once
        ``terminate_guessing`` candidates were generated).
        '''
        # array to collect all received candidates, analyzed in batches of
        # analysis_interval
        self.received_candidates = [None] * self.analysis_interval
        self.index = 0
        self.candidate_counter = 0  # to count the received candidates --> kill process on certain amount
        # JtR status lines start with '<n>g <m>p' (guesses / processed counts)
        status_line_re = re.compile('^[0-9]*g\s[0-9]*p')
        candidates_processed_re = re.compile('[0-9]*p')
        if self.filetype == 'plaintext':
            for candidate in sys.stdin:
                self.received_candidates[self.index] = candidate[:-1]  # add candidate to array (without '\n')
                self.candidate_counter += 1  # increment candidate counter
                self.index += 1  # increment index
                # NOTE(review): self.candidate is assigned but never read in
                # this method — looks vestigial; confirm before removing.
                self.candidate = ''  # reset buffer
                if (self.terminate_guessing is not None) and (self.candidate_counter == self.terminate_guessing):
                    self.logger.debug("Breaking loop at candidate_number %d" % self.candidate_counter)
                    self.kill_guesser()  # kill the guesser when #['terminate_guessing'] candidates have been generated
                    break
                if self.index == self.analysis_interval:  # when #[analysis_interval] passwords are stored in buffer, they are analyzed
                    self.analysisscheme.process_candidates(self.received_candidates)  # analyze the received candidates
                    self.index = 0  # write next candidates from the beginning into the array
            # handle the end of candidate receiving (handle_close is defined
            # elsewhere in this file; presumably flushes the partial buffer)
            self.handle_close()
        else:  # self.filetype == 'hashvalues'
            # Instead of the candidates, the status lines of JtR are processed for hashed input
            for line in sys.stdin:
                # self.logger.debug(line)
                if not status_line_re.match(line):
                    if 'Session completed' in line:  # all candidates cracked before max. amount of guesses reached
                        self.logger.warning("Breaking loop as 'Session completed' line received by john-hash.")
                        if self.terminate_guessing is not None:
                            self.kill_guesser()
                        self.handle_close()  # process the status lines one by one
                        return  # don't process last line!
                    else:
                        continue  # other line than status line
                else:
                    # lines will be such as: '736g 4008p 0:00:00:04 152.0g/s 828.0p/s 828.0c/s 885086C/s carama..marcia'
                    temp = candidates_processed_re.findall(line)[0]  # get '4008p'
                    # drop the trailing 'p' plus last 3 digits and pad with
                    # '000': rounds the count down to the nearest thousand
                    temp = temp[:-4] + '000'
                    self.candidate_counter = int(temp)  # cast '4000' to int
                    if (self.terminate_guessing is not None) and (self.candidate_counter >= self.terminate_guessing):
                        self.logger.debug("Breaking loop at candidate_number %d" % self.candidate_counter)
                        self.kill_guesser()  # kill the guesser when #['terminate_guessing'] candidates have been generated
                        break
                    self.analysisscheme.process_status_line(line)  # process the status lines one by one
            # NOTE(review): if stdin yields no lines at all, 'line' is unbound
            # here and this raises NameError — confirm stdin is never empty.
            self.handle_close(last_line=line)

    def kill_guesser(self):
        '''
        Sends a 'SIGKILL' signal to all processes spawned by the guesser.sh
        file to terminate the guessing process.

        Handles both psutil 1.x (get_children) and newer psutil (children)
        APIs; any failure is logged and swallowed.
        '''
        self.logger.debug("Starting kill_guesser()")
        if psutil.__version__[0] == str(1):  # sudo apt-get install python-psutil = version 1.2.1 (Ubuntu 14.04, September 2015)
            self.logger.debug("Python psutil version 1 detected (%s)" % str(psutil.__version__))
            self.logger.debug("Calling p.get_children()")
            psversion = 1
        else:  # sudo pip install psutil = version 3.2.0 (Ubuntu 14.04, September 2015)
            self.logger.debug("Python psutil version greater than 1 detected (%s)" % str(psutil.__version__))
            self.logger.debug("Calling p.children()")
            psversion = 3
        self.logger.debug("Parend PID to be killed: %s" % str(self.guesser_pid))
        try:
            try:
                p = psutil.Process(self.guesser_pid)
            except psutil.NoSuchProcess:
                # NOTE(review): the 'pass' is redundant, and if this branch is
                # taken 'p' stays unbound, so the children lookup below raises
                # NameError (swallowed by the outer except) — consider
                # returning here instead.
                pass
                self.logger.debug("Parent process with PID %s not found!"
                                  % str(self.guesser_pid))
            try:
                # kill children first so they cannot be reparented/respawned
                if psversion > 1:
                    children = p.children(recursive=True)
                else:
                    children = p.get_children(recursive=True)
                for child in children:
                    self.logger.debug("Killed child <%s> with pid %s" % (child.name, str(child.pid)))
                    os.kill(child.pid, signal.SIGKILL)
            except psutil.NoSuchProcess:
                pass
                self.logger.debug("No child processes found!")
            os.kill(self.guesser_pid, signal.SIGKILL)
            self.logger.debug("Killed parent process with pid %s" % str(self.guesser_pid))
        except Exception, e:
            self.logger.debug("An exception occurred while killing the guesser: <%s>" % str(e))
class HashAnalysis(AnalysisScheme):
    '''
    Analysis class for hashed password leaks.

    :param label: Label of the current job
    :param pws_multi: Dict of passwords/hashes (key) and as values another dict
        of two counters. One for the occurrences of the pw/hash in the leak
        ('occ') and one to count the lookups ('lookups') by the analysis module
        (^=amount of duplicately generated candidates)
    :param pw_counter: Counter of overall passwords in the leak
    :param error_counter: Amount of parsing errors
    :param jtr_pot_file: '.pot' file of JtR to parse for cracked candidates.
    :param output_file: Path to the output file.
    :param progress_file: Path to the progress file
    :param plot_file: Path to the plot file
    :param analysis_interval: Analysis interval to update the progress file.
    '''

    def __init__(self, label, pws_multi, pw_counter, error_counter,
                 jtr_pot_file, output_file, progress_file, plot_file,
                 analysis_interval):
        ''' Constructor. '''
        # Initiate logger
        self.logger = Logger()
        self.logger.basicConfig('DEBUG')  # set logger level to DEBUG
        self.logger.debug('Starting Hash Analysis')
        self.label = label
        self.jtr_pot_file = jtr_pot_file
        self.output_file = output_file
        self.progress_file = progress_file
        self.plot_file = plot_file
        self.analysis_interval = analysis_interval
        self.interval_counter = 0  # how many progress-file intervals were already written
        self.pws_multi = pws_multi  # Dict to store the passwords from the file including an occurrence-counter for each password
        self.pw_counter = pw_counter  # counter for the amount of passwords (hashes) in the leak
        self.pws_unique_counter = 0  # counter for the amount of unique passwords (hashes) in the leak
        self.guesses = 0  # counter for the candidates
        self.cracked_counter = 0  # counter for the no. of cracked passwords (multi)
        self.error_counter = error_counter  # counter for the errors occurring during the file-parsing
        self.cracked_pws = {}  # Dict to store the cracked passwords and the number of guesses to crack the pw
        self.x_axis_values = list()  # List to store the intervals for the plot file
        # Definitions of regular expressions to parse the JtR status lines
        self.guesses_re = re.compile('[0-9]*p')
        self.cracked_counter_re = re.compile('^[0-9]*')
        # Declaration of counters for analysis of cracked pws
        self.only_letters_counter = 0
        self.only_digits_counter = 0
        self.only_symbols_counter = 0
        self.letters_digits_counter = 0
        self.letters_symbols_counter = 0
        self.digits_symbols_counter = 0
        self.letters_digits_symbols_counter = 0
        # Declarations for the calculation of average char occurrences
        self.avg_length = 0.0
        self.avg_letters = 0.0
        self.avg_digits = 0.0
        self.avg_symbols = 0.0
        # Parse the values written into the plot file by the Preparation module.
        self.parse_x_axis_values()
        # Write guesser label to file (starts this guesser's plot row)
        self.write_line_to_file(self.plot_file, "%s,0.000" % self.label)

    def write_line_to_file(self, path, lines):
        '''
        Write a single line or a list of lines to the specified file
        (appending; a trailing newline is added when missing).

        :param path: Path of the file to write in
        :param lines: List-type object of lines to write, or a single string
        '''
        with open(path, 'a') as f:  # open file
            if type(lines) is list:  # list of lines passed
                for line in lines:  # iterate through lines in the list
                    if not line.endswith('\n'):
                        line = '%s\n' % line  # append newline char
                    f.write(line)
            else:
                line = lines  # single line (string) passed
                if not line.endswith('\n'):
                    line = '%s\n' % lines  # append newline char
                f.write(line)

    def parse_x_axis_values(self):
        '''
        Parse the values for the x-axis of the plot from the plot file's
        header line (written by the Preparation module).
        '''
        with open(self.plot_file, 'r') as f:
            header_line = f.readline()  # read first line of plot file
            temp = header_line.split(',')
            temp.pop(0)  # pop 'Categories'
            temp.pop(0)  # pop 0
            for value in temp:
                self.x_axis_values.append(int(value))

    def update_plot_file(self, percentage):
        '''
        Appends the provided value (percentage) to the last line of the
        plotfile.

        :param percentage: Percentage of cracked passwords (float).
        '''
        with open(self.plot_file, 'r') as f:
            lines = f.readlines()
        last_line = lines[len(lines) - 1][:-1]  # get last line and remove '\n'
        last_line_new = "%s,%s\n" % (last_line, "%.3f" % percentage)  # append value to last line
        lines[len(lines) - 1] = last_line_new  # update last line
        with open(self.plot_file, 'w') as f:  # write lines back to file
            for line in lines:
                f.write(line)

    def process_status_line(self, line):
        '''
        Processes the status lines of JtR.
        Received lines will look like this:
        '736g 4008p 0:00:00:04 152.0g/s 828.0p/s 828.0c/s 885086C/s carama..marcia'

        :param line: ONE status line, received by the analysis.py execute()
            method.
        '''
        # parse amount of processed candidates
        temp = self.guesses_re.findall(line)[0]  # get '4008p'
        # drop the trailing 'p' plus last 3 digits, pad with '000': rounds the
        # count down to the nearest thousand (assumes the count has >= 4 digits
        # incl. 'p' — TODO confirm for counts below 1000)
        temp = temp[:-4] + '000'
        self.guesses = int(temp)  # cast '4000' to int
        try:
            # parse amount of cracked pws (the leading '<n>g' figure)
            self.cracked_counter = int(self.cracked_counter_re.findall(line)[0])
        except ValueError:
            self.logger.debug("A parsing error occured for JtR terminal-line <%s>" % line)
        # calculate the percentage of cracked passwords BEFORE entire block is processed
        percentage_cracked = float(self.cracked_counter) / float(self.pw_counter) * 100
        try:
            if self.guesses == self.x_axis_values[0]:
                self.update_plot_file(percentage_cracked)  # update plotfile
                self.x_axis_values.pop(0)  # remove the value that was just written
        except IndexError:
            # silently ignore 'Index Out of Range' errors if more candidates are
            # generated than specified for the x_axis of the plotfile
            pass
        # update progress_file
        if self.guesses >= (self.analysis_interval * self.interval_counter):
            self.interval_counter += 1
            # write the current status into the file '[output_file]_progress.txt'
            status_line = '%d,%d,%7.3f\n' % (self.guesses, self.cracked_counter, percentage_cracked)
            self.write_line_to_file(self.progress_file, str(status_line))

    def process_candidates(self):
        ''' Required only for plaintext analysis; no-op for hash analysis. '''
        pass

    def parse_jtr_pot_file(self):
        '''
        Parse the PGF.pot file of JtR to determine which passwords have been
        cracked. Pot lines look like '<hash>:<plaintext>'.
        '''
        with open(self.jtr_pot_file, 'r') as f:
            for line in f:
                splitline = line.split(':')
                # re-join everything after the first ':' (the plaintext may
                # itself contain ':') and remove the trailing '\n'
                pw = ':'.join(splitline[1:])[:-1]
                # add the candidate and a 0 for its guessing no. as no
                # guessing-no. can be determined from the '.pot' file
                self.cracked_pws[pw] = 0
                if len(self.cracked_pws) == self.cracked_counter:
                    # only read the cracked amount of pws from the jtr pot file
                    break

    def count_unique_hashes(self):
        '''
        Counts the total amount of unique hashes in the leak (hashes with
        exactly one occurrence).
        '''
        self.pws_unique_counter = 0  # reset prior to counting
        for _, value in self.pws_multi.items():
            if value['occ'] == 1:
                self.pws_unique_counter += 1

    def categorize_pws(self):
        '''
        Process the cracked passwords and categorize them.
        The function sorts the pws by:
        --> letters only
        --> digits only
        --> symbols only
        --> includes letters and digits
        --> includes letters and symbols
        --> includes letters, digits and symbols
        '''
        # Declaration of regular expressions (checked from most to least
        # specific; each pw is counted in exactly one category)
        only_letters_re = re.compile('^[a-zA-Z]*$')
        only_digits_re = re.compile('^[0-9]*$')
        only_symbols_re = re.compile('^[\s\%\$\^\*\@\&\/\#\!\?\_\-\+\.\,\=\:\;\'\"\<\>\(\)\{\}\[\]]*$')
        letters_digits_re = re.compile('^[a-zA-Z0-9]*$')
        letters_symbols_re = re.compile('^[a-zA-Z\s\%\$\^\*\@\&\/\#\!\?\_\-\+\.\,\=\:\;\'\"\<\>\(\)\{\}\[\]]*$')
        digits_symbols_re = re.compile('^[0-9\s\%\$\^\*\@\&\/\#\!\?\_\-\+\.\,\=\:\;\'\"\<\>\(\)\{\}\[\]]*$')
        letters_digits_symbols_re = re.compile('^[a-zA-Z0-9\s\%\$\^\*\@\&\/\#\!\?\_\-\+\.\,\=\:\;\'\"\<\>\(\)\{\}\[\]]*$')
        # Start analysis of characters in the cracked pws
        # NOTE(review): iterkeys() is Python-2-only.
        for pw in self.cracked_pws.iterkeys():
            if only_letters_re.search(pw):
                self.only_letters_counter += 1
                continue
            if only_digits_re.search(pw):
                self.only_digits_counter += 1
                continue
            if only_symbols_re.search(pw):
                self.only_symbols_counter += 1
                continue
            if letters_digits_re.search(pw):
                self.letters_digits_counter += 1
                continue
            if letters_symbols_re.search(pw):
                self.letters_symbols_counter += 1
                continue
            if digits_symbols_re.search(pw):
                self.digits_symbols_counter += 1
                continue
            if letters_digits_symbols_re.search(pw):
                self.letters_digits_symbols_counter += 1
                continue

    def calc_average_chars(self):
        '''
        Calculates the average of the following per cracked pw:
        --> letters
        --> digits
        --> symbols
        '''
        # Declaration of regular expressions
        letters_re = re.compile('[a-zA-Z]')
        digits_re = re.compile('[0-9]')
        symbols_re = re.compile('[\s\%\$\^\*\@\&\/\#\!\?\_\-\+\.\,\=\:\;\'\"\<\>\(\)\{\}\[\]]')
        # Counter declarations
        length_counter = 0
        letters_counter = 0
        digits_counter = 0
        symbols_counter = 0
        # iterate through all cracked pws (and the counters per pw)
        for pw in self.cracked_pws.iterkeys():
            length_counter += len(pw)  # add length of pw to counter
            # add no. of found chars in pw to the counters
            letters_counter += len(letters_re.findall(pw))
            digits_counter += len(digits_re.findall(pw))
            symbols_counter += len(symbols_re.findall(pw))
        # calc. the average(s)
        try:
            self.avg_length = float(length_counter) / float(self.cracked_counter)
            self.avg_letters = float(letters_counter) / float(self.cracked_counter)
            self.avg_digits = float(digits_counter) / float(self.cracked_counter)
            self.avg_symbols = float(symbols_counter) / float(self.cracked_counter)
        except ZeroDivisionError:
            # no pw cracked --> ignore error as average values will stay 0
            pass

    def execute_analysis_plugins(self):
        '''
        Searches the 'analysis_plugins' folder for scripts to execute along
        with the default analysis modules. Scripts whose filename starts with
        '_' are skipped; each plugin runs via execfile() with the analysis
        state exposed as globals. Any failure is logged and swallowed.
        '''
        try:
            plugins = os.listdir(os.path.abspath('./analysis_plugins'))
            # iterate over a copy so removing from 'plugins' is safe
            temp = list(plugins)
            for script in temp:
                if script[0] == '_':
                    self.logger.debug("The analysis-plugin <%s> will be skipped." % script)
                    plugins.remove(script)  # skip files with filename starting with '_'
            if len(plugins) > 0:
                self.logger.debug("Executing %d analysis plugins..." % len(plugins))
                for script in plugins:
                    self.logger.debug("Starting plugin <%s>" % script)
                    path = os.path.abspath('./analysis_plugins/%s' % script)
                    # NOTE(review): execfile() is Python-2-only and executes
                    # arbitrary code from the plugins folder.
                    execfile(path, {'self_name': script,
                                    'label': self.label,
                                    'pws_multi': self.pws_multi,
                                    'pw_counter': self.pw_counter,
                                    'pws_unique_counter': self.pws_unique_counter,
                                    'guesses': self.guesses,
                                    'cracked_counter': self.cracked_counter,
                                    'cracked_pws': self.cracked_pws,
                                    'output_file': self.output_file})
            else:
                self.logger.debug("No analysis plugins found.")
        except Exception, e:
            self.logger.debug(str(e))
class Template(InputParser):
    '''
    Template parser for files containing one plaintext password per line.
    Copy this class and fill in the line-parsing logic for a new input format.
    '''

    def __init__(self, pw_file):
        ''' Constructor. '''
        # don't change the __init__ method!
        # Initiate logger
        self.logger = Logger()
        self.logger.basicConfig('DEBUG')  # set logger level to DEBUG
        # initiate variables
        self.pw_file = pw_file  # path of the input file
        self.pw_counter = 0  # counter for the amount of passwords in the leak
        self.error_counter = 0  # counter for the errors occurring during the file-parsing
        self.pws_multi = {}  # Dict to store the passwords from the file including an occurrence-counter for each password

    def get_filetype(self):
        '''
        Return the input type indicator to run the according analysis module
        and the execution module correctly.
        '''
        # uncomment the return value according to your input type.
        return 'hashvalues'
        # return 'plaintext'

    def parse_pw_file(self):
        '''
        Parses the passwords (plaintext) from the password leak and analyzes
        the occurrences of the passwords.

        :requires: One password per line in the file.
        :return: (Dict{'occ', 'lookups'}, Int, Int): Dict containing the parsed
            passwords and a dict of their occurrences in the leak ('occ') as
            well as a counter for the amount of lookups ('lookups') which is
            used to count the amount of duplicate candidates that might be
            generated by a guesser. The integers are a password counter and a
            parsing-error counter.
        '''
        self.logger.debug("Start parsing the password file ...")
        f = open(self.pw_file, 'rU')  # open the password file
        # NOTE(review): open() raises IOError instead of returning None, so
        # this check never fires — an except clause would be needed.
        if f is None:
            self.logger.debug("The pw file could not be opened!\nAnalysis closed!")
            exit(-1)
        for line in f:
            try:
                self.pw_counter += 1  # increment counter
                # The main changes have to be done here.
                # Parse the lines in the file and extract the password/hash value.
                #
                # It is important to only parse the plaintext password or the UNSALTED hashvalue.
                # JtR will evaluate the salt as well of course, but the analysis module is not (yet)
                # capable of processing the salt.
                #
                # Parsing a pure plaintext leak:
                # --> pw = line[:len(line)-1] (removing the '\n' character from the plaintext password)
                #
                # Parsing a plaintext leak containing lines "username:password":
                # --> splitted = line.split(':') (split the line with separator ':')
                # --> pw = ':'.join(splitted[1:])[:-1] (concatenate splitted parts again with ':' as separator)
                #                    ^            ^
                #                    |            |
                #                    |            remove '\n' character
                #                    remove first element (username (which MUST NOT contain ':' for correct parsing))
                #
                # The following part remains the same if the occurrence count (as in withcount format) is not included in the line itself.
                # For withcount formats, also parse the counter and apply the following change below:
                '''
                if pw not in self.pws_multi:
                    self.pws_multi[pw] = {'occ':1, 'lookups':0} # add a dict to the dict-entry [pw] with init values 1 and 0
                else:
                    self.pws_multi[pw]['occ'] += 1 # increment occurence counter for the current password
                '''
                # --> is to be replaced with:
                '''
                # counter = [PARSE THE COUNTER FROM LINE HERE]
                self.pws_multi[pw] = {'occ':counter, 'lookups':0} # add a dict to the dict-entry [pw] with init values 1 and 0
                '''
                # Note that 'pw' would be the hash value for hashed passwords.
                # Explanation:
                # Each password/hash (key of the dictionary) has a second dictionary as value, storing an
                # occurrence counter (accessible via: self.pws_multi['MyPassword']['occ']) and a
                # lookup counter (accessible via: self.pws_multi['MyPassword']['lookups']).
                # The latter is used to identify multiple guesses made by the guesser in the analysis module.
                # The occurrence counter is needed to check how many passwords a
                # single candidate cracks at once (presumably — truncated in the
                # original; confirm against project docs).
                #
                # NOTE(review): 'pw' is undefined until you add parsing code
                # above; as-is every line raises NameError, which the bare
                # except below counts as a parse error.
                if pw not in self.pws_multi:
                    self.pws_multi[pw] = {'occ':1, 'lookups':0}  # add a dict to the dict-entry [pw] with init values 1 and 0
                else:
                    self.pws_multi[pw]['occ'] += 1  # increment occurrence counter for the current password
            except:
                self.error_counter += 1  # silently ignore decode-errors but count it
        f.close()  # close the file
        self.logger.debug("Parsing done!")
        return self.pws_multi, self.pw_counter, self.error_counter
class PlaintextAnalysis(AnalysisScheme):
    '''
    Analysis class for plaintext password leaks.

    :param label: Label of the current job
    :param pws_multi: Dict of passwords/hashes (key) and as values another dict of two
                      counters: one for the occurrences of the pw/hash in the leak ('occ')
                      and one to count the lookups ('lookups') by the analysis module
                      (^= amount of duplicately generated candidates)
    :param pw_counter: Counter of overall passwords in the leak (duplicates included)
    :param error_counter: Amount of parsing errors
    :param output_file: Path to the output file.
    :param progress_file: Path to the progress file
    :param plot_file: Path to the plot file
    '''

    def __init__(self, label, pws_multi, pw_counter, error_counter, output_file, progress_file, plot_file):
        ''' Generator. Stores the parsed leak data and initializes all counters. '''
        # Initiate logger
        self.logger = Logger()
        self.logger.basicConfig('DEBUG')  # set logger level to DEBUG
        self.logger.debug('Starting Plaintext Analysis')
        self.label = label
        self.output_file = output_file
        self.progress_file = progress_file
        self.plot_file = plot_file
        self.pws_multi = pws_multi              # passwords from the file incl. an occurrence counter per password
        self.pw_counter = pw_counter            # amount of passwords in the leak
        self.pws_unique_counter = 0             # amount of unique passwords in the leak
        self.guesses = 0                        # counter for the candidates
        self.duplicate_candidates = 0           # candidates that have been generated multiple times
        self.duplicate_guesses_total = 0        # total amount of duplicate lookups
        self.cracked_counter = 0                # no. of cracked passwords (multi)
        self.cracked_unique_counter = 0         # cracked passwords that occurred only once in the leak
        self.error_counter = error_counter      # errors that occurred during the file-parsing
        self.cracked_pws = {}                   # cracked password -> guess number at which it was cracked
        self.x_axis_values = list()             # intervals for the plot file
        # Counters for the categorization of cracked pws (filled by categorize_pws())
        self.only_letters_counter = 0
        self.only_digits_counter = 0
        self.only_symbols_counter = 0
        self.letters_digits_counter = 0
        self.letters_symbols_counter = 0
        self.digits_symbols_counter = 0
        self.letters_digits_symbols_counter = 0
        # Averages per cracked pw (filled by calc_average_chars())
        self.avg_length = 0.0
        self.avg_letters = 0.0
        self.avg_digits = 0.0
        self.avg_symbols = 0.0
        # Parse the values written into the plot file by the Preparation module.
        self.parse_x_axis_values()
        # Write guesser label to file
        self.write_line_to_file(self.plot_file, "%s,0.000" % self.label)

    def write_line_to_file(self, path, lines):
        '''
        Write a single line or a list of lines to the specified file (append mode).

        :param path: Path of the file to write in
        :param lines: List-type object of lines to write, or a single string
        '''
        with open(path, 'a') as f:  # open file for appending
            if type(lines) is list:  # list of lines passed
                for line in lines:  # iterate through lines in the list
                    if not line.endswith('\n'):
                        line = '%s\n' % line  # append newline char
                    f.write(line)
            else:
                line = lines  # single line (string) passed
                if not line.endswith('\n'):
                    line = '%s\n' % lines  # append newline char
                f.write(line)

    def parse_x_axis_values(self):
        '''
        Parse the values for the x-axis of the plot from the header line of the plot file.
        Expected header shape: 'Categories,0,<v1>,<v2>,...'.
        '''
        with open(self.plot_file, 'r') as f:
            header_line = f.readline()  # read first line of plot file
        temp = header_line.split(',')
        temp.pop(0)  # pop 'Categories'
        temp.pop(0)  # pop 0
        for value in temp:
            self.x_axis_values.append(int(value))

    def update_plot_file(self, percentage):
        '''
        Appends the provided value (percentage) to the last line of the plotfile.
        '''
        with open(self.plot_file, 'r') as f:
            lines = f.readlines()
        last_line = lines[len(lines)-1][:-1]  # get last line and remove '\n'
        last_line_new = "%s,%s\n" % (last_line, "%.3f" % percentage)  # append value to last line in file
        lines[len(lines)-1] = last_line_new  # update last line
        with open(self.plot_file, 'w') as f:  # write lines back to file
            for line in lines:
                f.write(line)

    def process_candidates(self, candidate_block):
        '''
        Starts the analysis: looks up every received candidate in the leak and keeps
        the cracking statistics, the plot file and the progress file up to date.

        :param candidate_block: list-type collection of password candidates received by the server.
        '''
        for candidate in candidate_block:
            self.guesses += 1  # increment guessing counter
            if candidate in self.pws_multi:  # did the candidate crack a password?
                if self.pws_multi[candidate]['lookups'] == 0:  # password has not yet been cracked
                    self.pws_multi[candidate]['lookups'] += 1  # mark as cracked / first lookup
                    self.cracked_counter += self.pws_multi[candidate]['occ']  # every occurrence in the leak counts as cracked
                    if self.pws_multi[candidate]['occ'] == 1:
                        self.cracked_unique_counter += 1  # pw occurred uniquely in the leak
                    self.cracked_pws[candidate] = self.guesses  # remember at which guess no. the pw was cracked
                else:
                    self.pws_multi[candidate]['lookups'] += 1  # candidate has already been received --> duplicate guess
            try:
                # update the plot file as soon as a plot interval is reached,
                # i.e. BEFORE the entire block is processed
                if self.guesses == self.x_axis_values[0]:
                    percentage_cracked = float(self.cracked_counter)/float(self.pw_counter)*100
                    self.update_plot_file(percentage_cracked)  # update plotfile
                    self.x_axis_values.pop(0)  # remove the value already written
            except IndexError:
                # silently ignore 'Index Out of Range' errors if more candidates are
                # generated than specified for the x_axis of the plotfile
                pass
        # calculate the percentage of cracked passwords at the end of any block
        # processing to write it to the progress file
        percentage_cracked = float(self.cracked_counter)/float(self.pw_counter)*100
        # write the current status into the file '[output_file]_progress.txt'
        status_line = '%d,%d,%7.3f\n' % (self.guesses, self.cracked_counter, percentage_cracked)
        self.write_line_to_file(self.progress_file, str(status_line))

    def parse_jtr_pot_file(self):
        '''
        Required for hash analysis, but will be called from analysis.py.execute for
        plaintext as well -- hence this intentional no-op.
        '''
        pass

    def count_unique_pws(self):
        '''
        Counts the total amount of unique passwords (occ == 1) in the leak.
        '''
        self.pws_unique_counter = 0  # reset prior to counting
        for _, value in self.pws_multi.items():
            if value['occ'] == 1:
                self.pws_unique_counter += 1

    def count_duplicate_guesses(self):
        '''
        Counts the amount of pw candidates which have been generated multiple times by
        the guesser as well as the total amount of duplicate lookups.
        '''
        for occ_lookups in self.pws_multi.itervalues():
            lu_count = occ_lookups['lookups']
            # a lookup of 0 means 'pw uncracked'; 1 means 'pw cracked';
            # >1 means 'candidate generated AT LEAST twice'
            if lu_count > 1:
                self.duplicate_candidates += 1  # candidate generated multiple times
                self.duplicate_guesses_total += lu_count  # add amount of duplicate lookups

    def categorize_pws(self):
        '''
        Process the cracked passwords and categorize them. The function sorts the pws by:
        --> letters only
        --> digits only
        --> symbols only
        --> includes letters and digits
        --> includes letters and symbols
        --> includes digits and symbols
        --> includes letters, digits and symbols

        NOTE(review): a pw containing characters outside all three classes (e.g.
        non-ASCII letters) matches no branch and stays uncounted -- confirm intended.
        '''
        # Declaration of regular expressions (anchored full-string matches)
        only_letters_re = re.compile('^[a-zA-Z]*$')
        only_digits_re = re.compile('^[0-9]*$')
        only_symbols_re = re.compile('^[\s\%\$\^\*\@\&\/\#\!\?\_\-\+\.\,\=\:\;\'\"\<\>\(\)\{\}\[\]]*$')
        letters_digits_re = re.compile('^[a-zA-Z0-9]*$')
        letters_symbols_re = re.compile('^[a-zA-Z\s\%\$\^\*\@\&\/\#\!\?\_\-\+\.\,\=\:\;\'\"\<\>\(\)\{\}\[\]]*$')
        digits_symbols_re = re.compile('^[0-9\s\%\$\^\*\@\&\/\#\!\?\_\-\+\.\,\=\:\;\'\"\<\>\(\)\{\}\[\]]*$')
        letters_digits_symbols_re = re.compile('^[a-zA-Z0-9\s\%\$\^\*\@\&\/\#\!\?\_\-\+\.\,\=\:\;\'\"\<\>\(\)\{\}\[\]]*$')
        # Start analysis of characters in the cracked pws.
        # Order matters: narrower classes are tested first, so each pw is counted once.
        for pw in self.cracked_pws.iterkeys():
            if only_letters_re.search(pw):
                self.only_letters_counter += 1
                continue
            if only_digits_re.search(pw):
                self.only_digits_counter += 1
                continue
            if only_symbols_re.search(pw):
                self.only_symbols_counter += 1
                continue
            if letters_digits_re.search(pw):
                self.letters_digits_counter += 1
                continue
            if letters_symbols_re.search(pw):
                self.letters_symbols_counter += 1
                continue
            if digits_symbols_re.search(pw):
                self.digits_symbols_counter += 1
                continue
            if letters_digits_symbols_re.search(pw):
                self.letters_digits_symbols_counter += 1
                continue

    def calc_average_chars(self):
        '''
        Calculates the average of the following per cracked pw (weighted by the
        occurrence count of each pw in the leak):
        --> length
        --> letters
        --> digits
        --> symbols
        '''
        # Declaration of regular expressions
        letters_re = re.compile('[a-zA-Z]')
        digits_re = re.compile('[0-9]')
        symbols_re = re.compile('[\s\%\$\^\*\@\&\/\#\!\?\_\-\+\.\,\=\:\;\'\"\<\>\(\)\{\}\[\]]')
        # Counter declarations
        length_counter = 0
        letters_counter = 0
        digits_counter = 0
        symbols_counter = 0
        occ_counter = 0
        # iterate through all cracked pws (and the counters per pw)
        for pw in self.cracked_pws.iterkeys():
            occ_counter = self.pws_multi[pw]['occ']
            length_counter += len(pw) * occ_counter  # weight length by occurrences in the leak
            letters_counter += len(letters_re.findall(pw)) * occ_counter
            digits_counter += len(digits_re.findall(pw)) * occ_counter
            symbols_counter += len(symbols_re.findall(pw)) * occ_counter
        # calc. the averages; cracked_counter is occurrence-weighted, matching the numerators
        try:
            self.avg_length = float(length_counter) / float(self.cracked_counter)
            self.avg_letters = float(letters_counter) / float(self.cracked_counter)
            self.avg_digits = float(digits_counter) / float(self.cracked_counter)
            self.avg_symbols = float(symbols_counter) / float(self.cracked_counter)
        except ZeroDivisionError:
            # no pw cracked --> ignore error as average values will stay 0
            pass

    def execute_analysis_plugins(self):
        '''
        Searches the 'analysis_plugins' folder for scripts to execute along with the
        default analysis modules. Each plugin runs via execfile() with a globals dict
        exposing the analysis state. Filenames starting with '_' are skipped.
        '''
        try:
            plugins = os.listdir(os.path.abspath('./analysis_plugins'))
            temp = list(plugins)  # iterate over a copy so removal below is safe
            for script in temp:
                if script[0] == '_':
                    self.logger.debug("The analysis-plugin <%s> will be skipped." % script)
                    plugins.remove(script)  # skip files with filename starting with '_'
            if len(plugins) > 0:
                self.logger.debug("Executing %d analysis plugins..." % len(plugins))
                for script in plugins:
                    self.logger.debug("Starting plugin <%s>" % script)
                    path = os.path.abspath('./analysis_plugins/%s' % script)
                    # NOTE(review): execfile runs arbitrary code from the plugins folder;
                    # only place trusted scripts there.
                    execfile(path,{'self_name':script,
                                   'label':self.label,
                                   'pws_multi':self.pws_multi,
                                   'pw_counter':self.pw_counter,
                                   'pws_unique_counter':self.pws_unique_counter,
                                   'guesses':self.guesses,
                                   'cracked_counter':self.cracked_counter,
                                   'cracked_pws':self.cracked_pws,
                                   'output_file':self.output_file })
            else:
                self.logger.debug("No analysis plugins found.")
        except Exception, e:
            # log and continue: a failing plugin must not abort the analysis
            self.logger.debug(str(e))
def main():
    '''
    Starts the Password Guessing Framework.

    :requires: a configuration file named "run.ini" in the directory
               "[...]/Password Guessing Framework/configfiles/"
    '''
    runtimes = OrderedDict()  # dict to store the runtimes of each job
    start = timeit.default_timer()
    # Initiate logger
    logger = Logger()
    logger.basicConfig('DEBUG')  # set logger level to DEBUG
    # Initiate ConfigHelper instance
    ch = ConfigHelper('./configfiles/run.ini', logger=logger)
    # parse jobs from 'run.ini'
    job_queue = ch.parse_jobs()
    job_counter = 0  # helper to get the correct job-object from the json file at the end of each job run
    # Clear 'PGF.log' file in the JtR directory to reset it.
    # The path of the logfile will be generated and stored while parsing the jobs,
    # thus get it from the first job in the list.
    job_queue[0].clear_jtr_log()
    # iterate through the job queue
    while len(job_queue) > 0:
        logger.debug("Remaining jobs: %2d\n\n" % len(job_queue))
        job = job_queue.pop(0)  # get first job in queue
        job_start = timeit.default_timer()
        logger.debug("Starting Job <%s>" % job.label)
        # Clear 'PGF.pot' and 'PGF.rec' files in the JtR directory to reset hashing state for each job
        job.clear_jtr_pot_rec()
        # Preparation is called from the config helper
        # Executor instance
        executor = Executor(job)
        executor.execute()
        # Analysis is called from the executor as it is run as a subprocess!
        # calc runtime of the current job
        job_end = timeit.default_timer()
        job_runtime = job_end - job_start
        # format seconds as 'Nd:Nh:Nm:Ns'
        job_human_runtime = ("%2dd:%2dh:%2dm:%2ds" % ((job_runtime/86400), (fmod(job_runtime,86400)/3600), (fmod(job_runtime,3600)/60), (fmod(job_runtime,60)) ))
        with open(job.output_file, 'a') as output_file:
            # write runtime of the current job to the according outfile
            output_file.write("\nRuntime: '%s': %s" % (job.label, job_human_runtime))
        # job finished!
        logger.debug("---------------- JOB <%s> DONE! --------------------------\n" % job.label)
        logger.debug("Runtime: %28s: %s\n" % (job.label, job_human_runtime))
        # add runtime to list of job-runtimes
        runtimes[job.label] = job_human_runtime
        # Write runtime of current job into jobs.json (read-modify-write)
        with open('./results/jobs.json', 'r') as f:
            json_obj = json.load(f)
        json_obj['jobs'][job_counter]['runtime'] = job_human_runtime
        with open('./results/jobs.json', 'w') as f:
            f.write(json.dumps(json_obj, sort_keys=True, indent=4))
        job_counter += 1
    # **** **** WHILE LOOP ENDS HERE! **** ****
    # calc runtime of PGF
    end = timeit.default_timer()
    runtime = end - start
    human_runtime = ("%3dd:%2dh:%2dm:%2ds" % ((runtime/86400), (fmod(runtime,86400)/3600), (fmod(runtime,3600)/60), (fmod(runtime,60)) ))
    logger.debug("---------------- ALL JOBS PROCESSED! --------------------------\n")
    # Run the shell script processing the progress files
    final_processing(logger, ch.get_option('DEFAULT', 'final_processing'))
    # print summary of job runtimes
    logger.debug("Job-Runtimes:")
    for label, rt in runtimes.iteritems():
        logger.debug("%37s: %s" % (label, rt))
    logger.debug("Overall Runtime:%s%s\n" % (22*' ', human_runtime))
    logger.debug("PGF closed.")
    # backup all created files AT LAST STEP!
    result_backup(ch.get_option('DEFAULT', 'backup_dir'), ch.get_timestamp_uuid())
class Template(InputParser):
    '''
    Template parser for files containing one plaintext password (or hash) per line.
    Copy this class and fill in the line-parsing logic in parse_pw_file().
    '''

    def __init__(self, pw_file):
        ''' Generator. '''
        # don't change the __init__ method!
        # Initiate logger
        self.logger = Logger()
        self.logger.basicConfig('DEBUG')  # set logger level to DEBUG
        # initiate variables
        self.pw_file = pw_file      # path of the input file
        self.pw_counter = 0         # counter for the amount of passwords in the leak
        self.error_counter = 0      # counter for the errors occurring during the file-parsing
        self.pws_multi = { }        # passwords from the file incl. an occurrence counter per password

    def get_filetype(self):
        '''
        Return the input type indicator to run the according analysis module and the
        execution module correctly.
        '''
        # uncomment the return value according to your input type.
        return 'hashvalues'
        # return 'plaintext'

    def parse_pw_file(self):
        '''
        Parses the passwords (plaintext) from the password leak and analyzes the
        occurrences of the passwords.

        :requires: One password per line in the file.
        :return: (Dict{'occ', 'lookups'}, Int, Int): Dict containing the parsed passwords
                 and a dict of their occurrences in the leak ('occ') as well as a counter
                 for the amount of lookups ('lookups') which is used to count the amount
                 of duplicate candidates that might be generated by a guesser. The
                 integers are a password counter and a parsing-error counter.
        '''
        self.logger.debug("Start parsing the password file ...")
        f = open(self.pw_file, 'rU')  # open the password file (universal newlines)
        # NOTE(review): open() raises IOError on failure instead of returning None,
        # so this guard is effectively dead code -- kept for template consistency.
        if f is None:
            self.logger.debug( "The pw file could not be opened!\nAnalysis closed!")
            exit(-1)
        for line in f:
            try:
                self.pw_counter += 1  # increment counter
                # The main changes have to be done here.
                # Parse the lines in the file and extract the password/hash value.
                #
                # It is important to only parse the plaintext password or the UNSALTED hashvalue.
                # JtR will evaluate the salt as well of course, but the analysis module is not (yet)
                # capable of processing the salt.
                #
                # Parsing a pure plaintext leak:
                # --> pw = line[:len(line)-1]        (removing the '\n' character from the plaintext password)
                #
                # Parsing a plaintext leak containing lines "username:password":
                # --> splitted = line.split(':')     (split the line with separator ':')
                # --> pw = ':'.join(splitted[1:])[:-1]  (concatenate split parts again with ':' as separator,
                #     drop the first element (username, which MUST NOT contain ':' for correct parsing)
                #     and remove the trailing '\n' character)
                #
                # NOTE(review): 'pw' is NOT defined until the parsing logic above is
                # implemented; until then every line raises a NameError which is
                # swallowed by the bare except below and counted as a parsing error.
                #
                # The following part remains the same if the occurrences (as in withcount format)
                # are not included in the line itself.
                # For withcount formats, also parse the counter and apply the following change below:
                '''
                if pw not in self.pws_multi:
                    self.pws_multi[pw] = {'occ':1, 'lookups':0} # add a dict to the dict-entry [pw] with init values 1 and 0
                else:
                    self.pws_multi[pw]['occ'] += 1 # increment occurence counter for the current password
                '''
                # --> is to be replaced with:
                '''
                # counter = [PARSE THE COUNTER FROM LINE HERE]
                self.pws_multi[pw] = {'occ':counter, 'lookups':0} # add a dict to the dict-entry [pw] with init values 1 and 0
                '''
                # Note that 'pw' would be the hash value for hashed passwords.
                # Explanation:
                # Each password/hash (key of the dictionary) has a second dictionary as value, storing an
                # occurrence counter (accessible via: self.pws_multi['MyPassword']['occ']) and a
                # lookup counter (accessible via: self.pws_multi['MyPassword']['lookups']).
                # The latter is used to identify multiple guesses made by the guesser in the analysis module.
                # The occurrence counter is needed to check how many passwords a single
                # cracked candidate accounts for in the leak.
                if pw not in self.pws_multi:
                    self.pws_multi[pw] = { 'occ': 1, 'lookups': 0 }  # first occurrence: init counters to 1 and 0
                else:
                    self.pws_multi[pw][ 'occ'] += 1  # increment occurrence counter for the current password
            except:
                self.error_counter += 1  # silently ignore decode-errors but count them
        f.close()  # close the file
        self.logger.debug("Parsing done!")
        return self.pws_multi, self.pw_counter, self.error_counter
class HashAnalysis(AnalysisScheme):
    '''
    Analysis class for hashed password leaks.

    :param label: Label of the current job
    :param pws_multi: Dict of passwords/hashes (key) and as values another dict of two
                      counters: one for the occurrences of the pw/hash in the leak ('occ')
                      and one to count the lookups ('lookups') by the analysis module
                      (^= amount of duplicately generated candidates)
    :param pw_counter: Counter of overall passwords in the leak
    :param error_counter: Amount of parsing errors
    :param jtr_pot_file: '.pot' file of JtR to parse for cracked candidates.
    :param output_file: Path to the output file.
    :param progress_file: Path to the progress file
    :param plot_file: Path to the plot file
    :param analysis_interval: Analysis interval to update the progress file.
    '''

    def __init__(self, label, pws_multi, pw_counter, error_counter, jtr_pot_file, output_file, progress_file, plot_file, analysis_interval):
        ''' Generator. Stores the parsed leak data and initializes all counters. '''
        # Initiate logger
        self.logger = Logger()
        self.logger.basicConfig('DEBUG')  # set logger level to DEBUG
        self.logger.debug('Starting Hash Analysis')
        self.label = label
        self.jtr_pot_file = jtr_pot_file
        self.output_file = output_file
        self.progress_file = progress_file
        self.plot_file = plot_file
        self.analysis_interval = analysis_interval  # progress file is updated every this many guesses
        self.interval_counter = 0                   # how many progress intervals have been written
        self.pws_multi = pws_multi                  # hashes from the file incl. an occurrence counter per hash
        self.pw_counter = pw_counter                # amount of passwords (hashes) in the leak
        self.pws_unique_counter = 0                 # amount of unique passwords (hashes) in the leak
        self.guesses = 0                            # counter for the candidates
        self.cracked_counter = 0                    # no. of cracked passwords (multi)
        self.error_counter = error_counter          # errors that occurred during the file-parsing
        self.cracked_pws = {}                       # cracked password -> number of guesses to crack the pw
        self.x_axis_values = list()                 # intervals for the plot file
        # Definitions of regular expressions to parse the JtR status lines
        self.guesses_re = re.compile('[0-9]*p')     # e.g. matches '4008p' (candidates processed)
        self.cracked_counter_re = re.compile('^[0-9]*')  # leading digits, e.g. '736' of '736g'
        # Counters for the categorization of cracked pws (filled by categorize_pws())
        self.only_letters_counter = 0
        self.only_digits_counter = 0
        self.only_symbols_counter = 0
        self.letters_digits_counter = 0
        self.letters_symbols_counter = 0
        self.digits_symbols_counter = 0
        self.letters_digits_symbols_counter = 0
        # Averages per cracked pw (filled by calc_average_chars())
        self.avg_length = 0.0
        self.avg_letters = 0.0
        self.avg_digits = 0.0
        self.avg_symbols = 0.0
        # Parse the values written into the plot file by the Preparation module.
        self.parse_x_axis_values()
        # Write guesser label to file
        self.write_line_to_file(self.plot_file, "%s,0.000" % self.label)

    def write_line_to_file(self, path, lines):
        '''
        Write a single line or a list of lines to the specified file (append mode).

        :param path: Path of the file to write in
        :param lines: List-type object of lines to write, or a single string
        '''
        with open(path, 'a') as f:  # open file for appending
            if type(lines) is list:  # list of lines passed
                for line in lines:  # iterate through lines in the list
                    if not line.endswith('\n'):
                        line = '%s\n' % line  # append newline char
                    f.write(line)
            else:
                line = lines  # single line (string) passed
                if not line.endswith('\n'):
                    line = '%s\n' % lines  # append newline char
                f.write(line)

    def parse_x_axis_values(self):
        '''
        Parse the values for the x-axis of the plot from the header line of the plot file.
        Expected header shape: 'Categories,0,<v1>,<v2>,...'.
        '''
        with open(self.plot_file, 'r') as f:
            header_line = f.readline()  # read first line of plot file
        temp = header_line.split(',')
        temp.pop(0)  # pop 'Categories'
        temp.pop(0)  # pop 0
        for value in temp:
            self.x_axis_values.append(int(value))

    def update_plot_file(self, percentage):
        '''
        Appends the provided value (percentage) to the last line of the plotfile.
        '''
        with open(self.plot_file, 'r') as f:
            lines = f.readlines()
        last_line = lines[len(lines)-1][:-1]  # get last line and remove '\n'
        last_line_new = "%s,%s\n" % (last_line, "%.3f" % percentage)  # append value to last line in file
        lines[len(lines)-1] = last_line_new  # update last line
        with open(self.plot_file, 'w') as f:  # write lines back to file
            for line in lines:
                f.write(line)

    def process_status_line(self, line):
        '''
        Processes the status lines of JtR. Received lines will look like this:
        '736g 4008p 0:00:00:04 152.0g/s 828.0p/s 828.0c/s 885086C/s carama..marcia'

        :param line: ONE status line, received by the analysis.py.execute() method.
        '''
        # parse amount of processed candidates
        temp = self.guesses_re.findall(line)[0]  # get '4008p'
        # drop the trailing 'p' plus the last three digits and pad with '000',
        # i.e. round down to full thousands: '4008p' -> '4000'
        temp = temp[:-4] + '000'
        self.guesses = int(temp)  # cast '4000' to int
        try:
            # parse amount of cracked pws (leading digits of '736g')
            self.cracked_counter = int(self.cracked_counter_re.findall(line)[0])
        except ValueError:
            self.logger.debug("A parsing error occured for JtR terminal-line <%s>" % line)
        # calculate the percentage of cracked passwords BEFORE entire block is processed
        percentage_cracked = float(self.cracked_counter)/float(self.pw_counter)*100
        try:
            if self.guesses == self.x_axis_values[0]:
                self.update_plot_file(percentage_cracked)  # update plotfile
                self.x_axis_values.pop(0)  # remove the value already written
        except IndexError:
            # silently ignore 'Index Out of Range' errors if more candidates are
            # generated than specified for the x_axis of the plotfile
            pass
        # update progress_file once per analysis interval
        if self.guesses >= (self.analysis_interval * self.interval_counter):
            self.interval_counter += 1
            # write the current status into the file '[output_file]_progress.txt'
            status_line = '%d,%d,%7.3f\n' % (self.guesses, self.cracked_counter, percentage_cracked)
            self.write_line_to_file(self.progress_file, str(status_line))

    def process_candidates(self):
        '''
        Required only for plaintext analysis -- intentional no-op for hashes.
        '''
        pass

    def parse_jtr_pot_file(self):
        '''
        Parse the PGF.pot file of JtR to determine which passwords have been cracked.
        Pot lines have the shape 'hash:plaintext'; only the plaintext is stored.
        '''
        with open(self.jtr_pot_file, 'r') as f:
            for line in f:
                splitline = line.split(':')
                # re-join everything after the first ':' (pw may contain ':') and strip '\n'
                pw = ':'.join(splitline[1:])[:-1]
                # add the candidate and a 0 for its guessing no., as no guessing-no.
                # can be determined from the '.pot' file
                self.cracked_pws[pw] = 0
                if len(self.cracked_pws) == self.cracked_counter:
                    # only read the cracked amount of pws from the jtr pot file
                    break

    def count_unique_hashes(self):
        '''
        Counts the total amount of unique hashes (occ == 1) in the leak.
        '''
        self.pws_unique_counter = 0  # reset prior to counting
        for _, value in self.pws_multi.items():
            if value['occ'] == 1:
                self.pws_unique_counter += 1

    def categorize_pws(self):
        '''
        Process the cracked passwords and categorize them. The function sorts the pws by:
        --> letters only
        --> digits only
        --> symbols only
        --> includes letters and digits
        --> includes letters and symbols
        --> includes digits and symbols
        --> includes letters, digits and symbols

        NOTE(review): a pw containing characters outside all three classes (e.g.
        non-ASCII letters) matches no branch and stays uncounted -- confirm intended.
        '''
        # Declaration of regular expressions (anchored full-string matches)
        only_letters_re = re.compile('^[a-zA-Z]*$')
        only_digits_re = re.compile('^[0-9]*$')
        only_symbols_re = re.compile('^[\s\%\$\^\*\@\&\/\#\!\?\_\-\+\.\,\=\:\;\'\"\<\>\(\)\{\}\[\]]*$')
        letters_digits_re = re.compile('^[a-zA-Z0-9]*$')
        letters_symbols_re = re.compile('^[a-zA-Z\s\%\$\^\*\@\&\/\#\!\?\_\-\+\.\,\=\:\;\'\"\<\>\(\)\{\}\[\]]*$')
        digits_symbols_re = re.compile('^[0-9\s\%\$\^\*\@\&\/\#\!\?\_\-\+\.\,\=\:\;\'\"\<\>\(\)\{\}\[\]]*$')
        letters_digits_symbols_re = re.compile('^[a-zA-Z0-9\s\%\$\^\*\@\&\/\#\!\?\_\-\+\.\,\=\:\;\'\"\<\>\(\)\{\}\[\]]*$')
        # Start analysis of characters in the cracked pws.
        # Order matters: narrower classes are tested first, so each pw is counted once.
        for pw in self.cracked_pws.iterkeys():
            if only_letters_re.search(pw):
                self.only_letters_counter += 1
                continue
            if only_digits_re.search(pw):
                self.only_digits_counter += 1
                continue
            if only_symbols_re.search(pw):
                self.only_symbols_counter += 1
                continue
            if letters_digits_re.search(pw):
                self.letters_digits_counter += 1
                continue
            if letters_symbols_re.search(pw):
                self.letters_symbols_counter += 1
                continue
            if digits_symbols_re.search(pw):
                self.digits_symbols_counter += 1
                continue
            if letters_digits_symbols_re.search(pw):
                self.letters_digits_symbols_counter += 1
                continue

    def calc_average_chars(self):
        '''
        Calculates the average of the following per cracked pw:
        --> length
        --> letters
        --> digits
        --> symbols
        '''
        # Declaration of regular expressions
        letters_re = re.compile('[a-zA-Z]')
        digits_re = re.compile('[0-9]')
        symbols_re = re.compile('[\s\%\$\^\*\@\&\/\#\!\?\_\-\+\.\,\=\:\;\'\"\<\>\(\)\{\}\[\]]')
        # Counter declarations
        length_counter = 0
        letters_counter = 0
        digits_counter = 0
        symbols_counter = 0
        # iterate through all cracked pws (and the counters per pw)
        # NOTE(review): unlike the plaintext variant, the per-pw counts here are NOT
        # weighted by the occurrence count, while the divisor (cracked_counter from the
        # JtR status line) may include multiplicity -- confirm the intended semantics.
        for pw in self.cracked_pws.iterkeys():
            length_counter += len(pw)  # add length of pw to counter
            letters_counter += len(letters_re.findall(pw))
            digits_counter += len(digits_re.findall(pw))
            symbols_counter += len(symbols_re.findall(pw))
        # calc. the average(s)
        try:
            self.avg_length = float(length_counter) / float(self.cracked_counter)
            self.avg_letters = float(letters_counter) / float(self.cracked_counter)
            self.avg_digits = float(digits_counter) / float(self.cracked_counter)
            self.avg_symbols = float(symbols_counter) / float(self.cracked_counter)
        except ZeroDivisionError:
            # no pw cracked --> ignore error as average values will stay 0
            pass

    def execute_analysis_plugins(self):
        '''
        Searches the 'analysis_plugins' folder for scripts to execute along with the
        default analysis modules. Each plugin runs via execfile() with a globals dict
        exposing the analysis state. Filenames starting with '_' are skipped.
        '''
        try:
            plugins = os.listdir(os.path.abspath('./analysis_plugins'))
            temp = list(plugins)  # iterate over a copy so removal below is safe
            for script in temp:
                if script[0] == '_':
                    self.logger.debug("The analysis-plugin <%s> will be skipped." % script)
                    plugins.remove(script)  # skip files with filename starting with '_'
            if len(plugins) > 0:
                self.logger.debug("Executing %d analysis plugins..." % len(plugins))
                for script in plugins:
                    self.logger.debug("Starting plugin <%s>" % script)
                    path = os.path.abspath('./analysis_plugins/%s' % script)
                    # NOTE(review): execfile runs arbitrary code from the plugins folder;
                    # only place trusted scripts there.
                    execfile(path,{'self_name':script,
                                   'label':self.label,
                                   'pws_multi':self.pws_multi,
                                   'pw_counter':self.pw_counter,
                                   'pws_unique_counter':self.pws_unique_counter,
                                   'guesses':self.guesses,
                                   'cracked_counter':self.cracked_counter,
                                   'cracked_pws':self.cracked_pws,
                                   'output_file':self.output_file })
            else:
                self.logger.debug("No analysis plugins found.")
        except Exception, e:
            # log and continue: a failing plugin must not abort the analysis
            self.logger.debug(str(e))
def main():
    '''
    Starts the Password Guessing Framework.

    :requires: a configuration file named "run.ini" in the directory
               "[...]/Password Guessing Framework/configfiles/"
    '''
    job_runtimes = OrderedDict()  # human-readable runtime per job label
    overall_start = timeit.default_timer()

    # Set up DEBUG-level logging
    logger = Logger()
    logger.basicConfig('DEBUG')

    # Read the job definitions from 'run.ini'
    ch = ConfigHelper('./configfiles/run.ini', logger=logger)
    job_queue = ch.parse_jobs()

    finished_jobs = 0  # index into the job list of 'jobs.json'

    # Reset the 'PGF.log' file in the JtR directory. Its path was determined
    # while the jobs were parsed, so take it from the first queued job.
    job_queue[0].clear_jtr_log()

    # Work through the queue front to back
    while job_queue:
        logger.debug("Remaining jobs: %2d\n\n" % len(job_queue))
        job = job_queue.pop(0)
        job_started = timeit.default_timer()
        logger.debug("Starting Job <%s>" % job.label)

        # Reset JtR's 'PGF.pot' and 'PGF.rec' so every job starts from a clean hashing state
        job.clear_jtr_pot_rec()

        # Preparation is called from the config helper.
        # Run the guesser; the analysis runs from the executor as a subprocess.
        Executor(job).execute()

        # Runtime of this job, formatted as days:hours:minutes:seconds
        elapsed = timeit.default_timer() - job_started
        pretty = ("%2dd:%2dh:%2dm:%2ds" % ((elapsed / 86400),
                                           (fmod(elapsed, 86400) / 3600),
                                           (fmod(elapsed, 3600) / 60),
                                           (fmod(elapsed, 60))))

        # Append the runtime to the job's own output file
        with open(job.output_file, 'a') as output_file:
            output_file.write("\nRuntime: '%s': %s" % (job.label, pretty))

        # Job finished!
        logger.debug("---------------- JOB <%s> DONE! --------------------------\n" % job.label)
        logger.debug("Runtime: %28s: %s\n" % (job.label, pretty))
        job_runtimes[job.label] = pretty

        # Persist the runtime of the finished job in 'jobs.json' (read-modify-write)
        with open('./results/jobs.json', 'r') as f:
            json_obj = json.load(f)
        json_obj['jobs'][finished_jobs]['runtime'] = pretty
        with open('./results/jobs.json', 'w') as f:
            f.write(json.dumps(json_obj, sort_keys=True, indent=4))
        finished_jobs += 1

    # Queue drained: compute the overall runtime of the framework
    total = timeit.default_timer() - overall_start
    total_pretty = ("%3dd:%2dh:%2dm:%2ds" % ((total / 86400),
                                             (fmod(total, 86400) / 3600),
                                             (fmod(total, 3600) / 60),
                                             (fmod(total, 60))))
    logger.debug("---------------- ALL JOBS PROCESSED! --------------------------\n")

    # Post-process the progress files via the configured shell script
    final_processing(logger, ch.get_option('DEFAULT', 'final_processing'))

    # Print a summary of the job runtimes
    logger.debug("Job-Runtimes:")
    for label, rt in job_runtimes.iteritems():
        logger.debug("%37s: %s" % (label, rt))
    logger.debug("Overall Runtime:%s%s\n" % (22 * ' ', total_pretty))
    logger.debug("PGF closed.")

    # Back up all created files as the very last step!
    result_backup(ch.get_option('DEFAULT', 'backup_dir'), ch.get_timestamp_uuid())