def run_health_check(self, attack, hashlist_alias):
    args = " --machine-readable --quiet"
    args += " --restore-disable --potfile-disable --session=health "
    args += update_files(attack).replace(hashlist_alias, "../../hashlists/health_check.txt")
    args += " -o ../../hashlists/health_check.out"
    full_cmd = self.callPath + args
    if Initialize.get_os() == 1:
        full_cmd = full_cmd.replace("/", '\\')
    logging.debug("CALL: " + full_cmd)
    proc = subprocess.Popen(full_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.cracker_path)
    output, error = proc.communicate()
    logging.debug("Started health check attack")
    # wait until done, on the health check we don't send any update during running. Maybe later we could at least
    # introduce some heartbeat update to make visible that the agent is still alive.
    proc.wait()
    errors = []
    states = []
    if error:
        error = escape_ansi(error.replace(b"\r\n", b"\n").decode('utf-8'))
        error = error.split('\n')
        for line in error:
            if not line:
                continue
            errors.append(line)
    if output:
        output = escape_ansi(output.replace(b"\r\n", b"\n").decode('utf-8'))
        output = output.split('\n')
        for line in output:
            if not line:
                continue
            logging.debug(line)
            status = HashcatStatus(line)
            if status.is_valid():
                states.append(status)
    return [states, errors]
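# Illustrative sketch, not part of the agent code: one way a caller might interpret the
# [states, errors] pair returned by run_health_check(). The helper name and the pass/fail
# rule below are assumptions for illustration only.
def health_check_looks_ok(states, errors):
    # Treat the check as failed if hashcat wrote anything to stderr or never produced
    # a valid machine-readable status line; otherwise the cracker binary appears healthy.
    return not errors and len(states) > 0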
def run_benchmark(self, task):
    if task['benchType'] == 'speed':
        # do a speed benchmark
        return self.run_speed_benchmark(task)
    args = " --machine-readable --quiet --runtime=" + str(task['bench'])
    args += " --restore-disable --potfile-disable --session=hashtopolis -p \"" + str(chr(9)) + "\" "
    args += update_files(task['attackcmd']).replace(task['hashlistAlias'], "../../hashlists/" + str(task['hashlistId'])) + ' ' + task['cmdpars']
    args += " -o ../../hashlists/" + str(task['hashlistId']) + ".out"
    full_cmd = self.callPath + args
    if Initialize.get_os() == 1:
        full_cmd = full_cmd.replace("/", '\\')
    logging.debug("CALL: " + full_cmd)
    proc = subprocess.Popen(full_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.cracker_path)
    output, error = proc.communicate()
    logging.debug("started benchmark")
    proc.wait()  # wait until done
    if error:
        error = escape_ansi(error.replace(b"\r\n", b"\n").decode('utf-8'))
        # parse errors and send it to server
        error = error.split('\n')
        for line in error:
            if not line:
                continue
            query = copy_and_set_token(dict_clientError, self.config.get_value('token'))
            query['taskId'] = task['taskId']
            query['message'] = line
            req = JsonRequest(query)
            req.execute()
        # return 0
        # it might not be ideal to return here. In case of errors still try to read the benchmark.
    if output:
        output = output.replace(b"\r\n", b"\n").decode('utf-8')
        output = output.split('\n')
        last_valid_status = None
        for line in output:
            if not line:
                continue
            logging.debug("HCSTAT: " + line.strip())
            status = HashcatStatus(line)
            if status.is_valid():
                last_valid_status = status
        if last_valid_status is None:
            return 0
        # we just calculate how far in the task the agent went during the benchmark time
        return (last_valid_status.get_progress() - last_valid_status.get_rejected()) / float(last_valid_status.get_progress_total())
    return 0
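# Illustrative sketch, not part of the agent code: run_benchmark() returns the fraction of the
# assigned keyspace that was covered within task['bench'] seconds of runtime. Assuming a roughly
# constant speed, that fraction can be extrapolated to a full-keyspace runtime estimate.
# The helper name and parameters below are assumptions for illustration only.
def estimate_full_runtime(benchmark_fraction, benchmark_seconds):
    # e.g. a fraction of 0.02 measured over 30 seconds suggests roughly 1500 seconds for the whole keyspace
    if benchmark_fraction <= 0:
        return None  # benchmark produced no usable status, so no estimate is possible
    return benchmark_seconds / benchmark_fraction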
def run_loop(self, proc, chunk, task):
    self.cracks = []
    piping_threshold = 95
    enable_piping = False
    if self.config.get_value('piping-threshold'):
        piping_threshold = self.config.get_value('piping-threshold')
    if self.config.get_value('allow-piping') != '':
        enable_piping = self.config.get_value('allow-piping')
    while True:
        try:
            # Block for 1 second.
            if not self.first_status and self.last_update < time.time() - 5:
                # send update
                query = copy_and_set_token(dict_sendProgress, self.config.get_value('token'))
                query['chunkId'] = chunk['chunkId']
                query['keyspaceProgress'] = chunk['skip']
                query['relativeProgress'] = 0
                query['speed'] = 0
                query['state'] = 2
                query['cracks'] = []
                req = JsonRequest(query)
                logging.info("Sending keepalive progress to avoid timeout...")
                req.execute()
                self.last_update = time.time()
            item = self.io_q.get(True, 1)
        except Empty:
            # No output in either streams for a second. Are we done?
            if proc.poll() is not None:  # is the case when the process is finished
                break
        else:
            identifier, line = item
            if identifier == 'OUT':
                status = HashcatStatus(line.decode())
                if status.is_valid():
                    self.statusCount += 1
                    # test if we have a low utility
                    # not allowed if brain is used
                    if enable_piping and not self.uses_slow_hash_flag and ('useBrain' not in task or not task['useBrain']) and 'slowHash' in task and task['slowHash'] and not self.usePipe:
                        if task['files'] and not ('usePrince' in task and task['usePrince']) and not ('usePreprocessor' in task and task['usePreprocessor']) and 1 < self.statusCount < 10 and status.get_util() != -1 and status.get_util() < piping_threshold:
                            # we need to try piping -> kill the process and then wait for issuing the chunk again
                            self.usePipe = True
                            chunk_start = int(status.get_progress_total() / (chunk['skip'] + chunk['length']) * chunk['skip'])
                            self.progressVal = status.get_progress_total() - chunk_start
                            logging.info("Detected low UTIL value, restart chunk with piping...")
                            try:
                                kill_hashcat(proc.pid, Initialize.get_os())
                            except ProcessLookupError:
                                pass
                            return
                    self.first_status = True
                    # send update to server
                    logging.debug(line.decode().replace('\n', '').replace('\r', ''))
                    total = status.get_progress_total()
                    if self.usePipe:
                        # if we are piping, we might have saved the total progress before switching to piping, so we can use this
                        total = self.progressVal
                    # we need to calculate the chunk start, because progress does not start at 0 for a chunk
                    chunk_start = int(status.get_progress_total() / (chunk['skip'] + chunk['length']) * chunk['skip'])
                    if total > 0:
                        relative_progress = int((status.get_progress() - chunk_start) / float(total - chunk_start) * 10000)
                    else:
                        # this is the case when we cannot say anything about the progress
                        relative_progress = 0
                    speed = status.get_speed()
                    initial = True
                    if status.get_state() == 4 or status.get_state() == 5:
                        time.sleep(5)  # we wait five seconds so all output is loaded from file
                        # reset piping stuff when a chunk is successfully finished
                        self.progressVal = 0
                        self.usePipe = False
                    while self.cracks or initial:
                        self.lock.acquire()
                        initial = False
                        cracks_backup = []
                        if len(self.cracks) > 1000:
                            # we split
                            cnt = 0
                            new_cracks = []
                            for crack in self.cracks:
                                cnt += 1
                                if cnt > 1000:
                                    cracks_backup.append(crack)
                                else:
                                    new_cracks.append(crack)
                            self.cracks = new_cracks
                        query = copy_and_set_token(dict_sendProgress, self.config.get_value('token'))
                        query['chunkId'] = chunk['chunkId']
                        query['keyspaceProgress'] = status.get_curku()
                        if (self.usePipe or ('usePrince' in task and task['usePrince']) or ('usePreprocessor' in task and task['usePreprocessor'])) and status.get_curku() == 0:
                            query['keyspaceProgress'] = chunk['skip']
                        query['relativeProgress'] = relative_progress
                        query['speed'] = speed
                        query['state'] = status.get_state()
                        # crack format: hash[:salt]:plain:hex_plain:crack_pos (separator will be tab instead of :)
                        prepared = []
                        for crack in self.cracks:
                            prepared.append(crack.rsplit(":", 3))
                        query['cracks'] = prepared
                        if status.get_temps():
                            query['gpuTemp'] = status.get_temps()
                        if status.get_all_util():
                            query['gpuUtil'] = status.get_all_util()
                        query['cpuUtil'] = [round(psutil.cpu_percent(), 1)]
                        req = JsonRequest(query)
                        logging.debug("Sending " + str(len(self.cracks)) + " cracks...")
                        ans = req.execute()
                        if ans is None:
                            logging.error("Failed to send solve!")
                        elif ans['response'] != 'SUCCESS':
                            self.wasStopped = True
                            logging.error("Error from server on solve: " + str(ans))
                            try:
                                kill_hashcat(proc.pid, Initialize.get_os())
                            except ProcessLookupError:
                                pass
                            sleep(5)
                            return
                        elif 'agent' in ans.keys() and ans['agent'] == 'stop':
                            # server set agent to stop
                            self.wasStopped = True
                            logging.info("Received stop order from server!")
                            try:
                                kill_hashcat(proc.pid, Initialize.get_os())
                            except ProcessLookupError:
                                pass
                            sleep(5)
                            return
                        else:
                            cracks_count = len(self.cracks)
                            self.cracks = cracks_backup
                            zaps = ans['zaps']
                            if zaps:
                                logging.debug("Writing zaps")
                                zap_output = "\tFF\n".join(zaps) + '\tFF\n'
                                f = open("hashlist_" + str(task['hashlistId']) + "/" + str(time.time()), 'a')
                                f.write(zap_output)
                                f.close()
                            logging.info("Progress:" + str("{:6.2f}".format(relative_progress / 100)) + "% Speed: " + print_speed(speed) +
                                         " Cracks: " + str(cracks_count) + " Accepted: " + str(ans['cracked']) +
                                         " Skips: " + str(ans['skipped']) + " Zaps: " + str(len(zaps)))
                        self.lock.release()
                else:
                    # hacky solution to exclude warnings from hashcat
                    if str(line[0]) not in string.printable:
                        continue
                    else:
                        pass
                        # logging.warning("HCOUT: " + line.strip())
            elif identifier == 'ERR':
                msg = escape_ansi(line.replace(b"\r\n", b"\n").decode('utf-8')).strip()
                if msg and str(msg) != '^C':  # this is maybe not the fanciest way, but as ctrl+c is sent to the underlying process it reports it to stderr
                    logging.error("HC error: " + msg)
                    send_error(msg, self.config.get_value('token'), task['taskId'], chunk['chunkId'])
                    sleep(0.1)  # we set a minimal sleep to avoid overreaction of the client sending a huge number of errors, but it should not be slowed down too much, in case the errors are not critical and the agent can continue
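# Illustrative sketch, not part of the agent code: the chunk-relative progress computation used in
# run_loop() above, isolated into a standalone function. Hashcat reports absolute keyspace progress,
# so the chunk offset has to be subtracted before scaling to hundredths of a percent (0..10000).
# The helper name is an assumption for illustration only.
def chunk_relative_progress(progress, progress_total, skip, length):
    chunk_start = int(progress_total / (skip + length) * skip)
    if progress_total - chunk_start <= 0:
        return 0  # nothing meaningful can be said about progress in this case
    return int((progress - chunk_start) / float(progress_total - chunk_start) * 10000)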
def run_loop(self, proc, chunk, task):
    self.cracks = []
    while True:
        try:
            # Block for 1 second.
            if not self.first_status and self.last_update < time.time() - 5:
                # send update
                query = copyAndSetToken(dict_sendProgress, self.config.get_value('token'))
                query['chunkId'] = chunk['chunkId']
                query['keyspaceProgress'] = chunk['skip']
                query['relativeProgress'] = 0
                query['speed'] = 0
                query['state'] = 2
                query['cracks'] = []
                req = JsonRequest(query)
                logging.info("Sending keepalive progress to avoid timeout...")
                req.execute()
                self.last_update = time.time()
            item = self.io_q.get(True, 1)
        except Empty:
            # No output in either streams for a second. Are we done?
            if proc.poll() is not None:  # is the case when the process is finished
                break
        else:
            identifier, line = item
            if identifier == 'OUT':
                status = HashcatStatus(line.decode())
                if status.is_valid():
                    self.first_status = True
                    # send update to server
                    chunk_start = int(status.get_progress_total() / (chunk['skip'] + chunk['length']) * chunk['skip'])
                    relative_progress = int((status.get_progress() - chunk_start) / float(status.get_progress_total() - chunk_start) * 10000)
                    speed = status.get_speed()
                    initial = True
                    if status.get_state() == 5:
                        time.sleep(1)  # we wait for a second so all output is loaded from file
                    while len(self.cracks) > 0 or initial:
                        self.lock.acquire()
                        initial = False
                        cracks_backup = []
                        if len(self.cracks) > 1000:
                            # we split
                            cnt = 0
                            new_cracks = []
                            for crack in self.cracks:
                                cnt += 1
                                if cnt > 1000:
                                    cracks_backup.append(crack)
                                else:
                                    new_cracks.append(crack)
                            self.cracks = new_cracks
                        query = copyAndSetToken(dict_sendProgress, self.config.get_value('token'))
                        query['chunkId'] = chunk['chunkId']
                        query['keyspaceProgress'] = status.get_curku()
                        query['relativeProgress'] = relative_progress
                        query['speed'] = speed
                        query['state'] = status.get_state()
                        query['cracks'] = self.cracks
                        req = JsonRequest(query)
                        logging.debug("Sending " + str(len(self.cracks)) + " cracks...")
                        ans = req.execute()
                        if ans is None:
                            logging.error("Failed to send solve!")
                        elif ans['response'] != 'SUCCESS':
                            logging.error("Error from server on solve: " + str(ans))
                            try:
                                kill_hashcat(proc.pid, Initialize.get_os())
                            except ProcessLookupError:
                                pass
                            return
                        elif 'agent' in ans.keys() and ans['agent'] == 'stop':
                            # server set agent to stop
                            logging.info("Received stop order from server!")
                            try:
                                kill_hashcat(proc.pid, Initialize.get_os())
                            except ProcessLookupError:
                                pass
                            return
                        else:
                            cracks_count = len(self.cracks)
                            self.cracks = cracks_backup
                            zaps = ans['zaps']
                            if len(zaps) > 0:
                                logging.debug("Writing zaps")
                                zap_output = '\n'.join(zaps) + '\n'
                                f = open("hashlist_" + str(task['hashlistId']) + "/" + str(time.time()), 'a')
                                f.write(zap_output)
                                f.close()
                            logging.info("Progress:" + str("{:6.2f}".format(relative_progress / 100)) + "% Speed: " + print_speed(speed) +
                                         " Cracks: " + str(cracks_count) + " Accepted: " + str(ans['cracked']) +
                                         " Skips: " + str(ans['skipped']) + " Zaps: " + str(len(zaps)))
                        self.lock.release()
                else:
                    # hacky solution to exclude warnings from hashcat
                    if str(line[0]) not in string.printable:
                        continue
                    else:
                        pass
                        # logging.warning("HCOUT: " + line.strip())
            else:
                logging.error("HC error: " + str(line).strip())
                msg = str(line).strip()
                send_error(msg, self.config.get_value('token'), task['taskId'])