def __check_utils(self):
    """Ensure the helper binaries the agent needs are present locally.

    7zr is always required (archive extraction); uftpd is only required
    when multicast file distribution is enabled in the config.
    """
    self.__ensure_util('7zr')
    # uftpd is only needed for multicast file distribution
    if self.config.get_value('multicast'):
        self.__ensure_util('uftpd')

def __ensure_util(self, util_type):
    """Download the given utility binary from the server if missing on disk.

    Retries indefinitely with a 5 second pause until the server delivers a
    valid executable URL; marks the downloaded file as executable.

    :param util_type: server-side binary type identifier ('7zr' or 'uftpd')
    """
    path = util_type + Initialize.get_os_extension()
    if os.path.isfile(path):
        return
    query = copy_and_set_token(dict_downloadBinary, self.config.get_value('token'))
    query['type'] = util_type
    req = JsonRequest(query)
    ans = req.execute()
    if ans is None:
        logging.error("Failed to get " + util_type + "!")
        sleep(5)
        self.__ensure_util(util_type)
    elif ans['response'] != 'SUCCESS' or not ans['executable']:
        logging.error("Getting " + util_type + " failed: " + str(ans))
        sleep(5)
        self.__ensure_util(util_type)
    else:
        Download.download(ans['executable'], path)
        # make the downloaded binary executable for the current user
        os.chmod(path, os.stat(path).st_mode | stat.S_IEXEC)
def send_error(error, token, task_id, chunk_id):
    """Report a client error for a specific task chunk to the server.

    :param error: error message text
    :param token: agent authentication token
    :param task_id: id of the affected task
    :param chunk_id: id of the affected chunk
    """
    report = copy_and_set_token(dict_clientError, token)
    report['message'] = error
    report['chunkId'] = chunk_id
    report['taskId'] = task_id
    JsonRequest(report).execute()
def run_benchmark(self, task):
    """Run a runtime benchmark with hashcat for the given task.

    Returns the fraction of the chunk keyspace processed during the
    benchmark runtime (0 on failure). For 'speed' benchmarks the dedicated
    speed benchmark is used instead.
    """
    if task['benchType'] == 'speed':
        # do a speed benchmark
        return self.run_speed_benchmark(task)
    args = " --machine-readable --quiet --runtime=" + str(task['bench'])
    args += " --restore-disable --potfile-disable --session=hashtopolis -p \"" + str(chr(9)) + "\" "
    args += update_files(task['attackcmd']).replace(task['hashlistAlias'], "../../hashlists/" + str(task['hashlistId'])) + ' ' + task['cmdpars']
    args += " -o ../../hashlists/" + str(task['hashlistId']) + ".out"
    full_cmd = self.callPath + args
    if Initialize.get_os() == 1:
        # Windows needs backslash path separators
        full_cmd = full_cmd.replace("/", '\\')
    logging.debug("CALL: " + full_cmd)
    logging.debug("started benchmark")
    proc = subprocess.Popen(full_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.cracker_path)
    # communicate() already waits for process termination; the extra
    # proc.wait() the old code had was redundant and is removed
    output, error = proc.communicate()
    if error:
        error = escape_ansi(error.replace(b"\r\n", b"\n").decode('utf-8'))
        # parse errors and send them to the server, one message per line
        for line in error.split('\n'):
            if not line:
                continue
            query = copy_and_set_token(dict_clientError, self.config.get_value('token'))
            query['taskId'] = task['taskId']
            query['message'] = line
            req = JsonRequest(query)
            req.execute()
        # it might not be ideal to return here; in case of errors still try to read the benchmark
    if output:
        output = output.replace(b"\r\n", b"\n").decode('utf-8')
        last_valid_status = None
        for line in output.split('\n'):
            if not line:
                continue
            logging.debug("HCSTAT: " + line.strip())
            status = HashcatStatus(line)
            if status.is_valid():
                last_valid_status = status
        if last_valid_status is None:
            return 0
        # we just calculate how far in the task the agent went during the benchmark time
        return (last_valid_status.get_progress() - last_valid_status.get_rejected()) / float(last_valid_status.get_progress_total())
    return 0
def check_client_version(self):
    """Check with the server whether a newer client version is available.

    If an update is offered, download it, replace hashtopolis.zip (keeping
    the running version as old.zip) and re-exec the client process.
    Skipped entirely when updates are disabled via command line flag.
    """
    if self.args.disable_update:
        return
    if os.path.isfile("old.zip"):
        os.unlink("old.zip")  # cleanup old version
    query = copy_and_set_token(dict_checkVersion, self.config.get_value('token'))
    query['version'] = Initialize.get_version_number()
    req = JsonRequest(query)
    ans = req.execute()
    if ans is None:
        logging.error("Agent version check failed!")
    elif ans['response'] != 'SUCCESS':
        logging.error("Error from server: " + str(ans['message']))
    else:
        if ans['version'] == 'OK':
            logging.info("Client is up-to-date!")
        else:
            url = ans['url']
            if not url:
                logging.warning("Got empty URL for client update!")
            else:
                logging.info("New client version available!")
                # remove any stale partial download before fetching
                if os.path.isfile("update.zip"):
                    os.unlink("update.zip")
                Download.download(url, "update.zip")
                # only swap files when a non-empty archive actually arrived
                if os.path.isfile("update.zip") and os.path.getsize("update.zip"):
                    if os.path.isfile("old.zip"):
                        os.unlink("old.zip")
                    os.rename("hashtopolis.zip", "old.zip")
                    os.rename("update.zip", "hashtopolis.zip")
                    logging.info("Update received, restarting client...")
                    # drop the pid lock so the re-exec'd process can start cleanly
                    if os.path.exists("lock.pid"):
                        os.unlink("lock.pid")
                    # replace the current process with the new client
                    os.execl(sys.executable, sys.executable, "hashtopolis.zip")
                    exit(0)
def check_version(self, cracker_id):
    """Ensure the cracker binary with the given id exists locally.

    Asks the server for the binary download URL, downloads and extracts the
    7z archive into crackers/<id>/ when the directory is missing.
    Returns True on success, False on any failure (after a 5s pause).
    """
    path = "crackers/" + str(cracker_id) + "/"
    query = copy_and_set_token(dict_downloadBinary, self.config.get_value('token'))
    query['type'] = 'cracker'
    query['binaryVersionId'] = cracker_id
    req = JsonRequest(query)
    ans = req.execute()
    if ans is None:
        logging.error("Failed to load cracker!")
        sleep(5)
        return False
    elif ans['response'] != 'SUCCESS' or not ans['url']:
        logging.error("Getting cracker failed: " + str(ans))
        sleep(5)
        return False
    else:
        # remember the server answer for later version comparisons
        self.last_version = ans
        if not os.path.isdir(path):
            # we need to download the 7zip
            if not Download.download(ans['url'], "crackers/" + str(cracker_id) + ".7z"):
                logging.error("Download of cracker binary failed!")
                sleep(5)
                return False
            # OS id 1 is Windows (no ./ prefix needed); others call the local 7zr
            if Initialize.get_os() == 1:
                os.system("7zr" + Initialize.get_os_extension() + " x -ocrackers/temp crackers/" + str(cracker_id) + ".7z")
            else:
                os.system("./7zr" + Initialize.get_os_extension() + " x -ocrackers/temp crackers/" + str(cracker_id) + ".7z")
            os.unlink("crackers/" + str(cracker_id) + ".7z")
            # the archive either contains a single top-level folder (move it
            # into place) or loose files (rename the temp dir itself);
            # NOTE(review): only the first listed entry is inspected — assumes
            # a single-entry archive, verify against the packaged binaries
            for name in os.listdir("crackers/temp"):
                if os.path.isdir("crackers/temp/" + name):
                    os.rename("crackers/temp/" + name, "crackers/" + str(cracker_id))
                else:
                    os.rename("crackers/temp", "crackers/" + str(cracker_id))
                break
    return True
def check_files(self, files, task_id):
    """Make sure all files required by a task are present locally.

    Missing files are requested from the server and downloaded; 7z archives
    are extracted unless the corresponding .txt already exists.
    Returns True when all files are available, False on any failure.
    """
    for file in files:
        if os.path.isfile("files/" + file) or os.path.isfile("files/" + file.replace(".7z", ".txt")):
            continue
        # consistent with the sibling check_files implementation:
        # snake_case helper and the dict_getFile request template
        query = copy_and_set_token(dict_getFile, self.config.get_value('token'))
        query['taskId'] = task_id
        query['file'] = file
        req = JsonRequest(query)
        ans = req.execute()
        if ans is None:
            logging.error("Failed to get file!")
            sleep(5)
            return False
        elif ans['response'] != 'SUCCESS':
            logging.error("Getting of file failed: " + str(ans))
            sleep(5)
            return False
        else:
            Download.download(self.config.get_value('url').replace("api/server.php", "") + ans['url'], "files/" + file)
            if os.path.splitext("files/" + file)[1] == '.7z' and not os.path.isfile("files/" + file.replace(".7z", ".txt")):
                # extract if needed (OS id 1 is Windows, no ./ prefix)
                if Initialize.get_os() != 1:
                    os.system("./7zr" + Initialize.get_os_extension() + " x -aoa -ofiles/ -y files/" + file)
                else:
                    os.system("7zr" + Initialize.get_os_extension() + " x -aoa -ofiles/ -y files/" + file)
    return True
def deletion_check(self):
    """Ask the server for files that should be deleted locally and remove them.

    Rate-limited by self.check_interval; disabled entirely via the
    'file-deletion-disable' config flag.
    """
    if self.config.get_value('file-deletion-disable'):
        return
    if self.last_check is not None and time.time() - self.last_check < self.check_interval:
        # checked recently enough, nothing to do yet
        return
    query = copy_and_set_token(dict_getFileStatus, self.config.get_value('token'))
    ans = JsonRequest(query).execute()
    self.last_check = time.time()
    if ans is None:
        logging.error("Failed to get file status!")
        return
    if ans['response'] != 'SUCCESS':
        logging.error("Getting of file status failed: " + str(ans))
        return
    for name in ans['filenames']:
        if "/" in name or "\\" in name:
            continue  # ignore invalid file names
        if os.path.dirname("files/" + name) != "files":
            continue  # ignore any case in which we would leave the files folder
        if not os.path.exists("files/" + name):
            continue
        logging.info("Delete file '" + name + "' as requested by server...")
        if os.path.splitext("files/" + name)[1] == '.7z':
            if os.path.exists("files/" + name.replace(".7z", ".txt")):
                logging.info("Also delete assumed wordlist from archive of same file...")
                os.unlink("files/" + name.replace(".7z", ".txt"))
        os.unlink("files/" + name)
def load_task(self):
    """Request a new task from the server unless one is already assigned.

    On success the answer is stored in self.task and self.taskId; failures
    and 'no task available' pause 5 seconds before the caller retries.
    """
    if self.taskId != 0:
        # a task is already loaded
        return
    self.task = None
    request = JsonRequest(copy_and_set_token(dict_getTask, self.config.get_value('token')))
    ans = request.execute()
    if ans is None:
        logging.error("Failed to get task!")
        sleep(5)
        return
    if ans['response'] != 'SUCCESS':
        logging.error("Error from server: " + str(ans))
        sleep(5)
        return
    if ans['taskId'] is None:
        logging.info("No task available!")
        sleep(5)
        return
    if ans['taskId'] == -1:
        self.taskId = -1
        return
    self.task = ans
    self.taskId = ans['taskId']
    logging.info("Got task with id: " + str(ans['taskId']))
def get_chunk(self, task_id):
    """Request the next chunk of the given task from the server.

    Returns 1 when a chunk was received (stored in self.chunk), 0 when the
    request failed or the task is fully dispatched, -1 when a keyspace
    measure is required, -2 for a benchmark and -3 for a health check.
    """
    query = copy_and_set_token(dict_getChunk, self.config.get_value('token'))
    query['taskId'] = task_id
    ans = JsonRequest(query).execute()
    if ans is None:
        logging.error("Failed to get chunk!")
        sleep(5)
        return 0
    if ans['response'] != 'SUCCESS':
        logging.error("Getting of chunk failed: " + str(ans))
        sleep(5)
        return 0
    # map special server statuses to their internal return codes
    special_states = {
        'keyspace_required': -1,
        'benchmark': -2,
        'fully_dispatched': 0,
        'health_check': -3,
    }
    if ans['status'] in special_states:
        return special_states[ans['status']]
    self.chunk = ans
    return 1
def check_files(self, files, task_id):
    """Ensure all files required by a task exist locally with the expected size.

    For each file the server is asked for metadata (and a download URL);
    the file is then fetched via rsync (when enabled, non-Windows) or HTTP.
    In multicast mode missing/partial files are not downloaded directly —
    the method just waits and reports failure so the caller retries.
    Returns True when everything is present, False on any failure.
    """
    for file in files:
        file_localpath = "files/" + file
        query = copy_and_set_token(dict_getFile, self.config.get_value('token'))
        query['taskId'] = task_id
        query['file'] = file
        req = JsonRequest(query)
        ans = req.execute()
        if ans is None:
            logging.error("Failed to get file!")
            sleep(5)
            return False
        elif ans['response'] != 'SUCCESS':
            logging.error("Getting of file failed: " + str(ans))
            sleep(5)
            return False
        else:
            file_size = int(ans['filesize'])
            if os.path.isfile(file_localpath) and os.stat(file_localpath).st_size == file_size:
                # file already complete, nothing to do
                continue
            elif self.config.get_value('multicast'):
                sleep(5)  # in case the file is not there yet (or not completely), we just wait some time and then try again
                return False
            # TODO: we might need a better check for this
            if os.path.isfile(file_localpath.replace(".7z", ".txt")):
                # archive already extracted earlier — the size check above
                # does not apply to the extracted .txt
                continue
            if self.config.get_value('rsync') and Initialize.get_os() != 1:
                Download.rsync(self.config.get_value('rsync-path') + '/' + file, file_localpath)
            else:
                Download.download(self.config.get_value('url').replace("api/server.php", "") + ans['url'], file_localpath)
            # verify the download actually produced a file of the reported size
            if os.path.isfile(file_localpath) and os.stat(file_localpath).st_size != file_size:
                logging.error("file size mismatch on file: %s" % file)
                sleep(5)
                return False
            if os.path.splitext("files/" + file)[1] == '.7z' and not os.path.isfile("files/" + file.replace(".7z", ".txt")):
                # extract if needed (OS id 1 is Windows, no ./ prefix)
                if Initialize.get_os() != 1:
                    os.system("./7zr" + Initialize.get_os_extension() + " x -aoa -ofiles/ -y files/" + file)
                else:
                    os.system("7zr" + Initialize.get_os_extension() + " x -aoa -ofiles/ -y files/" + file)
    return True
def run_benchmark(self, task):
    """Run a timeout-bounded benchmark with the generic cracker binary.

    Returns the covered keyspace fraction (progress permyriad / 10000) or 0
    on failure; failures are also reported to the server as client errors.
    """
    ksp = self.keyspace
    if ksp == 0:
        ksp = task['keyspace']
    args = task['attackcmd'].replace(task['hashlistAlias'], "../hashlists/" + str(task['hashlistId']))
    full_cmd = self.callPath + " crack " + args + " -s 0 -l " + str(ksp) + " --timeout=" + str(task['bench'])
    if Initialize.get_os() == 1:
        # Windows needs backslash path separators
        full_cmd = full_cmd.replace("/", '\\')
    logging.debug("CALL: " + full_cmd)
    output = subprocess.check_output(full_cmd, shell=True, cwd='files')
    if not output:
        self.__report_benchmark_error(task, "Generic benchmark gave no output!")
        return 0
    output = output.replace(b"\r\n", b"\n").decode('utf-8')
    last_valid_status = None
    for line in output.split('\n'):
        if not line:
            continue
        status = GenericStatus(line)
        if status.is_valid():
            last_valid_status = status
    if last_valid_status is None:
        self.__report_benchmark_error(task, "Generic benchmark failed!")
        return 0
    return float(last_valid_status.get_progress()) / 10000

def __report_benchmark_error(self, task, message):
    # send a client error message for this task to the server
    # (extracted from the two identical inline blocks the old code had)
    query = copy_and_set_token(dict_clientError, self.config.get_value('token'))
    query['taskId'] = task['taskId']
    query['message'] = message
    req = JsonRequest(query)
    req.execute()
def send_keyspace(self, keyspace, task_id):
    """Report the measured keyspace of a task to the server.

    :param keyspace: measured keyspace size (converted to int)
    :param task_id: id of the task the keyspace belongs to
    :returns: 0 when the request failed (after a 5s pause), otherwise None
    """
    # consistent with the rest of the agent: use the snake_case helper
    query = copy_and_set_token(dict_sendKeyspace, self.config.get_value('token'))
    query['taskId'] = task_id
    query['keyspace'] = int(keyspace)
    req = JsonRequest(query)
    ans = req.execute()
    if ans is None:
        logging.error("Failed to send keyspace!")
        sleep(5)
        return 0
    elif ans['response'] != 'SUCCESS':
        logging.error("Sending of keyspace failed: " + str(ans))
        sleep(5)
        return 0
    else:
        logging.info("Keyspace got accepted!")
def load_hashlist(self, hashlist_id):
    """Download the hashlist with the given id from the server.

    Returns True on success, False when the request failed (after a 5s pause).
    """
    request = copy_and_set_token(dict_getHashlist, self.config.get_value('token'))
    request['hashlistId'] = hashlist_id
    answer = JsonRequest(request).execute()
    if answer is None:
        logging.error("Failed to get hashlist!")
        sleep(5)
        return False
    if answer['response'] != 'SUCCESS':
        logging.error("Getting of hashlist failed: " + str(answer))
        sleep(5)
        return False
    base_url = self.config.get_value('url').replace("api/server.php", "")
    Download.download(base_url + answer['url'], "hashlists/" + str(hashlist_id), True)
    return True
def check_prince(self):
    """Ensure the PRINCE preprocessor is available locally.

    Downloads and extracts the archive from the server when the prince/
    directory is missing. Returns True on success, False on failure.
    """
    logging.debug("Checking if PRINCE is present...")
    path = "prince/"
    if os.path.isdir(path):
        # if it already exists, we don't need to download it
        logging.debug("PRINCE is already downloaded")
        return True
    logging.debug("PRINCE not found, download...")
    query = copy_and_set_token(dict_downloadBinary, self.config.get_value('token'))
    query['type'] = 'prince'
    answer = JsonRequest(query).execute()
    if answer is None:
        logging.error("Failed to load prince!")
        sleep(5)
        return False
    if answer['response'] != 'SUCCESS' or not answer['url']:
        logging.error("Getting prince failed: " + str(answer))
        sleep(5)
        return False
    if not Download.download(answer['url'], "prince.7z"):
        logging.error("Download of prince failed!")
        sleep(5)
        return False
    # OS id 1 is Windows (no ./ prefix needed); others call the local 7zr
    if Initialize.get_os() == 1:
        os.system("7zr" + Initialize.get_os_extension() + " x -otemp prince.7z")
    else:
        os.system("./7zr" + Initialize.get_os_extension() + " x -otemp prince.7z")
    # the archive is compressed with the main subfolder of prince,
    # so move that folder into place as 'prince'
    for entry in os.listdir("temp"):
        if os.path.isdir("temp/" + entry):
            os.rename("temp/" + entry, "prince")
            break
    os.unlink("prince.7z")
    os.rmdir("temp")
    logging.debug("PRINCE downloaded and extracted")
    return True
def load_found(self, hashlist_id, cracker_id):
    """Fetch the already-cracked hashes of a hashlist into the cracker potfile.

    Returns True on success, False when the request failed (after a 5s pause).
    """
    request = copy_and_set_token(dict_getFound, self.config.get_value('token'))
    request['hashlistId'] = hashlist_id
    answer = JsonRequest(request).execute()
    if answer is None:
        logging.error("Failed to get found of hashlist!")
        sleep(5)
        return False
    if answer['response'] != 'SUCCESS':
        logging.error("Getting of hashlist founds failed: " + str(answer))
        sleep(5)
        return False
    logging.info("Saving found hashes to hashcat potfile...")
    base_url = self.config.get_value('url').replace("api/server.php", "")
    Download.download(base_url + answer['url'], "crackers/" + str(cracker_id) + "/hashcat.potfile", True)
    return True
def run_loop(self, proc, chunk, task):
    """Main monitoring loop for a running hashcat process.

    Reads stdout/stderr lines from self.io_q, parses hashcat status lines,
    sends progress/crack updates to the server, writes server-side zaps to
    the hashlist directory, and stops the process on server request or error.
    Exits when the process terminates and the output queue stays empty.
    """
    self.cracks = []
    while True:
        try:
            # Block for 1 second.
            if not self.first_status and self.last_update < time.time() - 5:
                # send update: no status seen yet, keep the chunk alive server-side
                query = copyAndSetToken(dict_sendProgress, self.config.get_value('token'))
                query['chunkId'] = chunk['chunkId']
                query['keyspaceProgress'] = chunk['skip']
                query['relativeProgress'] = 0
                query['speed'] = 0
                query['state'] = 2
                query['cracks'] = []
                req = JsonRequest(query)
                logging.info("Sending keepalive progress to avoid timeout...")
                req.execute()
                self.last_update = time.time()
            item = self.io_q.get(True, 1)
        except Empty:
            # No output in either streams for a second. Are we done?
            if proc.poll() is not None:
                # is the case when the process is finished
                break
        else:
            identifier, line = item
            if identifier == 'OUT':
                status = HashcatStatus(line.decode())
                if status.is_valid():
                    self.first_status = True
                    # send update to server; progress does not start at 0 for
                    # a chunk, so compute the chunk's absolute start first
                    chunk_start = int(status.get_progress_total() / (chunk['skip'] + chunk['length']) * chunk['skip'])
                    relative_progress = int((status.get_progress() - chunk_start) / float(status.get_progress_total() - chunk_start) * 10000)
                    speed = status.get_speed()
                    initial = True
                    if status.get_state() == 5:
                        time.sleep(1)  # we wait for a second so all output is loaded from file
                    # send at least once (initial), then drain remaining cracks
                    while len(self.cracks) > 0 or initial:
                        self.lock.acquire()
                        initial = False
                        cracks_backup = []
                        if len(self.cracks) > 1000:
                            # we split: send at most 1000 cracks per request,
                            # keep the overflow for the next iteration
                            cnt = 0
                            new_cracks = []
                            for crack in self.cracks:
                                cnt += 1
                                if cnt > 1000:
                                    cracks_backup.append(crack)
                                else:
                                    new_cracks.append(crack)
                            self.cracks = new_cracks
                        query = copyAndSetToken(dict_sendProgress, self.config.get_value('token'))
                        query['chunkId'] = chunk['chunkId']
                        query['keyspaceProgress'] = status.get_curku()
                        query['relativeProgress'] = relative_progress
                        query['speed'] = speed
                        query['state'] = status.get_state()
                        query['cracks'] = self.cracks
                        req = JsonRequest(query)
                        logging.debug("Sending " + str(len(self.cracks)) + " cracks...")
                        ans = req.execute()
                        if ans is None:
                            logging.error("Failed to send solve!")
                        elif ans['response'] != 'SUCCESS':
                            logging.error("Error from server on solve: " + str(ans))
                            try:
                                kill_hashcat(proc.pid, Initialize.get_os())
                            except ProcessLookupError:
                                pass
                            # NOTE(review): returns while self.lock is still
                            # held (acquired above, released only at loop end)
                            # — confirm intended
                            return
                        elif 'agent' in ans.keys() and ans['agent'] == 'stop':
                            # server set agent to stop
                            logging.info("Received stop order from server!")
                            try:
                                kill_hashcat(proc.pid, Initialize.get_os())
                            except ProcessLookupError:
                                pass
                            # NOTE(review): same lock-held-on-return hazard as above
                            return
                        else:
                            cracks_count = len(self.cracks)
                            # continue with the overflow cracks (if any)
                            self.cracks = cracks_backup
                            zaps = ans['zaps']
                            if len(zaps) > 0:
                                logging.debug("Writing zaps")
                                zap_output = '\n'.join(zaps) + '\n'
                                f = open("hashlist_" + str(task['hashlistId']) + "/" + str(time.time()), 'a')
                                f.write(zap_output)
                                f.close()
                            logging.info("Progress:" + str("{:6.2f}".format(relative_progress / 100)) + "% Speed: " + print_speed(speed) + " Cracks: " + str(cracks_count) + " Accepted: " + str(ans['cracked']) + " Skips: " + str(ans['skipped']) + " Zaps: " + str(len(zaps)))
                        self.lock.release()
                else:
                    # hacky solution to exclude warnings from hashcat
                    if str(line[0]) not in string.printable:
                        continue
                    else:
                        pass  # logging.warning("HCOUT: " + line.strip())
            else:
                # anything not on stdout is treated as an error line
                logging.error("HC error: " + str(line).strip())
                msg = str(line).strip()
                send_error(msg, self.config.get_value('token'), task['taskId'])
def run_loop(self, process, chunk, task):
    """Monitoring loop for a running generic-cracker process.

    Collects crack lines from stdout, parses status lines, reports progress
    and cracks to the server (in batches of at most 1000), and writes
    server-side zaps to files/zap. Exits when the process terminates and
    the output queue stays empty.
    """
    cracks = []
    while True:
        try:
            # Block for 1 second.
            item = self.io_q.get(True, 1)
        except Empty:
            # No output in either streams for a second. Are we done?
            if process.poll() is not None:
                # is the case when the process is finished
                break
        else:
            identifier, line = item
            if identifier == 'OUT':
                status = GenericStatus(line.decode())
                if status.is_valid():
                    # send update to server
                    progress = status.get_progress()
                    speed = status.get_speed()
                    initial = True
                    # send at least once (initial), then drain remaining cracks
                    while cracks or initial:
                        initial = False
                        cracks_backup = []
                        if len(cracks) > 1000:
                            # we split: at most 1000 cracks per request,
                            # keep the overflow for the next iteration
                            cnt = 0
                            new_cracks = []
                            for crack in cracks:
                                cnt += 1
                                if cnt > 1000:
                                    cracks_backup.append(crack)
                                else:
                                    new_cracks.append(crack)
                            cracks = new_cracks
                        query = copy_and_set_token(dict_sendProgress, self.config.get_value('token'))
                        query['chunkId'] = chunk['chunkId']
                        query['keyspaceProgress'] = chunk['skip']
                        query['relativeProgress'] = progress
                        query['speed'] = speed
                        # progress is permyriad: 10000 means the chunk is done (state 4)
                        query['state'] = (4 if progress == 10000 else 2)
                        query['cracks'] = cracks
                        req = JsonRequest(query)
                        logging.debug("Sending " + str(len(cracks)) + " cracks...")
                        ans = req.execute()
                        if ans is None:
                            # NOTE(review): on persistent failure this inner
                            # loop never empties 'cracks' — potential tight
                            # retry loop, confirm intended
                            logging.error("Failed to send solve!")
                        elif ans['response'] != 'SUCCESS':
                            logging.error("Error from server on solve: " + str(ans))
                        else:
                            if ans['zaps']:
                                with open("files/zap", "wb") as zapfile:
                                    # need to check if we are in the main dir here
                                    zapfile.write('\n'.join(ans['zaps']).encode())
                                    # redundant: the with-block already closes the file
                                    zapfile.close()
                            # continue with the overflow cracks (if any)
                            cracks = cracks_backup
                            logging.info("Progress: " + str(progress / 100) + "% Cracks: " + str(len(cracks)) + " Accepted: " + str(ans['cracked']) + " Skips: " + str(ans['skipped']) + " Zaps: " + str(len(ans['zaps'])))
                else:
                    # non-status stdout lines containing ':' are crack results
                    line = line.decode()
                    if ":" in line:
                        cracks.append(line.strip())
                    else:
                        logging.warning("OUT: " + line.strip())
            else:
                print("ERROR: " + str(line).strip())
def run_loop(self, proc, chunk, task):
    """Main monitoring loop for a running hashcat process (piping-aware).

    Parses hashcat machine-readable status lines from self.io_q, detects low
    GPU utilisation early in a chunk and restarts with piping when allowed,
    sends progress/crack/temperature updates to the server, writes zaps,
    and stops the process on server request or error. Exits when the
    process terminates and the output queue stays empty.
    """
    self.cracks = []
    # piping defaults; both can be overridden via config
    piping_threshold = 95
    enable_piping = False
    if self.config.get_value('piping-threshold'):
        piping_threshold = self.config.get_value('piping-threshold')
    if self.config.get_value('allow-piping') != '':
        enable_piping = self.config.get_value('allow-piping')
    while True:
        try:
            # Block for 1 second.
            if not self.first_status and self.last_update < time.time() - 5:
                # send update: no status seen yet, keep the chunk alive server-side
                query = copy_and_set_token(dict_sendProgress, self.config.get_value('token'))
                query['chunkId'] = chunk['chunkId']
                query['keyspaceProgress'] = chunk['skip']
                query['relativeProgress'] = 0
                query['speed'] = 0
                query['state'] = 2
                query['cracks'] = []
                req = JsonRequest(query)
                logging.info("Sending keepalive progress to avoid timeout...")
                req.execute()
                self.last_update = time.time()
            item = self.io_q.get(True, 1)
        except Empty:
            # No output in either streams for a second. Are we done?
            if proc.poll() is not None:
                # is the case when the process is finished
                break
        else:
            identifier, line = item
            if identifier == 'OUT':
                status = HashcatStatus(line.decode())
                if status.is_valid():
                    self.statusCount += 1
                    # test if we have a low utility
                    # not allowed if brain is used
                    if enable_piping and not self.uses_slow_hash_flag and ('useBrain' not in task or not task['useBrain']) and 'slowHash' in task and task['slowHash'] and not self.usePipe:
                        # only consider piping for file-based attacks without
                        # prince/preprocessor, within status samples 2..9, and
                        # when the reported util is valid but below threshold
                        if task['files'] and not ('usePrince' in task and task['usePrince']) and not ('usePreprocessor' in task and task['usePreprocessor']) and 1 < self.statusCount < 10 and status.get_util() != -1 and status.get_util() < piping_threshold:
                            # we need to try piping -> kill the process and then wait for issuing the chunk again
                            self.usePipe = True
                            chunk_start = int(status.get_progress_total() / (chunk['skip'] + chunk['length']) * chunk['skip'])
                            self.progressVal = status.get_progress_total() - chunk_start
                            logging.info("Detected low UTIL value, restart chunk with piping...")
                            try:
                                kill_hashcat(proc.pid, Initialize.get_os())
                            except ProcessLookupError:
                                pass
                            return
                    self.first_status = True
                    # send update to server
                    logging.debug(line.decode().replace('\n', '').replace('\r', ''))
                    total = status.get_progress_total()
                    if self.usePipe:
                        # if we are piping, we might have saved the total progress before switching to piping, so we can use this
                        total = self.progressVal
                    # we need to calculate the chunk start, because progress does not start at 0 for a chunk
                    chunk_start = int(status.get_progress_total() / (chunk['skip'] + chunk['length']) * chunk['skip'])
                    if total > 0:
                        relative_progress = int((status.get_progress() - chunk_start) / float(total - chunk_start) * 10000)
                    else:
                        # this is the case when we cannot say anything about the progress
                        relative_progress = 0
                    speed = status.get_speed()
                    initial = True
                    if status.get_state() == 4 or status.get_state() == 5:
                        time.sleep(5)  # we wait five seconds so all output is loaded from file
                        # reset piping stuff when a chunk is successfully finished
                        self.progressVal = 0
                        self.usePipe = False
                    # send at least once (initial), then drain remaining cracks
                    while self.cracks or initial:
                        self.lock.acquire()
                        initial = False
                        cracks_backup = []
                        if len(self.cracks) > 1000:
                            # we split: at most 1000 cracks per request,
                            # keep the overflow for the next iteration
                            cnt = 0
                            new_cracks = []
                            for crack in self.cracks:
                                cnt += 1
                                if cnt > 1000:
                                    cracks_backup.append(crack)
                                else:
                                    new_cracks.append(crack)
                            self.cracks = new_cracks
                        query = copy_and_set_token(dict_sendProgress, self.config.get_value('token'))
                        query['chunkId'] = chunk['chunkId']
                        query['keyspaceProgress'] = status.get_curku()
                        if (self.usePipe or 'usePrince' in task and task['usePrince'] or 'usePreprocessor' in task and task['usePreprocessor']) and status.get_curku() == 0:
                            # piped/generated candidates report curku 0; fall
                            # back to the chunk's skip value
                            query['keyspaceProgress'] = chunk['skip']
                        query['relativeProgress'] = relative_progress
                        query['speed'] = speed
                        query['state'] = status.get_state()
                        # crack format: hash[:salt]:plain:hex_plain:crack_pos (separator will be tab instead of :)
                        prepared = []
                        for crack in self.cracks:
                            prepared.append(crack.rsplit(":", 3))
                        query['cracks'] = prepared
                        if status.get_temps():
                            query['gpuTemp'] = status.get_temps()
                        if status.get_all_util():
                            query['gpuUtil'] = status.get_all_util()
                        query['cpuUtil'] = [round(psutil.cpu_percent(), 1)]
                        req = JsonRequest(query)
                        logging.debug("Sending " + str(len(self.cracks)) + " cracks...")
                        ans = req.execute()
                        if ans is None:
                            logging.error("Failed to send solve!")
                        elif ans['response'] != 'SUCCESS':
                            self.wasStopped = True
                            logging.error("Error from server on solve: " + str(ans))
                            try:
                                kill_hashcat(proc.pid, Initialize.get_os())
                            except ProcessLookupError:
                                pass
                            sleep(5)
                            # NOTE(review): returns while self.lock is still
                            # held (acquired above, released only at loop end)
                            # — confirm intended
                            return
                        elif 'agent' in ans.keys() and ans['agent'] == 'stop':
                            # server set agent to stop
                            self.wasStopped = True
                            logging.info("Received stop order from server!")
                            try:
                                kill_hashcat(proc.pid, Initialize.get_os())
                            except ProcessLookupError:
                                pass
                            sleep(5)
                            # NOTE(review): same lock-held-on-return hazard as above
                            return
                        else:
                            cracks_count = len(self.cracks)
                            # continue with the overflow cracks (if any)
                            self.cracks = cracks_backup
                            zaps = ans['zaps']
                            if zaps:
                                logging.debug("Writing zaps")
                                zap_output = "\tFF\n".join(zaps) + '\tFF\n'
                                f = open("hashlist_" + str(task['hashlistId']) + "/" + str(time.time()), 'a')
                                f.write(zap_output)
                                f.close()
                            logging.info("Progress:" + str("{:6.2f}".format(relative_progress / 100)) + "% Speed: " + print_speed(speed) + " Cracks: " + str(cracks_count) + " Accepted: " + str(ans['cracked']) + " Skips: " + str(ans['skipped']) + " Zaps: " + str(len(zaps)))
                        self.lock.release()
                else:
                    # hacky solution to exclude warnings from hashcat
                    if str(line[0]) not in string.printable:
                        continue
                    else:
                        pass  # logging.warning("HCOUT: " + line.strip())
            elif identifier == 'ERR':
                msg = escape_ansi(line.replace(b"\r\n", b"\n").decode('utf-8')).strip()
                if msg and str(msg) != '^C':
                    # this is maybe not the fanciest way, but as ctrl+c is sent to the underlying process it reports it to stderr
                    logging.error("HC error: " + msg)
                    send_error(msg, self.config.get_value('token'), task['taskId'], chunk['chunkId'])
                    sleep(0.1)  # we set a minimal sleep to avoid overreaction of the client sending a huge number of errors, but it should not be slowed down too much, in case the errors are not critical and the agent can continue
def send_error(error, token, task_id):
    """Report a client error for a task to the server.

    :param error: error message text
    :param token: agent authentication token
    :param task_id: id of the affected task
    """
    # consistent with the sibling send_error: use the snake_case helper
    query = copy_and_set_token(dict_clientError, token)
    query['message'] = error
    query['taskId'] = task_id
    req = JsonRequest(query)
    req.execute()