def init_slurm_config(self):
    """
    Locate and parse the Slurm configuration file.

    Reads the path stored under the "slurm_conf_file" configuration key;
    when that key is unset, falls back to Slurm's own config lookup.
    On any failure the error is reported via print_failure and None is
    returned implicitly.
    """
    try:
        conf_file = self.config.get("slurm_conf_file")
        if conf_file is None:
            conf_file = Slurm.find_slurm_config()
        return Slurm.parser_slurm_conf(conf_file)
    except Exception as error:
        print_failure(error)
def __init__(self, *, hashType: str = None, hashesFile: str = None, wordlists: List[str] = None, slurm=None):
    """
    Initialization of John combination attack

    Args:
        hashType (str): John's hash type
        hashesFile (str): Hashes file to attack
        wordlists (List[str]): Wordlist files to combine
        slurm (Slurm): Instance of Slurm class
    """
    attackOptions = {
        # FIX: descriptions were copy-pasted ("Hashcat hash type" in a John
        # module; 'wordlists' was described as "Hashes file").
        'hash_type': Argument(hashType, False, "John hash type"),
        'hashes_file': Argument(hashesFile, True, "Hashes file"),
        'wordlists': Argument(wordlists, True, "Wordlist files"),
    }

    if slurm is None:
        slurm = Slurm()

    initOptions = {
        'mname': JohnCombination.MNAME,
        'author': JohnCombination.AUTHOR,
        'description': JohnCombination.DESCRIPTION,
        'fulldescription': JohnCombination.FULLDESCRIPTION,
        'attackOptions': attackOptions,
        'slurm': slurm
    }

    super().__init__(**initOptions)
def __init__(self, slurm: Slurm = None, pre_attack: Auxiliary = None, post_attack: Auxiliary = None):
    """
    Initialization of Hashcat benchmark class.

    Args:
        slurm (Slurm): Slurm instance; when None a default one is built
            from the option table below.
        pre_attack (Auxiliary): auxiliary module to run before the attack.
        post_attack (Auxiliary): auxiliary module to run after the attack.
    """
    # NOTE: the original computed pre_attack_name/post_attack_name but never
    # used them (init_options takes the Auxiliary objects); removed.
    attack_options = {}

    if slurm is None:
        slurm_options = {
            #"array",
            "account": Argument(None, False, "Cluster account to submit the job"),
            "dependency": Argument(None, False, "Defer the start of this job until the specified dependencies have been satisfied completed"),
            "chdir": Argument(os.getcwd(), True, "Working directory path"),
            "error": Argument(None, False, "Error file"),
            "job_name": Argument('attack', False, "Name for the job allocation"),
            "cluster": Argument(None, False, "Cluster Name"),
            "distribution": Argument('block', True, "Distribution methods for remote processes (<block|cyclic|plane|arbitrary>)"),
            "mail_type": Argument(None, False, "Event types to notify user by email(<BEGIN|END|FAIL|REQUEUE|ALL|TIME_LIMIT_PP>)"),
            "main_user": Argument(None, False, "User email"),
            "mem": Argument(None, False, "Memory per node (<size[units]>)"),
            "mem_per_cpu": Argument(None, False, "Minimum memory required per allocated CPU (<size[units]>)"),
            "cpus_per_task": Argument(1, True, "Number of processors per task"),
            "nodes": Argument(1, True, "Number of nodes(<minnodes[-maxnodes]>)"),
            "gpu": Argument(1, True, "Number of GPUS"),
            "ntasks": Argument(1, True, "Number of tasks"),
            "nice": Argument(None, False, "Run the job with an adjusted scheduling"),
            "output": Argument('slurm-%j.out', True, "Output file name"),
            "open_mode": Argument('truncate', True, "Output open mode (<append|truncate>)"),
            "partition": Argument(None, True, "Partition to submit job"),
            "reservation": Argument(None, False, "Resource reservation name"),
            "time": Argument(None, False, "Limit of time (format: DD-HH:MM:SS)"),
            "test_only": Argument(False, True, "Validate the batch script and return an estimate of when a job would be scheduled to run. No job is actually submitted"),
            "verbose": Argument(False, True, "Increase the verbosity of sbatch's informational messages"),
            "nodelist": Argument(None, False, "Nodelist"),
            "wait": Argument(False, True, "Do not exit until the submitted job terminates"),
            # FIX: description was copy-pasted from "wait"; sbatch --exclude
            # excludes nodes from the allocation.
            "exclude": Argument(None, False, "Nodes to exclude from the job allocation"),
            'batch_script': Argument('attack.sh', True, "Name for the generated batch script"),
            'pmix': Argument('pmix_v3', True, "MPI type")
        }

        slurm = Slurm(**slurm_options)

    init_options = {
        'mname': HashcatBenchmark.MNAME,
        'author': HashcatBenchmark.AUTHOR,
        'description': HashcatBenchmark.DESCRIPTION,
        'fulldescription': HashcatBenchmark.FULLDESCRIPTION,
        'references': HashcatBenchmark.REFERENCES,
        'pre_attack': pre_attack,
        'attack_options': attack_options,
        'post_attack': post_attack,
        'slurm': slurm
    }

    super().__init__(**init_options)
def __init__(self, *, hashType=None, hashesFile=None, masksFile=None, wordlist=None, slurm=None, inverse=False):
    """
    Initialization of John hybrid attack

    Args:
        hashType (str): John's hash type
        hashesFile (str): Hashes file
        masksFile (str): Masks file
        wordlist (str): Wordlist
        slurm (Slurm): Instance of Slurm class
        inverse (bool): False: wordlist + masks, True: masks + wordlist
    """
    attackOptions = {
        'hash_type': Argument(hashType, True, "John hash type"),
        'hashes_file': Argument(hashesFile, True, "Hashes file"),
        'masks_file': Argument(masksFile, True, "Masks file"),
        'wordlist': Argument(wordlist, True, "Wordlist file"),
        # FIX: "wordlits" typo in the user-facing description
        'inverse': Argument(inverse, True, "False: wordlists + masks <> True: masks + wordlist")
    }

    if slurm is None:
        slurm = Slurm()

    initOptions = {
        'mname': JohnHybrid.MNAME,
        'author': JohnHybrid.AUTHOR,
        'description': JohnHybrid.DESCRIPTION,
        'fulldescription': JohnHybrid.FULLDESCRIPTION,
        'attackOptions': attackOptions,
        'slurm': slurm
    }

    super().__init__(**initOptions)
def __init__(self, *, hash_type: str = None, hashes_file: str = None, inverse: bool = False, wordlists: List[str] = None, masks=None, sleep: int = 1, slurm: Slurm = None, pre_attack: Auxiliary = None, post_attack: Auxiliary = None):
    """
    Initialization of Hybrid Attack using hashcat cracker.

    Args:
        hash_type (str): Hashcat hash type
        hashes_file (str): Hashes file to attack
        inverse (bool): combine masks + wordlists instead of wordlists + masks
        wordlists (List[str]): wordlist files
        masks: masks (split by commas) or a masks file
        sleep (int): sleep time between attacks (seconds)
        slurm (Slurm): Slurm instance; built from defaults when None
        pre_attack (Auxiliary): auxiliary module run before the attack
        post_attack (Auxiliary): auxiliary module run after the attack
    """
    attack_options = {
        'hash_type': Argument(hash_type, True, "Hashcat hash type"),
        'hashes_file': Argument(hashes_file, True, "Hashes file"),
        'wordlists': Argument(wordlists, True, "Wordlist files (split by commas)"),
        'masks': Argument(masks, True, "Masks(split by commas) or masks file"),
        'inverse': Argument(inverse, True, "Combine wordlists and masks in inverse form (masks + wordlists)"),
        'sleep': Argument(sleep, True, 'Sleep time between each attack (seconds)')
    }

    if slurm is None:
        slurm_options = {
            "account": Argument(None, False, "Cluster account to submit the job"),
            "dependency": Argument(None, False, "Defer the start of this job until the specified dependencies have been satisfied completed"),
            "chdir": Argument(os.getcwd(), True, "Working directory path"),
            "error": Argument(None, False, "Error file"),
            "job_name": Argument('attack', False, "Name for the job allocation"),
            "cluster": Argument(None, False, "Cluster Name"),
            "distribution": Argument('block', True, "Distribution methods for remote processes (<block|cyclic|plane|arbitrary>)"),
            "mail_type": Argument(None, False, "Event types to notify user by email(<BEGIN|END|FAIL|REQUEUE|ALL|TIME_LIMIT_PP>)"),
            "main_user": Argument(None, False, "User email"),
            "mem": Argument(None, False, "Memory per node (<size[units]>)"),
            "mem_per_cpu": Argument(None, False, "Minimum memory required per allocated CPU (<size[units]>)"),
            "cpus_per_task": Argument(1, True, "Number of processors per task"),
            "nodes": Argument(1, True, "Number of nodes(<minnodes[-maxnodes]>)"),
            "gpu": Argument(1, True, "Number of GPUS"),
            "ntasks": Argument(1, True, "Number of tasks"),
            "nice": Argument(None, False, "Run the job with an adjusted scheduling"),
            "output": Argument('slurm-%j.out', True, "Output file name"),
            "open_mode": Argument('truncate', True, "Output open mode (<append|truncate>)"),
            "partition": Argument(None, True, "Partition to submit job"),
            "reservation": Argument(None, False, "Resource reservation name"),
            "time": Argument(None, False, "Limit of time (format: DD-HH:MM:SS)"),
            "test_only": Argument(False, True, "Validate the batch script and return an estimate of when a job would be scheduled to run. No job is actually submitted"),
            "verbose": Argument(False, True, "Increase the verbosity of sbatch's informational messages"),
            "nodelist": Argument(None, False, "Nodelist"),
            "wait": Argument(False, True, "Do not exit until the submitted job terminates"),
            # FIX: description was copy-pasted from "wait"; sbatch --exclude
            # excludes nodes from the allocation.
            "exclude": Argument(None, False, "Nodes to exclude from the job allocation"),
            'batch_script': Argument('attack.sh', True, "Name for the generated batch script"),
            'pmix': Argument('pmix_v3', True, "MPI type")
        }

        slurm = Slurm(**slurm_options)

    init_options = {
        'mname': HashcatHybrid.MNAME,
        'author': HashcatHybrid.AUTHOR,
        'description': HashcatHybrid.DESCRIPTION,
        'fulldescription': HashcatHybrid.FULLDESCRIPTION,
        'references': HashcatHybrid.REFERENCES,
        'pre_attack': pre_attack,
        'attack_options': attack_options,
        'post_attack': post_attack,
        'slurm': slurm
    }

    super().__init__(**init_options)
def hybrid_attack(self, *, hash_types: List[str], hashes_file: str, inverse: bool = False, wordlists: List[str], masks: List[str] = None, masks_file: Path = None, sleep: int = 1, slurm: Slurm, local: bool = False, db_status: bool = False, workspace: str = None, db_credential_file: Path = None):
    """
    Hybrid attack using hashcat, submitting parallel tasks in a cluster with Slurm.

    Args:
        hash_types (List[str]): Hashcat hash types to try
        hashes_file (str): Hash file to attack
        inverse (bool): if False combine WORDLISTS + MASKS (-a 6),
            otherwise MASKS + WORDLISTS (-a 7)
        wordlists (List[str]): wordlists to attack with
        masks (List[str]): masks to attack with (mutually exclusive with masks_file)
        masks_file (Path): masks file to attack with
        sleep (int): seconds to sleep between local attacks
        slurm (Slurm): Slurm instance; used when not running locally
        local (bool): run locally instead of submitting via sbatch
        db_status/workspace/db_credential_file: when all set, cracked
            hashes are inserted into the amadb database
    """
    #import pdb; pdb.set_trace()
    if self.enable:
        try:
            # hashcat attack mode: 6 = wordlist+mask, 7 = mask+wordlist
            if not inverse:
                attack_mode = 6
            else:
                attack_mode = 7

            permission = [os.R_OK]
            Path.access(permission, hashes_file, *wordlists)
            Hashcat.check_hash_type(hash_types)

            # masks and masks_file are mutually exclusive inputs
            if masks and masks_file:
                raise Exception("Only supplied masks or a masks file")
            elif masks:
                # keep only syntactically valid masks
                valid_masks = []
                for mask in masks:
                    if Mask.is_mask(mask):
                        valid_masks.append(mask)
                if not valid_masks:
                    raise Exception("No valid masks supplied")
                masks = valid_masks
            else: # masks_file supplied
                Path.access(permission, masks_file)
                # read masks until the first empty line (walrus stops on '')
                masks = []
                with open(masks_file, 'r') as _masks_file:
                    while mask := _masks_file.readline().rstrip():
                        if Mask.is_mask(mask):
                            masks.append(mask)

            if not inverse:
                print_status(
                    f"Attacking hashes in {ColorStr(hashes_file).StyleBRIGHT} file in hibrid mode {ColorStr('WORDLISTS + MASKS').StyleBRIGHT} "
                )
            else:
                print_status(
                    f"Attacking hashes in {ColorStr(hashes_file).StyleBRIGHT} file in hibrid mode {ColorStr('MASKS + WORDLISTS').StyleBRIGHT}"
                )

            print_status(f"Wordlists: {ColorStr(wordlists).StyleBRIGHT}")
            if masks and masks_file is None:
                print_status(f"Masks: {ColorStr(masks).StyleBRIGHT}")
            else:
                print_status(
                    f"Masks file: {ColorStr(masks_file).StyleBRIGHT}")

            hash_types_names = [
                Hashcat.HASHES[hash_type]['Name'] for hash_type in hash_types
            ]
            print_status(
                f"Possible hashes identities: {ColorStr(hash_types_names).StyleBRIGHT}"
            )

            #import pdb; pdb.set_trace()
            if (not local) and slurm and slurm.partition:
                # cluster mode: only GPU parallel jobs are supported here
                parallel_job_type = slurm.parallel_job_parser()
                if not parallel_job_type in ["GPU"]:
                    raise InvalidParallelJob(parallel_job_type)

                # one (header, cmd[, db-insert]) tuple per
                # hash_type x wordlist x mask combination
                parallel_work = []
                for hash_type in hash_types:
                    for wordlist in wordlists:
                        for mask in masks:
                            if not inverse:
                                attack_cmd = (
                                    f"{self.main_exec}"
                                    f" -a {attack_mode}"
                                    f" -m {hash_type}"
                                    f" {hashes_file} {wordlist} {mask}")
                            else:
                                attack_cmd = (
                                    f"{self.main_exec}"
                                    f" -a {attack_mode}"
                                    f" -m {hash_type}"
                                    f" {hashes_file} {mask} {wordlist}")

                            header_attack = f"echo -e '\\n\\n[*] Running: {attack_cmd}'"
                            if db_status and workspace and db_credential_file:
                                insert_cracked_hashes = (
                                    f"amadb -c {db_credential_file} -w {workspace}"
                                    f" --cracker {Hashcat.MAINNAME} -j {hashes_file}"
                                )
                                parallel_work.append(
                                    (header_attack, attack_cmd, insert_cracked_hashes))
                            else:
                                parallel_work.append(
                                    (header_attack, attack_cmd))

                slurm_script_name = slurm.gen_batch_script(parallel_work)
                #import pdb; pdb.set_trace()
                Bash.exec(f"sbatch {slurm_script_name}")
            else:
                #import pdb; pdb.set_trace()
                # local mode: run attacks sequentially, stopping early once
                # every hash is cracked (checked before each combination)
                all_cracked = False
                for hash_type in hash_types:
                    for wordlist in wordlists:
                        for mask in masks:
                            all_cracked = Hashcat.are_all_hashes_cracked(
                                hashes_file)
                            if not all_cracked: # some hash isn't cracked yet
                                if not inverse:
                                    attack_cmd = (
                                        f"{self.main_exec}"
                                        f" -a {attack_mode}"
                                        f" -m {hash_type}"
                                        f" {hashes_file} {wordlist} {mask}"
                                    )
                                else:
                                    attack_cmd = (
                                        f"{self.main_exec}"
                                        f" -a {attack_mode}"
                                        f" -m {hash_type}"
                                        f" {hashes_file} {mask} {wordlist}"
                                    )

                                print()
                                print_status(
                                    f"Running: {ColorStr(attack_cmd).StyleBRIGHT}"
                                )
                                Bash.exec(attack_cmd)

                                if sleep > 0:
                                    print_status(
                                        f"{ColorStr('Sleeping ...').StyleBRIGHT}"
                                    )
                                    time.sleep(sleep)
                            else:
                                print_successful(
                                    f"Hashes in {ColorStr(hashes_file).StyleBRIGHT} were cracked"
                                )
                                break
                        # propagate the early exit through the outer loops
                        if all_cracked:
                            break
                    if all_cracked:
                        break

            #import pdb; pdb.set_trace()
            # NOTE(review): in cluster mode this runs right after sbatch
            # (asynchronous job) — presumably intentional; confirm
            if db_status and workspace and db_credential_file:
                Hashcat.insert_hashes_to_db(hashes_file, workspace, db_credential_file)
        except Exception as error:
            print_failure(error)
def gen_masks_attack(*, hash_types: List[str], hashes_file: Path, masks_file: Path, masks_attack_script: Path, slurm: Slurm, db_status: bool, workspace: str, db_credential_file: Path):
    """
    Generate a standalone Python script (masks_attack_script) that performs a
    hashcat masks attack; the script is meant to be launched inside a Slurm
    batch job (it uses srun).

    Only GPU parallel jobs are supported; raises InvalidParallelJob otherwise.
    """
    #import pdb; pdb.set_trace()
    parallel_job_type = slurm.parallel_job_parser()
    if not parallel_job_type in ["GPU"]:
        raise InvalidParallelJob(parallel_job_type)

    # Placeholders that must survive the outer f-string below as literal
    # brace-expressions, so they are evaluated later *inside* the generated
    # script's own f-strings (e.g. {_mask} renders as the text "{mask}").
    _hc_main_exec = "{hc.main_exec}"
    _mask = "{mask}"
    _hash_type = "{hash_type}"
    _hashes_file = "{hashes_file}"
    _mask_attack = "{mask_attack}"
    _header_attack = "{header_attack}"
    _workspace = "{workspace}"

    # Values quoted here so they appear as string literals in the script.
    __hash_types = f"'{hash_types}'"
    __hashes_file = f"'{hashes_file}'"
    __masks_file = f"'{masks_file}'"
    __workspace = f"'{workspace}'"
    __db_credential_file = f"'{db_credential_file}'"

    masks_attack = (f"""
#!/bin/env python3

from sbash import Bash

from ama.core.plugins.cracker import Hashcat
from ama.core.files import Path

hash_types = {hash_types}
hashes_file = Path({__hashes_file})
masks_file = {__masks_file}
db_status = {db_status}
workspace = {__workspace if workspace else None}
db_credential_file = Path({__db_credential_file})

hc = Hashcat()

all_cracked = False
for hash_type in hash_types:
    with open(masks_file, 'r') as masks:
        while mask := masks.readline().rstrip():
            all_cracked = Hashcat.are_all_hashes_cracked(hashes_file)
            if not all_cracked:
                mask_attack = (
                    f"srun {_hc_main_exec} -a 3"
                    f" -m {_hash_type} {_hashes_file} {_mask}"
                )
                header_attack = f"[*] Running: {_mask_attack}"
                Bash.exec(f"echo -e '\\n\\n\\n{_header_attack}'")
                Bash.exec(mask_attack)
            else:
                break

    if all_cracked := Hashcat.are_all_hashes_cracked(hashes_file):
        print(f"\\n[*] Hashes in {_hashes_file} were cracked")
        break

if db_status and workspace and db_credential_file:
    Hashcat.insert_hashes_to_db(hashes_file, workspace, db_credential_file)
""")

    with open(masks_attack_script, 'w') as attack:
        attack.write(masks_attack)

    print_successful(
        f"Masks attack script generated: {ColorStr(masks_attack_script).StyleBRIGHT}"
    )
def masks_attack(self, *, hash_types: List[int], hashes_file: str, masks_file: str, masks_attack_script: str, sleep: int = 1, slurm: Slurm, local: bool = False, db_status: bool = False, workspace: str = None, db_credential_file: Path = None):
    """
    Masks attack using hashcat, submitting parallel tasks in a cluster with Slurm.

    Args:
        hash_types (List[int]): Hashcat hash types to try
        hashes_file (str): Hash file to attack
        masks_file (str): Masks file to attack with
        masks_attack_script (str): name of the generated attack script (cluster mode)
        sleep (int): seconds to sleep between local attacks
        slurm (Slurm): Slurm instance; used when not running locally
        local (bool): run locally instead of submitting via sbatch
        db_status/workspace/db_credential_file: when all set, cracked
            hashes are inserted into the amadb database
    """
    if self.enable:
        try:
            attack_mode = 3  # hashcat brute-force/mask mode
            permission = [os.R_OK]
            Path.access(permission, hashes_file, masks_file)
            Hashcat.check_hash_type(hash_types)

            print_status(
                f"Attacking hashes in {ColorStr(hashes_file).StyleBRIGHT} file with {ColorStr(masks_file).StyleBRIGHT} masks file."
            )
            hash_types_names = [
                Hashcat.HASHES[hash_type]['Name'] for hash_type in hash_types
            ]
            print_status(
                f"Possible hashes identities: {ColorStr(hash_types_names).StyleBRIGHT}"
            )

            if (not local) and slurm and slurm.partition:
                # cluster mode: generate a helper script and submit it
                Hashcat.gen_masks_attack(
                    hash_types=hash_types,
                    hashes_file=hashes_file,
                    masks_file=masks_file,
                    masks_attack_script=masks_attack_script,
                    slurm=slurm,
                    db_status=db_status,
                    workspace=workspace,
                    db_credential_file=db_credential_file)

                parallel_work = [(f"python3 {masks_attack_script}", )]
                slurm_script_name = slurm.gen_batch_script(parallel_work)
                Bash.exec(f"sbatch {slurm_script_name}")
            else:
                # local mode: iterate masks per hash type, stopping early
                # once every hash is cracked
                all_cracked = False
                for hash_type in hash_types:
                    with open(masks_file, 'r') as masks:
                        while mask := masks.readline().rstrip():
                            if not Mask.is_mask(mask):
                                # FIX: was a plain string literal, printed
                                # "{mask}" verbatim (missing f-prefix)
                                print_failure(f"Invalid mask: {mask}")
                                break

                            all_cracked = Hashcat.are_all_hashes_cracked(
                                hashes_file)
                            if not all_cracked:
                                attack_cmd = (f"{self.main_exec}"
                                              f" -a {attack_mode}"
                                              f" -m {hash_type}"
                                              f" {hashes_file} {mask}")
                                print()
                                print_status(
                                    f"Running: {ColorStr(attack_cmd).StyleBRIGHT}"
                                )
                                Bash.exec(attack_cmd)

                                if sleep > 0:
                                    print_status(
                                        f"{ColorStr('Sleeping ...').StyleBRIGHT}"
                                    )
                                    time.sleep(sleep)
                            else:
                                break

                    #all hashes were cracked so stop attack
                    if all_cracked := Hashcat.are_all_hashes_cracked(
                            hashes_file):
                        print_successful(
                            f"Hashes in {ColorStr(hashes_file).StyleBRIGHT} were cracked"
                        )
                        break

            if db_status and workspace and db_credential_file:
                Hashcat.insert_hashes_to_db(hashes_file, workspace, db_credential_file)
        # FIX: the try block had no except clause (truncated source);
        # restored consistent with the sibling attack methods.
        except Exception as error:
            print_failure(error)
def brute_force_attack(self, *, hash_types: List[str], hashes_file: str, mask: str, sleep: int = 1, slurm: Slurm, local: bool = False, db_status: bool = False, workspace: str = None, db_credential_file: Path = None):
    """
    Brute force attack using hashcat, submitting parallel tasks in a cluster with Slurm.

    Args:
        hash_types (List[str]): Hashcat hash types to try
            (annotation corrected from str: the value is iterated)
        hashes_file (str): Hash file to attack
        mask (str): mask to attack with
        sleep (int): seconds to sleep between local attacks
        slurm (Slurm): Slurm instance; used when not running locally
        local (bool): run locally instead of submitting via sbatch
        db_status/workspace/db_credential_file: when all set, cracked
            hashes are inserted into the amadb database
    """
    #import pdb; pdb.set_trace()
    if self.enable:
        try:
            attack_mode = 3  # hashcat brute-force/mask mode
            permission = [os.R_OK]
            Mask.is_valid_mask(mask)
            Path.access(permission, hashes_file)
            Hashcat.check_hash_type(hash_types)

            print_status(
                f"Attacking hashes in {ColorStr(hashes_file).StyleBRIGHT} file with {ColorStr(mask).StyleBRIGHT} mask"
            )
            hash_types_names = [
                Hashcat.HASHES[hash_type]['Name'] for hash_type in hash_types
            ]
            print_status(
                f"Possible hashes identities: {ColorStr(hash_types_names).StyleBRIGHT}"
            )

            if (not local) and slurm and slurm.partition:
                # cluster mode: only GPU parallel jobs are supported
                parallel_job_type = slurm.parallel_job_parser()
                if not parallel_job_type in ["GPU"]:
                    raise InvalidParallelJob(parallel_job_type)

                # one (header, cmd[, db-insert]) tuple per hash type
                parallel_work = []
                for hash_type in hash_types:
                    attack_cmd = (f"srun {self.main_exec}"
                                  f" -a {attack_mode}"
                                  f" -m {hash_type}"
                                  f" {hashes_file} {mask}")
                    header_attack = f"echo -e '\\n\\n[*] Running: {attack_cmd}'"
                    if db_status and workspace and db_credential_file:
                        insert_cracked_hashes = (
                            f"amadb -c {db_credential_file} -w {workspace}"
                            f" --cracker {Hashcat.MAINNAME} -j {hashes_file}"
                        )
                        parallel_work.append((header_attack, attack_cmd,
                                              insert_cracked_hashes))
                    else:
                        parallel_work.append((header_attack, attack_cmd))

                slurm_script_name = slurm.gen_batch_script(parallel_work)
                #import pdb;pdb.set_trace()
                Bash.exec(f"sbatch {slurm_script_name}")
            else:
                #import pdb;pdb.set_trace()
                # local mode: try each hash type until all hashes crack
                for hash_type in hash_types:
                    are_all_hashes_cracked = Hashcat.are_all_hashes_cracked(
                        hashes_file)
                    if not are_all_hashes_cracked: # some hash isn't cracked yet
                        attack_cmd = (f"{self.main_exec}"
                                      f" -a {attack_mode}"
                                      f" -m {hash_type}"
                                      f" {hashes_file} {mask}")
                        print()
                        print_status(
                            f"Running: {ColorStr(attack_cmd).StyleBRIGHT}")
                        Bash.exec(attack_cmd)

                        if sleep > 0:
                            print_status(
                                f"{ColorStr('Sleeping ...').StyleBRIGHT}")
                            time.sleep(sleep)
                    else:
                        print_successful(
                            f"Hashes in {ColorStr(hashes_file).StyleBRIGHT} were cracked"
                        )
                        break

            #import pdb;pdb.set_trace()
            # NOTE(review): in cluster mode this runs right after sbatch
            # (asynchronous job) — presumably intentional; confirm
            if db_status and workspace and db_credential_file:
                Hashcat.insert_hashes_to_db(hashes_file, workspace,
                                            db_credential_file)
        except Exception as error:
            print_failure(error)
    else:
        print_failure(
            f"Cracker {ColorStr(self.main_name).StyleBRIGHT} is disable")
def __init__(self, *, users=None, passwords=None, #users_passwd_file=None,
             port=None, ip4=True, output=None, output_format="text", verbose=True, stopInSuccess=False, stopInSuccessPerTarget=True, targets=None, service=None, slurm=None):
    """
    Initialization of Hydra wordlist attack module.

    Args:
        users: user or users file to login with
        passwords: password or passwords file
        port: service port
        ip4 (bool): use IPv4, otherwise IPv6
        output: output file
        output_format (str): output format (<text|json|jsonv1>)
        verbose (bool): show login+pass for each attempt
        stopInSuccess (bool): exit when a login/pass pair is found (global)
        stopInSuccessPerTarget (bool): exit when a login/pass pair is found (per target)
        targets: server(s) to attack
        service: service to crack
        slurm (Slurm): Slurm instance; built from defaults when None
    """
    attack_options = {
        'users': Argument(users, True, "User or users file to login"),
        #'users_passwd_file': Argument(users_passwd_file),
        'passwords': Argument(passwords, True, "Password or Passwords file"),
        'port': Argument(port, False, "Service port"),
        'ip4': Argument(ip4, True, "Use Ip4 otherwise use IP6"),
        'output': Argument(output, False, "Output file"),
        'output_format': Argument(output_format, True, "Output format file <text|json|jsonv1>"),
        'verbose': Argument(verbose, True, "Show login+pass for each attempt"),
        'stop_in_success': Argument(stopInSuccess, True, "Exit when a login/pass pair is found (global)"),
        'stop_in_success_per_target': Argument(stopInSuccessPerTarget, True, "Exit when a login/pass pair is found (per target)"),
        'targets': Argument(targets, True, "Server (DNS, IP or 192.168.0.0/24) or servers (one target per line, ':' to specify port) to attack"),
        'service': Argument(service, True, "Service to crack")
    }

    if slurm is None:
        slurm_options = {
            "account": Argument(None, False, "Cluster account to submit the job"),
            "dependency": Argument(None, False, "Defer the start of this job until the specified dependencies have been satisfied completed"),
            "chdir": Argument(os.getcwd(), True, "Working directory path"),
            "error": Argument(None, False, "Error file"),
            "job_name": Argument('attack', False, "Name for the job allocation"),
            "cluster": Argument(None, False, "Cluster Name"),
            "distribution": Argument('block', True, "Distribution methods for remote processes (<block|cyclic|plane|arbitrary>)"),
            "mail_type": Argument(None, False, "Event types to notify user by email(<BEGIN|END|FAIL|REQUEUE|ALL|TIME_LIMIT_PP>)"),
            "main_user": Argument(None, False, "User email"),
            "mem": Argument(None, False, "Memory per node (<size[units]>)"),
            "mem_per_cpu": Argument(None, False, "Minimum memory required per allocated CPU (<size[units]>)"),
            "cpus_per_task": Argument(1, True, "Number of processors per task"),
            "nodes": Argument(1, True, "Number of nodes(<minnodes[-maxnodes]>)"),
            "ntasks": Argument(1, True, "Number of tasks"),
            "nice": Argument(None, False, "Run the job with an adjusted scheduling"),
            "output": Argument('slurm-%j.out', True, "Output file name"),
            "open_mode": Argument('truncate', True, "Output open mode (<append|truncate>)"),
            "partition": Argument(None, True, "Partition to submit job"),
            "reservation": Argument(None, False, "Resource reservation name"),
            "time": Argument(None, False, "Limit of time (format: DD-HH:MM:SS)"),
            "test_only": Argument(False, True, "Validate the batch script and return an estimate of when a job would be scheduled to run. No job is actually submitted"),
            "verbose": Argument(False, True, "Increase the verbosity of sbatch's informational messages"),
            "nodelist": Argument(None, False, "Nodelist"),
            "wait": Argument(False, True, "Do not exit until the submitted job terminates"),
            # FIX: description was copy-pasted from "wait"; sbatch --exclude
            # excludes nodes from the allocation.
            "exclude": Argument(None, False, "Nodes to exclude from the job allocation"),
            'batch_script': Argument('attack.sh', True, "Name for the generated batch script"),
            'pmix': Argument('pmix_v3', True, "MPI type")
        }

        slurm = Slurm(**slurm_options)

    init_options = {
        'mname': HydraWordlist.MNAME,
        'author': HydraWordlist.AUTHOR,
        'description': HydraWordlist.DESCRIPTION,
        'fulldescription': HydraWordlist.FULLDESCRIPTION,
        'references': HydraWordlist.REFERENCES,
        'attack_options': attack_options,
        'slurm': slurm
    }

    super().__init__(**init_options)
def masks_attack(self, *, hash_types: List[str] = None, hashes_file: Path, masks_file: Path, slurm: Slurm, local: bool = False, db_status: bool = False, workspace: str = None, db_credential_file: Path = None):
    """
    Masks attack using john, submitting parallel tasks in a cluster with Slurm.

    Args:
        hash_types (List[str]): John hash types to try (optional)
        hashes_file (Path): Hash file to attack
        masks_file (Path): Masks file to attack with
        slurm (Slurm): Slurm instance; used when not running locally
        local (bool): run locally instead of submitting via sbatch
        db_status/workspace/db_credential_file: when all set, cracked
            hashes are inserted into the amadb database
    """
    if self.enable:
        try:
            permission = [os.R_OK]
            Path.access(permission, hashes_file, masks_file)
            if hash_types:
                John.check_hash_type(hash_types)

            print_status(f"Attacking hashes in {ColorStr(hashes_file).StyleBRIGHT} file with {ColorStr(masks_file).StyleBRIGHT} masks file")
            print_status(f"Possible hashes identities: {ColorStr(hash_types).StyleBRIGHT}")

            if (not local) and slurm and slurm.partition:
                # cluster mode: only MPI/OMP parallel jobs are supported
                self.check_slurm_partition(slurm.partition, slurm.config['partitions'])
                parallel_job_type = slurm.parallel_job_parser()
                if not parallel_job_type in ["MPI", "OMP"]:
                    raise InvalidParallelJob(parallel_job_type)

                array_tasks = slurm.sbatch['array'].value
                if array_tasks is None:
                    array_tasks = 1

                base_path = masks_file.parent
                name_masks_file = masks_file.name
                suffix = masks_file.suffix

                if array_tasks > 1:
                    # split the masks file into one chunk per array task;
                    # the batch script selects its chunk via SLURM_ARRAY_TASK_ID
                    self.array_masks(masks_file, array_tasks)
                    only_name_masks_file = name_masks_file[:-len(suffix)]
                    for a in range(array_tasks):
                        name_split_masks_file = only_name_masks_file + str(a) + suffix
                        split_masks_file = Path.joinpath(base_path, name_split_masks_file)
                        print_status(f"(array id {a}) Processing: masks file = {split_masks_file}")
                    MASKS_FILE = only_name_masks_file + "${SLURM_ARRAY_TASK_ID}" + suffix
                else:
                    MASKS_FILE = masks_file.name

                MASKS_FILE = Path.joinpath(base_path, MASKS_FILE)
                HASHES_FILE = hashes_file
                HID = self.pylist2bash(hash_types)

                variable_definition_block = (
                    f"HASHES_FILE={HASHES_FILE}",
                    f"MASKS_FILE={MASKS_FILE}",
                    f"HID={HID}",
                )

                # ${mask}/${hid}/${HASHES_FILE} are bash variables expanded
                # by the generated batch script, not Python placeholders
                attack_cmd = f"{self.main_exec}"
                attack_cmd += " --mask=${mask}"
                attack_cmd += " --format=${hid}"

                if parallel_job_type == "MPI":
                    attack_cmd = f"srun --mpi={slurm.pmix} " + attack_cmd
                elif parallel_job_type == "OMP":
                    attack_cmd = f"srun " + attack_cmd

                attack_cmd += " ${HASHES_FILE}"
                header_attack = f"echo -e \"\\n\\n[*] Running: {attack_cmd}\""

                insert_cracked_hashes = ''
                if db_status and workspace and db_credential_file:
                    insert_cracked_hashes = (
                        f"amadb -c {db_credential_file} -w {workspace}"
                        f" --cracker {John.MAINNAME} -j {hashes_file}"
                    )

                cracking_block = (
                    "while read mask",
                    "do",
                    "\tfor hid in ${HID[@]}; do",
                    "\t\t" + header_attack,
                    "\t\t" + attack_cmd,
                    "\t\t" + insert_cracked_hashes,
                    "\t\t" + "all_cracked=false",
                    "\t\t" + "if $all_cracked; then break; fi",
                    "\tdone",
                    "done < ${MASKS_FILE}"
                )

                parallel_work = (variable_definition_block, cracking_block)
                slurm_script_name = slurm.gen_batch_script(parallel_work)
                # FIX: removed an active (uncommented) `import pdb;
                # pdb.set_trace()` that halted execution here
                Bash.exec(f"sbatch {slurm_script_name}")
            else:
                # local mode: iterate masks per hash type, stopping early
                # once every hash is cracked
                all_cracked = False
                for hash_type in hash_types:
                    with open(masks_file, 'r') as masks:
                        while mask := masks.readline().rstrip():
                            all_cracked = John.are_all_hashes_cracked(hashes_file)
                            if not all_cracked:
                                attack_cmd = f"{self.main_exec} --mask={mask}"
                                if hash_type:
                                    attack_cmd += f" --format={hash_type}"
                                attack_cmd += f" {hashes_file}"
                                print()
                                print_status(f"Running: {ColorStr(attack_cmd).StyleBRIGHT}")
                                Bash.exec(attack_cmd)
                            else:
                                break

                    if all_cracked := John.are_all_hashes_cracked(hashes_file):
                        print_successful(f"Hashes in {ColorStr(hashes_file).StyleBRIGHT} were cracked")
                        break

            if db_status and workspace and db_credential_file:
                John.insert_hashes_to_db(hashes_file, workspace, db_credential_file, pretty=True)
        # FIX: the try block had no except clause (truncated source);
        # restored consistent with the sibling attack methods.
        except Exception as error:
            print_failure(error)
def incremental_attack(self, *, hash_types: List[str] = None, hashes_file: str, slurm: Slurm, local: bool = False, db_status: bool = False, workspace: str = None, db_credential_file: Path = None):
    """
    Incremental attack using john, submitting parallel tasks in a cluster with Slurm.

    Args:
        hash_types (List[str]): John hash types to try (optional)
        hashes_file (str): Hash file to attack
        slurm (Slurm): Slurm instance; used when not running locally
        local (bool): run locally instead of submitting via sbatch
        db_status/workspace/db_credential_file: when all set, cracked
            hashes are inserted into the amadb database
    """
    #import pdb; pdb.set_trace()
    if self.enable:
        try:
            permission = [os.R_OK]
            Path.access(permission, hashes_file)
            if hash_types:
                John.check_hash_type(hash_types)

            print_status(f"Attacking hashes in {ColorStr(hashes_file).StyleBRIGHT} file in incremental mode")
            print_status(f"Possible hashes identities: {ColorStr(hash_types).StyleBRIGHT}")

            if (not local) and slurm and slurm.partition:
                # cluster mode: only MPI/OMP parallel jobs are supported
                parallel_job_type = slurm.parallel_job_parser()
                if not parallel_job_type in ["MPI", "OMP"]:
                    raise InvalidParallelJob(parallel_job_type)

                # one (header, cmd[, db-insert]) tuple per hash type
                parallel_work = []
                for hash_type in hash_types:
                    attack_cmd = f"{self.main_exec} --incremental"

                    if parallel_job_type == "MPI":
                        attack_cmd = f"srun --mpi={slurm.pmix} " + attack_cmd
                    elif parallel_job_type == "OMP":
                        attack_cmd = f"srun " + attack_cmd

                    if hash_type:
                        attack_cmd += f" --format={hash_type}"

                    attack_cmd += f" {hashes_file}"
                    header_attack = f"echo -e '\\n\\n[*] Running: {attack_cmd}'"
                    if db_status and workspace and db_credential_file:
                        insert_cracked_hashes = (
                            f"amadb -c {db_credential_file} -w {workspace}"
                            f" --cracker {John.MAINNAME} -j {hashes_file}"
                        )
                        parallel_work.append((header_attack, attack_cmd, insert_cracked_hashes))
                    else:
                        parallel_work.append((header_attack, attack_cmd))

                slurm_script_name = slurm.gen_batch_script(parallel_work)
                Bash.exec(f"sbatch {slurm_script_name}")
            else:
                #import pdb;pdb.set_trace()
                # local mode: skip remaining hash types once all hashes crack
                for hash_type in hash_types:
                    attack_cmd = f"{self.main_exec} --incremental"
                    if hash_type:
                        attack_cmd += f" --format={hash_type}"
                    attack_cmd += f" {hashes_file}"

                    if are_all_hashes_cracked := John.are_all_hashes_cracked(hashes_file):
                        print_successful(f"Hashes in {ColorStr(hashes_file).StyleBRIGHT} were cracked")
                        break
                    else: # some hash isn't cracked yet
                        print()
                        print_status(f"Running: {ColorStr(attack_cmd).StyleBRIGHT}")
                        Bash.exec(attack_cmd)

            if db_status and workspace and db_credential_file:
                John.insert_hashes_to_db(hashes_file, workspace, db_credential_file, pretty=True)
        except Exception as error:
            #cmd2.Cmd.pexcept(error)
            print_failure(error)
    else:
        print_failure(f"Cracker {ColorStr(self.main_name).StyleBRIGHT} is disable")
def wordlist_attack(self, *, hash_types: List[str] = None, hashes_file: Path, wordlists: List[Path], rules: str = None, rules_file: Path = None, slurm: Slurm, local: bool = False, db_status: bool = False, workspace: str = None, db_credential_file: Path = None):
    """
    Wordlist attack using john, submitting parallel tasks in a cluster with Slurm.

    The slurm path splits the work across an optional job array: with several
    wordlists the array is partitioned by wordlist; with one wordlist and
    several hash identities it is partitioned by identity; with one of each a
    single (non-array) job is submitted.

    Args:
        hash_types (List[str]): possible John hash identities of the target hashes
        hashes_file (Path): hashes file to attack
        wordlists (List[Path]): wordlists to use in the attack
        rules (str): John rules name (applied only together with rules_file)
        rules_file (Path): file containing the John rules
        slurm (Slurm): Slurm configuration instance; a configured partition (and
            local=False) triggers cluster submission, otherwise the attack runs locally
        db_status (bool): insert cracked hashes into the amadb database
        workspace (str): amadb workspace name (used together with db_status)
        db_credential_file (Path): database credentials file
    """
    if not self.enable:
        print_failure(f"Cracker {ColorStr(self.main_name).StyleBRIGHT} is disable")
        return

    try:
        permission = [os.R_OK]
        Path.access(permission, hashes_file, *wordlists)
        if hash_types:
            John.check_hash_type(hash_types)
        if rules and rules_file:
            Path.access(permission, rules_file)

        print_status(f"Attacking hashes in {ColorStr(hashes_file).StyleBRIGHT} file in wordlist mode")
        print_status(f"Wordlists: {ColorStr(wordlists).StyleBRIGHT}")
        print_status(f"Possible hashes identities: {ColorStr(hash_types).StyleBRIGHT}")

        if (not local) and slurm and slurm.partition:
            # NOTE(review): this path requires a non-empty hash_types — confirm callers.
            self.check_slurm_partition(slurm.partition, slurm.config['partitions'])

            parallel_job_type = slurm.parallel_job_parser()
            if parallel_job_type not in ["MPI", "OMP"]:
                raise InvalidParallelJob(parallel_job_type)

            hash_types_len = len(hash_types)
            wordlists_len = len(wordlists)
            array_tasks = slurm.sbatch['array'].value
            if array_tasks is None:
                array_tasks = 1

            if wordlists_len > 1:  # several wordlists, any number of identities
                if array_tasks > 1:
                    if array_tasks > wordlists_len:
                        print_failure(f"These is more array jobs that work to process (ARRAY={array_tasks}, WLS={wordlists_len})")
                        print_status(f"Adjusting {ColorStr('ARRAY').StyleBRIGHT} to {wordlists_len} (1 job per wordlist)")
                        array_tasks = wordlists_len
                        slurm.set_option('array', array_tasks)

                    # Report which slice of the wordlists each array task processes.
                    for array_task_id in range(array_tasks):
                        init = floor(wordlists_len/array_tasks)*array_task_id
                        if array_task_id == (array_tasks - 1):
                            end = wordlists_len  # last task absorbs the remainder
                        else:
                            end = floor(wordlists_len/array_tasks)*(array_task_id+1)
                        print_status(f"(array id {array_task_id}) Processing: wordlists={ColorStr(wordlists[init:end]).StyleBRIGHT}, hash_types={ColorStr('ALL').StyleBRIGHT}")

                    # Bash variables: each array task computes its own INIT/END slice.
                    WLS = self.pylist2bash(wordlists)
                    HID = self.pylist2bash(hash_types)
                    ARRAY = slurm.sbatch['array'].value  # array enumeration: 0-(ARRAY-1)
                    LEN_WLS = "${#WLS[@]}"
                    INIT = "$((LEN_WLS/ARRAY * SLURM_ARRAY_TASK_ID))"
                    END = "$((LEN_WLS/ARRAY * (SLURM_ARRAY_TASK_ID+1)))"
                    variable_definition_block = (
                        f"WLS={WLS}",
                        f"HID={HID}",
                        f"LEN_WLS={LEN_WLS}",
                        f"ARRAY={ARRAY}",
                        f"INIT={INIT}",
                        "\nif [[ $SLURM_ARRAY_TASK_ID -eq $((ARRAY -1)) ]]; then",
                        "\t" + "END=$LEN_WLS",
                        "else",
                        "\t" + f"END={END}",
                        "fi",
                    )
                else:
                    WLS = self.pylist2bash(wordlists)
                    HID = self.pylist2bash(hash_types)
                    INIT = 0
                    END = wordlists_len
                    variable_definition_block = (
                        f"WLS={WLS}",
                        f"HID={HID}",
                        f"INIT={INIT}",
                        f"END={END}",
                    )

                attack_cmd = f"{self.main_exec}"
                attack_cmd += " --format=${identity}"
                attack_cmd += " -w ${wl}"
                if parallel_job_type == "MPI":
                    attack_cmd = f"srun --mpi={slurm.pmix} " + attack_cmd
                elif parallel_job_type == "OMP":
                    attack_cmd = f"srun " + attack_cmd
                if rules and rules_file:
                    attack_cmd += f" --rules={rules} {rules_file}"
                attack_cmd += f" {hashes_file}"

                header_attack = f"echo -e \"\\n\\n[*] Running: {attack_cmd}\""
                insert_cracked_hashes = ''
                if db_status and workspace and db_credential_file:
                    insert_cracked_hashes = (
                        f"amadb -c {db_credential_file} -w {workspace}"
                        f" --cracker {John.MAINNAME} -j {hashes_file}"
                    )

                cracking_block = (
                    "for wl in ${WLS[@]:INIT:END-INIT}; do",
                    "\tfor identity in ${HID[@]}; do",
                    "\t\t" + header_attack,
                    "\t\t" + attack_cmd,
                    "\t\t" + insert_cracked_hashes,
                    "\t\t" + "all_cracked=false",
                    "\t\t" + "if $all_cracked; then break; fi",
                    "\tdone",
                    "done"
                )
                parallel_work = (variable_definition_block, cracking_block)
                slurm_script_name = slurm.gen_batch_script(parallel_work)
                Bash.exec(f"sbatch {slurm_script_name}")

            elif hash_types_len > 1 and wordlists_len == 1:  # one wordlist, several identities
                if array_tasks > 1:
                    if array_tasks > hash_types_len:
                        print_failure(f"These is more array jobs that work to process (ARRAY={array_tasks}, HID={hash_types_len})")
                        # BUG FIX: original referenced undefined name 'hash_type_len' (NameError)
                        print_status(f"Adjusting {ColorStr('ARRAY').StyleBRIGHT} to {hash_types_len} (1 job per hash type)")
                        array_tasks = hash_types_len
                        slurm.set_option('array', array_tasks)

                    # Report which slice of the identities each array task processes.
                    for array_task_id in range(array_tasks):
                        init = floor(hash_types_len/array_tasks)*array_task_id
                        if array_task_id == (array_tasks - 1):
                            end = hash_types_len  # last task absorbs the remainder
                        else:
                            end = floor(hash_types_len/array_tasks)*(array_task_id+1)
                        print_status(f"(array id {array_task_id}) Processing: hash-types={ColorStr(hash_types[init:end]).StyleBRIGHT}, wordlists={ColorStr('ALL').StyleBRIGHT}")

                    HID = self.pylist2bash(hash_types)
                    ARRAY = slurm.sbatch['array'].value  # array enumeration: 0-(ARRAY-1)
                    LEN_HID = "${#HID[@]}"
                    INIT = "$((LEN_HID/ARRAY * SLURM_ARRAY_TASK_ID))"
                    END = "$((LEN_HID/ARRAY * (SLURM_ARRAY_TASK_ID+1)))"
                    variable_definition_block = (
                        f"HID={HID}",
                        f"LEN_HID={LEN_HID}",
                        f"ARRAY={ARRAY}",
                        f"INIT={INIT}",
                        "\nif [[ $SLURM_ARRAY_TASK_ID -eq $((ARRAY -1)) ]]; then",
                        "\t" + "END=$LEN_HID",
                        "else",
                        "\t" + f"END={END}",
                        "fi",
                    )
                else:
                    HID = self.pylist2bash(hash_types)
                    INIT = 0
                    END = hash_types_len
                    variable_definition_block = (
                        f"HID={HID}",
                        f"INIT={INIT}",
                        f"END={END}",
                    )

                wordlist = wordlists[0]
                attack_cmd = f"{self.main_exec} --wordlist={wordlist}"
                attack_cmd += " --format=${identity}"
                if parallel_job_type == "MPI":
                    attack_cmd = f"srun --mpi={slurm.pmix} " + attack_cmd
                elif parallel_job_type == "OMP":
                    attack_cmd = f"srun " + attack_cmd
                if rules and rules_file:
                    attack_cmd += f" --rules={rules} {rules_file}"
                attack_cmd += f" {hashes_file}"

                header_attack = f"echo -e \"\\n\\n[*] Running: {attack_cmd}\""
                insert_cracked_hashes = ''
                if db_status and workspace and db_credential_file:
                    insert_cracked_hashes = (
                        f"amadb -c {db_credential_file} -w {workspace}"
                        f" --cracker {John.MAINNAME} -j {hashes_file}"
                    )

                cracking_block = (
                    "for identity in ${HID[@]:INIT:END-INIT}; do",
                    "\t" + header_attack,
                    "\t" + attack_cmd,
                    "\t" + insert_cracked_hashes,
                    "\t" + "all_cracked=false",
                    "\t" + "if $all_cracked; then break; fi",
                    "done"
                )
                parallel_work = (variable_definition_block, cracking_block)
                slurm_script_name = slurm.gen_batch_script(parallel_work)
                Bash.exec(f"sbatch {slurm_script_name}")

            else:  # hash_types_len == 1 and wordlists_len == 1: single plain job
                if array_tasks > 1:
                    print_failure("There is not much work for performing an array attack")
                    slurm.set_option('array', None)
                    # BUG FIX: compare the Argument's value (as done for 'array'
                    # above), not the Argument object itself. Restore the
                    # non-array default output name.
                    if slurm.sbatch['output'].value == "slurm-%A_%a.out":  # default output name for array jobs
                        slurm.set_option('output', 'slurm-%j.out')

                hash_type = hash_types[0]
                wordlist = wordlists[0]
                attack_cmd = (
                    f"{self.main_exec}"
                    f" --wordlist={wordlist}"
                    f" --format={hash_type}"
                )
                if parallel_job_type == "MPI":
                    attack_cmd = f"srun --mpi={slurm.pmix} " + attack_cmd
                elif parallel_job_type == "OMP":
                    attack_cmd = f"srun " + attack_cmd
                if rules and rules_file:
                    attack_cmd += f" --rules={rules} {rules_file}"
                attack_cmd += f" {hashes_file}"

                header_attack = f"echo -e \"\\n\\n[*] Running: {attack_cmd}\""
                insert_cracked_hashes = ''
                if db_status and workspace and db_credential_file:
                    insert_cracked_hashes = (
                        f"amadb -c {db_credential_file} -w {workspace}"
                        f" --cracker {John.MAINNAME} -j {hashes_file}"
                    )

                cracking_block = (header_attack, attack_cmd, insert_cracked_hashes)
                parallel_work = [cracking_block]
                slurm_script_name = slurm.gen_batch_script(parallel_work)
                # BUG FIX: removed leftover debug breakpoint (import pdb; pdb.set_trace())
                # that halted execution right before job submission.
                Bash.exec(f"sbatch {slurm_script_name}")
        else:
            # Local attack.
            # BUG FIX: the original referenced an undefined 'wordlist' name here
            # (only bound in the slurm branches); iterate every supplied wordlist.
            # Also normalize a falsy hash_types so the loop works (None -> autodetect).
            all_cracked = False
            for wordlist in wordlists:
                for hash_type in (hash_types or [None]):
                    if all_cracked := John.are_all_hashes_cracked(hashes_file):
                        print_successful(f"Hashes in {ColorStr(hashes_file).StyleBRIGHT} were cracked")
                        break
                    attack_cmd = f"{self.main_exec} --wordlist={wordlist}"
                    if hash_type:
                        attack_cmd += f" --format={hash_type}"
                    if rules and rules_file:
                        attack_cmd += f" --rules={rules} {rules_file}"
                    attack_cmd += f" {hashes_file}"
                    print()
                    print_status(f"Running: {ColorStr(attack_cmd).StyleBRIGHT}")
                    Bash.exec(attack_cmd)
                if all_cracked:
                    break

            if db_status and workspace and db_credential_file:
                John.insert_hashes_to_db(hashes_file, workspace, db_credential_file, pretty=True)

    except Exception as error:
        print_failure(error)