Example #1
0
    def q_add(self, q_obj, arg_dict, timeout=30240):
        """
        Add a new crack job to the queue.

        Parameters
        ----------
        q_obj: object
                queue object to use (returned from q_connect)
        arg_dict: dict
                job arguments; must contain 'job_id' (uuid string used as
                the rq job ID) and 'kwargs' (arguments for the worker
                function that runs hashcat)
        timeout: int
                number of seconds before job will time out

        Returns
        -------
        None
        """
        logger.info('Adding task to job queue: '
                    '{:s}'.format(arg_dict['job_id']))
        # A 'speed_session' entry marks a speed-check job, which runs
        # show_speed; everything else is a full hc_worker crack job.
        # (Fix: docstring previously documented a non-existent job_id
        # parameter and a boolean return; the two enqueue_call sites
        # differed only in func, so they are collapsed into one.)
        if 'speed_session' in arg_dict['kwargs']:
            func = run_hashcat.show_speed
        else:
            func = run_hashcat.hc_worker
        q_obj.enqueue_call(func=func, job_id=arg_dict['job_id'],
                           kwargs=arg_dict['kwargs'], timeout=timeout,
                           result_ttl=-1)
        return
Example #2
0
def abort_callback(sender):
    """
    Handle Hashcat's abort event: log it, then surface the hashcat
    event log to the caller by raising ValueError.
    """
    logger.info('Callback Triggered: Aborted')
    log_output = sender.hashcat_status_get_log()
    raise ValueError('Aborted: {}'.format(log_output))
Example #3
0
def test_init_check():
    """Ensure the started-job registry is empty before the test run,
    stopping and deleting any leftover job found there."""
    cur_list = rq.registry.StartedJobRegistry(queue=q).get_job_ids()
    if cur_list:
        try:
            job_id = cur_list[0]
            logger.info('Deleting job: {:s}'.format(job_id))
            job = q.fetch_job(job_id)
            # Ask the worker to stop via job meta, then poll the registry
            job.meta['CrackQ State'] = 'Stop'
            job.save_meta()
            time.sleep(5)
            attempts = 0
            while cur_list and attempts < 9:
                cur_list = rq.registry.StartedJobRegistry(
                    queue=q).get_job_ids()
                time.sleep(5)
                attempts += 2
            job.delete()
            time.sleep(21)
            comp_list = crack_q.check_complete(q)
            assert job_id not in comp_list
            assert len(cur_list) < 1
        except AttributeError as err:
            logger.error('Failed to delete job: {}'.format(err))
    assert len(cur_list) < 1
Example #4
0
def hc_conf():
    """
    Read the CrackQ config file and return its contents as a dict:
    file locations for rules, wordlists, logs etc.

    :return: dict mapping each section name to a dict of its entries
    """
    logger.info("Reading from config file {}".format(conf_file))
    parser = configparser.ConfigParser()
    # Keep option names case-sensitive (default lowercases them)
    parser.optionxform = str
    parser.read(conf_file)
    return {section: dict(parser.items(section))
            for section in parser.sections()}
Example #5
0
def test_wl_del():
    """Stop the wordlist job, delete it, and confirm it is gone from
    both the started registry and the completed list."""
    job_id = '0b7b91482fc24274b7d04fc0d6e61a96'
    try:
        logger.info('Deleting job: {:s}'.format(job_id))
        job = q.fetch_job(job_id)
        registry = rq.registry.StartedJobRegistry(queue=q)
        if job_id in registry.get_job_ids():
            # Signal the worker to stop, then give it time to exit
            job.meta['CrackQ State'] = 'Stop'
            job.save_meta()
            time.sleep(16)
        assert job_id not in registry.get_job_ids()
        job.delete()
        time.sleep(10)
        assert job_id not in crack_q.check_complete(q)
    except AttributeError as err:
        logger.error('Failed to delete job: {}'.format(err))
Example #6
0
def test_stop_wl():
    """Stop the running wordlist job and wait for it to leave the
    started-job registry."""
    job_id = '0b7b91482fc24274b7d04fc0d6e61a96'
    try:
        logger.info('Stopping job: {:s}'.format(job_id))
        job = q.fetch_job(job_id)
        registry = rq.registry.StartedJobRegistry(queue=q)
        if job_id in registry.get_job_ids():
            # Signal the worker to stop via job meta
            job.meta['CrackQ State'] = 'Stop'
            job.save_meta()
        # Poll the registry a fixed number of times while the job winds down
        for _ in range(5):
            time.sleep(21)
            registry.get_job_ids()
        assert job_id not in registry.get_job_ids()
    except AttributeError as err:
        logger.error('Failed to stop job: {}'.format(err))
Example #7
0
def test_del():
    """Stop the mask job, delete it, and confirm it is gone from both
    the started registry and the completed list."""
    job_id = '63ece9904eb8478896baf3300a2c9513'
    try:
        logger.info('Deleting job: {:s}'.format(job_id))
        job = q.fetch_job(job_id)
        registry = rq.registry.StartedJobRegistry(queue=q)
        if job_id in registry.get_job_ids():
            # Signal the worker to stop, then give it time to exit
            job.meta['CrackQ State'] = 'Stop'
            job.save_meta()
            time.sleep(21)
        assert job_id not in registry.get_job_ids()
        job.delete()
        time.sleep(20)
        assert job_id not in crack_q.check_complete(q)
    except AttributeError as err:
        logger.error('Failed to delete job: {}'.format(err))
Example #8
0
def test_stop():
    """Stop a running crack job and verify it leaves the started registry."""
    # Job ID expected to exist in the queue when this test runs
    job_id = '63ece9904eb8478896baf3300a2c9513'
    try:
        logger.info('Stopping job: {:s}'.format(job_id))
        job = q.fetch_job(job_id)
        started = rq.registry.StartedJobRegistry(queue=q)
        cur_list = started.get_job_ids()
        wait_counter = 0
        if job_id in cur_list:
            # Signal the worker to stop via job meta (polled by hc_worker)
            job.meta['CrackQ State'] = 'Stop'
            job.save_meta()
        time.sleep(21)
        # NOTE(review): 'not cur_list' means this loop only runs when the
        # registry snapshot taken BEFORE the stop was empty; if the job was
        # actually running, the loop is skipped and nothing is re-polled.
        # Looks inverted (should probably poll while the job is still
        # listed) -- confirm intent before changing.
        while wait_counter < 5 and not cur_list:
            time.sleep(15)
            cur_list = started.get_job_ids()
            wait_counter += 1
            if len(cur_list) > 0:
                assert job_id not in cur_list
        #assert job_id not in cur_list
    except AttributeError as err:
        logger.error('Failed to stop job: {}'.format(err))
Example #9
0
def hc_worker(crack=None, hash_file=None, session=None,
              wordlist=None, outfile=None, hash_mode=1000,
              attack_mode=None, mask=None, rules=None, name=None,
              username=False, pot_path=None, restore=None,
              brain=True, mask_file=False, increment=False,
              increment_min=None, increment_max=None, speed=True,
              benchmark=False, benchmark_all=False, wordlist2=None):
    """
    rq worker function: start a hashcat session and poll it to completion.

    Starts hashcat via runner(), wires up the event callbacks, then
    loops reading the session status while honouring control commands
    ('Stop'/'Delete'/'Pause') written into the rq job's meta by the API.

    Arguments
    ---------
    crack: object
        Hashcat execution python object for rq to execute
    hash_file: string
        File containing hashes to feed to hashcat
    session: str
        Hashcat session name (also used as the rq job ID)
    wordlist: string
        Wordlist to feed Hashcat
    (remaining arguments are passed through to runner())

    Returns
    -------
    str or None
        'Exhausted' or 'Cracked' on completion; None when the job is
        stopped or deleted via its meta state

    Raises
    ------
    ValueError
        When hashcat aborts, or hangs during initialization
    """
    # Guard against a non-integer attack mode sneaking through the API
    if attack_mode:
        if not isinstance(attack_mode, int):
            attack_mode = None
    hcat = runner(hash_file=hash_file, mask=mask,
                  session=session, wordlist=wordlist,
                  outfile=outfile, attack_mode=attack_mode,
                  hash_mode=hash_mode, rules=rules,
                  username=username, pot_path=pot_path,
                  restore=restore, brain=brain, wordlist2=wordlist2,
                  benchmark=benchmark, benchmark_all=benchmark_all)
    hcat.event_connect(callback=error_callback,
                       signal="EVENT_LOG_ERROR")
    hcat.event_connect(callback=warning_callback,
                       signal="EVENT_LOG_WARNING")
    if benchmark:
        hcat.event_connect(callback=bench_callback,
                           signal="EVENT_CRACKER_FINISHED")
        hcat.event_connect(callback=finished_callback,
                           signal="EVENT_OUTERLOOP_FINISHED")
        hcat.event_connect(callback=any_callback,
                           signal="ANY")
    else:
        hcat.event_connect(callback=finished_callback,
                           signal="EVENT_CRACKER_FINISHED")
        hcat.event_connect(callback=cracked_callback,
                           signal="EVENT_CRACKER_HASH_CRACKED")
    try:
        main_counter = 0
        while True:
            hc_state = hcat.status_get_status_string()
            logger.debug('MAIN loop')
            if hc_state == 'Exhausted' and not mask_file:
                finished_callback(hcat)
                return 'Exhausted'
            if hc_state == 'Exhausted' and mask_file:
                # Workaround for mask files: presumably hashcat can report
                # 'Exhausted' transiently between masks, so wait and
                # RE-READ the status before treating the job as finished.
                # (Fix: the original re-checked the stale local hc_state,
                # making the 30s wait a no-op.)
                sleep(30)
                hc_state = hcat.status_get_status_string()
                if hc_state == 'Exhausted':
                    logger.info('checking mask file')
                    finished_callback(hcat)
                    return 'Exhausted'
            elif hc_state == 'Cracked':
                cracked_callback(hcat)
                return 'Cracked'
            elif hc_state == 'Aborted':
                logger.debug('Hashcat Abort status returned')
                event_log = hcat.hashcat_status_get_log()
                raise ValueError('Aborted: {}'.format(event_log))
            elif main_counter > 2000 and hc_state != 'Running' and mask_file == False:
                # Never reached 'Running' within the timeout -- treat as hung
                logger.debug('Reseting job, seems to be hung')
                raise ValueError('Error: Hashcat hung - Initialize timeout')
            else:
                logger.debug('HC State: {}'.format(hc_state))
                if 'Initializing' not in hc_state:
                    init_callback(hcat)
                    logger.debug('Hashcat initialized')
                job = redis_q.fetch_job(str(hcat.session))
                speed_started = rq.registry.StartedJobRegistry(queue=speed_q)
                cur_speed = speed_started.get_job_ids()
                if job:
                    if job.meta['CrackQ State'] == 'Stop':
                        logger.info('Stopping Job: {}'.format(hcat.session))
                        hcat.hashcat_session_quit()
                        return
                    elif job.meta['CrackQ State'] == 'Delete':
                        logger.info('Deleting Job: {}'.format(hcat.session))
                        speed_session = '{}_speed'.format(hcat.session)
                        speed_job = speed_q.fetch_job(speed_session)
                        if speed_job:
                            # Wait (bounded busy-loop) for any companion
                            # speed-check job to finish before deleting it
                            logger.debug('Deleting speed job')
                            speed_status = speed_job.get_status()
                            finished_states = ['finished',
                                               'failed']
                            del_count = 0
                            while (speed_status not in finished_states
                                   and del_count < 100):
                                logger.debug('DELETE wait loop')
                                speed_status = speed_job.get_status()
                                del_count += 1
                            logger.debug('Breaking runner loop speed check job has finished')
                            speed_job.delete()
                        cq_api.del_jobid(hcat.session)
                        hcat.hashcat_session_quit()
                        hcat.reset()
                        return
                    elif job.meta['CrackQ State'] == 'Pause':
                        hcat.hashcat_session_pause()
                        pause_counter = 0
                        logger.debug('Pausing job: {}'.format(hcat.session))
                        logger.debug('PAUSE loop begin')
                        # Spin until hashcat confirms the pause, the job is
                        # flagged for deletion, or we give up
                        while pause_counter < 400:
                            if hcat.status_get_status_string() == 'Paused':
                                logger.info('Job Paused: {}'.format(hcat.session))
                                break
                            elif del_check(job):
                                break
                            pause_counter += 1
                        logger.debug('PAUSE loop finished')
                        if hcat.status_get_status_string() != 'Paused':
                            logger.debug('Pause failed: {}'.format(hc_state))
                        # Resume a stale paused job when no speed check is
                        # running and the job is not being deleted.
                        if len(cur_speed) < 1:
                            if not del_check(job):
                                logger.debug('Stale paused job caught, resuming')
                                # Fix: was '==' (a no-op comparison), so the
                                # restored state was never actually saved
                                job.meta['CrackQ State'] = 'Run/Restored'
                                job.save_meta()
                                hcat.hashcat_session_resume()
                    elif hc_state == 'Bypass':
                        logger.debug('Error: Bypass not cleared')
                    else:
                        logger.debug('Haschat state: {}'.format(hc_state))
                        if len(cur_speed) < 1:
                            if not del_check(job):
                                if hcat.status_get_status_string() == 'Paused':
                                    logger.debug('Stale paused job caught, resuming')
                                    # Fix: was '==' (no-op comparison), same
                                    # as the Pause branch above
                                    job.meta['CrackQ State'] = 'Run/Restored'
                                    job.save_meta()
                                    hcat.hashcat_session_resume()
                else:
                    logger.error('Error finding redis job')
            sleep(10)
            main_counter += 10
    except KeyboardInterrupt:
        hcat.hashcat_session_quit()
        exit(0)
    except Exception as err:
        logger.error('MAIN loop closed: {}'.format(err))
Example #10
0
def runner(hash_file=None, hash_mode=1000,
           attack_mode=0, rules=None,
           mask=None, wordlist=None, session=None,
           outfile=None, restore=None, username=False,
           pot_path=None, show=False, brain=True,
           increment=False, increment_min=None,
           increment_max=False, speed=False, benchmark=False,
           benchmark_all=False, wordlist2=None):
    """
    Build a configured Hashcat session object and start it executing.

    Handles benchmark-only and speed-only runs, optional brain
    enablement (gated on the results of a companion '<session>_speed'
    speed-check job), markov/charset setup and restore points.
    Returns the started Hashcat instance.
    """
    logger.info('Running hashcat')
    hc = Hashcat()
    logger.debug('Hashcat object ID: {}'.format(id(hc)))
    hc.session = session
    if benchmark:
        # Benchmark runs need no hash/wordlist setup -- start and return
        logger.debug('Running in benchmark mode')
        hc.benchmark = True
        if benchmark_all:
            hc.benchmark_all = True
        hc.hashcat_session_execute()
        return hc
    hc.potfile_disable = False
    hc.restore_disable = True
    hc.show = show
    if pot_path:
        hc.potfile_path = pot_path
    hc.quiet = False
    hc.optimized_kernel_enable = True
    hc.workload_profile = 4
    if username is True:
        hc.username = True
    if increment is True:
        hc.increment = True
    # Only honour increment bounds when they are actual ints
    if increment_min:
        if isinstance(increment_min, int):
            hc.increment_min = increment_min
    if increment_max:
        if isinstance(increment_max, int):
            hc.increment_max = increment_max
    hc.hash = hash_file
    hc.attack_mode = attack_mode
    if rules:
        hc.rules = rules
        hc.rp_files_cnt = len(rules)
    hc.hash_mode = hash_mode
    if wordlist:
        hc.dict1 = wordlist
    if wordlist2:
        hc.dict2 = wordlist2
    if mask:
        hc.mask = mask
    if speed:
        # Speed-check run: measure only, start immediately and return
        hc.speed_only = True
        hc.hashcat_session_execute()
        return hc
    if brain:
        # Decide whether to enable the hashcat brain, using the outcome
        # of the companion '<session>_speed' speed-check job
        speed_session = '{}_speed'.format(session)
        job = redis_q.fetch_job(session)
        if 'brain_check' in job.meta:
            # Restored job: reuse the previously stored brain decision
            logger.debug('Restored job already has brain check state')
            speed_job = None
            if job.meta['brain_check'] is True:
                hc.brain_client = brain
                hc.brain_client_features = 3
                ###***replace with random string
                hc.brain_password = '******'
                speed_job = None
            else:
                speed_job = speed_q.fetch_job(speed_session)
        else:
            speed_job = speed_q.fetch_job(speed_session)
        wait_count = 0
        if speed_job:
            # Wait (up to ~410s, 5s steps) for the speed job to populate
            # its meta; bail out early if the job is deleted or fails
            while len(speed_job.meta) < 1 and wait_count < 410:
                logger.debug('RUNNER loop')
                logger.debug('Speed meta not populated, waiting...')
                if job:
                    if del_check(job):
                        return hc
                if 'failed' in speed_job.get_status():
                    crack_q = crackqueue.Queuer()
                    err_msg = crack_q.error_parser(speed_job)
                    logger.error('Speed check failed: {}'.format(err_msg))
                    if job:
                        job.meta['brain_check'] = None
                        job.save_meta()
                    raise ValueError('Aborted, speed check failed: {}'.format(err_msg))
                elif 'finished' in speed_job.get_status():
                    # NOTE(review): logs "Breaking" but does not break --
                    # the loop actually exits only once meta is populated
                    # or wait_count expires; confirm intent
                    logger.debug('Breaking runner loop speed check job has finished')
                    if job:
                        if del_check(job):
                            return hc
                elif 'CrackQ State' in speed_job.meta:
                    if del_check(speed_job):
                        return hc
                time.sleep(5)
                wait_count += 5
                speed_job = speed_q.fetch_job(speed_session)
            logger.debug('RUNNER loop finished')
            if 'Mode Info' in speed_job.meta:
                # Mode Info ends with [..., speed, salts]; use them to
                # decide whether the brain is worthwhile for this job
                mode_info = speed_job.meta['Mode Info']
                salts = mode_info[-1]
                speed = int(mode_info[-2])
                brain = brain_check(speed, salts)
                hc.brain_client = brain
                hc.brain_client_features = 3
                ###***replace with random string
                hc.brain_password = '******'
                # Persist the decision so a restored job can skip the check
                if brain is True:
                    if job:
                        job.meta['brain_check'] = True
                        job.save_meta()
                if brain is False:
                    if job:
                        job.meta['brain_check'] = False
                        job.save_meta()
            else:
                # Speed job produced no usable meta -- run without brain
                logger.error('Speed check error, disabling brain')
                if job:
                    job.meta['brain_check'] = None
                    if not del_check(job):
                        job.meta['CrackQ State'] = 'Run/Restored'
                        job.save_meta()
        else:
            logger.error('No speed job to check')
            if job and not del_check(job):
                job.meta['CrackQ State'] = 'Run/Restored'
                job.save_meta()
    # Markov stats file and custom charsets used by mask attacks
    markov_file = str(valid.val_filepath(path_string=file_dir,
                                         file_string='crackq.hcstat'))
    hc.markov_hcstat2 = markov_file
    hc.custom_charset_1 = '?l?d'
    hc.custom_charset_2 = '?l?d?u'
    hc.custom_charset_3 = '?l?d?s'
    hc.custom_charset_4 = '?u?d?s'
    hc.outfile = outfile
    logger.debug('HC. Hashcat Rules: {}'.format(hc.rules))
    logger.debug('HC. Hashcat rp_files_cnt: {}'.format(hc.rp_files_cnt))
    if restore:
        # Restore point: skip keyspace already covered by a prior run
        hc.skip = int(restore)
    hc.hashcat_session_execute()
    # If a speed check is currently running, pause this new job so the
    # two don't compete for the GPU; hc_worker resumes it later
    speed_started = rq.registry.StartedJobRegistry(queue=speed_q)
    cur_speed = speed_started.get_job_ids()
    if len(cur_speed) > 0:
        job = redis_q.fetch_job(session)
        if job:
            if not del_check(job):
                logger.debug('Speed job running, setting new job to Paused')
                job.meta['CrackQ State'] = 'Pause'
                job.save_meta()
    return hc