Esempio n. 1
0
def bench_callback(sender):
    """
    Record benchmark results for the current hashcat session.

    Pulls the latest status from the sender, stores the raw and
    formatted speed for the session's hash mode in the rq job meta,
    and mirrors the full meta dict to the sys_benchmark.json file.
    """
    logger.debug('Callback Triggered: Benchmark')
    status_dict = status(sender)
    session = str(sender.session)
    job = redis_q.fetch_job(session)
    if not job:
        logger.error('Failed to write benchmark job meta: {}'.format(session))
        return
    speed_raw = int(status_dict['Speed Raw']) * 1000
    speed_format = status_dict['Speed All']
    hash_mode = str(sender.hash_mode).strip()
    # Create the Benchmarks dict on first use, then record this mode
    benchmarks = job.meta.setdefault('Benchmarks', {})
    benchmarks[hash_mode] = [speed_raw, speed_format]
    job.save_meta()
    log_dir = CRACK_CONF['files']['log_dir']
    bench_file = valid.val_filepath(path_string=log_dir,
                                    file_string='sys_benchmark.json')
    with open(bench_file, 'w') as bench_fh:
        logger.debug('Writing results to benchmark file')
        bench_fh.write(json.dumps(job.meta))
Esempio n. 2
0
def test_init_check():
    """Check the queue is empty first"""
    cur_list = rq.registry.StartedJobRegistry(queue=q).get_job_ids()
    if cur_list:
        try:
            job_id = cur_list[0]
            logger.info('Deleting job: {:s}'.format(job_id))
            job = q.fetch_job(job_id)
            job.meta['CrackQ State'] = 'Stop'
            job.save_meta()
            time.sleep(5)
            # Poll the started registry until it drains or we give up
            poll_count = 0
            while cur_list and poll_count < 9:
                cur_list = rq.registry.StartedJobRegistry(
                    queue=q).get_job_ids()
                time.sleep(5)
                poll_count += 2
            job.delete()
            time.sleep(21)
            comp_list = crack_q.check_complete(q)
            assert job_id not in comp_list
            assert len(cur_list) < 1
        except AttributeError as err:
            logger.error('Failed to delete job: {}'.format(err))
    assert len(cur_list) < 1
Esempio n. 3
0
File: auth.py Project: zha0/crackq
 def s_client(self):
     """
     Setup and return the SAML client with specified config

     Ensures a local copy of the IDP metadata exists (fetching it
     from self.meta_url when the cached file is missing or empty),
     then builds a pysaml2 service-provider configuration and
     returns a client for it.

     Returns
     -------
     client: Saml2Client
         configured SAML2 service-provider client
     """
     acs_url = url_for('sso', _scheme='https', _external=True)
     logger.debug('SSO ACS URL: {}'.format(acs_url))
     logout_url = url_for('logout', _scheme='https', _external=True)
     try:
         with open(self.meta_file, 'r') as meta_fh:
             meta_len = len(meta_fh.read())
         if meta_len < 1:
             # Cached metadata file exists but is empty: re-fetch it
             self._fetch_metadata()
     except FileNotFoundError:
         # No cached metadata yet: fetch the initial copy.
         # (Previously this path fetched without any error handling,
         # unlike the empty-file path above.)
         self._fetch_metadata()
     ###***review all of these settings
     settings = {
         'metadata': {
             "local": [self.meta_file]
         },
         'service': {
             'sp': {
                 'name_id_format': 'None',
                 'endpoints': {
                     'assertion_consumer_service':
                     [(acs_url, BINDING_HTTP_REDIRECT),
                      (acs_url, BINDING_HTTP_POST)],
                     'single_logout_service':
                     [(logout_url, BINDING_HTTP_REDIRECT)]
                 },
                 ###***update some of these if possible
                 'allow_unsolicited': True,
                 'authn_requests_signed': False,
                 'logout_requests_signed': True,
                 'want_assertions_signed': True,
                 'want_response_signed': False,
                 'attribute_map_dir': './attributemaps',
             },
         },
     }
     sp_config = Saml2Config()
     sp_config.load(settings)
     sp_config.entityid = self.entity_id
     sp_config.allow_unknown_attributes = True
     client = Saml2Client(config=sp_config)
     return client

 def _fetch_metadata(self):
     """
     Download the IDP metadata from self.meta_url and cache it to
     self.meta_file, logging (not raising) on any failure.
     """
     try:
         res = requests.get(self.meta_url)
         with open(self.meta_file, 'w') as meta_fh:
             meta_fh.write(res.text)
     except Exception as err:
         logger.error(
             'Invalid SAML metadata file/s provided:\n{}'.format(
                 err))
Esempio n. 4
0
def send_email(mail_server, port, src, dest, sub, tls):
    """
    Simple email notification

    Arguments
    --------
    mail_server: str
        email server hostname/ip
    port: int
        port to use
    src: str
        email from address
    dest: str
        email to address
    sub: str
        email subject line
    tls: boolean
        use encryption for SMTP

    Returns
    -------
    """
    # .get() avoids a KeyError when the env vars are unset; the
    # original subscripted os.environ directly and also left the
    # variables unbound when the vars were missing
    mail_username = os.environ.get('MAIL_USERNAME')
    mail_password = os.environ.get('MAIL_PASSWORD')
    msg = MIMEText('')
    msg['To'] = email.utils.formataddr(('CrackQ', dest))
    msg['From'] = email.utils.formataddr(('CrackQ', src))
    msg['Subject'] = sub
    server = None
    try:
        server = smtplib.SMTP(mail_server, port)
        if tls:
            server.starttls()
            # NOTE(review): credentials are only used on the TLS path,
            # matching the original behaviour -- confirm this is intended
            if mail_username and mail_password:
                server.login(mail_username, mail_password)
        server.sendmail(src, [dest],
                        msg.as_string())
        server.quit()
    except TimeoutError:
        logger.error('SMTP connection error - timeout')
        # server stays None if SMTP() itself failed; guard the quit
        if server:
            server.quit()
    except ssl.SSLError as err:
        logger.debug(err)
        logger.error('SMTP SSL/TLS error')
        if server:
            server.quit()
Esempio n. 5
0
def finished_callback(sender):
    """
    Callback fired when hashcat signals that cracking has finished.

    Sends an inactivity-gated completion email (at most twice per job)
    when the job has notifications enabled, then writes the result
    file. Benchmark sessions additionally get their status reset.
    """
    logger.debug('Callback Triggered: Cracking Finished')
    if CRACK_CONF['notify']:
        notify_conf = CRACK_CONF['notify']
        mail_server = notify_conf['mail_server']
        mail_port = notify_conf['mail_port']
        email_src = notify_conf['src']
        inactive_time = notify_conf['inactive_time']
        tls = notify_conf['tls']
    session = sender.session
    job = redis_q.fetch_job(session)
    if job:
        if 'notify' not in job.meta.keys():
            job.meta['Warning'] = "Notification settings error"
            job.save()
        else:
            logger.debug('Sending notification')
            if job.meta['notify']:
                if 'email' not in job.meta.keys():
                    job.meta['Warning'] = "No email address in profile"
                    job.save()
                else:
                    user_email = job.meta['email']
                    try:
                        time_fmt = '%Y-%m-%d %H:%M:%S'
                        # Truncate "now" to whole seconds to match the
                        # stored last_seen resolution
                        now = datetime.strptime(
                            datetime.now().strftime(time_fmt), time_fmt)
                        last = datetime.strptime(job.meta['last_seen'],
                                                 time_fmt)
                        inactive_time = timedelta(minutes=int(inactive_time))
                        activity = now - last
                        if (activity > inactive_time
                                and job.meta['email_count'] < 2):
                            sub = 'CrackQ: Job complete notification'
                            send_email(mail_server, mail_port,
                                       email_src, user_email, sub, tls)
                            job.meta['email_count'] += 1
                            job.save()
                    except Exception as err:
                        logger.error('Failed to connect to mail server')
                        logger.error(err)
    else:
        logger.debug('No job yet')
    write_result(sender)
    if sender.benchmark:
        sender.status_reset()
Esempio n. 6
0
def error_callback(sender):
    """
    Callback fired on a hashcat error event.

    Logs the hashcat event log and records it in the meta of the
    job currently in the started registry (if any).
    """
    logger.debug('Callback Triggered: ERROR')
    msg_buf = sender.hashcat_status_get_log()
    logger.debug('{}'.format(msg_buf))
    started = rq.registry.StartedJobRegistry(queue=redis_q)
    logger.error('{}'.format(msg_buf))
    running = started.get_job_ids()
    if running:
        session = running[0]
        logger.error('{}: {}'.format(session, msg_buf))
        job = redis_q.fetch_job(session)
        job.meta['ERROR'] = msg_buf
        job.save_meta()
Esempio n. 7
0
    def get_home(cls):
        """
         Get and validate home directory from env variable

         Returns
         -------
         home: Path obj
            validated user home path; False on a ValueError

         Raises
         ------
         Exception
            if the home directory is not under /home/
        """
        try:
            home_path = Path.home()
            # Path objects have no .startswith(); the original called
            # Path.home().startswith() and always raised AttributeError.
            # Compare the string form instead.
            if str(home_path).startswith('/home/'):
                return home_path
            else:
                raise Exception('Invalid $HOME env variable detected')
        except ValueError:
            logger.error('Invalid $HOME env variable detected')
            return False
Esempio n. 8
0
def test_wl_del():
    """Stop and delete the wordlist job, then confirm it is gone."""
    job_id = '0b7b91482fc24274b7d04fc0d6e61a96'
    try:
        logger.info('Deleting job: {:s}'.format(job_id))
        job = q.fetch_job(job_id)
        registry = rq.registry.StartedJobRegistry(queue=q)
        if job_id in registry.get_job_ids():
            # Ask the worker to stop, then give it time to comply
            job.meta['CrackQ State'] = 'Stop'
            job.save_meta()
            time.sleep(16)
        cur_list = registry.get_job_ids()
        assert job_id not in cur_list
        job.delete()
        time.sleep(10)
        assert job_id not in crack_q.check_complete(q)
    except AttributeError as err:
        logger.error('Failed to delete job: {}'.format(err))
Esempio n. 9
0
def test_stop_wl():
    """Stop the wordlist job and verify it leaves the started registry."""
    job_id = '0b7b91482fc24274b7d04fc0d6e61a96'
    try:
        logger.info('Stopping job: {:s}'.format(job_id))
        job = q.fetch_job(job_id)
        registry = rq.registry.StartedJobRegistry(queue=q)
        if job_id in registry.get_job_ids():
            job.meta['CrackQ State'] = 'Stop'
            job.save_meta()
        # Give the worker several polling intervals to act on the stop
        for _ in range(5):
            time.sleep(21)
            registry.get_job_ids()
        cur_list = registry.get_job_ids()
        assert job_id not in cur_list
    except AttributeError as err:
        logger.error('Failed to stop job: {}'.format(err))
Esempio n. 10
0
def test_del():
    """Stop and delete the job, then confirm it is gone."""
    job_id = '63ece9904eb8478896baf3300a2c9513'
    try:
        logger.info('Deleting job: {:s}'.format(job_id))
        job = q.fetch_job(job_id)
        registry = rq.registry.StartedJobRegistry(queue=q)
        if job_id in registry.get_job_ids():
            # Ask the worker to stop, then give it time to comply
            job.meta['CrackQ State'] = 'Stop'
            job.save_meta()
            time.sleep(21)
        cur_list = registry.get_job_ids()
        assert job_id not in cur_list
        job.delete()
        time.sleep(20)
        assert job_id not in crack_q.check_complete(q)
    except AttributeError as err:
        logger.error('Failed to delete job: {}'.format(err))
Esempio n. 11
0
def cracked_callback(sender):
    """
    Callback function to take action on hashcat signal.
    Action is to write the latest cracked hashes

    Also sends an inactivity-gated email notification (at most once
    per job) when the job has notifications enabled.
    """
    logger.debug('Callback Triggered: Cracked')
    status_dict = status(sender)
    logger.debug('Hashcat status: {}'.format(status_dict))
    # NOTE(review): if CRACK_CONF['notify'] is falsy these settings stay
    # unbound and send_email below would raise NameError (caught by the
    # generic handler) -- confirm config always defines 'notify'
    if CRACK_CONF['notify']:
        mail_server = CRACK_CONF['notify']['mail_server']
        mail_port = CRACK_CONF['notify']['mail_port']
        email_src = CRACK_CONF['notify']['src']
        inactive_time = CRACK_CONF['notify']['inactive_time']
        tls = CRACK_CONF['notify']['tls']
    session = sender.session
    job = redis_q.fetch_job(session)
    if job:
        if 'notify' in job.meta.keys():
            logger.debug('Sending notification')
            if job.meta['notify']:
                if 'email' in job.meta.keys():
                    user_email = job.meta['email']
                    try:
                        now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                        now = datetime.strptime(now,
                                                '%Y-%m-%d %H:%M:%S')
                        last = datetime.strptime(job.meta['last_seen'],
                                                 '%Y-%m-%d %H:%M:%S')
                        inactive_time = timedelta(minutes=int(inactive_time))
                        activity = now - last
                        # Only notify once per job (email_count < 1)
                        if (activity > inactive_time and job.meta['email_count'] < 1):
                            sub = 'CrackQ: Hash cracked notification'
                            send_email(mail_server, mail_port,
                                       email_src, user_email, sub, tls)
                            job.meta['email_count'] += 1
                            job.save()
                    # ssl.SSLError subclasses Exception and both original
                    # handlers were identical, so one handler suffices
                    except Exception as err:
                        logger.error('Failed to connect to mail server')
                        logger.error(err)
                else:
                    job.meta['Warning'] = "No email address in profile"
                    job.save()
        else:
            job.meta['Warning'] = "Notification settings error"
            job.save()
    else:
        logger.debug('No job yet')
    write_result(sender)
Esempio n. 12
0
    def val_path(cls, path_string):
        """
        Get and parse a path string

        Arguments
        ---------
        path_string: Path
            string representation of path name

        Returns
        -------
        str | bool
            sanitized path string, or False when validation fails
        """
        try:
            pv.validate_filepath(path_string, platform='auto')
            return pv.sanitize_filepath(path_string, platform='auto')
        except ValueError:
            logger.error('Invalid filepath provided')
            return False
Esempio n. 13
0
    def val_file(cls, file_string):
        """
        Get and parse a file string

        Arguments
        ---------
        file_string: string
            string representation of file name

        Returns
        --------
        str | bool
            sanitized file name, or False when validation fails
        """
        try:
            pv.validate_filename(file_string, platform='auto')
            return pv.sanitize_filename(file_string, platform='auto')
        except ValueError:
            logger.error('Invalid file name')
            return False
Esempio n. 14
0
def test_stop():
    """Flag the job to stop and poll the started registry."""
    job_id = '63ece9904eb8478896baf3300a2c9513'
    try:
        logger.info('Stopping job: {:s}'.format(job_id))
        job = q.fetch_job(job_id)
        registry = rq.registry.StartedJobRegistry(queue=q)
        cur_list = registry.get_job_ids()
        wait_counter = 0
        if job_id in cur_list:
            job.meta['CrackQ State'] = 'Stop'
            job.save_meta()
        time.sleep(21)
        # NOTE(review): this loop only spins while the registry is EMPTY,
        # which looks inverted compared to test_stop_wl -- confirm intent
        while wait_counter < 5 and not cur_list:
            time.sleep(15)
            cur_list = registry.get_job_ids()
            wait_counter += 1
            if cur_list:
                assert job_id not in cur_list
    except AttributeError as err:
        logger.error('Failed to stop job: {}'.format(err))
Esempio n. 15
0
def hc_worker(crack=None, hash_file=None, session=None,
              wordlist=None, outfile=None, hash_mode=1000,
              attack_mode=None, mask=None, rules=None, name=None,
              username=False, pot_path=None, restore=None,
              brain=True, mask_file=False, increment=False,
              increment_min=None, increment_max=None, speed=True,
              benchmark=False, benchmark_all=False, wordlist2=None):
    """
    Method to load a rq worker to take jobs from redis queue for execution

    Spawns a hashcat session via runner(), wires up the event
    callbacks, then polls the hashcat status in a loop, acting on the
    job's 'CrackQ State' meta (Stop/Delete/Pause) until the session
    finishes, is cracked, aborts, or appears hung.

    ###***finish this
    Arguments
    ---------
    crack: object
        Hashcat execution python object for rq to execute
    hash_file: string
        File containing hashes to feed to hashcat
    session: Hashcat session
    wordlist: Wordlist to feed Hashcat
    (remaining arguments are passed through to runner())

    Returns
    -------
    str or None
        'Exhausted'/'Cracked' on completion, None when stopped/deleted
    """
    # Sanitize attack_mode: anything non-int is dropped
    if attack_mode:
        if not isinstance(attack_mode, int):
            attack_mode = None
    hcat = runner(hash_file=hash_file, mask=mask,
                  session=session, wordlist=wordlist,
                  outfile=outfile, attack_mode=attack_mode,
                  hash_mode=hash_mode, rules=rules,
                  username=username, pot_path=pot_path,
                  restore=restore, brain=brain, wordlist2=wordlist2,
                  benchmark=benchmark, benchmark_all=benchmark_all)
    hcat.event_connect(callback=error_callback,
                       signal="EVENT_LOG_ERROR")
    hcat.event_connect(callback=warning_callback,
                       signal="EVENT_LOG_WARNING")
    if benchmark:
        hcat.event_connect(callback=bench_callback,
                           signal="EVENT_CRACKER_FINISHED")
        hcat.event_connect(callback=finished_callback,
                           signal="EVENT_OUTERLOOP_FINISHED")
        hcat.event_connect(callback=any_callback,
                           signal="ANY")
    else:
        hcat.event_connect(callback=finished_callback,
                           signal="EVENT_CRACKER_FINISHED")
        hcat.event_connect(callback=cracked_callback,
                           signal="EVENT_CRACKER_HASH_CRACKED")
    try:
        main_counter = 0
        while True:
            hc_state = hcat.status_get_status_string()
            logger.debug('MAIN loop')
            if hc_state == 'Exhausted' and not mask_file:
                finished_callback(hcat)
                return 'Exhausted'
            if hc_state == 'Exhausted' and mask_file:
                # workaround for mask files
                ###***this needs to be better, some cases could exit early
                sleep(30)
                # Refresh the status after the wait: the original
                # re-tested the stale hc_state value, so the nested
                # checks were no-ops
                hc_state = hcat.status_get_status_string()
                if hc_state == 'Exhausted':
                    logger.info('checking mask file')
                    finished_callback(hcat)
                    return 'Exhausted'
            elif hc_state == 'Cracked':
                cracked_callback(hcat)
                return 'Cracked'
            elif hc_state == 'Aborted':
                logger.debug('Hashcat Abort status returned')
                event_log = hcat.hashcat_status_get_log()
                raise ValueError('Aborted: {}'.format(event_log))
            elif main_counter > 2000 and hc_state != 'Running' and not mask_file:
                # ~2000s without reaching Running: assume init hang
                logger.debug('Reseting job, seems to be hung')
                raise ValueError('Error: Hashcat hung - Initialize timeout')
            else:
                logger.debug('HC State: {}'.format(hc_state))
                if 'Initializing' not in hc_state:
                    init_callback(hcat)
                    logger.debug('Hashcat initialized')
                job = redis_q.fetch_job(str(hcat.session))
                speed_started = rq.registry.StartedJobRegistry(queue=speed_q)
                cur_speed = speed_started.get_job_ids()
                if job:
                    if job.meta['CrackQ State'] == 'Stop':
                        logger.info('Stopping Job: {}'.format(hcat.session))
                        hcat.hashcat_session_quit()
                        return
                    elif job.meta['CrackQ State'] == 'Delete':
                        logger.info('Deleting Job: {}'.format(hcat.session))
                        speed_session = '{}_speed'.format(hcat.session)
                        speed_job = speed_q.fetch_job(speed_session)
                        if speed_job:
                            logger.debug('Deleting speed job')
                            speed_status = speed_job.get_status()
                            finished_states = ['finished',
                                               'failed']
                            # Bounded busy-wait for the speed job to settle
                            del_count = 0
                            while (speed_status not in finished_states
                                   and del_count < 100):
                                logger.debug('DELETE wait loop')
                                speed_status = speed_job.get_status()
                                del_count += 1
                            logger.debug('Breaking runner loop speed check job has finished')
                            speed_job.delete()
                        cq_api.del_jobid(hcat.session)
                        hcat.hashcat_session_quit()
                        hcat.reset()
                        return
                    elif job.meta['CrackQ State'] == 'Pause':
                        hcat.hashcat_session_pause()
                        pause_counter = 0
                        logger.debug('Pausing job: {}'.format(hcat.session))
                        logger.debug('PAUSE loop begin')
                        while pause_counter < 400:
                            if hcat.status_get_status_string() == 'Paused':
                                logger.info('Job Paused: {}'.format(hcat.session))
                                break
                            elif del_check(job):
                                break
                            pause_counter += 1
                        logger.debug('PAUSE loop finished')
                        if hcat.status_get_status_string() != 'Paused':
                            logger.debug('Pause failed: {}'.format(hc_state))
                        ###***below not needed?
                        if len(cur_speed) < 1:
                            if not del_check(job):
                                logger.debug('Stale paused job caught, resuming')
                                # Fixed: original used '==' (a no-op
                                # comparison) instead of assignment here
                                job.meta['CrackQ State'] = 'Run/Restored'
                                job.save_meta()
                                hcat.hashcat_session_resume()
                    elif hc_state == 'Bypass':
                        logger.debug('Error: Bypass not cleared')
                    else:
                        logger.debug('Haschat state: {}'.format(hc_state))
                        if len(cur_speed) < 1:
                            if not del_check(job):
                                if hcat.status_get_status_string() == 'Paused':
                                    logger.debug('Stale paused job caught, resuming')
                                    # Fixed: original used '==' (a no-op
                                    # comparison) instead of assignment here
                                    job.meta['CrackQ State'] = 'Run/Restored'
                                    job.save_meta()
                                    hcat.hashcat_session_resume()
                else:
                    logger.error('Error finding redis job')
            sleep(10)
            main_counter += 10
    except KeyboardInterrupt:
        hcat.hashcat_session_quit()
        exit(0)
    except Exception as err:
        logger.error('MAIN loop closed: {}'.format(err))
Esempio n. 16
0
def runner(hash_file=None, hash_mode=1000,
           attack_mode=0, rules=None,
           mask=None, wordlist=None, session=None,
           outfile=None, restore=None, username=False,
           pot_path=None, show=False, brain=True,
           increment=False, increment_min=None,
           increment_max=False, speed=False, benchmark=False,
           benchmark_all=False, wordlist2=None):
    """
    Configure and launch a hashcat session.

    Builds a Hashcat() object from the supplied options, optionally
    runs it in benchmark or speed-only mode, performs the brain
    speed-check handshake when brain support is requested, then
    starts the session and returns the Hashcat object.

    Arguments
    ---------
    hash_file: string
        File containing hashes to feed to hashcat
    hash_mode: int
        Hashcat hash-mode number
    attack_mode: int
        Hashcat attack-mode number
    (remaining arguments map directly onto hashcat options)

    Returns
    -------
    hc: Hashcat
        the executing hashcat session object
    """
    logger.info('Running hashcat')
    hc = Hashcat()
    logger.debug('Hashcat object ID: {}'.format(id(hc)))
    hc.session = session
    if benchmark:
        # Benchmark mode short-circuits all other configuration
        logger.debug('Running in benchmark mode')
        hc.benchmark = True
        if benchmark_all:
            hc.benchmark_all = True
        hc.hashcat_session_execute()
        return hc
    hc.potfile_disable = False
    hc.restore_disable = True
    hc.show = show
    if pot_path:
        hc.potfile_path = pot_path
    hc.quiet = False
    hc.optimized_kernel_enable = True
    hc.workload_profile = 4
    if username is True:
        hc.username = True
    if increment is True:
        hc.increment = True
    if increment_min:
        if isinstance(increment_min, int):
            hc.increment_min = increment_min
    if increment_max:
        if isinstance(increment_max, int):
            hc.increment_max = increment_max
    hc.hash = hash_file
    hc.attack_mode = attack_mode
    if rules:
        hc.rules = rules
        hc.rp_files_cnt = len(rules)
    hc.hash_mode = hash_mode
    if wordlist:
        hc.dict1 = wordlist
    if wordlist2:
        hc.dict2 = wordlist2
    if mask:
        hc.mask = mask
    if speed:
        # Speed-only runs skip the brain handshake below
        hc.speed_only = True
        hc.hashcat_session_execute()
        return hc
    if brain:
        speed_session = '{}_speed'.format(session)
        job = redis_q.fetch_job(session)
        # Guard against a missing job: the original dereferenced
        # job.meta unconditionally and raised AttributeError when
        # fetch_job returned None
        if job and 'brain_check' in job.meta:
            logger.debug('Restored job already has brain check state')
            speed_job = None
            if job.meta['brain_check'] is True:
                hc.brain_client = brain
                hc.brain_client_features = 3
                ###***replace with random string
                hc.brain_password = '******'
                speed_job = None
            else:
                speed_job = speed_q.fetch_job(speed_session)
        else:
            speed_job = speed_q.fetch_job(speed_session)
        wait_count = 0
        if speed_job:
            # Wait (bounded) for the speed-check job to populate its meta
            while len(speed_job.meta) < 1 and wait_count < 410:
                logger.debug('RUNNER loop')
                logger.debug('Speed meta not populated, waiting...')
                if job:
                    if del_check(job):
                        return hc
                if 'failed' in speed_job.get_status():
                    crack_q = crackqueue.Queuer()
                    err_msg = crack_q.error_parser(speed_job)
                    logger.error('Speed check failed: {}'.format(err_msg))
                    if job:
                        job.meta['brain_check'] = None
                        job.save_meta()
                    raise ValueError('Aborted, speed check failed: {}'.format(err_msg))
                elif 'finished' in speed_job.get_status():
                    logger.debug('Breaking runner loop speed check job has finished')
                    if job:
                        if del_check(job):
                            return hc
                elif 'CrackQ State' in speed_job.meta:
                    if del_check(speed_job):
                        return hc
                time.sleep(5)
                wait_count += 5
                speed_job = speed_q.fetch_job(speed_session)
            logger.debug('RUNNER loop finished')
            if 'Mode Info' in speed_job.meta:
                mode_info = speed_job.meta['Mode Info']
                salts = mode_info[-1]
                speed = int(mode_info[-2])
                # Decide whether the brain is worthwhile for this
                # speed/salt combination
                brain = brain_check(speed, salts)
                hc.brain_client = brain
                hc.brain_client_features = 3
                ###***replace with random string
                hc.brain_password = '******'
                if brain is True:
                    if job:
                        job.meta['brain_check'] = True
                        job.save_meta()
                elif brain is False:
                    if job:
                        job.meta['brain_check'] = False
                        job.save_meta()
            else:
                logger.error('Speed check error, disabling brain')
                if job:
                    job.meta['brain_check'] = None
                    if not del_check(job):
                        job.meta['CrackQ State'] = 'Run/Restored'
                        job.save_meta()
        else:
            logger.error('No speed job to check')
            if job and not del_check(job):
                job.meta['CrackQ State'] = 'Run/Restored'
                job.save_meta()
    markov_file = str(valid.val_filepath(path_string=file_dir,
                                         file_string='crackq.hcstat'))
    hc.markov_hcstat2 = markov_file
    hc.custom_charset_1 = '?l?d'
    hc.custom_charset_2 = '?l?d?u'
    hc.custom_charset_3 = '?l?d?s'
    hc.custom_charset_4 = '?u?d?s'
    hc.outfile = outfile
    logger.debug('HC. Hashcat Rules: {}'.format(hc.rules))
    logger.debug('HC. Hashcat rp_files_cnt: {}'.format(hc.rp_files_cnt))
    if restore:
        hc.skip = int(restore)
    hc.hashcat_session_execute()
    # If a speed-check job is currently running, pause this new job
    # so the two do not contend for the GPU
    speed_started = rq.registry.StartedJobRegistry(queue=speed_q)
    cur_speed = speed_started.get_job_ids()
    if len(cur_speed) > 0:
        job = redis_q.fetch_job(session)
        if job:
            if not del_check(job):
                logger.debug('Speed job running, setting new job to Paused')
                job.meta['CrackQ State'] = 'Pause'
                job.save_meta()
    return hc