Example #1
    def check_failed(self, q_obj):
        """
        This method checks the failed job registry for the given queue and
        logs the details of any failed jobs

        Parameters
        ----------
        q_obj : rq.Queue
            queue object whose failed job registry is checked

        Returns
        -------
        failed_dict : dict
            dict of failed job IDs mapped to their error and name details
        """
        try:
            failed_dict = {}
            failed_reg = rq.registry.FailedJobRegistry(queue=q_obj)
            if failed_reg.count > 0:
                for job_id in failed_reg.get_job_ids():
                    failed_dict[job_id] = {}
                    job = q_obj.fetch_job(job_id)
                    failed_dict[job_id]['Error'] = self.error_parser(job)
                    try:
                        name = cq_api.get_jobdetails(job.description)['name']
                        failed_dict[job_id]['Name'] = name
                    except (KeyError, AttributeError):
                        failed_dict[job_id]['Name'] = 'No name'
            logger.debug('Failed dict: {}'.format(failed_dict))
            return failed_dict
        except AttributeError as err:
            logger.warning('Error getting failed queue: {}'.format(err))
            return {}
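
The example above is built around rq's FailedJobRegistry; below is a minimal standalone sketch of the same registry inspection, assuming a local Redis instance (the host, port and queue name are assumptions, not from the original source):

import rq
from redis import Redis

# Inspect the failed-job registry the same way check_failed() does:
# list the failed job IDs on a queue and fetch each job for its details.
redis_con = Redis(host='localhost', port=6379)   # assumed connection settings
q_obj = rq.Queue('default', connection=redis_con)
failed_reg = rq.registry.FailedJobRegistry(queue=q_obj)

for job_id in failed_reg.get_job_ids():
    job = q_obj.fetch_job(job_id)
    if job is not None:
        print(job_id, job.exc_info)
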
Example #2
def write_result(sender):
    """
    Method to write cracking results to file in JSON format

    When executed, this will open the corresponding session results (.json)
    file and update it with the latest hashcat status and other metadata
    relating to the job

    Arguments
    ---------
    sender : object
        hashcat session/event object used to pull status information for
        the current cracking session

    Returns
    -------
    None
    """
    logger.debug('Updating status file')
    hcat_status = status(sender)
    if '_speed' in sender.session:
        session = sender.session[:-6]
    else:
        session = sender.session
    result_file = valid.val_filepath(path_string=log_dir,
                                     file_string='{}.json'.format(session))
    if 'Progress' in hcat_status:
        hcat_status['Progress'] = int(hcat_status['Progress'])
    logger.debug('Updating job metadata')
    if not sender.benchmark:
        try:
            with open(result_file, 'r+') as result_fh:
                job = redis_q.fetch_job(session)
                if job and isinstance(hcat_status, dict):
                    job.meta['HC State'] = hcat_status
                    job.meta['Speed Array'] = circulator(
                        job.meta['Speed Array'], int(hcat_status['Speed Raw']),
                        180)
                    job.save_meta()
                    job_details = cq_api.get_jobdetails(job.description)
                    job_details['restore'] = hcat_status['Restore Point']
                    if 'brain_check' in job.meta:
                        job_details['brain_check'] = job.meta['brain_check']
                else:
                    result = result_fh.read()
                    job_details = json.loads(result.strip())
                job_details['Cracked Hashes'] = sender.status_get_digests_done()
                job_details['Total Hashes'] = sender.status_get_digests_cnt()
                job_details['timeout'] = job.timeout
                result_fh.seek(0)
                result_fh.write(json.dumps(job_details))
                result_fh.truncate()
        except (AttributeError, KeyError, UnboundLocalError) as err:
            logger.debug('Status update failure: {}'.format(err))
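
The circulator() helper called above is not shown in these examples; judging from the call circulator(job.meta['Speed Array'], int(hcat_status['Speed Raw']), 180), it keeps a rolling window of the most recent speed samples. A sketch of such a helper under that assumption (the body is illustrative, not the project's actual implementation):

def circulator(speed_array, value, max_len):
    """Rolling-buffer sketch: append the new sample and keep only the
    newest max_len entries. Behaviour is assumed, inferred from the
    circulator(list, int, 180) call in the example above."""
    speed_array.append(value)
    return speed_array[-max_len:]
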
Example #3
    def write_result(self, hcat_status):
        """
        Method to write cracking results to file in JSON format

        When executed, this will open the corresponding session results
        (.json) file and load the data into it, along with other metadata
        relating to the job

        Arguments
        ---------
        hcat_status : dict
            Hashcat status dict (from self.status()), containing hashcat data
            from the cracking session

        Returns
        -------
        None
        """
        ###***TODO: add path validation
        ###***TODO: refactor to remove file use
        ###***TODO: clean up metadata handling
        if 'Waiting' in hcat_status:
            hcat_status = {'HC State': 'Loading'}
            logger.warning('Status update failure')
            return
        elif 'Progress' in hcat_status:
            hcat_status['Progress'] = int(hcat_status['Progress'])
        logger.debug('Updating job metadata')
        rconf = CRACK_CONF['redis']
        redis_con = Redis(rconf['host'], rconf['port'])
        redis_q = Queue(connection=redis_con)
        logger.debug('Creating results file')
        #cracked_file = '{}{}.cracked'.format(self.log_dir, hcat_status['Session'])
        result_file = '{}{}.json'.format(self.log_dir, hcat_status['Session'])
        #try:
        #    with open(cracked_file, 'r') as cracked_fh:
        #        cracked_list = [cracked.rstrip() for cracked in cracked_fh]
        #    hcat_status['Cracked'] = cracked_list
        #except IOError as err:
        #    logger.debug('Cracked file does not exist: {}'.format(err))
        with open(result_file, 'w') as result_fh:
            try:
                job = redis_q.fetch_job(hcat_status['Session'])
                job.meta['HC State'] = hcat_status
                job.meta['Speed Array'] = self.circulator(
                    job.meta['Speed Array'], int(hcat_status['Speed Raw']),
                    180)
                job.save_meta()
                job_details = cq_api.get_jobdetails(job.description)
                job_details['restore'] = hcat_status['Restore Point']
                job_details = json.dumps(job_details)
                result_fh.write(job_details)
            except AttributeError as err:
                logger.info('Status update failure: {}'.format(err))
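
For context, the job metadata written above (job.meta['HC State'], job.meta['Speed Array']) can be read back elsewhere through rq; the sketch below shows one way to do that and is not part of the original source (the connection settings and session name are assumptions):

from redis import Redis
from rq import Queue

redis_con = Redis('localhost', 6379)        # assumed host/port
redis_q = Queue(connection=redis_con)

job = redis_q.fetch_job('example-session')  # hypothetical session/job ID
if job is not None:
    job.refresh()                           # reload job.meta from Redis
    print(job.meta.get('HC State'))
    print(job.meta.get('Speed Array'))
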
Example #4
    def check_failed(self):
        """
        This method checks the failed job registry on the default queue and
        logs the details of any failed jobs

        Returns
        -------
        failed_dict : dict
            dict of failed job IDs mapped to their error and name details
        """
        ###***finish this
        try:
            failed_dict = {}
            failed_reg = rq.registry.FailedJobRegistry(
                'default', connection=self.redis_con)
            if failed_reg.count > 0:
                q = failed_reg.get_queue()
                for job in failed_reg.get_job_ids():
                    failed_dict[job] = {}
                    j = q.fetch_job(job)
                    ###***make this better, use some other method for splitting
                    if j is not None:
                        err_split = j.exc_info.split('\n')
                        logger.debug('Failed job {}: {}'.format(
                            job, j.exc_info))
                        if 'Traceback' in err_split[0]:
                            failed_dict[job]['Error'] = j.exc_info.split(
                                ':')[-1].strip()
                        else:
                            failed_dict[job]['Error'] = err_split[0]
                        try:
                            failed_dict[job]['Name'] = cq_api.get_jobdetails(
                                j.description)['name']
                        except (KeyError, AttributeError):
                            failed_dict[job]['Name'] = 'No name'
            logger.debug('Failed dict: {}'.format(failed_dict))
            return failed_dict
        except AttributeError as err:
            logger.warning('Error getting failed queue: {}'.format(err))
            return {}
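
Example #1 delegates the exc_info parsing shown inline here to an error_parser() method that is not included in these snippets; below is a sketch of that logic as a standalone function, mirroring the splitting done above (this is an illustration, not the project's actual error_parser implementation):

def error_parser(job):
    """Sketch: pull a short error string out of an rq job's exc_info,
    mirroring the inline splitting in the example above."""
    if job is None or not job.exc_info:
        return None
    err_split = job.exc_info.split('\n')
    if 'Traceback' in err_split[0]:
        # full traceback stored: keep only the text after the final colon
        return job.exc_info.split(':')[-1].strip()
    # otherwise the first line already summarises the error
    return err_split[0]
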