Esempio n. 1
0
 def get(self):
     """Return job IDs for a given status, optionally scoped to one queue.

     Query args:
         job_status: must be listed in Config.RQ_JOB_STATUS.
         queue_name: a queue from Config.RQ_QUEUES_ALL, or 'all' to scan
             every queue in Config.RQ_QUEUES.

     Returns a dict with a StatesCode and either an error message or the
     collected list of job IDs.
     """
     args = request.args
     job_status = args.get('job_status')
     queue_name = args.get('queue_name')
     if job_status not in Config.RQ_JOB_STATUS:
         return {
             'code': StatesCode.JOB_STATUS_NO_EXIST,
             'message': '任务状态不存在!'
         }
     if queue_name not in Config.RQ_QUEUES_ALL:
         return {'code': StatesCode.QUEUE_NOT_EXIST, 'message': '任务队列不存在!'}

     # Each non-queued status maps to an rq registry class; 'queued' job
     # ids are read directly off the queue object itself.  This replaces
     # five copy-pasted branches that differed only in the registry class.
     registry_classes = {
         'started': StartedJobRegistry,
         'finished': FinishedJobRegistry,
         'failed': FailedJobRegistry,
         'deferred': DeferredJobRegistry,
     }
     if job_status != 'queued' and job_status not in registry_classes:
         # Statuses allowed by Config but not handled here used to fall
         # through and return an empty list; keep that behaviour.
         return {'code': StatesCode.SUCCESS, 'data': []}

     def _job_ids(name):
         # Job ids of one queue for the requested status.
         rq_queue = queue_dict[name]
         if job_status == 'queued':
             return rq_queue.get_job_ids()
         return registry_classes[job_status](queue=rq_queue).get_job_ids()

     if queue_name == 'all':
         job_list = []
         for name in Config.RQ_QUEUES:
             job_list += _job_ids(name)
     else:
         job_list = _job_ids(queue_name)
     return {'code': StatesCode.SUCCESS, 'data': job_list}
Esempio n. 2
0
def failed_jobs(request, queue_index):
    """Render a paginated listing of the failed-job registry of one queue."""
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)
    registry = FailedJobRegistry(queue.name, queue.connection)

    per_page = 100
    num_jobs = len(registry)
    page = int(request.GET.get('page', 1))

    jobs = []
    page_range = []
    if num_jobs > 0:
        last_page = int(ceil(num_jobs / per_page))
        page_range = range(1, last_page + 1)
        start = per_page * (page - 1)
        # registry.get_job_ids takes an inclusive [start, end] index range.
        page_ids = registry.get_job_ids(start, start + per_page - 1)
        jobs = get_jobs(queue, page_ids, registry)

    context_data = {
        'queue': queue,
        'queue_index': queue_index,
        'jobs': jobs,
        'num_jobs': num_jobs,
        'page': page,
        'page_range': page_range,
        'job_status': 'Failed',
    }
    return render(request, 'django_rq/jobs.html', context_data)
Esempio n. 3
0
def requeue_all(request, queue_index):
    """POST: requeue every failed job of the queue; GET: show a confirm page."""
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)
    registry = FailedJobRegistry(queue=queue)

    if request.method == 'POST':
        # Confirmation received: requeue each failed job, skipping any that
        # vanished between listing and requeueing.
        requeued = 0
        for job_id in registry.get_job_ids():
            try:
                requeue_job(job_id, connection=queue.connection)
            except NoSuchJobError:
                continue
            requeued += 1

        messages.info(request,
                      'You have successfully requeued %d jobs!' % requeued)
        return redirect('rq_jobs', queue_index)

    context_data = {
        'queue_index': queue_index,
        'queue': queue,
        'total_jobs': len(registry),
    }

    return render(request, 'django_rq/requeue_all.html', context_data)
Esempio n. 4
0
 def getfailedjobs(self, q):
     """Return the job IDs in queue *q*'s FailedJobRegistry.

     NOTE(review): on any failure the exception object itself is *returned*
     rather than raised, so callers must type-check the result — consider
     re-raising or returning an empty list instead.
     """
     try:
         # self.base_connection is presumably a redis connection — TODO confirm.
         registry = FailedJobRegistry(q, connection=self.base_connection)
         response_object = registry.get_job_ids()
         return response_object
     except Exception as e:
         return e
Esempio n. 5
0
def requeue(cli_config, queue, all, job_class, job_ids, **options):
    """Requeue failed jobs."""

    registry = FailedJobRegistry(queue,
                                 connection=cli_config.connection)
    # --all overrides any job ids given on the command line.
    if all:
        job_ids = registry.get_job_ids()

    if not job_ids:
        click.echo('Nothing to do')
        sys.exit(0)

    click.echo('Requeueing {0} jobs from failed queue'.format(len(job_ids)))
    failures = 0
    with click.progressbar(job_ids) as bar:
        for job_id in bar:
            try:
                registry.requeue(job_id)
            except InvalidJobOperationError:
                # Job is no longer requeueable (e.g. already removed).
                failures += 1

    if failures > 0:
        click.secho(
            'Unable to requeue {0} jobs from failed job registry'.format(
                failures),
            fg='red')
Esempio n. 6
0
def get_queue_registry_jobs_count(queue_name, registry_name, offset, per_page):
    """Return ``(total_items, jobs)`` for one registry slice of a queue.

    Args:
        queue_name: name of the rq queue.
        registry_name: 'queued', 'failed', 'deferred', 'started' or 'finished'.
        offset: start index into the registry/queue.
        per_page: page size; negative means "no limit" and is passed through.

    Raises:
        ValueError: for an unrecognised ``registry_name``.  (Bug fix: the
        original left ``current_queue`` unbound in that case and crashed
        with UnboundLocalError further down.)
    """
    queue = Queue(queue_name)
    if registry_name == "queued":
        current_queue = queue
    else:
        if per_page >= 0:
            # Convert the page size into the inclusive end index expected
            # by registry.get_job_ids(start, end).
            per_page = offset + (per_page - 1)

        registries = {
            "failed": FailedJobRegistry,
            "deferred": DeferredJobRegistry,
            "started": StartedJobRegistry,
            "finished": FinishedJobRegistry,
        }
        try:
            current_queue = registries[registry_name](queue_name)
        except KeyError:
            raise ValueError("Unknown registry name: %r" % (registry_name,))
    total_items = current_queue.count

    job_ids = current_queue.get_job_ids(offset, per_page)
    current_queue_jobs = [queue.fetch_job(job_id) for job_id in job_ids]
    # fetch_job returns None for ids whose job data has expired; skip those.
    jobs = [
        serialize_job(job) for job in current_queue_jobs if job is not None
    ]

    return (total_items, jobs)
Esempio n. 7
0
def check_scrape_job(scrape_id: str, scraper: Scraper):
    """Report the outcome of a finished scrape job back to the user.

    Returns False when the job landed in the failed registry, True otherwise.
    """
    from karim import telegram_bot as bot

    failed_registry = FailedJobRegistry(queue=queue)
    if scrape_id in failed_registry.get_job_ids():
        # The scrape ended up in the failed registry: tell the user, log it.
        bot.send_message(scraper.get_user_id(), failed_scraping_ig_text)
        sheet.log(datetime.utcnow(),
                  scraper.get_user_id(),
                  action='FAILED SCRAPE')
        return False

    # Success path: pull the job result straight from redis.
    conn = redis.from_url(os.getenv('REDISTOGO_URL', 'redis://localhost:6379'))
    result = Job.fetch(scrape_id, connection=conn).result

    # Persist the scraped data and log the success.
    sheet.add_scrape(scraper.get_target(),
                     name=scraper.get_name(),
                     scraped=result)
    sheet.log(datetime.utcnow(),
              scraper.get_user_id(),
              action='SUCCESSFUL SCRAPE')

    # Notify the user with a button linking to the spreadsheet.
    keyboard = InlineKeyboardMarkup([[
        InlineKeyboardButton(text='Google Sheets',
                             url=sheet.get_sheet_url(1))
    ]])
    bot.send_message(scraper.get_user_id(),
                     finished_scrape_text,
                     reply_markup=keyboard)
    return True
Esempio n. 8
0
 def requeue_all(self):
     """Print and requeue every job id found in the failed-job registry."""
     with Connection(redis.from_url(self.url)):
         queue = Queue()
         failed_ids = FailedJobRegistry().get_job_ids()
         for job_id in failed_ids:
             print(job_id)
             requeue_job(job_id, queue.connection)
Esempio n. 9
0
 def getFailedExperiments(cls):
     """Fetch every Job currently in the 'default' failed-job registry."""
     with Connection(redis.from_url(
             current_app.config['REDIS_URL'])) as conn:
         registry = FailedJobRegistry('default', connection=conn)
         failed_jobs = []
         for job_id in registry.get_job_ids():
             failed_jobs.append(Job.fetch(job_id, connection=conn))
         return failed_jobs
Esempio n. 10
0
 def getfailedjobs(self, q):
     """Return the job IDs in queue *q*'s FailedJobRegistry.

     NOTE(review): on any failure the exception object itself is *returned*
     rather than raised, so callers must type-check the result — consider
     re-raising or returning an empty list instead.
     """
     log.info(f"getting failed jobs: {q}")
     try:
         # self.base_connection is presumably a redis connection — TODO confirm.
         registry = FailedJobRegistry(q, connection=self.base_connection)
         response_object = registry.get_job_ids()
         return response_object
     except Exception as e:
         return e
Esempio n. 11
0
    def get(self):
        """Return queued + started job ids, plus the failed job ids."""
        waiting = queue.get_job_ids()

        running = StartedJobRegistry('default', connection=broker).get_job_ids()

        failed_ids = FailedJobRegistry(connection=broker).get_job_ids()

        return {"jobs": running + waiting, "failed": failed_ids}
Esempio n. 12
0
def index():
    """Render the dashboard with queued jobs and failed jobs."""
    registry_failed = FailedJobRegistry(queue=q)
    failed_jobs = [q.fetch_job(job_id)
                   for job_id in registry_failed.get_job_ids()]

    return render_template("index.html", jobs=q.jobs, q_len=len(q),
                           failed_jobs=failed_jobs,
                           failed_len=registry_failed.count)
Esempio n. 13
0
def check_failed_rq_jobs(queue_name='monitoring_tasks', delete_job=False):
    """This function will print out jobs that failed to execute on RQ 's task queue"""
    registry = FailedJobRegistry(
        queue=Queue(connection=app.redis, name=queue_name))
    for job_id in registry.get_job_ids():
        job = Job.fetch(job_id, connection=app.redis)
        # Write the captured exception traceback to the redis log file.
        system_logging(f'\n{job.__dict__["exc_info"]}\n------------------------------------------\n', True, 'redis.log')
        # Drop the id from the registry; optionally delete the job data too.
        registry.remove(job_id, delete_job=delete_job)
Esempio n. 14
0
def check_dm_job(identifier: str, forwarder: Forwarder):
    """Count failed DM jobs matching *identifier* and notify the user."""
    print('TELEBOT: Check DM Job Initiated')
    from karim import telegram_bot as bot

    failed_ids = FailedJobRegistry(queue=queue).get_job_ids()
    count = sum(1 for job_id in failed_ids
                if identifier in job_id and DM in job_id)

    bot.send_message(forwarder.get_user_id(),
                     finished_sending_dm_text.format(count))
    return True
Esempio n. 15
0
def _get_failed_jobs(connection):
    """Collect failed jobs from every queue reachable on *connection*."""
    collected = []

    for queue in Queue.all(connection=connection):
        registry = FailedJobRegistry(queue.name, connection=connection)
        for job_id in registry.get_job_ids():
            job = rq_job.Job.fetch(job_id, connection=connection)
            collected.append(_create_failed_job_obj(job))

    return collected
Esempio n. 16
0
def check_failed():
    """Check the jobs that have failed and requeue them.

    Each failed job gets a redis retry counter; jobs whose counter is below
    FAILED_JOBS_RETRIES are requeued, the rest trigger a one-off admin e-mail
    containing the traceback.  Returns a human-readable status string.
    """
    from rq import Queue, requeue_job
    from pybossa.core import sentinel
    from rq.registry import FailedJobRegistry

    queue_name='maintenance'
    redis_conn = sentinel.master 
    queue = Queue(queue_name, connection=redis_conn)

    registry = FailedJobRegistry(queue=queue)
    job_ids = registry.get_job_ids()
    count = len(job_ids)
    FAILED_JOBS_RETRIES = current_app.config.get('FAILED_JOBS_RETRIES')
    for job_id in job_ids:
        # Per-job retry counter; reads go to the slave, writes to the master.
        KEY = 'pybossa:job:failed:%s' % job_id
        job = queue.fetch_job(job_id)
        if sentinel.slave.exists(KEY):
            sentinel.master.incr(KEY)
        else:
            # First failure seen: counter starts at 1 and expires after
            # FAILED_JOBS_MAILS days (ttl reused for the counter key).
            ttl = current_app.config.get('FAILED_JOBS_MAILS')*24*60*60
            sentinel.master.setex(KEY, ttl, 1)
        if int(sentinel.slave.get(KEY)) < FAILED_JOBS_RETRIES:
            requeue_job(job_id,connection=redis_conn)
        else:
            # Too many retries: mail the admins once, guarded by a second
            # "mailed" key with the same expiry.
            KEY = 'pybossa:job:failed:mailed:%s' % job_id
            if (not sentinel.slave.exists(KEY) and
                    current_app.config.get('ADMINS')):
                subject = "JOB: %s has failed more than 3 times" % job_id
                body = "Please, review the background jobs of your server."
                body += "\n This is the trace error\n\n"
                body += "------------------------------\n\n"
                # NOTE(review): job.exc_info may be None for expired job data,
                # which would raise here — TODO confirm.
                body += job.exc_info
                mail_dict = dict(recipients=current_app.config.get('ADMINS'),
                                 subject=subject, body=body)
                send_mail(mail_dict)
                ttl = current_app.config.get('FAILED_JOBS_MAILS')*24*60*60
                sentinel.master.setex(KEY, ttl, 1)
    if count > 0:
        return "JOBS: %s You have failed the system." % job_ids
    else:
        return "You have not failed the system"
Esempio n. 17
0
def delete_failed(c, queue=None):
    """Delete failed jobs from given queue
    """
    if queue is None:
        raise ValueError("Please specify queue")

    connection = Redis(host=REDIS_HOST,
                       port=REDIS_PORT,
                       password=REDIS_PASSWORD,
                       db=REDIS_DB)
    q = Queue(queue, connection=connection)
    registry = FailedJobRegistry(queue=q)
    for job_id in registry.get_job_ids():
        job = q.fetch_job(job_id)
        # Only delete jobs that really are in the failed state.
        if job.is_failed:
            job.delete()
Esempio n. 18
0
def requeue_failed(c, queue=None):
    """Requeue failed jobs from given queue.

    (Old docstring said "started jobs"; this iterates the FailedJobRegistry,
    so it requeues *failed* jobs.)
    """
    if queue is None:
        raise ValueError("Please specify queue")

    q = Queue(
        queue,
        connection=Redis(host=REDIS_HOST,
                         port=REDIS_PORT,
                         password=REDIS_PASSWORD,
                         db=REDIS_DB),
    )
    registry = FailedJobRegistry(queue=q)
    for job_id in registry.get_job_ids():
        # fetch_job returns None for expired job data; skip those.
        job = q.fetch_job(job_id)
        if job is not None:
            job.requeue()
Esempio n. 19
0
def get_queue_registry_jobs_count(queue_name, registry_name, offset, per_page):
    """Return ``(total_items, jobs)`` for one registry slice of a queue.

    Args:
        queue_name: name of the rq queue.
        registry_name: 'failed', 'deferred', 'started', 'finished'; any other
            value (e.g. 'queued') falls back to the queue itself.
        offset, per_page: passed to ``get_job_ids(start, end)``.
    """
    queue = Queue(queue_name)
    if registry_name == 'failed':
        current_queue = FailedJobRegistry(queue_name)
    elif registry_name == 'deferred':
        current_queue = DeferredJobRegistry(queue_name)
    elif registry_name == 'started':
        current_queue = StartedJobRegistry(queue_name)
    elif registry_name == 'finished':
        current_queue = FinishedJobRegistry(queue_name)
    else:
        current_queue = queue
    total_items = current_queue.count

    job_ids = current_queue.get_job_ids(offset, per_page)
    current_queue_jobs = [queue.fetch_job(job_id) for job_id in job_ids]
    # Bug fix: fetch_job returns None for ids whose job data has expired;
    # skip those instead of handing None to serialize_job.
    jobs = [serialize_job(job) for job in current_queue_jobs
            if job is not None]

    return (total_items, jobs)
Esempio n. 20
0
 def handle(self, *args, **options):
     """Shut down all workers, requeue the stalled mail job (if any), purge
     failed topper jobs, then start a fresh worker on both queues."""
     with Connection(REDIS_CLIENT):
         # Ask every live worker to abort its current job and shut down.
         workers = Worker.all(REDIS_CLIENT)
         for worker in workers:
             send_kill_horse_command(REDIS_CLIENT, worker.name)
             send_shutdown_command(REDIS_CLIENT, worker.name)
             worker.register_death()
         # Re-enqueue the first unfinished mail job, if one exists.
         job_ids = AsyncCronMail.objects.values_list('job_id').filter(started_at__isnull=False,status=False).first()
         if AsyncCronMail.objects.filter(started_at__isnull=False,status=False).count() > 0:
             try:
                 job = Job.fetch(job_ids[0], connection=REDIS_CLIENT)
                 DEFAULT_QUEUE.empty()
                 DEFAULT_QUEUE.enqueue_job(job)
             except Exception:
                 # Bug fix: the bare ``except:`` here also swallowed
                 # SystemExit/KeyboardInterrupt; catch only real errors.
                 print('Job does not exist')
         # Drop all failed topper jobs so the restart begins clean.
         topper_registry = FailedJobRegistry(queue=TOPPER_QUEUE)
         for job_id in topper_registry.get_job_ids():
             topper_registry.remove(job_id, delete_job=True)
         w = Worker([DEFAULT_QUEUE,TOPPER_QUEUE], connection=REDIS_CLIENT, name='default_worker')
         w.work()
Esempio n. 21
0
    def test_worker_handle_job_failure(self):
        """Failed jobs are added to FailedJobRegistry"""
        q = Queue(connection=self.testconn)

        w = Worker([q])
        registry = FailedJobRegistry(connection=w.connection)

        timestamp = current_timestamp()

        # NOTE(review): this first enqueue also passes failure_ttl=5, which
        # contradicts the "default failure ttl" comment below — confirm
        # against the upstream rq test suite.
        job = q.enqueue(div_by_zero, failure_ttl=5)
        w.handle_job_failure(job)
        # job is added to FailedJobRegistry with default failure ttl
        self.assertIn(job.id, registry.get_job_ids())
        # The registry zset score is the job's expiry time; it must fall
        # below now + DEFAULT_FAILURE_TTL + slack.
        self.assertLess(self.testconn.zscore(registry.key, job.id),
                        timestamp + DEFAULT_FAILURE_TTL + 5)

        # job is added to FailedJobRegistry with specified ttl
        job = q.enqueue(div_by_zero, failure_ttl=5)
        w.handle_job_failure(job)
        # With failure_ttl=5 the expiry must be within now + 7 (5 + slack).
        self.assertLess(self.testconn.zscore(registry.key, job.id),
                        timestamp + 7)
Esempio n. 22
0
    def test_worker_handle_job_failure(self):
        """Failed jobs are added to FailedJobRegistry"""
        q = Queue(connection=self.testconn)

        w = Worker([q])
        registry = FailedJobRegistry(connection=w.connection)

        timestamp = current_timestamp()

        # NOTE(review): this first enqueue also passes failure_ttl=5, which
        # contradicts the "default failure ttl" comment below — confirm
        # against the upstream rq test suite.
        job = q.enqueue(div_by_zero, failure_ttl=5)
        w.handle_job_failure(job)
        # job is added to FailedJobRegistry with default failure ttl
        self.assertIn(job.id, registry.get_job_ids())
        # The registry zset score is the job's expiry time; it must fall
        # below now + DEFAULT_FAILURE_TTL + slack.
        self.assertLess(self.testconn.zscore(registry.key, job.id),
                        timestamp + DEFAULT_FAILURE_TTL + 5)

        # job is added to FailedJobRegistry with specified ttl
        job = q.enqueue(div_by_zero, failure_ttl=5)
        w.handle_job_failure(job)
        # With failure_ttl=5 the expiry must be within now + 7 (5 + slack).
        self.assertLess(self.testconn.zscore(registry.key, job.id),
                        timestamp + 7)
def get_object_id2queue_map(object_ids):
    """
    Get a {object_id: queue_names} dictionary of object IDs and the queues they
    currently belong to
    """
    queue_object_ids = defaultdict(set)

    for queue_type in OBJECT_QUEUE_TYPES:
        queue = get_queue(queue_type)

        # Pending or executing jobs belong to the per-queue bucket.
        started_ids = StartedJobRegistry(queue=queue).get_job_ids()
        for job_id in started_ids + queue.get_job_ids():
            object_id = job_id_to_object_id(job_id)
            if object_id is not None:
                queue_object_ids[queue_type.value].add(object_id)

        # Failed jobs go into both the per-queue bucket and "failed".
        failed_ids = FailedJobRegistry(queue=get_queue(queue_type)).get_job_ids()
        for job_id in failed_ids:
            object_id = job_id_to_object_id(job_id)
            if object_id is not None:
                queue_object_ids[queue_type.value].add(object_id)
                queue_object_ids["failed"].add(object_id)

    # All queue names plus the catch-all failed bucket, in a stable order.
    queue_names = [queue_type.value
                   for queue_type in OBJECT_QUEUE_TYPES] + ["failed"]

    return {
        object_id: [name for name in queue_names
                    if object_id in queue_object_ids[name]]
        for object_id in object_ids
    }
Esempio n. 24
0
File: cli.py — Project: nvie/rq
def requeue(cli_config, queue, all, job_class, job_ids,  **options):
    """Requeue failed jobs."""

    failed_job_registry = FailedJobRegistry(queue,
                                            connection=cli_config.connection)
    # With --all, the ids given on the command line are ignored.
    if all:
        job_ids = failed_job_registry.get_job_ids()

    if not job_ids:
        click.echo('Nothing to do')
        sys.exit(0)

    click.echo('Requeueing {0} jobs from failed queue'.format(len(job_ids)))
    fail_count = 0
    with click.progressbar(job_ids) as progress:
        for job_id in progress:
            try:
                failed_job_registry.requeue(job_id)
            except InvalidJobOperationError:
                # Job can no longer be requeued; count it and carry on.
                fail_count += 1

    if fail_count > 0:
        click.secho(
            'Unable to requeue {0} jobs from failed job registry'.format(
                fail_count),
            fg='red')
Esempio n. 25
0
def failed_jobs(request, queue_index):
    """Paginated view of the failed-job registry for one queue."""
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)
    registry = FailedJobRegistry(queue.name, queue.connection)

    per_page = 100
    num_jobs = len(registry)
    page = int(request.GET.get('page', 1))

    jobs = []
    page_range = []
    if num_jobs > 0:
        page_range = range(1, int(ceil(num_jobs / per_page)) + 1)
        start = per_page * (page - 1)
        # get_job_ids takes an inclusive [start, end] index range.
        for job_id in registry.get_job_ids(start, start + per_page - 1):
            try:
                jobs.append(Job.fetch(job_id, connection=queue.connection))
            except NoSuchJobError:
                # Job data expired between listing and fetch; skip it.
                pass

    context_data = {
        'queue': queue,
        'queue_index': queue_index,
        'jobs': jobs,
        'num_jobs': num_jobs,
        'page': page,
        'page_range': page_range,
        'job_status': 'Failed',
    }
    return render(request, 'django_rq/jobs.html', context_data)
Esempio n. 26
0
def failed_clear():
    """Delete every failed job, then bounce back to the index page."""
    registry = FailedJobRegistry(queue=q)
    for failed_id in registry.get_job_ids():
        registry.remove(failed_id, delete_job=True)
    return redirect("/")
Esempio n. 27
0
def requeue():
    """Requeue every failed job, then bounce back to the index page."""
    registry = FailedJobRegistry(queue=q)
    for failed_id in registry.get_job_ids():
        registry.requeue(failed_id)
    return redirect("/")
Esempio n. 28
0
 def handle(self, **options):
     """Requeue every job in the default queue's failed registry."""
     queue = get_queue()
     registry = FailedJobRegistry(queue.name, queue.connection)
     for failed_id in registry.get_job_ids():
         registry.requeue(failed_id)
Esempio n. 29
0
#ext lib
import redis
import pdfkit
from rq import Worker, Queue, Connection
from redis import Redis
from rq.job import Job
from rq.registry import FailedJobRegistry, ScheduledJobRegistry
from apscheduler.schedulers.background import BackgroundScheduler

# Flask app with permissive CORS; RQ queue backed by the "redis" host.
app = Flask(__name__)
cors = CORS(app, resources={r"/*": {"origins": "*"}})
redis_conn = Redis(host='redis', port=6379)
q = Queue(connection=redis_conn)

# Startup cleanup: delete any leftover failed jobs from a previous run...
failed_registry = FailedJobRegistry(queue=q)
for job_id in failed_registry.get_job_ids():
    app.logger.error("fal del-> " + str(job_id))
    failed_registry.remove(job_id, delete_job=True)

# ...and any leftover scheduled jobs as well.
sch_registry = ScheduledJobRegistry(queue=q)
for job_id in sch_registry.get_job_ids():
    app.logger.error("sch del-> " + str(job_id))
    sch_registry.remove(job_id, delete_job=True)


def job_function():
    con = conDB.newCon()
    data = conDB.getPDFs(con).fetchall()
    # ter em conta time out to job result
    # ver se status é 0 o u 1 antes de chamar result
    for i in data: