Example #1
def search_tasks():
    """
    Retrieves all the tasks based on status
    :return:
    """
    status = request.args.get("status", "").lower()
    if status != "" and status not in [enum_val.lower() for enum_val in JobStatus.__dict__.keys()]:
        return make_response({"success": False, "message": f'Unknown status {status}'})
    jobs = []
    json_jobs = []
    # RUNNING JOBS
    if status == "" or (status != "finished" and status != "failed"):
        jobs.extend([queue.fetch_job(id) for id in started_jobs.get_job_ids()])
    # FINISHED JOBS
    if status == "" or status == "finished":
        jobs.extend(Job.fetch_many(finished_jobs.get_job_ids(), connection=redis))
    # FAILED JOBS
    if status == "" or status == "failed":
        jobs.extend([queue.fetch_job(id) for id in failed_jobs.get_job_ids()])
    # Change them to human readable format
    for job in jobs:
        json_jobs.append({"id": job.id, "result": job.return_value,
                          "created_at": job.created_at, "started_at": job.started_at,
                          "ended_at": job.ended_at, "status": job.get_status(),
                          "meta": job.meta})
    return make_response(jsonify(json_jobs), 200)
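The membership test above walks `JobStatus.__dict__`, which also yields dunder names; a tighter check is possible if one assumes a modern rq where `JobStatus` is a string-backed enum (a sketch, not the original author's code):

from rq.job import JobStatus

def is_valid_status(status: str) -> bool:
    # JobStatus values are already lowercase ("queued", "finished", ...)
    return status in {s.value for s in JobStatus}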
Example #2
def schedule_periodic_jobs(jobs):
    job_definitions = [prep(job) for job in jobs]

    jobs_to_clean_up = Job.fetch_many(
        set([job.id for job in rq_scheduler.get_jobs()])
        - set([job_id(job) for job in job_definitions]),
        rq_redis_connection,
    )

    jobs_to_schedule = [
        job for job in job_definitions if job_id(job) not in rq_scheduler
    ]

    for job in jobs_to_clean_up:
        logger.info("Removing %s (%s) from schedule.", job.id, job.func_name)
        rq_scheduler.cancel(job)
        job.delete()

    for job in jobs_to_schedule:
        logger.info(
            "Scheduling %s (%s) with interval %s.",
            job_id(job),
            job["func"].__name__,
            job.get("interval"),
        )
        schedule(job)
Example #3
def purge_failed_jobs():
    with Connection(rq_redis_connection):
        queues = [
            q for q in Queue.all() if q.name not in default_operational_queues
        ]
        for queue in queues:
            failed_job_ids = FailedJobRegistry(queue=queue).get_job_ids()
            failed_jobs = Job.fetch_many(failed_job_ids, rq_redis_connection)
            stale_jobs = []
            for failed_job in failed_jobs:
                # the job may not actually exist anymore in Redis
                if not failed_job:
                    continue
                # the job could have an empty ended_at value in case
                # of a worker dying before it can save the ended_at value,
                # in which case we also consider them stale
                if not failed_job.ended_at:
                    stale_jobs.append(failed_job)
                elif (datetime.utcnow() - failed_job.ended_at
                      ).total_seconds() > settings.JOB_DEFAULT_FAILURE_TTL:
                    stale_jobs.append(failed_job)

            for stale_job in stale_jobs:
                stale_job.delete()

            if stale_jobs:
                logger.info(
                    "Purged %d old failed jobs from the %s queue.",
                    len(stale_jobs),
                    queue.name,
                )
Example #4
def hello():
    all_data = {}

    # Fetch the device ids (keys) once, then their names (values), so the
    # two lists stay aligned even if keys change between the two calls.
    keys = redis.keys()
    device = redis.mget(keys)

    print(keys)
    all_jobs = []
    for key, dev in zip(keys, device):
        URL = "http://inventory:5001?id=" + str(dev)

        # This inventory will be fetched in the background
        job = q.enqueue(background_task, URL)

        all_jobs.append(job.id)

    check = 1
    full_jobs = Job.fetch_many(all_jobs, connection=r)

    # Busy-wait until every job reports "finished"; see the gentler
    # polling sketch after this example.
    while check:
        check = 0
        for job in full_jobs:
            if str(job.get_status()).lower() != "finished":
                check = 1
                break

    for key, job in zip(keys, full_jobs):
        result = job.result
        all_data.update({key: result})
    print("fetching inventory")

    return jsonify(all_data)
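The `while check:` loop above re-polls every job as fast as Python can loop. A gentler polling sketch over the same `full_jobs` list (the half-second interval is an arbitrary choice; like the original, it never exits if a job fails rather than finishes):

import time

# get_status() re-reads the job's state from Redis on every call
while any(j.get_status() != "finished" for j in full_jobs if j is not None):
    time.sleep(0.5)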
Example #5
def fetch_jobs(queue, job_ids):
    return [{
        'id': job.id,
        'name': job.func_name,
        'queue': queue.name,
        'enqueued_at': job.enqueued_at,
        'started_at': job.started_at
    } for job in Job.fetch_many(job_ids, connection=redis_connection) if job is not None]
Example #6
def fetch_jobs(queue, job_ids):
    return [{
        "id": job.id,
        "name": job.func_name,
        "queue": queue.name,
        "enqueued_at": job.enqueued_at,
        "started_at": job.started_at,
    } for job in Job.fetch_many(job_ids, connection=rq_redis_connection)
            if job is not None]
Example #7
File: monitor.py Project: wttfire/redash
def fetch_jobs(job_ids):
    return [{
        "id": job.id,
        "name": job.func_name,
        "origin": job.origin,
        "enqueued_at": job.enqueued_at,
        "started_at": job.started_at,
        "meta": job.meta,
    } for job in Job.fetch_many(job_ids, connection=rq_redis_connection)
            if job is not None]
Example #8
def active_analyses():
    """
    Returns a list of job IDs corresponding to active (pending or running)
    analysis tasks.
    """
    pending_jobs = Job.fetch_many(binary_analysis.job_ids, connection=redis)
    pending_ids = [
        # fetch_many may return None for jobs that have expired from Redis
        job.id for job in pending_jobs
        if job is not None and job.func_name != "analyze_binary"
    ]
    started = StartedJobRegistry("binary_analysis", connection=redis)

    return pending_ids + started.get_job_ids()
Example #9
def finished_tasks():
    q_len = len(queue)
    f_len = len(queue.finished_job_registry)

    jobs = Job.fetch_many(queue.finished_job_registry.get_job_ids(),
                          connection=redis)

    return render_template(
        "tasks/finished_tasks.html",
        f_len=f_len,
        q_len=q_len,
        jobs=jobs,
    )
Example #10
def get_all_jobs(connection=None, queue_name=DEFAULT_QUEUE_NAME):
    from redis import Redis
    from rq import Queue
    from rq.job import Job
    from rq.registry import FinishedJobRegistry, FailedJobRegistry

    queue = Queue(queue_name, connection=connection or Redis())
    queued_jobs = queue.job_ids
    finished_jobs = FinishedJobRegistry(queue=queue).get_job_ids()
    failed_jobs = FailedJobRegistry(queue=queue).get_job_ids()
    return Job.fetch_many(
        # reuse the queue's connection so the no-argument case also works
        queued_jobs + finished_jobs + failed_jobs, connection=queue.connection
    )
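A hypothetical call site (the queue name and connection are assumptions):

from redis import Redis

jobs = get_all_jobs(connection=Redis(), queue_name="default")
# fetch_many leaves None placeholders for IDs whose job data has expired
print(sum(1 for job in jobs if job is not None), "jobs still present")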
Example #11
def get_jobs(queue, job_ids, registry=None):
    """Fetch jobs in bulk from Redis.
    1. If job data is not present in Redis, discard the result
    2. If `registry` argument is supplied, delete empty jobs from registry
    """
    jobs = Job.fetch_many(job_ids, connection=queue.connection)
    valid_jobs = []
    for i, job in enumerate(jobs):
        if job is None:
            if registry:
                registry.remove(job_ids[i])
        else:
            valid_jobs.append(job)

    return valid_jobs
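A hypothetical call site for `get_jobs`, listing a queue's finished jobs while pruning stale IDs from the registry (the queue name is assumed):

from redis import Redis
from rq import Queue
from rq.registry import FinishedJobRegistry

queue = Queue("default", connection=Redis())
registry = FinishedJobRegistry(queue=queue)
jobs = get_jobs(queue, registry.get_job_ids(), registry=registry)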
Example #12
def view_or_update_batch_job(batch_job_id):
    batch_job = job_repository.find_one(batch_job_id)
    if batch_job is None:
        return {
            "success": False,
            "statusCode": 400,
            "error": f"Batch job not found with id {batch_job_id}",
        }
    if batch_job["finished"]:
        return {"success": True, "body": batch_job}
    job_id_array = [job["_id"] for job in batch_job["jobs"]]
    job_array = Job.fetch_many(
        job_id_array, connection=server.get_redis().get_redis_conn()
    )

    job_array_with_meta = update_job_array_with_meta(job_array)
    job_finished = is_batch_job_finished(job_array_with_meta)
    if job_finished:
        server.get_job_queue().enqueue_job(
            post_batch_job,
            priority="low",
            args=(batch_job_id,),
            kwargs={"job_id": batch_job_id},
        )

    update_resp = job_repository.update_one(
        batch_job_id,
        {
            "finished": job_finished,
            "stats": batch_job_stats(job_array_with_meta),
            "jobs": job_array_with_meta,
        },
    )

    if update_resp.acknowledged:
        server.get_job_queue().enqueue_job(
            poll_batch_job, priority="low", args=(batch_job_id,)
        )
        updated_batch_job = job_repository.find_one(batch_job_id)
        return {"success": True, "body": updated_batch_job}
    else:
        log.error(
            f"view_or_update_batch_job : failed to update new batch job with id {batch_job_id}"
        )
        return {
            "success": False,
            "error": f"Batch job update failed with id {batch_job_id}",
        }
Example #13
File: test_job.py Project: nvie/rq
    def test_fetch_many(self):
        """Fetching many jobs at once."""
        data = {
            'func': fixtures.some_calculation,
            'args': (3, 4),
            'kwargs': dict(z=2),
            'connection': self.testconn,
        }
        job = Job.create(**data)
        job.save()

        job2 = Job.create(**data)
        job2.save()

        jobs = Job.fetch_many([job.id, job2.id, 'invalid_id'], self.testconn)
        self.assertEqual(jobs, [job, job2, None])
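As the assertion shows, `fetch_many` preserves input order and substitutes `None` for IDs it cannot find, which is why most of the examples here filter the result; a minimal sketch:

jobs = [job for job in Job.fetch_many(job_ids, connection) if job is not None]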
Example #15
def purge_failed_jobs():
    with Connection(rq_redis_connection):
        for queue in Queue.all():
            failed_job_ids = FailedJobRegistry(queue=queue).get_job_ids()
            failed_jobs = Job.fetch_many(failed_job_ids, rq_redis_connection)
            stale_jobs = [
                job for job in failed_jobs
                # skip jobs already deleted from Redis or missing ended_at;
                # total_seconds() (unlike .seconds) also counts whole days
                if job is not None and job.ended_at is not None
                and (datetime.utcnow() - job.ended_at).total_seconds()
                > settings.JOB_DEFAULT_FAILURE_TTL
            ]

            for job in stale_jobs:
                job.delete()

            if stale_jobs:
                logger.info('Purged %d old failed jobs from the %s queue.',
                            len(stale_jobs), queue.name)
Example #16
def load_jobs(job_ids: Iterable[RqJobId]) -> Iterable[Job]:
    """
    Iterate the RQ jobs whose IDs appear in ``job_ids``.
    Note that fewer jobs may be returned than the number of input job IDs,
    since other processes acting on the RQ queues may delete or purge jobs
    while this fetch operation is in progress.

    :param job_ids: the RQ job IDs of the jobs to fetch.
    :return: a generator object to iterate the jobs.
    """
    redis = redis_connection()
    splitter = IterCostSplitter(cost_fn=lambda _: 1, batch_max_cost=100)  # (1)

    for jid_batch_iter in splitter.iter_batches(job_ids):
        jid_batch = list(jid_batch_iter)  # (2)
        jobs = Job.fetch_many(job_ids=jid_batch, connection=redis)
        for j in jobs:
            if j is not None:  # (3)
                yield j
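`IterCostSplitter` is a project-specific helper; the same batched fetch can be sketched with plain slicing (the batch size of 100 is kept from the code above):

def load_jobs_simple(job_ids, redis, batch_size=100):
    ids = list(job_ids)
    for start in range(0, len(ids), batch_size):
        batch = ids[start:start + batch_size]
        for job in Job.fetch_many(job_ids=batch, connection=redis):
            if job is not None:  # skip jobs deleted while iterating
                yield job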
Example #17
File: views.py Project: Yolley/django-rq
def scheduled_jobs(request, queue_index):
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)

    registry = ScheduledJobRegistry(queue.name, queue.connection)

    items_per_page = 100
    num_jobs = len(registry)
    page = int(request.GET.get('page', 1))
    jobs = []

    if num_jobs > 0:
        last_page = int(ceil(num_jobs / items_per_page))
        page_range = range(1, last_page + 1)
        offset = items_per_page * (page - 1)
        job_ids = registry.get_job_ids(offset, offset + items_per_page - 1)

        jobs = Job.fetch_many(job_ids, connection=queue.connection)
        for i, job in enumerate(jobs):
            if job is None:
                registry.remove(job_ids[i])
            else:
                job.scheduled_at = registry.get_scheduled_time(job)

    else:
        page_range = []

    context_data = {
        'queue': queue,
        'queue_index': queue_index,
        'jobs': jobs,
        'num_jobs': num_jobs,
        'page': page,
        'page_range': page_range,
        'job_status': 'Scheduled',
    }
    return render(request, 'django_rq/jobs.html', context_data)
Example #18
    def test_all_jobs_finished_successfully(self, redis_connection):  # pylint: disable=redefined-outer-name
        """Tests all jobs finished successfully."""
        jobs = Job.fetch_many(['base-image'], connection=redis_connection)
        for job in jobs:
            assert job.get_status() == 'finished'
Example #19
async def job_status_ws_handler(redis_client: Redis,
                                request: web.Request) -> web.WebSocketResponse:
    ws = web.WebSocketResponse()
    await ws.prepare(request)
    logging.debug('Client connected to job status')

    jobs = {}
    job_status = {}

    async def check_status(job: Job) -> str:
        await asyncio.sleep(JOB_REFRESH_PERIOD)
        try:
            job.refresh()
        except NoSuchJobError:
            jobs[job.id] = None
        return job.id

    receive_msg_task = asyncio.ensure_future(ws.receive())
    pending = set([receive_msg_task])
    while not ws.closed:
        done, pending = await asyncio.wait(pending,
                                           return_when=asyncio.FIRST_COMPLETED)

        # update job list when received message
        if receive_msg_task in done:
            msg = receive_msg_task.result()
            if msg.type == WSMsgType.ERROR:
                logging.error(
                    f'WS connection closed with exception {ws.exception()}')

            elif msg.type == WSMsgType.TEXT:
                if msg.data == 'close':
                    await ws.close()

                else:
                    new_job_ids = json.loads(msg.data)
                    logging.debug('Watching new job ids: %s', new_job_ids)
                    jobs = dict(
                        zip(new_job_ids,
                            Job.fetch_many(new_job_ids, redis_client)))

                    pending = set(
                        asyncio.ensure_future(check_status(job))
                        for job in jobs.values() if job is not None)
                    receive_msg_task = asyncio.ensure_future(ws.receive())
                    pending.add(receive_msg_task)

                    job_status = {
                        job_id: job.get_status(
                            refresh=False) if job is not None else None
                        for job_id, job in jobs.items()
                    }
                    await ws.send_json(prepare_response(jobs))

        # handle job status check
        else:
            change = False
            for done_task in done:
                job_id = done_task.result()

                if jobs[job_id] is not None:
                    job = jobs[job_id]
                    new_status = job.get_status(refresh=False)
                    # wrap the coroutine in a task, as the other call sites do
                    pending.add(asyncio.ensure_future(check_status(job)))
                    if job_status[job_id] != new_status:
                        job_status[job_id] = new_status
                        change = True
                    else:
                        now = time.time()
                        if (
                                job.is_queued or job.is_started
                        ) and now - job.enqueued_at.timestamp() > TASK_LIMIT:
                            jobs[job_id].exc_info = 'task time limit exceeded'
                            change = True

                else:
                    job_status[job_id] = None
                    change = True

            if change:
                await ws.send_json(prepare_response(jobs))

    logging.debug('Client disconnected from job status')
    return ws
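A hypothetical aiohttp client for the handler above (the URL is an assumption; the payload shape follows the code: a JSON list of job IDs, or the literal string 'close'):

import asyncio
import json

import aiohttp

async def watch(job_ids):
    async with aiohttp.ClientSession() as session:
        # route and port are assumptions, not from the source
        async with session.ws_connect("http://localhost:8080/job_status") as ws:
            await ws.send_str(json.dumps(job_ids))
            async for msg in ws:
                print(msg.data)  # the server pushes a snapshot on each change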
Example #20
def _get_jobs_from_registry(registry):
    job_ids = registry.get_job_ids()
    return Job.fetch_many(job_ids, connection=conn)
Example #21
    def __get_jobs_descriptions(self, jobs):
        """Return the descriptions of the given jobs (messages)."""

        jobs = Job.fetch_many(jobs, connection=self.redis_conn)
        # fetch_many returns None for IDs it cannot find
        return [job.description for job in jobs if job is not None]
Example #22
def list_all_jobs():
    registry = FinishedJobRegistry(os.environ["RQ_QUEUE"], connection=redis)

    print(f"{len(registry.get_job_ids())} record(s) succesfully saved to './output/data.json'.")
    return Job.fetch_many(registry.get_job_ids(), connection=redis)