Example #1
0
def worker(queues):
    """Run an RQ worker on *queues*, falling back to ``default_queues``."""
    # Configure all SQLAlchemy mappers loaded so far in the parent process so
    # that forked work horses inherit a ready mapping configuration instead of
    # rebuilding it on every fork.
    configure_mappers()

    queues = queues or default_queues

    with Connection(rq_redis_connection):
        Worker(
            queues,
            log_job_description=False,
            job_monitoring_interval=5,
        ).work()
Example #2
0
    def test_worker_records_failure_metrics(self, _, incr):
        """
        Superclass execute_job is patched to a no-op; the job status is forced
        to JobStatus.FAILED to simulate a query failure, and the failure
        metric increments are verified.
        """
        query = self.factory.create_query()

        with Connection(rq_redis_connection):
            metadata = {"Username": "******", "query_id": query.id}
            job = enqueue_query(
                query.query_text,
                query.data_source,
                query.user_id,
                False,
                None,
                metadata,
            )
            job.set_status(JobStatus.FAILED)

            Worker(["queries"]).work(max_jobs=1)

        expected_calls = [
            call("rq.jobs.running.queries"),
            call("rq.jobs.started.queries"),
            call("rq.jobs.running.queries", -1, 1),
            call("rq.jobs.failed.queries"),
        ]
        incr.assert_has_calls(expected_calls)
Example #3
0
    def test_worker_records_success_metrics(self, incr):
        """Enqueue a query, run one job, and verify the success metric increments."""
        query = self.factory.create_query()

        with Connection(rq_redis_connection):
            metadata = {"Username": "******", "query_id": query.id}
            enqueue_query(
                query.query_text,
                query.data_source,
                query.user_id,
                False,
                None,
                metadata,
            )

            Worker(["queries"]).work(max_jobs=1)

        expected_calls = [
            call("rq.jobs.running.queries"),
            call("rq.jobs.started.queries"),
            call("rq.jobs.running.queries", -1, 1),
            call("rq.jobs.finished.queries"),
        ]
        incr.assert_has_calls(expected_calls)
Example #4
0
    def __call__(self, process_spec):
        """Healthcheck one supervised RQ worker process.

        A worker is healthy when it is busy, was seen recently via heartbeat,
        or simply has no jobs waiting in the queues it watches.

        :param process_spec: mapping with at least a ``'pid'`` key.
        :returns: True when the worker is considered healthy.
        """
        pid = process_spec['pid']
        if not self.time_to_check(pid):
            # Not due for a check yet — report healthy without hitting Redis.
            return True

        all_workers = Worker.all(connection=rq_redis_connection)
        # NOTE(review): .pop() raises IndexError when no worker matches this
        # host/pid — presumably the supervisor guarantees one exists; confirm.
        worker = [
            w for w in all_workers
            if w.hostname == socket.gethostname().encode() and w.pid == pid
        ].pop()

        is_busy = worker.get_state() == WorkerStatus.BUSY

        time_since_seen = datetime.datetime.utcnow() - worker.last_heartbeat
        # BUG FIX: timedelta.seconds holds only the within-day remainder, so a
        # worker last seen 1 day + 5 seconds ago would read as "5 seconds ago"
        # and pass. total_seconds() measures the full elapsed interval.
        seconds_since_seen = int(time_since_seen.total_seconds())
        seen_lately = seconds_since_seen < 60

        total_jobs_in_watched_queues = sum(len(q.jobs) for q in worker.queues)
        has_nothing_to_do = total_jobs_in_watched_queues == 0

        is_healthy = is_busy or seen_lately or has_nothing_to_do

        self._log(
            "Worker %s healthcheck: Is busy? %s. "
            "Seen lately? %s (%d seconds ago). "
            "Has nothing to do? %s (%d jobs in watched queues). "
            "==> Is healthy? %s", worker.key, is_busy, seen_lately,
            seconds_since_seen, has_nothing_to_do,
            total_jobs_in_watched_queues, is_healthy)

        return is_healthy
Example #5
0
def healthcheck():
    """Exit 0 when every RQ worker on this host heartbeat within the last
    60 seconds; exit 1 otherwise."""
    hostname = socket.gethostname()
    with Connection(rq_redis_connection):
        all_workers = Worker.all()

        # NOTE(review): hostname is compared as str here, but elsewhere in
        # this file it is compared against socket.gethostname().encode()
        # (bytes) — verify which form w.hostname actually carries.
        local_workers = [w for w in all_workers if w.hostname == hostname]
        heartbeats = [w.last_heartbeat for w in local_workers]
        # BUG FIX: timedelta.seconds ignores whole days, so a worker silently
        # dead for 24h+ would still be counted as active; total_seconds()
        # measures the full elapsed interval.
        active = [
            (datetime.datetime.utcnow() - hb).total_seconds() < 60
            for hb in heartbeats
        ]

        sys.exit(int(not all(active)))