Example #1
 def test_job_times(self):
     """job times are set correctly."""
     q = Queue('foo')
     w = Worker([q])
     before = utcnow()
     before = before.replace(microsecond=0)
     job = q.enqueue(say_hello)
     self.assertIsNotNone(job.enqueued_at)
     self.assertIsNone(job.started_at)
     self.assertIsNone(job.ended_at)
     self.assertEqual(
         w.work(burst=True), True,
         'Expected at least some work done.'
     )
     self.assertEqual(job.result, 'Hi there, Stranger!')
     after = utcnow()
     job.refresh()
     self.assertTrue(
         before <= job.enqueued_at <= after,
         'Not %s <= %s <= %s' % (before, job.enqueued_at, after)
     )
     self.assertTrue(
         before <= job.started_at <= after,
         'Not %s <= %s <= %s' % (before, job.started_at, after)
     )
     self.assertTrue(
         before <= job.ended_at <= after,
         'Not %s <= %s <= %s' % (before, job.ended_at, after)
     )
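The assertions above depend on the `say_hello` test fixture; a minimal sketch that matches the asserted result string (the real fixture lives in rq's tests/fixtures.py):

    def say_hello(name=None):
        """Fixture sketch: greet whoever was enqueued, defaulting to 'Stranger'."""
        if name is None:
            name = 'Stranger'
        return 'Hi there, %s!' % name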
Example #2
 def test_work_horse_force_death(self):
     """Simulate a frozen worker that doesn't observe the timeout properly.
     Fake it by artificially setting the timeout of the parent process to
     something much smaller after the process is already forked.
     """
     fooq = Queue('foo')
     self.assertEqual(fooq.count, 0)
     w = Worker(fooq)
     sentinel_file = '/tmp/.rq_sentinel_work_horse_death'
     if os.path.exists(sentinel_file):
         os.remove(sentinel_file)
     fooq.enqueue(create_file_after_timeout, sentinel_file, 100)
     job, queue = w.dequeue_job_and_maintain_ttl(5)
     w.fork_work_horse(job, queue)
     job.timeout = 5
     w.job_monitoring_interval = 1
     now = utcnow()
     w.monitor_work_horse(job)
     fudge_factor = 1
     total_time = w.job_monitoring_interval + 65 + fudge_factor
     self.assertTrue((utcnow() - now).total_seconds() < total_time)
     self.assertEqual(job.get_status(), JobStatus.FAILED)
     failed_job_registry = FailedJobRegistry(queue=fooq)
     self.assertTrue(job in failed_job_registry)
     self.assertEqual(fooq.count, 0)
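This test assumes a `create_file_after_timeout` fixture whose sleep outlives the 5-second job timeout, so the monitor must kill the work horse before the file appears. A minimal sketch using only the standard library:

    import time

    def create_file_after_timeout(path, timeout):
        """Fixture sketch: sleep past the job timeout, then touch the file."""
        time.sleep(timeout)
        with open(path, 'w'):
            pass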
Example #3
    def monitor_work_horse(self, job):
        """The worker will monitor the work horse and make sure that it
        either executes successfully or the status of the job is set to
        failed
        """
        self.monitor_started = utcnow()
        while True:
            try:
                with UnixSignalDeathPenalty(
                    self.job_monitoring_interval, HorseMonitorTimeoutException
                ):
                    retpid, ret_val = os.waitpid(self._horse_pid, 0)
                break
            except HorseMonitorTimeoutException:
                # Horse has not exited yet and is still running.
                # Send a heartbeat to keep the worker alive.
                self.heartbeat(self.job_monitoring_interval + 5)

                job.refresh()

                if job.is_cancelled:
                    self.stop_executing_job(job)

                if self.soft_limit_exceeded(job):
                    self.enforce_hard_limit(job)
            except OSError as e:
                # In case we encountered an OSError due to EINTR (which is
                # caused by a SIGINT or SIGTERM signal during
                # os.waitpid()), we simply ignore it and enter the next
                # iteration of the loop, waiting for the child to end.  In
                # any other case, this is some other unexpected OS error,
                # which we don't want to catch, so we re-raise those ones.
                if e.errno != errno.EINTR:
                    raise
                # Send a heartbeat to keep the worker alive.
                self.heartbeat()

        if ret_val == os.EX_OK:  # The process exited normally.
            return
        job_status = job.get_status()
        if job_status is None:  # Job completed and its ttl has expired
            return
        if job_status not in [JobStatus.FINISHED, JobStatus.FAILED]:

            if not job.ended_at:
                job.ended_at = utcnow()

            # Unhandled failure: move the job to the failed queue
            self.log.warning(
                (
                    "Moving job to FailedJobRegistry "
                    "(work-horse terminated unexpectedly; waitpid returned {})"
                ).format(ret_val)
            )

            self.handle_job_failure(
                job,
                exc_string="Work-horse process was terminated unexpectedly "
                "(waitpid returned %s)" % ret_val,
            )
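`UnixSignalDeathPenalty` bounds each `os.waitpid` call so the loop wakes up once per `job_monitoring_interval`. A sketch of a SIGALRM-based context manager with that contract (rq's real class also accepts extra keyword arguments such as `job_id`):

    import signal

    class UnixSignalDeathPenalty:
        """Sketch: raise `exception` if the guarded block outlives `timeout` seconds."""

        def __init__(self, timeout, exception, **kwargs):
            self._timeout = timeout
            self._exception = exception

        def _handle_alarm(self, signum, frame):
            raise self._exception('Timed out after %d seconds' % self._timeout)

        def __enter__(self):
            signal.signal(signal.SIGALRM, self._handle_alarm)
            signal.alarm(self._timeout)
            return self

        def __exit__(self, *exc_info):
            signal.alarm(0)  # cancel any pending alarm
            signal.signal(signal.SIGALRM, signal.SIG_DFL)
            return False  # let an in-flight exception propagate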
Example #4
 def test_clean_rq(self):
     r = get_redis_connection()
     self.assertEqual(len(r.keys("rq:job:*")), 0)
     r.hmset("rq:job:abc", {"bar": "baz"})
     r.hmset("rq:job:def", {"created_at": utcformat(utcnow())})
     r.hmset("rq:job:123", {"created_at": utcformat(utcnow() - timedelta(days=10))})
     self.assertEqual(len(r.keys("rq:job:*")), 3)
     call_command("clean_rq")
     self.assertEqual(len(r.keys("rq:job:*")), 2)
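`utcnow` and `utcformat` are thin helpers from rq.utils; a sketch of the pair, assuming the microsecond-precision format used by newer rq releases (older releases dropped microseconds):

    from datetime import datetime

    def utcnow():
        return datetime.utcnow()

    def utcformat(dt):
        # Serialize to the ISO-like string stored in the Redis job hash.
        return dt.strftime('%Y-%m-%dT%H:%M:%S.%fZ')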
Example #5
    def test_should_run_maintenance_tasks(self):
        """Workers should run maintenance tasks on startup and every hour."""
        queue = Queue(connection=self.testconn)
        worker = Worker(queue)
        self.assertTrue(worker.should_run_maintenance_tasks)

        worker.last_cleaned_at = utcnow()
        self.assertFalse(worker.should_run_maintenance_tasks)
        worker.last_cleaned_at = utcnow() - timedelta(seconds=3700)
        self.assertTrue(worker.should_run_maintenance_tasks)
Example #6
    def perform_job(self, job, queue, heartbeat_ttl=None):
        """Performs the actual work of a job.  Will/should only be called
        inside the work horse's process.
        """
        self.prepare_job_execution(job, heartbeat_ttl)

        push_connection(self.connection)

        started_job_registry = StartedJobRegistry(job.origin,
                                                  self.connection,
                                                  job_class=self.job_class)

        try:
            job.started_at = utcnow()
            timeout = job.timeout or self.queue_class.DEFAULT_TIMEOUT
            with self.death_penalty_class(timeout, JobTimeoutException, job_id=job.id):
                rv = job.perform(self.workerKwargs)

            job.ended_at = utcnow()

            # Pickle the result in the same try-except block since we need
            # to use the same exc handling when pickling fails
            job._result = rv

            self.handle_job_success(job=job,
                                    queue=queue,
                                    started_job_registry=started_job_registry)
        except:
            job.ended_at = utcnow()
            self.handle_job_failure(job=job,
                                    started_job_registry=started_job_registry)
            self.handle_exception(job, *sys.exc_info())
            return False

        finally:
            pop_connection()

        self.log.info('{0}: {1} ({2})'.format(
            green(job.origin), blue('Job OK'), job.id))
        if rv is not None:
            log_result = "{0!r}".format(as_text(text_type(rv)))
            self.log.debug('Result: %s', yellow(log_result))

        if self.log_result_lifespan:
            result_ttl = job.get_result_ttl(self.default_result_ttl)
            if result_ttl == 0:
                self.log.info('Result discarded immediately')
            elif result_ttl > 0:
                self.log.info(
                    'Result is kept for {0} seconds'.format(result_ttl))
            else:
                self.log.warning(
                    'Result will never expire, clean up result key manually')

        return True
Example #7
def test_should_run_maintenance_tasks():
    """Workers should run maintenance tasks on startup and every hour."""

    queue = Queue()
    worker = Worker(queue)
    assert worker.should_run_maintenance_tasks

    worker.last_cleaned_at = utcnow()
    assert not worker.should_run_maintenance_tasks
    worker.last_cleaned_at = utcnow() - timedelta(seconds=3700)
    assert worker.should_run_maintenance_tasks
Example #8
    def test_statistics(self):
        """Successful and failed job counts are saved properly"""
        queue = Queue()
        job = queue.enqueue(div_by_zero)
        worker = Worker([queue])
        worker.register_birth()

        self.assertEqual(worker.failed_job_count, 0)
        self.assertEqual(worker.successful_job_count, 0)
        self.assertEqual(worker.total_working_time, 0)

        registry = StartedJobRegistry(connection=worker.connection)
        job.started_at = utcnow()
        job.ended_at = job.started_at + timedelta(seconds=0.75)
        worker.handle_job_failure(job)
        worker.handle_job_success(job, queue, registry)

        worker.refresh()
        self.assertEqual(worker.failed_job_count, 1)
        self.assertEqual(worker.successful_job_count, 1)
        self.assertEqual(worker.total_working_time, 1.5) # 1.5 seconds

        worker.handle_job_failure(job)
        worker.handle_job_success(job, queue, registry)

        worker.refresh()
        self.assertEqual(worker.failed_job_count, 2)
        self.assertEqual(worker.successful_job_count, 2)
        self.assertEqual(worker.total_working_time, 3.0)
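`div_by_zero` is another fixture; since it is enqueued with no arguments here, a matching sketch is simply:

    def div_by_zero():
        """Fixture sketch: always raises ZeroDivisionError."""
        return 1 / 0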
Example #9
File: worker.py  Project: dveselov/aiorq
    def clean_registries(self):
        """Runs maintenance jobs on each Queue's registries."""

        for queue in self.queues:
            logger.info('Cleaning registries for queue: %s', queue.name)
            yield from clean_registries(queue)
        self.last_cleaned_at = utcnow()
Example #10
    def test_statistics(self):
        """Successful and failed job counts are saved properly"""
        q = Queue()
        job = q.enqueue(div_by_zero)
        w = Worker([q])
        w.register_birth()

        self.assertEqual(w.failed_job_count, 0)
        self.assertEqual(w.successful_job_count, 0)
        self.assertEqual(w.total_working_time, 0)

        registry = StartedJobRegistry(connection=w.connection)
        job.started_at = utcnow()
        job.ended_at = job.started_at + timedelta(seconds=0.75)
        w.handle_job_failure(job)
        w.handle_job_success(job, q, registry)

        w.refresh()
        self.assertEqual(w.failed_job_count, 1)
        self.assertEqual(w.successful_job_count, 1)
        self.assertEqual(w.total_working_time,
                         1500000)  # 1.5 seconds in microseconds

        w.handle_job_failure(job)
        w.handle_job_success(job, q, registry)

        w.refresh()
        self.assertEqual(w.failed_job_count, 2)
        self.assertEqual(w.successful_job_count, 2)
        self.assertEqual(w.total_working_time, 3000000)
Example #11
File: worker.py  Project: dveselov/aiorq
    def should_run_maintenance_tasks(self):
        """Maintenance tasks should run on first startup or every hour."""

        if self.last_cleaned_at is None:
            return True
        if (utcnow() - self.last_cleaned_at) > timedelta(hours=1):
            return True
        return False
Example #12
    def soft_limit_exceeded(self, job):
        job_has_time_limit = job.timeout != -1

        if job_has_time_limit:
            seconds_under_monitor = (utcnow() - self.monitor_started).seconds
            return seconds_under_monitor > job.timeout + self.grace_period
        else:
            return False
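When the soft limit trips, the monitor escalates through `enforce_hard_limit`. A hedged sketch of that escalation, assuming a `kill_horse()` helper that signals the forked work horse (the real method and its logging vary across rq versions):

    def enforce_hard_limit(self, job):
        # The job outlived timeout + grace_period: stop waiting politely.
        self.log.warning('Job %s exceeded its timeout; killing work horse', job.id)
        self.kill_horse()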
Example #13
 def perform(self):  # noqa
     """Invokes the job function with the job arguments."""
     rq.job._job_stack.push(self.id)
     try:
         self._result = self.func(*self.args, **self.kwargs)
         self.ended_at = utcnow()
     finally:
         assert self.id == rq.job._job_stack.pop()
     return self._result
Example #14
def test_job_times(loop):
    """Job times are set correctly."""

    q = Queue('foo')
    w = Worker([q])
    before = utcnow().replace(microsecond=0)
    job = yield from q.enqueue(say_hello)

    assert job.enqueued_at
    assert not job.started_at
    assert not job.ended_at
    assert (yield from w.work(burst=True, loop=loop))
    assert (yield from job.result) == 'Hi there, Stranger!'

    after = utcnow().replace(microsecond=0)
    yield from job.refresh()

    assert before <= job.enqueued_at <= after
    assert before <= job.started_at <= after
    assert before <= job.ended_at <= after
Example #15
File: worker.py  Project: dveselov/aiorq
    def register_death(self):
        """Registers its own death."""

        logger.debug('Registering death')
        # We cannot use self.state = 'dead' here, because that would
        # rollback the pipeline
        pipe = self.connection.multi_exec()
        pipe.srem(self.redis_workers_keys, self.key)
        pipe.hset(self.key, 'death', utcformat(utcnow()))
        pipe.expire(self.key, 60)
        yield from pipe.execute()
Example #16
    def test_dependencies_finished_returns_false_if_unfinished_job(self):
        dependency_jobs = [Job.create(fixtures.say_hello) for _ in range(2)]

        dependency_jobs[0]._status = JobStatus.FINISHED
        dependency_jobs[0].ended_at = utcnow()
        dependency_jobs[0].save()

        dependency_jobs[1]._status = JobStatus.STARTED
        dependency_jobs[1].ended_at = None
        dependency_jobs[1].save()

        dependent_job = Job.create(func=fixtures.say_hello)
        dependent_job._dependency_ids = [job.id for job in dependency_jobs]
        dependent_job.register_dependency()

        now = utcnow()

        dependencies_finished = dependent_job.dependencies_are_met()

        self.assertFalse(dependencies_finished)
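`dependencies_are_met` can be read as "every dependency has FINISHED". A simplified, unpipelined sketch of that check (the real implementation fetches all dependency statuses in a single Redis pipeline):

    def dependencies_are_met(self):
        """Simplified sketch: the dependent job may run only once every
        dependency job has reached JobStatus.FINISHED."""
        statuses = [
            Job.fetch(dependency_id, connection=self.connection).get_status()
            for dependency_id in self._dependency_ids
        ]
        return all(status == JobStatus.FINISHED for status in statuses)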
Example #17
File: treeitem.py  Project: cedk/pootle
 def save_enqueued(self, pipe):
     """
     Preparing job to enqueue. Works via pipeline.
     Nothing done if WatchError happens while next `pipeline.execute()`.
     """
     job = self.create_job(status=JobStatus.QUEUED)
     self.set_job_params(pipeline=pipe)
     job.origin = self.origin
     job.enqueued_at = utcnow()
     if job.timeout is None:
         job.timeout = self.timeout
     job.save(pipeline=pipe)
Example #18
    def perform(self):  # noqa
        """Invokes the job function with the job arguments"""
        _job_stack.push(self.id)
        try:
            self.set_status(Status.STARTED)
            self.kwargs.update(job_id=self.id)
            newrelic_decorated_func = newrelic.agent.background_task()(self.func)
            self._result = newrelic_decorated_func(*self.args, **self.kwargs)
            # self._result = self.func(*self.args, **self.kwargs)
            self.ended_at = utcnow()
            self.set_status(Status.FINISHED)
        finally:
            assert self.id == _job_stack.pop()

        return self._result
Example #19
File: worker.py  Project: dveselov/aiorq
    def prepare_job_execution(self, job):
        """Performs misc bookkeeping like updating states prior to job
        execution.
        """

        timeout = (job.timeout or 180) + 60

        pipe = self.connection.multi_exec()
        yield from self.set_state(WorkerStatus.BUSY, pipeline=pipe)
        yield from self.set_current_job_id(job.id, pipeline=pipe)
        yield from self.heartbeat(timeout, pipeline=pipe)
        registry = StartedJobRegistry(job.origin, self.connection)
        yield from registry.add(job, timeout, pipeline=pipe)
        yield from job.set_status(JobStatus.STARTED, pipeline=pipe)
        pipe.hset(job.key, 'started_at', utcformat(utcnow()))
        yield from pipe.execute()
Example #20
File: queue.py  Project: essobi/aiorq
    def enqueue_job(self, job):
        """Enqueues a job for delayed execution."""

        # TODO: process pipeline and at_front method arguments.
        pipe = self.connection.pipeline()
        pipe.sadd(self.redis_queues_keys, self.key)
        yield from job.set_status(JobStatus.QUEUED, pipeline=pipe)

        job.origin = self.name
        job.enqueued_at = utcnow()

        # TODO: process job.timeout field.

        yield from job.save(pipeline=pipe)
        yield from pipe.execute()
        yield from self.push_job_id(job.id)
        return job
Example #21
File: worker.py  Project: dveselov/aiorq
    def register_birth(self):
        """Registers its own birth."""

        logger.debug('Registering birth of worker {0}'.format(self.name))
        another_is_present = yield from self.connection.exists(self.key)
        another_is_dead = yield from self.connection.hexists(self.key, 'death')
        if another_is_present and not another_is_dead:
            msg = 'There exists an active worker named {0!r} already'
            raise ValueError(msg.format(self.name))
        key = self.key
        queues = ','.join(self.queue_names())
        pipe = self.connection.multi_exec()
        pipe.delete(key)
        pipe.hset(key, 'birth', utcformat(utcnow()))
        pipe.hset(key, 'queues', queues)
        pipe.sadd(self.redis_workers_keys, key)
        pipe.expire(key, self.default_worker_ttl)
        yield from pipe.execute()
Example #22
    def test_dependencies_finished_returns_true_if_all_dependencies_finished(
            self):
        dependency_jobs = [Job.create(fixtures.say_hello) for _ in range(5)]

        dependent_job = Job.create(func=fixtures.say_hello)
        dependent_job._dependency_ids = [job.id for job in dependency_jobs]
        dependent_job.register_dependency()

        now = utcnow()

        # Set ended_at timestamps
        for i, job in enumerate(dependency_jobs):
            job._status = JobStatus.FINISHED
            job.ended_at = now - timedelta(seconds=i)
            job.save()

        dependencies_finished = dependent_job.dependencies_are_met()

        self.assertTrue(dependencies_finished)
Example #23
File: test_job.py  Project: f0cker/rq
    def test_persistence_of_typical_jobs(self):
        """Storing typical jobs."""
        job = Job.create(func=fixtures.some_calculation, args=(3, 4), kwargs=dict(z=2))
        job.save()

        stored_date = self.testconn.hget(job.key, 'created_at').decode('utf-8')
        self.assertEqual(stored_date, utcformat(job.created_at))

        # ... and no other keys are stored
        self.assertEqual(
            set(self.testconn.hkeys(job.key)),
            {b'created_at', b'data', b'description', b'ended_at', b'last_heartbeat', b'started_at',
             b'worker_name', b'success_callback_name', b'failure_callback_name'}
        )

        self.assertEqual(job.last_heartbeat, None)
        self.assertEqual(job.last_heartbeat, None)

        ts = utcnow()
        job.heartbeat(ts, 0)
        self.assertEqual(job.last_heartbeat, ts)
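The closing `job.heartbeat(ts, 0)` call persists the timestamp on the job hash. A sketch of that method under the same serialization assumptions as above (the real method can also run inside a pipeline and refresh registry TTLs):

    def heartbeat(self, timestamp, ttl, pipeline=None):
        # Sketch: remember the beat locally and persist it to the job hash.
        self.last_heartbeat = timestamp
        connection = pipeline if pipeline is not None else self.connection
        connection.hset(self.key, 'last_heartbeat', utcformat(self.last_heartbeat))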
Example #24
File: job.py  Project: essobi/aiorq
    def __init__(self, id=None, connection=None):

        self.connection = resolve_connection(connection)
        self._id = id
        self.created_at = utcnow()
        self._data = UNEVALUATED
        self._func_name = UNEVALUATED
        self._instance = UNEVALUATED
        self._args = UNEVALUATED
        self._kwargs = UNEVALUATED
        self.description = None
        self.origin = None
        self.enqueued_at = None
        self.started_at = None
        self.ended_at = None
        self._result = None
        self.exc_info = None
        self.timeout = None
        self.result_ttl = None
        self.ttl = None
        self._status = None
        self._dependency_id = None
        self.meta = {}
Example #25
File: web.py  Project: nugit/rq-dashboard
def list_workers():
    def get_job_dict(worker):
        j = worker.get_current_job()
        default = "No job"
        return {
            "func_name": j.func_name if j is not None else default,
            "queue": j.origin if j is not None else default,
            "args": "\n".join(str(a) for a in j._args) if j is not None else default,
            "kwargs": "\n".join("%s:%s" % (str(k), str(v)) for k, v in j._kwargs.iteritems())
            if j is not None
            else default,
            "job_id": j.id if j is not None else default,
        }

    workers = [
        dict(
            get_job_dict(worker),
            name=worker.name,
            state=worker.get_state(),
            run_time=str(utcnow() - worker.started_job_at),
        )
        for worker in Worker.all()
    ]
    return dict(workers=workers)
Example #26
    def enqueue_job(self, job, set_meta_data=True):
        """Override enqueue job to insert meta data without saving twice"""
        request_environ = {}
        job.meta["request_environ"] = request_environ

        # Add Queue key set
        added = self.connection.sadd(self.redis_queues_keys, self.key)

        # The rest of this function is copied from the RQ library.
        if set_meta_data:
            job.origin = self.name
            job.enqueued_at = utcnow()

        if job.timeout is None:
            job.timeout = self.DEFAULT_TIMEOUT
        job.save()

        if self._async:
            self.push_job_id(job.id)
        else:
            job.perform()
            job.save()

        return job
Example #27
File: worker.py  Project: dveselov/aiorq
    def perform_job(self, job, *, loop=None):
        """Performs the actual work of a job."""

        yield from self.prepare_job_execution(job)

        pipe = self.connection.multi_exec()
        started_job_registry = StartedJobRegistry(job.origin, self.connection)

        try:
            timeout = job.timeout or self.queue_class.DEFAULT_TIMEOUT
            try:
                rv = yield from asyncio.wait_for(
                    job.perform(), timeout, loop=loop)
            except asyncio.TimeoutError as error:
                raise JobTimeoutException from error

            # Pickle the result in the same try-except block since we
            # need to use the same exc handling when pickling fails
            yield from self.set_current_job_id(None, pipeline=pipe)

            result_ttl = job.get_result_ttl(self.default_result_ttl)
            if result_ttl != 0:
                job.ended_at = utcnow()
                job._status = JobStatus.FINISHED
                yield from job.save(pipeline=pipe)

                finished_job_registry = FinishedJobRegistry(
                    job.origin, self.connection)
                yield from finished_job_registry.add(job, result_ttl, pipe)

            yield from job.cleanup(result_ttl, pipeline=pipe)
            yield from started_job_registry.remove(job, pipeline=pipe)

            yield from pipe.execute()

        except Exception:
            # TODO: if `pipe.execute()` throws exception
            # `ConnectionClosedError` and we try to add actions to the
            # pipeline which was already executed then line below will
            # throw "AssertionError: Pipeline already executed. Create
            # new one."
            yield from job.set_status(JobStatus.FAILED, pipeline=pipe)
            yield from started_job_registry.remove(job, pipeline=pipe)
            yield from self.set_current_job_id(None, pipeline=pipe)
            try:
                yield from pipe.execute()
            except Exception:
                # Ensure that custom exception handlers are called
                # even if Redis is down
                pass
            yield from self.handle_exception(job, *sys.exc_info())
            return False

        logger.info('%s: %s (%s)', green(job.origin), blue('Job OK'), job.id)
        if rv:
            log_result = "{!r}".format(as_text(text_type(rv)))
            logger.debug('Result: %s', yellow(log_result))

        if result_ttl == 0:
            logger.info('Result discarded immediately')
        elif result_ttl > 0:
            logger.info('Result is kept for %s seconds', result_ttl)
        else:
            logger.warning(
                'Result will never expire, clean up result key manually')

        return True
Example #28
def package_update(context, data_dict):
    '''Update a dataset (package).

    You must be authorized to edit the dataset and the groups that it belongs
    to.

    It is recommended to call
    :py:func:`ckan.logic.action.get.package_show`, make the desired changes to
    the result, and then call ``package_update()`` with it.

    Plugins may change the parameters of this function depending on the value
    of the dataset's ``type`` attribute, see the
    :py:class:`~ckan.plugins.interfaces.IDatasetForm` plugin interface.

    For further parameters see
    :py:func:`~ckan.logic.action.create.package_create`.

    :param id: the name or id of the dataset to update
    :type id: string

    :returns: the updated dataset (if ``'return_package_dict'`` is ``True`` in
              the context, which is the default. Otherwise returns just the
              dataset id)
    :rtype: dictionary

    '''

    process_batch_mode(context, data_dict)
    process_skip_validation(context, data_dict)

    model = context['model']
    user = context['user']
    name_or_id = data_dict.get('id') or data_dict.get('name')
    if name_or_id is None:
        raise logic.ValidationError({'id': _('Missing value')})

    pkg = model.Package.get(name_or_id)
    if pkg is None:
        raise logic.NotFound(_('Package was not found.'))
    context["package"] = pkg
    data_dict["id"] = pkg.id
    data_dict['type'] = pkg.type
    if 'groups' in data_dict:
        data_dict['solr_additions'] = helpers.build_additions(
            data_dict['groups'])

    if 'dataset_confirm_freshness' in data_dict and data_dict[
            'dataset_confirm_freshness'] == 'on':
        data_dict['review_date'] = utcnow()

    _check_access('package_update', context, data_dict)

    # get the schema
    package_plugin = lib_plugins.lookup_package_plugin(pkg.type)
    if 'schema' in context:
        schema = context['schema']
    else:
        schema = package_plugin.update_package_schema()

    if 'api_version' not in context:
        # check_data_dict() is deprecated. If the package_plugin has a
        # check_data_dict() we'll call it, if it doesn't have the method we'll
        # do nothing.
        check_data_dict = getattr(package_plugin, 'check_data_dict', None)
        if check_data_dict:
            try:
                package_plugin.check_data_dict(data_dict, schema)
            except TypeError:
                # Old plugins do not support passing the schema so we need
                # to ensure they still work.
                package_plugin.check_data_dict(data_dict)

    # Inject the existing package_creator as it should not be modifiable
    if hasattr(pkg, 'extras'):
        data_dict['package_creator'] = pkg.extras.get(
            'package_creator', data_dict.get('package_creator'))

    # Inject a code representing the batch within which this dataset was modified
    # KEEP_OLD - keep the code before this update
    # DONT_GROUP - don't use any code
    if context.get('batch_mode') == 'KEEP_OLD':
        try:
            batch_extras = pkg._extras.get('batch')
            if batch_extras and batch_extras.state == 'active':
                data_dict['batch'] = batch_extras.value
        except Exception as e:
            log.info(str(e))
Example #29
 def soft_limit_exceeded(self, job):
     seconds_under_monitor = (utcnow() - self.monitor_started).seconds
     return seconds_under_monitor > job.timeout + self.grace_period
Example #30
    def perform_job(self, job):
        """Performs the actual work of a job.  Will/should only be called
        inside the work horse's process.
        """
        self.prepare_job_execution(job)

        with self.connection._pipeline() as pipeline:
            started_job_registry = StartedJobRegistry(job.origin,
                                                      self.connection)

            try:
                logging.debug('perform_job in sw')
                job.matlab_engine = self.matlab_engine
                logging.debug('pj engine:' + str(self.matlab_engine))
                #   logging.debug('pj args,kwargs:'+str(job._args)+','+str(job._kwargs))
                if len(job._args) > 0:
                    new_args = (self.matlab_engine, ) + job._args
                    logging.debug('tg pj  new args:' + str(new_args))
                    job._args = new_args
                elif len(job._kwargs) > 0:
                    job._kwargs['matlab_engine'] = self.matlab_engine
                    logging.debug('tg pj new kwargs:' + str(job._kwargs))
                with self.death_penalty_class(
                        job.timeout or self.queue_class.DEFAULT_TIMEOUT):
                    rv = job.perform()
                # Pickle the result in the same try-except block since we need
                # to use the same exc handling when pickling fails
                job._result = rv

                self.set_current_job_id(None, pipeline=pipeline)

                result_ttl = job.get_result_ttl(self.default_result_ttl)
                if result_ttl != 0:
                    job.ended_at = utcnow()
                    job._status = JobStatus.FINISHED
                    job.save(pipeline=pipeline)

                    finished_job_registry = FinishedJobRegistry(
                        job.origin, self.connection)
                    finished_job_registry.add(job, result_ttl, pipeline)

                job.cleanup(result_ttl, pipeline=pipeline)
                started_job_registry.remove(job, pipeline=pipeline)

                pipeline.execute()

            except Exception:
                job.set_status(JobStatus.FAILED, pipeline=pipeline)
                started_job_registry.remove(job, pipeline=pipeline)
                try:
                    pipeline.execute()
                except Exception:
                    pass
                self.handle_exception(job, *sys.exc_info())
                return False

        if rv is None:
            self.log.info('Job OK')
        else:
            self.log.info('Job OK, result = {0!r}'.format(yellow(
                text_type(rv))))

        if result_ttl == 0:
            self.log.info('Result discarded immediately')
        elif result_ttl > 0:
            self.log.info('Result is kept for {0} seconds'.format(result_ttl))
        else:
            self.log.warning(
                'Result will never expire, clean up result key manually')

        return True