Example #1
 def test_add(self):
     """Adding a job to DeferredJobsRegistry."""
     job = Job()
     self.registry.add(job)
     job_ids = [
         as_text(job_id)
         for job_id in self.testconn.zrange(self.registry.key, 0, -1)
     ]
     self.assertEqual(job_ids, [job.id])
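The fixture behind this test isn't shown; a minimal sketch of the setup it presumably assumes (a Redis connection plus rq's DeferredJobRegistry bound to it; the names mirror the test but are assumptions):

from redis import Redis
from rq.registry import DeferredJobRegistry

testconn = Redis()
# DeferredJobRegistry wraps a Redis sorted set; registry.key is the
# sorted-set key the test scans with zrange(), e.g. "rq:deferred:default".
registry = DeferredJobRegistry(connection=testconn)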
Example #2
def get_codename(job_id):
    job = Job(job_id, connection=conn)

    if job.is_finished:
        participant = Participant.query.filter_by(code=job_id)
        participant.update({'code': str(job.result)})
        db.session.commit()
        return 'Complete', 200
    return 'Pending', 202
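For context, a hedged sketch of the producer side this endpoint implies: some view enqueues a codename-generating task and hands the job id to the client, which then polls get_codename(). generate_codename is a hypothetical stand-in for the real task:

from redis import Redis
from rq import Queue

def generate_codename():
    # hypothetical task; the real one presumably computes a codename
    return 'red-falcon'

conn = Redis()
q = Queue(connection=conn)
job = q.enqueue(generate_codename)
# hand job.id to the client, which polls get_codename(job.id)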
Example #3
    def test_job_get_position(self):
        queue = Queue(connection=self.testconn)
        job = queue.enqueue(fixtures.say_hello)
        job2 = queue.enqueue(fixtures.say_hello)
        job3 = Job(fixtures.say_hello)

        self.assertEqual(0, job.get_position())
        self.assertEqual(1, job2.get_position())
        self.assertEqual(None, job3.get_position())
Example #4
    def test_data_property_sets_job_properties(self):
        """Job tuple gets derived lazily from data property."""
        job = Job()
        job.data = dumps(('foo', None, (1, 2, 3), {'bar': 'qux'}))

        self.assertEqual(job.func_name, 'foo')
        self.assertEqual(job.instance, None)
        self.assertEqual(job.args, (1, 2, 3))
        self.assertEqual(job.kwargs, {'bar': 'qux'})
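The dumps here is presumably pickle's, since rq stores Job.data as a pickled (func_name, instance, args, kwargs) tuple; a standalone round-trip of the same payload:

from pickle import dumps, loads

payload = dumps(('foo', None, (1, 2, 3), {'bar': 'qux'}))
func_name, instance, args, kwargs = loads(payload)
assert (func_name, instance, args, kwargs) == ('foo', None, (1, 2, 3), {'bar': 'qux'})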
Example #5
def _enqueue_with_reservation(
    func, resources, args=None, kwargs=None, options=None, task_group=None
):
    if not args:
        args = tuple()
    if not kwargs:
        kwargs = dict()
    if not options:
        options = dict()

    def as_url(r):
        if isinstance(r, str):
            return r
        if isinstance(r, Model):
            return util.get_url(r)
        raise ValueError(_("Must be (str|Model)"))

    resources = {as_url(r) for r in resources}
    inner_task_id = str(uuid.uuid4())
    resource_task_id = str(uuid.uuid4())
    redis_conn = connection.get_redis_connection()
    current_job = get_current_job(connection=redis_conn)
    parent_kwarg = {}
    # Round-trip args/kwargs through JSON purely to surface warnings for
    # values that aren't JSON-serializable; the encoded results are discarded.
    json.dumps(args, cls=NonJSONWarningEncoder)
    json.dumps(kwargs, cls=NonJSONWarningEncoder)
    if current_job:
        # set the parent task of the spawned task to the current task ID (same as rq Job ID)
        parent_kwarg["parent_task"] = Task.objects.get(pk=current_job.id)

    with transaction.atomic():
        task = Task.objects.create(
            pk=inner_task_id,
            _resource_job_id=resource_task_id,
            state=TASK_STATES.WAITING,
            logging_cid=(GuidMiddleware.get_guid() or ""),
            task_group=task_group,
            name=f"{func.__module__}.{func.__name__}",
            **parent_kwarg,
        )
        for resource in resources:
            reservation_record = ReservedResourceRecord.objects.get_or_create(resource=resource)[0]
            TaskReservedResourceRecord.objects.create(resource=reservation_record, task=task)

        task_args = (func, inner_task_id, list(resources), args, kwargs, options)
        try:
            q = Queue("resource-manager", connection=redis_conn)
            q.enqueue(
                _queue_reserved_task,
                job_id=resource_task_id,
                args=task_args,
                job_timeout=TASK_TIMEOUT,
            )
        except RedisConnectionError as e:
            task.set_failed(e, None)

    return Job(id=inner_task_id, connection=redis_conn)
Example #6
    def test_job_properties_set_data_property(self):
        """Data property gets derived from the job tuple."""
        job = Job()
        job.func_name = 'foo'
        fname, instance, args, kwargs = loads(job.data)

        self.assertEqual(fname, job.func_name)
        self.assertEqual(instance, None)
        self.assertEqual(args, ())
        self.assertEqual(kwargs, {})
Example #7
 def __init__(self, id, connection):
     self.id = id
     self.func = None
     self.instance = None
     self.keys = None
     self.decrement = None
     self.depends_on = None
     self.origin = None
     self.timeout = None
     self.connection = connection
     self.job = Job(id=id, connection=self.connection)
Example #8
    def test_persistence_of_empty_jobs(self):  # noqa
        """Storing empty jobs."""
        job = Job()
        job.save()

        expected_date = strip_milliseconds(job.created_at)
        stored_date = self.testconn.hget(job.key, 'created_at')
        self.assertEquals(times.to_universal(stored_date), expected_date)

        # ... and no other keys are stored
        self.assertItemsEqual(self.testconn.hkeys(job.key), ['created_at'])
Example #9
def job_status(job_id):
    """
    This function will return the status of a given job id in the queue
    """
    rq_job = Job(id=job_id, connection=redis_db)
    print(rq_job.get_status())
    print(rq_job.started_at)
    print(rq_job.ended_at)

    res = {'Job ID': job_id, 'Status': rq_job.get_status()}
    return json.dumps(res)
Example #10
def print_status(request, id1):
    #id2=request.GET.get(id1,"")
    #x=id1.id
    #use_connection()
    #queue=Queue(connection=Redis())
    job = Job.fetch(id1)  # fetch() is a classmethod; loads the job via the default/global connection
    if job.is_finished:
        return HttpResponseRedirect(reverse("done", args=(id1, )))
    elif job.is_failed:
        return HttpResponse("failed")
    else:
        return render(request, "print_status.html", {'id': id1})
Example #11
def _enqueue_with_reservation(func,
                              resources,
                              args=None,
                              kwargs=None,
                              options=None,
                              task_group=None):
    if not args:
        args = tuple()
    if not kwargs:
        kwargs = dict()
    if not options:
        options = dict()

    resources = _validate_and_get_resources(resources)
    inner_task_id = str(uuid.uuid4())
    resource_task_id = str(uuid.uuid4())
    args_as_json = json.dumps(args, cls=UUIDEncoder)
    kwargs_as_json = json.dumps(kwargs, cls=UUIDEncoder)
    redis_conn = connection.get_redis_connection()
    current_job = get_current_job(connection=redis_conn)
    parent_kwarg = {}
    if current_job:
        # set the parent task of the spawned task to the current task ID (same as rq Job ID)
        parent_kwarg["parent_task"] = Task.objects.get(pk=current_job.id)

    with transaction.atomic():
        task = Task.objects.create(
            pk=inner_task_id,
            _resource_job_id=resource_task_id,
            state=TASK_STATES.WAITING,
            logging_cid=(get_guid() or ""),
            task_group=task_group,
            name=f"{func.__module__}.{func.__name__}",
            args=args_as_json,
            kwargs=kwargs_as_json,
            reserved_resources_record=resources,
            **parent_kwarg,
        )

        task_args = (func, inner_task_id, resources, args, kwargs, options)
        try:
            q = Queue("resource-manager", connection=redis_conn)
            q.enqueue(
                _queue_reserved_task,
                job_id=resource_task_id,
                args=task_args,
                job_timeout=TASK_TIMEOUT,
            )
        except RedisConnectionError as e:
            task.set_failed(e, None)

    return Job(id=inner_task_id, connection=redis_conn)
Example #12
    def test_add_and_remove(self):
        """Adding and removing job to StartedJobRegistry."""
        timestamp = current_timestamp()
        job = Job()

        # Test that job is added with the right score
        self.registry.add(job, 1000)
        self.assertLess(self.testconn.zscore(self.registry.key, job.id),
                        timestamp + 1002)

        # Ensure that job is properly removed from sorted set
        self.registry.remove(job)
        self.assertIsNone(self.testconn.zscore(self.registry.key, job.id))
Example #13
 def get_results(job_id):
 
     logger.debug('Received request: {}'.format(request))
     job = Job(job_id, connection=conn)
 
     if job.is_finished:
         return jsonify({'result': str(job.result)})
     elif job.is_failed:
         return jsonify({'status': 'failed', 'result': job.exc_info})
     elif job.get_status() == 'started':
         return jsonify({'status': 'running'})
     else:
         return jsonify({'status': 'error', 'result': job.exc_info})
Example #14
 def jobs(cls, queuename=None):
     cls.connect()
     if queuename:
         queue = Queue(queuename)
         if queuename != 'success':
             queue.compact()
             return [serialize_job(Job(id)) for id in queue.job_ids]
         else:
             return cls.successful_jobs()
     else:
         j = {}
         for queue in cls.queues():
             n = queue.get('name')
             j[n] = cls.jobs(n)
         return j
Example #15
    def create(cls, func, instance, keys, decrement, connection, origin, timeout):
        """
        Creates object and initializes Job ID
        """
        job_wrapper = cls(None, connection)
        job_wrapper.job = Job(connection=connection)
        job_wrapper.id = job_wrapper.job.id
        job_wrapper.func = func
        job_wrapper.instance = instance
        job_wrapper.keys = keys
        job_wrapper.decrement = decrement
        job_wrapper.connection = connection
        job_wrapper.origin = origin
        job_wrapper.timeout = timeout

        return job_wrapper
Example #16
def get_results(job_key: str):
    job = Job(job_key, connection=redis_conn)
    if not job.is_finished:
        return "Nay!", 202

    if isinstance(job.result, dict):
        print(job.result)
        # Should be 598/599 but werkzeug does not have them implemented
        # https://www.restapitutorial.com/httpstatuscodes.html
        return abort(504)

    obj: Result = Result.query.filter_by(id=job.result).first()
    results = sorted(obj.result_no_stop_words.items(),
                     key=operator.itemgetter(1),
                     reverse=True)[:10]
    return jsonify(results)
Example #17
    def test_create_empty_job(self):
        """Creation of new empty jobs."""
        job = Job()

        # Jobs have a random UUID and a creation date
        self.assertIsNotNone(job.id)
        self.assertIsNotNone(job.created_at)

        # ...and nothing else
        self.assertIsNone(job.func)
        self.assertIsNone(job.args)
        self.assertIsNone(job.kwargs)
        self.assertIsNone(job.origin)
        self.assertIsNone(job.enqueued_at)
        self.assertIsNone(job.ended_at)
        self.assertIsNone(job.return_value)
        self.assertIsNone(job.exc_info)
Example #18
def enqueue_with_reservation(func,
                             resources,
                             args=None,
                             kwargs=None,
                             options=None):
    """
    Enqueue a message to Pulp workers with a reservation.

    This method provides normal enqueue functionality, while also requesting necessary locks for
    serialized urls. No two tasks that claim the same resource can execute concurrently. It
    accepts resources which it transforms into a list of urls (one for each resource).

    This does not dispatch the task directly, but instead promises to dispatch it later by
    encapsulating the desired task through a call to a :func:`_queue_reserved_task` task. See
    the docblock on :func:`_queue_reserved_task` for more information on this.

    This method creates a :class:`pulpcore.app.models.Task` object. Pulp expects to poll on a
    task just after calling this method, so a Task entry needs to exist for it
    before it returns.

    Args:
        func (callable): The function to be run by RQ when the necessary locks are acquired.
        resources (list): A list of resources to reserve guaranteeing that only one task
            reserves these resources
        args (tuple): The positional arguments to pass on to the task.
        kwargs (dict): The keyword arguments to pass on to the task.
        options (dict): The options to be passed on to the task.

    Returns (rq.job.job): An RQ Job instance as returned by RQ's enqueue function
    """
    if not args:
        args = tuple()
    if not kwargs:
        kwargs = dict()
    if not options:
        options = dict()

    resources = {util.get_url(resource) for resource in resources}
    inner_task_id = str(uuid.uuid4())
    Task.objects.create(pk=inner_task_id, state=TASK_STATES.WAITING)
    redis_conn = connection.get_redis_connection()
    q = Queue('resource_manager', connection=redis_conn)
    task_args = (func, inner_task_id, list(resources), args, kwargs, options)
    q.enqueue(_queue_reserved_task, args=task_args, timeout=TASK_TIMEOUT)
    return Job(id=inner_task_id, connection=redis_conn)
Example #19
    def test_create_empty_job(self):
        """Creation of new empty jobs."""
        job = Job()

        # Jobs have a random UUID and a creation date
        self.assertIsNotNone(job.id)
        self.assertIsNotNone(job.created_at)

        # ...and nothing else
        self.assertIsNone(job.func)
        self.assertIsNone(job.instance)
        self.assertIsNone(job.args)
        self.assertIsNone(job.kwargs)
        self.assertIsNone(job.origin)
        self.assertIsNone(job.enqueued_at)
        self.assertIsNone(job.ended_at)
        self.assertIsNone(job.result)
        self.assertIsNone(job.exc_info)
Example #20
def cancel(task_id):
    """
    Cancel the task that is represented by the given task_id.

    This method cancels only the task with given task_id, not the spawned tasks. This also updates
    task's state to 'canceled'.

    :param task_id: The ID of the task you wish to cancel
    :type  task_id: basestring

    :raises MissingResource: if a task with given task_id does not exist
    """
    try:
        task_status = Task.objects.get(pk=task_id)
    except Task.DoesNotExist:
        raise MissingResource(task=task_id)

    if task_status.state in TASK_FINAL_STATES:
        # If the task is already done, just stop
        msg = _('Task [{task_id}] already in a completed state: {state}')
        _logger.info(msg.format(task_id=task_id, state=task_status.state))
        return

    redis_conn = connection.get_redis_connection()
    job = Job(id=str(task_status.pk), connection=redis_conn)

    if job.is_started:
        redis_conn.sadd(TASKING_CONSTANTS.KILL_KEY, job.get_id())
    job.delete()

    # A hack to ensure that we aren't deleting resources still being used by the workhorse
    time.sleep(1.5)

    with transaction.atomic():
        task_status.state = TASK_STATES.CANCELED
        task_status.save()
        _delete_incomplete_resources(task_status)
        task_status.release_resources()

    _logger.info(_('Task canceled: {id}.').format(id=task_id))
    return task_status
Example #21
    def job(self, *args):
        args = list(args)
        id = args.pop(0)

        Data.connect()

        if not args:
            return Data.job(id)
        else:
            action = args.pop(0)
        if action == 'requeue':
            requeue_job(id)
        elif action == 'delete':
            try:
                Job(id).refresh()
                cancel_job(id)
            except Exception as e:
                if RAISE_EXCEPTIONS:
                    raise e
                else:
                    logger.warning(e)
Example #22
 def to_job(jid):
     return None if jid is None else Job(id=jid, connection=connection)
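A standalone sketch of how this small helper behaves, assuming connection is a redis.Redis instance in the enclosing scope:

from redis import Redis
from rq.job import Job

connection = Redis()

def to_job(jid):
    return None if jid is None else Job(id=jid, connection=connection)

print(to_job(None))           # None propagates through unchanged
print(to_job('abc123').id)    # 'abc123' wrapped in a lazy Job handle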
Example #23
import sys
import os
import time
from redis import Redis
from rq import Queue
from rq.job import Job

_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, _dir)

if __name__ == "__main__":
    from handlers import count_words_at_url
    conn = Redis(host="192.168.59.103")
    q = Queue(connection=conn)
    job = q.enqueue(count_words_at_url, 'http://www.baidu.com')

    print "job id is ", job.id

    waiting_job = Job(connection=conn, id=job.id)

    while True:
        if waiting_job.is_queued:
            print "job is in queue"
        if waiting_job.is_started:
            print "job has been started"
        if waiting_job.is_finished:
            print "job has finished"
            print "job result is", waiting_job.result
            break
        time.sleep(1)
Example #24
def cancel(task_id):
    """
    Cancel the task that is represented by the given task_id.

    This method cancels only the task with given task_id, not the spawned tasks. This also updates
    task's state to either 'canceled' or 'canceling'.

    Args:
        task_id (str): The ID of the task you wish to cancel

    Raises:
        MissingResource: if a task with given task_id does not exist
    """
    try:
        task_status = Task.objects.get(pk=task_id)
    except Task.DoesNotExist:
        raise MissingResource(task=task_id)

    if task_status.state in TASK_FINAL_STATES:
        # If the task is already done, just stop
        msg = _("Task [{task_id}] already in a final state: {state}")
        _logger.debug(msg.format(task_id=task_id, state=task_status.state))
        return task_status

    _logger.info(_("Canceling task: {id}").format(id=task_id))

    if settings.USE_NEW_WORKER_TYPE:
        task = task_status
        # This is the only valid transition without holding the task lock
        rows = Task.objects.filter(pk=task.pk,
                                   state__in=TASK_INCOMPLETE_STATES).update(
                                       state=TASK_STATES.CANCELING)
        # Notify the worker that might be running that task and other workers to clean up
        with db_connection.cursor() as cursor:
            cursor.execute("SELECT pg_notify('pulp_worker_cancel', %s)",
                           (str(task.pk), ))
            cursor.execute("NOTIFY pulp_worker_wakeup")
        if rows == 1:
            task.refresh_from_db()
        return task

    redis_conn = connection.get_redis_connection()
    job = Job(id=str(task_status.pk), connection=redis_conn)
    resource_job = Job(id=str(task_status._resource_job_id),
                       connection=redis_conn)

    task_status.state = TASK_STATES.CANCELED
    task_status.save()

    resource_job.cancel()
    job.cancel()

    try:
        send_stop_job_command(redis_conn, job.get_id())
        send_stop_job_command(redis_conn, resource_job.get_id())
    except (InvalidJobOperation, NoSuchJobError):
        # We don't care if the job isn't currently running when we try to cancel
        pass

    # A hack to ensure that we aren't deleting resources still being used by the workhorse
    time.sleep(0.5)

    with transaction.atomic():
        for report in task_status.progress_reports.all():
            if report.state not in TASK_FINAL_STATES:
                report.state = TASK_STATES.CANCELED
                report.save()
        _delete_incomplete_resources(task_status)
        task_status.release_resources()

    return task_status
Example #25
def enqueue_with_reservation(
    func, resources, args=None, kwargs=None, options=None, task_group=None
):
    """
    Enqueue a message to Pulp workers with a reservation.

    This method provides normal enqueue functionality, while also requesting necessary locks for
    serialized urls. No two tasks that claim the same resource can execute concurrently. It
    accepts resources which it transforms into a list of urls (one for each resource).

    This does not dispatch the task directly, but instead promises to dispatch it later by
    encapsulating the desired task through a call to a :func:`_queue_reserved_task` task. See
    the docblock on :func:`_queue_reserved_task` for more information on this.

    This method creates a :class:`pulpcore.app.models.Task` object. Pulp expects to poll on a
    task just after calling this method, so a Task entry needs to exist for it
    before it returns.

    Args:
        func (callable): The function to be run by RQ when the necessary locks are acquired.
        resources (list): A list of resources to reserve guaranteeing that only one task reserves
                          these resources. Each resource can be either a (str) resource URL or a
                          (django.models.Model) resource instance.
        args (tuple): The positional arguments to pass on to the task.
        kwargs (dict): The keyword arguments to pass on to the task.
        options (dict): The options to be passed on to the task.
        task_group (pulpcore.app.models.TaskGroup): A TaskGroup to add the created Task to.

    Returns (rq.job.job): An RQ Job instance as returned by RQ's enqueue function

    Raises:
        ValueError: When `resources` is an unsupported type.
    """
    if not args:
        args = tuple()
    if not kwargs:
        kwargs = dict()
    if not options:
        options = dict()

    def as_url(r):
        if isinstance(r, str):
            return r
        if isinstance(r, Model):
            return util.get_url(r)
        raise ValueError(_("Must be (str|Model)"))

    resources = {as_url(r) for r in resources}
    inner_task_id = str(uuid.uuid4())
    resource_task_id = str(uuid.uuid4())
    redis_conn = connection.get_redis_connection()
    current_job = get_current_job(connection=redis_conn)
    parent_kwarg = {}
    if current_job:
        # set the parent task of the spawned task to the current task ID (same as rq Job ID)
        parent_kwarg["parent_task"] = Task.objects.get(pk=current_job.id)

    with transaction.atomic():
        task = Task.objects.create(
            pk=inner_task_id,
            _resource_job_id=resource_task_id,
            state=TASK_STATES.WAITING,
            logging_cid=(GuidMiddleware.get_guid() or ""),
            task_group=task_group,
            name=f"{func.__module__}.{func.__name__}",
            **parent_kwarg,
        )
        for resource in resources:
            reservation_record = ReservedResourceRecord.objects.get_or_create(resource=resource)[0]
            TaskReservedResourceRecord.objects.create(resource=reservation_record, task=task)

        task_args = (func, inner_task_id, list(resources), args, kwargs, options)
        try:
            q = Queue("resource-manager", connection=redis_conn)
            q.enqueue(
                _queue_reserved_task,
                job_id=resource_task_id,
                args=task_args,
                job_timeout=TASK_TIMEOUT,
            )
        except RedisConnectionError as e:
            task.set_failed(e, None)

    return Job(id=inner_task_id, connection=redis_conn)
Example #26
 def test_persistence_of_empty_jobs(self):  # noqa
     """Storing empty jobs."""
     job = Job()
     with self.assertRaises(ValueError):
         job.save()
Example #27
def enqueue_with_reservation(func,
                             resources,
                             args=None,
                             kwargs=None,
                             options=None):
    """
    Enqueue a message to Pulp workers with a reservation.

    This method provides normal enqueue functionality, while also requesting necessary locks for
    serialized urls. No two tasks that claim the same resource can execute concurrently. It
    accepts resources which it transforms into a list of urls (one for each resource).

    This does not dispatch the task directly, but instead promises to dispatch it later by
    encapsulating the desired task through a call to a :func:`_queue_reserved_task` task. See
    the docblock on :func:`_queue_reserved_task` for more information on this.

    This method creates a :class:`pulpcore.app.models.Task` object. Pulp expects to poll on a
    task just after calling this method, so a Task entry needs to exist for it
    before it returns.

    Args:
        func (callable): The function to be run by RQ when the necessary locks are acquired.
        resources (list): A list of resources to reserve guaranteeing that only one task reserves
                          these resources. Each resource can be either a (str) resource URL or a
                          (django.models.Model) resource instance.
        args (tuple): The positional arguments to pass on to the task.
        kwargs (dict): The keyword arguments to pass on to the task.
        options (dict): The options to be passed on to the task.

    Returns (rq.job.job): An RQ Job instance as returned by RQ's enqueue function

    Raises:
        ValueError: When `resources` is an unsupported type.
    """
    if not args:
        args = tuple()
    if not kwargs:
        kwargs = dict()
    if not options:
        options = dict()

    def as_url(r):
        if isinstance(r, str):
            return r
        if isinstance(r, Model):
            return util.get_url(r)
        raise ValueError(_('Must be (str|Model)'))

    resources = {as_url(r) for r in resources}
    inner_task_id = str(uuid.uuid4())
    redis_conn = connection.get_redis_connection()
    current_job = get_current_job(connection=redis_conn)
    parent_kwarg = {}
    if current_job:
        current_task = Task.objects.get(pk=current_job.id)
        parent_kwarg['parent'] = current_task
    Task.objects.create(pk=inner_task_id,
                        state=TASK_STATES.WAITING,
                        name=f'{func.__module__}.{func.__name__}',
                        **parent_kwarg)
    q = Queue('resource-manager', connection=redis_conn)
    task_args = (func, inner_task_id, list(resources), args, kwargs, options)
    q.enqueue(_queue_reserved_task, args=task_args, timeout=TASK_TIMEOUT)
    return Job(id=inner_task_id, connection=redis_conn)
Example #28
    def test_dependencies_key_should_have_prefixed_job_id(self):
        job_id = 'random'
        job = Job(id=job_id)
        expected_key = Job.redis_job_namespace_prefix + ":" + job_id + ':dependencies'

        assert job.dependencies_key == expected_key
Example #29
def job_status(job_id):
    rq_job = Job(id=job_id, connection=redis_db)
    print(rq_job.get_status())
    print(rq_job)
    res = {'Job ID': job_id, 'Status': rq_job.get_status()}
    return json.dumps(res)
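One caveat on the job_status helpers in Examples #9 and #29: Job(id=..., connection=...) builds a lazy handle without loading the stored hash, so attributes like started_at and ended_at stay None until the job is refreshed. A hedged sketch of a fetch-based variant (assumes a local Redis and an existing job id):

from redis import Redis
from rq.job import Job

redis_db = Redis()

def job_status(job_id):
    # Job.fetch() loads the stored hash, populating the timing attributes
    # that a bare Job(id=...) handle leaves as None until refresh().
    rq_job = Job.fetch(job_id, connection=redis_db)
    return {'Job ID': job_id,
            'Status': rq_job.get_status(),
            'Started': rq_job.started_at,
            'Ended': rq_job.ended_at}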