def cancel(task_id): """ Cancel the task that is represented by the given task_id. This method cancels only the task with given task_id, not the spawned tasks. This also updates task's state to 'canceled'. Args: task_id (str): The ID of the task you wish to cancel Raises: MissingResource: if a task with given task_id does not exist """ try: task_status = Task.objects.get(pk=task_id) except Task.DoesNotExist: raise MissingResource(task=task_id) if task_status.state in TASK_FINAL_STATES: # If the task is already done, just stop msg = _("Task [{task_id}] already in a final state: {state}") _logger.debug(msg.format(task_id=task_id, state=task_status.state)) return task_status _logger.info(_("Canceling task: {id}").format(id=task_id)) redis_conn = connection.get_redis_connection() job = Job(id=str(task_status.pk), connection=redis_conn) resource_job = Job(id=str(task_status._resource_job_id), connection=redis_conn) task_status.state = TASK_STATES.CANCELED task_status.save() try: send_stop_job_command(redis_conn, job.get_id()) send_stop_job_command(redis_conn, resource_job.get_id()) except (InvalidJobOperation, NoSuchJobError): # We don't care if the job isn't currently running when we try to cancel pass # A hack to ensure that we aren't deleting resources still being used by the workhorse time.sleep(0.5) resource_job.delete() job.delete() with transaction.atomic(): for report in task_status.progress_reports.all(): if report.state not in TASK_FINAL_STATES: report.state = TASK_STATES.CANCELED report.save() _delete_incomplete_resources(task_status) task_status.release_resources() return task_status
def cancel(task_id): """ Cancel the task that is represented by the given task_id. This method cancels only the task with given task_id, not the spawned tasks. This also updates task's state to 'canceled'. Args: task_id (str): The ID of the task you wish to cancel Raises: MissingResource: if a task with given task_id does not exist """ try: task_status = Task.objects.get(pk=task_id) except Task.DoesNotExist: raise MissingResource(task=task_id) if task_status.state in TASK_FINAL_STATES: # If the task is already done, just stop msg = _("Task [{task_id}] already in a completed state: {state}") _logger.info(msg.format(task_id=task_id, state=task_status.state)) return task_status redis_conn = connection.get_redis_connection() job = Job(id=str(task_status.pk), connection=redis_conn) resource_job = Job(id=str(task_status._resource_job_id), connection=redis_conn) if job.is_started: redis_conn.sadd(TASKING_CONSTANTS.KILL_KEY, job.get_id()) if resource_job.is_started: redis_conn.sadd(TASKING_CONSTANTS.KILL_KEY, resource_job.get_id()) resource_job.delete() job.delete() # A hack to ensure that we aren't deleting resources still being used by the workhorse time.sleep(1.5) with transaction.atomic(): task_status.state = TASK_STATES.CANCELED for report in task_status.progress_reports.all(): if report.state not in TASK_FINAL_STATES: report.state = TASK_STATES.CANCELED report.save() task_status.save() _delete_incomplete_resources(task_status) task_status.release_resources() _logger.info(_("Task canceled: {id}.").format(id=task_id)) return task_status
def reformat_job_data(job: Job):
    """
    Create a serialized version of a Job which can be consumed by DataTable
    (RQ provides ``to_dict``), including origin (queue), created_at, data, description,
    enqueued_at, started_at, ended_at, result, exc_info, timeout, result_ttl, failure_ttl,
    status, dependency_id, meta and ttl.

    :param job: Job instance to be serialized
    :return: serialized job
    """
    serialized_job = job.to_dict()
    return {
        "job_info": {
            "job_id": validate_job_data(job.get_id()),
            "job_description": validate_job_data(serialized_job.get('description')),
            "job_exc_info": validate_job_data(
                zlib.decompress(serialized_job.get('exc_info')).decode('utf-8')
                if serialized_job.get('exc_info') is not None
                else None
            ),
            "job_status": validate_job_data(serialized_job.get('status')),
            "job_queue": validate_job_data(serialized_job.get('origin')),
            "job_created_time_humanize": validate_job_data(
                serialized_job.get('created_at'),
                humanize_func=humanize.naturaltime,
                with_utcparse=True,
                relative_to_now=True,
            ),
            "job_enqueued_time_humanize": validate_job_data(
                serialized_job.get('enqueued_at'),
                humanize_func=humanize.naturaltime,
                with_utcparse=True,
                relative_to_now=True,
            ),
            "job_ttl": validate_job_data(
                serialized_job.get('ttl'), default='Infinite', append_s=True
            ),
            "job_timeout": validate_job_data(
                serialized_job.get('timeout'), default='180s', append_s=True
            ),
            "job_result_ttl": validate_job_data(
                serialized_job.get('result_ttl'), default='500s', append_s=True
            ),
            "job_fail_ttl": validate_job_data(
                serialized_job.get('failure_ttl'), default='1yr', append_s=True
            ),
        },
    }
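# A usage sketch for reformat_job_data(), assuming an RQ queue named "default" and a
# local Redis instance; each job id registered on the queue is fetched and reshaped
# into the DataTable-friendly dict returned above. The queue name and connection are
# placeholders, not part of the original module.
from redis import Redis
from rq import Queue
from rq.job import Job

_redis = Redis()
_queue = Queue("default", connection=_redis)
table_rows = [
    reformat_job_data(Job.fetch(job_id, connection=_redis))
    for job_id in _queue.get_job_ids()
]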
def cancel(task_id): """ Cancel the task that is represented by the given task_id. This method cancels only the task with given task_id, not the spawned tasks. This also updates task's state to 'canceled'. :param task_id: The ID of the task you wish to cancel :type task_id: basestring :raises MissingResource: if a task with given task_id does not exist """ try: task_status = Task.objects.get(pk=task_id) except Task.DoesNotExist: raise MissingResource(task_id) if task_status.state in TASK_FINAL_STATES: # If the task is already done, just stop msg = _('Task [{task_id}] already in a completed state: {state}') _logger.info(msg.format(task_id=task_id, state=task_status.state)) return redis_conn = connection.get_redis_connection() job = Job(id=str(task_id), connection=redis_conn) if job.is_started: redis_conn.sadd(TASKING_CONSTANTS.KILL_KEY, job.get_id()) job.delete() # A hack to ensure that we aren't deleting resources still being used by the workhorse time.sleep(1.5) with transaction.atomic(): task_status.state = TASK_STATES.CANCELED task_status.save() _delete_incomplete_resources(task_status) _logger.info(_('Task canceled: {id}.').format(id=task_id))
def cancel(task_id): """ Cancel the task that is represented by the given task_id. This method cancels only the task with given task_id, not the spawned tasks. This also updates task's state to either 'canceled' or 'canceling'. Args: task_id (str): The ID of the task you wish to cancel Raises: MissingResource: if a task with given task_id does not exist """ try: task_status = Task.objects.get(pk=task_id) except Task.DoesNotExist: raise MissingResource(task=task_id) if task_status.state in TASK_FINAL_STATES: # If the task is already done, just stop msg = _("Task [{task_id}] already in a final state: {state}") _logger.debug(msg.format(task_id=task_id, state=task_status.state)) return task_status _logger.info(_("Canceling task: {id}").format(id=task_id)) if settings.USE_NEW_WORKER_TYPE: task = task_status # This is the only valid transition without holding the task lock rows = Task.objects.filter(pk=task.pk, state__in=TASK_INCOMPLETE_STATES).update( state=TASK_STATES.CANCELING) # Notify the worker that might be running that task and other workers to clean up with db_connection.cursor() as cursor: cursor.execute("SELECT pg_notify('pulp_worker_cancel', %s)", (str(task.pk), )) cursor.execute("NOTIFY pulp_worker_wakeup") if rows == 1: task.refresh_from_db() return task redis_conn = connection.get_redis_connection() job = Job(id=str(task_status.pk), connection=redis_conn) resource_job = Job(id=str(task_status._resource_job_id), connection=redis_conn) task_status.state = TASK_STATES.CANCELED task_status.save() resource_job.cancel() job.cancel() try: send_stop_job_command(redis_conn, job.get_id()) send_stop_job_command(redis_conn, resource_job.get_id()) except (InvalidJobOperation, NoSuchJobError): # We don't care if the job isn't currently running when we try to cancel pass # A hack to ensure that we aren't deleting resources still being used by the workhorse time.sleep(0.5) with transaction.atomic(): for report in task_status.progress_reports.all(): if report.state not in TASK_FINAL_STATES: report.state = TASK_STATES.CANCELED report.save() _delete_incomplete_resources(task_status) task_status.release_resources() return task_status