def cancel(task_id):
    """
    Cancel the task identified by ``task_id``.

    Only the task with the given id is canceled; tasks it may have spawned are
    left alone. The task's state is moved to 'canceled', the backing RQ jobs are
    stopped and deleted, incomplete progress reports are marked canceled, and any
    resources the task reserved are released.

    Args:
        task_id (str): The ID of the task you wish to cancel

    Returns:
        Task: the (possibly already final) task record.

    Raises:
        MissingResource: if a task with given task_id does not exist
    """
    try:
        task = Task.objects.get(pk=task_id)
    except Task.DoesNotExist:
        raise MissingResource(task=task_id)

    # Canceling a task that already reached a final state is a no-op.
    if task.state in TASK_FINAL_STATES:
        msg = _("Task [{task_id}] already in a final state: {state}")
        _logger.debug(msg.format(task_id=task_id, state=task.state))
        return task

    _logger.info(_("Canceling task: {id}").format(id=task_id))

    redis_connection = connection.get_redis_connection()
    main_job = Job(id=str(task.pk), connection=redis_connection)
    reservation_job = Job(id=str(task._resource_job_id), connection=redis_connection)

    # Record the cancelation before touching the jobs so observers see it promptly.
    task.state = TASK_STATES.CANCELED
    task.save()

    try:
        for rq_job in (main_job, reservation_job):
            send_stop_job_command(redis_connection, rq_job.get_id())
    except (InvalidJobOperation, NoSuchJobError):
        # We don't care if the job isn't currently running when we try to cancel
        pass

    # A hack to ensure that we aren't deleting resources still being used by the workhorse
    time.sleep(0.5)

    reservation_job.delete()
    main_job.delete()

    with transaction.atomic():
        for progress_report in task.progress_reports.all():
            if progress_report.state in TASK_FINAL_STATES:
                continue
            progress_report.state = TASK_STATES.CANCELED
            progress_report.save()
        _delete_incomplete_resources(task)
        task.release_resources()
    return task
def cancel(task_id):
    """
    Cancel the task that is represented by the given task_id.

    This method cancels only the task with given task_id, not the spawned tasks. This also
    updates task's state to 'canceled'.

    Args:
        task_id (str): The ID of the task you wish to cancel

    Raises:
        MissingResource: if a task with given task_id does not exist
    """
    try:
        task_status = Task.objects.get(pk=task_id)
    except Task.DoesNotExist:
        raise MissingResource(task=task_id)

    if task_status.state in TASK_FINAL_STATES:
        # If the task is already done, just stop
        msg = _("Task [{task_id}] already in a completed state: {state}")
        _logger.info(msg.format(task_id=task_id, state=task_status.state))
        return task_status

    redis_conn = connection.get_redis_connection()
    # The RQ job id is the task's pk; _resource_job_id presumably tracks the
    # companion resource-reservation job — confirm against where it is assigned.
    job = Job(id=str(task_status.pk), connection=redis_conn)
    resource_job = Job(id=str(task_status._resource_job_id), connection=redis_conn)

    # For jobs already executing, add their ids to the KILL_KEY set — assumed to be
    # polled by the worker, which then terminates them; TODO confirm worker side.
    if job.is_started:
        redis_conn.sadd(TASKING_CONSTANTS.KILL_KEY, job.get_id())

    if resource_job.is_started:
        redis_conn.sadd(TASKING_CONSTANTS.KILL_KEY, resource_job.get_id())

    # Remove the queued/stored jobs so they can no longer be picked up.
    resource_job.delete()
    job.delete()

    # A hack to ensure that we aren't deleting resources still being used by the workhorse
    time.sleep(1.5)

    # Flip the task and its unfinished progress reports to CANCELED and clean up
    # in one transaction so a partial cancelation is never visible.
    with transaction.atomic():
        task_status.state = TASK_STATES.CANCELED
        for report in task_status.progress_reports.all():
            if report.state not in TASK_FINAL_STATES:
                report.state = TASK_STATES.CANCELED
                report.save()
        task_status.save()
        _delete_incomplete_resources(task_status)
        task_status.release_resources()

    _logger.info(_("Task canceled: {id}.").format(id=task_id))
    return task_status
def cancel(task_id):
    """
    Cancel the task identified by ``task_id``.

    Only the task with the given id is affected — spawned tasks are not touched.
    The task's state becomes either 'canceled' or 'canceling'; the actual teardown
    is delegated to the workers, which are notified via PostgreSQL channels.

    Args:
        task_id (str): The ID of the task you wish to cancel

    Returns:
        Task: the task record, refreshed if the transition was applied here.

    Raises:
        MissingResource: if a task with given task_id does not exist
    """
    try:
        task = Task.objects.get(pk=task_id)
    except Task.DoesNotExist:
        raise MissingResource(task=task_id)

    # A finished task cannot be canceled; report and bail out.
    if task.state in TASK_FINAL_STATES:
        _logger.debug(
            _("Task [{task_id}] already in a final state: {state}").format(
                task_id=task_id, state=task.state
            )
        )
        return task

    _logger.info(_("Canceling task: {id}").format(id=task_id))

    # Moving to CANCELING is the only valid transition without holding the task
    # lock; the filtered update makes it atomic against concurrent transitions.
    updated = Task.objects.filter(pk=task.pk, state__in=TASK_INCOMPLETE_STATES).update(
        state=TASK_STATES.CANCELING
    )

    # Tell the worker that may be running the task to cancel it, and wake the
    # others up so they can clean up.
    with connection.cursor() as cursor:
        cursor.execute("SELECT pg_notify('pulp_worker_cancel', %s)", (str(task.pk),))
        cursor.execute("NOTIFY pulp_worker_wakeup")

    if updated == 1:
        task.refresh_from_db()
    return task
def cancel(task_id):
    """
    Cancel the task that is represented by the given task_id.

    This method cancels only the task with given task_id, not the spawned tasks. This also
    updates task's state to either 'canceled' or 'canceling'.

    Args:
        task_id (str): The ID of the task you wish to cancel

    Raises:
        MissingResource: if a task with given task_id does not exist
    """
    try:
        task_status = Task.objects.get(pk=task_id)
    except Task.DoesNotExist:
        raise MissingResource(task=task_id)

    if task_status.state in TASK_FINAL_STATES:
        # If the task is already done, just stop
        msg = _("Task [{task_id}] already in a final state: {state}")
        _logger.debug(msg.format(task_id=task_id, state=task_status.state))
        return task_status

    _logger.info(_("Canceling task: {id}").format(id=task_id))

    # New-style (PostgreSQL-based) workers: flip the state and notify via
    # LISTEN/NOTIFY channels; the workers perform the actual teardown.
    if settings.USE_NEW_WORKER_TYPE:
        task = task_status
        # This is the only valid transition without holding the task lock
        rows = Task.objects.filter(pk=task.pk, state__in=TASK_INCOMPLETE_STATES).update(
            state=TASK_STATES.CANCELING)
        # Notify the worker that might be running that task and other workers to clean up
        with db_connection.cursor() as cursor:
            cursor.execute("SELECT pg_notify('pulp_worker_cancel', %s)", (str(task.pk), ))
            cursor.execute("NOTIFY pulp_worker_wakeup")
        if rows == 1:
            task.refresh_from_db()
        return task

    # Legacy RQ path: cancel and stop the Redis-backed jobs ourselves.
    redis_conn = connection.get_redis_connection()
    # The main RQ job id is the task's pk; _resource_job_id presumably identifies
    # the companion resource-reservation job — confirm where it is assigned.
    job = Job(id=str(task_status.pk), connection=redis_conn)
    resource_job = Job(id=str(task_status._resource_job_id), connection=redis_conn)

    task_status.state = TASK_STATES.CANCELED
    task_status.save()

    # Cancel queued jobs, then stop them in case they are already executing.
    resource_job.cancel()
    job.cancel()
    try:
        send_stop_job_command(redis_conn, job.get_id())
        send_stop_job_command(redis_conn, resource_job.get_id())
    except (InvalidJobOperation, NoSuchJobError):
        # We don't care if the job isn't currently running when we try to cancel
        pass

    # A hack to ensure that we aren't deleting resources still being used by the workhorse
    time.sleep(0.5)

    # Cancel unfinished progress reports and clean up reserved resources in one
    # transaction so a partial cancelation is never visible.
    with transaction.atomic():
        for report in task_status.progress_reports.all():
            if report.state not in TASK_FINAL_STATES:
                report.state = TASK_STATES.CANCELED
                report.save()
        _delete_incomplete_resources(task_status)
        task_status.release_resources()
    return task_status