def cancel(task_id):
    """
    Cancel the task that is represented by the given task_id.

    This method cancels only the task with the given task_id, not the spawned tasks. This
    also updates the task's state to 'canceled'.

    Args:
        task_id (str): The ID of the task you wish to cancel

    Raises:
        MissingResource: if a task with given task_id does not exist
    """
    try:
        task_status = Task.objects.get(pk=task_id)
    except Task.DoesNotExist:
        raise MissingResource(task=task_id)

    if task_status.state in TASK_FINAL_STATES:
        # If the task is already done, just stop
        msg = _("Task [{task_id}] already in a final state: {state}")
        _logger.debug(msg.format(task_id=task_id, state=task_status.state))
        return task_status

    _logger.info(_("Canceling task: {id}").format(id=task_id))

    redis_conn = connection.get_redis_connection()
    job = Job(id=str(task_status.pk), connection=redis_conn)
    resource_job = Job(id=str(task_status._resource_job_id), connection=redis_conn)

    task_status.state = TASK_STATES.CANCELED
    task_status.save()

    try:
        send_stop_job_command(redis_conn, job.get_id())
        send_stop_job_command(redis_conn, resource_job.get_id())
    except (InvalidJobOperation, NoSuchJobError):
        # We don't care if the job isn't currently running when we try to cancel
        pass

    # A hack to ensure that we aren't deleting resources still being used by the workhorse
    time.sleep(0.5)

    resource_job.delete()
    job.delete()

    with transaction.atomic():
        for report in task_status.progress_reports.all():
            if report.state not in TASK_FINAL_STATES:
                report.state = TASK_STATES.CANCELED
                report.save()
        _delete_incomplete_resources(task_status)
        task_status.release_resources()

    return task_status
def delete_job(self, job_id: UUID):
    """Try removing the job if it is present in any of the registries."""
    try:
        send_stop_job_command(self.redis_conn, str(job_id))
    except Exception:
        # The job may not exist or may not be executing; either way there is
        # nothing to stop.
        pass
    for registry in self.registries:
        try:
            registry.remove(str(job_id), delete_job=True)
        except Exception:
            # The job is simply not in this registry.
            pass
def media_pre_delete(sender, instance, *args, **kwargs):
    try:
        conn = django_rq.get_connection()
        job = Job.fetch(instance.get_job_id(), connection=conn)
        if job.get_status() == 'started':
            try:
                send_stop_job_command(conn, instance.get_job_id())
            except InvalidJobOperation:
                # The job finished between the status check and the stop command.
                pass
        job.delete()
    except NoSuchJobError:
        # No job was ever enqueued for this instance, or it was already cleaned up.
        pass
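A receiver like this only takes effect once it is connected to Django's `pre_delete` signal; a minimal sketch of the wiring, assuming a hypothetical `Media` model that stores the ID of its RQ job and exposes it via `get_job_id()`:

from django.db.models.signals import pre_delete

# Stop and delete the associated RQ job before a Media row is removed.
pre_delete.connect(media_pre_delete, sender=Media)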
def stop_checking_score():
    conn = redis.Redis()
    try:
        # Job.fetch raises NoSuchJobError rather than returning None.
        curr_job = Job.fetch('checking_score', connection=conn)
    except NoSuchJobError:
        # No job with this ID exists, so there is nothing to stop.
        return redirect("/")
    if curr_job.get_status() == 'started':
        try:
            send_stop_job_command(conn, curr_job.id)
        except InvalidJobOperation:
            # The job finished between the status check and the stop command.
            pass
    return redirect("/")
def cancel(job_id):
    # Remove any temporary and output artifacts produced for this job.
    temp_blob = bucket.blob(os.path.join(TEMP, job_id))
    output_blob = bucket.blob(os.path.join(OUTPUT, job_id))
    if temp_blob.exists():
        temp_blob.delete()
    if output_blob.exists():
        output_blob.delete()

    try:
        job = Job.fetch(job_id, connection=redis)
        if job.get_status() == "started":
            send_stop_job_command(redis, job_id)
            print("- Stopped executing job", job_id)
            return True
        elif job.get_status() == "queued":
            queue.remove(job_id)
            print("- Removed job from queue", job_id)
            return True
        else:
            print("- No job found for", job_id)
    except (NoSuchJobError, InvalidJobOperation):
        print("- No job found for", job_id)
    return False
def test_stop_job_command(self):
    """Ensure that stop_job command works properly."""
    connection = self.testconn
    queue = Queue('foo', connection=connection, serializer=JSONSerializer)
    job = queue.enqueue(long_running_job, 3)
    worker = Worker('foo', connection=connection, serializer=JSONSerializer)

    # If job is not executing, an error is raised
    with self.assertRaises(InvalidJobOperation):
        send_stop_job_command(connection, job_id=job.id, serializer=JSONSerializer)

    # An exception is raised if job ID is invalid
    with self.assertRaises(NoSuchJobError):
        send_stop_job_command(connection, job_id='1', serializer=JSONSerializer)

    def start_work():
        worker.work(burst=True)

    p = Process(target=start_work)
    p.start()
    p.join(1)

    time.sleep(0.1)
    send_command(connection, worker.name, 'stop-job', job_id=1)
    time.sleep(0.25)
    # Worker still working due to job_id mismatch
    worker.refresh()
    self.assertEqual(worker.get_state(), WorkerStatus.BUSY)

    send_stop_job_command(connection, job_id=job.id, serializer=JSONSerializer)
    time.sleep(0.25)

    # Job status is set appropriately
    self.assertTrue(job.is_stopped)

    # Worker has stopped working
    worker.refresh()
    self.assertEqual(worker.get_state(), WorkerStatus.IDLE)
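Distilled from the test above, typical application code needs far less ceremony; a minimal sketch, assuming a local Redis server, an `rq worker` process running separately, and a hypothetical `tasks.long_running_job` function:

from redis import Redis
from rq import Queue
from rq.command import send_stop_job_command
from rq.exceptions import InvalidJobOperation, NoSuchJobError

from tasks import long_running_job  # hypothetical task module

connection = Redis()
queue = Queue(connection=connection)
job = queue.enqueue(long_running_job, 30)

# ... once a worker has picked the job up and it is executing ...

try:
    # Only a currently executing job can be stopped.
    send_stop_job_command(connection, job.id)
except (InvalidJobOperation, NoSuchJobError):
    # The job is not running, or the ID is unknown.
    pass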
def cancel(task_id):
    """
    Cancel the task that is represented by the given task_id.

    This method cancels only the task with the given task_id, not the spawned tasks. This
    also updates the task's state to either 'canceled' or 'canceling'.

    Args:
        task_id (str): The ID of the task you wish to cancel

    Raises:
        MissingResource: if a task with given task_id does not exist
    """
    try:
        task_status = Task.objects.get(pk=task_id)
    except Task.DoesNotExist:
        raise MissingResource(task=task_id)

    if task_status.state in TASK_FINAL_STATES:
        # If the task is already done, just stop
        msg = _("Task [{task_id}] already in a final state: {state}")
        _logger.debug(msg.format(task_id=task_id, state=task_status.state))
        return task_status

    _logger.info(_("Canceling task: {id}").format(id=task_id))

    if settings.USE_NEW_WORKER_TYPE:
        task = task_status
        # This is the only valid transition without holding the task lock
        rows = Task.objects.filter(pk=task.pk, state__in=TASK_INCOMPLETE_STATES).update(
            state=TASK_STATES.CANCELING
        )
        # Notify the worker that might be running that task and other workers to clean up
        with db_connection.cursor() as cursor:
            cursor.execute("SELECT pg_notify('pulp_worker_cancel', %s)", (str(task.pk),))
            cursor.execute("NOTIFY pulp_worker_wakeup")
        if rows == 1:
            task.refresh_from_db()
        return task

    redis_conn = connection.get_redis_connection()
    job = Job(id=str(task_status.pk), connection=redis_conn)
    resource_job = Job(id=str(task_status._resource_job_id), connection=redis_conn)

    task_status.state = TASK_STATES.CANCELED
    task_status.save()

    resource_job.cancel()
    job.cancel()

    try:
        send_stop_job_command(redis_conn, job.get_id())
        send_stop_job_command(redis_conn, resource_job.get_id())
    except (InvalidJobOperation, NoSuchJobError):
        # We don't care if the job isn't currently running when we try to cancel
        pass

    # A hack to ensure that we aren't deleting resources still being used by the workhorse
    time.sleep(0.5)

    with transaction.atomic():
        for report in task_status.progress_reports.all():
            if report.state not in TASK_FINAL_STATES:
                report.state = TASK_STATES.CANCELED
                report.save()
        _delete_incomplete_resources(task_status)
        task_status.release_resources()

    return task_status
def stop_all_ws_task(ws_id: str) -> None:
    for queue in queues.values():
        for job in queue.jobs:
            if job.meta.get("ws_id") == ws_id:
                try:
                    # send_stop_job_command takes a Redis connection, not a queue,
                    # and only succeeds for a job that is currently executing.
                    send_stop_job_command(queue.connection, job.id)
                except InvalidJobOperation:
                    pass
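Note that `queue.jobs` only lists jobs still waiting in the queue, while `send_stop_job_command` can only stop a job that a worker is currently executing, so the helper above will rarely stop anything. A sketch covering both cases, assuming the same hypothetical `queues` mapping and per-job `ws_id` metadata (the function name is likewise hypothetical):

from rq.command import send_stop_job_command
from rq.exceptions import InvalidJobOperation, NoSuchJobError
from rq.job import Job
from rq.registry import StartedJobRegistry


def stop_all_ws_tasks(ws_id: str) -> None:
    for queue in queues.values():
        # Drop matching jobs that are still waiting in the queue.
        for job in queue.jobs:
            if job.meta.get("ws_id") == ws_id:
                queue.remove(job.id)
        # Stop matching jobs that a worker is currently executing.
        registry = StartedJobRegistry(queue=queue)
        for job_id in registry.get_job_ids():
            try:
                job = Job.fetch(job_id, connection=queue.connection)
                if job.meta.get("ws_id") == ws_id:
                    send_stop_job_command(queue.connection, job_id)
            except (InvalidJobOperation, NoSuchJobError):
                # The job finished or vanished while we were iterating.
                pass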