def abort_environment_build(environment_build_uuid, is_running=False):
    """Aborts an environment build.

    Aborts an environment build by setting its state to ABORTED and
    sending a REVOKE and ABORT command to celery.

    Args:
        environment_build_uuid: UUID of the environment build to abort.
        is_running: Whether the build is currently running.

    Returns:
        None.
    """
    filter_by = {
        "build_uuid": environment_build_uuid,
    }
    status_update = {"status": "ABORTED"}
    celery_app = make_celery(current_app)

    # Make use of both constructs (revoke, abort) so we cover both a
    # task that is pending and a task which is running.
    celery_app.control.revoke(environment_build_uuid, timeout=1.0)
    if is_running:
        res = AbortableAsyncResult(environment_build_uuid, app=celery_app)

        # It is the responsibility of the task to terminate by reading
        # its aborted status.
        res.abort()

    update_status_db(
        status_update,
        model=models.EnvironmentBuild,
        filter_by=filter_by,
    )

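# Every snippet in this section issues the abort from the caller's
# side. For context, a minimal sketch of the worker side that such an
# abort targets, using celery's documented AbortableTask base. The app
# name, broker/backend URLs and the sleep-based "work" are illustrative
# assumptions, not taken from any project here; note that the aborted
# flag lives in the result backend, so a backend that stores task state
# (e.g. Redis or a database) is required.
from celery import Celery
from celery.contrib.abortable import AbortableTask

celery_app = Celery("example",
                    broker="redis://localhost:6379/0",
                    backend="redis://localhost:6379/0")


@celery_app.task(bind=True, base=AbortableTask)
def long_running_build(self, n_steps):
    import time
    for _ in range(n_steps):
        # Cooperative cancellation: poll the flag that
        # AbortableAsyncResult.abort() sets and terminate early.
        if self.is_aborted():
            return "ABORTED"
        time.sleep(1)  # stand-in for a unit of real work
    return "SUCCESS"
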
def post(self, request, *args, **kwargs):
    retry = request.POST.get('retry')
    abort = request.POST.get('abort')
    archive = request.POST.get('archive')
    add = request.POST.get('add')
    trigger = request.POST.get('trigger_find_more_links')

    if add:
        for link in add.split('\n'):
            download.apply_async(kwargs={'url': link})
    elif trigger:
        find_more_links.apply_async(kwargs={'provider_id': trigger})
    else:
        obj = File.objects.get(id=retry or abort or archive)
        if retry:
            download.apply_async(kwargs={
                'url': obj.file_url,
                'name': obj.title,
                'provider': obj.provider,
            })
        elif abort:
            abortable = AbortableAsyncResult(obj.task.task_id)
            abortable.abort()
        elif archive:
            obj.deleted_on = timezone.now()
            obj.save()

    return HttpResponseRedirect(reverse('smart-downloader'))

def stop_search_experiment_public_db(request, *args, **kwargs):
    comp_id = request.POST['compendium_id']
    channel_name = request.session['channel_name']
    view = request.POST['view']
    operation = request.POST['operation']
    stop_operation = request.POST['values']
    compendium = CompendiumDatabase.objects.get(id=comp_id)
    try:
        view_task = ViewTask.objects.using(compendium.compendium_nick_name). \
            get(view=view, operation=stop_operation)
        abortable_async_result = AbortableAsyncResult(view_task.task_id)
        abortable_async_result.abort()
        view_task.delete()
        Group("compendium_" + str(comp_id)).send({
            'text': json.dumps({
                'stream': view,
                'payload': {
                    'request': {'operation': 'refresh'},
                    'data': None
                }
            })
        })
    except Exception:
        # The task may already have finished or been removed.
        pass
    return HttpResponse(json.dumps({'success': True}),
                        content_type="application/json")

def delete_alignment_filter(request, *args, **kwargs):
    values = json.loads(request.POST['values'])
    comp_id = request.POST['compendium_id']
    view = request.POST['view']
    channel_name = request.session['channel_name']
    operation = request.POST['operation']
    compendium = CompendiumDatabase.objects.get(id=comp_id)
    for del_operation in values['operations']:
        try:
            view_task = ViewTask.objects.using(compendium.compendium_nick_name). \
                get(view=view, operation=del_operation)
            abortable_async_result = AbortableAsyncResult(view_task.task_id)
            abortable_async_result.abort()
            view_task.delete()
            Group("compendium_" + str(comp_id)).send({
                'text': json.dumps({
                    'stream': view,
                    'payload': {
                        'request': {'operation': 'refresh'},
                        'data': None
                    }
                })
            })
        except Exception:
            # The task may already have finished or been removed.
            pass
    platform = Platform.objects.using(
        compendium.compendium_nick_name).get(id=values['platform_id'])
    blast_file_name = values['alignment_id']
    base_dir = AdminOptions.objects.get(option_name='raw_data_directory')
    plt_dir = os.path.join(base_dir.option_value,
                           compendium.compendium_nick_name,
                           'platforms', platform.platform_access_id)
    mapper = MicroarrayMapper(os.path.join(plt_dir, blast_file_name))
    mapper.delete_filter_db(values['filter_id'])
    Group("compendium_" + str(comp_id)).send({
        'text': json.dumps({
            'stream': view,
            'payload': {
                'request': {'operation': 'refresh'},
                'data': None
            }
        })
    })
    return HttpResponse(json.dumps({'success': True}),
                        content_type="application/json")

def request_abort(self):
    """
    Set flag to abort this task if it is still running.
    """
    if not self.end:
        async_result = AbortableAsyncResult(self.id)
        async_result.abort()
        self.status = 'ABORT REQUESTED'
        self.save()

def abort_computation_tasks(cls, task_ids: List[str]) -> None:
    for task_id in task_ids:
        task_result = AbortableAsyncResult(task_id)
        if task_result:
            task_result.abort()
            logger.info(
                "Aborted celery task %s, status: %s",
                task_id,
                task_result.is_aborted(),
            )

def abort(self):
    """
    Abort this task if it is running.
    """
    if not self.end:
        async_result = AbortableAsyncResult(self.id)
        async_result.abort()
        self.status = 'ABORT REQUESTED'
        self.save()

def delete(self, *args, **kwargs):
    """
    Purge data from Solr when a dataset is deleted.
    """
    # Cancel import if necessary
    if self.current_task and self.current_task.end is None:
        async_result = AbortableAsyncResult(self.current_task.id)
        async_result.abort()

    super(Dataset, self).delete(*args, **kwargs)

def _collateral(self, run_uuids: List[str]):
    # Aborts and revokes all pipeline runs and waits for a reply for
    # 1.0s.
    celery = make_celery(current_app)
    celery.control.revoke(run_uuids, timeout=1.0)

    for run_uuid in run_uuids:
        res = AbortableAsyncResult(run_uuid, app=celery)

        # It is the responsibility of the task to terminate by reading
        # its aborted status.
        res.abort()

def _collateral(self, environment_build_uuid: Optional[str]):
    if not environment_build_uuid:
        return

    celery_app = make_celery(current_app)

    # Make use of both constructs (revoke, abort) so we cover both a
    # task that is pending and a task which is running.
    celery_app.control.revoke(environment_build_uuid, timeout=1.0)

    res = AbortableAsyncResult(environment_build_uuid, app=celery_app)

    # It is the responsibility of the task to terminate by reading its
    # aborted status.
    res.abort()

def _collateral(self, run_uuid: Optional[str]):
    """Revoke the pipeline run celery task."""
    # If the run status was not STARTED/PENDING, then there is nothing
    # to abort/revoke.
    if not run_uuid:
        return

    celery_app = make_celery(current_app)
    res = AbortableAsyncResult(run_uuid, app=celery_app)

    # It is the responsibility of the task to terminate by reading its
    # aborted status.
    res.abort()
    celery_app.control.revoke(run_uuid)

def node_cancel_load(request, node_id):
    node = get_node_subclass_or_404(request.user, node_id)
    if node_task := NodeTask.objects.filter(node=node, version=node.version).first():
        if node_task.celery_task:
            logging.debug("TODO: Cancelling task %s", node_task.celery_task)
            app.control.revoke(node_task.celery_task, terminate=True)  # @UndefinedVariable
            result = AbortableAsyncResult(node_task.celery_task)
            result.abort()
        if node_task.db_pid:
            run_sql("select pg_cancel_backend(%s)", [node_task.db_pid])

def abort(self):
    """
    Abort a task.
    """
    if not self.is_active():
        return

    asyncres = AbortableAsyncResult(self.task_id)
    if self.is_abortable():
        asyncres.abort()
        if asyncres.is_aborted():
            self.status = "ABORTED"
            self.save()

    celery.task.control.revoke(self.task_id, terminate=True, signal="SIGTERM")

def kill_job(group):
    abortables = []
    _app = group.app
    if not group.ready():
        for task in group.parent.children:
            abortable = AbortableAsyncResult(id=task.task_id, app=_app)
            abortable.abort()
            abortables.append(abortable)
        # Wait until every aborted task has produced a result.
        for _ in range(KILL_MAX_WAIT_TIME):
            if all(task.result for task in abortables):
                break
            sleep(60)
    print("Aborting distributed tasks ... ")
    return 0

def cancelarCaminando(request):
    if not request.user.is_authenticated:
        return redirect('login')
    asyncTask = AbortableAsyncResult(id=Settings.objects.get(
        setting='asyncKeyCaminando').value)
    if asyncTask and asyncTask.state in ('PENDING', 'STARTED'):
        asyncTask.abort()
    else:
        status = Settings.objects.get(setting='statusMatrizCaminando')
        status.value = -1
        status.save()
    if request:
        return redirect('index')

def task_revoked(pk):
    """Stop the task, mark it as revoked and execute BPM logic:

    - if the task executed a subprocess, revoke the process
    """
    task = update_task(pk=pk, state="REVOKED", end_date=now())
    result = AbortableAsyncResult(task.task_id)
    result.abort()
    revoke(task.task_id, terminate=True)
    for subprocess in task.subprocesses.iterator():
        subprocess.stop()
        subprocess.update(state="REVOKED", end_date=task.end_date)
        logger.info('Subprocess "{subprocess}" revoked by task '
                    '"{task}"'.format(subprocess=subprocess, task=task))

def purge():
    print('Cancelling all pending tasks')
    inspector = celery_config.app.control.inspect()
    for worker_tasks in chain(
            inspector.active().itervalues(),
            inspector.reserved().itervalues(),
            inspector.scheduled().itervalues()):
        for task in worker_tasks:
            try:
                print('Cancelling {task[id]}'.format(task=task))
                r = AbortableAsyncResult(task['id'])
                r.abort()
            except Exception:
                print('Cannot abort task {task[id]}'.format(task=task))
    celery_config.app.control.purge()  # just in case we forgot something

def delete(self, *args, **kwargs):
    """
    Purge data from Solr when a dataset is deleted.
    """
    dataset_id = self.id

    # Cancel import if necessary
    if self.current_task and self.current_task.end is None \
            and self.current_task.task_name == 'redd.tasks.DatasetImportTask':
        async_result = AbortableAsyncResult(self.current_task.id)
        async_result.abort()

    super(Dataset, self).delete(*args, **kwargs)

    # Execute solr delete
    dataset_purge_data.apply_async(args=[dataset_id])

def abort_task() -> str:
    """
    Aborts the task with the given id.
    """
    task_id = request.json.get("client_uuid")
    AbortableAsyncResult(id=task_id, app=celery).abort()
    return "cancelled"

def stop_experiment(experiment_uuid) -> bool:
    """Stop an experiment.

    Args:
        experiment_uuid:

    Returns:
        True if the experiment exists and was stopped, False if it did
        not exist or if it was already completed.
    """
    experiment = models.Experiment.query.filter_by(
        experiment_uuid=experiment_uuid).one_or_none()
    if experiment is None:
        return False

    run_uuids = [
        run.run_uuid for run in experiment.pipeline_runs
        if run.status in ["PENDING", "STARTED"]
    ]
    if len(run_uuids) == 0:
        return False

    # Aborts and revokes all pipeline runs and waits for a reply for
    # 1.0s.
    celery = make_celery(current_app)
    celery.control.revoke(run_uuids, timeout=1.0)

    # TODO: possibly set status of steps and run to "ABORTED". Note
    # that a race condition would be present since the task will try
    # to set the status as well.
    for run_uuid in run_uuids:
        res = AbortableAsyncResult(run_uuid, app=celery)

        # It is the responsibility of the task to terminate by reading
        # its aborted status.
        res.abort()

        filter_by = {"run_uuid": run_uuid}
        status_update = {"status": "ABORTED"}
        update_status_db(status_update,
                         model=models.NonInteractivePipelineRun,
                         filter_by=filter_by)
        update_status_db(status_update,
                         model=models.PipelineRunStep,
                         filter_by=filter_by)

    db.session.commit()
    return True

def test_abortable_task_chain():
    task_names = [
        'layman.layer.filesystem.tasks.refresh_input_chunk',
        'layman.layer.db.tasks.refresh_table',
        'layman.layer.geoserver.tasks.refresh_wfs',
    ]
    tasks = [
        getattr(importlib.import_module(taskname.rsplit('.', 1)[0]),
                taskname.rsplit('.', 1)[1])
        for taskname in task_names
    ]
    check_crs = False
    task_options = {
        'crs_id': 'EPSG:4326',
        'description': 'bla',
        'title': 'bla',
        'ensure_user': True,
        'check_crs': check_crs,
    }
    filenames = ['abc.geojson']
    workspace = 'test_abort_user'
    layername = 'test_abort_layer2'
    with app.app_context():
        input_chunk.save_layer_files_str(workspace, layername, filenames, check_crs)

    task_chain = chain(*[
        tasks_util.get_task_signature(workspace, layername, t, task_options, 'layername')
        for t in tasks
    ])
    task_result = task_chain()

    results = [task_result]
    prev_result = task_result
    while prev_result.parent is not None:
        prev_result = prev_result.parent
        results.insert(0, prev_result)
    assert len(results) == 3

    results_copy = [
        AbortableAsyncResult(task_result.task_id, backend=celery_app.backend)
        for task_result in results
    ]

    time.sleep(1)

    assert results[0].state == results_copy[0].state == 'STARTED'
    assert results[1].state == results_copy[1].state == 'PENDING'
    assert results[2].state == results_copy[2].state == 'PENDING'

    with app.app_context():
        celery_util.abort_task_chain(results_copy)

    # The first one is a failure, because it throws AbortedException.
    assert results[0].state == results_copy[0].state == 'FAILURE'
    # The second one (and all others) was revoked; it never started at
    # all because of the previous failure, so it ends up ABORTED.
    assert results[1].state == results_copy[1].state == 'ABORTED'
    assert results[2].state == results_copy[2].state == 'ABORTED'

    with app.app_context():
        input_chunk.delete_layer(workspace, layername)

def cancel(self):
    logger = logging.getLogger("ohdei.downloader.models.File.cancel")
    logger.debug("cancelling task: %s" % self.task_id)
    task = AbortableAsyncResult(self.task_id)
    logger.debug(task.state)
    # Only abort tasks that have not already reached a final state.
    if task.state not in ("FAILURE", "ABORTED", "REVOKED", "SUCCESS"):
        task.abort()
        # task.wait()  # block until the task aborts
    if task.is_aborted():
        self.status = "aborted"
        self.save()
        logger.debug("download is cancelled: %s" % self.url)
        logger.debug(task.state)
        logger.debug(task.info)
    else:
        logger.error("error aborting task %s" % self.task_id)

def post(self, request):
    # Check whether the task is done or not.
    result = CheckDownloadIsInProcess(request)
    if result and result.status == 'SUCCESS':
        # Not allowed to abort the task; it is completed now.
        return Response(
            json.dumps({'message': "Task Done Successfully"}),
            content_type='application/json')

    from celery.contrib.abortable import AbortableAsyncResult
    abortable_task = AbortableAsyncResult(request.data.get('task_id'))
    abortable_task.abort()
    return Response(json.dumps({'message': "Task Abort Done Successfully"}),
                    content_type='application/json')

def test_single_abortable_task():
    task_names = [
        'layman.layer.filesystem.tasks.refresh_input_chunk',
    ]
    tasks = [
        getattr(importlib.import_module(taskname.rsplit('.', 1)[0]),
                taskname.rsplit('.', 1)[1])
        for taskname in task_names
    ]
    check_crs = False
    task_options = {
        'crs_id': 'EPSG:4326',
        'description': 'bla',
        'title': 'bla',
        'ensure_user': True,
        'check_crs': check_crs,
    }
    filenames = ['abc.geojson']
    workspace = 'test_abort_user'
    layername = 'test_abort_layer'
    with app.app_context():
        input_chunk.save_layer_files_str(workspace, layername, filenames, check_crs)

    task_chain = chain(*[
        tasks_util.get_task_signature(workspace, layername, t, task_options, 'layername')
        for t in tasks
    ])
    task_result = task_chain()

    results = [task_result]
    results_copy = [
        AbortableAsyncResult(task_result.task_id, backend=celery_app.backend)
        for task_result in results
    ]

    i = 1
    while i <= 20 and not results[0].state == results_copy[0].state == 'STARTED':
        print(f"results[0].state={results[0].state}, "
              f"results_copy[0].state={results_copy[0].state}")
        time.sleep(0.1)
        i += 1
    assert results[0].state == results_copy[0].state == 'STARTED'

    with app.app_context():
        celery_util.abort_task_chain(results_copy)

    # The first one is a failure, because it throws AbortedException.
    assert results[0].state == results_copy[0].state == 'FAILURE'

    with app.app_context():
        input_chunk.delete_layer(workspace, layername)

def post(self, taskid):
    """
    Abort a running task

    **Example request**:

    .. sourcecode:: http

      POST /api/task/abort/c60be250-fe52-48df-befb-ac66174076e6 HTTP/1.1
      Host: localhost:5555

    **Example response**:

    .. sourcecode:: http

      HTTP/1.1 200 OK
      Content-Length: 61
      Content-Type: application/json; charset=UTF-8

      {
          "message": "Aborted '1480b55c-b8b2-462c-985e-24af3e9158f9'"
      }

    :reqheader Authorization: optional OAuth token to authenticate
    :statuscode 200: no error
    :statuscode 401: unauthorized request
    :statuscode 503: result backend is not configured
    """
    logger.info("Aborting task '%s'", taskid)

    result = AbortableAsyncResult(taskid)
    if not self.backend_configured(result):
        raise HTTPError(503)

    result.abort()
    self.write(dict(message="Aborted '%s'" % taskid))

def stop_pipeline_run(run_uuid) -> bool:
    """Stop a pipeline run.

    The run will be cancelled if it is not yet running, otherwise it
    will be aborted.

    Args:
        run_uuid:

    Returns:
        True if a cancellation was issued to the run, False if the run
        did not exist or was not PENDING/STARTED.
    """
    interactive_run = models.InteractiveRun.query.filter(
        models.InteractiveRun.status.in_(["PENDING", "STARTED"]),
        models.InteractiveRun.run_uuid == run_uuid,
    ).one_or_none()
    non_interactive_run = models.NonInteractiveRun.query.filter(
        models.NonInteractiveRun.status.in_(["PENDING", "STARTED"]),
        models.NonInteractiveRun.run_uuid == run_uuid,
    ).one_or_none()
    if interactive_run is None and non_interactive_run is None:
        return False

    celery_app = make_celery(current_app)
    res = AbortableAsyncResult(run_uuid, app=celery_app)

    # It is the responsibility of the task to terminate by reading its
    # aborted status.
    res.abort()
    celery_app.control.revoke(run_uuid)

    # TODO: possibly set status of steps and run to "ABORTED". Note
    # that a race condition would be present since the task will try
    # to set the status as well.
    return True

def post(self, taskid):
    """
    Abort a task

    **Example request**:

    .. sourcecode:: http

      POST /api/task/abort/1480b55c-b8b2-462c-985e-24af3e9158f9?terminate=true
      Content-Length: 0
      Content-Type: application/x-www-form-urlencoded; charset=utf-8
      Host: localhost:5555

    **Example response**:

    .. sourcecode:: http

      HTTP/1.1 200 OK
      Content-Length: 61
      Content-Type: application/json; charset=UTF-8

      {
          "message": "Aborted '1480b55c-b8b2-462c-985e-24af3e9158f9'"
      }

    :query abort: abort the task if it is running
    :reqheader Authorization: optional OAuth token to authenticate
    :statuscode 200: no error
    :statuscode 401: unauthorized request
    """
    result = AbortableAsyncResult(taskid)
    logger.info("Aborting task '%s'", taskid)

    if not self.backend_configured(result):
        raise HTTPError(503)

    result.abort()
    self.write(dict(message="Aborted '%s'" % taskid))

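# A minimal client-side sketch for the abort handlers above, posting
# to the /api/task/abort/<taskid> endpoint shown in their docstrings.
# The base URL matches the documented example request
# (localhost:5555); the optional Authorization header is omitted.
import requests


def abort_via_api(task_id, base_url="http://localhost:5555"):
    response = requests.post(f"{base_url}/api/task/abort/{task_id}")
    # Raises on the documented 401/503 error responses.
    response.raise_for_status()
    return response.json()["message"]
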
def get_publication_chain_info(workspace, publication_type, publication_name):
    chain_info = get_publication_chain_info_dict(workspace, publication_type,
                                                 publication_name)
    from layman import celery_app
    if chain_info is not None:
        results = {
            task_id: AbortableAsyncResult(task_id, backend=celery_app.backend)
            for task_id in chain_info['by_order']
        }
        chain_info['by_order'] = [
            results[task_id] for task_id in chain_info['by_order']
        ]
        chain_info['by_name'] = {
            k: results[task_id]
            for k, task_id in chain_info['by_name'].items()
        }
        chain_info['last'] = results[chain_info['last']]
    return chain_info

async def check_pipeline_run_task_status(run_config, pipeline, task_id):
    while True:
        # Check status every second.
        await asyncio.sleep(1)

        aborted = AbortableAsyncResult(task_id).is_aborted()
        run_status = await get_run_status(task_id, "pipeline",
                                          run_config["run_endpoint"])

        # The status might be missing if the record has been removed,
        # i.e. due to a cleanup that might happen if the project has
        # been removed.
        aborted = aborted or "status" not in run_status
        ready = run_status.get("status", "FAILURE") in ["SUCCESS", "FAILURE"]

        if aborted:
            pipeline.kill_all_running_steps(task_id, "docker",
                                            {"docker_client": docker_client})
        if ready or aborted:
            break

def abort_current_task(self, wait_to_state=False, move_to_aborted=True):
    with transaction.atomic():
        current_task = self.current_task
        async_result = AbortableAsyncResult(current_task.async_result_id)
        if wait_to_state:
            # The task will know it's aborted and will finish its
            # execution.
            async_result.abort()
        else:
            # Kill the task.
            async_result.revoke(terminate=True)
        current_task.set_status_aborted()
        if not move_to_aborted:
            self.insert_task_at_position(current_task, 0)
        else:
            self.aborted_tasks.add(current_task)
        if self.is_consuming_stopped:
            self._set_current_task(None)

def check_queues():
    """
    This task checks, for every task queue, that it is running and
    that its state is correct.

    :return:
    """
    from task_queue.models import TaskQueue, QueueTask
    global issue_detected

    had_issue_detected = issue_detected is not None
    active = celery_app.control.inspect().active()
    for obj in TaskQueue.objects.all():
        try:
            active_tasks = obj.get_active_tasks(celery_active=active)
            if obj.current_task is None and len(active_tasks):
                logger.warn(f'{obj}: Task running and current task None. Killing task.')
                AbortableAsyncResult(active_tasks[0]['id']).revoke(terminate=True)
            elif obj.is_not_working(active_tasks):
                if obj.current_task:
                    if issue_detected == 'not-working':
                        logger.warn(f'{obj}: Current task should be running. Relaunching.')
                        ct = obj.current_task
                        ct.get_celery_task().run_now(
                            *ct.task_args, queue_id=str(obj.id),
                            wait_to_state=False, existing_id=str(ct.id),
                            **ct.task_kwargs
                        )
                    else:
                        issue_detected = 'not-working'
                elif not obj.is_consuming_stopped:
                    if issue_detected == 'not-working':
                        logger.warn(f'{obj}: Next task should be running. Running next task.')
                        obj.run_next()
                    else:
                        issue_detected = 'not-working'
            elif len(active_tasks):
                if obj.current_task and (obj.current_task.async_result_id != active_tasks[0]['id']):
                    if issue_detected == active_tasks[0]['id']:
                        logger.warn('Current task is different from task running. Fixing.')
                        real_task_running = QueueTask.objects.filter(
                            async_result_id=active_tasks[0]['id']).first()
                        if not real_task_running:
                            logger.warn(f'{obj}: Weird case. Current Task running does not exist.')
                        else:
                            obj.current_task.set_status_aborted()
                            obj.insert_task_at_position(obj.current_task, 0)
                            obj.current_task = real_task_running
                            obj.save()
                            real_task_running.get_celery_task().run_now(
                                *real_task_running.task_args, queue_id=str(obj.id),
                                wait_to_state=False,
                                existing_id=str(real_task_running.id),
                                **real_task_running.task_kwargs
                            )
                    else:
                        issue_detected = active_tasks[0]['id']
                elif obj.current_task and obj.current_task.last_activity_at < (timezone.now() - rd(hours=1)):
                    logger.warn(f'{obj}: Current Task last activity was more than one hour ago. Restarting...')
                    ct = obj.current_task
                    ct.get_celery_task().run_now(
                        *ct.task_args, queue_id=str(obj.id), wait_to_state=False,
                        existing_id=str(ct.id), **ct.task_kwargs
                    )
            else:
                issue_detected = None

            if had_issue_detected == issue_detected:
                # Issue fixed
                issue_detected = None
            elif had_issue_detected and not issue_detected:
                logger.warn('The issue has disappeared')
            elif not had_issue_detected and issue_detected:
                logger.warn('Issue was detected. Waiting next execution to fix it')
        except QueueWorkerNotRunning:
            logger.error(f'{obj}: Worker is not running')

def abort_event_task(task_id: str) -> None:
    result_task_send_mail = AbortableAsyncResult(task_id, app=task_send_mail)
    result_task_send_mail.abort()

def abort_celery_job(job_id):
    job = AbortableAsyncResult(job_id)
    if job:
        return job.abort()

def abort_func():
    """
    Check whether the task in question has been aborted.
    """
    asyncres = AbortableAsyncResult(task_id)
    return asyncres.is_aborted()

def build_environment_task(task_uuid, project_uuid, environment_uuid, project_path):
    """Function called by the celery task to build an environment.

    Builds an environment (docker image) given the arguments; the logs
    produced by the user provided script are forwarded to a SocketIO
    server and namespace defined in the orchest internals config.

    Args:
        task_uuid:
        project_uuid:
        environment_uuid:
        project_path:

    Returns:
        The build status.
    """
    with requests.sessions.Session() as session:
        try:
            update_environment_build_status("STARTED", session, task_uuid)

            # Prepare the project snapshot with the correctly placed
            # dockerfile, scripts, etc.
            build_context = prepare_build_context(task_uuid, project_uuid,
                                                  environment_uuid, project_path)

            # Use the agreed upon pattern for the docker image name.
            docker_image_name = _config.ENVIRONMENT_IMAGE_NAME.format(
                project_uuid=project_uuid, environment_uuid=environment_uuid)

            if not os.path.exists(__ENV_BUILD_FULL_LOGS_DIRECTORY):
                os.mkdir(__ENV_BUILD_FULL_LOGS_DIRECTORY)
            # Place the logs in the celery container.
            complete_logs_path = os.path.join(__ENV_BUILD_FULL_LOGS_DIRECTORY,
                                              docker_image_name)

            status = SioStreamedTask.run(
                # What we are actually running/doing in this task.
                task_lambda=lambda user_logs_fo: build_docker_image(
                    docker_image_name,
                    build_context,
                    task_uuid,
                    user_logs_fo,
                    complete_logs_path,
                ),
                identity=f"{project_uuid}-{environment_uuid}",
                server=_config.ORCHEST_SOCKETIO_SERVER_ADDRESS,
                namespace=_config.ORCHEST_SOCKETIO_ENV_BUILDING_NAMESPACE,
                # Note: using task.is_aborted() could be an option, but
                # it was giving some issues related to
                # multithreading/processing. Moreover, just passing the
                # task_uuid to this function is less information to
                # rely on, which is good.
                abort_lambda=lambda: AbortableAsyncResult(task_uuid).is_aborted(),
            )

            # Cleanup.
            os.system('rm -rf "%s"' % build_context)

            update_environment_build_status(status, session, task_uuid)

        # Catch all exceptions because we need to make sure to set the
        # build state to failed.
        except Exception as e:
            update_environment_build_status("FAILURE", session, task_uuid)
            raise e
        finally:
            filters = {
                "label": [
                    "_orchest_env_build_is_intermediate=1",
                    f"_orchest_env_build_task_uuid={task_uuid}",
                ]
            }
            # Artifacts of this build (intermediate containers, images,
            # etc.).
            cleanup_env_build_docker_artifacts(filters)

            # See if outdated images of this environment can be cleaned
            # up.
            url = (
                f"{CONFIG_CLASS.ORCHEST_API_ADDRESS}"
                f"/environment-images/dangling/{project_uuid}/{environment_uuid}"
            )
            session.delete(url)

    return status

def abort(request):
    abortable_async_result = AbortableAsyncResult(request.GET['task_id'])
    abortable_async_result.abort()
    return HttpResponse()

def cancel_task(request, task_id):
    abortable_task = AbortableAsyncResult(task_id)
    abortable_task.abort()
    return Response({'result': 'Task was cancelled'},
                    status=HTTP_204_NO_CONTENT)

def abort(request, task_id):
    abortable_async_result = AbortableAsyncResult(task_id)
    abortable_async_result.abort()
    return HttpResponse()

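# The views above all reduce to the same requester-side pattern:
# construct an AbortableAsyncResult from a known task id, call abort(),
# and optionally revoke() so a task that is still queued never starts.
# A standalone sketch of that pattern, assuming `celery_app` is your
# configured Celery instance:
from celery.contrib.abortable import AbortableAsyncResult


def request_task_abort(celery_app, task_id, revoke_pending=True):
    result = AbortableAsyncResult(task_id, app=celery_app)
    # Sets the aborted flag in the result backend; an AbortableTask
    # must poll is_aborted() and terminate itself cooperatively.
    result.abort()
    if revoke_pending:
        # revoke() covers a task that has not been picked up yet;
        # abort() covers one that is already running.
        celery_app.control.revoke(task_id)
    return result.is_aborted()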