def archive_task(
    task: Union[str, Task],
    company_id: str,
    status_message: str,
    status_reason: str,
) -> int:
    """
    Dequeue and archive task
    Return 1 if successful
    """
    if isinstance(task, str):
        task = TaskBLL.get_task_with_access(
            task,
            company_id=company_id,
            only=(
                "id",
                "execution",
                "status",
                "project",
                "system_tags",
                "enqueue_status",
            ),
            requires_write_access=True,
        )

    try:
        TaskBLL.dequeue_and_change_status(
            task,
            company_id,
            status_message,
            status_reason,
        )
    except APIError:
        # dequeue may fail if the task was not enqueued
        pass

    return task.update(
        status_message=status_message,
        status_reason=status_reason,
        add_to_set__system_tags=EntityVisibility.archived.value,
        last_change=datetime.utcnow(),
    )

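# Illustrative sketch, not part of the original module: how a bulk-archive
# helper might call archive_task() above and count how many tasks were
# actually updated. The helper name and its parameters are assumptions made
# only for this example.
def _example_archive_many(task_ids, company_id: str, user_name: str) -> int:
    archived = 0
    for task_id in task_ids:
        # archive_task() returns 1 when the underlying document update succeeded
        archived += archive_task(
            task_id,
            company_id=company_id,
            status_message=f"Archived by {user_name}",
            status_reason="user request",
        )
    return archived
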
def get_by_id(call: APICall, company_id, req_model: TaskRequest):
    task = TaskBLL.get_task_with_access(
        req_model.task, company_id=company_id, allow_public=True
    )
    task_dict = task.to_proper_dict()
    unprepare_from_saved(call, task_dict)
    call.result.data = {"task": task_dict}

def delete_task(
    task_id: str,
    company_id: str,
    move_to_trash: bool,
    force: bool,
    return_file_urls: bool,
    delete_output_models: bool,
    status_message: str,
    status_reason: str,
) -> Tuple[int, Task, CleanupResult]:
    task = TaskBLL.get_task_with_access(
        task_id, company_id=company_id, requires_write_access=True
    )

    if (
        task.status != TaskStatus.created
        and EntityVisibility.archived.value not in task.system_tags
        and not force
    ):
        raise errors.bad_request.TaskCannotBeDeleted(
            "due to status, use force=True",
            task=task.id,
            expected=TaskStatus.created,
            current=task.status,
        )

    try:
        TaskBLL.dequeue_and_change_status(
            task,
            company_id=company_id,
            status_message=status_message,
            status_reason=status_reason,
        )
    except APIError:
        # dequeue may fail if the task was not enqueued
        pass

    cleanup_res = cleanup_task(
        task,
        force=force,
        return_file_urls=return_file_urls,
        delete_output_models=delete_output_models,
    )

    if move_to_trash:
        collection_name = task._get_collection_name()
        archived_collection = "{}__trash".format(collection_name)
        task.switch_collection(archived_collection)
        try:
            # A simple save() won't do due to mongoengine caching (nothing will be saved),
            # so we have to force an insert. However, if for some reason such an ID exists,
            # let's make sure we'll keep going.
            task.save(force_insert=True)
        except Exception:
            pass
        task.switch_collection(collection_name)

    task.delete()
    update_project_time(task.project)

    return 1, task, cleanup_res

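# Illustrative sketch, not part of the original module: calling delete_task()
# and flattening its CleanupResult into a plain response dict, the same way
# the delete() endpoint below reports cleanup_task() results. The helper name
# and the chosen flag values are assumptions for this example only.
def _example_delete_and_report(task_id: str, company_id: str) -> dict:
    deleted, _task, cleanup_res = delete_task(
        task_id=task_id,
        company_id=company_id,
        move_to_trash=True,
        force=False,
        return_file_urls=True,
        delete_output_models=True,
        status_message="Deleted by user",
        status_reason="user request",
    )
    return dict(deleted=bool(deleted), **attr.asdict(cleanup_res))
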
def stop_task(
    task_id: str,
    company_id: str,
    user_name: str,
    status_reason: str,
    force: bool,
) -> dict:
    """
    Stop a running task. Requires task status 'in_progress' and
    execution_progress 'running', or force=True.
    A development task, or a task that has no associated worker, is stopped
    immediately. For a non-development task with a worker, only the status
    message is set to 'stopping' to allow the worker to stop the task and
    report by itself.
    :return: updated task fields
    """
    task = TaskBLL.get_task_with_access(
        task_id,
        company_id=company_id,
        only=(
            "status",
            "project",
            "tags",
            "system_tags",
            "last_worker",
            "last_update",
        ),
        requires_write_access=True,
    )

    def is_run_by_worker(t: Task) -> bool:
        """Checks if there is an active worker running the task"""
        update_timeout = config.get("apiserver.workers.task_update_timeout", 600)
        return (
            t.last_worker
            and t.last_update
            and (datetime.utcnow() - t.last_update).total_seconds() < update_timeout
        )

    if TaskSystemTags.development in task.system_tags or not is_run_by_worker(task):
        new_status = TaskStatus.stopped
        status_message = f"Stopped by {user_name}"
    else:
        new_status = task.status
        status_message = TaskStatusMessage.stopping

    return ChangeStatusRequest(
        task=task,
        new_status=new_status,
        status_reason=status_reason,
        status_message=status_message,
        force=force,
    ).execute()

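# Illustrative sketch, not part of the original module: the worker-liveness
# rule that is_run_by_worker() applies inside stop_task(), using the same
# 600-second default as apiserver.workers.task_update_timeout. A task whose
# last worker update is older than the timeout is stopped immediately rather
# than being asked to stop via the 'stopping' status message.
def _example_worker_considered_active(last_update: datetime, timeout: int = 600) -> bool:
    return (datetime.utcnow() - last_update).total_seconds() < timeout
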
def publish_task(
    task_id: str,
    company_id: str,
    force: bool,
    publish_model_func: Callable[[str, str], Any] = None,
    status_message: str = "",
    status_reason: str = "",
) -> dict:
    task = TaskBLL.get_task_with_access(
        task_id, company_id=company_id, requires_write_access=True
    )
    if not force:
        validate_status_change(task.status, TaskStatus.published)

    previous_task_status = task.status
    output = task.output or Output()
    publish_failed = False

    try:
        # set state to publishing
        task.status = TaskStatus.publishing
        task.save()

        # publish task models
        if task.models and task.models.output and publish_model_func:
            model_id = task.models.output[-1].model
            model = (
                Model.objects(id=model_id, company=company_id)
                .only("id", "ready")
                .first()
            )
            if model and not model.ready:
                publish_model_func(model.id, company_id)

        # set task status to published, and update (or set) its new output (view and models)
        return ChangeStatusRequest(
            task=task,
            new_status=TaskStatus.published,
            force=force,
            status_reason=status_reason,
            status_message=status_message,
        ).execute(published=datetime.utcnow(), output=output)
    except Exception as ex:
        publish_failed = True
        raise ex
    finally:
        if publish_failed:
            task.status = previous_task_status
            task.save()

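# Illustrative sketch, not part of the original module: one plausible shape
# for the publish_model_func callback accepted by publish_task(). It receives
# the model id and the company id; here it simply marks the model document as
# ready. The function name and the update it performs are assumptions.
def _example_publish_model(model_id: str, company_id: str) -> None:
    Model.objects(id=model_id, company=company_id).update(ready=True)


# usage (assumed): publish_task(task_id, company_id, force=False,
#                               publish_model_func=_example_publish_model)
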
def unarchive_task(
    task: str,
    company_id: str,
    status_message: str,
    status_reason: str,
) -> int:
    """
    Unarchive task
    Return 1 if successful
    """
    task = TaskBLL.get_task_with_access(
        task,
        company_id=company_id,
        only=("id",),
        requires_write_access=True,
    )
    return task.update(
        status_message=status_message,
        status_reason=status_reason,
        pull__system_tags=EntityVisibility.archived.value,
        last_change=datetime.utcnow(),
    )

def set_requirements(call: APICall, company_id, req_model: SetRequirementsRequest):
    requirements = req_model.requirements
    with translate_errors_context():
        task = TaskBLL.get_task_with_access(
            req_model.task,
            company_id=company_id,
            only=("status", "script"),
            requires_write_access=True,
        )
        if not task.script:
            raise errors.bad_request.MissingTaskFields(
                "Task has no script field", task=task.id
            )
        res = update_task(task, update_cmds=dict(script__requirements=requirements))
        call.result.data_model = UpdateResponse(updated=res)
        if res:
            call.result.data_model.fields = {"script.requirements": requirements}

def dequeue(call: APICall, company_id, request: UpdateRequest):
    task = TaskBLL.get_task_with_access(
        request.task,
        company_id=company_id,
        only=("id", "execution", "status", "project"),
        requires_write_access=True,
    )
    res = DequeueResponse(
        **TaskBLL.dequeue_and_change_status(
            task,
            company_id,
            status_message=request.status_message,
            status_reason=request.status_reason,
        )
    )
    res.dequeued = 1
    call.result.data_model = res

def delete(call: APICall, company_id, req_model: DeleteRequest):
    task = TaskBLL.get_task_with_access(
        req_model.task, company_id=company_id, requires_write_access=True
    )

    move_to_trash = req_model.move_to_trash
    force = req_model.force

    if task.status != TaskStatus.created and not force:
        raise errors.bad_request.TaskCannotBeDeleted(
            "due to status, use force=True",
            task=task.id,
            expected=TaskStatus.created,
            current=task.status,
        )

    with translate_errors_context():
        result = cleanup_task(task, force)

        if move_to_trash:
            collection_name = task._get_collection_name()
            archived_collection = "{}__trash".format(collection_name)
            task.switch_collection(archived_collection)
            try:
                # A simple save() won't do due to mongoengine caching (nothing will be saved),
                # so we have to force an insert. However, if for some reason such an ID exists,
                # let's make sure we'll keep going.
                with TimingContext("mongo", "save_task"):
                    task.save(force_insert=True)
            except Exception:
                pass
            task.switch_collection(collection_name)

        task.delete()
        _reset_cached_tags(company_id, projects=[task.project])
        update_project_time(task.project)
        call.result.data = dict(deleted=True, **attr.asdict(result))

def set_task_status_from_call(
    request: UpdateRequest, company_id, new_status=None, **set_fields
) -> dict:
    fields_resolver = SetFieldsResolver(set_fields)
    task = TaskBLL.get_task_with_access(
        request.task,
        company_id=company_id,
        only=tuple(
            {"status", "project", "started", "duration"} | fields_resolver.get_names()
        ),
        requires_write_access=True,
    )

    if "duration" not in fields_resolver.get_names():
        if new_status == TaskStatus.in_progress:
            fields_resolver.add_fields(min__duration=max(0, task.duration or 0))
        elif new_status in (
            TaskStatus.completed,
            TaskStatus.failed,
            TaskStatus.stopped,
        ):
            fields_resolver.add_fields(
                duration=int((datetime.utcnow() - task.started).total_seconds())
                if task.started
                else 0
            )

    status_reason = request.status_reason
    status_message = request.status_message
    force = request.force

    return ChangeStatusRequest(
        task=task,
        new_status=new_status or task.status,
        status_reason=status_reason,
        status_message=status_message,
        force=force,
    ).execute(**fields_resolver.get_fields(task))

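# Illustrative sketch, not part of the original module: how a status endpoint
# might delegate to set_task_status_from_call(). The helper name is an
# assumption, as is passing 'completed' as an extra field for the
# SetFieldsResolver above to apply.
def _example_mark_failed(request: UpdateRequest, company_id: str) -> dict:
    return set_task_status_from_call(
        request,
        company_id,
        new_status=TaskStatus.failed,
        completed=datetime.utcnow(),
    )
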
def reset(call: APICall, company_id, request: ResetRequest):
    task = TaskBLL.get_task_with_access(
        request.task, company_id=company_id, requires_write_access=True
    )

    force = request.force
    if not force and task.status == TaskStatus.published:
        raise errors.bad_request.InvalidTaskStatus(task_id=task.id, status=task.status)

    api_results = {}
    updates = {}

    try:
        dequeued = TaskBLL.dequeue(task, company_id, silent_fail=True)
    except APIError:
        # dequeue may fail if the task was not enqueued
        pass
    else:
        if dequeued:
            api_results.update(dequeued=dequeued)

    cleaned_up = cleanup_task(task, force)
    api_results.update(attr.asdict(cleaned_up))

    updates.update(
        set__last_iteration=DEFAULT_LAST_ITERATION,
        set__last_metrics={},
        set__metric_stats={},
        unset__output__result=1,
        unset__output__model=1,
        unset__output__error=1,
        unset__last_worker=1,
        unset__last_worker_report=1,
    )

    if request.clear_all:
        updates.update(
            set__execution=Execution(),
            unset__script=1,
        )
    else:
        updates.update(unset__execution__queue=1)
        if task.execution and task.execution.artifacts:
            updates.update(
                set__execution__artifacts={
                    key: artifact
                    for key, artifact in task.execution.artifacts.items()
                    if artifact.mode == ArtifactModes.input
                }
            )

    res = ResetResponse(
        **ChangeStatusRequest(
            task=task,
            new_status=TaskStatus.created,
            force=force,
            status_reason="reset",
            status_message="reset",
        ).execute(
            started=None,
            completed=None,
            published=None,
            active_duration=None,
            **updates,
        )
    )

    # do not return artifacts since they are not serializable
    res.fields.pop("execution.artifacts", None)

    for key, value in api_results.items():
        setattr(res, key, value)

    call.result.data_model = res

def reset_task(
    task_id: str,
    company_id: str,
    force: bool,
    return_file_urls: bool,
    delete_output_models: bool,
    clear_all: bool,
) -> Tuple[dict, CleanupResult, dict]:
    task = TaskBLL.get_task_with_access(
        task_id, company_id=company_id, requires_write_access=True
    )

    if not force and task.status == TaskStatus.published:
        raise errors.bad_request.InvalidTaskStatus(task_id=task.id, status=task.status)

    dequeued = {}
    updates = {}

    try:
        dequeued = TaskBLL.dequeue(task, company_id, silent_fail=True)
    except APIError:
        # dequeue may fail if the task was not enqueued
        pass

    cleaned_up = cleanup_task(
        task,
        force=force,
        update_children=False,
        return_file_urls=return_file_urls,
        delete_output_models=delete_output_models,
    )

    updates.update(
        set__last_iteration=DEFAULT_LAST_ITERATION,
        set__last_metrics={},
        set__metric_stats={},
        set__models__output=[],
        set__runtime={},
        unset__output__result=1,
        unset__output__error=1,
        unset__last_worker=1,
        unset__last_worker_report=1,
    )

    if clear_all:
        updates.update(
            set__execution=Execution(),
            unset__script=1,
        )
    else:
        updates.update(unset__execution__queue=1)
        if task.execution and task.execution.artifacts:
            updates.update(
                set__execution__artifacts={
                    key: artifact
                    for key, artifact in task.execution.artifacts.items()
                    if artifact.mode == ArtifactModes.input
                }
            )

    res = ChangeStatusRequest(
        task=task,
        new_status=TaskStatus.created,
        force=force,
        status_reason="reset",
        status_message="reset",
    ).execute(
        started=None,
        completed=None,
        published=None,
        active_duration=None,
        enqueue_status=None,
        **updates,
    )

    return dequeued, cleaned_up, res

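# Illustrative sketch, not part of the original module: combining the three
# values returned by reset_task() into a single response payload, mirroring
# how the older reset() endpoint above merges dequeue results, cleanup results
# and the status-change fields. The helper name and flag values are
# assumptions for this example only.
def _example_reset_and_report(task_id: str, company_id: str) -> dict:
    dequeued, cleanup_res, fields = reset_task(
        task_id=task_id,
        company_id=company_id,
        force=False,
        return_file_urls=False,
        delete_output_models=True,
        clear_all=False,
    )
    response = dict(fields)
    response.update(attr.asdict(cleanup_res))
    if dequeued:
        response["dequeued"] = dequeued
    return response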