def _wf_module_delete_secret_and_build_delta(
        workflow: Workflow, wf_module: WfModule, param: str
) -> Optional[Dict[str, Any]]:
    """
    Delete the secret named `param` from `wf_module`, if it exists.

    Return a "delta" for websockets.ws_client_send_delta_async(), or `None`
    if the database has not been modified.

    Raise Workflow.DoesNotExist if the Workflow was deleted.
    """
    with workflow.cooperative_lock():  # raises Workflow.DoesNotExist
        try:
            wf_module.refresh_from_db()
        except WfModule.DoesNotExist:
            return None  # no-op
        if wf_module.secrets.get(param) is None:
            return None  # no-op: nothing to delete
        # Copy-on-write: never mutate the dict other code may still hold.
        new_secrets = dict(wf_module.secrets)
        new_secrets.pop(param)
        wf_module.secrets = new_secrets
        wf_module.save(update_fields=['secrets'])
        return {
            'updateWfModules': {
                str(wf_module.id): {
                    'secrets': wf_module.secret_metadata,
                }
            }
        }
def _wf_module_delete_secret_and_build_delta(
    workflow: Workflow, wf_module: WfModule, param: str
) -> Optional[Dict[str, Any]]:
    """
    Delete the secret named `param` from `wf_module`, if it exists.

    Return a "delta" for websockets.ws_client_send_delta_async(), or `None`
    if the database has not been modified.

    Raise Workflow.DoesNotExist if the Workflow was deleted.
    """
    with workflow.cooperative_lock():  # raises Workflow.DoesNotExist
        # BUG FIX: refresh_from_db() never returns None -- on a deleted row
        # it raises WfModule.DoesNotExist, which previously escaped uncaught.
        # The old `wf_module is None` check was dead code.
        try:
            wf_module.refresh_from_db()
        except WfModule.DoesNotExist:
            return None  # no-op: WfModule was deleted
        if wf_module.secrets.get(param) is None:
            return None  # no-op: nothing to delete
        wf_module.secrets = dict(wf_module.secrets)  # shallow copy
        del wf_module.secrets[param]
        wf_module.save(update_fields=['secrets'])
        return {
            'updateWfModules': {
                str(wf_module.id): {
                    'params': wf_module.get_params().as_dict()
                }
            }
        }
def _write_tab_position(workflow: Workflow, tab_slug: str) -> None:
    """Write position in DB, or raise (Workflow|Tab).DoesNotExist."""
    with workflow.cooperative_lock():  # raises Workflow.DoesNotExist
        # live_tabs excludes deleted tabs, so this raises Tab.DoesNotExist
        # e.g. when tab.is_deleted
        selected_tab = workflow.live_tabs.get(slug=tab_slug)
        workflow.selected_tab_position = selected_tab.position
        workflow.save(update_fields=['selected_tab_position'])
def _do_abort_upload(workflow: Workflow, wf_module: WfModule,
                     uuid: uuidgen.UUID) -> None:
    """Abort the in-progress upload `uuid` on `wf_module`; no-op if absent."""
    with workflow.cooperative_lock():
        try:
            upload = wf_module.in_progress_uploads.get(id=uuid)
        except InProgressUpload.DoesNotExist:
            return  # no-op: nothing to abort
        upload.delete_s3_data()
        # Aborted upload should disappear, as far as the user is concerned
        upload.is_completed = True
        upload.save(update_fields=["is_completed"])
def _write_wf_module_position(workflow: Workflow, wf_module_id: int) -> None:
    """Write position in DB, or raise (Workflow|Tab|WfModule).DoesNotExist."""
    with workflow.cooperative_lock():  # raises Workflow.DoesNotExist
        # Raises WfModule.DoesNotExist, e.g. if tab.is_deleted
        step = WfModule.live_in_workflow(workflow).get(pk=wf_module_id)
        step_tab = step.tab
        # Select the step within its tab...
        step_tab.selected_wf_module_position = step.order
        step_tab.save(update_fields=['selected_wf_module_position'])
        # ...and the tab within the workflow.
        workflow.selected_tab_position = step_tab.position
        workflow.save(update_fields=['selected_tab_position'])
def _do_complete_multipart_upload(
    workflow: Workflow,
    wf_module: WfModule,
) -> Tuple[Any, Dict[str, Any]]:
    """
    Convert `wf_module`'s in-progress file upload into an UploadedFile.

    Return (uuid, serialized-wf-module-data).

    BUG FIX: the old annotation claimed `Tuple[UploadedFile, ...]`, but the
    first element is `uploaded_file.uuid`, not the UploadedFile itself.
    Annotated `Any` -- presumably it is a str; confirm against the
    UploadedFile model.
    """
    with workflow.cooperative_lock():
        wf_module.refresh_from_db()
        uploaded_file = (
            _write_uploaded_file_and_clear_inprogress_file_upload(wf_module)
        )
        return (
            uploaded_file.uuid,
            serializers.WfModuleSerializer(wf_module).data,
        )
def _load_tab_flows(workflow: Workflow, delta_id: int) -> List[TabFlow]:
    """
    Query `workflow` for each tab's `TabFlow` (ordered by tab position).

    Raise UnneededExecution if the workflow has moved past `delta_id`.
    """
    with workflow.cooperative_lock():  # reloads workflow
        if workflow.last_delta_id != delta_id:
            raise UnneededExecution
        return [
            TabFlow(
                tab,
                [(wfm, wfm.get_params())
                 for wfm in tab.live_wf_modules.all()],
            )
            for tab in workflow.live_tabs.all()
        ]
def _wf_module_set_secret_and_build_delta(
    workflow: Workflow, wf_module: WfModule, param: str, secret: str
) -> Optional[Dict[str, Any]]:
    """
    Write a new secret to `wf_module`, or raise.

    Return a "delta" for websockets.ws_client_send_delta_async(), or `None`
    if the database is not modified.

    Raise Workflow.DoesNotExist if the Workflow was deleted.
    Raise HandlerError if the module version is missing or `param` is not a
    secret "string" parameter.
    """
    with workflow.cooperative_lock():  # raises Workflow.DoesNotExist
        try:
            wf_module.refresh_from_db()
        except WfModule.DoesNotExist:
            return None  # no-op
        if wf_module.secrets.get(param, {}).get('secret') == secret:
            return None  # no-op: value unchanged

        module_version = wf_module.module_version
        if module_version is None:
            # FIX: dropped the extraneous f-prefixes below -- these literals
            # have no placeholders (lint F541). Message text is unchanged.
            raise HandlerError('BadRequest: ModuleVersion does not exist')
        if not any(p.type == 'secret' and p.secret_logic.provider == 'string'
                   for p in module_version.param_fields):
            raise HandlerError(
                'BadRequest: param is not a secret string parameter')

        created_at = timezone.now()
        # ISO-8601 with millisecond precision, e.g. "2020-01-02T03:04:05.678Z"
        created_at_str = (
            created_at.strftime('%Y-%m-%dT%H:%M:%S')
            + '.'
            + created_at.strftime('%f')[0:3]  # milliseconds
            + 'Z'
        )

        # Copy-on-write so concurrent readers never see a half-written dict
        wf_module.secrets = {
            **wf_module.secrets,
            param: {
                'name': created_at_str,
                'secret': secret,
            }
        }
        wf_module.save(update_fields=['secrets'])

        return {
            'updateWfModules': {
                str(wf_module.id): {
                    'secrets': wf_module.secret_metadata,
                }
            }
        }
def _load_wf_modules_and_input(workflow: Workflow):
    """
    Find (stale_wf_modules, previous_cached_result_or_none) from the database.

    If all modules are up-to-date, return ([], output_cached_result). Yes,
    beware: if we aren't rendering, we return *output*, and if we are
    rendering we return *input*. This is convenient for the caller.

    If there's a race, the returned `stale_wf_modules` may be too short, and
    `input_table` may be wrong. That should be fine because
    `execute_wfmodule` will raise an exception before starting work.
    """
    with workflow.cooperative_lock():
        # 1. Load list of wf_modules
        all_modules = list(workflow.wf_modules.all())
        if not all_modules:
            return [], None

        # 2. Find index of first one that needs render
        first_stale = next(
            (i for i, wfm in enumerate(all_modules) if _needs_render(wfm)),
            len(all_modules),
        )
        stale_modules = all_modules[first_stale:]
        if not stale_modules:
            # We're up to date!
            return [], None

        # 4. Load input
        if first_stale == 0:
            prev_result = None
        else:
            # if the CachedRenderResult is obsolete because of a race (it's on
            # the filesystem as well as in the DB), we'll get _something_
            # back: this method doesn't raise exceptions. There's no harm done
            # if the value is wrong: we'll check that later anyway.
            prev_result = (
                all_modules[first_stale - 1].get_cached_render_result()
            )

        return stale_modules, prev_result
def _do_finish_upload(workflow: Workflow, wf_module: WfModule,
                      uuid: uuidgen.UUID, filename: str) -> Dict[str, Any]:
    """
    Convert the in-progress upload `uuid` into an UploadedFile named
    `filename`; return serialized wf-module data.

    Raise HandlerError if `uuid` names no current incomplete upload, or if
    the file was never actually uploaded.
    """
    with workflow.cooperative_lock():
        wf_module.refresh_from_db()
        try:
            upload = wf_module.in_progress_uploads.get(
                id=uuid, is_completed=False)
        except InProgressUpload.DoesNotExist:
            raise HandlerError(
                "BadRequest: key is not being uploaded for this WfModule right now. "
                "(Even a valid key becomes invalid after you create, finish or abort "
                "an upload on its WfModule.)")
        try:
            upload.convert_to_uploaded_file(filename)
        except FileNotFoundError:
            raise HandlerError(
                "BadRequest: file not found. "
                "You must upload the file before calling finish_upload.")
        return serializers.WfModuleSerializer(wf_module).data
def _load_tab_flows(workflow: Workflow, delta_id: int) -> List[TabFlow]:
    """
    Query `workflow` for each tab's `TabFlow` (ordered by tab position).

    Raise UnneededExecution if the workflow has moved past `delta_id`.
    """
    flows = []
    with workflow.cooperative_lock():  # reloads workflow
        if workflow.last_delta_id != delta_id:
            raise UnneededExecution
        for tab in workflow.live_tabs.all():
            steps = []
            for wfm in tab.live_wf_modules.all():
                # A deleted/unknown module has no version; fall back to an
                # empty param schema.
                if wfm.module_version is not None:
                    schema = wfm.module_version.param_schema
                else:
                    schema = ParamDType.Dict({})
                steps.append(ExecuteStep(wfm, schema, wfm.get_params()))
            flows.append(TabFlow(tab, steps))
        return flows
def _do_complete_upload(
    workflow: Workflow, wf_module: WfModule, key: str
) -> Tuple[Any, Dict[str, Any]]:
    """
    Convert `wf_module`'s in-progress single-part upload into an
    UploadedFile.

    Raise HandlerError unless `key` points to the current incomplete
    single-part upload (multipart uploads have a non-None upload id and are
    rejected here).

    Return (uuid, serialized-wf-module-data).

    BUG FIX: the old annotation claimed `Tuple[UploadedFile, ...]`, but the
    first element is `uploaded_file.uuid`, not the UploadedFile itself.
    Annotated `Any` -- presumably it is a str; confirm against the
    UploadedFile model.
    """
    with workflow.cooperative_lock():
        wf_module.refresh_from_db()
        if (
            wf_module.inprogress_file_upload_id is not None
            or wf_module.inprogress_file_upload_key != key
        ):
            raise HandlerError(
                'DoesNotExist: key must point to an incomplete upload'
            )
        uploaded_file = (
            _write_uploaded_file_and_clear_inprogress_file_upload(wf_module)
        )
        return (
            uploaded_file.uuid,
            serializers.WfModuleSerializer(wf_module).data,
        )
def _do_create_multipart_upload(
    workflow: Workflow, wf_module: WfModule, filename: str
) -> Dict[str, str]:
    """Start a multipart upload for `filename`; return its key and uploadId."""
    new_key = _generate_key(wf_module, filename)
    with workflow.cooperative_lock():
        wf_module.refresh_from_db()
        wf_module.abort_inprogress_upload()  # in case there is one already
        new_upload_id = minio.create_multipart_upload(
            minio.UserFilesBucket, new_key, filename
        )
        wf_module.inprogress_file_upload_id = new_upload_id
        wf_module.inprogress_file_upload_key = new_key
        wf_module.inprogress_file_upload_last_accessed_at = timezone.now()
        wf_module.save(update_fields=[
            'inprogress_file_upload_id',
            'inprogress_file_upload_key',
            'inprogress_file_upload_last_accessed_at',
        ])
        return {'key': new_key, 'uploadId': new_upload_id}
def _do_prepare_upload(
    workflow: Workflow,
    wf_module: WfModule,
    filename: str,
    n_bytes: int,
    base64Md5sum: str,
) -> Dict[str, str]:
    """Presign a single-part upload; return its key, URL and headers."""
    upload_key = _generate_key(wf_module, filename)
    with workflow.cooperative_lock():
        wf_module.refresh_from_db()
        wf_module.abort_inprogress_upload()  # drop any previous upload
        presigned_url, presigned_headers = minio.presign_upload(
            minio.UserFilesBucket, upload_key, filename, n_bytes,
            base64Md5sum
        )
        # id=None distinguishes this single-part upload from a multipart one
        wf_module.inprogress_file_upload_id = None
        wf_module.inprogress_file_upload_key = upload_key
        wf_module.inprogress_file_upload_last_accessed_at = timezone.now()
        wf_module.save(update_fields=[
            'inprogress_file_upload_id',
            'inprogress_file_upload_key',
            'inprogress_file_upload_last_accessed_at',
        ])
        return {
            'key': upload_key,
            'url': presigned_url,
            'headers': presigned_headers,
        }
def _load_wf_modules_and_input(workflow: Workflow,
                               until_wf_module: Optional[WfModule]):
    """
    Find (stale_wf_modules, previous_cached_result_or_none) from the database.

    If all modules are up-to-date, return ([], output_cached_result). Yes,
    beware: if we aren't rendering, we return *output*, and if we are
    rendering we return *input*. This is convenient for the caller.

    If there's a race, the returned `stale_wf_modules` may be too short, and
    `input_table` may be wrong. That should be fine because
    `execute_wfmodule` will raise an exception before starting work.
    """
    with workflow.cooperative_lock():
        # 1. Load list of wf_modules
        all_modules = list(workflow.wf_modules.all())
        if not all_modules:
            return [], None

        # 2. Find index of first one that needs render
        first_stale = next(
            (i for i, wfm in enumerate(all_modules) if _needs_render(wfm)),
            len(all_modules),
        )

        # 3. Find index of last module we're requesting
        if until_wf_module:
            module_ids = [m.id for m in all_modules]
            try:
                last = module_ids.index(until_wf_module.id)
            except ValueError:
                # Module isn't in workflow any more
                raise UnneededExecution
        else:
            last = len(all_modules) - 1

        stale_modules = all_modules[first_stale:last + 1]
        if not stale_modules:
            # We're up to date!
            if until_wf_module:
                # _needs_render() returned false, so we know we can fetch the
                # cached result. Load from `all_modules`, not
                # `until_wf_module`, because the latter may be stale.
                output = all_modules[last].get_cached_render_result()
            else:
                output = None
            return [], output

        # 4. Load input
        if first_stale == 0:
            prev_result = None
        else:
            # if the CachedRenderResult is obsolete because of a race (it's on
            # the filesystem as well as in the DB), we'll get _something_
            # back: this method doesn't raise exceptions. There's no harm done
            # if the value is wrong: we'll check that later anyway.
            prev_result = (
                all_modules[first_stale - 1].get_cached_render_result()
            )

        return stale_modules, prev_result
def _do_create_upload(workflow: Workflow,
                      wf_module: WfModule) -> Dict[str, Any]:
    """Create a fresh InProgressUpload and return its upload parameters."""
    with workflow.cooperative_lock():
        wf_module.refresh_from_db()
        new_upload = wf_module.in_progress_uploads.create()
        return new_upload.generate_upload_parameters()