def _do_set_notifications(scope, wf_module: WfModule, notifications: bool): wf_module.notifications = notifications wf_module.save(update_fields=["notifications"]) if notifications: server.utils.log_user_event_from_scope( scope, "Enabled email notifications", {"wfModuleId": wf_module.id} )
def _wf_module_delete_secret_and_build_delta( workflow: Workflow, wf_module: WfModule, param: str ) -> Optional[Dict[str, Any]]: """ Write a new secret (or `None`) to `wf_module`, or raise. Return a "delta" for websockets.ws_client_send_delta_async(), or `None` if the database has not been modified. Raise Workflow.DoesNotExist if the Workflow was deleted. """ with workflow.cooperative_lock(): # raises Workflow.DoesNotExist try: wf_module.refresh_from_db() except WfModule.DoesNotExist: return None # no-op if wf_module.secrets.get(param) is None: return None # no-op wf_module.secrets = dict(wf_module.secrets) # shallow copy del wf_module.secrets[param] wf_module.save(update_fields=["secrets"]) return { "updateWfModules": { str(wf_module.id): {"secrets": wf_module.secret_metadata} } }
def _wf_module_delete_secret_and_build_delta( workflow: Workflow, wf_module: WfModule, param: str) -> Optional[clientside.Update]: """ Write a new secret (or `None`) to `wf_module`, or raise. Return a `clientside.Update`, or `None` if the database is not modified. Raise Workflow.DoesNotExist if the Workflow was deleted. """ with workflow.cooperative_lock(): # raises Workflow.DoesNotExist try: wf_module.refresh_from_db() except WfModule.DoesNotExist: return None # no-op if wf_module.secrets.get(param) is None: return None # no-op wf_module.secrets = dict(wf_module.secrets) # shallow copy del wf_module.secrets[param] wf_module.save(update_fields=["secrets"]) return clientside.Update(steps={ wf_module.id: clientside.StepUpdate(secrets=wf_module.secret_metadata) })
def _do_create_result( workflow_id: int, wf_module: WfModule, result: FetchResult, now: timezone.datetime ) -> None: """ Do database manipulations for create_result(). Modify `wf_module` in-place. Do *not* do the logic in ChangeDataVersionCommand. We're creating a new version, not doing something undoable. Raise WfModule.DoesNotExist or Workflow.DoesNotExist in case of a race. """ error = "" if result.errors: if result.errors[0].message.id != "TODO_i18n": raise RuntimeError("TODO handle i18n-ready fetch-result errors") elif result.errors[0].quick_fixes: raise RuntimeError("TODO handle quick fixes from fetches") else: error = result.errors[0].message.args["text"] with _locked_wf_module(workflow_id, wf_module): storedobjects.create_stored_object( workflow_id, wf_module.id, result.path, stored_at=now ) storedobjects.enforce_storage_limits(wf_module) wf_module.fetch_error = error wf_module.is_busy = False wf_module.last_update_check = now wf_module.save(update_fields=["fetch_error", "is_busy", "last_update_check"])
def get_migrated_params(
    wf_module: WfModule, *, module_zipfile: Optional[ModuleZipfile] = None
) -> Dict[str, Any]:
    """
    Read `wf_module.params`, calling migrate_params() or using cache fields.

    Call this within a `Workflow.cooperative_lock()`.

    If migrate_params() was already called for this version of the module,
    return the cached value. See `wf_module.cached_migrated_params`,
    `wf_module.cached_migrated_params_module_version`.

    Raise `ModuleError` if migration fails.

    Raise `KeyError` if the module was deleted.

    Raise `RuntimeError` (unrecoverable) if there is a problem loading or
    executing the module. (Modules are validated before import, so this should
    not happen.)

    The result may be invalid. Call `validate()` to raise a `ValueError` to
    detect that case.

    TODO avoid holding the database lock whilst executing stuff on the kernel.
    (This will involve auditing and modifying all callers to handle new error
    cases.)
    """
    # Fix: `module_zipfile: ModuleZipfile = None` was an implicit-Optional
    # annotation; PEP 484 requires Optional[...] to be spelled explicitly.
    if module_zipfile is None:
        # raise KeyError
        module_zipfile = MODULE_REGISTRY.latest(wf_module.module_id_name)

    stale = (
        module_zipfile.version == "develop"
        # works if cached version (and thus cached _result_) is None
        or (
            module_zipfile.get_param_schema_version()
            != wf_module.cached_migrated_params_module_version
        )
    )
    if not stale:
        return wf_module.cached_migrated_params

    # raise ModuleError
    params = invoke_migrate_params(module_zipfile, wf_module.params)
    wf_module.cached_migrated_params = params
    wf_module.cached_migrated_params_module_version = (
        module_zipfile.get_param_schema_version()
    )
    try:
        wf_module.save(
            update_fields=[
                "cached_migrated_params",
                "cached_migrated_params_module_version",
            ]
        )
    except ValueError:
        # WfModule was deleted, so we get:
        # "ValueError: Cannot force an update in save() with no primary key."
        pass
    return params
def _do_mark_result_unchanged(
    workflow_id: int, wf_module: WfModule, now: timezone.datetime
) -> None:
    """
    Do database manipulations for mark_result_unchanged().

    Modify `wf_module` in-place.

    Raise WfModule.DoesNotExist or Workflow.DoesNotExist in case of a race.
    """
    with _locked_wf_module(workflow_id, wf_module):
        # The fetch produced nothing new: clear the busy flag and stamp the
        # time of this check, touching only those two fields.
        wf_module.is_busy = False
        wf_module.last_update_check = now
        wf_module.save(update_fields=["is_busy", "last_update_check"])
def _do_try_set_autofetch(
    scope, wf_module: WfModule, auto_update_data: bool, update_interval: int
):
    """
    Set `wf_module`'s autofetch settings, enforcing the user's fetch quota.

    Write `auto_update_data`, `update_interval` and a recomputed `next_update`
    to the database inside a transaction. If the change would *increase* the
    user's fetch frequency beyond quota, the transaction is rolled back and
    the old values are kept.

    Return a dict with "isAutofetch" and "fetchInterval" reflecting the final
    (post-commit or post-rollback) values, plus "quotaExceeded" (a dict from
    autofetch.list_autofetches_json()) when the quota blocked the change.
    """
    # We may ROLLBACK; if we do, we need to remember the old values
    old_auto_update_data = wf_module.auto_update_data
    old_update_interval = wf_module.update_interval
    # Check quota only when the fetch rate would increase: either autofetch
    # stays on with a shorter interval, or autofetch flips from off to on.
    check_quota = (
        auto_update_data
        and wf_module.auto_update_data
        and update_interval < wf_module.update_interval
    ) or (auto_update_data and not wf_module.auto_update_data)
    quota_exceeded = None
    try:
        with transaction.atomic():
            wf_module.auto_update_data = auto_update_data
            wf_module.update_interval = update_interval
            if auto_update_data:
                # Schedule the first fetch one full interval from now.
                wf_module.next_update = timezone.now() + datetime.timedelta(
                    seconds=update_interval
                )
            else:
                wf_module.next_update = None
            wf_module.save(
                update_fields=["auto_update_data", "update_interval", "next_update"]
            )
            # Now before we commit, let's see if we've surpassed the user's limit;
            # roll back if we have.
            #
            # Only rollback if we're _increasing_ our fetch count. If we're
            # lowering it, allow that -- even if the user is over limit, we still
            # want to commit because it's an improvement.
            if check_quota:
                autofetches = autofetch.list_autofetches_json(scope)
                if autofetches["nFetchesPerDay"] > autofetches["maxFetchesPerDay"]:
                    # Raising inside the atomic block aborts the transaction.
                    raise AutofetchQuotaExceeded(autofetches)
    except AutofetchQuotaExceeded as err:
        # DB state was rolled back by transaction.atomic(); restore the
        # in-memory copy to match.
        # NOTE(review): `next_update` is not restored here, so the in-memory
        # wf_module keeps the speculative value after rollback — confirm
        # whether callers rely on it (it is not part of the return value).
        wf_module.auto_update_data = old_auto_update_data
        wf_module.update_interval = old_update_interval
        quota_exceeded = err.autofetches

    retval = {
        "isAutofetch": wf_module.auto_update_data,
        "fetchInterval": wf_module.update_interval,
    }
    if quota_exceeded is not None:
        retval["quotaExceeded"] = quota_exceeded  # a dict
    return retval
def _wf_module_set_secret_and_build_delta( workflow: Workflow, wf_module: WfModule, param: str, secret: str) -> Optional[clientside.Update]: """ Write a new secret to `wf_module`, or raise. Return a `clientside.Update`, or `None` if the database is not modified. Raise Workflow.DoesNotExist if the Workflow was deleted. """ with workflow.cooperative_lock(): # raises Workflow.DoesNotExist try: wf_module.refresh_from_db() except WfModule.DoesNotExist: return None # no-op if wf_module.secrets.get(param, {}).get("secret") == secret: return None # no-op try: module_zipfile = MODULE_REGISTRY.latest(wf_module.module_id_name) except KeyError: raise HandlerError( f"BadRequest: ModuleZipfile {wf_module.module_id_name} does not exist" ) module_spec = module_zipfile.get_spec() if not any(p.type == "secret" and p.secret_logic.provider == "string" for p in module_spec.param_fields): raise HandlerError( f"BadRequest: param is not a secret string parameter") created_at = timezone.now() created_at_str = ( created_at.strftime("%Y-%m-%dT%H:%M:%S") + "." + created_at.strftime("%f")[0:3] # milliseconds + "Z") wf_module.secrets = { **wf_module.secrets, param: { "name": created_at_str, "secret": secret }, } wf_module.save(update_fields=["secrets"]) return clientside.Update(steps={ wf_module.id: clientside.StepUpdate(secrets=wf_module.secret_metadata) })
def _wf_module_set_secret_and_build_delta( workflow: Workflow, wf_module: WfModule, param: str, secret: str ) -> Optional[Dict[str, Any]]: """ Write a new secret to `wf_module`, or raise. Return a "delta" for websockets.ws_client_send_delta_async(), or `None` if the database is not modified. Raise Workflow.DoesNotExist if the Workflow was deleted. """ with workflow.cooperative_lock(): # raises Workflow.DoesNotExist try: wf_module.refresh_from_db() except WfModule.DoesNotExist: return None # no-op if wf_module.secrets.get(param, {}).get("secret") == secret: return None # no-op module_version = wf_module.module_version if module_version is None: raise HandlerError(f"BadRequest: ModuleVersion does not exist") if not any( p.type == "secret" and p.secret_logic.provider == "string" for p in module_version.param_fields ): raise HandlerError(f"BadRequest: param is not a secret string parameter") created_at = timezone.now() created_at_str = ( created_at.strftime("%Y-%m-%dT%H:%M:%S") + "." + created_at.strftime("%f")[0:3] # milliseconds + "Z" ) wf_module.secrets = { **wf_module.secrets, param: {"name": created_at_str, "secret": secret}, } wf_module.save(update_fields=["secrets"]) return { "updateWfModules": { str(wf_module.id): {"secrets": wf_module.secret_metadata} } }
def clear_cached_render_result_for_wf_module(wf_module: WfModule) -> None:
    """
    Delete a CachedRenderResult, if it exists.

    This deletes the Parquet file from disk, _then_ empties relevant database
    fields and saves them (and only them).
    """
    # File first: a DB record pointing at nothing is better than an orphan file.
    delete_parquet_files_for_wf_module(wf_module.workflow_id, wf_module.id)

    # Reset every cache field to its "empty" value.
    for field, empty in (
        ("cached_render_result_delta_id", None),
        ("cached_render_result_errors", []),
        ("cached_render_result_json", b"null"),
        ("cached_render_result_status", None),
        ("cached_render_result_columns", None),
        ("cached_render_result_nrows", None),
    ):
        setattr(wf_module, field, empty)

    wf_module.save(update_fields=WF_MODULE_FIELDS)
def cache_render_result(
    workflow: Workflow, wf_module: WfModule, delta_id: int, result: RenderResult
) -> None:
    """
    Save `result` for later viewing.

    Raise AssertionError if `delta_id` is not what we expect.

    Since this alters data, be sure to call it within a lock:

        with workflow.cooperative_lock():
            wf_module.refresh_from_db()  # may change delta_id
            cache_render_result(workflow, wf_module, delta_id, result)
    """
    assert delta_id == wf_module.last_relevant_delta_id
    assert result is not None

    json_bytes = json_encode(result.json).encode("utf-8")
    # Status: "ok" when there is a table; otherwise "error" or "unreachable"
    # depending on whether render produced errors.
    if not result.table.metadata.columns:
        if result.errors:
            status = "error"
        else:
            status = "unreachable"
    else:
        status = "ok"

    # Stage all cache fields on the in-memory object before touching storage.
    wf_module.cached_render_result_delta_id = delta_id
    wf_module.cached_render_result_errors = result.errors
    wf_module.cached_render_result_error = ""  # DELETEME
    wf_module.cached_render_result_quick_fixes = []  # DELETEME
    wf_module.cached_render_result_status = status
    wf_module.cached_render_result_json = json_bytes
    wf_module.cached_render_result_columns = result.table.metadata.columns
    wf_module.cached_render_result_nrows = result.table.metadata.n_rows

    # Now we get to the part where things can end up inconsistent. Try to
    # err on the side of not-caching when that happens.
    #
    # The order below is the contract: delete the old file, then write the DB
    # row, then write the new file. A crash between steps leaves a cache that
    # is at worst missing, never stale.
    delete_parquet_files_for_wf_module(
        workflow.id, wf_module.id
    )  # makes old cache inconsistent
    wf_module.save(update_fields=WF_MODULE_FIELDS)  # makes new cache inconsistent
    if result.table.metadata.columns:
        # only write non-zero-column tables
        with tempfile_context() as parquet_path:
            parquet.write(parquet_path, result.table.table)
            minio.fput_file(
                BUCKET, parquet_key(workflow.id, wf_module.id, delta_id), parquet_path
            )  # makes new cache consistent
def _do_create_result(
    workflow_id: int, wf_module: WfModule, result: FetchResult, now: timezone.datetime
) -> None:
    """
    Do database manipulations for create_result().

    Modify `wf_module` in-place.

    Do *not* do the logic in ChangeDataVersionCommand. We're creating a new
    version, not doing something undoable.

    Raise WfModule.DoesNotExist or Workflow.DoesNotExist in case of a race.
    """
    with _locked_wf_module(workflow_id, wf_module):
        # Persist the fetched file as a new StoredObject, then trim old
        # versions so the workflow stays within its storage quota.
        storedobjects.create_stored_object(
            workflow_id, wf_module.id, result.path, stored_at=now
        )
        storedobjects.enforce_storage_limits(wf_module)

        wf_module.fetch_errors = result.errors
        wf_module.is_busy = False
        wf_module.last_update_check = now
        wf_module.save(update_fields=["fetch_errors", "is_busy", "last_update_check"])
def _do_set_file_upload_api_token(wf_module: WfModule, api_token: Optional[str]): wf_module.file_upload_api_token = api_token wf_module.save(update_fields=["file_upload_api_token"])
def _do_clear_unseen_notification(wf_module: WfModule): wf_module.has_unseen_notification = False wf_module.save(update_fields=["has_unseen_notification"])
def _do_set_collapsed(wf_module: WfModule, is_collapsed: bool): wf_module.is_collapsed = is_collapsed wf_module.save(update_fields=["is_collapsed"])