def clear_cached_render_result_for_wf_module(wf_module: WfModule) -> None:
    """
    Delete a CachedRenderResult, if it exists.

    This deletes the Parquet file from disk, _then_ empties relevant database
    fields and saves them (and only them).
    """
    # Delete the on-disk Parquet data first so a crash mid-way leaves us with
    # "no cache" rather than "cache metadata pointing at missing data".
    delete_parquet_files_for_wf_module(wf_module.workflow_id, wf_module.id)

    wf_module.cached_render_result_delta_id = None
    wf_module.cached_render_result_errors = []
    # Reset the legacy fields too: cache_render_result() writes them before
    # saving with the same WF_MODULE_FIELDS list, so leaving them untouched
    # here would persist stale values on clear.
    wf_module.cached_render_result_error = ""  # DELETEME
    wf_module.cached_render_result_quick_fixes = []  # DELETEME
    wf_module.cached_render_result_json = b"null"
    wf_module.cached_render_result_status = None
    wf_module.cached_render_result_columns = None
    wf_module.cached_render_result_nrows = None

    # Save only the cache-related fields, not the whole row.
    wf_module.save(update_fields=WF_MODULE_FIELDS)
def cache_render_result(
    workflow: Workflow, wf_module: WfModule, delta_id: int, result: RenderResult
) -> None:
    """
    Save `result` for later viewing.

    Raise AssertionError if `delta_id` is not what we expect.

    Since this alters data, be sure to call it within a lock:

        with workflow.cooperative_lock():
            wf_module.refresh_from_db()  # may change delta_id
            cache_render_result(workflow, wf_module, delta_id, result)
    """
    assert delta_id == wf_module.last_relevant_delta_id
    assert result is not None

    metadata = result.table.metadata
    encoded_json = json_encode(result.json).encode("utf-8")

    # A table with columns is "ok"; with no columns, the module either
    # errored or was never reached.
    if metadata.columns:
        status = "ok"
    elif result.errors:
        status = "error"
    else:
        status = "unreachable"

    wf_module.cached_render_result_delta_id = delta_id
    wf_module.cached_render_result_errors = result.errors
    wf_module.cached_render_result_error = ""  # DELETEME
    wf_module.cached_render_result_quick_fixes = []  # DELETEME
    wf_module.cached_render_result_status = status
    wf_module.cached_render_result_json = encoded_json
    wf_module.cached_render_result_columns = metadata.columns
    wf_module.cached_render_result_nrows = metadata.n_rows

    # From here on, a crash can leave things inconsistent. Try to err on the
    # side of not-caching when that happens.
    delete_parquet_files_for_wf_module(
        workflow.id, wf_module.id
    )  # makes old cache inconsistent
    wf_module.save(update_fields=WF_MODULE_FIELDS)  # makes new cache inconsistent

    # Only write non-zero-column tables: "error"/"unreachable" have no data.
    if metadata.columns:
        with tempfile_context() as parquet_path:
            parquet.write(parquet_path, result.table.table)
            minio.fput_file(
                BUCKET,
                parquet_key(workflow.id, wf_module.id, delta_id),
                parquet_path,
            )  # makes new cache consistent