def create_stored_object(
    workflow_id: int,
    step_id: int,
    path: Path,
    stored_at: Optional[datetime.datetime] = None,
) -> StoredObject:
    """Write and return a new StoredObject.

    The caller should call enforce_storage_limits() after calling this.

    Raise IntegrityError if a database race prevents saving this. Raise an
    s3 error if writing to s3 failed. In case of partial completion, a
    StoredObject will exist in the database but no file will be saved in s3.
    """
    if stored_at is None:
        stored_at = datetime.datetime.now()
    key = _build_key(workflow_id, step_id)
    size = path.stat().st_size
    stored_object = StoredObject.objects.create(
        stored_at=stored_at,
        step_id=step_id,
        key=key,
        size=size,
        hash="unhashed",
    )
    s3.fput_file(BUCKET, key, path)
    return stored_object
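
# Usage sketch (illustrative, not part of this module): write the file, then
# trim old versions, as the docstring above prescribes. enforce_storage_limits()
# is only named in that docstring; its exact signature, and the `workflow`,
# `step` and `downloaded_path` values below, are assumptions.
#
#     stored_object = create_stored_object(workflow.id, step.id, downloaded_path)
#     enforce_storage_limits(step)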


def cache_render_result(
    workflow: Workflow, step: Step, delta_id: int, result: LoadedRenderResult
) -> None:
    """Save `result` for later viewing.

    Raise AssertionError if `delta_id` is not what we expect.

    Since this alters data, call it within a lock:

        with workflow.cooperative_lock():
            step.refresh_from_db()  # may change delta_id
            cache_render_result(workflow, step, delta_id, result)
    """
    assert delta_id == step.last_relevant_delta_id
    assert result is not None

    json_bytes = json_encode(result.json).encode("utf-8")
    if not result.columns:
        if result.errors:
            status = "error"
        else:
            status = "unreachable"
    else:
        status = "ok"

    step.cached_render_result_delta_id = delta_id
    step.cached_render_result_errors = result.errors
    step.cached_render_result_status = status
    step.cached_render_result_json = json_bytes
    step.cached_render_result_columns = result.columns
    step.cached_render_result_nrows = result.table.num_rows

    # Now we get to the part where things can end up inconsistent. Try to
    # err on the side of not-caching when that happens.
    delete_parquet_files_for_step(workflow.id, step.id)  # makes old cache inconsistent
    step.save(update_fields=STEP_FIELDS)  # makes new cache inconsistent

    if result.table.num_columns:  # only write non-zero-column tables
        with tempfile_context() as parquet_path:
            cjwparquet.write(parquet_path, result.table)
            s3.fput_file(
                BUCKET, parquet_key(workflow.id, step.id, delta_id), parquet_path
            )  # makes new cache consistent
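
# Usage sketch (illustrative): the caller pattern from the docstring above.
# `workflow`, `step` and `result` are assumed to come from the caller's render
# loop; the cooperative lock keeps last_relevant_delta_id from changing between
# the refresh and the cache write, so the assertion at the top of
# cache_render_result() holds.
#
#     with workflow.cooperative_lock():
#         step.refresh_from_db()  # may change last_relevant_delta_id
#         cache_render_result(workflow, step, step.last_relevant_delta_id, result)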


def import_zipfile(path: Path) -> clientside.Module:
    """Save a zipfile to database and s3 and build a `clientside.Module`.

    Raise `WorkbenchModuleImportError` if `path` points to an invalid module.
    Otherwise, do not raise any errors one can sensibly recover from.
    """
    temp_zipfile = ModuleZipfile(path)
    validate_zipfile(temp_zipfile)  # raise WorkbenchModuleImportError
    module_id = temp_zipfile.module_id
    version = temp_zipfile.version
    module_spec = temp_zipfile.get_spec()
    js_module = temp_zipfile.get_optional_js_module() or ""
    s3.fput_file(s3.ExternalModulesBucket, "%s/%s" % (module_id, path.name), path)
    ModuleVersion.objects.update_or_create(
        id_name=module_id,
        source_version_hash=version,
        spec=asdict(temp_zipfile.get_spec()),
        js_module=js_module,
    )
    return clientside.Module(module_spec, js_module)
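
# Usage sketch (illustrative): import a module zipfile and surface validation
# problems. The zipfile path here is hypothetical; WorkbenchModuleImportError
# is the exception named in the docstring above.
#
#     try:
#         module = import_zipfile(Path("/tmp/loadcsv.abc123.zip"))
#     except WorkbenchModuleImportError:
#         ...  # invalid module: report the error to the uploader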