Example #1
def clear_cached_render_result_for_step(step: Step) -> None:
    """Delete a CachedRenderResult, if it exists.

    This deletes the Parquet file from disk, _then_ empties relevant
    database fields and saves them (and only them).
    """
    delete_parquet_files_for_step(step.workflow_id, step.id)

    step.cached_render_result_delta_id = None
    step.cached_render_result_errors = []
    step.cached_render_result_json = b"null"
    step.cached_render_result_status = None
    step.cached_render_result_columns = None
    step.cached_render_result_nrows = None

    step.save(update_fields=STEP_FIELDS)
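
A minimal usage sketch, assuming the same locking convention shown in the docstring of cache_render_result in Example #2; workflow and step are assumed to be already-loaded ORM objects:

with workflow.cooperative_lock():
    step.refresh_from_db()  # pick up any concurrent changes before mutating
    clear_cached_render_result_for_step(step)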
Example #2
def cache_render_result(
    workflow: Workflow, step: Step, delta_id: int, result: LoadedRenderResult
) -> None:
    """Save `result` for later viewing.

    Raise AssertionError if `delta_id` is not what we expect.

    Since this alters data, call it within a lock:

        with workflow.cooperative_lock():
            step.refresh_from_db()  # may change delta_id
            cache_render_result(workflow, step, delta_id, result)
    """
    assert delta_id == step.last_relevant_delta_id
    assert result is not None

    json_bytes = json_encode(result.json).encode("utf-8")
    if not result.columns:
        if result.errors:
            status = "error"
        else:
            status = "unreachable"
    else:
        status = "ok"

    step.cached_render_result_delta_id = delta_id
    step.cached_render_result_errors = result.errors
    step.cached_render_result_status = status
    step.cached_render_result_json = json_bytes
    step.cached_render_result_columns = result.columns
    step.cached_render_result_nrows = result.table.num_rows

    # Now we get to the part where things can end up inconsistent. Try to
    # err on the side of not-caching when that happens.
    delete_parquet_files_for_step(workflow.id, step.id)  # makes old cache inconsistent
    step.save(update_fields=STEP_FIELDS)  # makes new cache inconsistent
    if result.table.num_columns:  # only write non-zero-column tables
        with tempfile_context() as parquet_path:
            cjwparquet.write(parquet_path, result.table)
            s3.fput_file(
                BUCKET, parquet_key(workflow.id, step.id, delta_id), parquet_path
            )  # makes new cache consistent
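
For completeness, a hedged sketch of the calling sequence described in the docstring; `result` is assumed to be a LoadedRenderResult produced elsewhere in the render pipeline:

with workflow.cooperative_lock():
    step.refresh_from_db()  # may change last_relevant_delta_id
    cache_render_result(workflow, step, step.last_relevant_delta_id, result)
    # After a successful call, the cached status reflects the result contents
    assert step.cached_render_result_status in ("ok", "error", "unreachable")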