Ejemplo n.º 1
0
def arrow_quick_fix_action_to_thrift(
        value: QuickFixAction) -> ttypes.QuickFixAction:
    """Convert an Arrow-side QuickFixAction to its Thrift representation.

    Raise NotImplementedError for any action type other than
    PrependStepQuickFixAction (the only variant handled here).
    """
    if not isinstance(value, PrependStepQuickFixAction):
        raise NotImplementedError
    raw_params = ttypes.RawParams(json_encode(value.partial_params))
    return ttypes.QuickFixAction(
        prepend_step=ttypes.PrependStepQuickFixAction(
            value.module_slug, raw_params))
Ejemplo n.º 2
0
def cache_render_result(workflow: Workflow, wf_module: WfModule, delta_id: int,
                        result: RenderResult) -> None:
    """
    Save `result` for later viewing.

    Raise AssertionError if `delta_id` is not what we expect.

    Since this alters data, be sure to call it within a lock:

        with workflow.cooperative_lock():
            wf_module.refresh_from_db()  # may change delta_id
            cache_render_result(workflow, wf_module, delta_id, result)
    """
    assert delta_id == wf_module.last_relevant_delta_id
    assert result is not None

    json_bytes = json_encode(result.json).encode("utf-8")
    # Derive the cache status: a zero-column table means the step produced no
    # output — either because it errored or because an upstream step made it
    # unreachable. A table with columns is a successful ("ok") render.
    if not result.table.metadata.columns:
        if result.errors:
            status = "error"
        else:
            status = "unreachable"
    else:
        status = "ok"

    # Copy cache metadata onto the WfModule row (persisted by save() below).
    wf_module.cached_render_result_delta_id = delta_id
    wf_module.cached_render_result_errors = result.errors
    wf_module.cached_render_result_error = ""  # DELETEME
    wf_module.cached_render_result_quick_fixes = []  # DELETEME
    wf_module.cached_render_result_status = status
    wf_module.cached_render_result_json = json_bytes
    wf_module.cached_render_result_columns = result.table.metadata.columns
    wf_module.cached_render_result_nrows = result.table.metadata.n_rows

    # Now we get to the part where things can end up inconsistent. Try to
    # err on the side of not-caching when that happens.
    #
    # Order matters: (1) delete old Parquet files, (2) save the new DB row,
    # (3) write the new Parquet file. A crash between any two steps leaves
    # the cache pointing at missing data ("not cached") rather than at stale
    # data — presumably readers treat a missing file as a cache miss.
    delete_parquet_files_for_wf_module(
        workflow.id, wf_module.id)  # makes old cache inconsistent
    wf_module.save(
        update_fields=WF_MODULE_FIELDS)  # makes new cache inconsistent
    if result.table.metadata.columns:  # only write non-zero-column tables
        with tempfile_context() as parquet_path:
            parquet.write(parquet_path, result.table.table)
            minio.fput_file(BUCKET,
                            parquet_key(workflow.id, wf_module.id, delta_id),
                            parquet_path)  # makes new cache consistent
Ejemplo n.º 3
0
def cache_render_result(
    workflow: Workflow, step: Step, delta_id: int, result: LoadedRenderResult
) -> None:
    """Save `result` for later viewing.

    Raise AssertionError if `delta_id` is not what we expect.

    Since this alters data, call it within a lock:

        with workflow.cooperative_lock():
            step.refresh_from_db()  # may change delta_id
            cache_render_result(workflow, step, delta_id, result)
    """
    assert delta_id == step.last_relevant_delta_id
    assert result is not None

    json_bytes = json_encode(result.json).encode("utf-8")
    if not result.columns:
        if result.errors:
            status = "error"
        else:
            status = "unreachable"
    else:
        status = "ok"

    step.cached_render_result_delta_id = delta_id
    step.cached_render_result_errors = result.errors
    step.cached_render_result_status = status
    step.cached_render_result_json = json_bytes
    step.cached_render_result_columns = result.columns
    step.cached_render_result_nrows = result.table.num_rows

    # Now we get to the part where things can end up inconsistent. Try to
    # err on the side of not-caching when that happens.
    delete_parquet_files_for_step(workflow.id, step.id)  # makes old cache inconsistent
    step.save(update_fields=STEP_FIELDS)  # makes new cache inconsistent
    if result.table.num_columns:  # only write non-zero-column tables
        with tempfile_context() as parquet_path:
            cjwparquet.write(parquet_path, result.table)
            s3.fput_file(
                BUCKET, parquet_key(workflow.id, step.id, delta_id), parquet_path
            )  # makes new cache consistent
Ejemplo n.º 4
0
def arrow_render_result_to_thrift(value: RenderResult) -> ttypes.RenderResult:
    """Convert an Arrow RenderResult to its Thrift representation.

    `value.json` of None becomes the empty string on the Thrift side.
    """
    thrift_table = arrow_arrow_table_to_thrift(value.table)
    thrift_errors = [arrow_render_error_to_thrift(error) for error in value.errors]
    thrift_json = json_encode(value.json) if value.json is not None else ""
    return ttypes.RenderResult(thrift_table, thrift_errors, thrift_json)
Ejemplo n.º 5
0
def arrow_raw_params_to_thrift(value: RawParams) -> ttypes.RawParams:
    """Convert Arrow RawParams to Thrift by JSON-encoding its params."""
    encoded = json_encode(value.params)
    return ttypes.RawParams(encoded)
Ejemplo n.º 6
0
 def to_thrift(self) -> ttypes.RenderResult:
     """Build the Thrift representation of this render result.

     A `self.json` of None is sent as the empty string.
     """
     thrift_errors = [error.to_thrift() for error in self.errors]
     thrift_json = json_encode(self.json) if self.json is not None else ""
     return ttypes.RenderResult(self.table.to_thrift(), thrift_errors, thrift_json)
Ejemplo n.º 7
0
 def to_thrift(self) -> ttypes.QuickFixAction:
     """Build the Thrift union value for this prepend-step quick fix."""
     raw_params = ttypes.RawParams(json_encode(self.partial_params))
     action = ttypes.PrependStepQuickFixAction(self.module_slug, raw_params)
     return ttypes.QuickFixAction(prepend_step=action)
Ejemplo n.º 8
0
 def to_thrift(self) -> ttypes.RawParams:
     """Serialize `self.params` to JSON and wrap it in Thrift RawParams."""
     params_json = json_encode(self.params)
     return ttypes.RawParams(params_json)
Ejemplo n.º 9
0
def migrate_params_thrift(params: ttypes.Params):
    """Decode Thrift params JSON, migrate them, and re-encode as Thrift.

    Thin Thrift adapter around the module's `migrate_params()`.
    """
    decoded: Dict[str, Any] = json.loads(params.json)
    migrated = migrate_params(decoded)
    return ttypes.Params(json_encode(migrated))