def get_observations(ert, ensemble_id1, keyword: str, poly_ran):
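    """Fetch the observations attached to the record *keyword* and assert
    that their count and shape match the poly_ran test configuration."""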
    obs = run_in_loop(
        records.get_record_observations(res=ert,
                                        ensemble_id=ensemble_id1,
                                        name=keyword))

    if "PSUM" in keyword:
        n = int(keyword[4:])
        if n < poly_ran["sum_obs_count"]:
            count = (poly_ran["summary_data_entries"]
                     // poly_ran["sum_obs_every"])
            assert len(obs) == 1
            assert len(obs[0].errors) == count
            assert len(obs[0].x_axis) == count
            assert len(obs[0].values) == count
        else:
            assert len(obs) == 0

    elif "POLY_RES_" in keyword:
        n = int(keyword.split("@")[0][9:])
        if n < poly_ran["gen_obs_count"]:
            count = poly_ran["gen_data_entries"] // poly_ran["gen_obs_every"]
            assert len(obs) == 1
            assert len(obs[0].errors) == count
            assert len(obs[0].x_axis) == count
            assert len(obs[0].values) == count
        else:
            assert len(obs) == 0
    else:
        assert False, f"should never get here, keyword is {keyword}"
def get_result(ert, ensemble_id1, keyword, poly_ran):
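    """Fetch the response *keyword* as CSV and assert that the resulting
    dataframe has one column per data entry and one row per realization."""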
    csv = run_in_loop(
        responses.get_ensemble_response_dataframe(res=ert,
                                                  ensemble_id=ensemble_id1,
                                                  response_name=keyword)).body
    response_df1 = pd.read_csv(io.BytesIO(csv),
                               index_col=0,
                               float_precision="round_trip")
    assert len(response_df1.columns) == poly_ran["gen_data_entries"]
    assert len(response_df1.index) == poly_ran["reals"]


def get_record_parquet(ert, ensemble_id1, keyword, poly_ran):
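    """Fetch the record *keyword* as parquet and assert that the resulting
    dataframe has one column per data entry and one row per realization."""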
    parquet = run_in_loop(
        records.get_ensemble_record(
            res=ert,
            name=keyword,
            ensemble_id=ensemble_id1,
            accept="application/x-parquet",
        )).body
    record_df1 = pd.read_parquet(io.BytesIO(parquet))
    assert len(record_df1.columns) == poly_ran["gen_data_entries"]
    assert len(record_df1.index) == poly_ran["reals"]


def get_single_record_csv(ert, ensemble_id1, keyword, poly_ran):
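    """Fetch the last realization of the record *keyword* as CSV and assert
    that the resulting dataframe holds exactly one row."""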
    csv = run_in_loop(
        records.get_ensemble_record(
            res=ert,
            name=keyword,
            ensemble_id=ensemble_id1,
            realization_index=poly_ran["reals"] - 1,
        )).body
    record_df1_indexed = pd.read_csv(io.BytesIO(csv),
                                     index_col=0,
                                     float_precision="round_trip")
    assert len(record_df1_indexed.columns) == poly_ran["gen_data_entries"]
    assert len(record_df1_indexed.index) == 1


def get_parameters(ert, ensemble_id1, keyword, poly_ran):
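    """Fetch the ensemble parameters and assert that their total count
    matches the poly_ran test configuration (keyword is unused here)."""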
    parameters_json = run_in_loop(
        records.get_ensemble_parameters(res=ert, ensemble_id=ensemble_id1))
    assert (len(parameters_json) == poly_ran["parameter_entries"] *
            poly_ran["parameter_count"])