# Imports reconstructed for this snippet; the module paths are best-effort
# assumptions about the FlowMachine layout and may differ in the real suite.
import pytest

from flowmachine.core import make_spatial_unit
from flowmachine.core.cache import reset_cache
from flowmachine.core.context import get_db, get_redis
from flowmachine.features import daily_location
from flowmachine.features.location.redacted_spatial_aggregate import (
    RedactedSpatialAggregate,
)
from flowmachine.features.location.spatial_aggregate import SpatialAggregate

# Test-suite helpers (ZMQ messaging, polling, cache inspection, connection
# management); the module name is assumed.
from .helpers import (
    cache_schema_is_empty,
    connections,
    get_cache_tables,
    poll_until_done,
    send_zmq_message_and_receive_reply,
)


def test_run_query(zmq_port, zmq_host, fm_conn, redis):
    """
    Run a spatial_aggregate query over a daily_location and check that the
    resulting cache table contains the expected rows.
    """
    msg_run_query = {
        "action": "run_query",
        "params": {
            "query_kind": "spatial_aggregate",
            "locations": {
                "query_kind": "daily_location",
                "date": "2016-01-01",
                "method": "last",
                "aggregation_unit": "admin3",
                "subscriber_subset": None,
            },
        },
        "request_id": "DUMMY_ID",
    }
    q = RedactedSpatialAggregate(
        spatial_aggregate=SpatialAggregate(
            locations=daily_location(
                date="2016-01-01",
                method="last",
                spatial_unit=make_spatial_unit("admin", level=3),
                table=None,
                subscriber_subset=None,
            )
        )
    )
    expected_query_id = q.query_id

    #
    # Check that we are starting with a clean slate (no cache tables, empty redis).
    #
    reset_cache(get_db(), redis, protect_table_objects=False)
    assert cache_schema_is_empty(get_db())
    assert not redis.exists(expected_query_id)

    #
    # Send a message to run the daily_location query, and check that it was
    # accepted and that a redis lookup was created for the query id.
    #
    reply = send_zmq_message_and_receive_reply(msg_run_query,
                                               port=zmq_port,
                                               host=zmq_host)
    # assert reply["status"] in ("executing", "queued", "completed")
    assert reply["status"] in ("success")
    assert expected_query_id == reply["payload"]["query_id"]
    # assert redis.exists(expected_query_id)

    #
    # Wait until the query has finished.
    #
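    # (poll_until_done is a test helper, presumably blocking by polling the
    # server over ZMQ until it reports the query as completed.)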
    poll_until_done(zmq_port, expected_query_id)

    #
    # Check that a cache table for the query result was created
    # and that it contains the expected number of rows.
    #
    output_cache_table = f"x{expected_query_id}"
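    # Result tables live in the "cache" schema and take an "x" prefix,
    # likely because a raw query-id hash can begin with a digit, which is
    # not valid at the start of an unquoted SQL identifier.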
    assert output_cache_table in get_cache_tables(get_db())
    num_rows = get_db().engine.execute(
        f"SELECT COUNT(*) FROM cache.{output_cache_table}"
    ).fetchone()[0]
    assert num_rows == 14

    #
    # In addition, check that the first few rows of the result are as expected.
    #

    first_few_rows_expected = [
        ("524 1 02 09", 26),
        ("524 1 03 13", 20),
        ("524 3 08 43", 35),
    ]
    first_few_rows = get_db().engine.execute(
        f"SELECT * FROM cache.{output_cache_table} ORDER BY pcod LIMIT 3"
    ).fetchall()
    assert first_few_rows_expected == first_few_rows
# Presumably registered as a pytest fixture in the original suite; the
# decorator below is an assumption (its scope/autouse settings may differ).
@pytest.fixture
def flowmachine_connect():
    with connections():
        yield
        reset_cache(get_db(), get_redis(), protect_table_objects=False)
        get_db().engine.dispose()  # Close the connection
        get_redis().flushdb()  # Empty the redis
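

# Illustrative usage only: a test that requests the fixture above by name
# runs with live database and redis connections. This test function is a
# hypothetical example, not part of the original snippet.
def test_connections_are_live(flowmachine_connect):
    # A trivial round-trip shows the engine returned by get_db() is usable.
    assert get_db().engine.execute("SELECT 1").fetchone()[0] == 1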