Example #1
def unlock(timer, redis, db_id):
    # `dl` is a query defined in the enclosing test; walk its state
    # machine through the full queued -> executing -> finished lifecycle,
    # ticking the shared timer along the way.
    qsm = QueryStateMachine(redis, dl.query_id, db_id)
    qsm.enqueue()
    for i in range(101):
        timer.append(i)
    qsm.execute()
    qsm.finish()
Example #2
def test_query_progress(dummy_redis):
    """
    Test that correct counts for dependency progress are returned.
    """
    dummy = DummyQuery(dummy_param="DUMMY")
    queued_qsm = QueryStateMachine(dummy_redis, dummy.query_id,
                                   get_db().conn_id)
    queued_qsm.enqueue()
    stored_dummy = DummyQuery(dummy_param="STORED_DUMMY")
    stored_dummy.store()
    executing_dummy = DummyQuery(dummy_param="EXECUTING_DUMMY")
    executing_qsm = QueryStateMachine(dummy_redis, executing_dummy.query_id,
                                      get_db().conn_id)
    executing_qsm.enqueue()
    executing_qsm.execute()

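    # Build a nested query whose dependencies are queued, stored, and
    # executing respectively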
    nested = DummyQuery(dummy_param=[dummy, stored_dummy, executing_dummy])
    assert query_progress(nested) == dict(
        eligible=3,
        running=1,
        queued=1,
    )
    nested.store()
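    # Once the nested query is stored, no dependencies are left to run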
    assert query_progress(nested) == dict(
        eligible=0,
        running=0,
        queued=0,
    )
Example #3
def unlock(timer):
    # Same lifecycle as in example #1, here via the two-argument
    # QueryStateMachine signature, which identifies the query by its md5;
    # `dl` and `timer` come from the enclosing test.
    qsm = QueryStateMachine(dl.redis, dl.md5)
    qsm.enqueue()
    for i in range(101):
        timer.append(i)
    qsm.execute()
    qsm.finish()
Example #4
def test_drop_query_errors():
    """Test that resetting a query's cache will error if in a state where that isn't possible."""
    q = DummyQuery(dummy_id=1, sleep_time=5)
    qsm = QueryStateMachine(q.redis, q.md5)
    # Mark the query as currently executing; the cache cannot be reset from this state
    qsm.enqueue()
    qsm.execute()
    with pytest.raises(QueryResetFailedException):
        q.invalidate_db_cache()
Example #5
    def __init__(self, name=None, schema=None, columns=None):
        """

        """
        try:
            self.connection
        except AttributeError:
            raise NotConnectedError()

        if "." in name:
            extracted_schema, name = name.split(".")
            if schema is not None:
                if schema != extracted_schema:
                    raise ValueError("Two schema provided.")
            schema = extracted_schema
        elif schema is None:
            schema = "public"

        self.name = name
        self.schema = schema
        self.fqn = "{}.{}".format(schema, name) if schema else name
        if "." not in self.fqn:
            raise ValueError("{} is not a valid table.".format(self.fqn))
        if not self.is_stored:
            raise ValueError("{} is not a known table.".format(self.fqn))

        # Get the actual columns of this table from the database
        db_columns = list(
            zip(
                *self.connection.fetch(
                    f"""SELECT column_name FROM information_schema.columns
                    WHERE table_name = '{self.name}' AND table_schema = '{self.schema}'"""
                )
            )
        )[0]
        if columns is None or columns == []:
            # No columns specified; take them from the database
            columns = db_columns
        else:
            self.parent_table = Table(
                schema=self.schema, name=self.name)  # Point to the full table
            if isinstance(columns, str):  # Wrap strings in a list
                columns = [columns]
            logger.debug(
                f"Checking provided columns {columns} against db columns {db_columns}"
            )
            if not set(columns).issubset(db_columns):
                raise ValueError("{} are not columns of {}".format(
                    set(columns).difference(db_columns), self.fqn))

        # Record provided columns to ensure that query_id differs with different columns
        self.columns = columns
        super().__init__()
        # Table is immediately in a 'finished executing' state
        q_state_machine = QueryStateMachine(self.redis, self.query_id)
        if not q_state_machine.is_completed:
            q_state_machine.enqueue()
            q_state_machine.execute()
            write_cache_metadata(self.connection, self, compute_time=0)
            q_state_machine.finish()
Example #6
def test_store_exceptions(fail_event, expected_exception):
    """Test that exceptions are raised when watching a store op triggered elsewhere."""
    q = DummyQuery(dummy_id=1, sleep_time=5)
    qsm = QueryStateMachine(q.redis, q.md5)
    # Mark the query as having begun executing elsewhere
    qsm.enqueue()
    qsm.execute()
    q_fut = q.store()
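    # Simulate the external store op failing by firing the failure event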
    qsm.trigger_event(fail_event)
    with pytest.raises(expected_exception):
        raise q_fut.exception()
Example #7
def test_drop_query_blocks(monkeypatch):
    """Test that resetting a query's cache will block if that's already happening."""
    monkeypatch.setattr(flowmachine.core.query, "_sleep",
                        Mock(side_effect=BlockingIOError))
    q = DummyQuery(dummy_id=1, sleep_time=5)
    qsm = QueryStateMachine(q.redis, q.md5)
    # Mark the query as in the process of resetting
    qsm.enqueue()
    qsm.execute()
    qsm.finish()
    qsm.reset()
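    # A second reset attempt should block; the patched _sleep surfaces this
    # as BlockingIOError instead of waiting forever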
    with pytest.raises(BlockingIOError):
        q.invalidate_db_cache()
Example #8
async def test_rerun_query_after_cancelled(server_config, real_connections):
    """
    Test that a query can be rerun after it has been cancelled.
    """
    query_obj = (FlowmachineQuerySchema().load(
        dict(
            query_kind="spatial_aggregate",
            locations=dict(
                query_kind="daily_location",
                date="2016-01-01",
                method="last",
                aggregation_unit="admin3",
            ),
        ))._flowmachine_query_obj)
    query_id = query_obj.query_id
    qsm = QueryStateMachine(get_redis(), query_id, get_db().conn_id)
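    # Queue the query, then cancel it before it has a chance to run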
    qsm.enqueue()
    qsm.cancel()
    assert not query_obj.is_stored
    assert qsm.is_cancelled
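    # Re-register the query's parameters so the server can look them up on rerun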
    query_info_lookup = QueryInfoLookup(get_redis())
    query_info_lookup.register_query(
        query_id,
        dict(
            query_kind="spatial_aggregate",
            locations=dict(
                query_kind="daily_location",
                date="2016-01-01",
                method="last",
                aggregation_unit="admin3",
            ),
        ),
    )

    msg = await action_handler__run_query(
        config=server_config,
        query_kind="spatial_aggregate",
        locations=dict(
            query_kind="daily_location",
            date="2016-01-01",
            method="last",
            aggregation_unit="admin3",
        ),
    )
    assert msg["status"] == ZMQReplyStatus.SUCCESS
    qsm.wait_until_complete()
    assert query_obj.is_stored
Example #9
def test_cache_ddl_op_error(dummy_redis):
    """
    Test that errors when generating SQL leave the query state machine in error state.
    """

    query_mock = Mock(query_id="DUMMY_MD5")
    qsm = QueryStateMachine(dummy_redis, "DUMMY_MD5")
    qsm.enqueue()

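    # ddl_ops_func raising mid-write should leave the state machine errored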
    with pytest.raises(TestException):
        write_query_to_cache(
            name="DUMMY_QUERY",
            redis=dummy_redis,
            query=query_mock,
            connection=Mock(),
            ddl_ops_func=Mock(side_effect=TestException),
            write_func=Mock(),
        )
    assert qsm.current_query_state == QueryState.ERRORED
Example #10
def test_queued_dependencies(dummy_redis):
    """
    Test that only queued dependencies are returned.
    """
    dummy = DummyQuery(dummy_param="DUMMY")
    queued_qsm = QueryStateMachine(dummy_redis, dummy.query_id,
                                   get_db().conn_id)
    queued_qsm.enqueue()
    stored_dummy = DummyQuery(dummy_param="STORED_DUMMY")
    stored_dummy.store()
    executing_dummy = DummyQuery(dummy_param="EXECUTING_DUMMY")
    executing_qsm = QueryStateMachine(dummy_redis, executing_dummy.query_id,
                                      get_db().conn_id)
    executing_qsm.enqueue()
    executing_qsm.execute()

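    # Only `dummy` is still queued; the others are stored or executing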
    nested = DummyQuery(dummy_param=[dummy, stored_dummy, executing_dummy])
    assert queued_dependencies(
        {nested, dummy, stored_dummy, executing_dummy}
    ) == [dummy]
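
Taken together, these examples exercise a single lifecycle: a query is
enqueued, executed, and then finishes, errors, or is cancelled, with reset
available once it has completed. A minimal sketch of the happy path, using
only calls that appear in the examples above (redis_client, query_id, and
conn_id are placeholder names):

qsm = QueryStateMachine(redis_client, query_id, conn_id)
qsm.enqueue()    # mark the query as queued
qsm.execute()    # queued -> executing
qsm.finish()     # executing -> completed
assert qsm.is_completed
qsm.reset()      # completed queries can be reset and rerun (examples #7 and #8)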