def test_min_size_max_size(dsn):
    with pool.ConnectionPool(dsn, min_size=2) as p:
        assert p.min_size == p.max_size == 2

    with pool.ConnectionPool(dsn, min_size=2, max_size=4) as p:
        assert p.min_size == 2
        assert p.max_size == 4

    with pytest.raises(ValueError):
        pool.ConnectionPool(dsn, min_size=4, max_size=2)
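# delay_connection() is used by several tests in this section but is not
# defined here (it normally lives elsewhere in the test suite). A minimal
# sketch of what it is assumed to do, based on its usage: patch
# Connection.connect so that establishing any new connection takes at least
# `sec` seconds, which lets the tests exercise pool timeouts and growth.
def delay_connection(monkeypatch, sec):
    connect_orig = psycopg3.Connection.connect

    def connect_delay(*args, **kwargs):
        t0 = time()
        rv = connect_orig(*args, **kwargs)
        # Pad the elapsed connection time up to `sec` seconds
        sleep(max(0.0, sec - (time() - t0)))
        return rv

    monkeypatch.setattr(psycopg3.Connection, "connect", connect_delay)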
def test_wait_ready(dsn, monkeypatch):
    delay_connection(monkeypatch, 0.1)
    with pytest.raises(pool.PoolTimeout):
        with pool.ConnectionPool(dsn, min_size=4, num_workers=1) as p:
            p.wait(0.3)

    with pool.ConnectionPool(dsn, min_size=4, num_workers=1) as p:
        p.wait(0.5)

    with pool.ConnectionPool(dsn, min_size=4, num_workers=2) as p:
        p.wait(0.3)
        p.wait(0.0001)  # idempotent
def test_setup_no_timeout(dsn, proxy):
    with pytest.raises(pool.PoolTimeout):
        with pool.ConnectionPool(
            proxy.client_dsn, min_size=1, num_workers=1
        ) as p:
            p.wait(0.2)

    with pool.ConnectionPool(
        proxy.client_dsn, min_size=1, num_workers=1
    ) as p:
        sleep(0.5)
        assert not p._pool
        proxy.start()
        with p.connection() as conn:
            conn.execute("select 1")
def test_reset(dsn):
    resets = 0

    def setup(conn):
        with conn.transaction():
            conn.execute("set timezone to '+1:00'")

    def reset(conn):
        nonlocal resets
        resets += 1
        with conn.transaction():
            conn.execute("set timezone to utc")

    with pool.ConnectionPool(dsn, min_size=1, reset=reset) as p:
        with p.connection() as conn:
            assert resets == 0
            conn.execute("set timezone to '+2:00'")

        p.wait()
        assert resets == 1

        with p.connection() as conn:
            with conn.execute("show timezone") as cur:
                assert cur.fetchone() == ("UTC",)

        p.wait()
        assert resets == 2
def test_stats_usage(dsn):
    def worker(n):
        try:
            with p.connection(timeout=0.3) as conn:
                conn.execute("select pg_sleep(0.2)")
        except pool.PoolTimeout:
            pass

    with pool.ConnectionPool(dsn, min_size=3) as p:
        p.wait(2.0)

        ts = [Thread(target=worker, args=(i,)) for i in range(7)]
        [t.start() for t in ts]
        [t.join() for t in ts]
        stats = p.get_stats()
        assert stats["requests_num"] == 7
        assert stats["requests_queued"] == 4
        assert 850 <= stats["requests_wait_ms"] <= 950
        assert stats["requests_errors"] == 1
        assert 1150 <= stats["usage_ms"] <= 1250
        assert stats.get("returns_bad", 0) == 0

        with p.connection() as conn:
            conn.close()
        p.wait()
        stats = p.pop_stats()
        assert stats["requests_num"] == 8
        assert stats["returns_bad"] == 1
        with p.connection():
            pass
        assert p.get_stats()["requests_num"] == 1
def test_defaults(dsn):
    with pool.ConnectionPool(dsn) as p:
        assert p.min_size == p.max_size == 4
        assert p.timeout == 30
        assert p.max_idle == 10 * 60
        assert p.max_lifetime == 60 * 60
        assert p.num_workers == 3
def test_configure(dsn):
    inits = 0

    def configure(conn):
        nonlocal inits
        inits += 1
        with conn.transaction():
            conn.execute("set default_transaction_read_only to on")

    with pool.ConnectionPool(dsn, min_size=1, configure=configure) as p:
        p.wait(timeout=1.0)
        with p.connection() as conn:
            assert inits == 1
            res = conn.execute("show default_transaction_read_only")
            assert res.fetchone()[0] == "on"

        with p.connection() as conn:
            assert inits == 1
            res = conn.execute("show default_transaction_read_only")
            assert res.fetchone()[0] == "on"
            conn.close()

        with p.connection() as conn:
            assert inits == 2
            res = conn.execute("show default_transaction_read_only")
            assert res.fetchone()[0] == "on"
def test_queue_size(dsn):
    def worker(t, ev=None):
        try:
            with p.connection():
                if ev:
                    ev.set()
                sleep(t)
        except pool.TooManyRequests as e:
            errors.append(e)
        else:
            success.append(True)

    errors = []
    success = []

    with pool.ConnectionPool(dsn, min_size=1, max_waiting=3) as p:
        p.wait()
        ev = Event()
        t = Thread(target=worker, args=(0.3, ev))
        t.start()
        ev.wait()

        ts = [Thread(target=worker, args=(0.1,)) for i in range(4)]
        [t.start() for t in ts]
        [t.join() for t in ts]

    assert len(success) == 4
    assert len(errors) == 1
    assert isinstance(errors[0], pool.TooManyRequests)
    assert p.name in str(errors[0])
    assert str(p.max_waiting) in str(errors[0])
    assert p.get_stats()["requests_errors"] == 1
def test_closed_queue(dsn):
    p = pool.ConnectionPool(dsn, min_size=1)
    success = []

    def w1():
        with p.connection() as conn:
            assert (
                conn.execute("select 1 from pg_sleep(0.2)").fetchone()[0] == 1
            )
        success.append("w1")

    def w2():
        with pytest.raises(pool.PoolClosed):
            with p.connection():
                pass
        success.append("w2")

    t1 = Thread(target=w1)
    t2 = Thread(target=w2)
    t1.start()
    sleep(0.1)
    t2.start()
    p.close()
    t1.join()
    t2.join()
    assert len(success) == 2
def test_connection_class(dsn):
    class MyConn(psycopg3.Connection):
        pass

    with pool.ConnectionPool(dsn, connection_class=MyConn, min_size=1) as p:
        with p.connection() as conn:
            assert isinstance(conn, MyConn)
def test_shrink(dsn, monkeypatch):
    from psycopg3.pool.pool import ShrinkPool

    results = []

    # Wrap the scheduled ShrinkPool task to record the pool size before and
    # after each run, so the shrinking sequence can be asserted below.
    def run_hacked(self, pool):
        n0 = pool._nconns
        orig_run(self, pool)
        n1 = pool._nconns
        results.append((n0, n1))

    orig_run = ShrinkPool._run
    monkeypatch.setattr(ShrinkPool, "_run", run_hacked)

    def worker(n):
        with p.connection() as conn:
            conn.execute("select pg_sleep(0.1)")

    with pool.ConnectionPool(dsn, min_size=2, max_size=4, max_idle=0.2) as p:
        p.wait(5.0)
        assert p.max_idle == 0.2

        ts = [Thread(target=worker, args=(i,)) for i in range(4)]
        [t.start() for t in ts]
        [t.join() for t in ts]

        sleep(1)

    assert results == [(4, 4), (4, 3), (3, 2), (2, 2), (2, 2)]
def test_del_stop_threads(dsn):
    p = pool.ConnectionPool(dsn)
    ts = [p._sched_runner] + p._workers
    del p
    sleep(0.1)
    for t in ts:
        assert not t.is_alive()
def test_grow(dsn, monkeypatch, retries):
    delay_connection(monkeypatch, 0.1)

    def worker(n):
        t0 = time()
        with p.connection() as conn:
            conn.execute("select 1 from pg_sleep(0.2)")
        t1 = time()
        results.append((n, t1 - t0))

    for retry in retries:
        with retry:
            with pool.ConnectionPool(
                dsn, min_size=2, max_size=4, num_workers=3
            ) as p:
                p.wait(1.0)
                results = []

                ts = [Thread(target=worker, args=(i,)) for i in range(6)]
                [t.start() for t in ts]
                [t.join() for t in ts]

            want_times = [0.2, 0.2, 0.3, 0.4, 0.4, 0.4]
            times = [item[1] for item in results]
            for got, want in zip(times, want_times):
                assert got == pytest.approx(want, 0.1), times
def test_fail_rollback_close(dsn, caplog, monkeypatch):
    caplog.set_level(logging.WARNING, logger="psycopg3.pool")
    with pool.ConnectionPool(dsn, min_size=1) as p:
        conn = p.getconn()

        def bad_rollback():
            conn.pgconn.finish()
            orig_rollback()

        # Make the rollback fail
        orig_rollback = conn.rollback
        monkeypatch.setattr(conn, "rollback", bad_rollback)

        pid = conn.pgconn.backend_pid
        with pytest.raises(psycopg3.ProgrammingError):
            conn.execute("wat")
        assert conn.pgconn.transaction_status == TransactionStatus.INERROR
        p.putconn(conn)

        with p.connection() as conn2:
            assert conn2.pgconn.backend_pid != pid
            assert conn2.pgconn.transaction_status == TransactionStatus.IDLE

    assert len(caplog.records) == 3
    assert "INERROR" in caplog.records[0].message
    assert "OperationalError" in caplog.records[1].message
    assert "BAD" in caplog.records[2].message
def test_reconnect(proxy, caplog, monkeypatch):
    caplog.set_level(logging.WARNING, logger="psycopg3.pool")

    assert pool.base.ConnectionAttempt.INITIAL_DELAY == 1.0
    assert pool.base.ConnectionAttempt.DELAY_JITTER == 0.1
    monkeypatch.setattr(pool.base.ConnectionAttempt, "INITIAL_DELAY", 0.1)
    monkeypatch.setattr(pool.base.ConnectionAttempt, "DELAY_JITTER", 0.0)

    proxy.start()
    with pool.ConnectionPool(proxy.client_dsn, min_size=1) as p:
        p.wait(2.0)
        proxy.stop()

        with pytest.raises(psycopg3.OperationalError):
            with p.connection() as conn:
                conn.execute("select 1")

        sleep(1.0)
        proxy.start()
        p.wait()

        with p.connection() as conn:
            conn.execute("select 1")

    assert "BAD" in caplog.messages[0]
    times = [rec.created for rec in caplog.records]
    assert times[1] - times[0] < 0.05
    deltas = [times[i + 1] - times[i] for i in range(1, len(times) - 1)]
    assert len(deltas) == 3
    want = 0.1
    for delta in deltas:
        assert delta == pytest.approx(want, 0.05), deltas
        want *= 2
def test_queue_timeout_override(dsn):
    def worker(n):
        t0 = time()
        timeout = 0.25 if n == 3 else None
        try:
            with p.connection(timeout=timeout) as conn:
                (pid,) = conn.execute(
                    "select pg_backend_pid() from pg_sleep(0.2)"
                ).fetchone()
        except pool.PoolTimeout as e:
            t1 = time()
            errors.append((n, t1 - t0, e))
        else:
            t1 = time()
            results.append((n, t1 - t0, pid))

    results = []
    errors = []

    with pool.ConnectionPool(dsn, min_size=2, timeout=0.1) as p:
        ts = [Thread(target=worker, args=(i,)) for i in range(4)]
        [t.start() for t in ts]
        [t.join() for t in ts]

    assert len(results) == 3
    assert len(errors) == 1
    for e in errors:
        assert 0.1 < e[1] < 0.15
def test_connection_not_lost(dsn):
    with pool.ConnectionPool(dsn, min_size=1) as p:
        with pytest.raises(ZeroDivisionError):
            with p.connection() as conn:
                pid = conn.pgconn.backend_pid
                1 / 0

        with p.connection() as conn2:
            assert conn2.pgconn.backend_pid == pid
def test_del_no_warning(dsn, recwarn):
    p = pool.ConnectionPool(dsn, min_size=2)
    with p.connection() as conn:
        conn.execute("select 1")

    p.wait()
    ref = weakref.ref(p)
    del p
    assert not ref()
    assert not recwarn
def test_closed_putconn(dsn):
    p = pool.ConnectionPool(dsn, min_size=1)

    with p.connection() as conn:
        pass
    assert not conn.closed

    with p.connection() as conn:
        p.close()
    assert conn.closed
def test_close_no_threads(dsn):
    p = pool.ConnectionPool(dsn)
    assert p._sched_runner.is_alive()
    for t in p._workers:
        assert t.is_alive()

    p.close()
    assert not p._sched_runner.is_alive()
    for t in p._workers:
        assert not t.is_alive()
def test_uniform_use(dsn):
    with pool.ConnectionPool(dsn, min_size=4) as p:
        counts = Counter()
        for i in range(8):
            with p.connection() as conn:
                sleep(0.1)
                counts[id(conn)] += 1

    assert len(counts) == 4
    assert set(counts.values()) == set([2])
def test_max_lifetime(dsn):
    with pool.ConnectionPool(dsn, min_size=1, max_lifetime=0.2) as p:
        sleep(0.1)
        pids = []
        for i in range(5):
            with p.connection() as conn:
                pids.append(conn.pgconn.backend_pid)
            sleep(0.2)

    assert pids[0] == pids[1] != pids[4], pids
def test_configure_badstate(dsn, caplog):
    caplog.set_level(logging.WARNING, logger="psycopg3.pool")

    def configure(conn):
        conn.execute("select 1")

    with pool.ConnectionPool(dsn, min_size=1, configure=configure) as p:
        with pytest.raises(pool.PoolTimeout):
            p.wait(timeout=0.5)

    assert caplog.records
    assert "INTRANS" in caplog.records[0].message
def test_broken_reconnect(dsn):
    with pool.ConnectionPool(dsn, min_size=1) as p:
        with p.connection() as conn:
            with conn.execute("select pg_backend_pid()") as cur:
                (pid1,) = cur.fetchone()
            conn.close()

        with p.connection() as conn2:
            with conn2.execute("select pg_backend_pid()") as cur:
                (pid2,) = cur.fetchone()

    assert pid1 != pid2
def test_its_really_a_pool(dsn):
    with pool.ConnectionPool(dsn, min_size=2) as p:
        with p.connection() as conn:
            with conn.execute("select pg_backend_pid()") as cur:
                (pid1,) = cur.fetchone()

            with p.connection() as conn2:
                with conn2.execute("select pg_backend_pid()") as cur:
                    (pid2,) = cur.fetchone()

        with p.connection() as conn:
            assert conn.pgconn.backend_pid in (pid1, pid2)
def test_closed_getconn(dsn):
    p = pool.ConnectionPool(dsn, min_size=1)
    assert not p.closed
    with p.connection():
        pass

    p.close()
    assert p.closed

    with pytest.raises(pool.PoolClosed):
        with p.connection():
            pass
def test_configure_broken(dsn, caplog):
    caplog.set_level(logging.WARNING, logger="psycopg3.pool")

    def configure(conn):
        with conn.transaction():
            conn.execute("WAT")

    with pool.ConnectionPool(dsn, min_size=1, configure=configure) as p:
        with pytest.raises(pool.PoolTimeout):
            p.wait(timeout=0.5)

    assert caplog.records
    assert "WAT" in caplog.records[0].message
def test_inerror_rollback(dsn, caplog):
    caplog.set_level(logging.WARNING, logger="psycopg3.pool")
    with pool.ConnectionPool(dsn, min_size=1) as p:
        conn = p.getconn()
        pid = conn.pgconn.backend_pid
        with pytest.raises(psycopg3.ProgrammingError):
            conn.execute("wat")
        assert conn.pgconn.transaction_status == TransactionStatus.INERROR
        p.putconn(conn)

        with p.connection() as conn2:
            assert conn2.pgconn.backend_pid == pid
            assert conn2.pgconn.transaction_status == TransactionStatus.IDLE

    assert len(caplog.records) == 1
    assert "INERROR" in caplog.records[0].message
def test_spike(dsn, monkeypatch):
    # Inspired by https://github.com/brettwooldridge/HikariCP/blob/dev/
    # documents/Welcome-To-The-Jungle.md
    delay_connection(monkeypatch, 0.15)

    def worker():
        with p.connection():
            sleep(0.002)

    with pool.ConnectionPool(dsn, min_size=5, max_size=10) as p:
        p.wait()

        ts = [Thread(target=worker) for i in range(50)]
        [t.start() for t in ts]
        [t.join() for t in ts]
        p.wait()

        assert len(p._pool) < 7
def test_stats_connect(dsn, proxy, monkeypatch):
    proxy.start()
    delay_connection(monkeypatch, 0.2)
    with pool.ConnectionPool(proxy.client_dsn, min_size=3) as p:
        p.wait()
        stats = p.get_stats()
        assert stats["connections_num"] == 3
        assert stats.get("connections_errors", 0) == 0
        assert stats.get("connections_lost", 0) == 0
        assert 600 <= stats["connections_ms"] < 1200

        proxy.stop()
        p.check()
        sleep(0.1)
        stats = p.get_stats()
        assert stats["connections_num"] > 3
        assert stats["connections_errors"] > 0
        assert stats["connections_lost"] == 3