def test_min_size_max_size(dsn):
    """min_size/max_size are validated and exposed as pool attributes."""
    # Omitting max_size makes the pool fixed-size at min_size.
    with pool.ConnectionPool(dsn, min_size=2) as fixed:
        assert fixed.min_size == fixed.max_size == 2

    with pool.ConnectionPool(dsn, min_size=2, max_size=4) as growable:
        assert growable.min_size == 2
        assert growable.max_size == 4

    # A min_size larger than max_size is rejected at construction time.
    with pytest.raises(ValueError):
        pool.ConnectionPool(dsn, min_size=4, max_size=2)
def test_wait_ready(dsn, monkeypatch):
    """wait() blocks until min_size connections are ready, or times out.

    Every connection attempt is slowed down to 0.1s, so filling 4
    connections takes ~0.4s with one worker and ~0.2s with two.
    """
    delay_connection(monkeypatch, 0.1)
    with pytest.raises(pool.PoolTimeout):
        # 4 connections at 0.1s each on a single worker can't make 0.3s.
        with pool.ConnectionPool(dsn, min_size=4, num_workers=1) as p:
            p.wait(0.3)

    # The same setup succeeds with a more generous timeout...
    with pool.ConnectionPool(dsn, min_size=4, num_workers=1) as p:
        p.wait(0.5)

    # ...or with more workers connecting in parallel.
    with pool.ConnectionPool(dsn, min_size=4, num_workers=2) as p:
        p.wait(0.3)
        p.wait(0.0001)  # idempotent
def test_setup_no_timeout(dsn, proxy):
    """Without wait() the pool starts even if the server is unreachable.

    Connection attempts keep being retried in the background: the pool
    becomes usable as soon as the server (the proxy here) is available.
    """
    with pytest.raises(pool.PoolTimeout):
        # Proxy not started: explicitly waiting for a connection times out.
        with pool.ConnectionPool(proxy.client_dsn, min_size=1, num_workers=1) as p:
            p.wait(0.2)

    with pool.ConnectionPool(proxy.client_dsn, min_size=1, num_workers=1) as p:
        sleep(0.5)
        # Still no connection available, but no error raised either.
        assert not p._pool
        proxy.start()
        # Once the server is reachable a connection can be obtained.
        with p.connection() as conn:
            conn.execute("select 1")
def test_stats_usage(dsn, retries):
    """Check the request/usage statistics reported by the pool.

    7 clients hit a 3-connection pool, each holding a connection for
    0.2s with a 0.3s timeout: 3 are served at once, 4 queue, and one of
    the queued ones times out.
    """
    def worker(n):
        try:
            with p.connection(timeout=0.3) as conn:
                conn.execute("select pg_sleep(0.2)")
        except pool.PoolTimeout:
            # Expected for exactly one of the queued clients.
            pass

    for retry in retries:
        with retry:
            with pool.ConnectionPool(dsn, min_size=3) as p:
                p.wait(2.0)

                ts = [Thread(target=worker, args=(i, )) for i in range(7)]
                for t in ts:
                    t.start()
                for t in ts:
                    t.join()

                stats = p.get_stats()
                assert stats["requests_num"] == 7
                assert stats["requests_queued"] == 4
                # 3 queued clients wait ~0.2s each + one 0.3s timeout = ~0.9s.
                assert 850 <= stats["requests_wait_ms"] <= 950
                assert stats["requests_errors"] == 1
                # 6 successful requests * 0.2s each = ~1.2s of usage.
                assert 1150 <= stats["usage_ms"] <= 1250
                assert stats.get("returns_bad", 0) == 0

                # Returning a closed connection counts as a bad return.
                with p.connection() as conn:
                    conn.close()
                p.wait()
                stats = p.pop_stats()
                assert stats["requests_num"] == 8
                assert stats["returns_bad"] == 1

                # pop_stats() reset the counters.
                with p.connection():
                    pass
                assert p.get_stats()["requests_num"] == 1
def test_closed_queue(dsn, retries):
    """Closing the pool serves ongoing clients but rejects queued ones."""
    def w1():
        # Already holds the only connection: must complete successfully
        # even though the pool is closed while the query runs.
        with p.connection() as conn:
            assert (
                conn.execute("select 1 from pg_sleep(0.2)").fetchone()[0]
                == 1)
        success.append("w1")

    def w2():
        # Queued waiting for a connection: close() must wake it up
        # with a PoolClosed error.
        with pytest.raises(pool.PoolClosed):
            with p.connection():
                pass
        success.append("w2")

    for retry in retries:
        with retry:
            p = pool.ConnectionPool(dsn, min_size=1)
            success = []

            t1 = Thread(target=w1)
            t2 = Thread(target=w2)
            t1.start()
            # Give w1 time to grab the connection before w2 queues.
            sleep(0.1)
            t2.start()
            p.close()
            t1.join()
            t2.join()
            assert len(success) == 2
def test_del_stop_threads(dsn):
    """Deleting the pool object stops its scheduler and worker threads."""
    p = pool.ConnectionPool(dsn)
    service_threads = [p._sched_runner] + p._workers
    del p
    sleep(0.1)  # give the threads a moment to terminate
    for thread in service_threads:
        assert not thread.is_alive()
def test_queue(dsn, retries):
    """Clients exceeding the pool size queue and are served in waves.

    With 2 connections and 6 clients each holding a connection for
    0.2s, clients finish in pairs at ~0.2, ~0.4 and ~0.6 seconds.
    """
    def worker(n):
        t0 = time()
        with p.connection() as conn:
            (pid, ) = conn.execute("select pg_backend_pid() from pg_sleep(0.2)"
                                   ).fetchone()  # type: ignore[misc]
        t1 = time()
        results.append((n, t1 - t0, pid))

    for retry in retries:
        with retry:
            results: List[Tuple[int, float, int]] = []
            with pool.ConnectionPool(dsn, min_size=2) as p:
                p.wait()
                ts = [Thread(target=worker, args=(i, )) for i in range(6)]
                for t in ts:
                    t.start()
                for t in ts:
                    t.join()

            times = [item[1] for item in results]
            want_times = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6]
            for got, want in zip(times, want_times):
                assert got == pytest.approx(want, 0.1), times

            # Only the two pooled server processes served all the clients.
            assert len(set(r[2] for r in results)) == 2, results
def test_shrink(dsn, monkeypatch):
    """A pool grown to max_size shrinks back to min_size when idle."""
    from psycopg_pool.pool import ShrinkPool

    # (before, after) pool size recorded around each ShrinkPool run.
    results: List[Tuple[int, int]] = []

    def run_hacked(self, pool):
        # NOTE: the 'pool' parameter shadows the pool module here;
        # inside this function it is the ConnectionPool instance.
        n0 = pool._nconns
        orig_run(self, pool)
        n1 = pool._nconns
        results.append((n0, n1))

    orig_run = ShrinkPool._run
    monkeypatch.setattr(ShrinkPool, "_run", run_hacked)

    def worker(n):
        with p.connection() as conn:
            conn.execute("select pg_sleep(0.1)")

    with pool.ConnectionPool(dsn, min_size=2, max_size=4, max_idle=0.2) as p:
        p.wait(5.0)
        assert p.max_idle == 0.2

        # 4 concurrent clients grow the pool to max_size.
        ts = [Thread(target=worker, args=(i, )) for i in range(4)]
        for t in ts:
            t.start()
        for t in ts:
            t.join()

        # Each shrink pass drops at most one idle connection, down to
        # min_size and no further.
        sleep(1)
        assert results == [(4, 4), (4, 3), (3, 2), (2, 2), (2, 2)]
def test_putconn_no_pool(dsn):
    """putconn() refuses a connection that didn't come from the pool."""
    with pool.ConnectionPool(dsn, min_size=1) as p:
        foreign = psycopg.connect(dsn)
        with pytest.raises(ValueError):
            p.putconn(foreign)
        foreign.close()
def test_fail_rollback_close(dsn, caplog, monkeypatch):
    """A connection whose rollback fails on return is discarded.

    The pool tries to reset a returned INERROR connection; when the
    rollback itself fails the connection is closed with warnings and
    replaced by a fresh one.
    """
    caplog.set_level(logging.WARNING, logger="psycopg.pool")
    with pool.ConnectionPool(dsn, min_size=1) as p:
        conn = p.getconn()

        def bad_rollback():
            # Kill the libpq connection first, so the rollback raises.
            conn.pgconn.finish()
            orig_rollback()

        # Make the rollback fail
        orig_rollback = conn.rollback
        monkeypatch.setattr(conn, "rollback", bad_rollback)

        pid = conn.pgconn.backend_pid
        with pytest.raises(psycopg.ProgrammingError):
            conn.execute("wat")
        assert conn.pgconn.transaction_status == TransactionStatus.INERROR
        p.putconn(conn)

        with p.connection() as conn2:
            # The broken connection was replaced by a working one.
            assert conn2.pgconn.backend_pid != pid
            assert conn2.pgconn.transaction_status == TransactionStatus.IDLE

        # Warnings logged: INERROR return, failed rollback, bad connection.
        assert len(caplog.records) == 3
        assert "INERROR" in caplog.records[0].message
        assert "OperationalError" in caplog.records[1].message
        assert "BAD" in caplog.records[2].message
def test_connection_class(dsn):
    """connection_class= controls the class of the pooled connections."""
    class MyConn(psycopg.Connection[Any]):
        pass

    with pool.ConnectionPool(dsn, connection_class=MyConn, min_size=1) as p, \
            p.connection() as conn:
        assert isinstance(conn, MyConn)
def test_queue_timeout_override(dsn, retries):
    """connection(timeout=...) overrides the pool-level timeout.

    With min_size=2, timeout=0.1 and 4 clients holding connections for
    0.2s, both queued clients would normally time out; the one asking
    for 0.25s survives the queue, so only one error occurs.
    """
    def worker(n):
        t0 = time()
        # Client number 3 waits longer than the pool default.
        timeout = 0.25 if n == 3 else None
        try:
            with p.connection(timeout=timeout) as conn:
                (pid, ) = conn.execute(  # type: ignore[misc]
                    "select pg_backend_pid() from pg_sleep(0.2)").fetchone()
        except pool.PoolTimeout as e:
            t1 = time()
            errors.append((n, t1 - t0, e))
        else:
            t1 = time()
            results.append((n, t1 - t0, pid))

    for retry in retries:
        with retry:
            results: List[Tuple[int, float, int]] = []
            errors: List[Tuple[int, float, Exception]] = []

            with pool.ConnectionPool(dsn, min_size=2, timeout=0.1) as p:
                ts = [Thread(target=worker, args=(i, )) for i in range(4)]
                for t in ts:
                    t.start()
                for t in ts:
                    t.join()

            assert len(results) == 3
            assert len(errors) == 1
            for e in errors:
                # The failing request timed out at the default 0.1s.
                assert 0.1 < e[1] < 0.15
def test_queue_timeout(dsn, retries):
    """Clients queued beyond the pool timeout get PoolTimeout promptly.

    With min_size=2, timeout=0.1 and 4 clients each holding a
    connection for 0.2s, two clients are served immediately and the
    other two time out after the pool-level 0.1s timeout.
    """
    def worker(n):
        t0 = time()
        try:
            with p.connection() as conn:
                (pid, ) = conn.execute(  # type: ignore[misc]
                    "select pg_backend_pid() from pg_sleep(0.2)").fetchone()
        except pool.PoolTimeout as e:
            t1 = time()
            errors.append((n, t1 - t0, e))
        else:
            t1 = time()
            results.append((n, t1 - t0, pid))

    for retry in retries:
        with retry:
            # Annotated like the sibling test_queue_timeout_override.
            results: List[Tuple[int, float, int]] = []
            errors: List[Tuple[int, float, Exception]] = []

            with pool.ConnectionPool(dsn, min_size=2, timeout=0.1) as p:
                ts = [Thread(target=worker, args=(i, )) for i in range(4)]
                # Plain loops instead of side-effect list comprehensions:
                # the comprehensions built and discarded a list of Nones.
                for t in ts:
                    t.start()
                for t in ts:
                    t.join()

            assert len(results) == 2
            assert len(errors) == 2
            for e in errors:
                # Timeouts happened close to the configured 0.1s.
                assert 0.1 < e[1] < 0.15
def test_defaults(dsn):
    """Check the default values of the pool configuration parameters."""
    with pool.ConnectionPool(dsn) as p:
        # Fixed-size pool of 4 connections by default.
        assert p.min_size == p.max_size == 4
        assert p.timeout == 30
        assert p.max_idle == 600  # 10 minutes
        assert p.max_lifetime == 3600  # 1 hour
        assert p.num_workers == 3
def test_queue_size(dsn):
    """Requests beyond max_waiting are rejected with TooManyRequests."""
    def worker(t, ev=None):
        try:
            with p.connection():
                if ev:
                    ev.set()
                sleep(t)
        except pool.TooManyRequests as e:
            errors.append(e)
        else:
            success.append(True)

    errors: List[Exception] = []
    success: List[bool] = []

    with pool.ConnectionPool(dsn, min_size=1, max_waiting=3) as p:
        p.wait()
        ev = Event()
        # Occupy the only connection for 0.3s...
        t = Thread(target=worker, args=(0.3, ev))
        t.start()
        ev.wait()

        # ...then pile up 4 more clients: only 3 fit in the queue.
        ts = [Thread(target=worker, args=(0.1, )) for i in range(4)]
        for t in ts:
            t.start()
        for t in ts:
            t.join()

        # 1 holder + 3 queued succeeded; the 4th was turned away.
        assert len(success) == 4
        assert len(errors) == 1
        assert isinstance(errors[0], pool.TooManyRequests)
        # The error message mentions the pool name and the queue limit.
        assert p.name in str(errors[0])
        assert str(p.max_waiting) in str(errors[0])
        assert p.get_stats()["requests_errors"] == 1
def test_grow(dsn, monkeypatch, retries, min_size, want_times):
    """The pool grows on demand from min_size up to max_size.

    Parametrized: min_size and want_times come from the test fixtures.
    Each new connection takes 0.1s to establish; each client holds its
    connection for 0.25s.
    """
    delay_connection(monkeypatch, 0.1)

    def worker(n):
        t0 = time()
        with p.connection() as conn:
            conn.execute("select 1 from pg_sleep(0.25)")
        t1 = time()
        results.append((n, t1 - t0))

    for retry in retries:
        with retry:
            with pool.ConnectionPool(dsn, min_size=min_size, max_size=4,
                                     num_workers=3) as p:
                p.wait(1.0)
                results: List[Tuple[int, float]] = []
                ts = [
                    Thread(target=worker, args=(i, ))
                    for i in range(len(want_times))
                ]
                for t in ts:
                    t.start()
                for t in ts:
                    t.join()

            # Observed client latencies must match the expected pattern.
            times = [item[1] for item in results]
            for got, want in zip(times, want_times):
                assert got == pytest.approx(want, 0.1), times
def test_reset(dsn):
    """The reset= callback runs when a connection returns to the pool."""
    resets = 0

    # NOTE(review): setup() appears unused in this test — confirm
    # whether it is leftover or meant to be passed to the pool.
    def setup(conn):
        with conn.transaction():
            conn.execute("set timezone to '+1:00'")

    def reset(conn):
        nonlocal resets
        resets += 1
        with conn.transaction():
            conn.execute("set timezone to utc")

    with pool.ConnectionPool(dsn, min_size=1, reset=reset) as p:
        with p.connection() as conn:
            assert resets == 0
            conn.execute("set timezone to '+2:00'")

        p.wait()  # let the background reset complete
        assert resets == 1

        with p.connection() as conn:
            with conn.execute("show timezone") as cur:
                # The reset reverted the session-level change.
                assert cur.fetchone() == ("UTC", )

        p.wait()
        assert resets == 2
def test_configure(dsn):
    """configure= runs once per new connection, not once per checkout."""
    inits = 0

    def configure(conn):
        nonlocal inits
        inits += 1
        with conn.transaction():
            conn.execute("set default_transaction_read_only to on")

    with pool.ConnectionPool(dsn, min_size=1, configure=configure) as p:
        p.wait()
        with p.connection() as conn:
            assert inits == 1
            res = conn.execute("show default_transaction_read_only")
            assert res.fetchone()[0] == "on"  # type: ignore[index]

        with p.connection() as conn:
            # Same connection reused: no new configure call.
            assert inits == 1
            res = conn.execute("show default_transaction_read_only")
            assert res.fetchone()[0] == "on"  # type: ignore[index]
            # Closing forces the pool to create a replacement.
            conn.close()

        with p.connection() as conn:
            # The replacement connection was configured as well.
            assert inits == 2
            res = conn.execute("show default_transaction_read_only")
            assert res.fetchone()[0] == "on"  # type: ignore[index]
def test_connection_not_lost(dsn):
    """An exception escaping the connection block doesn't lose the
    connection: it is returned to the pool and handed out again."""
    with pool.ConnectionPool(dsn, min_size=1) as p:
        with pytest.raises(ZeroDivisionError):
            with p.connection() as conn:
                backend = conn.pgconn.backend_pid
                1 / 0

        with p.connection() as conn2:
            # Same server process: the connection was recycled.
            assert conn2.pgconn.backend_pid == backend
def test_closed_putconn(dsn):
    """A connection returned to a live pool stays open; one returned to
    a closed pool is closed."""
    p = pool.ConnectionPool(dsn, min_size=1)

    # Normal return: the connection goes back to the pool still open.
    with p.connection() as conn:
        pass
    assert not conn.closed

    # The pool is closed while the connection is checked out: the
    # return closes it instead of pooling it.
    with p.connection() as conn:
        p.close()
    assert conn.closed
def test_del_no_warning(dsn, recwarn):
    """Dropping the last reference to a quiet pool emits no warning."""
    p = pool.ConnectionPool(dsn, min_size=2)
    with p.connection() as conn:
        conn.execute("select 1")

    p.wait()
    pool_ref = weakref.ref(p)
    del p
    # The pool object was really collected...
    assert not pool_ref()
    # ...and its disposal produced no ResourceWarning or similar.
    assert not recwarn, [str(w.message) for w in recwarn.list]
def test_close_no_threads(dsn):
    """close() terminates the scheduler thread and every worker thread."""
    p = pool.ConnectionPool(dsn)
    # While the pool is open all its service threads are running.
    for t in [p._sched_runner] + p._workers:
        assert t.is_alive()

    p.close()
    # After close() the same threads must have terminated.
    assert not p._sched_runner.is_alive()
    for t in p._workers:
        assert not t.is_alive()
def test_max_lifetime(dsn):
    """Connections older than max_lifetime are replaced on return."""
    with pool.ConnectionPool(dsn, min_size=1, max_lifetime=0.2) as p:
        sleep(0.1)
        pids = []
        # Sample the backend pid every 0.2s for one second.
        for _ in range(5):
            with p.connection() as conn:
                pids.append(conn.pgconn.backend_pid)
            sleep(0.2)

    # Early checkouts reuse the same server process; by the last one
    # the connection has been replaced at least once.
    assert pids[0] == pids[1] != pids[4], pids
def test_reopen(dsn):
    """A closed pool cannot be opened again."""
    p = pool.ConnectionPool(dsn)
    with p.connection() as conn:
        conn.execute("select 1")

    p.close()
    # Closing disposes of the service threads...
    assert p._sched_runner is None
    assert not p._workers

    # ...and the pool object cannot be recycled.
    with pytest.raises(psycopg.OperationalError, match="cannot be reused"):
        p.open()
def test_broken_reconnect(dsn):
    """A connection closed by the client is replaced with a new one."""
    with pool.ConnectionPool(dsn, min_size=1) as p:
        with p.connection() as conn:
            with conn.execute("select pg_backend_pid()") as cur:
                pid1 = cur.fetchone()[0]  # type: ignore[index]
            # Break the connection before it goes back to the pool.
            conn.close()

        with p.connection() as conn2:
            with conn2.execute("select pg_backend_pid()") as cur:
                pid2 = cur.fetchone()[0]  # type: ignore[index]

    # The second checkout talked to a different server process.
    assert pid1 != pid2
def test_uniform_use(dsn, retries):
    """Sequential checkouts rotate over all the pooled connections."""
    for retry in retries:
        with retry:
            with pool.ConnectionPool(dsn, min_size=4) as p:
                usage = Counter[int]()
                for _ in range(8):
                    with p.connection() as conn:
                        sleep(0.1)
                        usage[id(conn)] += 1

            # All 4 connections were used, each exactly twice.
            assert len(usage) == 4
            assert set(usage.values()) == {2}
def test_configure_badstate(dsn, caplog):
    """A configure callback leaving a transaction open is reported."""
    caplog.set_level(logging.WARNING, logger="psycopg.pool")

    def configure(conn):
        # No transaction block and no commit: the connection is left
        # in INTRANS state, which the pool refuses to accept.
        conn.execute("select 1")

    with pool.ConnectionPool(dsn, min_size=1, configure=configure) as p:
        with pytest.raises(pool.PoolTimeout):
            p.wait(timeout=0.5)

    assert caplog.records
    assert "INTRANS" in caplog.records[0].message
def test_closed_getconn(dsn):
    """connection() raises PoolClosed once the pool has been closed."""
    p = pool.ConnectionPool(dsn, min_size=1)
    assert not p.closed
    # Checkout works normally on the open pool.
    with p.connection():
        pass

    p.close()
    assert p.closed

    # Further checkouts are refused.
    with pytest.raises(pool.PoolClosed):
        with p.connection():
            pass
def test_its_really_a_pool(dsn):
    """Two overlapping checkouts get two distinct connections, which
    are then reused for later checkouts."""
    with pool.ConnectionPool(dsn, min_size=2) as p:
        with p.connection() as conn:
            with conn.execute("select pg_backend_pid()") as cur:
                pid1 = cur.fetchone()[0]  # type: ignore[index]

            # A nested checkout must use the other connection.
            with p.connection() as conn2:
                with conn2.execute("select pg_backend_pid()") as cur:
                    pid2 = cur.fetchone()[0]  # type: ignore[index]

        # A later checkout reuses one of the two pooled connections.
        with p.connection() as conn:
            assert conn.pgconn.backend_pid in (pid1, pid2)
def test_open_context(dsn):
    """A pool created with open=False opens on entering its context
    manager and closes on exit."""
    p = pool.ConnectionPool(dsn, open=False)
    assert p.closed

    with p:
        assert not p.closed
        with p.connection() as conn:
            assert conn.execute("select 1").fetchone() == (1, )

    assert p.closed