def test_connection_of_transaction_not_used_as_context_manager_also_goes_idle(
    self
):
    """Check a transaction's connection returns to the idle state when
    the transaction object is garbage-collected without being entered."""
    datastore = PostgresDatastore(
        dbname="eventsourcing",
        host="127.0.0.1",
        port="5432",
        user="******",
        password="******",
    )
    # Obtain a transaction but never enter it as a context manager.
    txn = datastore.transaction(commit=False)
    # The underlying connection should not be idle while outstanding.
    connection = txn.c
    self.assertFalse(connection.is_idle.is_set())
    print(
        "Testing transaction not used as context manager, expecting exception..."
    )
    # Dropping the last reference lets the transaction be collected.
    del txn
    # After collection, the connection should be flagged idle.
    self.assertTrue(connection.is_idle.wait(timeout=0.1))
def setUp(self) -> None:
    """Create a datastore for the test and clear leftover tables."""
    self.datastore = PostgresDatastore(
        "eventsourcing",
        "127.0.0.1",
        "5432",
        "eventsourcing",
        "eventsourcing",
    )
    # Start each test from a clean slate.
    self.drop_tables()
def test_connect_failure_raises_interface_error(self):
    """An unreachable server should surface as InterfaceError."""
    # Deliberately invalid port so the connection attempt fails.
    datastore = PostgresDatastore(
        dbname="eventsourcing",
        host="127.0.0.1",
        port="9876543210",  # bad port
        user="******",
        password="******",
    )
    with self.assertRaises(InterfaceError):
        datastore.transaction(commit=True)
def drop_postgres_table(datastore: PostgresDatastore, table_name):
    """Best-effort drop of *table_name*; persistence errors (e.g. the
    table not existing) are ignored."""
    try:
        with datastore.transaction(commit=True) as curs:
            curs.execute(f"DROP TABLE {table_name};")
    except PersistenceError:
        pass
def setUp(self):
    """Point the Postgres env vars at the local test server and drop
    every table the process-application tests may have left behind."""
    super().setUp()
    os.environ.update(
        {
            "POSTGRES_DBNAME": "eventsourcing",
            "POSTGRES_HOST": "127.0.0.1",
            "POSTGRES_PORT": "5432",
            "POSTGRES_USER": "******",
            "POSTGRES_PASSWORD": "******",
        }
    )
    db = PostgresDatastore(
        os.getenv("POSTGRES_DBNAME"),
        os.getenv("POSTGRES_HOST"),
        os.getenv("POSTGRES_PORT"),
        os.getenv("POSTGRES_USER"),
        os.getenv("POSTGRES_PASSWORD"),
    )
    # Tables created by the various test applications/processes.
    tables = [
        f"{BankAccounts.name.lower()}_events",
        f"{EmailProcess.name.lower()}_events",
        f"{EmailProcess.name.lower()}_tracking",
        f"{EmailProcess.name.lower()}2_events",
        f"{EmailProcess.name.lower()}2_tracking",
        "brokenprocessing_events",
        "brokenprocessing_tracking",
        "brokenconverting_events",
        "brokenconverting_tracking",
        "brokenpulling_events",
        "brokenpulling_tracking",
        "commands_events",
        "commands_tracking",
        "results_events",
        "results_tracking",
    ]
    for table in tables:
        drop_postgres_table(db, table)
    os.environ["PERSISTENCE_MODULE"] = "eventsourcing.postgres"
def setUp(self) -> None:
    """Temporarily configure Postgres env vars, reset the worlds table,
    then remove the env vars again."""
    super().setUp()
    self.uris = tmpfile_uris()
    keys = (
        "INFRASTRUCTURE_FACTORY",
        "POSTGRES_DBNAME",
        "POSTGRES_HOST",
        "POSTGRES_PORT",
        "POSTGRES_USER",
        "POSTGRES_PASSWORD",
    )
    values = (
        "eventsourcing.postgres:Factory",
        "eventsourcing",
        "127.0.0.1",
        "5432",
        "******",
        "******",
    )
    for key, value in zip(keys, values):
        os.environ[key] = value
    db = PostgresDatastore(
        os.getenv("POSTGRES_DBNAME"),
        os.getenv("POSTGRES_HOST"),
        os.getenv("POSTGRES_PORT"),
        os.getenv("POSTGRES_USER"),
        os.getenv("POSTGRES_PASSWORD"),
    )
    drop_postgres_table(db, "worlds_events")
    # Clean the environment again so these settings don't leak.
    for key in keys:
        del os.environ[key]
def setUp(self) -> None:
    """Create the datastore and start from a clean stored_events table.

    Fix: the positional port argument was missing from the
    PostgresDatastore call (sibling test cases pass five positional
    arguments: dbname, host, port, user, password), so the user value
    was being taken as the port.
    """
    self.datastore = PostgresDatastore(
        "eventsourcing",
        "127.0.0.1",
        "5432",  # port — was previously omitted, misaligning user/password
        "eventsourcing",
        "eventsourcing",
    )
    drop_postgres_table(self.datastore, "stored_events")
def test_timer_closes_connection(self): datastore = PostgresDatastore( dbname="eventsourcing", host="127.0.0.1", port="5432", user="******", password="******", conn_max_age=0, ) # Check connection is closed after using transaction. transaction = datastore.transaction(commit=False) with transaction as conn: with conn.cursor() as c: c.execute("SELECT 1") self.assertEqual(c.fetchall(), [[1]]) self.assertTrue(transaction.c.is_closing.wait(timeout=0.5)) for _ in range(1000): if transaction.c.is_closed: break else: sleep(0.0001) else: self.fail("Connection is not closed") with self.assertRaises(psycopg2.InterfaceError) as cm: transaction.c.cursor() self.assertEqual(cm.exception.args[0], "connection already closed") # Check closed connection can be recreated and also closed. transaction = datastore.transaction(commit=False) with transaction as conn: with conn.cursor() as c: c.execute("SELECT 1") self.assertEqual(c.fetchall(), [[1]]) self.assertTrue(transaction.c.is_closing.wait(timeout=0.5)) for _ in range(1000): if transaction.c.is_closed: break else: sleep(0.0001) else: self.fail("Connection is not closed")
def drop_tables(self):
    """Remove the tables created by earlier runs of this test case."""
    datastore = PostgresDatastore(
        "eventsourcing",
        "127.0.0.1",
        "5432",
        "eventsourcing",
        "eventsourcing",
    )
    for name in ("testcase_events", "testcase_tracking"):
        drop_postgres_table(datastore, name)
def setUp(self) -> None:
    """Prepare temp-file URIs and a clean 'universe_events' table."""
    super().setUp()
    self.uris = tmpfile_uris()
    datastore = PostgresDatastore(
        "eventsourcing",
        "127.0.0.1",
        "5432",
        "eventsourcing",
        "eventsourcing",
    )
    drop_postgres_table(datastore, "universe_events")
def drop_postgres_table(datastore: PostgresDatastore, table_name):
    """Best-effort drop of *table_name*, with diagnostic printing.

    Fix: ``statement`` was previously assigned inside the ``with``
    block, so if ``datastore.transaction()`` raised, the except
    handlers would hit a NameError when printing it. It is now bound
    before the try block.
    """
    statement = f"DROP TABLE {table_name};"
    try:
        with datastore.transaction() as c:
            print("Executing statement:", statement)
            c.execute(statement)
            print("Executed statement:", statement)
    except psycopg2.errors.lookup(UNDEFINED_TABLE):
        # Table wasn't there — nothing to drop.
        print("Failed to execute statement:", statement)
    except Exception:
        # NOTE(review): all other errors are swallowed after printing;
        # consider logging the exception detail or re-raising.
        print("Strange error:", statement)
def setUp(self) -> None:
    """Configure Postgres persistence env vars and reset the bank
    account tables in the public schema."""
    super().setUp()
    settings = {
        "PERSISTENCE_MODULE": "eventsourcing.postgres",
        "CREATE_TABLE": "y",
        "POSTGRES_DBNAME": "eventsourcing",
        "POSTGRES_HOST": "127.0.0.1",
        "POSTGRES_PORT": "5432",
        "POSTGRES_USER": "******",
        "POSTGRES_PASSWORD": "******",
        "POSTGRES_SCHEMA": "public",
    }
    for key, value in settings.items():
        os.environ[key] = value
    db = PostgresDatastore(
        os.getenv("POSTGRES_DBNAME"),
        os.getenv("POSTGRES_HOST"),
        os.getenv("POSTGRES_PORT"),
        os.getenv("POSTGRES_USER"),
        os.getenv("POSTGRES_PASSWORD"),
    )
    drop_postgres_table(db, "public.bankaccounts_events")
    drop_postgres_table(db, "public.bankaccounts_snapshots")
    db.close()
def tearDown(self) -> None:
    """Drop the test tables and scrub the env vars set in setUp."""
    db = PostgresDatastore(
        os.getenv("POSTGRES_DBNAME"),
        os.getenv("POSTGRES_HOST"),
        os.getenv("POSTGRES_PORT"),
        os.getenv("POSTGRES_USER"),
        os.getenv("POSTGRES_PASSWORD"),
    )
    drop_postgres_table(db, "public.bankaccounts_events")
    drop_postgres_table(db, "public.bankaccounts_snapshots")
    for key in (
        "PERSISTENCE_MODULE",
        "CREATE_TABLE",
        "POSTGRES_DBNAME",
        "POSTGRES_HOST",
        "POSTGRES_PORT",
        "POSTGRES_USER",
        "POSTGRES_PASSWORD",
        "POSTGRES_SCHEMA",
    ):
        del os.environ[key]
    db.close()
    super().tearDown()
def test_transaction(self):
    """A transaction yields a usable cursor and returns its connection
    to the idle state when the context manager exits."""
    datastore = PostgresDatastore(
        dbname="eventsourcing",
        host="127.0.0.1",
        port="5432",
        user="******",
        password="******",
    )
    txn = datastore.transaction(commit=False)
    # Connection is busy while the transaction is outstanding.
    self.assertFalse(txn.c.is_idle.is_set())
    # The context manager hands back a connection with working cursors.
    with txn as conn:
        with conn.cursor() as cursor:
            cursor.execute("SELECT 1")
            self.assertEqual(cursor.fetchall(), [[1]])
    # The connection should go idle shortly after the context exits.
    self.assertTrue(txn.c.is_idle.wait(timeout=0.1))
def setUp(self) -> None:
    """Configure the Postgres factory env vars and reset account tables.

    Fix: POSTGRES_PORT was never set here, although the matching
    tearDown deletes it (raising KeyError), and the datastore was
    constructed without the positional port argument, misaligning the
    user/password values. Both now match the sibling setUp methods.
    """
    super().setUp()
    self.uris = tmpfile_uris()
    os.environ["INFRASTRUCTURE_FACTORY"] = "eventsourcing.postgres:Factory"
    os.environ["CREATE_TABLE"] = "y"
    os.environ["POSTGRES_DBNAME"] = "eventsourcing"
    os.environ["POSTGRES_HOST"] = "127.0.0.1"
    os.environ["POSTGRES_PORT"] = "5432"
    os.environ["POSTGRES_USER"] = "******"
    os.environ["POSTGRES_PASSWORD"] = "******"
    db = PostgresDatastore(
        os.getenv("POSTGRES_DBNAME"),
        os.getenv("POSTGRES_HOST"),
        os.getenv("POSTGRES_PORT"),
        os.getenv("POSTGRES_USER"),
        os.getenv("POSTGRES_PASSWORD"),
    )
    drop_postgres_table(db, "bankaccounts_events")
    drop_postgres_table(db, "bankaccounts_snapshots")
def tearDown(self) -> None:
    """Drop the account tables and remove the env vars set in setUp."""
    db = PostgresDatastore(
        os.getenv("POSTGRES_DBNAME"),
        os.getenv("POSTGRES_HOST"),
        os.getenv("POSTGRES_PORT"),
        os.getenv("POSTGRES_USER"),
        os.getenv("POSTGRES_PASSWORD"),
    )
    for table in ("bankaccounts_events", "bankaccounts_snapshots"):
        drop_postgres_table(db, table)
    for key in (
        "INFRASTRUCTURE_FACTORY",
        "CREATE_TABLE",
        "POSTGRES_DBNAME",
        "POSTGRES_HOST",
        "POSTGRES_PORT",
        "POSTGRES_USER",
        "POSTGRES_PASSWORD",
    ):
        del os.environ[key]
    super().tearDown()
def setUp(self):
    """Set Postgres env vars and reset the tables used by these tests.

    Fix: the positional port argument was missing from the
    PostgresDatastore call, so the user value was being passed as the
    port (sibling setUps pass dbname, host, port, user, password).
    """
    os.environ["POSTGRES_DBNAME"] = "eventsourcing"
    os.environ["POSTGRES_HOST"] = "127.0.0.1"
    os.environ["POSTGRES_USER"] = "******"
    os.environ["POSTGRES_PASSWORD"] = "******"
    db = PostgresDatastore(
        os.getenv("POSTGRES_DBNAME"),
        os.getenv("POSTGRES_HOST"),
        "5432",  # port — was previously omitted, misaligning the args
        os.getenv("POSTGRES_USER"),
        os.getenv("POSTGRES_PASSWORD"),
    )
    drop_postgres_table(db, "bankaccounts_events")
    drop_postgres_table(db, "emailnotifications_events")
    drop_postgres_table(db, "emailnotifications_tracking")
    drop_postgres_table(db, "brokenprocessing_events")
    drop_postgres_table(db, "brokenprocessing_tracking")
    os.environ["INFRASTRUCTURE_FACTORY"] = "eventsourcing.postgres:Factory"
def test_close_connection(self):
    """close_connection() is safe to call both before and after a
    connection has actually been created."""
    datastore = PostgresDatastore(
        dbname="eventsourcing",
        host="127.0.0.1",
        port="5432",
        user="******",
        password="******",
    )
    # Closing with no connection yet should be a no-op.
    datastore.close_connection()
    # Create a connection by running a trivial query.
    with datastore.transaction(commit=False) as conn:
        with conn.cursor() as cursor:
            cursor.execute("SELECT 1")
            self.assertEqual(cursor.fetchall(), [[1]])
    # Closing an open connection should also succeed.
    datastore.close_connection()
def drop_postgres_table(datastore: PostgresDatastore, table_name):
    """Drop *table_name*, ignoring the error raised when it is absent."""
    statement = f"DROP TABLE {table_name};"
    try:
        with datastore.transaction() as c:
            c.execute(statement)
    except psycopg2.errors.lookup(UNDEFINED_TABLE):
        # Table does not exist — nothing to do.
        pass
def test_pre_ping(self): datastore = PostgresDatastore( dbname="eventsourcing", host="127.0.0.1", port="5432", user="******", password="******", pre_ping=True, ) # Create a connection. transaction = datastore.transaction(commit=False) pg_conn = transaction.c.c self.assertEqual(pg_conn, transaction.c.c) # Check the connection works. with transaction as conn: with conn.cursor() as c: c.execute("SELECT 1") self.assertEqual(c.fetchall(), [[1]]) # Close all connections via separate connection. pg_close_all_connections() # Check the connection doesn't think it's closed. self.assertFalse(transaction.c.is_closed) # Check we can get a new connection that works. transaction = datastore.transaction(commit=False) with transaction as conn: with conn.cursor() as c: c.execute("SELECT 1") self.assertEqual(c.fetchall(), [[1]]) # Check it's actually a different connection. self.assertNotEqual(pg_conn, transaction.c.c) # Check this doesn't work if we don't use pre_ping. datastore = PostgresDatastore( dbname="eventsourcing", host="127.0.0.1", port="5432", user="******", password="******", pre_ping=False, ) # Create a connection. transaction = datastore.transaction(commit=False) pg_conn = transaction.c.c self.assertEqual(pg_conn, transaction.c.c) # Check the connection works. with transaction as conn: with conn.cursor() as c: c.execute("SELECT 1") self.assertEqual(c.fetchall(), [[1]]) # Close all connections via separate connection. pg_close_all_connections() # Check the connection doesn't think it's closed. self.assertFalse(transaction.c.is_closed) # Get a stale connection and check it doesn't work. transaction = datastore.transaction(commit=False) # Check it's the same connection. self.assertEqual(pg_conn, transaction.c.c) with self.assertRaises(InterfaceError): with transaction as conn: with conn.cursor() as c: c.execute("SELECT 1")
class TestPostgresAggregateRecorderErrors(TestCase):
    """Error-path tests for PostgresAggregateRecorder: broken SQL,
    operations against a missing table, and duplicate prepared
    statements."""

    def setUp(self) -> None:
        # Fresh datastore per test; start with no stored_events table.
        self.datastore = PostgresDatastore(
            "eventsourcing",
            "127.0.0.1",
            "5432",
            "eventsourcing",
            "eventsourcing",
        )
        self.drop_tables()

    def tearDown(self) -> None:
        # Leave the database clean for the next test.
        self.drop_tables()

    def drop_tables(self):
        drop_postgres_table(self.datastore, "stored_events")

    def create_recorder(self):
        # Recorder under test; table is deliberately NOT created here.
        return PostgresAggregateRecorder(
            datastore=self.datastore, events_table_name="stored_events"
        )

    def test_create_table_raises_programming_error_when_sql_is_broken(self):
        recorder = self.create_recorder()
        # Mess up the statement.
        recorder.create_table_statements = ["BLAH"]
        with self.assertRaises(ProgrammingError):
            recorder.create_table()

    def test_insert_events_raises_programming_error_when_table_not_created(
        self
    ):
        # Construct the recorder.
        recorder = self.create_recorder()
        # Write a stored event without creating the table.
        stored_event1 = StoredEvent(
            originator_id=uuid4(),
            originator_version=0,
            topic="topic1",
            state=b"state1",
        )
        with self.assertRaises(ProgrammingError):
            recorder.insert_events([stored_event1])

    def test_insert_events_raises_programming_error_when_sql_is_broken(self):
        # Construct the recorder.
        recorder = self.create_recorder()
        # Create the table.
        recorder.create_table()
        # Write a stored event with broken statement.
        recorder.insert_events_statement = "BLAH"
        stored_event1 = StoredEvent(
            originator_id=uuid4(),
            originator_version=0,
            topic="topic1",
            state=b"state1",
        )
        with self.assertRaises(ProgrammingError):
            recorder.insert_events([stored_event1])

    def test_select_events_raises_programming_error_when_table_not_created(
        self
    ):
        # Construct the recorder.
        recorder = self.create_recorder()
        # Select events without creating the table.
        originator_id = uuid4()
        with self.assertRaises(ProgrammingError):
            recorder.select_events(originator_id=originator_id)

    def test_select_events_raises_programming_error_when_sql_is_broken(self):
        # Construct the recorder.
        recorder = self.create_recorder()
        # Create the table.
        recorder.create_table()
        # Select events with broken statement.
        recorder.select_events_statement = "BLAH"
        originator_id = uuid4()
        with self.assertRaises(ProgrammingError):
            recorder.select_events(originator_id=originator_id)

    def test_duplicate_prepared_statement_error_is_ignored(self):
        # Construct the recorder.
        recorder = self.create_recorder()
        # Create the table.
        recorder.create_table()
        # Check the statement is not prepared.
        statement_name = "select_stored_events"
        conn = self.datastore.get_connection()
        self.assertFalse(conn.is_prepared.get(statement_name))
        # Cause the statement to be prepared.
        recorder.select_events(originator_id=uuid4())
        # Check the statement was prepared.
        conn = self.datastore.get_connection()
        self.assertTrue(conn.is_prepared.get(statement_name))
        # Forget the statement is prepared (the server still has it).
        del conn.is_prepared[statement_name]
        # Should ignore "duplicate prepared statement" error.
        recorder.select_events(originator_id=uuid4())
        # Check the statement was prepared.
        conn = self.datastore.get_connection()
        self.assertTrue(conn.is_prepared.get(statement_name))
class TestPostgresApplicationRecorder(ApplicationRecorderTestCase):
    """Runs the shared application-recorder test suite against Postgres,
    plus retry-after-disconnect checks specific to this backend."""

    def setUp(self) -> None:
        self.datastore = PostgresDatastore(
            "eventsourcing",
            "127.0.0.1",
            "5432",
            "eventsourcing",
            "eventsourcing",
        )
        self.drop_tables()

    def tearDown(self) -> None:
        self.drop_tables()

    def drop_tables(self):
        drop_postgres_table(self.datastore, "stored_events")

    def create_recorder(self):
        # Build a recorder with its table created, ready for use.
        recorder = PostgresApplicationRecorder(
            self.datastore, events_table_name="stored_events"
        )
        recorder.create_table()
        return recorder

    def close_db_connection(self, *args):
        # Hook used by the base test case to sever the connection.
        self.datastore.close_connection()

    def test_concurrent_no_conflicts(self):
        super().test_concurrent_no_conflicts()

    def test_concurrent_throughput(self):
        super().test_concurrent_throughput()

    def test_retry_select_notifications_after_closing_connection(self):
        # Construct the recorder.
        recorder = self.create_recorder()
        # Check we have a connection (from create_table).
        self.assertTrue(self.datastore._connections)
        # Write a stored event.
        originator_id = uuid4()
        stored_event1 = StoredEvent(
            originator_id=originator_id,
            originator_version=0,
            topic="topic1",
            state=b"state1",
        )
        recorder.insert_events([stored_event1])
        # Close connections.
        pg_close_all_connections()
        # Select events — should succeed despite the closed connection.
        recorder.select_notifications(start=1, limit=1)

    def test_retry_max_notification_id_after_closing_connection(self):
        # Construct the recorder.
        recorder = self.create_recorder()
        # Check we have a connection (from create_table).
        self.assertTrue(self.datastore._connections)
        # Write a stored event.
        originator_id = uuid4()
        stored_event1 = StoredEvent(
            originator_id=originator_id,
            originator_version=0,
            topic="topic1",
            state=b"state1",
        )
        recorder.insert_events([stored_event1])
        # Close connections.
        pg_close_all_connections()
        # Query — should succeed despite the closed connection.
        recorder.max_notification_id()