def test_sets_wal_journal_mode_if_not_memory(self):
    """WAL journal mode is enabled for file databases but not for ':memory:'.

    Also checks the journal_mode_was_changed_to_wal flag: it is True only
    on the connection that actually switched the mode (first creation of
    the file database), and False when reopening an existing database
    that is already in WAL mode.
    """
    # Check datastore for in-memory database: a transaction runs, but
    # journal mode stays non-WAL and nothing was changed.
    with self.datastore.transaction():
        pass
    self.assertFalse(self.datastore.is_journal_mode_wal)
    self.assertFalse(self.datastore.journal_mode_was_changed_to_wal)
    # Create datastore for non-existing file database.
    self.uris = tmpfile_uris()
    self.db_uri = next(self.uris)
    datastore = SQLiteDatastore(self.db_uri)
    with datastore.transaction():
        pass
    # First use of the file database switches it to WAL.
    self.assertTrue(datastore.is_journal_mode_wal)
    self.assertTrue(datastore.journal_mode_was_changed_to_wal)
    datastore.close_all_connections()
    del datastore
    # Recreate datastore for existing database: already WAL, so the
    # "was changed" flag must now be False.
    datastore = SQLiteDatastore(self.db_uri)
    with datastore.transaction():
        pass
    self.assertTrue(datastore.is_journal_mode_wal)
    self.assertFalse(datastore.journal_mode_was_changed_to_wal)
def test_select_raises_operational_error_if_table_not_created(self):
    """Querying a process recorder before create_table() raises OperationalError."""
    process_recorder = SQLiteProcessRecorder(SQLiteDatastore(":memory:"))
    self.assertRaises(
        OperationalError, process_recorder.select_events, uuid4()
    )
    self.assertRaises(
        OperationalError, process_recorder.max_tracking_id, "application name"
    )
def test_cache_raises_aggregate_not_found_when_projector_func_returns_none(
    self):
    """A projector_func returning None makes Repository.get() raise
    AggregateNotFound, even when the aggregate is in the repository cache.
    """
    # Build transcoder with the codecs used by the test aggregates.
    transcoder = JSONTranscoder()
    transcoder.register(UUIDAsHex())
    transcoder.register(DecimalAsStr())
    transcoder.register(DatetimeAsISO())
    transcoder.register(EmailAddressAsStr())
    event_recorder = SQLiteAggregateRecorder(SQLiteDatastore(":memory:"))
    event_recorder.create_table()
    event_store = EventStore(
        mapper=Mapper(transcoder=transcoder),
        recorder=event_recorder,
    )
    # Repository with a small LRU cache enabled.
    repository = Repository(
        event_store,
        cache_maxsize=2,
    )
    aggregate = Aggregate()
    event_store.put(aggregate.collect_events())
    # First get() succeeds and populates the cache.
    self.assertEqual(1, repository.get(aggregate.id).version)
    # Record a further event, so the cached entry must be fast-forwarded.
    aggregate.trigger_event(Aggregate.Event)
    event_store.put(aggregate.collect_events())
    # Projector that discards all state: get() must report "not found".
    with self.assertRaises(AggregateNotFound):
        repository.get(aggregate.id, projector_func=lambda _, __: None)
def test_cache_fastforward_false(self):
    """With fastforward=False, a cached aggregate is returned as-is and
    is NOT updated with events recorded after it was cached.
    """
    # Build transcoder with the codecs used by the test aggregates.
    transcoder = JSONTranscoder()
    transcoder.register(UUIDAsHex())
    transcoder.register(DecimalAsStr())
    transcoder.register(DatetimeAsISO())
    transcoder.register(EmailAddressAsStr())
    event_recorder = SQLiteAggregateRecorder(SQLiteDatastore(":memory:"))
    event_recorder.create_table()
    event_store = EventStore(
        mapper=Mapper(transcoder=transcoder),
        recorder=event_recorder,
    )
    # Cache enabled, fast-forwarding of cached entries disabled.
    repository = Repository(
        event_store,
        cache_maxsize=2,
        fastforward=False,
    )
    aggregate = Aggregate()
    event_store.put(aggregate.collect_events())
    # First get() caches the aggregate at version 1.
    self.assertEqual(1, repository.get(aggregate.id).version)
    # Record a second event directly in the store.
    aggregate.trigger_event(Aggregate.Event)
    event_store.put(aggregate.collect_events())
    # Still version 1: the stale cached entry is returned unchanged.
    self.assertEqual(1, repository.get(aggregate.id).version)
class TestSqliteDatastore(TestCase):
    """Tests for SQLiteDatastore: connection, transactions, WAL mode."""

    def setUp(self) -> None:
        # Fresh in-memory datastore for each test.
        self.datastore = SQLiteDatastore(":memory:")

    def test_connect_failure_raises_interface_error(self):
        """Entering a transaction on a datastore with no database name
        raises InterfaceError."""
        datastore = SQLiteDatastore(None)
        with self.assertRaises(InterfaceError):
            with datastore.transaction(commit=False):
                pass

    def test_transaction(self):
        """A read-only transaction yields a usable cursor."""
        transaction = self.datastore.transaction(commit=False)
        with transaction as cursor:
            cursor.execute("SELECT 1")
            rows = cursor.fetchall()
            # One row with one column, value 1.
            self.assertEqual(len(rows), 1)
            self.assertEqual(len(rows[0]), 1)
            self.assertEqual(rows[0][0], 1)

    def test_sets_wal_journal_mode_if_not_memory(self):
        """WAL journal mode is enabled for file databases but not for
        ':memory:'; the pool records whether this connection changed it.
        """
        # Check datastore for in-memory database: stays non-WAL.
        with self.datastore.transaction(commit=False):
            pass
        self.assertFalse(self.datastore.pool.is_journal_mode_wal)
        self.assertFalse(self.datastore.pool.journal_mode_was_changed_to_wal)
        # Create datastore for non-existing file database.
        self.uris = tmpfile_uris()
        self.db_uri = next(self.uris)
        datastore = SQLiteDatastore(self.db_uri)
        with datastore.transaction(commit=False):
            pass
        # First use of the file database switches it to WAL.
        self.assertTrue(datastore.pool.is_journal_mode_wal)
        self.assertTrue(datastore.pool.journal_mode_was_changed_to_wal)
        datastore.close()
        del datastore
        # Recreate datastore for existing database: already WAL, so the
        # "was changed" flag must now be False.
        datastore = SQLiteDatastore(self.db_uri)
        with datastore.transaction(commit=False):
            pass
        self.assertTrue(datastore.pool.is_journal_mode_wal)
        self.assertFalse(datastore.pool.journal_mode_was_changed_to_wal)
def create_recorder(self) -> ApplicationRecorder:
    """Build a file-backed application recorder with its table created."""
    self.uris = tmpfile_uris()
    self.db_uri = next(self.uris)
    datastore = SQLiteDatastore(db_name=self.db_uri)
    app_recorder = SQLiteApplicationRecorder(datastore)
    app_recorder.create_table()
    return app_recorder
def test_select(self):
    """NotificationLogReader.select() returns all notifications from a
    given start position, across section boundaries.
    """
    recorder = SQLiteProcessRecorder(SQLiteDatastore(":memory:"))
    recorder.create_table()
    # Construct notification log and a reader over it.
    notification_log = LocalNotificationLog(recorder, section_size=5)
    reader = NotificationLogReader(notification_log, section_size=5)
    # Empty log: nothing selected.
    notifications = list(reader.select(start=1))
    self.assertEqual(len(notifications), 0)
    # Write 5 events.
    originator_id = uuid4()
    for i in range(5):
        stored_event = StoredEvent(
            originator_id=originator_id,
            originator_version=i,
            topic="topic",
            state=b"state",
        )
        recorder.insert_events(
            [stored_event],
        )
    notifications = list(reader.select(start=1))
    self.assertEqual(len(notifications), 5)
    # Write 4 events for a second aggregate.
    originator_id = uuid4()
    for i in range(4):
        stored_event = StoredEvent(
            originator_id=originator_id,
            originator_version=i,
            topic="topic",
            state=b"state",
        )
        recorder.insert_events([stored_event])
    # 9 notifications total; higher start positions see fewer.
    notifications = list(reader.select(start=1))
    self.assertEqual(len(notifications), 9)
    notifications = list(reader.select(start=2))
    self.assertEqual(len(notifications), 8)
    notifications = list(reader.select(start=3))
    self.assertEqual(len(notifications), 7)
    notifications = list(reader.select(start=4))
    self.assertEqual(len(notifications), 6)
    notifications = list(reader.select(start=8))
    self.assertEqual(len(notifications), 2)
    notifications = list(reader.select(start=9))
    self.assertEqual(len(notifications), 1)
    # Start beyond the last notification: empty.
    notifications = list(reader.select(start=10))
    self.assertEqual(len(notifications), 0)
def test_select_raises_operational_error_if_table_not_created(self):
    """Every query method fails with OperationalError before create_table()."""
    app_recorder = SQLiteApplicationRecorder(SQLiteDatastore(":memory:"))
    self.assertRaises(OperationalError, app_recorder.select_events, uuid4())
    self.assertRaises(
        OperationalError, app_recorder.select_notifications, start=1, limit=1
    )
    self.assertRaises(OperationalError, app_recorder.max_notification_id)
def test_insert_raises_operational_error_if_table_not_created(self):
    """Inserting an event before create_table() raises OperationalError."""
    process_recorder = SQLiteProcessRecorder(SQLiteDatastore(":memory:"))
    event = StoredEvent(
        originator_id=uuid4(),
        originator_version=1,
        topic="topic1",
        state=b"",
    )
    self.assertRaises(OperationalError, process_recorder.insert_events, [event])
def test(self):
    """Round-trip a BankAccount through an SQLite-backed event store:
    record its events, reconstruct a copy, and inspect the last event.
    """
    # Open an account.
    account = BankAccount.open(
        full_name="Alice",
        email_address="*****@*****.**",
    )
    # Credit the account.
    account.append_transaction(Decimal("10.00"))
    account.append_transaction(Decimal("25.00"))
    account.append_transaction(Decimal("30.00"))
    # Collect pending events.
    pending = account.collect_events()
    # Construct event store with the codecs the account's events need.
    transcoder = JSONTranscoder()
    transcoder.register(UUIDAsHex())
    transcoder.register(DecimalAsStr())
    transcoder.register(DatetimeAsISO())
    transcoder.register(EmailAddressAsStr())
    recorder = SQLiteAggregateRecorder(SQLiteDatastore(":memory:"))
    event_store = EventStore(
        mapper=Mapper(transcoder),
        recorder=recorder,
    )
    recorder.create_table()
    # Get last event: nothing stored yet.
    last_event = event_store.get(account.id, desc=True, limit=1)
    assert list(last_event) == []
    # Store pending events.
    event_store.put(pending)
    # Get domain events.
    domain_events = event_store.get(account.id)
    # Reconstruct the bank account by applying events in order.
    copy = None
    for domain_event in domain_events:
        copy = domain_event.mutate(copy)
    # Check copy has correct attribute values.
    assert copy.id == account.id
    assert copy.balance == Decimal("65.00")
    # Get last event.
    events = event_store.get(account.id, desc=True, limit=1)
    events = list(events)
    assert len(events) == 1
    last_event = events[0]
    assert last_event.originator_id == account.id
    # Fix: exact-type check with identity, not '==' (flake8 E721); the
    # intent is "exactly this event class", so 'is' states it precisely.
    assert type(last_event) is BankAccount.TransactionAppended
def test(self):
    """Take a snapshot of a BankAccount, store it in a dedicated
    snapshots table, and reconstruct the account from the snapshot alone.
    """
    # Open an account.
    account = BankAccount.open(
        full_name="Alice",
        email_address="*****@*****.**",
    )
    # Credit the account.
    account.append_transaction(Decimal("10.00"))
    account.append_transaction(Decimal("25.00"))
    account.append_transaction(Decimal("30.00"))
    # Build transcoder with the codecs used by the account's state.
    transcoder = JSONTranscoder()
    transcoder.register(UUIDAsHex())
    transcoder.register(DecimalAsStr())
    transcoder.register(DatetimeAsISO())
    transcoder.register(EmailAddressAsStr())
    # Snapshot store uses its own "snapshots" table.
    snapshot_store = EventStore(
        mapper=Mapper(transcoder=transcoder),
        recorder=SQLiteAggregateRecorder(
            SQLiteDatastore(":memory:"),
            events_table_name="snapshots",
        ),
    )
    snapshot_store.recorder.create_table()
    # Clear pending events.
    account.collect_events()
    # Take a snapshot; pending events must not leak into its state.
    snapshot = Snapshot.take(account)
    self.assertNotIn("pending_events", snapshot.state)
    # Store snapshot.
    snapshot_store.put([snapshot])
    # Get snapshot (latest first).
    snapshots = snapshot_store.get(account.id, desc=True, limit=1)
    snapshot = next(snapshots)
    assert isinstance(snapshot, Snapshot)
    # Reconstruct the bank account from the snapshot.
    copy = snapshot.mutate()
    assert isinstance(copy, BankAccount)
    # Check copy has correct attribute values.
    assert copy.id == account.id
    assert copy.balance == Decimal("65.00")
def test_cache_maxsize_nonzero(self):
    """With cache_maxsize=2 the repository uses an LRUCache, evicting the
    least recently used aggregate when a third one is added.
    """
    # Build transcoder with the codecs used by the test aggregates.
    transcoder = JSONTranscoder()
    transcoder.register(UUIDAsHex())
    transcoder.register(DecimalAsStr())
    transcoder.register(DatetimeAsISO())
    transcoder.register(EmailAddressAsStr())
    event_recorder = SQLiteAggregateRecorder(SQLiteDatastore(":memory:"))
    event_recorder.create_table()
    event_store = EventStore(
        mapper=Mapper(transcoder=transcoder),
        recorder=event_recorder,
    )
    repository = Repository(event_store, cache_maxsize=2)
    # Nonzero maxsize selects the LRU cache implementation.
    self.assertEqual(type(repository.cache), LRUCache)
    # Each aggregate is "in" the repository once its events are stored.
    aggregate1 = Aggregate()
    self.assertFalse(aggregate1.id in repository)
    event_store.put(aggregate1.collect_events())
    self.assertTrue(aggregate1.id in repository)
    aggregate2 = Aggregate()
    self.assertFalse(aggregate2.id in repository)
    event_store.put(aggregate2.collect_events())
    self.assertTrue(aggregate2.id in repository)
    aggregate3 = Aggregate()
    self.assertFalse(aggregate3.id in repository)
    event_store.put(aggregate3.collect_events())
    self.assertTrue(aggregate3.id in repository)
    # Cache holds two entries, so aggregate1 has been evicted...
    self.assertFalse(aggregate1.id in repository.cache.cache)
    # ...but it is still retrievable from the event store.
    self.assertEqual(1, repository.get(aggregate1.id).version)
    self.assertEqual(1, repository.get(aggregate2.id).version)
    self.assertEqual(1, repository.get(aggregate3.id).version)
    # New events are seen even for a cached aggregate (fast-forwarding).
    aggregate1.trigger_event(Aggregate.Event)
    event_store.put(aggregate1.collect_events())
    self.assertEqual(2, repository.get(aggregate1.id).version)
def test_raises_operational_error_when_selecting_fails(self):
    """Selecting from a recorder whose table was never created must fail."""
    # Deliberately skip create_table() so the SELECT hits a missing table.
    aggregate_recorder = SQLiteAggregateRecorder(SQLiteDatastore(":memory:"))
    self.assertRaises(
        OperationalError, aggregate_recorder.select_events, uuid4()
    )
def test(self):
    """LocalNotificationLog sectioning: section IDs, next_id links,
    truncation to section_size, and degenerate section requests.
    """
    recorder = SQLiteApplicationRecorder(SQLiteDatastore(":memory:"))
    recorder.create_table()
    # Construct notification log with sections of five items.
    notification_log = LocalNotificationLog(recorder, section_size=5)
    # Get the "current" section of log: empty log gives an empty section.
    section = notification_log["1,10"]
    self.assertEqual(len(section.items), 0)  # event notifications
    self.assertEqual(section.id, None)
    self.assertEqual(section.next_id, None)
    # Write 5 events.
    originator_id = uuid4()
    for i in range(5):
        stored_event = StoredEvent(
            originator_id=originator_id,
            originator_version=i,
            topic="topic",
            state=b"state",
        )
        recorder.insert_events([stored_event])
    # Get the "head" section of log: full section with link to the next.
    section = notification_log["1,10"]
    self.assertEqual(len(section.items), 5)  # event notifications
    self.assertEqual(section.items[0].id, 1)
    self.assertEqual(section.items[1].id, 2)
    self.assertEqual(section.items[2].id, 3)
    self.assertEqual(section.items[3].id, 4)
    self.assertEqual(section.items[4].id, 5)
    self.assertEqual(section.id, "1,5")
    self.assertEqual(section.next_id, "6,10")
    # Get the "1,5" section of log: same content as the head request.
    section = notification_log["1,5"]
    self.assertEqual(len(section.items), 5)  # event notifications
    self.assertEqual(section.items[0].id, 1)
    self.assertEqual(section.items[1].id, 2)
    self.assertEqual(section.items[2].id, 3)
    self.assertEqual(section.items[3].id, 4)
    self.assertEqual(section.items[4].id, 5)
    self.assertEqual(section.id, "1,5")
    self.assertEqual(section.next_id, "6,10")
    # Get the next section of log: no notifications there yet.
    section = notification_log["6,10"]
    self.assertEqual(len(section.items), 0)  # event notifications
    self.assertEqual(section.id, None)
    self.assertEqual(section.next_id, None)
    # Write 4 events.
    originator_id = uuid4()
    for i in range(4):
        stored_event = StoredEvent(
            originator_id=originator_id,
            originator_version=i,
            topic="topic",
            state=b"state",
        )
        recorder.insert_events([stored_event])
    # Get the next section of log: partial section, so no next_id.
    section = notification_log["6,10"]
    self.assertEqual(len(section.items), 4)  # event notifications
    self.assertEqual(section.items[0].id, 6)
    self.assertEqual(section.items[1].id, 7)
    self.assertEqual(section.items[2].id, 8)
    self.assertEqual(section.items[3].id, 9)
    self.assertEqual(section.id, "6,9")
    self.assertEqual(section.next_id, None)
    # Start at non-regular section start.
    section = notification_log["3,7"]
    self.assertEqual(len(section.items), 5)  # event notifications
    self.assertEqual(section.items[0].id, 3)
    self.assertEqual(section.items[1].id, 4)
    self.assertEqual(section.items[2].id, 5)
    self.assertEqual(section.items[3].id, 6)
    self.assertEqual(section.items[4].id, 7)
    self.assertEqual(section.id, "3,7")
    self.assertEqual(section.next_id, "8,12")
    # Notification log limits section size: "3,10" is truncated to five.
    section = notification_log["3,10"]
    self.assertEqual(len(section.items), 5)  # event notifications
    self.assertEqual(section.items[0].id, 3)
    self.assertEqual(section.items[1].id, 4)
    self.assertEqual(section.items[2].id, 5)
    self.assertEqual(section.items[3].id, 6)
    self.assertEqual(section.items[4].id, 7)
    self.assertEqual(section.id, "3,7")
    self.assertEqual(section.next_id, "8,12")
    # Reader limits section size: a smaller request gives a smaller section.
    section = notification_log["3,4"]
    self.assertEqual(len(section.items), 2)  # event notifications
    self.assertEqual(section.items[0].id, 3)
    self.assertEqual(section.items[1].id, 4)
    self.assertEqual(section.id, "3,4")
    self.assertEqual(section.next_id, "5,6")
    # Meaningless section ID (start > end) gives an empty section.
    section = notification_log["3,2"]
    self.assertEqual(len(section.items), 0)  # event notifications
    self.assertEqual(section.id, None)
    self.assertEqual(section.next_id, None)
def create_recorder(self):
    """Build a file-backed application recorder with a large connection pool."""
    datastore = SQLiteDatastore(db_name=self.db_uri, pool_size=100)
    app_recorder = SQLiteApplicationRecorder(datastore)
    app_recorder.create_table()
    return app_recorder
def test_connect_failure_raises_interface_error(self):
    """Starting a transaction with no database name raises InterfaceError."""
    broken_datastore = SQLiteDatastore(None)
    self.assertRaises(InterfaceError, broken_datastore.transaction)
def setUp(self) -> None:
    # Fresh in-memory datastore for each test.
    self.datastore = SQLiteDatastore(":memory:")
def create_recorder(self) -> ApplicationRecorder:
    """Build an application recorder on a shared-cache in-memory database."""
    datastore = SQLiteDatastore(db_name="file::memory:?cache=shared")
    app_recorder = SQLiteApplicationRecorder(datastore)
    app_recorder.create_table()
    return app_recorder
def test_insert_raises_operational_error_if_table_not_created(self):
    """Even an empty insert batch fails before create_table()."""
    process_recorder = SQLiteProcessRecorder(SQLiteDatastore(":memory:"))
    self.assertRaises(OperationalError, process_recorder.insert_events, [])
def create_recorder(self):
    """Build an in-memory process recorder with its table created."""
    process_recorder = SQLiteProcessRecorder(SQLiteDatastore(":memory:"))
    process_recorder.create_table()
    return process_recorder
def test_select(self):
    """LocalNotificationLog.select(start, limit) returns notifications in
    id order and rejects limits larger than the section size.
    """
    recorder = SQLiteApplicationRecorder(SQLiteDatastore(":memory:"))
    recorder.create_table()
    # Construct notification log.
    notification_log = LocalNotificationLog(recorder, section_size=10)
    # Select start 1, limit 10: empty log gives nothing.
    notifications = notification_log.select(1, 10)
    self.assertEqual(len(notifications), 0)
    # Write 5 events.
    originator_id = uuid4()
    for i in range(5):
        stored_event = StoredEvent(
            originator_id=originator_id,
            originator_version=i,
            topic="topic",
            state=b"state",
        )
        recorder.insert_events([stored_event])
    # Select start 1, limit 5: all five notifications, ids 1..5.
    notifications = notification_log.select(1, 5)
    self.assertEqual(len(notifications), 5)
    self.assertEqual(notifications[0].id, 1)
    self.assertEqual(notifications[1].id, 2)
    self.assertEqual(notifications[2].id, 3)
    self.assertEqual(notifications[3].id, 4)
    self.assertEqual(notifications[4].id, 5)
    # Selecting again is repeatable.
    notifications = notification_log.select(1, 5)
    self.assertEqual(len(notifications), 5)
    self.assertEqual(notifications[0].id, 1)
    self.assertEqual(notifications[1].id, 2)
    self.assertEqual(notifications[2].id, 3)
    self.assertEqual(notifications[3].id, 4)
    self.assertEqual(notifications[4].id, 5)
    # Select start 6, limit 5: nothing written there yet.
    notifications = notification_log.select(6, 5)
    self.assertEqual(len(notifications), 0)
    # Write 4 events for a second aggregate.
    originator_id = uuid4()
    for i in range(4):
        stored_event = StoredEvent(
            originator_id=originator_id,
            originator_version=i,
            topic="topic",
            state=b"state",
        )
        recorder.insert_events([stored_event])
    # Select start 6, limit 5: the four new notifications, ids 6..9.
    notifications = notification_log.select(6, 5)
    self.assertEqual(len(notifications), 4)  # event notifications
    self.assertEqual(notifications[0].id, 6)
    self.assertEqual(notifications[1].id, 7)
    self.assertEqual(notifications[2].id, 8)
    self.assertEqual(notifications[3].id, 9)
    # Select start 3, limit 5: spans both batches, ids 3..7.
    notifications = notification_log.select(3, 5)
    self.assertEqual(len(notifications), 5)  # event notifications
    self.assertEqual(notifications[0].id, 3)
    self.assertEqual(notifications[1].id, 4)
    self.assertEqual(notifications[2].id, 5)
    self.assertEqual(notifications[3].id, 6)
    self.assertEqual(notifications[4].id, 7)
    # Notification log limits limit.
    # Select start 1, limit 20: exceeds section_size=10, so it is rejected.
    with self.assertRaises(ValueError) as cm:
        notification_log.select(1, 20)
    self.assertEqual(
        cm.exception.args[0],
        "Requested limit 20 greater than section size 10",
    )
def test_raises_operational_error_when_creating_table_fails(self):
    """create_table() raises OperationalError when its SQL is invalid."""
    aggregate_recorder = SQLiteAggregateRecorder(SQLiteDatastore(":memory:"))
    # Replace the DDL with a statement SQLite cannot parse.
    aggregate_recorder.create_table_statements = ["BLAH"]
    self.assertRaises(OperationalError, aggregate_recorder.create_table)
def test_with_snapshot_store(self) -> None:
    """Repository with a snapshot store: reconstruction works from events,
    from a snapshot plus later events, and at specific historic versions.
    """
    # Build transcoder with the codecs used by the account's state.
    transcoder = JSONTranscoder()
    transcoder.register(UUIDAsHex())
    transcoder.register(DecimalAsStr())
    transcoder.register(DatetimeAsISO())
    event_recorder = SQLiteAggregateRecorder(SQLiteDatastore(":memory:"))
    event_recorder.create_table()
    event_store: EventStore[Aggregate.Event] = EventStore(
        mapper=Mapper(transcoder=transcoder),
        recorder=event_recorder,
    )
    # Separate recorder/store for snapshots.
    snapshot_recorder = SQLiteAggregateRecorder(SQLiteDatastore(":memory:"))
    snapshot_recorder.create_table()
    snapshot_store: EventStore[Snapshot] = EventStore(
        mapper=Mapper(transcoder=transcoder),
        recorder=snapshot_recorder,
    )
    repository: Repository = Repository(event_store, snapshot_store)
    # Check key error: unknown id raises AggregateNotFound.
    with self.assertRaises(AggregateNotFound):
        repository.get(uuid4())
    # Open an account.
    account = BankAccount.open(
        full_name="Alice",
        email_address="*****@*****.**",
    )
    # Credit the account.
    account.append_transaction(Decimal("10.00"))
    account.append_transaction(Decimal("25.00"))
    account.append_transaction(Decimal("30.00"))
    # Collect pending events.
    pending = account.collect_events()
    # Store pending events.
    event_store.put(pending)
    # Reconstruct from events alone.
    copy = repository.get(account.id)
    assert isinstance(copy, BankAccount)
    # Check copy has correct attribute values.
    assert copy.id == account.id
    assert copy.balance == Decimal("65.00")
    # Store a snapshot of the current state.
    snapshot = Snapshot(
        originator_id=account.id,
        originator_version=account.version,
        timestamp=datetime.now(tz=TZINFO),
        topic=get_topic(type(account)),
        state=account.__dict__,
    )
    snapshot_store.put([snapshot])
    # Reconstruct again, now with the snapshot available.
    copy2 = repository.get(account.id)
    assert isinstance(copy2, BankAccount)
    # Check copy has correct attribute values.
    assert copy2.id == account.id
    assert copy2.balance == Decimal("65.00")
    # Credit the account after the snapshot was taken.
    account.append_transaction(Decimal("10.00"))
    event_store.put(account.collect_events())
    # Check copy has correct attribute values: snapshot plus later event.
    copy3 = repository.get(account.id)
    assert isinstance(copy3, BankAccount)
    assert copy3.id == account.id
    assert copy3.balance == Decimal("75.00")
    # Check can get old version of account (the snapshotted version).
    copy4 = repository.get(account.id, version=copy.version)
    assert isinstance(copy4, BankAccount)
    assert copy4.balance == Decimal("65.00")
    # Versions 1..4 reflect the balance after each successive event.
    copy5 = repository.get(account.id, version=1)
    assert isinstance(copy5, BankAccount)
    assert copy5.balance == Decimal("0.00")
    copy6 = repository.get(account.id, version=2)
    assert isinstance(copy6, BankAccount)
    assert copy6.balance == Decimal("10.00")
    copy7 = repository.get(account.id, version=3)
    assert isinstance(copy7, BankAccount)
    assert copy7.balance == Decimal("35.00"), copy7.balance
    copy8 = repository.get(account.id, version=4)
    assert isinstance(copy8, BankAccount)
    assert copy8.balance == Decimal("65.00"), copy8.balance
def create_recorder(self):
    """Build an application recorder on this test's file database."""
    app_recorder = SQLiteApplicationRecorder(SQLiteDatastore(self.db_uri))
    app_recorder.create_table()
    return app_recorder