def test_has_tracking_id(self):
    """A tracking ID is reported as present only after it has been recorded."""
    # Construct the recorder.
    recorder = self.create_recorder()

    def check_recorded(*expected_present):
        # Assert exactly which of notification IDs 1..3 have been recorded.
        for notification_id in (1, 2, 3):
            if notification_id in expected_present:
                self.assertTrue(
                    recorder.has_tracking_id("upstream_app", notification_id))
            else:
                self.assertFalse(
                    recorder.has_tracking_id("upstream_app", notification_id))

    # Nothing has been recorded yet.
    check_recorded()

    # Record tracking info for notification 1.
    recorder.insert_events(
        stored_events=[],
        tracking=Tracking(
            application_name="upstream_app",
            notification_id=1,
        ),
    )
    check_recorded(1)

    # Record tracking info for notification 2.
    recorder.insert_events(
        stored_events=[],
        tracking=Tracking(
            application_name="upstream_app",
            notification_id=2,
        ),
    )
    check_recorded(1, 2)
def pull_and_process(self, name: str) -> None:
    """
    Pulls and processes unseen domain event notifications
    from the notification log reader of the named application.

    Each received event notification is converted to a domain
    event object, which is passed to the :func:`policy` together
    with a new :class:`ProcessEvent` object. The process event
    carries a :class:`~eventsourcing.persistence.Tracking` object
    recording the name of the upstream application and the position
    in its notification log from which the notification was pulled.
    The policy saves aggregates to the process event object, using
    its :func:`~ProcessEvent.save` method, which collects pending
    domain events using the aggregates'
    :func:`~eventsourcing.domain.Aggregate.collect_events` method,
    and the process event object is then recorded by calling the
    :func:`record` method.
    """
    log_reader, event_mapper = self.readers[name]
    # Resume from just after the last processed position.
    first_unseen = self.recorder.max_tracking_id(name) + 1
    for notification in log_reader.read(start=first_unseen):
        tracking = Tracking(
            application_name=name,
            notification_id=notification.id,
        )
        process_event = ProcessEvent(tracking)
        self.policy(
            event_mapper.to_domain_event(notification),
            process_event,
        )
        self.record(process_event)
def test_legacy_save(self):
    """The legacy save-based policy still works but warns of deprecation."""
    # Open an account and take its creation event.
    account = BankAccount.open(
        full_name="Alice",
        email_address="*****@*****.**",
    )
    created_event = account.collect_events()[0]

    tracking = Tracking(
        application_name="upstream_app",
        notification_id=5,
    )
    processing_event = ProcessingEvent(tracking=tracking)

    # Calling the legacy policy should raise a deprecation warning.
    with warnings.catch_warnings(record=True) as caught:
        policy_legacy_save(created_event, processing_event)

    # Verify deprecation warning.
    assert len(caught) == 1
    warning = caught[-1]
    assert issubclass(warning.category, DeprecationWarning)
    assert "'save()' is deprecated, use 'collect_events()' instead" in str(
        warning.message)

    # The policy should have collected exactly one notification event.
    self.assertEqual(len(processing_event.events), 1)
    self.assertIsInstance(
        processing_event.events[0],
        EmailNotification.Created,
    )
def test_max_doesnt_increase_when_lower_inserted_later(self) -> None:
    """max_tracking_id stays at the highest recorded position even when
    a lower position is inserted afterwards."""
    # Construct the recorder.
    recorder = self.create_recorder()

    lower = Tracking(
        application_name="upstream_app",
        notification_id=1,
    )
    higher = Tracking(
        application_name="upstream_app",
        notification_id=2,
    )

    # Record the higher position first.
    recorder.insert_events(
        stored_events=[],
        tracking=higher,
    )
    self.assertEqual(
        recorder.max_tracking_id("upstream_app"),
        2,
    )

    # Recording the lower position must not move the maximum.
    recorder.insert_events(
        stored_events=[],
        tracking=lower,
    )
    self.assertEqual(
        recorder.max_tracking_id("upstream_app"),
        2,
    )
def convert_notifications(
        self, leader_name: str,
        notifications: Iterable[Notification]) -> List[ProcessingJob]:
    """
    Uses the :class:`~eventsourcing.persistence.Mapper` registered
    for the named leader to convert each received
    :class:`~eventsourcing.persistence.Notification` object to an
    :class:`~eventsourcing.domain.AggregateEvent` object paired with
    a :class:`~eventsourcing.persistence.Tracking` object.
    """
    to_domain_event = self.mappers[leader_name].to_domain_event
    return [
        (
            to_domain_event(notification),
            Tracking(
                application_name=leader_name,
                notification_id=notification.id,
            ),
        )
        for notification in notifications
    ]
def insert():
    # Build a single stored event for a fresh aggregate.
    event = StoredEvent(
        originator_id=uuid4(),
        originator_version=0,
        topic="topic1",
        state=b"state1",
    )
    # Record the event along with the next upstream tracking position.
    recorder.insert_events(
        stored_events=[event],
        tracking=Tracking(
            application_name="upstream_app",
            notification_id=next(notification_ids),
        ),
    )
def test_policy(self):
    """The policy collects one email notification for an opened account."""
    # Open an account and take its creation event.
    account = BankAccount.open(
        full_name="Alice",
        email_address="*****@*****.**",
    )
    created_event = account.collect_events()[0]

    tracking = Tracking(
        application_name="upstream_app",
        notification_id=5,
    )
    processing_event = ProcessingEvent(tracking=tracking)

    # Run the policy on the creation event.
    policy(created_event, processing_event)

    # Exactly one EmailNotification.Created event should be collected.
    self.assertEqual(len(processing_event.events), 1)
    self.assertIsInstance(
        processing_event.events[0],
        EmailNotification.Created,
    )
def run(self) -> None:
    """
    Thread body: consumes items from the converting queue, converts
    them to processing jobs, and forwards the jobs to the processing
    queue, until stopped or an error occurs.
    """
    # Signal to the starter that this thread is now running.
    self.has_started.set()
    try:
        while True:
            recording_event_or_notifications = self.converting_queue.get()
            self.converting_queue.task_done()
            # A None item, or the stopping flag being set, ends the thread.
            if (self.is_stopping.is_set()
                    or recording_event_or_notifications is None):
                return
            processing_jobs = []
            if isinstance(recording_event_or_notifications, RecordingEvent):
                # Build jobs directly from the recordings carried by the
                # recording event, pairing each domain event with tracking
                # info for its notification position.
                recording_event = recording_event_or_notifications
                for recording in recording_event.recordings:
                    # Skip topics the follower is not configured to follow.
                    if (self.follower.follow_topics
                            and recording.notification.topic
                            not in self.follower.follow_topics):
                        continue
                    tracking = Tracking(
                        application_name=recording_event.application_name,
                        notification_id=recording.notification.id,
                    )
                    processing_jobs.append(
                        (recording.domain_event, tracking))
            else:
                # Otherwise the item is a batch of notifications; delegate
                # conversion to the follower.
                notifications = recording_event_or_notifications
                processing_jobs = self.follower.convert_notifications(
                    leader_name=self.leader_name,
                    notifications=notifications)
            if processing_jobs:
                self.processing_queue.put(processing_jobs)
    except Exception as e:
        # Record the failure and signal it, so the owner can observe the
        # error; the original exception is chained as the cause.
        print(traceback.format_exc())
        self.error = NotificationConvertingError(str(e))
        self.error.__cause__ = e
        self.has_errored.set()
def test_retry_max_tracking_id_after_closing_connection(self):
    """max_tracking_id succeeds on a fresh connection after all
    existing connections are closed."""
    # Construct the recorder.
    recorder = self.create_recorder()

    # Check we have a connection (from create_table).
    self.assertTrue(self.datastore._connections)

    # Record one stored event at tracking position 1.
    stored_event = StoredEvent(
        originator_id=uuid4(),
        originator_version=0,
        topic="topic1",
        state=b"state1",
    )
    recorder.insert_events([stored_event], tracking=Tracking("upstream", 1))

    # Drop every open connection to force the recorder to retry.
    pg_close_all_connections()

    # The query should still return the recorded position.
    self.assertEqual(recorder.max_tracking_id("upstream"), 1)
def receive_recording_event(self, recording_event: RecordingEvent) -> None:
    """
    Receives recording event by appending it to list of received
    recording events.

    Unless this method has previously been called and not yet
    returned, it will then attempt to make the followers process
    all received recording events, until there are none remaining.
    """
    # Always enqueue the event under the list lock.
    with self._recording_events_received_lock:
        self._recording_events_received.append(recording_event)
    # Only one caller at a time drains the list; others just enqueue
    # and return, leaving their events for the current drainer.
    if self._processing_lock.acquire(blocking=False):
        try:
            while True:
                # Atomically take the accumulated events and reset the list.
                with self._recording_events_received_lock:
                    recording_events_received = self._recording_events_received
                    self._recording_events_received = []
                if not recording_events_received:
                    break
                for recording_event in recording_events_received:
                    leader_name = recording_event.application_name
                    previous_max_notification_id = (
                        self._previous_max_notification_ids.get(
                            leader_name, 0))
                    # Ignore recording event if already seen a subsequent.
                    if (recording_event.previous_max_notification_id
                            is not None
                            and recording_event.previous_max_notification_id
                            < previous_max_notification_id):
                        continue
                    # Catch up if there is a gap in sequence of recording events.
                    if (recording_event.previous_max_notification_id is None
                            or recording_event.previous_max_notification_id
                            > previous_max_notification_id):
                        for follower_name in self.system.leads[
                                leader_name]:
                            follower = self.apps[follower_name]
                            assert isinstance(follower, Follower)
                            # Pull from just after the follower's last
                            # processed position, up to just before the
                            # first recording in this event.
                            start = (follower.recorder.max_tracking_id(
                                leader_name) + 1)
                            stop = recording_event.recordings[
                                0].notification.id - 1
                            follower.pull_and_process(
                                leader_name=leader_name,
                                start=start,
                                stop=stop,
                            )
                    # Fan each recording out to every following application.
                    for recording in recording_event.recordings:
                        for follower_name in self.system.leads[
                                leader_name]:
                            follower = self.apps[follower_name]
                            assert isinstance(follower, Follower)
                            # Skip topics this follower does not follow.
                            if (follower.follow_topics
                                    and recording.notification.topic
                                    not in follower.follow_topics):
                                continue
                            follower.process_event(
                                domain_event=recording.domain_event,
                                tracking=Tracking(
                                    application_name=recording_event.
                                    application_name,
                                    notification_id=recording.notification.
                                    id,
                                ),
                            )
                    # Remember the highest position seen for this leader.
                    self._previous_max_notification_ids[
                        leader_name] = recording_event.recordings[
                            -1].notification.id
        finally:
            self._processing_lock.release()
def test_process_event(self):
    """
    Processing an event is idempotent with respect to tracking, and a
    conflicting aggregate version raises an IntegrityError.
    """
    # Notification aggregate whose ID is derived (uuid5) from the
    # recipient address, so two notifications to the same address
    # collide on the same aggregate ID.
    class UUID5EmailNotification(Aggregate):
        def __init__(self, to, subject, message):
            self.to = to
            self.subject = subject
            self.message = message

        @staticmethod
        def create_id(to: str):
            return uuid5(NAMESPACE_URL, f"/emails/{to}")

    # Process application whose policy creates one such notification
    # for each opened bank account.
    class UUID5EmailProcess(EmailProcess):
        def policy(self, domain_event, processing_event):
            if isinstance(domain_event, BankAccount.Opened):
                notification = UUID5EmailNotification(
                    to=domain_event.email_address,
                    subject="Your New Account",
                    message="Dear {}, ...".format(domain_event.full_name),
                )
                processing_event.collect_events(notification)

    bank_accounts = BankAccounts()
    email_process = UUID5EmailProcess()

    account = BankAccount.open(
        full_name="Alice",
        email_address="*****@*****.**",
    )
    recordings = bank_accounts.save(account)
    self.assertEqual(len(recordings), 1)
    aggregate_event = recordings[0].domain_event
    notification = recordings[0].notification
    tracking = Tracking(bank_accounts.name, notification.id)

    # Process the event.
    email_process.process_event(aggregate_event, tracking)
    self.assertEqual(
        email_process.recorder.max_tracking_id(bank_accounts.name),
        notification.id)

    # Process the event again, ignore tracking integrity error.
    email_process.process_event(aggregate_event, tracking)
    self.assertEqual(
        email_process.recorder.max_tracking_id(bank_accounts.name),
        notification.id)

    # Create another event that will cause conflict with email processing.
    account = BankAccount.open(
        full_name="Alice",
        email_address="*****@*****.**",
    )
    recordings = bank_accounts.save(account)

    # Process the event and expect an integrity error.
    aggregate_event = recordings[0].domain_event
    notification = recordings[0].notification
    tracking = Tracking(bank_accounts.name, notification.id)
    with self.assertRaises(IntegrityError):
        email_process.process_event(aggregate_event, tracking)
def test_insert_select(self):
    """Tracking positions advance only when tracking info is recorded,
    and duplicate tracking info is rejected."""
    # Construct the recorder.
    recorder = self.create_recorder()

    # Initially nothing has been tracked.
    self.assertEqual(
        recorder.max_tracking_id("upstream_app"),
        0,
    )

    # Prepare four stored events for two aggregates.
    aggregate1_id = uuid4()
    aggregate2_id = uuid4()
    specs = [
        (aggregate1_id, 1, "topic1", b"state1"),
        (aggregate1_id, 2, "topic2", b"state2"),
        (aggregate2_id, 1, "topic3", b"state3"),
        (aggregate2_id, 2, "topic4", b"state4"),
    ]
    event1, event2, event3, event4 = [
        StoredEvent(
            originator_id=originator_id,
            originator_version=originator_version,
            topic=topic,
            state=state,
        )
        for originator_id, originator_version, topic, state in specs
    ]

    tracking1 = Tracking(
        application_name="upstream_app",
        notification_id=1,
    )
    tracking2 = Tracking(
        application_name="upstream_app",
        notification_id=2,
    )

    # Insert the first two events at tracking position 1.
    recorder.insert_events(
        stored_events=[event1, event2],
        tracking=tracking1,
    )
    self.assertEqual(
        recorder.max_tracking_id("upstream_app"),
        1,
    )

    # Re-using the same tracking info is an integrity error...
    with self.assertRaises(IntegrityError):
        recorder.insert_events(
            stored_events=[event3],
            tracking=tracking1,
        )
    # ...and the position is unchanged.
    self.assertEqual(
        recorder.max_tracking_id("upstream_app"),
        1,
    )

    # The third event goes in with fresh tracking info at position 2.
    recorder.insert_events(
        stored_events=[event3],
        tracking=tracking2,
    )
    self.assertEqual(
        recorder.max_tracking_id("upstream_app"),
        2,
    )

    # Inserting without tracking info leaves the position unchanged.
    recorder.insert_events(stored_events=[event4])
    self.assertEqual(
        recorder.max_tracking_id("upstream_app"),
        2,
    )