def check_causal_dependencies(self, upstream_name, causal_dependencies_json):
    """
    Checks the causal dependencies are satisfied (have already been processed).

    :param upstream_name: Name of the upstream application being processed.
    :param causal_dependencies_json: Pipelines and positions in notification logs.
    :raises CausalDependencyFailed: If causal dependencies are not satisfied.
    """
    # Decode the JSON-encoded dependency list; empty/None means no dependencies.
    dependencies = (
        self.event_store.event_mapper.json_loads(causal_dependencies_json)
        if causal_dependencies_json
        else []
    )

    manager = self.event_store.record_manager
    assert isinstance(manager, RecordManagerWithTracking)

    # Each dependency names a (pipeline_id, notification_id) that must already
    # have a tracking record for this upstream application.
    for dependency in dependencies:
        # Todo: Check causal dependency on system software version?
        assert isinstance(dependency, dict)
        dep_pipeline_id = dependency["pipeline_id"]
        dep_notification_id = dependency["notification_id"]
        if manager.has_tracking_record(
            upstream_application_name=upstream_name,
            pipeline_id=dep_pipeline_id,
            notification_id=dep_notification_id,
        ):
            continue
        # Unsatisfied dependency: invalidate the reader position so it is
        # re-derived from tracking records, then fail loudly.
        self.is_reader_position_ok[upstream_name] = False
        raise CausalDependencyFailed({
            "application_name": self.name,
            "upstream_name": upstream_name,
            "pipeline_id": dep_pipeline_id,
            "notification_id": dep_notification_id,
        })
def run(self, prompt=None, advance_by=None):
    """
    Pull and process pending event notifications from upstream applications.

    :param prompt: Optional Prompt naming a single upstream application to
        process; if None, all configured readers are processed.
    :param advance_by: Passed through to the notification generator —
        presumably bounds how far the reader advances; confirm against
        get_notification_generator.
    :return: The number of notifications processed in this call.
    :raises CausalDependencyFailed: If a notification's causal dependencies
        have not yet been processed.
    """
    if prompt:
        assert isinstance(prompt, Prompt)
        upstream_names = [prompt.process_name]
    else:
        upstream_names = self.readers.keys()
    notification_count = 0
    for upstream_name in upstream_names:
        # Recover the reader position from tracking records if it was
        # previously invalidated (e.g. by a failed causal dependency check).
        if not self.is_reader_position_ok[upstream_name]:
            self.del_notification_generator(upstream_name)
            self.set_reader_position_from_tracking_records(upstream_name)
            self.is_reader_position_ok[upstream_name] = True
        while True:
            # The whole process-one-notification step runs under the policy
            # lock, so notifications are handled one at a time.
            with self._policy_lock:
                # Get notification generator.
                generator = self.get_notification_generator(upstream_name, advance_by)
                try:
                    notification = next(generator)
                except StopIteration:
                    # No more notifications for this upstream: drop the
                    # generator and move on to the next upstream name.
                    self.del_notification_generator(upstream_name)
                    break
                notification_count += 1
                # Get domain event from notification.
                event = self.get_event_from_notification(notification)
                # Decode causal dependencies of the domain event.
                # Missing/empty field decodes to an empty list.
                causal_dependencies = notification.get('causal_dependencies') or '[]'
                causal_dependencies = json_loads(causal_dependencies) or []
                # Check causal dependencies are satisfied (each named
                # (pipeline_id, notification_id) already has a tracking record).
                for causal_dependency in causal_dependencies:
                    pipeline_id = causal_dependency['pipeline_id']
                    notification_id = causal_dependency['notification_id']
                    _manager = self.event_store.record_manager
                    has_tracking_record = _manager.has_tracking_record(
                        upstream_application_name=upstream_name,
                        pipeline_id=pipeline_id,
                        notification_id=notification_id
                    )
                    if not has_tracking_record:
                        # Invalidate reader position so it is re-derived from
                        # tracking records on the next run.
                        self.is_reader_position_ok[upstream_name] = False
                        # Raise exception.
                        raise CausalDependencyFailed({
                            'application_name': self.name,
                            'upstream_name': upstream_name,
                            'pipeline_id': pipeline_id,
                            'notification_id': notification_id
                        })
                # Wait on the clock event, if there is one.
                if self.clock_event is not None:
                    self.clock_event.wait()
                new_events = self.process_upstream_event(event, notification['id'], upstream_name)
                self.take_snapshots(new_events)
                # Publish a prompt if there are new notifications.
                # Todo: Optionally send events as prompts, saves pulling event if it arrives in order.
                if any([event.__notifiable__ for event in new_events]):
                    self.publish_prompt()
    return notification_count
def run(self, prompt=None, advance_by=None):
    """
    Pull and process pending event notifications from upstream applications.

    :param prompt: Optional Prompt naming a single upstream application to
        process; if None, all configured readers are processed.
    :param advance_by: Passed through to reader.read() — presumably bounds
        how far the reader advances; confirm against the reader's API.
    :return: The number of notifications processed in this call.
    :raises CausalDependencyFailed: If a notification's causal dependencies
        have not yet been processed.
    """
    if prompt is None:
        readers_items = self.readers.items()
    else:
        assert isinstance(prompt, Prompt)
        reader = self.readers[prompt.process_name]
        readers_items = [(prompt.process_name, reader)]
    notification_count = 0
    for upstream_application_name, reader in readers_items:
        # Recover the reader position from tracking records if it was
        # previously invalidated (e.g. by a failed dependency check or a
        # failed record attempt below).
        if not self.is_reader_position_ok[upstream_application_name]:
            self.set_reader_position_from_tracking_records(reader, upstream_application_name)
            self.is_reader_position_ok[upstream_application_name] = True
        for notification in reader.read(advance_by=advance_by):
            # Todo: Put this on a queue and then get the next one.
            notification_count += 1
            # Todo: Get this from a queue and do it in a different thread?
            # Reconstruct the domain event from the notification payload.
            event = self.event_store.sequenced_item_mapper.from_topic_and_data(
                topic=notification['event_type'],
                data=notification['state']
            )
            # Wait for causal dependencies to be satisfied: each named
            # (pipeline_id, notification_id) must already have a tracking
            # record for this application.
            upstream_causal_dependencies = notification.get('causal_dependencies')
            if upstream_causal_dependencies is not None:
                upstream_causal_dependencies = json_loads(upstream_causal_dependencies)
            if upstream_causal_dependencies is None:
                upstream_causal_dependencies = []
            for causal_dependency in upstream_causal_dependencies:
                pipeline_id = causal_dependency['pipeline_id']
                notification_id = causal_dependency['notification_id']
                if not self.tracking_record_manager.has_tracking_record(
                    application_name=self.name,
                    upstream_application_name=upstream_application_name,
                    pipeline_id=pipeline_id,
                    notification_id=notification_id
                ):
                    # Invalidate reader position so it is re-derived from
                    # tracking records on the next run.
                    self.is_reader_position_ok[upstream_application_name] = False
                    raise CausalDependencyFailed({
                        'application_name': self.name,
                        'upstream_application_name': upstream_application_name,
                        'pipeline_id': pipeline_id,
                        'notification_id': notification_id
                    })
            # Call policy with the event.
            all_aggregates, causal_dependencies = self.call_policy(event)
            # Record new events; on any failure invalidate the reader
            # position before re-raising, so the position is re-derived
            # from tracking records next time.
            try:
                new_events = self.record_new_events(
                    all_aggregates, notification, upstream_application_name, causal_dependencies
                )
            except Exception as e:
                self.is_reader_position_ok[upstream_application_name] = False
                # self._cached_entities = {}
                raise e
            else:
                # Publish a prompt if there are new notifications.
                # Todo: Optionally send events as prompts, saves pulling event if it arrives in order.
                if len(new_events):
                    self.publish_prompt()
    return notification_count
def run(self, prompt: Optional[Prompt] = None, advance_by: Optional[int] = None) -> int:
    """
    Pull and process pending event notifications from upstream applications.

    :param prompt: Optional prompt naming a single upstream application to
        process; if None, all configured readers are processed.
        NOTE(review): annotated as Prompt but asserted to be PromptToPull
        below — presumably PromptToPull is the expected subtype; confirm.
    :param advance_by: Passed through to the notification generator —
        presumably bounds how far the reader advances; confirm against
        get_notification_generator.
    :return: The number of notifications processed in this call.
    :raises CausalDependencyFailed: If a notification's causal dependencies
        have not yet been processed.
    """
    if prompt:
        assert isinstance(prompt, PromptToPull)
        upstream_names = [prompt.process_name]
    else:
        upstream_names = list(self.readers.keys())
    notification_count = 0
    record_manager = self.event_store.record_manager
    assert isinstance(record_manager, ACIDRecordManager)
    for upstream_name in upstream_names:
        # Recover the reader position from tracking records if it was
        # previously invalidated (e.g. by a failed causal dependency check).
        if not self.is_reader_position_ok[upstream_name]:
            self.del_notification_generator(upstream_name)
            self.set_reader_position_from_tracking_records(upstream_name)
            self.is_reader_position_ok[upstream_name] = True
        while True:
            # The whole process-one-notification step runs under the policy
            # lock, so notifications are handled one at a time.
            with self._policy_lock:
                # Get notification generator.
                generator = self.get_notification_generator(
                    upstream_name, advance_by)
                try:
                    notification = next(generator)
                except StopIteration:
                    # No more notifications for this upstream: drop the
                    # generator and move on to the next upstream name.
                    self.del_notification_generator(upstream_name)
                    break
                notification_count += 1
                # Get domain event from notification.
                event = self.get_event_from_notification(notification)
                # Decode causal dependencies of the domain event.
                # Missing/empty field decodes to an empty list.
                causal_dependencies = (
                    self.event_store.event_mapper.json_loads(
                        notification.get("causal_dependencies") or "[]") or [])
                # Check causal dependencies are satisfied.
                assert isinstance(causal_dependencies, list)
                for causal_dependency in causal_dependencies:
                    # Todo: Check causal dependency on system software version?
                    # Check causal dependencies on event notifications: each
                    # named (pipeline_id, notification_id) must already have
                    # a tracking record for this upstream application.
                    assert isinstance(causal_dependency, dict)
                    pipeline_id = causal_dependency["pipeline_id"]
                    notification_id = causal_dependency["notification_id"]
                    has_tracking_record = record_manager.has_tracking_record(
                        upstream_application_name=upstream_name,
                        pipeline_id=pipeline_id,
                        notification_id=notification_id,
                    )
                    if not has_tracking_record:
                        # Invalidate reader position so it is re-derived
                        # from tracking records on the next run.
                        self.is_reader_position_ok[upstream_name] = False
                        # Raise exception.
                        raise CausalDependencyFailed({
                            "application_name": self.name,
                            "upstream_name": upstream_name,
                            "pipeline_id": pipeline_id,
                            "notification_id": notification_id,
                        })
                # Wait on the clock event, if there is one.
                if self.clock_event is not None:
                    self.clock_event.wait()
                new_events: Sequence[
                    TAggregateEvent] = self.process_upstream_event(
                    event, notification["id"], upstream_name)
                self.take_snapshots(new_events)
                # Publish a prompt if there are new notifications.
                # Todo: Optionally send events as prompts, saves pulling
                # event if it arrives in order.
                if any([event.__notifiable__ for event in new_events]):
                    self.publish_prompt()
    return notification_count