Example #1
    def construct_notification_log(self) -> None:
        """
        Constructs notification log object.
        """
        self._notification_log = RecordManagerNotificationLog(
            self.event_store.record_manager,
            section_size=self.notification_log_section_size,
        )
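Every example here passes a section_size when constructing RecordManagerNotificationLog; it controls how many notification items each section of the log contains. The helper below is a minimal, hypothetical sketch (the function name and the 'start,end' section ID format are assumptions for illustration, not taken from the examples) of how a section size partitions notification positions into sections:

def section_id_for_position(position: int, section_size: int) -> str:
    # Hypothetical helper: return a 'start,end' style section ID that
    # covers the given 1-based notification position.
    start = ((position - 1) // section_size) * section_size + 1
    end = start + section_size - 1
    return f"{start},{end}"

# With section_size=20, positions 1-20 fall in section "1,20",
# positions 21-40 in "21,40", and so on.
assert section_id_for_position(1, 20) == "1,20"
assert section_id_for_position(21, 20) == "21,40"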
Example #2
    def init_process(self, msg):
        self.pipeline_actor = msg.pipeline_actor
        self.downstream_actors = msg.downstream_actors
        self.pipeline_id = msg.pipeline_id
        self.upstream_application_names = msg.upstream_application_names

        # Construct the process application class.
        process_class = msg.process_application_class
        if msg.infrastructure_class:
            process_class = process_class.mixin(msg.infrastructure_class)

        # Reset the database connection (for Django).
        process_class.reset_connection_after_forking()

        # Construct the process application.
        self.process = process_class(pipeline_id=self.pipeline_id)
        assert isinstance(self.process, ProcessApplication)

        # Subscribe the slave actor's send_prompt() method.
        #  - the process application will call publish_prompt()
        #    and the actor will receive the prompt and send it
        #    as a message.
        subscribe(predicate=self.is_my_prompt, handler=self.send_prompt)

        # Close the process application persistence policy.
        #  - slave actor process application doesn't publish
        #    events, so we don't need this
        self.process.persistence_policy.close()

        # Unsubscribe the process application's publish_prompt_for_events().
        #  - slave actor process application doesn't publish
        #    events, so we don't need this
        unsubscribe(
            predicate=self.process.persistence_policy.is_event,
            handler=self.process.publish_prompt_for_events,
        )

        # Construct and follow upstream notification logs.
        for upstream_application_name in self.upstream_application_names:
            record_manager = self.process.event_store.record_manager
            # assert isinstance(record_manager, ACIDRecordManager), type(record_manager)
            notification_log = RecordManagerNotificationLog(
                record_manager=record_manager.clone(
                    application_name=upstream_application_name,
                    pipeline_id=self.pipeline_id,
                ),
                section_size=self.process.notification_log_section_size,
            )
            self.process.follow(upstream_application_name, notification_log)
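The subscribe() call in init_process() pairs a predicate with a handler, so the actor forwards only the prompts that concern it. The is_my_prompt() predicate itself is not shown in this example; the sketch below is a hypothetical guess at its shape (the prompt attribute names process_name and pipeline_id are assumptions):

    def is_my_prompt(self, prompt) -> bool:
        # Hypothetical sketch: accept only prompts published by one of the
        # upstream applications this actor follows, on the same pipeline.
        return (
            getattr(prompt, "process_name", None) in self.upstream_application_names
            and getattr(prompt, "pipeline_id", None) == self.pipeline_id
        )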
Example #3
    def create_notification_log(self, section_size):
        # Construct a notification log backed by the entity record manager.
        return RecordManagerNotificationLog(self.entity_record_manager,
                                            section_size)
Example #4
    def run(self) -> None:
        # Construct process application class.
        process_class = self.application_process_class
        if not isinstance(process_class,
                          ApplicationWithConcreteInfrastructure):
            if self.infrastructure_class:
                process_class = process_class.mixin(self.infrastructure_class)
            else:
                raise ProgrammingError("infrastructure_class is not set")

        # Construct process application object.
        self.process: ProcessApplication = process_class(
            pipeline_id=self.pipeline_id, setup_table=self.setup_tables)

        # Follow upstream notification logs.
        for upstream_name in self.upstream_names:

            # Obtain a notification log object (local or remote) for the upstream
            # process.
            if upstream_name == self.process.name:
                # Upstream is this process's application,
                # so use own notification log.
                notification_log = self.process.notification_log
            else:
                # For a different application, we need to construct a
                # notification log with a record manager that has the upstream
                # application ID. Currently this assumes all applications are
                # using the same database and record manager class. If it
                # wasn't the same database, we would need to use a remote
                # notification log, and the upstream application would need to
                # provide an API from which we can pull. It's not unreasonable
                # to have a fixed number of application processes connecting
                # to the same database.
                record_manager = self.process.event_store.record_manager

                notification_log = RecordManagerNotificationLog(
                    record_manager=record_manager.clone(
                        application_name=upstream_name,
                        # Todo: Check if setting pipeline_id is necessary (it's the
                        #  same?).
                        pipeline_id=self.pipeline_id,
                    ),
                    section_size=self.process.notification_log_section_size,
                )
                # Todo: Support upstream partition IDs different from
                #  self.pipeline_id?
                # Todo: Support combining partitions: read from different
                #  partitions but write to the same partition. Could be one OS
                #  process that reads from many logs of the same upstream app,
                #  or many processes each reading one partition, with
                #  contention writing to the same partition.
                # Todo: Support dividing partitions: read from one partition
                #  but write to many. Maybe one process per upstream
                #  partition, with round-robin to pick the partition for each
                #  write. Or have many processes reading, with each taking it
                #  in turn to skip processing somehow.
                # Todo: Dividing partitions would allow a stream to flow at
                #  the same rate through slower process applications.
                # Todo: Support merging results from "replicated state
                #  machines" - could have a command logging process that takes
                #  client commands and presents them in a notification log.
                #  Then the system could be deployed in different places,
                #  running independently, receiving the same commands, and
                #  running the same processes. The command logging process
                #  could be accompanied by a result logging process that reads
                #  results from replicas as they become available. Not sure
                #  what to do if replicas return different things. If one
                #  replica goes down, then it could resume by pulling events
                #  from another? Not sure what to do. External systems could
                #  be modelled as commands.

            # Make the process follow the upstream notification log.
            self.process.follow(upstream_name, notification_log)

        # Subscribe to broadcast prompts published by the process application.
        subscribe(handler=self.broadcast_prompt, predicate=is_prompt_to_pull)

        try:
            self.loop_on_prompts()
        finally:
            unsubscribe(handler=self.broadcast_prompt,
                        predicate=is_prompt_to_pull)
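run() hands control to loop_on_prompts(), which is not shown here. The sketch below is a minimal, hypothetical version of such a loop (the queue-based hand-off and the pull_and_process() method name are assumptions, not part of the example): wait for a prompt, then have the process application pull and process new notifications from the followed upstream logs.

import queue

def loop_on_prompts(prompt_queue: queue.Queue, process) -> None:
    # Hypothetical sketch: block until a prompt arrives, then let the
    # process application react to it. A None item acts as a poison
    # pill that stops the loop.
    while True:
        prompt = prompt_queue.get()
        if prompt is None:
            break
        process.pull_and_process(prompt)  # hypothetical method name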
Example #5
    def construct_notification_log(self):
        # Construct a notification log backed by the event store's
        # record manager, using the configured section size.
        self.notification_log = RecordManagerNotificationLog(
            self.event_store.record_manager,
            section_size=self.notification_log_section_size,
        )