Example #1
    def __init__(self, persist_event_type=None, uri=None, session=None, cipher_key=None,
                 stored_event_record_class=None, setup_table=True, contiguous_record_ids=True):

        # Setup cipher (optional).
        self.setup_cipher(cipher_key)

        # Setup connection to database.
        self.setup_datastore(session, uri)

        # Setup the event store.
        self.stored_event_record_class = stored_event_record_class
        self.contiguous_record_ids = contiguous_record_ids
        self.setup_event_store()

        # Setup notifications.
        self.notification_log = RecordManagerNotificationLog(
            self.event_store.record_manager,
            section_size=20,
        )

        # Setup an event sourced repository.
        self.setup_repository()

        # Setup a persistence policy.
        self.setup_persistence_policy(persist_event_type)

        # Setup table in database.
        if setup_table:
            self.setup_table()
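
The log constructed in Example #1 paginates the application's event notifications into sections of 20 items. A minimal reading sketch follows; it assumes dictionary-style section access with a 'current' section id, and that the returned section exposes items and previous_id attributes, none of which is shown in the example itself.

def print_latest_notifications(notification_log):
    # Hedged sketch: the 'current' section id and the section attributes
    # (items, previous_id) are assumptions about the interface, not taken
    # from Example #1.
    section = notification_log['current']
    for item in section.items:
        print(item)
    # Earlier notifications would be reachable by following previous_id.
    if section.previous_id is not None:
        earlier = notification_log[section.previous_id]
        print('previous section holds', len(earlier.items), 'items')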
Example #2
    def run(self):
        # Construct process application object.
        self.process = self.application_process_class(
            pipeline_id=self.pipeline_id,
            notification_log_section_size=self.notification_log_section_size,
            pool_size=self.pool_size,
            setup_table=self.setup_tables,
        )

        # Follow upstream notification logs.
        for upstream_name in self.upstream_names:

            # Obtain a notification log object (local or remote) for the upstream process.
            if upstream_name == self.process.name:
                # Upstream is this process's application,
                # so use own notification log.
                notification_log = self.process.notification_log
            else:
                # For a different application, we need to construct a notification
                # log with a record manager that has the upstream application ID.
                # This currently assumes all applications are using the same database
                # and record manager class. If it weren't the same database, we would
                # need to use a remote notification log, and upstream would need to
                # provide an API from which we can pull. It's not unreasonable to have
                # a fixed number of application processes connecting to the same database.
                record_manager = self.process.event_store.record_manager
                notification_log = RecordManagerNotificationLog(
                    record_manager=record_manager.clone(
                        application_name=upstream_name,
                        pipeline_id=self.pipeline_id),
                    section_size=self.process.notification_log_section_size)
                # Todo: Support upstream partition IDs different from self.pipeline_id.
                # Todo: Support combining partitions: read from different partitions but write to the same
                # partition. This could be one OS process that reads from many logs of the same upstream app,
                # or many processes each reading one partition (with contention writing to the same partition).
                # Todo: Support dividing partitions: read from one but write to many. Maybe one process per
                # upstream partition, with round-robin to pick the partition for writes. Or have many processes
                # reading, each taking it in turn to skip processing somehow.
                # Todo: Dividing partitions would allow a stream to flow at the same rate through slower
                # process applications.
                # Todo: Support merging results from "replicated state machines" - could have a command
                # logging process that takes client commands and presents them in a notification log.
                # Then the system could be deployed in different places, running independently, receiving
                # the same commands, and running the same processes. The command logging process could
                # be accompanied by a result logging process that reads results from replicas as they
                # become available. It isn't clear what to do if replicas return different things; if one
                # replica goes down, it could perhaps resume by pulling events from another.
                # External systems could be modelled as commands.

            # Make the process follow the upstream notification log.
            self.process.follow(upstream_name, notification_log)

        # Subscribe to broadcast prompts published by the process application.
        subscribe(handler=self.broadcast_prompt, predicate=self.is_prompt)

        try:
            self.loop_on_prompts()
        finally:
            unsubscribe(handler=self.broadcast_prompt,
                        predicate=self.is_prompt)
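
The own-versus-upstream decision in Example #2 can be factored into a small helper. The sketch below reuses only the calls shown above (record_manager.clone() and the RecordManagerNotificationLog constructor); the helper name notification_log_for is hypothetical.

def notification_log_for(process, upstream_name, pipeline_id):
    # Hypothetical helper mirroring Example #2: reuse the process's own
    # notification log when it follows itself, otherwise clone the record
    # manager so the log reads the upstream application's records.
    if upstream_name == process.name:
        return process.notification_log
    record_manager = process.event_store.record_manager
    return RecordManagerNotificationLog(
        record_manager=record_manager.clone(
            application_name=upstream_name,
            pipeline_id=pipeline_id,
        ),
        section_size=process.notification_log_section_size,
    )

Inside run(), the loop over self.upstream_names would then reduce to calling this helper and passing the result to self.process.follow().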
Example #3
    def init_process(self, msg):
        self.pipeline_actor = msg.pipeline_actor
        self.downstream_actors = msg.downstream_actors
        self.pipeline_id = msg.pipeline_id
        self.upstream_application_names = msg.upstream_application_names

        self.process = msg.process_application_class(
            pipeline_id=self.pipeline_id,
            notification_log_section_size=5,
            pool_size=3,
        )
        assert isinstance(self.process, ProcessApplication)
        # Close the persistence policy.
        self.process.persistence_policy.close()
        # Replace publish_prompt().
        self.process.publish_prompt = lambda *args: self.publish_prompt(*args)

        # Construct and follow upstream notification logs.
        for upstream_application_name in self.upstream_application_names:
            record_manager = self.process.event_store.record_manager
            assert isinstance(record_manager, SQLAlchemyRecordManager)
            notification_log = RecordManagerNotificationLog(
                record_manager=record_manager.clone(
                    application_name=upstream_application_name,
                    pipeline_id=self.pipeline_id
                ),
                section_size=self.process.notification_log_section_size
            )
            self.process.follow(upstream_application_name, notification_log)
Example #4
    def __init__(self,
                 name='',
                 persistence_policy=None,
                 persist_event_type=None,
                 uri=None,
                 pool_size=5,
                 session=None,
                 cipher_key=None,
                 sequenced_item_class=None,
                 stored_event_record_class=None,
                 setup_table=True,
                 contiguous_record_ids=True,
                 pipeline_id=-1,
                 notification_log_section_size=None):

        self.notification_log_section_size = notification_log_section_size
        self.name = name or type(self).__name__.lower()

        # Setup cipher (optional).
        self.setup_cipher(cipher_key)

        # Setup connection to database.
        self.setup_datastore(session, uri, pool_size)

        # Setup the event store.
        self.sequenced_item_class = sequenced_item_class
        self.stored_event_record_class = stored_event_record_class
        self.contiguous_record_ids = contiguous_record_ids
        self.application_id = uuid_from_application_name(self.name)
        self.pipeline_id = pipeline_id
        self.setup_event_store()

        # Setup notifications.
        self.notification_log = RecordManagerNotificationLog(
            self.event_store.record_manager,
            section_size=self.notification_log_section_size)

        # Setup an event sourced repository.
        self.setup_repository()

        # Setup a persistence policy.
        self.persistence_policy = persistence_policy
        if self.persistence_policy is None:
            self.setup_persistence_policy(persist_event_type
                                          or type(self).persist_event_type)

        # Setup table in database.
        if setup_table and not session:
            self.setup_table()
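
For reference, an application with the constructor from Example #4 could be instantiated roughly as follows. The keyword arguments are the ones declared above; the class name MyApplication and the SQLite URI are placeholders for illustration, not part of the example.

# Sketch only: MyApplication stands for a concrete subclass that has the
# __init__ shown in Example #4; the URI is an ordinary SQLAlchemy URL.
app = MyApplication(
    uri='sqlite:///:memory:',  # throwaway in-memory database
    setup_table=True,          # create the table on startup
    notification_log_section_size=20,
)

# The application now exposes a notification log wired to its event
# store's record manager, as constructed in the __init__ above.
log = app.notification_log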
Example #5
    def init_process(self, msg):
        self.pipeline_actor = msg.pipeline_actor
        self.downstream_actors = msg.downstream_actors
        self.pipeline_id = msg.pipeline_id
        self.upstream_application_names = msg.upstream_application_names

        # Construct the process application class.
        process_class = msg.process_application_class
        if msg.infrastructure_class:
            process_class = process_class.mixin(msg.infrastructure_class)

        # Reset the database connection (for Django).
        process_class.reset_connection_after_forking()

        # Construct the process application.
        self.process = process_class(pipeline_id=self.pipeline_id)
        assert isinstance(self.process, ProcessApplication)

        # Subscribe the slave actor's send_prompt() method.
        #  - the process application will call publish_prompt()
        #    and the actor will receive the prompt and send it
        #    as a message.
        subscribe(predicate=self.is_my_prompt, handler=self.send_prompt)

        # Close the process application's persistence policy.
        #  - the slave actor's process application doesn't publish
        #    events, so the policy isn't needed.
        self.process.persistence_policy.close()

        # Unsubscribe the process application's publish_prompt().
        #  - the slave actor's process application doesn't publish
        #    events, so this handler isn't needed either.
        unsubscribe(predicate=self.process.persistence_policy.is_event,
                    handler=self.process.publish_prompt)

        # Construct and follow upstream notification logs.
        for upstream_application_name in self.upstream_application_names:
            record_manager = self.process.event_store.record_manager
            # assert isinstance(record_manager, ACIDRecordManager), type(record_manager)
            notification_log = RecordManagerNotificationLog(
                record_manager=record_manager.clone(
                    application_name=upstream_application_name,
                    pipeline_id=self.pipeline_id),
                section_size=self.process.notification_log_section_size)
            self.process.follow(upstream_application_name, notification_log)
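
The mixin() call near the top of Example #5 combines the process application class with an infrastructure class before instantiation. A usage-style sketch follows, in which Orders and SQLAlchemyInfrastructure are hypothetical class names; only the mixin() and reset_connection_after_forking() calls are taken from the example.

# Sketch: Orders is a hypothetical ProcessApplication subclass and
# SQLAlchemyInfrastructure a hypothetical infrastructure class; combining
# them with mixin() follows the pattern shown in Example #5.
process_class = Orders.mixin(SQLAlchemyInfrastructure)

# Reset connections after forking (Example #5 does this for Django),
# then construct the process for a given pipeline.
process_class.reset_connection_after_forking()
process = process_class(pipeline_id=0)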
Example #6
    def create_notification_log(self, section_size):
        return RecordManagerNotificationLog(
            self.entity_record_manager, section_size)
Example #7
    def setup_notification_log(self):
        self.notification_log = RecordManagerNotificationLog(
            self.event_store.record_manager,
            section_size=self.notification_log_section_size)
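
Taken together, these examples construct RecordManagerNotificationLog either from an application's own record manager (Examples #1, #4, #6 and #7) or from a record manager cloned for an upstream application (Examples #2, #3 and #5). Extending the reading sketch after Example #1, every notification can be gathered by walking the section links; again the 'current' section id and the items/previous_id attributes are assumptions about the section interface, not something shown above.

def all_notifications(notification_log):
    # Hedged sketch: collect sections by walking back from 'current',
    # then yield their items oldest-first. The section interface used
    # here (items, previous_id) is assumed, not shown in the examples.
    sections = []
    section = notification_log['current']
    while True:
        sections.append(section)
        if section.previous_id is None:
            break
        section = notification_log[section.previous_id]
    for section in reversed(sections):
        for item in section.items:
            yield item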