Example #1
    def get_event_class_and_attrs(self, topic, state):
        # Resolve topic to event class.
        domain_event_class = resolve_topic(topic)

        # Decrypt state.
        if self.cipher:
            state = self.cipher.decrypt(state)

        # Deserialize data.
        event_attrs = json_loads(state, cls=self.json_decoder_class)
        return domain_event_class, event_attrs
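
Both this snippet and the next rely on resolve_topic to turn a topic string into a class. The resolver itself is not shown on this page; the following is only a minimal sketch of the idea, assuming topics take a 'package.module#ClassName' form (the name resolve_topic_sketch is ours, not the library's):

    from importlib import import_module

    def resolve_topic_sketch(topic):
        # Assumed topic format: 'package.module#ClassName'.
        module_name, _, class_name = topic.partition('#')
        # Import the module, then look the class up by name.
        module = import_module(module_name)
        return getattr(module, class_name)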
Example #2
    def from_topic_and_data(self, topic, data):
        # Resolve topic to event class.
        domain_event_class = resolve_topic(topic)

        # Decrypt data.
        if self.cipher:
            data = self.cipher.decrypt(data)

        # Deserialize data.
        event_attrs = json_loads(data, cls=self.json_decoder_class)

        # Reconstruct domain event object.
        return reconstruct_object(domain_event_class, event_attrs)
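
The last line rebuilds the event object from its class and attribute dict. reconstruct_object is not shown here; a plausible sketch of the technique, bypassing __init__ and restoring the attribute dict directly (illustrative only, not necessarily the library's actual code):

    def reconstruct_object_sketch(obj_class, obj_attrs):
        # Allocate an instance without calling __init__, since the
        # event's state is restored wholesale from the decoded attrs.
        obj = object.__new__(obj_class)
        obj.__dict__.update(obj_attrs)
        return obj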
Example #3
    def run(self, prompt=None, advance_by=None):

        if prompt:
            assert isinstance(prompt, Prompt)
            upstream_names = [prompt.process_name]
        else:
            upstream_names = self.readers.keys()

        notification_count = 0
        for upstream_name in upstream_names:

            if not self.is_reader_position_ok[upstream_name]:
                self.del_notification_generator(upstream_name)
                self.set_reader_position_from_tracking_records(upstream_name)
                self.is_reader_position_ok[upstream_name] = True

            while True:
                with self._policy_lock:
                    # Get notification generator.
                    generator = self.get_notification_generator(upstream_name, advance_by)
                    try:
                        notification = next(generator)
                    except StopIteration:
                        self.del_notification_generator(upstream_name)
                        break

                    notification_count += 1

                    # Get domain event from notification.
                    event = self.get_event_from_notification(notification)

                    # Decode causal dependencies of the domain event.
                    causal_dependencies = notification.get('causal_dependencies') or '[]'
                    causal_dependencies = json_loads(causal_dependencies) or []

                    # Check causal dependencies are satisfied.
                    for causal_dependency in causal_dependencies:
                        pipeline_id = causal_dependency['pipeline_id']
                        notification_id = causal_dependency['notification_id']

                        _manager = self.event_store.record_manager
                        has_tracking_record = _manager.has_tracking_record(
                            upstream_application_name=upstream_name,
                            pipeline_id=pipeline_id,
                            notification_id=notification_id
                        )
                        if not has_tracking_record:
                            # Invalidate reader position.
                            self.is_reader_position_ok[upstream_name] = False

                            # Raise exception.
                            raise CausalDependencyFailed({
                                'application_name': self.name,
                                'upstream_name': upstream_name,
                                'pipeline_id': pipeline_id,
                                'notification_id': notification_id
                            })

                    # Wait on the clock event, if there is one.
                    if self.clock_event is not None:
                        self.clock_event.wait()

                    # print("Processing upstream event: ", event)
                    new_events = self.process_upstream_event(event, notification['id'], upstream_name)

                self.take_snapshots(new_events)

                # Publish a prompt if there are new notifications.
                # Todo: Optionally send events as prompts, saves pulling event if it arrives in order.
                if any(event.__notifiable__ for event in new_events):
                    self.publish_prompt()

        return notification_count
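
This run() loop assumes each notification carries an 'id' plus an optional JSON-encoded 'causal_dependencies' list. A hypothetical notification illustrating the shape the loop consumes (all values invented):

    notification = {
        'id': 42,
        'causal_dependencies': '[{"pipeline_id": 0, "notification_id": 41}]',
    }
    # Each dependency names a (pipeline_id, notification_id) pair that must
    # already have a tracking record before this event may be processed.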
Example #4
    def run(self, prompt=None, advance_by=None):

        if prompt is None:
            readers_items = self.readers.items()
        else:
            assert isinstance(prompt, Prompt)
            reader = self.readers[prompt.process_name]
            readers_items = [(prompt.process_name, reader)]

        notification_count = 0
        for upstream_application_name, reader in readers_items:

            if not self.is_reader_position_ok[upstream_application_name]:
                self.set_reader_position_from_tracking_records(reader, upstream_application_name)
                self.is_reader_position_ok[upstream_application_name] = True

            for notification in reader.read(advance_by=advance_by):
                # Todo: Put this on a queue and then get the next one.
                notification_count += 1

                # Todo: Get this from a queue and do it in a different thread?
                # Domain event from notification.
                event = self.event_store.sequenced_item_mapper.from_topic_and_data(
                    topic=notification['event_type'],
                    data=notification['state']
                )

                # Wait for causal dependencies to be satisfied.
                upstream_causal_dependencies = notification.get('causal_dependencies')
                if upstream_causal_dependencies is not None:
                    upstream_causal_dependencies = json_loads(upstream_causal_dependencies)
                if upstream_causal_dependencies is None:
                    upstream_causal_dependencies = []
                for causal_dependency in upstream_causal_dependencies:
                    pipeline_id = causal_dependency['pipeline_id']
                    notification_id = causal_dependency['notification_id']

                    if not self.tracking_record_manager.has_tracking_record(
                        application_name=self.name,
                        upstream_application_name=upstream_application_name,
                        pipeline_id=pipeline_id,
                        notification_id=notification_id
                    ):
                        self.is_reader_position_ok[upstream_application_name] = False

                        raise CausalDependencyFailed({
                            'application_name': self.name,
                            'upstream_application_name': upstream_application_name,
                            'pipeline_id': pipeline_id,
                            'notification_id': notification_id
                        })

                # Call policy with the event.
                all_aggregates, causal_dependencies = self.call_policy(event)

                # Record new events.
                try:
                    new_events = self.record_new_events(
                        all_aggregates, notification, upstream_application_name, causal_dependencies
                    )
                except Exception as e:
                    self.is_reader_position_ok[upstream_application_name] = False
                    # self._cached_entities = {}
                    raise e
                else:
                    # Publish a prompt if there are new notifications.
                    # Todo: Optionally send events as prompts, saves pulling event if it arrives in order.
                    if len(new_events):
                        self.publish_prompt()

        return notification_count
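
Both run() variants are exercised against the same wiring pattern. A minimal usage sketch, following the Process API as it appears in the tests below (parameters copied from those tests):

    core = Process(
        'core',
        persist_event_type=ExampleAggregate.Created,
        setup_tables=True,
    )
    downstream = Process(
        'downstream',
        policy=event_logging_policy,
        session=core.session,
    )
    downstream.follow('core', core.notification_log)
    downstream.run()   # returns the number of notifications processed
    core.close()
    downstream.close()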
Example #5
    def test_json_loads(self):
        # Check that json_loads raises ValueError when the JSON string is invalid.
        with self.assertRaises(ValueError):
            json_loads('{')
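
json_loads here is the library's helper; the test relies only on it raising ValueError for malformed input. A thin sketch of such a helper over the standard library (json.JSONDecodeError subclasses ValueError, so the assertion above holds):

    import json

    def json_loads_sketch(s, cls=None):
        # Delegate to the stdlib; a custom decoder class may be passed
        # through, matching how json_loads is called in the snippets above.
        return json.loads(s, cls=cls)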
Example #6
    def test_causal_dependencies(self):
        # Try to process an event that has unresolved causal dependencies.
        pipeline_id1 = 0
        pipeline_id2 = 1

        # Create two events, one has causal dependency on the other.
        core1 = Process(
            'core',
            persist_event_type=ExampleAggregate.Created,
            setup_tables=True,
            pipeline_id=pipeline_id1,
        )

        core2 = Process(
            'core',
            pipeline_id=pipeline_id2,
            policy=example_policy,
            session=core1.session
        )

        # First event in pipeline 1.
        aggregate = ExampleAggregate.__create__()
        aggregate.__save__()

        # Second event in pipeline 2.
        # - it's important this is done in a policy so the causal dependency is identified
        core2.follow('core', core1.notification_log)
        core2.run()

        # Check the aggregate exists.
        self.assertTrue(aggregate.id in core1.repository)

        # Check the aggregate has been "moved on".
        self.assertTrue(core1.repository[aggregate.id].is_moved_on)

        # Check the events have different pipeline IDs.
        records = core1.event_store.record_manager.get_records(aggregate.id)
        self.assertEqual(2, len(records))
        self.assertEqual(pipeline_id1, records[0].pipeline_id)

        # Check the causal dependencies have been constructed.
        self.assertEqual(None, records[0].causal_dependencies)
        self.assertEqual([{
            'notification_id': 1,
            'pipeline_id': pipeline_id1
        }], json_loads(records[1].causal_dependencies))

        # Setup downstream process.
        downstream1 = Process(
            'downstream',
            pipeline_id=pipeline_id1,
            session=core1.session,
            policy=event_logging_policy,
        )
        downstream1.follow('core', core1.notification_log)
        downstream2 = Process(
            'downstream',
            pipeline_id=pipeline_id2,
            session=core1.session,
            policy=event_logging_policy,
        )
        downstream2.follow('core', core2.notification_log)

        # Try to process pipeline 2, should fail due to causal dependency.
        with self.assertRaises(CausalDependencyFailed):
            downstream2.run()

        self.assertEqual(0, len(downstream1.event_store.record_manager.get_notifications()))
        self.assertEqual(0, len(downstream2.event_store.record_manager.get_notifications()))

        # Try to process pipeline 1, should work.
        downstream1.run()

        self.assertEqual(1, len(downstream1.event_store.record_manager.get_notifications()))
        self.assertEqual(0, len(downstream2.event_store.record_manager.get_notifications()))

        # Try again to process pipeline 2, should work this time.
        downstream2.run()

        self.assertEqual(1, len(downstream1.event_store.record_manager.get_notifications()))
        self.assertEqual(1, len(downstream2.event_store.record_manager.get_notifications()))

        core1.close()
        core2.close()
        downstream1.close()
        downstream2.close()
Example #7
    def test_causal_dependencies(self):
        # Try to process an event that has unresolved causal dependencies.
        pipeline_id1 = 0
        pipeline_id2 = 1

        # Create two events, one has causal dependency on the other.
        process_class = ProcessApplication.mixin(self.process_class)
        core1 = process_class(
            name='core',
            # persist_event_type=ExampleAggregate.Created,
            persist_event_type=BaseAggregateRoot.Event,
            setup_table=True,
            pipeline_id=pipeline_id1,
        )
        core1.use_causal_dependencies = True

        # Needed for SQLAlchemy only.
        kwargs = {
            'session': core1.session
        } if hasattr(core1, 'session') else {}

        core2 = process_class(name='core',
                              pipeline_id=pipeline_id2,
                              policy=example_policy,
                              **kwargs)
        core2.use_causal_dependencies = True

        # First event in pipeline 1.
        aggregate = ExampleAggregate.__create__()
        aggregate.__save__()

        # Second event in pipeline 2.
        # - it's important this is done in a policy so the causal dependencies are identified
        core2.follow('core', core1.notification_log)
        core2.run()

        # Check the aggregate exists.
        self.assertTrue(aggregate.id in core1.repository)

        # Check the aggregate has been "moved on".
        aggregate = core1.repository[aggregate.id]
        self.assertTrue(aggregate.is_moved_on)
        self.assertTrue(aggregate.second_id)
        self.assertIn(aggregate.second_id, core1.repository)

        # Check the events have different pipeline IDs.
        aggregate_records = core1.event_store.record_manager.get_records(
            aggregate.id)
        second_entity_records = core1.event_store.record_manager.get_records(
            aggregate.second_id)

        self.assertEqual(2, len(aggregate_records))
        self.assertEqual(1, len(second_entity_records))

        self.assertEqual(pipeline_id1, aggregate_records[0].pipeline_id)
        self.assertEqual(pipeline_id2, aggregate_records[1].pipeline_id)
        self.assertEqual(pipeline_id2, second_entity_records[0].pipeline_id)

        # Check the causal dependencies have been constructed.
        # - the first 'Created' event doesn't have any causal dependencies
        self.assertFalse(aggregate_records[0].causal_dependencies)

        # - the second 'Created' event depends on the Created event in another pipeline.
        expect = [{'notification_id': 1, 'pipeline_id': pipeline_id1}]
        actual = json_loads(second_entity_records[0].causal_dependencies)

        self.assertEqual(expect, actual)

        # - the 'AttributeChanged' event depends on the second Created,
        # which is in the same pipeline, so expect no causal dependencies.
        self.assertFalse(aggregate_records[1].causal_dependencies)

        # Setup downstream process.
        downstream1 = process_class(name='downstream',
                                    pipeline_id=pipeline_id1,
                                    policy=event_logging_policy,
                                    **kwargs)
        downstream1.follow('core', core1.notification_log)
        downstream2 = process_class(name='downstream',
                                    pipeline_id=pipeline_id2,
                                    policy=event_logging_policy,
                                    **kwargs)
        downstream2.follow('core', core2.notification_log)

        # Try to process pipeline 2, should fail due to causal dependency.
        with self.assertRaises(CausalDependencyFailed):
            downstream2.run()

        self.assertEqual(
            0, len(downstream1.event_store.record_manager.get_notifications()))
        self.assertEqual(
            0, len(downstream2.event_store.record_manager.get_notifications()))

        # Try to process pipeline 1, should work.
        downstream1.run()

        self.assertEqual(
            1, len(downstream1.event_store.record_manager.get_notifications()))
        self.assertEqual(
            0, len(downstream2.event_store.record_manager.get_notifications()))

        # Try again to process pipeline 2, should work this time.
        downstream2.run()

        self.assertEqual(
            1, len(downstream1.event_store.record_manager.get_notifications()))
        self.assertEqual(
            2, len(downstream2.event_store.record_manager.get_notifications()))

        core1.close()
        core2.close()
        downstream1.close()
        downstream2.close()