def start(self):
    """
    Starts all the actors to run a system of process applications.
    """
    # Subscribe to broadcast prompts published by a process
    # application in the parent operating system process.
    subscribe(handler=self.forward_prompt, predicate=is_prompt_to_pull)

    # Initialise the system actor.
    msg = SystemInitRequest(
        self.system.process_classes,
        self.infrastructure_class,
        self.system.upstream_names,
        self.pipeline_ids,
    )
    response = self.actor_system.ask(self.system_actor, msg)

    # Keep the pipeline actor addresses, to send prompts directly.
    assert isinstance(response, SystemInitResponse), type(response)
    assert list(response.pipeline_actors.keys()) == self.pipeline_ids, (
        "Configured pipeline IDs mismatch initialised system {} {}"
    ).format(list(response.pipeline_actors.keys()), self.pipeline_ids)
    self.pipeline_actors = response.pipeline_actors
def test_publish_subscribe_unsubscribe(self):
    # Check subscribing event handlers with predicates.
    # - when predicate is True, handler should be called
    event = mock.Mock()
    predicate = mock.Mock()
    handler = mock.Mock()

    # When predicate is True, handler should be called ONCE.
    subscribe(event_predicate=predicate, subscriber=handler)
    publish(event)
    predicate.assert_called_once_with(event)
    handler.assert_called_once_with(event)

    # When predicate is True, after unsubscribing, handler should NOT be called again.
    unsubscribe(event_predicate=predicate, subscriber=handler)
    publish(event)
    predicate.assert_called_once_with(event)
    handler.assert_called_once_with(event)

    # When predicate is False, handler should NOT be called.
    predicate = lambda x: False
    handler = mock.Mock()
    subscribe(event_predicate=predicate, subscriber=handler)
    publish(event)
    self.assertEqual(0, handler.call_count)
def __init__(self, name=None, policy=None, setup_tables=False, setup_table=False,
             session=None, persist_event_type=None, **kwargs):
    setup_table = setup_tables = setup_table or setup_tables
    super(Process, self).__init__(name=name, setup_table=setup_table,
                                  session=session,
                                  persist_event_type=persist_event_type,
                                  **kwargs)
    # self._cached_entities = {}
    self.policy_func = policy
    self.readers = OrderedDict()
    self.is_reader_position_ok = defaultdict(bool)

    # Set up tracking records.
    self.tracking_record_manager = self.tracking_record_manager_class(
        self.datastore.session)
    if setup_tables and not session:
        self.datastore.setup_table(
            self.tracking_record_manager.record_class)

    ## Prompts policy.
    #
    # 1. Publish prompts whenever domain events are published (important: after they are persisted).
    # 2. Run this process whenever an upstream application prompts followers to pull new notifications.
    subscribe(predicate=self.persistence_policy.is_event,
              handler=self.publish_prompt_from_event)
    subscribe(predicate=self.is_upstream_prompt,
              handler=self.run)
def __init__(self, contract_valuation_repo, call_link_repo,
             call_dependencies_repo, call_requirement_repo, call_result_repo,
             simulated_price_repo, market_simulation_repo, call_leafs_repo,
             call_evaluation_queue, call_dependents_repo,
             perturbation_dependencies_repo, simulated_price_requirements_repo):
    assert isinstance(contract_valuation_repo, ContractValuationRepository), contract_valuation_repo
    assert isinstance(call_link_repo, CallLinkRepository), call_link_repo
    assert isinstance(call_dependencies_repo, CallDependenciesRepository), call_dependencies_repo
    assert isinstance(call_requirement_repo, CallRequirementRepository), call_requirement_repo
    assert isinstance(call_result_repo, (CallResultRepository, dict)), call_result_repo
    # assert isinstance(simulated_price_repo, SimulatedPriceRepository), simulated_price_repo
    assert isinstance(market_simulation_repo, MarketSimulationRepository), market_simulation_repo
    assert isinstance(call_dependents_repo, CallDependentsRepository), call_dependents_repo
    assert isinstance(perturbation_dependencies_repo, PerturbationDependenciesRepo), perturbation_dependencies_repo
    assert isinstance(simulated_price_requirements_repo, SimulatedPriceRequirementsRepo), simulated_price_requirements_repo

    self.contract_valuation_repo = contract_valuation_repo
    self.call_link_repo = call_link_repo
    self.call_dependencies_repo = call_dependencies_repo
    self.call_requirement_repo = call_requirement_repo
    self.call_result_repo = call_result_repo
    self.simulated_price_repo = simulated_price_repo
    self.market_simulation_repo = market_simulation_repo
    self.call_leafs_repo = call_leafs_repo
    self.call_evaluation_queue = call_evaluation_queue
    self.call_dependents_repo = call_dependents_repo
    self.perturbation_dependencies_repo = perturbation_dependencies_repo
    self.simulated_price_dependencies_repo = simulated_price_requirements_repo

    subscribe(self.is_contract_valuation_created, self.generate_contract_valuation)
def __init__(self, name=None, policy=None, setup_table=False,
             use_direct_query_if_available=False,
             notification_log_reader_class=None, **kwargs):
    self.policy_func = policy
    self.readers = OrderedDict()
    self.is_reader_position_ok = defaultdict(bool)
    self._notification_generators = {}
    self._policy_lock = Lock()
    self.clock_event = None
    self.tick_interval = None
    self.use_direct_query_if_available = use_direct_query_if_available
    self.notification_log_reader_class = (
        notification_log_reader_class or type(self).notification_log_reader_class
    )
    super(ProcessApplication, self).__init__(name=name, setup_table=setup_table, **kwargs)

    # Publish prompts for any domain events that we persist.
    if self.persistence_policy:
        subscribe(
            predicate=self.persistence_policy.is_event,
            handler=self.publish_prompt,
        )
def run(self):
    # Construct process application object.
    self.process = self.application_process_class(
        pipeline_id=self.pipeline_id,
        notification_log_section_size=self.notification_log_section_size,
        pool_size=self.pool_size,
        setup_table=self.setup_tables,
    )

    # Follow upstream notification logs.
    for upstream_name in self.upstream_names:

        # Obtain a notification log object (local or remote) for the upstream process.
        if upstream_name == self.process.name:
            # Upstream is this process's application,
            # so use its own notification log.
            notification_log = self.process.notification_log
        else:
            # For a different application, we need to construct a notification
            # log with a record manager that has the upstream application ID.
            # Currently assumes all applications are using the same database
            # and record manager class. If it weren't the same database, we
            # would need to use a remote notification log, and upstream would
            # need to provide an API from which we can pull. It's not
            # unreasonable to have a fixed number of application processes
            # connecting to the same database.
            record_manager = self.process.event_store.record_manager
            notification_log = RecordManagerNotificationLog(
                record_manager=record_manager.clone(
                    application_name=upstream_name,
                    pipeline_id=self.pipeline_id),
                section_size=self.process.notification_log_section_size)
            # Todo: Support upstream partition IDs different from self.pipeline_id.
            # Todo: Support combining partitions: read from different partitions
            # but write to the same partition. Could be one OS process that reads
            # from many logs of the same upstream app, or many processes each
            # reading one partition, with contention writing to the same partition.
            # Todo: Support dividing partitions: read from one but write to many.
            # Maybe one process per upstream partition, with round-robin to pick
            # the partition for writes. Or many processes reading, with each
            # taking it in turn to skip processing somehow.
            # Todo: Dividing partitions would allow a stream to flow at the same
            # rate through slower process applications.
            # Todo: Support merging results from "replicated state machines" -
            # could have a command logging process that takes client commands
            # and presents them in a notification log. Then the system could be
            # deployed in different places, running independently, receiving
            # the same commands, and running the same processes. The command
            # logging process could be accompanied by a result logging process
            # that reads results from replicas as they become available. Not
            # sure what to do if replicas return different things. If one
            # replica goes down, perhaps it could resume by pulling events from
            # another. External systems could be modelled as commands.

        # Make the process follow the upstream notification log.
        self.process.follow(upstream_name, notification_log)

    # Subscribe to broadcast prompts published by the process application.
    subscribe(handler=self.broadcast_prompt, predicate=self.is_prompt)

    try:
        self.loop_on_prompts()
    finally:
        unsubscribe(handler=self.broadcast_prompt, predicate=self.is_prompt)
def start(self):
    assert len(self.system.processes) == 0, "Already running"

    # Construct the processes.
    for process_class in self.system.process_classes.values():
        process = self.system.construct_app(
            process_class=process_class,
            infrastructure_class=self.infrastructure_class,
            setup_table=self.setup_tables or self.system.setup_tables,
            use_direct_query_if_available=self.use_direct_query_if_available,
        )
        self.system.processes[process.name] = process

    # Tell each process about the processes it follows.
    for followed_name, followers in self.system.followers.items():
        followed = self.system.processes[followed_name]
        followed_log = followed.notification_log
        for follower_name in followers:
            follower = self.system.processes[follower_name]
            follower.follow(followed_name, followed_log)

    # Subscribe a handler to propagate prompts.
    subscribe(
        predicate=self.system.is_prompt,
        handler=self.handle_prompt,
    )
def wrap(handler_func):
    subscribe(create_type_predicate(), handler_func)

    @wraps(handler_func)
    def handler_func_wrapper(*args, **kwargs):
        handler_func(*args, **kwargs)

    return handler_func_wrapper
def test_handle_prompt_failed(self):
    process = Process(
        'test',
        policy=example_policy,
        persist_event_type=ExampleAggregate.Event,
        setup_tables=True,
    )

    def raise_exception(_):
        raise Exception()

    def raise_prompt_failed(_):
        raise PromptFailed()

    subscribe(raise_exception)
    try:
        with self.assertRaises(PromptFailed):
            process.publish_prompt()
    finally:
        unsubscribe(raise_exception)

    subscribe(raise_prompt_failed)
    try:
        with self.assertRaises(PromptFailed):
            process.publish_prompt()
    finally:
        unsubscribe(raise_prompt_failed)

    try:
        process.publish_prompt()
    finally:
        process.close()
def test_handle_prompt_failed(self):
    process = ProcessApplication.mixin(self.infrastructure_class)(
        name="test",
        policy=example_policy,
        persist_event_type=ExampleAggregate.Event,
        setup_table=True,
    )

    def raise_exception(_):
        raise Exception()

    def raise_prompt_failed(_):
        raise PromptFailed()

    subscribe(raise_exception)
    try:
        with self.assertRaises(PromptFailed):
            process.publish_prompt()
    finally:
        unsubscribe(raise_exception)

    subscribe(raise_prompt_failed)
    try:
        with self.assertRaises(PromptFailed):
            process.publish_prompt()
    finally:
        unsubscribe(raise_prompt_failed)

    try:
        process.publish_prompt()
    finally:
        process.close()
def __init__(self, market_calibration_repo, market_simulation_repo):
    assert isinstance(market_calibration_repo, MarketCalibrationRepository)
    assert isinstance(market_simulation_repo, MarketSimulationRepository)
    self.market_calibration_repo = market_calibration_repo
    self.market_simulation_repo = market_simulation_repo
    subscribe(self.market_simulation_created,
              self.generate_simulated_prices_for_market_simulation)
def __init__(self, contract_specification_repo, call_dependencies_repo,
             call_dependents_repo):
    assert isinstance(contract_specification_repo, ContractSpecificationRepository)
    assert isinstance(call_dependencies_repo, CallDependenciesRepository)
    assert isinstance(call_dependents_repo, CallDependentsRepository)
    self.contract_specification_repo = contract_specification_repo
    self.call_dependencies_repo = call_dependencies_repo
    self.call_dependents_repo = call_dependents_repo
    subscribe(self.contract_specification_created, self.generate_dependency_graph)
def __init__(self, market_calibration_repo, market_simulation_repo,
             simulated_price_repo):
    assert isinstance(market_calibration_repo, MarketCalibrationRepository)
    assert isinstance(market_simulation_repo, MarketSimulationRepository)
    # assert isinstance(simulated_price_repo, SimulatedPriceRepository)
    self.market_calibration_repo = market_calibration_repo
    self.market_simulation_repo = market_simulation_repo
    self.simulated_price_repo = simulated_price_repo
    subscribe(self.market_simulation_created,
              self.generate_simulated_prices_for_market_simulation)
def __init__(
    self,
    event_store: AbstractEventStore,
    persist_event_type: Optional[Union[type, Tuple]] = None,
):
    self.event_store = event_store
    self.persist_event_type = persist_event_type
    subscribe(self.store_events, self.is_event)
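# The subscription made in the policy __init__ above is conventionally
# released by a close() method (note the calls to persistence_policy.close()
# in other examples here). A minimal sketch of that counterpart, assuming
# unsubscribe() comes from the same module as subscribe():
def close(self) -> None:
    unsubscribe(self.store_events, self.is_event)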
def wrap(func):
    def handler(event):
        if isinstance(event, (list, tuple)):
            # Call the handler once for each event in the sequence.
            for e in event:
                handler(e)
        elif not event_classes or isinstance(event, tuple(event_classes)):
            # Call the function if no classes were given, or the event matches one.
            func(event)

    subscribe(handler=handler, predicate=lambda _: True)
    return func
def start(self):
    assert self.os_processes is None, "Already started"
    self.os_processes = []
    self.manager = Manager()
    self.inboxes = {}
    self.outboxes = {}

    # Set up the queues.
    for pipeline_id in self.pipeline_ids:
        for process_class, upstream_classes in self.system.followings.items():
            inbox_id = (pipeline_id, process_class.__name__.lower())
            if inbox_id not in self.inboxes:
                self.inboxes[inbox_id] = self.manager.Queue()
            for upstream_class in upstream_classes:
                outbox_id = (pipeline_id, upstream_class.__name__.lower())
                if outbox_id not in self.outboxes:
                    self.outboxes[outbox_id] = Outbox()
                if inbox_id not in self.outboxes[outbox_id].downstream_inboxes:
                    self.outboxes[outbox_id].downstream_inboxes[inbox_id] = self.inboxes[inbox_id]

    # Subscribe to broadcast prompts published by a process
    # application in the parent operating system process.
    subscribe(handler=self.broadcast_prompt, predicate=self.is_prompt)

    # Start the operating system processes.
    for pipeline_id in self.pipeline_ids:
        for process_class, upstream_classes in self.system.followings.items():
            os_process = OperatingSystemProcess(
                application_process_class=process_class,
                upstream_names=[cls.__name__.lower() for cls in upstream_classes],
                poll_interval=self.poll_interval,
                pipeline_id=pipeline_id,
                notification_log_section_size=self.notification_log_section_size,
                pool_size=self.pool_size,
                setup_tables=self.setup_tables,
                inbox=self.inboxes[(pipeline_id, process_class.__name__.lower())],
                outbox=self.outboxes[(pipeline_id, process_class.__name__.lower())],
            )
            os_process.daemon = True
            os_process.start()
            self.os_processes.append(os_process)
            if self.setup_tables:
                # Avoid conflicts when creating tables.
                sleep(self.sleep_for_setup_tables)
def __init__(self, repository, snapshot_store,
             persist_event_type=EventWithOriginatorVersion, period=2):
    self.repository = repository
    assert isinstance(snapshot_store, AbstractEventStore)
    self.snapshot_store = snapshot_store
    self.period = period
    self.persist_event_type = persist_event_type
    subscribe(predicate=self.condition, handler=self.take_snapshot)
def __init__(
    self,
    name: str = "",
    policy: Optional[FunctionType] = None,
    setup_table: bool = False,
    use_direct_query_if_available: bool = False,
    notification_log_reader_class: Optional[Type[NotificationLogReader]] = None,
    apply_policy_to_generated_events: bool = False,
    **kwargs: Any
):
    self.policy_func = policy
    self.readers: OrderedDict[str, NotificationLogReader] = OrderedDict()
    self.is_reader_position_ok: Dict[str, bool] = defaultdict(bool)
    self._notification_generators: Dict[str, Iterator[Dict[str, Any]]] = {}
    self._policy_lock = Lock()
    self.clock_event: Optional[Event] = None
    self.tick_interval: Optional[Union[float, int]] = None
    self.use_direct_query_if_available = use_direct_query_if_available
    self.notification_log_reader_class = (
        notification_log_reader_class or type(self).notification_log_reader_class
    )
    self.apply_policy_to_generated_events = (
        apply_policy_to_generated_events
        or type(self).apply_policy_to_generated_events
    )
    super(ProcessApplication, self).__init__(
        name=name, setup_table=setup_table, **kwargs
    )
    if self._event_store:
        self.notification_topic_key = (
            self._event_store.record_manager.field_names.topic
        )
        self.notification_state_key = (
            self._event_store.record_manager.field_names.state
        )

    # Publish prompts for any domain events that we persist.
    # - Even when a process application is used as a simple application,
    #   with calls to aggregate __save__() methods, the new events that are
    #   stored might need to be processed by another application. Publishing
    #   a prompt after the events have been stored nudges followers to
    #   process them immediately (otherwise they would only pick up the
    #   events the next time they happen to run). How a prompt published
    #   here actually reaches any followers is the responsibility of the
    #   particular system runner.
    if self._persistence_policy:
        subscribe(
            predicate=self._persistence_policy.is_event,
            handler=self.publish_prompt_for_events,
        )
def wrap(func):
    def handler(event):
        if isinstance(event, (list, tuple)):
            # Call handler once for each event.
            for e in event:
                handler(e)
        elif not args or isinstance(event, args):
            # Call handler if there are no event classes, or the event is an instance of one.
            func(event)

    subscribe(handler=handler, predicate=lambda _: True)
    return func
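# A hedged usage sketch for the decorator factory whose inner wrap() is
# shown above. In the eventsourcing library this pattern is exposed as
# `subscribe_to`; the import path below follows the old-style module layout
# used throughout these examples and should be treated as an assumption, as
# should the stand-in event class:
from eventsourcing.domain.model.decorators import subscribe_to
from eventsourcing.domain.model.events import publish

class TodoCreated:
    """Stand-in event class for the sketch."""

@subscribe_to(TodoCreated)
def on_todo_created(event):
    # Called once per matching event, even when events are published as a batch.
    print("handled:", event)

publish([TodoCreated(), TodoCreated()])  # on_todo_created() is called twice.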
def test_calculate_statistics_when_backlog_is_done(self):
    received_statistics_calculated_events = []
    product_backlog = Given.product_backlog().please()
    analytics = Given.analytics().please()
    subscribe(
        lambda e: received_statistics_calculated_events.extend(e),
        predicate=lambda events: all(
            isinstance(e, Analytics.StatisticsCalculated)
            and e.originator_id == analytics.id
            for e in events))
    product_backlog.__trigger_event__(ProductBacklog.BacklogIsDone)
    self.assertEqual(1, len(received_statistics_calculated_events))
def __init__(
    self,
    repository: AbstractEntityRepository,
    snapshot_store: AbstractEventStore[AbstractSnapshot, AbstractRecordManager],
    persist_event_type: Optional[Union[type, Tuple]] = (EventWithOriginatorVersion,),
    period: int = 0,
):
    self.repository = repository
    self.snapshot_store = snapshot_store
    self.period = period
    self.persist_event_type = persist_event_type
    subscribe(predicate=self.condition, handler=self.take_snapshot)
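# A sketch of the `condition` predicate subscribed above, assuming the
# usual period-based rule: snapshot an aggregate every `period` events.
# This is a hypothetical reconstruction; the library's actual logic may
# differ in detail (e.g. in how it handles batches of events):
def condition(self, event) -> bool:
    if self.period and self.persist_event_type:
        if isinstance(event, (list, tuple)):
            return any(self.condition(e) for e in event)
        if isinstance(event, self.persist_event_type):
            return (event.originator_version + 1) % self.period == 0
    return False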
def init_process(self, msg):
    self.pipeline_actor = msg.pipeline_actor
    self.downstream_actors = msg.downstream_actors
    self.pipeline_id = msg.pipeline_id
    self.upstream_application_names = msg.upstream_application_names

    # Construct the process application class.
    process_class = msg.process_application_class
    if msg.infrastructure_class:
        process_class = process_class.mixin(msg.infrastructure_class)

    # Reset the database connection (for Django).
    process_class.reset_connection_after_forking()

    # Construct the process application.
    self.process = process_class(pipeline_id=self.pipeline_id)
    assert isinstance(self.process, ProcessApplication)

    # Subscribe the slave actor's send_prompt() method.
    # - the process application will call publish_prompt(),
    #   and the actor will receive the prompt and send it
    #   as a message.
    subscribe(predicate=self.is_my_prompt, handler=self.send_prompt)

    # Close the process application's persistence policy.
    # - the slave actor's process application doesn't publish
    #   events, so we don't need it.
    self.process.persistence_policy.close()

    # Unsubscribe the process application's publish_prompt_for_events().
    # - the slave actor's process application doesn't publish
    #   events, so we don't need it.
    unsubscribe(
        predicate=self.process.persistence_policy.is_event,
        handler=self.process.publish_prompt_for_events,
    )

    # Construct and follow upstream notification logs.
    for upstream_application_name in self.upstream_application_names:
        record_manager = self.process.event_store.record_manager
        # assert isinstance(record_manager, ACIDRecordManager), type(record_manager)
        notification_log = RecordManagerNotificationLog(
            record_manager=record_manager.clone(
                application_name=upstream_application_name,
                pipeline_id=self.pipeline_id,
            ),
            section_size=self.process.notification_log_section_size,
        )
        self.process.follow(upstream_application_name, notification_log)
def __init__(self, contract_specification_repo, call_dependencies_repo,
             call_dependents_repo, call_leafs_repo, call_requirement_repo,
             max_dependency_graph_size, dsl_classes):
    assert isinstance(contract_specification_repo, ContractSpecificationRepository)
    assert isinstance(call_dependencies_repo, CallDependenciesRepository)
    assert isinstance(call_dependents_repo, CallDependentsRepository)
    self.contract_specification_repo = contract_specification_repo
    self.call_dependencies_repo = call_dependencies_repo
    self.call_dependents_repo = call_dependents_repo
    self.call_leafs_repo = call_leafs_repo
    self.call_requirement_repo = call_requirement_repo
    subscribe(self.contract_specification_created, self.generate_dependency_graph)
    subscribe(self.call_requirement_created, self.limit_calls)
    self.total_calls = defaultdict(int)
    self.max_dependency_graph_size = max_dependency_graph_size
    self.dsl_classes = dsl_classes
def test_trigger_backlog_is_done_event_when_all_items_are_done(self):
    received_backlog_is_done_events = []
    backlog = Given \
        .product_backlog() \
        .with_item("US1", "A") \
        .please()
    backlog.items()[0].tasks()[0].done()
    scrum_team = Given.scrum_team().please()
    subscribe(
        lambda e: received_backlog_is_done_events.extend(e),
        predicate=lambda events: all(
            isinstance(e, ProductBacklog.BacklogIsDone)
            and e.originator_id == backlog.id
            for e in events))
    scrum_team.__trigger_event__(ScrumTeam.DayWorkIsDone)
    self.assertEqual(1, len(received_backlog_is_done_events))
def __init__(self, name=None, policy=None, setup_table=False,
             tracking_record_manager_class=None,
             always_track_notifications=False, **kwargs):
    self.always_track_notifications = (
        always_track_notifications or self.always_track_notifications
    )
    self.tracking_record_manager_class = (
        tracking_record_manager_class or self.tracking_record_manager_class
    )
    self.policy_func = policy
    self.readers = OrderedDict()
    self.is_reader_position_ok = defaultdict(bool)
    super(ProcessApplication, self).__init__(name=name, setup_table=setup_table, **kwargs)
    # self._cached_entities = {}

    ## Prompts policy.
    #
    # 1. Publish prompts whenever domain events are published (important: after they are persisted).
    # 2. Run this process whenever an upstream application prompts followers to pull new notifications.
    subscribe(predicate=self.persistence_policy.is_event,
              handler=self.publish_prompt_from_event)
    subscribe(predicate=self.is_upstream_prompt,
              handler=self.run)
def start(self) -> None:
    assert len(self.processes) == 0, "Already running"

    # Construct the processes.
    for process_class in self.system.process_classes.values():
        self._construct_app_by_class(process_class)

    # Tell each process about the processes it follows.
    for followed_name, followers in self.system.followers.items():
        followed = self.processes[followed_name]
        followed_log = followed.notification_log
        for follower_name in followers:
            follower = self.processes[follower_name]
            follower.follow(followed_name, followed_log)

    # Subscribe a handler to propagate prompts.
    subscribe(predicate=is_prompt, handler=self.handle_prompts)
def __init__(self, contract_valuation_repo, call_link_repo,
             call_dependencies_repo, call_requirement_repo, call_result_repo,
             simulated_price_repo, market_simulation_repo, call_leafs_repo,
             call_evaluation_queue, result_counters, usage_counters,
             call_dependents_repo, perturbation_dependencies_repo,
             simulated_price_requirements_repo):
    assert isinstance(contract_valuation_repo, ContractValuationRepository), contract_valuation_repo
    assert isinstance(call_link_repo, CallLinkRepository), call_link_repo
    assert isinstance(call_dependencies_repo, CallDependenciesRepository), call_dependencies_repo
    assert isinstance(call_requirement_repo, CallRequirementRepository), call_requirement_repo
    assert isinstance(call_result_repo, CallResultRepository), call_result_repo
    assert isinstance(simulated_price_repo, SimulatedPriceRepository), simulated_price_repo
    assert isinstance(market_simulation_repo, MarketSimulationRepository), market_simulation_repo
    assert isinstance(call_dependents_repo, CallDependentsRepository), call_dependents_repo
    assert isinstance(perturbation_dependencies_repo, PerturbationDependenciesRepo), perturbation_dependencies_repo
    assert isinstance(simulated_price_requirements_repo, SimulatedPriceRequirementsRepo), simulated_price_requirements_repo
    # assert isinstance(result_counters, dict), result_counters

    self.contract_valuation_repo = contract_valuation_repo
    self.call_link_repo = call_link_repo
    self.call_dependencies_repo = call_dependencies_repo
    self.call_requirement_repo = call_requirement_repo
    self.call_result_repo = call_result_repo
    self.simulated_price_repo = simulated_price_repo
    self.market_simulation_repo = market_simulation_repo
    self.call_leafs_repo = call_leafs_repo
    self.call_evaluation_queue = call_evaluation_queue
    self.result_counters = result_counters
    self.usage_counters = usage_counters
    self.call_dependents_repo = call_dependents_repo
    self.perturbation_dependencies_repo = perturbation_dependencies_repo
    self.simulated_price_dependencies_repo = simulated_price_requirements_repo

    subscribe(self.contract_valuation_created, self.generate_contract_valuation)
def start(self) -> None:
    if len(self.processes):
        raise ProgrammingError("Already running")

    # Construct the processes.
    for process_class in self.system.process_classes.values():
        self._construct_app_by_class(process_class)

    # Tell each process which other processes to follow.
    for downstream_name, upstream_names in self.system.upstream_names.items():
        downstream_process = self.processes[downstream_name]
        for upstream_name in upstream_names:
            upstream_process = self.processes[upstream_name]
            upstream_log = upstream_process.notification_log
            downstream_process.follow(upstream_name, upstream_log)

    # Subscribe a handler to propagate prompts.
    subscribe(predicate=is_prompt_to_pull, handler=self.handle_prompt)
def test_publish_subscribe_unsubscribe(self):
    # Check subscribing event handlers with predicates.
    # - when predicate is True, handler should be called
    event = mock.Mock()
    predicate = mock.Mock()
    handler = mock.Mock()

    # Check we can assert there are no event handlers subscribed.
    assert_event_handlers_empty()

    # When predicate is True, handler should be called ONCE.
    subscribe(handler=handler, predicate=predicate)

    # Check we can assert there are event handlers subscribed.
    self.assertRaises(EventHandlersNotEmptyError, assert_event_handlers_empty)

    # Check what happens when an event is published.
    publish(event)
    predicate.assert_called_once_with(event)
    handler.assert_called_once_with(event)

    # When predicate is True, after unsubscribing, handler should NOT be called again.
    unsubscribe(handler=handler, predicate=predicate)
    publish(event)
    predicate.assert_called_once_with(event)
    handler.assert_called_once_with(event)

    # Check we can assert there are no event handlers subscribed.
    assert_event_handlers_empty()

    # When predicate is False, handler should NOT be called.
    predicate = lambda x: False
    handler = mock.Mock()
    subscribe(handler=handler, predicate=predicate)
    publish(event)
    self.assertEqual(0, handler.call_count)

    # Unsubscribe.
    unsubscribe(handler=handler, predicate=predicate)

    # Check we can assert there are no event handlers subscribed.
    assert_event_handlers_empty()
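# A minimal non-test sketch of the same publish/subscribe/unsubscribe cycle
# exercised by the test above, using the old-style import path that appears
# elsewhere in these examples (treat the path as an assumption for current
# library versions; a plain string stands in for a domain event here):
from eventsourcing.domain.model.events import publish, subscribe, unsubscribe

received = []

def is_interesting(event):
    # Predicate: decide whether the handler should see this event.
    return True

def collect(event):
    received.append(event)

subscribe(handler=collect, predicate=is_interesting)
try:
    publish("an event")  # collect() is called because the predicate returns True.
finally:
    unsubscribe(handler=collect, predicate=is_interesting)

assert received == ["an event"]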
def __init__(self, call_result_repo, call_evaluation_queue=None):
    self.call_result_repo = call_result_repo
    self.call_evaluation_queue = call_evaluation_queue
    self.result = {}
    self.dependents = {}
    self.dependencies = {}
    self.outstanding_dependents = {}
    self.outstanding_dependencies = {}
    subscribe(self.is_call_dependencies_created, self.cache_dependencies)
    subscribe(self.is_call_dependents_created, self.cache_dependents)
    subscribe(self.is_call_result_created, self.cache_result)
    subscribe(self.is_call_result_discarded, self.purge_result)
def test_version_6():
    # Get subscriber ready
    from eventsourcing.domain.model.events import subscribe
    global events
    subscribe(handler=add_to_event_record, predicate=is_company_event)

    # Set up
    new_company = Company.__create__(name="Rocinante Limited")
    new_company.create_share_class(name="ordinary", nominal_value=0.0001)
    initial_shareholders = [
        "James Holden",
        "Amos Burton",
        "Naomi Nagata",
        "Alex Kamal",
    ]
    for person in initial_shareholders:
        new_company.add_new_shareholder(
            shareholder_name=person,
            number_of_shares=2500,
            share_class=new_company.share_classes[0])
    new_company.__save__()

    # Check that the company has a newly created ordinary share class
    assert new_company.share_classes[0].name == "ordinary"
    assert new_company.share_classes[0].nominal_value == 0.0001

    # Check our shareholders
    assert new_company.name == "Rocinante Limited"
    shareholder_names = [sh.name for sh in new_company.shareholders]
    assert "James Holden" in shareholder_names
    assert "Naomi Nagata" in shareholder_names
    assert len(new_company.shareholders) == 4

    # Check that James Holden has 2500 ordinary shares of £0.0001 each
    assert new_company.shareholders[0].shares_held[0].number == 2500
    assert new_company.shareholders[0].shares_held[0].share_class.name == "ordinary"
    assert new_company.shareholders[0].shares_held[0].share_class.nominal_value == 0.0001

    # Check that our event log has some stuff in it
    assert len(events) == 6

    return new_company
def start(self):
    """
    Starts all the actors to run a system of process applications.
    """
    # Subscribe to broadcast prompts published by a process
    # application in the parent operating system process.
    subscribe(handler=self.forward_prompt, predicate=self.is_prompt)

    # Initialise the system actor.
    command = SystemInitRequest(self.system.followings, self.pipeline_ids)
    response = self.actor_system.ask(self.system_actor, command)

    # Keep the pipeline actor addresses, to send prompts directly.
    if isinstance(response, PoisonMessage):
        raise Exception(
            "Got a poison message after init ask: {}".format(response))
    self.pipeline_actors = response.pipeline_actors
    if list(self.pipeline_actors.keys()) != self.pipeline_ids:
        raise ValueError(
            "Given pipeline IDs mismatch initialised system {} {}".format(
                list(self.pipeline_actors.keys()), self.pipeline_ids))
def __init__(self, **kwargs):
    super(ProductBacklog, self).__init__(**kwargs)
    self._items = []
    subscribe(self.task_is_done, self._is_task_is_done_event)
    subscribe(self.remove_done_items, self._is_day_work_is_done_event)
    subscribe(self.check_if_backlog_is_done, self._is_day_work_is_done_event)
def init(self, upstream_processes: dict, downstream_processes: dict) -> None:
    """
    Initialise with actor handles for upstream and downstream processes.

    Needs to happen after construction so that all the handles exist.
    """
    self.upstream_processes = upstream_processes
    self.downstream_processes = downstream_processes

    # Subscribe to broadcast prompts published by the process application.
    subscribe(handler=self._enqueue_prompt_to_pull, predicate=is_prompt_to_pull)

    # Construct process application object.
    process_class = self.application_process_class
    if not issubclass(process_class, ApplicationWithConcreteInfrastructure):
        if self.infrastructure_class:
            process_class = process_class.mixin(self.infrastructure_class)
        else:
            raise ProgrammingError("infrastructure_class is not set")

    def construct_process():
        return process_class(pipeline_id=self.pipeline_id,
                             setup_table=self.setup_tables)

    self.process = self.do_db_job(construct_process, (), {})
    assert isinstance(self.process, ProcessApplication), self.process
    # print(getpid(), "Created application process: %s" % self.process)

    for upstream_name, ray_notification_log in self.upstream_processes.items():
        # Make the process follow the upstream notification log.
        self.process.follow(upstream_name, ray_notification_log)

    self._reset_positions()
    self.positions_initialised.set()
def test_attribute(self):
    # Check we get an error when called with something other than a function.
    self.assertRaises(ProgrammingError, attribute, 'not a getter')
    self.assertRaises(ProgrammingError, attribute, 123)
    self.assertRaises(ProgrammingError, attribute, None)

    # Call the decorator with a function.
    getter = lambda: None
    p = attribute(getter)

    # Check we got a property object.
    self.assertIsInstance(p, property)

    # Check the property object has both setter and getter functions.
    self.assertTrue(p.fset)
    self.assertTrue(p.fget)

    # Pretend we decorated an object.
    entity_id = uuid4()
    o = VersionedEntity(originator_id=entity_id, originator_version=0)
    o.__dict__['_<lambda>'] = 'value1'

    # Call the property's getter function.
    value = p.fget(o)
    self.assertEqual(value, 'value1')

    # Call the property's setter function.
    p.fset(o, 'value2')

    # Check the attribute has changed.
    value = p.fget(o)
    self.assertEqual(value, 'value2')

    # Check the property's getter function isn't the getter function we passed in.
    self.assertNotEqual(p.fget, getter)

    # Define a class that uses the decorator.
    class Aaa(VersionedEntity):
        "An event sourced entity."

        def __init__(self, a, *args, **kwargs):
            super(Aaa, self).__init__(*args, **kwargs)
            self._a = a

        @attribute
        def a(self):
            "A mutable event sourced property."

    # Instantiate the class and check that assigning to the property
    # publishes an event and updates the object state.
    published_events = []
    subscription = (lambda x: True, lambda x: published_events.append(x))
    subscribe(*subscription)
    entity_id = uuid4()
    try:
        aaa = Aaa(originator_id=entity_id, originator_version=1, a=1)
        self.assertEqual(aaa.a, 1)
        aaa.a = 'value1'
        self.assertEqual(aaa.a, 'value1')
    finally:
        unsubscribe(*subscription)

    # Check an event was published.
    self.assertEqual(len(published_events), 1)

    # Check the published event was an AttributeChanged event,
    # with the expected attribute values.
    published_event = published_events[0]
    self.assertIsInstance(published_event, AttributeChanged)
    self.assertEqual(published_event.name, '_a')
    self.assertEqual(published_event.value, 'value1')
    self.assertTrue(published_event.originator_version, 1)
    self.assertEqual(published_event.originator_id, entity_id)
def __init__(self, event_store, event_type=None):
    assert isinstance(event_store, AbstractEventStore), type(event_store)
    self.event_store = event_store
    self.event_type = event_type
    subscribe(self.store_event, self.is_event)
def __init__(self, event_store):
    assert isinstance(event_store, AbstractEventStore)
    self.event_store = event_store
    subscribe(self.is_domain_event, self.store_domain_event)
def wrap(handler_func):
    subscribe(handler_func, event_type_predicate)
    return handler_func
def subscribe(self):
    subscribe(self.is_call_requirement_created, self.print_compilation_progress)
    subscribe(self.is_calculating, self.check_is_timed_out)
    subscribe(self.is_calculating, self.check_is_interrupted)
    subscribe(self.is_evaluation_complete, self.set_is_finished)
    subscribe(self.is_result_value_computed, self.inc_result_value_computed_count)
    subscribe(self.is_result_value_computed, self.print_evaluation_progress)
    subscribe(self.is_call_result_created, self.inc_call_result_count)
    subscribe(self.is_call_result_created, self.print_evaluation_progress)
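# A sketch of the matching unsubscribe() for the tracker above, releasing
# each (predicate, handler) pair one-for-one. This is assumed as the natural
# counterpart, not taken from the original source:
def unsubscribe(self):
    unsubscribe(self.is_call_requirement_created, self.print_compilation_progress)
    unsubscribe(self.is_calculating, self.check_is_timed_out)
    unsubscribe(self.is_calculating, self.check_is_interrupted)
    unsubscribe(self.is_evaluation_complete, self.set_is_finished)
    unsubscribe(self.is_result_value_computed, self.inc_result_value_computed_count)
    unsubscribe(self.is_result_value_computed, self.print_evaluation_progress)
    unsubscribe(self.is_call_result_created, self.inc_call_result_count)
    unsubscribe(self.is_call_result_created, self.print_evaluation_progress)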
def setUp(self):
    assert_event_handlers_empty()
    self.published_events = []
    self.subscription = (lambda x: True,
                         lambda x: self.published_events.append(x))
    subscribe(*self.subscription)
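# The counterpart tearDown implied by the setUp above: release the same
# (predicate, handler) pair and confirm nothing is left subscribed. A
# sketch, not the original test's code:
def tearDown(self):
    unsubscribe(*self.subscription)
    assert_event_handlers_empty()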