def test_multiprocessing_multiapp_system(self):
    """Run the orders-reservations-payments system across OS processes.

    An order is created before the runner starts; the downstream
    applications (running in forked processes) must then reserve
    and pay it.
    """
    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    self.set_db_uri()

    # Create a new order up front and check it was stored.
    with system.construct_app(Orders) as app:
        order_id = create_new_order()
        assert order_id in app.repository

    self.close_connections_before_forking()

    with MultiprocessRunner(system):
        with system.construct_app(Orders) as app:
            # Poll until the order is reserved, then paid, within a
            # shared retry budget.
            attempts_left = 50
            while not app.repository[order_id].is_reserved:
                sleep(0.1)
                attempts_left -= 1
                assert attempts_left, "Failed set order.is_reserved"
            while attempts_left and not app.repository[order_id].is_paid:
                sleep(0.1)
                attempts_left -= 1
            assert attempts_left, "Failed set order.is_paid"
def setUp(self):
    # Point the applications at the test database.
    set_db_uri()
    # Build the orders system under test: reservations and payments
    # both feed back into orders.
    pipeline = Orders | Reservations | Orders | Payments | Orders
    self.system = System(
        pipeline,
        infrastructure_class=self.infrastructure_class,
    )
def test_not_infrastructure_class_exception(self):
    """Constructing an app with a non-application class raises ProgrammingError."""
    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
        setup_tables=True,
        infrastructure_class=TestCase,  # deliberately not an application class
    )
    with self.assertRaises(ProgrammingError):
        system.construct_app(Orders)
def test_construct_app(self):
    """Each application class in the system can be constructed."""
    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    # Construct each application in turn and check its type.
    for app_class in (Orders, Payments, Reservations):
        with system.construct_app(app_class) as app:
            self.assertIsInstance(app, app_class)
def test_multithreaded_runner_with_multiapp_system(self):
    """Process a batch of orders with the multi-threaded runner."""
    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    self.set_db_uri()
    with MultiThreadedRunner(system) as runner:
        orders = runner.get(Orders)
        started = time()

        # Create a batch of new orders.
        num_orders = 10
        order_ids = [create_new_order() for _ in range(num_orders)]

        # Poll until every order has been paid, within a shared budget.
        retries = num_orders
        for order_id in order_ids:
            while not orders.repository[order_id].is_paid:
                sleep(0.5)
                retries -= 1
                assert retries, "Failed set order.is_paid"

        print(f"Duration: { time() - started :.4f}s")
def test_multithreaded_runner_with_single_pipe(self):
    """An Orders -> Reservations pipe works under the threaded runner."""
    system = System(
        Orders | Reservations,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    self.set_db_uri()
    with MultiThreadedRunner(system) as runner:
        order_id = create_new_order()
        orders_repo = runner.get(Orders).repository
        self.assertEqual(orders_repo[order_id].id, order_id)

        reservations_repo = runner.get(Reservations).repository
        reservation_id = Reservation.create_reservation_id(order_id)

        # The reservation is made asynchronously, so retry for a while.
        patience = 10
        while True:
            try:
                self.assertEqual(
                    reservations_repo[reservation_id].order_id, order_id
                )
            except (RepositoryKeyError, AssertionError):
                if not patience:
                    raise
                patience -= 1
                sleep(0.1)
            else:
                break
def test_multithreaded_runner_with_singleapp_system(self):
    """A single application following itself moves its aggregates on."""
    system = System(
        Examples | Examples,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    self.set_db_uri()
    with MultiThreadedRunner(system) as runner:
        app = runner.get(Examples)
        aggregate = ExampleAggregate.__create__()
        aggregate.__save__()
        assert aggregate.id in app.repository

        # Wait for the application's policy to move the aggregate on.
        remaining = 50
        while not app.repository[aggregate.id].is_moved_on:
            sleep(0.1)
            remaining -= 1
            assert remaining, "Failed to move"
def test_multiprocessing_singleapp_system(self):
    """A self-following application works across OS processes."""
    system = System(
        Examples | Examples,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    self.set_db_uri()
    self.close_connections_before_forking()
    with MultiprocessRunner(system) as runner:
        examples = runner.get(Examples)
        aggregate = ExampleAggregate.__create__()
        aggregate.__save__()
        assert aggregate.id in examples.repository

        # Wait for the policy (in a forked process) to move the aggregate on.
        remaining = 50
        while not examples.repository[aggregate.id].is_moved_on:
            sleep(0.1)
            remaining -= 1
            assert remaining, "Failed to move"
def setUp(self) -> None:
    # Start a single-threaded runner for the booking system, backed by
    # SQLAlchemy, then wrap it in a local test client.
    booking_system = System(BookingApplication)
    self.runner = SingleThreadedRunner(
        system=booking_system,
        infrastructure_class=SQLAlchemyApplication,
        setup_tables=True,
    )
    self.runner.start()
    self.client = LocalClient(self.runner)
def test_multithreaded_runner_with_single_application_class(self):
    """A one-application system runs under the multi-threaded runner."""
    system = System(
        Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    with MultiThreadedRunner(system) as runner:
        orders_app = runner.get(Orders)
        new_order_id = orders_app.create_new_order()
        # The order is immediately retrievable from the repository.
        self.assertEqual(orders_app.repository[new_order_id].id, new_order_id)
def test_bind(self):
    """System.bind() attaches infrastructure, and only once."""
    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
        setup_tables=True,
    )
    # An unbound system has no infrastructure class.
    self.assertIsNone(system.infrastructure_class)

    # Binding yields a system with infrastructure attached.
    bound_system = system.bind(self.infrastructure_class)
    self.assertIsNotNone(bound_system.infrastructure_class)

    # Rebinding an already-bound system is a programming error.
    with self.assertRaises(ProgrammingError):
        bound_system.bind(self.infrastructure_class)
def test_singlethreaded_runner_with_single_application_class(self):
    """A one-application system runs under the default runner."""
    system = System(
        Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    with system as runner:
        orders_app = runner.get(Orders)
        new_order_id = orders_app.create_new_order()
        # The order is immediately retrievable from the repository.
        self.assertEqual(orders_app.repository[new_order_id].id, new_order_id)
def test_multiprocess_runner_with_single_application_class(self):
    """A one-application system runs under the multiprocess runner."""
    system = System(
        Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    self.set_db_uri()
    self.close_connections_before_forking()
    with MultiprocessRunner(system) as runner:
        orders_app = runner.get(Orders)
        new_order_id = orders_app.create_new_order()
        # The order is immediately retrievable from the repository.
        self.assertEqual(orders_app.repository[new_order_id].id, new_order_id)
def test_can_run_if_already_running(self):
    """Entering a running system, or its runner, twice is an error."""
    system = System(
        Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    # A system cannot be re-entered while it is already running.
    with system:
        with self.assertRaises(ProgrammingError):
            with system:
                pass
    # Nor can the runner it yields.
    with system as runner:
        with self.assertRaises(ProgrammingError):
            with runner:
                pass
def test_singlethreaded_runner_with_single_pipe(self):
    """Orders piped to Reservations under the single-threaded runner."""
    system = System(
        Orders | Reservations,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    with system as runner:
        orders_app = runner.get(Orders)
        new_order_id = orders_app.create_new_order()
        self.assertEqual(orders_app.repository[new_order_id].id, new_order_id)

        # The single-threaded runner processes the pipe synchronously,
        # so the reservation already exists.
        expected_reservation_id = Reservation.create_reservation_id(new_order_id)
        reservations_repo = runner.get(Reservations).repository
        self.assertEqual(
            reservations_repo[expected_reservation_id].order_id, new_order_id
        )
def test_singlethreaded_runner_with_multiapp_system(self):
    """The full orders system processes an order synchronously."""
    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    with system as runner:
        # Create a new Order aggregate.
        order_id = create_new_order()
        # With the single-threaded runner, downstream processing has
        # already happened by the time create_new_order() returns.
        orders_repo = runner.get(Orders).repository
        assert orders_repo[order_id].is_reserved
        assert orders_repo[order_id].is_paid
def test(self):
    """End-to-end throughput test of the orders system over the gRPC runner.

    Starts two pipelines, creates 2000 orders on each, then waits
    (woken by push prompts) until a final order is paid, printing the
    startup time and overall orders-per-second rate.
    """
    timer_started = datetime.now()
    runner = GrpcRunner(
        System(Orders | Reservations | Orders | Payments | Orders),
        pipeline_ids=[0, 1],
        infrastructure_class=PopoApplication,
        setup_tables=True,
        push_prompt_interval=0.1,
    )
    with runner:
        # One Orders client per pipeline.
        orders_client_0 = runner.get(Orders, 0)
        orders_client_1 = runner.get(Orders, 1)
        # Listen for "orders" prompts so the wait loop below wakes as
        # soon as new notifications are pushed, instead of busy-polling.
        listener_0 = runner.listen("test", [orders_client_0.client])
        listener_1 = runner.listen("test", [orders_client_1.client])
        startup_duration = (datetime.now() - timer_started).total_seconds()
        print("Start duration: %ss" % (startup_duration))
        num_orders = 2000
        order_ids = []
        timer_started = datetime.now()
        # Interleave order creation across the two pipelines, so that
        # order_ids[-2] is the last pipeline-0 order and order_ids[-1]
        # is the last pipeline-1 order.
        for i in range(num_orders):
            order_id = orders_client_0.create_new_order()
            assert isinstance(order_id, UUID)
            order_ids.append(order_id)
            order_id = orders_client_1.create_new_order()
            assert isinstance(order_id, UUID)
            order_ids.append(order_id)
        started = datetime.now()
        # NOTE(review): this loop exits as soon as EITHER pipeline's last
        # order is paid — it does not wait for both pipelines to finish.
        # Confirm that is the intended completion condition.
        while True:
            if orders_client_0.is_order_paid(order_ids[-2]):
                break
            elif listener_0.prompt_events["orders"].wait(timeout=.1):
                listener_0.prompt_events["orders"].clear()
            elif (datetime.now() - started).total_seconds() > num_orders:
                self.fail("Timed out waiting for orders to be paid")
            if orders_client_1.is_order_paid(order_ids[-1]):
                break
            elif listener_1.prompt_events["orders"].wait(timeout=.1):
                listener_1.prompt_events["orders"].clear()
            elif (datetime.now() - started).total_seconds() > num_orders:
                self.fail("Timed out waiting for orders to be paid")
        print("Orders per second: %d" % (
            len(order_ids) / (datetime.now() - timer_started).total_seconds()))
def test_singlethreaded_runner_with_direct_query(self):
    """The multiapp system also works with direct queries enabled."""
    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
        use_direct_query_if_available=True,
    )
    with system as runner:
        orders_app = runner.get(Orders)
        # Create a new Order aggregate; the single-threaded runner
        # processes the downstream applications synchronously.
        order_id = create_new_order()
        assert orders_app.repository[order_id].is_reserved
        assert orders_app.repository[order_id].is_paid
def test_clocked_multithreaded_runner_with_multiapp_system(self):
    """Run the multiapp system with a clocked multi-threaded runner."""
    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    self.set_db_uri()
    clock_speed = 10
    with MultiThreadedRunner(system, clock_speed=clock_speed) as runner:
        started = time()

        # Create a batch of new orders.
        num_orders = 10
        order_ids = [create_new_order() for _ in range(num_orders)]

        # Poll until every order has been paid, within a shared budget.
        retries = 30 * num_orders
        num_completed = 0
        for order_id in order_ids:
            app = runner.get(Orders)
            while not app.repository[order_id].is_paid:
                sleep(0.1)
                retries -= 1
                assert retries, (
                    "Failed set order.is_paid (after %s completed)"
                    % num_completed
                )
            num_completed += 1

        print(f"Duration: { time() - started :.4f}s")
from eventsourcing.system.definition import System
from peto.application.households import HouseholdsApplication
from peto.application.people import PeopleApplication
# NOTE(review): the module path is spelt "quanrantines" — this matches the
# package on disk, but looks like a typo ("quarantines"); confirm upstream.
from peto.application.quanrantines import QuarantinesApplication
from peto.application.samples import BatchesApplication, SamplesApplication

# Wire the applications into an event-driven system with two pipes:
# presumably People feed Households, and Batches feed Samples, which
# feed Quarantines (direction to be confirmed against System docs).
system = System(
    PeopleApplication | HouseholdsApplication,
    BatchesApplication | SamplesApplication | QuarantinesApplication,
)
class TestActors(unittest.TestCase):
    """Runs the orders system with the Thespian actor-model runner."""

    # Infrastructure used to construct the applications.
    infrastructure_class = SQLAlchemyApplication

    def setUp(self):
        # Set environment.
        set_db_uri()
        # Define system.
        self.system = System(
            Orders | Reservations | Orders | Payments | Orders,
            infrastructure_class=self.infrastructure_class,
        )

    def test_simple_system_base(self):
        # Run with Thespian's default "simple system" base.
        start_actor_system()
        self.check_actors()

    @skip("Having trouble running Thespian's 'multiproc tcp base'")
    def test_multiproc_tcp_base(self):
        start_multiproc_tcp_base_system()
        self.check_actors()

    def close_connections_before_forking(self):
        # Used for closing Django connection before multiprocessing module
        # forks the OS process. No-op here; subclasses may override.
        pass

    def check_actors(self, num_pipelines=3, num_orders_per_pipeline=5):
        """Create orders on each pipeline and wait for them to be processed.

        :param num_pipelines: number of pipeline ids to spread orders over.
        :param num_orders_per_pipeline: orders created per pipeline.
        """
        pipeline_ids = list(range(num_pipelines))
        self.close_connections_before_forking()
        actors = ActorModelRunner(
            self.system, pipeline_ids=pipeline_ids, shutdown_on_close=True
        )

        # Todo: Use wakeupAfter() to poll for new notifications (see Timer Messages).

        order_ids = []
        with self.system.construct_app(Orders, setup_table=True) as app, actors:
            # Create some new orders, round-robin across the pipelines.
            for _ in range(num_orders_per_pipeline):
                for pipeline_id in pipeline_ids:
                    app.change_pipeline(pipeline_id)
                    order_id = create_new_order()
                    order_ids.append(order_id)

            # Wait for orders to be reserved and paid, within a retry
            # budget shared by all orders.
            retries = 20 + 10 * num_orders_per_pipeline * len(pipeline_ids)
            for i, order_id in enumerate(order_ids):
                while not app.repository[order_id].is_reserved:
                    time.sleep(0.1)
                    retries -= 1
                    assert retries, "Failed set order.is_reserved {} ({})".format(
                        order_id, i
                    )
                while retries and not app.repository[order_id].is_paid:
                    time.sleep(0.1)
                    retries -= 1
                assert retries, "Failed set order.is_paid ({})".format(i)

            # Calculate timings from event timestamps.
            orders = [app.repository[oid] for oid in order_ids]
            first_timestamp = min([o.__created_on__ for o in orders])
            last_timestamp = max([o.__last_modified__ for o in orders])
            duration = last_timestamp - first_timestamp
            rate = len(order_ids) / float(duration)
            period = 1 / rate
            print(
                "Orders system processed {} orders in {:.3f}s at rate of {:.1f} "
                "orders/s, {:.3f}s each".format(len(order_ids), duration, rate, period)
            )

            # Print min, average, max duration.
            durations = [o.__last_modified__ - o.__created_on__ for o in orders]
            print("Min order processing time: {:.3f}s".format(min(durations)))
            print("Mean order processing time: {:.3f}s".format(
                sum(durations) / len(durations)))
            print("Max order processing time: {:.3f}s".format(max(durations)))

    def tearDown(self):
        # Unset environment.
        try:
            del os.environ["DB_URI"]
        except KeyError:
            pass
        try:
            # Shutdown base actor system.
            shutdown_actor_system()
        finally:
            # Clear event handlers.
            try:
                assert_event_handlers_empty()
            finally:
                clear_event_handlers()
def test_stepping_singlethreaded_runner_with_multiapp_system(self):
    """Drive the multiapp system with the stepping (clocked) runner.

    Orders are scheduled on future clock ticks via call_in_future();
    the test then polls until the last order is paid, checking the
    clock thread stays alive, and prints clock/CPU statistics.
    """
    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    self.set_db_uri()
    normal_speed = 3
    scale_factor = 0
    runner = SteppingSingleThreadedRunner(
        system=system,
        normal_speed=normal_speed,
        scale_factor=scale_factor,
        # is_verbose=True,
    )
    start_time = time()
    start_process_time = process_time()
    with runner:
        orders = runner.get(Orders)

        # Schedule creation of a new order every 20 ticks.
        num_orders = 120
        order_ids = []
        for i in range(num_orders):
            # NOTE(review): cmd closes over order_ids (shared list), and is
            # re-defined each iteration; each scheduled call appends one id.
            def cmd():
                order_id = create_new_order()
                assert order_id in orders.repository
                order_ids.append(order_id)

            runner.call_in_future(cmd, 20 * (i + 1))
            # sleep(0.1)

        # sleep(tick_interval / 3)
        # sleep(tick_interval * 10)

        # Wait for all scheduled orders to have been created.
        retries = 10 * num_orders
        while len(order_ids) < num_orders:
            sleep(0.01)

        # Only the last order is polled for payment; earlier orders are
        # assumed processed by then.
        for order_id in order_ids[-1:]:
            # while not orders.repository[order_id].is_reserved:
            #     sleep(0.1)
            #     retries -= 1
            #     assert retries, "Failed set order.is_reserved"

            while retries and not orders.repository[order_id].is_paid:
                if not runner.clock_thread.is_alive():
                    self.fail("Clock thread died")
                # else:
                #     print("clock thread is alive")
                # sleep(1)
                sleep(0.2)
                retries -= 1

    # After the runner exits, its clock thread must have stopped.
    if runner.clock_thread.is_alive():
        self.fail("Clock thread still alive")

    # Report simulated time, tick count, wall-clock duration, and CPU use.
    final_time = runner.clock_thread.tick_count / runner.normal_speed
    print(
        f"Runner: average clock speed {runner.clock_thread.actual_clock_speed:.1f}Hz"
    )
    print(f"Runner: total tick count {runner.clock_thread.tick_count}")
    print(f"Runner: total time in simulation {final_time:.2f}s")
    elapsed_time = time() - start_time
    print(f"Duration: { elapsed_time :.4f}s")
    execution_time = process_time() - start_process_time
    print(f"CPU: { 100 * execution_time / elapsed_time :.2f}%")

    assert retries, "Failed set order.is_paid"
    sleep(0.001)
@policy.register(example.domain.Order.Created)
def _set_order_id(self, repository, event):
    # When an order is created, record its id on the command that caused it.
    cmd = repository[event.command_id]
    cmd.order_id = event.originator_id

@policy.register(example.domain.Order.Paid)
def _set_done(self, repository, event):
    # When an order is paid, mark the originating command as done.
    cmd = repository[event.command_id]
    cmd.done()

# TODO: put these comments where they need to go.
# -> spoiler, not here.
# Some thoughts from wapi:
# Maybe it makes sense to have one instance of Commands application
# for each "Web worker" (instance of FastAPI), and then have the "Core"
# be independent of that, for example with thespian.
# The question is then what to do with Reporting type processes?
# They want to expose through FastAPI, and ideally update their state
# close to that?
# /!\ Reply from john
# - multiple web app instances: simplest thing is each has a command
#   application writing to the same 'pipeline id', might help to have
#   multiple pipelines reducing contention on writing log sequences.
# - assuming one pipeline id: one instance of each downstream process
#   application process. Like micro services except the call is a prompt.
# - multiple pipelines adds another level of complexity.

# Commands drive Orders; reservations and payments feed back into Orders.
system = System(Commands | Orders | Commands,
                Orders | Reservations | Orders,
                Orders | Payments | Orders)
def test_multipipeline_multiprocessing_multiapp(self):
    """Run the orders system across OS processes with multiple pipelines.

    Creates orders round-robin over the pipelines, waits for each to be
    reserved and paid, then reports processing-time statistics derived
    from the aggregates' event timestamps.
    """
    self.set_db_uri()
    system = System(
        (Orders, Reservations, Orders, Payments, Orders),
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    num_pipelines = 2
    pipeline_ids = range(num_pipelines)
    multiprocess_runner = MultiprocessRunner(system, pipeline_ids=pipeline_ids)
    num_orders_per_pipeline = 5
    order_ids = []
    self.close_connections_before_forking()

    # Start multiprocessing system.
    with multiprocess_runner:
        # Create some new orders, switching pipeline for each one.
        for _ in range(num_orders_per_pipeline):
            orders = multiprocess_runner.get(Orders)
            for pipeline_id in pipeline_ids:
                orders.change_pipeline(pipeline_id)
                order_id = create_new_order()
                order_ids.append(order_id)
                sleep(0.05)

        # Wait for orders to be reserved and paid, within a retry budget
        # shared by all orders.
        retries = 10 + 10 * num_orders_per_pipeline * len(pipeline_ids)
        for i, order_id in enumerate(order_ids):
            while not orders.repository[order_id].is_reserved:
                sleep(0.1)
                retries -= 1
                assert retries, "Failed set order.is_reserved {} ({})".format(
                    order_id, i
                )
            while retries and not orders.repository[order_id].is_paid:
                sleep(0.1)
                retries -= 1
            assert retries, "Failed set order.is_paid ({})".format(i)

        # Calculate timings from event timestamps.
        order_aggregates = [orders.repository[oid] for oid in order_ids]
        first_timestamp = min([o.__created_on__ for o in order_aggregates])
        last_timestamp = max([o.__last_modified__ for o in order_aggregates])
        duration = last_timestamp - first_timestamp
        rate = len(order_ids) / float(duration)
        period = 1 / rate
        print(
            "Orders system processed {} orders in {:.3f}s at rate of {:.1f} "
            "orders/s, {:.3f}s each".format(len(order_ids), duration, rate, period)
        )

        # Print min, average, max duration.
        durations = [
            o.__last_modified__ - o.__created_on__ for o in order_aggregates
        ]
        print("Min order processing time: {:.3f}s".format(min(durations)))
        print(
            "Mean order processing time: {:.3f}s".format(
                sum(durations) / len(durations)
            )
        )
        print("Max order processing time: {:.3f}s".format(max(durations)))