def test_multiprocessing_multiapp_system(self):
    """Run the orders/reservations/payments system with one OS process per app.

    Creates an order before forking, then polls (bounded by ``retries``)
    until the follower processes have marked it reserved and paid.
    """
    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    self.set_db_uri()

    with system.construct_app(Orders) as app:
        # Create a new order.
        order_id = create_new_order()
        # Check new order exists in the repository.
        assert order_id in app.repository

    self.close_connections_before_forking()

    with MultiprocessRunner(system):
        with system.construct_app(Orders) as app:
            # Both waits guard `retries` in the loop condition, so each
            # poll is bounded the same way (previously only the second
            # loop had the guard; the first relied on an in-loop assert).
            retries = 50
            while retries and not app.repository[order_id].is_reserved:
                sleep(0.1)
                retries -= 1
            assert retries, "Failed set order.is_reserved"

            while retries and not app.repository[order_id].is_paid:
                sleep(0.1)
                retries -= 1
            assert retries, "Failed set order.is_paid"
def test_multithreaded_runner_with_multiapp_system(self):
    """Process a batch of orders through the multi-threaded runner.

    Creates ``num_orders`` orders, then polls (bounded by ``retries``)
    until each one has been paid, printing the elapsed duration.
    """
    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    self.set_db_uri()

    with MultiThreadedRunner(system):
        started = time()
        orders = system.processes['orders']

        # Create new orders.
        num_orders = 10
        order_ids = [create_new_order() for _ in range(num_orders)]

        # Wait for each order to be paid; `retries` is a shared budget
        # across all orders, decremented on every poll.
        retries = num_orders
        for order_id in order_ids:
            while retries and not orders.repository[order_id].is_paid:
                sleep(0.1)
                retries -= 1
            assert retries, "Failed set order.is_paid"

        print(f"Duration: { time() - started :.4f}s")
def test_clocked_multithreaded_runner_with_multiapp_system(self):
    """Process a batch of orders with the runner's clock enabled.

    Same flow as the unclocked variant, but the runner ticks at
    ``clock_speed`` ticks per second. Removed commented-out sleeps and
    the unused ``tick_interval`` local they referenced.
    """
    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    self.set_db_uri()
    clock_speed = 10

    with MultiThreadedRunner(system, clock_speed=clock_speed):
        started = time()
        orders = system.processes['orders']

        # Create a new order.
        num_orders = 10
        order_ids = [create_new_order() for _ in range(num_orders)]

        # Shared retry budget across all orders; report how many orders
        # completed before the budget ran out.
        retries = 30 * num_orders
        num_completed = 0
        for order_id in order_ids:
            while retries and not orders.repository[order_id].is_paid:
                sleep(0.1)
                retries -= 1
            assert retries, "Failed set order.is_paid (after %s completed)" % num_completed
            num_completed += 1

        print(f"Duration: { time() - started :.4f}s")
def test_multiprocessing_multiapp_system(self):
    """Run the multi-app system using the (older) Multiprocess runner API.

    Creates an order up front, starts the multiprocessing system, then
    polls (bounded by ``retries``) until the order is reserved and paid.
    """
    set_db_uri()

    with Orders(setup_table=True) as app:
        # Create a new order.
        order_id = create_new_order()
        # Check new order exists in the repository.
        assert order_id in app.repository

    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
    )
    multiprocess = Multiprocess(system)

    # Start multiprocessing system.
    with multiprocess:
        with Orders() as app:
            # Guard `retries` in both loop conditions so each wait is
            # bounded the same way (previously only the second loop
            # had the guard).
            retries = 50
            while retries and not app.repository[order_id].is_reserved:
                time.sleep(0.1)
                retries -= 1
            assert retries, "Failed set order.is_reserved"

            while retries and not app.repository[order_id].is_paid:
                time.sleep(0.1)
                retries -= 1
            assert retries, "Failed set order.is_paid"
def setUp(self):
    """Point the applications at the test database and define the system."""
    # Configure the environment with the database URI.
    set_db_uri()
    # Orders feeds Reservations and Payments, each feeding back into Orders.
    pipeline = Orders | Reservations | Orders | Payments | Orders
    self.system = System(pipeline)
def test_singlethreaded_multiapp_system(self):
    """Drive the whole multi-app system synchronously in a single thread."""
    reservations_loop = Orders | Reservations | Orders
    payments_loop = Orders | Payments | Orders
    system = System(reservations_loop, payments_loop)
    with system:
        # Create new Order aggregate.
        order_id = create_new_order()
        # The synchronous runner propagates events immediately, so the
        # order should already be reserved and paid here.
        orders_repo = system.orders.repository
        assert orders_repo[order_id].is_reserved
        assert orders_repo[order_id].is_paid
def test_singlethreaded_runner_with_multiapp_system(self):
    """Drive the multi-app system synchronously via the default runner."""
    reservations_loop = Orders | Reservations | Orders
    payments_loop = Orders | Payments | Orders
    system = System(
        reservations_loop,
        payments_loop,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    with system:
        # Create new Order aggregate.
        order_id = create_new_order()
        # Events are processed synchronously, so the order is reserved
        # and paid by the time construction returns.
        orders_repo = system.processes['orders'].repository
        assert orders_repo[order_id].is_reserved
        assert orders_repo[order_id].is_paid
def test_multiprocessing_singleapp_system(self):
    """Run a single app that follows itself, in a separate OS process.

    Saves an aggregate, then polls (bounded by ``retries``) until the
    follower process has "moved it on".
    """
    system = System(
        Examples | Examples,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    self.set_db_uri()
    self.close_connections_before_forking()

    with MultiprocessRunner(system), system.construct_app(Examples) as app:
        aggregate = ExampleAggregate.__create__()
        aggregate.__save__()
        assert aggregate.id in app.repository

        # Check the aggregate is moved on. Guard `retries` in the loop
        # condition so the wait is bounded even if the follower never
        # processes the event (previously only an in-loop assert
        # bounded this wait).
        retries = 50
        while retries and not app.repository[aggregate.id].is_moved_on:
            sleep(0.1)
            retries -= 1
        assert retries, "Failed to move"
def test_multiprocessing_singleapp_system(self):
    """Run a self-following app with the (older) Multiprocess runner API.

    Saves an aggregate, then polls (bounded by ``retries``) until the
    follower process has "moved it on".
    """
    system = System(Examples | Examples, setup_tables=True)
    set_db_uri()

    with Examples() as app, Multiprocess(system):
        aggregate = ExampleAggregate.__create__()
        aggregate.__save__()
        assert aggregate.id in app.repository

        # Check the aggregate is moved on. Guard `retries` in the loop
        # condition so the wait is bounded (previously only an in-loop
        # assert bounded this wait).
        retries = 50
        while retries and not app.repository[aggregate.id].is_moved_on:
            time.sleep(0.1)
            retries -= 1
        assert retries, "Failed to move"
def test_multithreaded_runner_with_singleapp_system(self):
    """Run a self-following app inside the multi-threaded runner.

    Saves an aggregate, then polls (bounded by ``retries``) until the
    follower thread has "moved it on".
    """
    system = System(
        Examples | Examples,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    self.set_db_uri()

    with MultiThreadedRunner(system):
        app = system.processes['examples']
        aggregate = ExampleAggregate.__create__()
        aggregate.__save__()
        assert aggregate.id in app.repository

        # Check the aggregate is moved on. Guard `retries` in the loop
        # condition so the wait is bounded (previously only an in-loop
        # assert bounded this wait).
        retries = 50
        while retries and not app.repository[aggregate.id].is_moved_on:
            sleep(0.1)
            retries -= 1
        assert retries, "Failed to move"
def test_multipipeline_multiprocessing_multiapp(self):
    """Process orders across multiple pipelines with one OS process per app.

    Creates ``num_orders_per_pipeline`` orders on each pipeline, waits
    (bounded by a shared ``retries`` budget) for all of them to be
    reserved and paid, then prints throughput and latency statistics
    computed from the aggregates' event timestamps.
    """
    self.set_db_uri()
    system = System(
        (Orders, Reservations, Orders, Payments, Orders),
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )

    num_pipelines = 2
    pipeline_ids = range(num_pipelines)
    multiprocess = MultiprocessRunner(system, pipeline_ids=pipeline_ids)
    num_orders_per_pipeline = 5
    order_ids = []

    self.close_connections_before_forking()

    # Start multiprocessing system.
    with multiprocess, system.construct_app(Orders) as orders:
        # Create some new orders, alternating across the pipelines.
        for _ in range(num_orders_per_pipeline):
            for pipeline_id in pipeline_ids:
                orders.change_pipeline(pipeline_id)
                order_id = create_new_order()
                order_ids.append(order_id)
                sleep(0.05)

        # Wait for orders to be reserved and paid. Guard `retries` in
        # both loop conditions so each wait is bounded (previously only
        # the second loop had the guard).
        retries = 10 + 10 * num_orders_per_pipeline * len(pipeline_ids)
        for i, order_id in enumerate(order_ids):
            while retries and not orders.repository[order_id].is_reserved:
                sleep(0.1)
                retries -= 1
            assert retries, "Failed set order.is_reserved {} ({})".format(
                order_id, i)

            while retries and not orders.repository[order_id].is_paid:
                sleep(0.1)
                retries -= 1
            assert retries, "Failed set order.is_paid ({})".format(i)

        # Calculate timings from event timestamps.
        order_aggregates = [orders.repository[oid] for oid in order_ids]
        first_timestamp = min([o.__created_on__ for o in order_aggregates])
        last_timestamp = max([o.__last_modified__ for o in order_aggregates])
        duration = last_timestamp - first_timestamp
        rate = len(order_ids) / float(duration)
        period = 1 / rate
        print(
            "Orders system processed {} orders in {:.3f}s at rate of {:.1f} "
            "orders/s, {:.3f}s each".format(len(order_ids), duration, rate,
                                            period))

        # Print min, average, max duration.
        durations = [
            o.__last_modified__ - o.__created_on__ for o in order_aggregates
        ]
        print("Min order processing time: {:.3f}s".format(min(durations)))
        print("Mean order processing time: {:.3f}s".format(
            sum(durations) / len(durations)))
        print("Max order processing time: {:.3f}s".format(max(durations)))
from commands import Commands
from processes import ExecutionService
from eventsourcing.application.system import System

if __name__ == '__main__':
    # Commands issues a "create trade" command; ExecutionService executes
    # it and its result feeds back into Commands.
    pipeline = Commands | ExecutionService | Commands
    system = System(pipeline)
    with system:
        # Issue a "create trade" command and read back both the command
        # and the trade it produced.
        trade_id = system.commands.create_trade(1234)
        command = system.commands.repository[trade_id]
        executed_trade = system.executionservice.repository[command.trade_id]
def setUp(self):
    """Point the apps at the test database and define the system under test."""
    # Configure the environment with the database URI.
    set_db_uri()
    # Orders feeds Reservations and Payments, each feeding back into Orders.
    pipeline = Orders | Reservations | Orders | Payments | Orders
    self.system = System(pipeline,
                         infrastructure_class=self.infrastructure_class)
class TestActors(unittest.TestCase):
    """Runs the orders system with the actor-model runner.

    Each test starts an actor system, drives a batch of orders through
    multiple pipelines via ``check_actors()``, and shuts the actor
    system down in ``tearDown()``.
    """

    # Application infrastructure class used to construct each process app.
    infrastructure_class = SQLAlchemyApplication

    def setUp(self):
        # Set environment.
        set_db_uri()
        # Define system.
        self.system = System(Orders | Reservations | Orders | Payments | Orders,
                             infrastructure_class=self.infrastructure_class)

    def test_simple_system_base(self):
        # Start the (simple, in-process) actor system, then run the checks.
        start_actor_system()
        self.check_actors()

    @skip("Having trouble running Thespian's 'multiproc tcp base'")
    def test_multiproc_tcp_base(self):
        # Same checks against Thespian's multiproc TCP actor base.
        start_multiproc_tcp_base_system()
        self.check_actors()

    def close_connections_before_forking(self):
        # Used for closing Django connection before multiprocessing module forks the OS process.
        # No-op here; subclasses using Django are expected to override it.
        pass

    def check_actors(self, num_pipelines=3, num_orders_per_pipeline=5):
        """Create orders on several pipelines and wait until all are paid.

        :param num_pipelines: number of pipelines to spread orders across.
        :param num_orders_per_pipeline: orders created per pipeline.
        """
        pipeline_ids = list(range(num_pipelines))

        self.close_connections_before_forking()

        actors = ActorModelRunner(self.system, pipeline_ids=pipeline_ids,
                                  shutdown_on_close=True)

        # Todo: Use wakeupAfter() to poll for new notifications (see Timer Messages).
        order_ids = []

        with self.system.construct_app(Orders, setup_table=True) as app, actors:

            # Create some new orders, alternating across the pipelines.
            for _ in range(num_orders_per_pipeline):
                for pipeline_id in pipeline_ids:
                    app.change_pipeline(pipeline_id)
                    order_id = create_new_order()
                    order_ids.append(order_id)

            # Wait for orders to be reserved and paid.
            # `retries` is a single budget shared by all the waits below;
            # the first loop asserts inside the loop body, the second
            # guards `retries` in its condition and asserts after.
            retries = 20 + 10 * num_orders_per_pipeline * len(pipeline_ids)
            for i, order_id in enumerate(order_ids):
                while not app.repository[order_id].is_reserved:
                    time.sleep(0.1)
                    retries -= 1
                    assert retries, "Failed set order.is_reserved {} ({})".format(order_id, i)

                while retries and not app.repository[order_id].is_paid:
                    time.sleep(0.1)
                    retries -= 1
                assert retries, "Failed set order.is_paid ({})".format(i)

            # Calculate timings from event timestamps.
            orders = [app.repository[oid] for oid in order_ids]
            first_timestamp = min([o.__created_on__ for o in orders])
            last_timestamp = max([o.__last_modified__ for o in orders])
            duration = last_timestamp - first_timestamp
            rate = len(order_ids) / float(duration)
            period = 1 / rate
            print("Orders system processed {} orders in {:.3f}s at rate of {:.1f} "
                  "orders/s, {:.3f}s each".format(len(order_ids), duration, rate, period))

            # Print min, average, max duration.
            durations = [o.__last_modified__ - o.__created_on__ for o in orders]
            print("Min order processing time: {:.3f}s".format(min(durations)))
            print("Mean order processing time: {:.3f}s".format(sum(durations) / len(durations)))
            print("Max order processing time: {:.3f}s".format(max(durations)))

    def tearDown(self):
        # Unset environment.
        try:
            del (os.environ['DB_URI'])
        except KeyError:
            pass
        try:
            # Shutdown base actor system.
            shutdown_actor_system()
        finally:
            # Clear event handlers. The nested try/finally guarantees
            # clear_event_handlers() runs even if the emptiness assertion
            # fails, and both run even if shutdown raises.
            try:
                assert_event_handlers_empty()
            finally:
                clear_event_handlers()