def test_multiprocessing_multiapp_system(self):
    """Run a three-application system with the multiprocess runner.

    Creates an order before forking, then polls until the order has been
    reserved and paid by the downstream process applications running in
    separate OS processes.
    """
    # Two pipe expressions sharing Orders: Orders feeds Reservations and
    # Payments, and both feed results back into Orders.
    system = System(Orders | Reservations | Orders,
                    Orders | Payments | Orders,
                    setup_tables=True,
                    infrastructure_class=self.infrastructure_class)

    self.set_db_uri()

    with system.construct_app(Orders) as app:
        # Create a new order.
        order_id = create_new_order()

        # Check new order exists in the repository.
        assert order_id in app.repository

    # NOTE(review): presumably required so forked children don't inherit
    # open DB connections — confirm against subclass implementations.
    self.close_connections_before_forking()

    with MultiprocessRunner(system):
        with system.construct_app(Orders) as app:
            # Shared retry budget for both poll loops below (~5s total).
            retries = 50

            # Poll until the order is marked reserved; the assert inside
            # the loop fails the test when the budget is exhausted.
            while not app.repository[order_id].is_reserved:
                sleep(0.1)
                retries -= 1
                assert retries, "Failed set order.is_reserved"

            # Then poll for payment, reusing whatever budget remains.
            while retries and not app.repository[order_id].is_paid:
                sleep(0.1)
                retries -= 1
                assert retries, "Failed set order.is_paid"
def test_multiprocessing_singleapp_system(self):
    """Run a single-application system (Examples piped to itself) with the
    multiprocess runner, and wait for the aggregate to be 'moved on' by the
    follower process.
    """
    single_app_system = System(
        Examples | Examples,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class
    )

    self.set_db_uri()
    self.close_connections_before_forking()

    with MultiprocessRunner(single_app_system):
        with single_app_system.construct_app(Examples) as app:
            example = ExampleAggregate.__create__()
            example.__save__()
            assert example.id in app.repository

            # Check the aggregate is moved on.
            attempts_left = 50
            while not app.repository[example.id].is_moved_on:
                sleep(0.1)
                attempts_left -= 1
                assert attempts_left, "Failed to move"
def test_multipipeline_multiprocessing_multiapp(self):
    """Run the Orders/Reservations/Payments system across two pipelines
    with the multiprocess runner, then report throughput timings derived
    from the aggregates' event timestamps.
    """
    self.set_db_uri()

    # Pipeline expressed as a tuple: Orders -> Reservations -> Orders
    # -> Payments -> Orders.
    system = System((Orders, Reservations, Orders, Payments, Orders), setup_tables=True, infrastructure_class=self.infrastructure_class)

    num_pipelines = 2
    pipeline_ids = range(num_pipelines)

    multiprocess = MultiprocessRunner(system, pipeline_ids=pipeline_ids)

    num_orders_per_pipeline = 5
    order_ids = []

    # NOTE(review): presumably closes inherited DB connections before the
    # multiprocessing module forks — confirm in subclasses.
    self.close_connections_before_forking()

    # Start multiprocessing system.
    with multiprocess, system.construct_app(Orders) as orders:

        # Create some new orders, round-robin across pipelines.
        for _ in range(num_orders_per_pipeline):
            for pipeline_id in pipeline_ids:
                orders.change_pipeline(pipeline_id)
                order_id = create_new_order()
                order_ids.append(order_id)
                # Small pause between orders to spread the load.
                sleep(0.05)

        # Wait for orders to be reserved and paid.
        # Retry budget scales with the number of orders; it is shared
        # across all orders and both poll loops.
        retries = 10 + 10 * num_orders_per_pipeline * len(pipeline_ids)
        for i, order_id in enumerate(order_ids):
            while not orders.repository[order_id].is_reserved:
                sleep(0.1)
                retries -= 1
                assert retries, "Failed set order.is_reserved {} ({})".format(
                    order_id, i)

            while retries and not orders.repository[order_id].is_paid:
                sleep(0.1)
                retries -= 1
                assert retries, "Failed set order.is_paid ({})".format(i)

        # Calculate timings from event timestamps.
        order_aggregates = [orders.repository[oid] for oid in order_ids]
        first_timestamp = min([o.__created_on__ for o in order_aggregates])
        last_timestamp = max(
            [o.__last_modified__ for o in order_aggregates])
        duration = last_timestamp - first_timestamp
        rate = len(order_ids) / float(duration)
        period = 1 / rate
        print(
            "Orders system processed {} orders in {:.3f}s at rate of {:.1f} "
            "orders/s, {:.3f}s each".format(len(order_ids), duration, rate,
                                            period))

        # Print min, average, max duration.
        durations = [
            o.__last_modified__ - o.__created_on__ for o in order_aggregates
        ]
        print("Min order processing time: {:.3f}s".format(min(durations)))
        print("Mean order processing time: {:.3f}s".format(
            sum(durations) / len(durations)))
        print("Max order processing time: {:.3f}s".format(max(durations)))
class TestActors(unittest.TestCase):
    """Tests running the Orders/Reservations/Payments system on a
    Thespian actor-model runner.
    """

    # Infrastructure class used to construct each process application.
    infrastructure_class = SQLAlchemyApplication

    def setUp(self):
        # Set environment.
        set_db_uri()

        # Define system: Orders feeds Reservations and Payments, both of
        # which feed back into Orders.
        self.system = System(Orders | Reservations | Orders | Payments | Orders,
                             infrastructure_class=self.infrastructure_class)

    def test_simple_system_base(self):
        """Run the system on Thespian's default ('simple') actor system base."""
        start_actor_system()
        self.check_actors()

    @skip("Having trouble running Thespian's 'multiproc tcp base'")
    def test_multiproc_tcp_base(self):
        """Run the system on Thespian's 'multiproc tcp base' (currently skipped)."""
        start_multiproc_tcp_base_system()
        self.check_actors()

    def close_connections_before_forking(self):
        # Used for closing Django connection before multiprocessing module forks the OS process.
        # No-op here; subclasses presumably override when needed.
        pass

    def check_actors(self, num_pipelines=3, num_orders_per_pipeline=5):
        """Create orders across pipelines, wait for them to be reserved and
        paid by the actor-hosted process applications, then print timings.

        :param num_pipelines: number of pipelines to spread orders across.
        :param num_orders_per_pipeline: orders created per pipeline.
        """
        pipeline_ids = list(range(num_pipelines))

        self.close_connections_before_forking()

        actors = ActorModelRunner(self.system, pipeline_ids=pipeline_ids,
                                  shutdown_on_close=True)

        # Todo: Use wakeupAfter() to poll for new notifications (see Timer Messages).

        order_ids = []

        with self.system.construct_app(Orders, setup_table=True) as app, actors:

            # Create some new orders, round-robin across pipelines.
            for _ in range(num_orders_per_pipeline):
                for pipeline_id in pipeline_ids:
                    app.change_pipeline(pipeline_id)

                    order_id = create_new_order()
                    order_ids.append(order_id)

            # Wait for orders to be reserved and paid.
            # Retry budget scales with order count and is shared across all
            # orders and both poll loops.
            retries = 20 + 10 * num_orders_per_pipeline * len(pipeline_ids)
            for i, order_id in enumerate(order_ids):
                while not app.repository[order_id].is_reserved:
                    time.sleep(0.1)
                    retries -= 1
                    assert retries, "Failed set order.is_reserved {} ({})".format(order_id, i)

                while retries and not app.repository[order_id].is_paid:
                    time.sleep(0.1)
                    retries -= 1
                    assert retries, "Failed set order.is_paid ({})".format(i)

            # Calculate timings from event timestamps.
            orders = [app.repository[oid] for oid in order_ids]
            first_timestamp = min([o.__created_on__ for o in orders])
            last_timestamp = max([o.__last_modified__ for o in orders])
            duration = last_timestamp - first_timestamp
            rate = len(order_ids) / float(duration)
            period = 1 / rate
            print("Orders system processed {} orders in {:.3f}s at rate of {:.1f} "
                  "orders/s, {:.3f}s each".format(len(order_ids), duration, rate, period))

            # Print min, average, max duration.
            durations = [o.__last_modified__ - o.__created_on__ for o in orders]
            print("Min order processing time: {:.3f}s".format(min(durations)))
            print("Mean order processing time: {:.3f}s".format(sum(durations) / len(durations)))
            print("Max order processing time: {:.3f}s".format(max(durations)))

    def tearDown(self):
        # Unset environment.
        try:
            del (os.environ['DB_URI'])
        except KeyError:
            pass
        try:
            # Shutdown base actor system.
            shutdown_actor_system()
        finally:
            # Clear event handlers. The assertion runs first so a dirty
            # handler registry still fails the test, and clearing runs
            # regardless so later tests start clean.
            try:
                assert_event_handlers_empty()
            finally:
                clear_event_handlers()