def test_multithreaded_runner_with_multiapp_system(self):
    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    self.set_db_uri()

    with MultiThreadedRunner(system):
        started = time()
        orders = system.processes['orders']

        # Create new orders.
        num_orders = 10
        order_ids = []
        for _ in range(num_orders):
            order_id = create_new_order()
            order_ids.append(order_id)

        # Wait for each order to be paid, sharing a single retry budget.
        retries = num_orders
        for order_id in order_ids:
            # while not orders.repository[order_id].is_reserved:
            #     sleep(0.1)
            #     retries -= 1
            #     assert retries, "Failed to set order.is_reserved"

            while retries and not orders.repository[order_id].is_paid:
                sleep(0.1)
                retries -= 1
            assert retries, "Failed to set order.is_paid"

        print(f"Duration: {time() - started:.4f}s")
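# The sleep-and-retry polling above recurs in most of the tests in this module.
# A reusable helper along these lines could factor it out; this is only a
# sketch and is not part of the original suite (the name wait_until and its
# parameters are assumptions).

from time import sleep  # redundant if the module already imports sleep


def wait_until(predicate, max_retries=50, interval=0.1):
    """Poll predicate() until it returns True, else fail after max_retries."""
    retries = max_retries
    while retries and not predicate():
        sleep(interval)
        retries -= 1
    assert retries, "Condition not met within %s retries" % max_retries


# Hypothetical usage, replacing one of the polling loops above:
#
#     wait_until(lambda: orders.repository[order_id].is_paid)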
def test_multiprocessing_multiapp_system(self):
    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    self.set_db_uri()

    with system.construct_app(Orders) as app:
        # Create a new order.
        order_id = create_new_order()

        # Check the new order exists in the repository.
        assert order_id in app.repository

    self.close_connections_before_forking()

    with MultiprocessRunner(system):
        with system.construct_app(Orders) as app:
            # Wait for the order to be reserved and paid.
            retries = 50
            while not app.repository[order_id].is_reserved:
                sleep(0.1)
                retries -= 1
                assert retries, "Failed to set order.is_reserved"

            while retries and not app.repository[order_id].is_paid:
                sleep(0.1)
                retries -= 1
            assert retries, "Failed to set order.is_paid"
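# close_connections_before_forking() comes from the test case base class and
# is not defined in this excerpt. A hedged sketch of its intent (an assumption,
# not the library's actual implementation): any database connection opened in
# the parent process is closed before MultiprocessRunner forks, so that child
# processes open their own connections rather than sharing the parent's
# socket. The datastore argument and close_connection() call are hypothetical.

def close_connections_before_forking_sketch(datastore=None):
    """Close parent-process DB connections prior to forking worker processes."""
    if datastore is not None:
        datastore.close_connection()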
def test_clocked_multithreaded_runner_with_multiapp_system(self):
    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    self.set_db_uri()

    clock_speed = 10
    tick_interval = 1 / clock_speed

    with MultiThreadedRunner(system, clock_speed=clock_speed):
        started = time()
        orders = system.processes['orders']

        # Create new orders.
        num_orders = 10
        order_ids = []
        for _ in range(num_orders):
            order_id = create_new_order()
            order_ids.append(order_id)
            # sleep(tick_interval / 3)
            # sleep(tick_interval * 10)

        # Wait for each order to be paid, sharing a single retry budget.
        retries = 30 * num_orders
        num_completed = 0
        for order_id in order_ids:
            while retries and not orders.repository[order_id].is_paid:
                sleep(0.1)
                retries -= 1
            assert retries, (
                "Failed to set order.is_paid (after %s completed)" % num_completed
            )
            num_completed += 1

        print(f"Duration: {time() - started:.4f}s")
def test_multiprocessing_multiapp_system(self):
    set_db_uri()

    with Orders(setup_table=True) as app:
        # Create a new order.
        order_id = create_new_order()

        # Check the new order exists in the repository.
        assert order_id in app.repository

    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
    )

    multiprocess = Multiprocess(system)

    # Start the multiprocessing system.
    with multiprocess:
        with Orders() as app:
            # Wait for the order to be reserved and paid.
            retries = 50
            while not app.repository[order_id].is_reserved:
                time.sleep(0.1)
                retries -= 1
                assert retries, "Failed to set order.is_reserved"

            while retries and not app.repository[order_id].is_paid:
                time.sleep(0.1)
                retries -= 1
            assert retries, "Failed to set order.is_paid"
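# set_db_uri() is referenced above but not defined in this excerpt. It is
# assumed to point the applications at an external database by setting the
# DB_URI environment variable; a minimal sketch under that assumption follows
# (the MySQL host/user/password defaults are illustrative, not taken from the
# original module).

import os


def set_db_uri():
    """Build a DB_URI from MYSQL_* environment variables (assumed scheme)."""
    host = os.getenv('MYSQL_HOST', '127.0.0.1')
    user = os.getenv('MYSQL_USER', 'eventsourcing')
    password = os.getenv('MYSQL_PASSWORD', 'eventsourcing')
    os.environ['DB_URI'] = 'mysql+pymysql://{}:{}@{}/eventsourcing'.format(
        user, password, host
    )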
def check_actors(self, num_pipelines=2, num_orders_per_pipeline=5):
    pipeline_ids = list(range(num_pipelines))

    actors = Actors(self.system, pipeline_ids=pipeline_ids, shutdown_on_close=True)

    # Todo: Use wakeupAfter() to poll for new notifications (see Timer Messages).
    # Todo: Fix multiple pipelines with multiproc bases.

    order_ids = []

    with Orders(setup_table=True) as app, actors:

        # Create some new orders, round-robin across the pipelines.
        for _ in range(num_orders_per_pipeline):
            for pipeline_id in pipeline_ids:
                app.change_pipeline(pipeline_id)
                order_id = create_new_order()
                order_ids.append(order_id)

        # Wait for the orders to be reserved and paid.
        retries = 100 + 100 * num_orders_per_pipeline * len(pipeline_ids)
        for i, order_id in enumerate(order_ids):
            while not app.repository[order_id].is_reserved:
                time.sleep(0.1)
                retries -= 1
                assert retries, "Failed to set order.is_reserved {} ({})".format(
                    order_id, i
                )

            while retries and not app.repository[order_id].is_paid:
                time.sleep(0.1)
                retries -= 1
            assert retries, "Failed to set order.is_paid ({})".format(i)

        # Calculate timings from the event timestamps.
        orders = [app.repository[oid] for oid in order_ids]
        first_timestamp = min([o.__created_on__ for o in orders])
        last_timestamp = max([o.__last_modified__ for o in orders])
        duration = last_timestamp - first_timestamp
        rate = len(order_ids) / float(duration)
        period = 1 / rate
        print(
            "Orders system processed {} orders in {:.3f}s at a rate of {:.1f} "
            "orders/s, {:.3f}s each".format(len(order_ids), duration, rate, period)
        )

        # Print min, mean, and max processing durations.
        durations = [o.__last_modified__ - o.__created_on__ for o in orders]
        print("Min order processing time: {:.3f}s".format(min(durations)))
        print("Mean order processing time: {:.3f}s".format(
            sum(durations) / len(durations)))
        print("Max order processing time: {:.3f}s".format(max(durations)))
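# check_actors() is parameterised but nothing in this excerpt calls it. A
# hedged example of how a test case might drive it (the method name and the
# single-pipeline arguments are assumptions, not part of the original suite):

def test_actors_with_single_pipeline(self):
    self.check_actors(num_pipelines=1, num_orders_per_pipeline=5)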
def test_singlethreaded_multiapp_system(self):
    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
        setup_tables=True,
    )

    with system:
        # Create new Order aggregate.
        order_id = create_new_order()

        # Check the order is reserved and paid.
        repository = system.orders.repository
        assert repository[order_id].is_reserved
        assert repository[order_id].is_paid
def test_singlethreaded_runner_with_multiapp_system(self):
    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )

    with system:
        # Create new Order aggregate.
        order_id = create_new_order()

        # Check the order is reserved and paid.
        repository = system.processes['orders'].repository
        assert repository[order_id].is_reserved
        assert repository[order_id].is_paid
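# The tests above rely on an Order aggregate with is_reserved and is_paid
# flags, and on a create_new_order() factory, neither of which appears in this
# excerpt. A minimal sketch of those fixtures, assuming the eventsourcing
# AggregateRoot base class; the attribute names match the assertions above,
# everything else is an assumption about how the fixtures are defined.

from eventsourcing.domain.model.aggregate import AggregateRoot


class Order(AggregateRoot):
    def __init__(self, **kwargs):
        super(Order, self).__init__(**kwargs)
        self.is_reserved = False
        self.is_paid = False


def create_new_order():
    # Create and save a new Order, returning its ID for the repository lookups above.
    order = Order.__create__()
    order.__save__()
    return order.id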
def test_multipipeline_multiprocessing_multiapp(self):
    self.set_db_uri()

    system = System(
        (Orders, Reservations, Orders, Payments, Orders),
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )

    num_pipelines = 2
    pipeline_ids = range(num_pipelines)

    multiprocess = MultiprocessRunner(system, pipeline_ids=pipeline_ids)

    num_orders_per_pipeline = 5
    order_ids = []

    self.close_connections_before_forking()

    # Start the multiprocessing system.
    with multiprocess, system.construct_app(Orders) as orders:

        # Create some new orders, round-robin across the pipelines.
        for _ in range(num_orders_per_pipeline):
            for pipeline_id in pipeline_ids:
                orders.change_pipeline(pipeline_id)
                order_id = create_new_order()
                order_ids.append(order_id)
                sleep(0.05)

        # Wait for the orders to be reserved and paid.
        retries = 10 + 10 * num_orders_per_pipeline * len(pipeline_ids)
        for i, order_id in enumerate(order_ids):
            while not orders.repository[order_id].is_reserved:
                sleep(0.1)
                retries -= 1
                assert retries, "Failed to set order.is_reserved {} ({})".format(
                    order_id, i
                )

            while retries and not orders.repository[order_id].is_paid:
                sleep(0.1)
                retries -= 1
            assert retries, "Failed to set order.is_paid ({})".format(i)

        # Calculate timings from the event timestamps.
        order_aggregates = [orders.repository[oid] for oid in order_ids]
        first_timestamp = min([o.__created_on__ for o in order_aggregates])
        last_timestamp = max([o.__last_modified__ for o in order_aggregates])
        duration = last_timestamp - first_timestamp
        rate = len(order_ids) / float(duration)
        period = 1 / rate
        print(
            "Orders system processed {} orders in {:.3f}s at a rate of {:.1f} "
            "orders/s, {:.3f}s each".format(len(order_ids), duration, rate, period)
        )

        # Print min, mean, and max processing durations.
        durations = [o.__last_modified__ - o.__created_on__ for o in order_aggregates]
        print("Min order processing time: {:.3f}s".format(min(durations)))
        print("Mean order processing time: {:.3f}s".format(
            sum(durations) / len(durations)))
        print("Max order processing time: {:.3f}s".format(max(durations)))
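# The timing/report block at the end of check_actors() and
# test_multipipeline_multiprocessing_multiapp() is duplicated. A hedged
# refactoring sketch (not part of the original suite) that both could call:

def print_order_timings(order_aggregates):
    """Print throughput and min/mean/max processing time for the given orders."""
    first = min(o.__created_on__ for o in order_aggregates)
    last = max(o.__last_modified__ for o in order_aggregates)
    duration = last - first
    rate = len(order_aggregates) / float(duration)
    print(
        "Orders system processed {} orders in {:.3f}s at a rate of {:.1f} "
        "orders/s, {:.3f}s each".format(len(order_aggregates), duration, rate, 1 / rate)
    )
    durations = [o.__last_modified__ - o.__created_on__ for o in order_aggregates]
    print("Min order processing time: {:.3f}s".format(min(durations)))
    print("Mean order processing time: {:.3f}s".format(sum(durations) / len(durations)))
    print("Max order processing time: {:.3f}s".format(max(durations)))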