def test_multithreaded_runner_with_single_pipe(self):
    """Run a two-app pipe in threads; poll until the reservation appears."""
    system = System(
        Orders | Reservations,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    self.set_db_uri()
    with MultiThreadedRunner(system) as runner:
        order_id = create_new_order()
        orders_repo = runner.get(Orders).repository
        self.assertEqual(orders_repo[order_id].id, order_id)

        reservations_repo = runner.get(Reservations).repository
        reservation_id = Reservation.create_reservation_id(order_id)

        # The reservation is created asynchronously by another thread, so
        # retry the check (ten retries, 0.1s apart) before giving up.
        for remaining in range(10, -1, -1):
            try:
                self.assertEqual(
                    reservations_repo[reservation_id].order_id, order_id
                )
            except (RepositoryKeyError, AssertionError):
                if not remaining:
                    raise
                sleep(0.1)
            else:
                break
def test_multiprocessing_multiapp_system(self):
    """Run orders/reservations/payments across OS processes."""
    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    self.set_db_uri()

    with system.construct_app(Orders) as app:
        # Create a new order and check it landed in the repository.
        order_id = create_new_order()
        assert order_id in app.repository

    self.close_connections_before_forking()

    with MultiprocessRunner(system):
        with system.construct_app(Orders) as app:
            attempts_left = 50
            # Poll until the reservations process marks the order reserved.
            while not app.repository[order_id].is_reserved:
                sleep(0.1)
                attempts_left -= 1
                assert attempts_left, "Failed set order.is_reserved"
            # Then poll until the payments process marks it paid.
            while attempts_left and not app.repository[order_id].is_paid:
                sleep(0.1)
                attempts_left -= 1
            assert attempts_left, "Failed set order.is_paid"
def test_multithreaded_runner_with_multiapp_system(self):
    """Process a batch of orders through the threaded multi-app system."""
    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    self.set_db_uri()
    with MultiThreadedRunner(system) as runner:
        orders = runner.get(Orders)
        started = time()

        # Create a batch of new orders.
        num_orders = 10
        order_ids = [create_new_order() for _ in range(num_orders)]

        # Poll until every order is paid, sharing one retry budget.
        retries = num_orders
        for order_id in order_ids:
            while not orders.repository[order_id].is_paid:
                sleep(0.5)
                retries -= 1
                assert retries, "Failed set order.is_paid"

        print(f"Duration: { time() - started :.4f}s")
def check_actors(self, num_pipelines=3, num_orders_per_pipeline=5):
    """Drive the actor-based runner and report order processing timings."""
    pipeline_ids = list(range(num_pipelines))
    actors = ThespianRunner(
        self.system, pipeline_ids=pipeline_ids, shutdown_on_close=True
    )
    # Todo: Use wakeupAfter() to poll for new notifications (see Timer Messages).
    order_ids = []
    with self.system.construct_app(Orders, setup_table=True) as app, actors:
        # Create some new orders, spread across the pipelines.
        for _ in range(num_orders_per_pipeline):
            for pipeline_id in pipeline_ids:
                app.change_pipeline(pipeline_id)
                order_ids.append(create_new_order())

        # Wait for orders to be reserved and paid, with a retry budget
        # shared across the whole batch.
        retries = 20 + 10 * num_orders_per_pipeline * len(pipeline_ids)
        for i, order_id in enumerate(order_ids):
            while not app.repository[order_id].is_reserved:
                time.sleep(0.1)
                retries -= 1
                assert retries, "Failed set order.is_reserved {} ({})".format(
                    order_id, i)
            while retries and not app.repository[order_id].is_paid:
                time.sleep(0.1)
                retries -= 1
            assert retries, "Failed set order.is_paid ({})".format(i)

        # Calculate timings from event timestamps on the aggregates.
        orders = [app.repository[oid] for oid in order_ids]
        first_timestamp = min(o.__created_on__ for o in orders)
        last_timestamp = max(o.__last_modified__ for o in orders)
        duration = last_timestamp - first_timestamp
        rate = len(order_ids) / float(duration)
        period = 1 / rate
        print(
            "Orders system processed {} orders in {:.3f}s at rate of {:.1f} "
            "orders/s, {:.3f}s each".format(len(order_ids), duration, rate,
                                            period))

        # Print min, average, max duration.
        durations = [o.__last_modified__ - o.__created_on__ for o in orders]
        print("Min order processing time: {:.3f}s".format(min(durations)))
        print("Mean order processing time: {:.3f}s".format(
            sum(durations) / len(durations)))
        print("Max order processing time: {:.3f}s".format(max(durations)))
def test_singlethreaded_runner_with_single_application_class(self):
    """A system with one application class runs in-process."""
    system = System(
        Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    with system as runner:
        new_order_id = create_new_order()
        # The new order should be retrievable from the repository.
        orders_repo = runner.get(Orders).repository
        self.assertEqual(orders_repo[new_order_id].id, new_order_id)
def test_multiprocess_runner_with_single_pipe(self):
    """A single-pipe system starts and serves reads under MultiprocessRunner."""
    system = System(
        Orders | Reservations,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    self.set_db_uri()
    self.close_connections_before_forking()
    with MultiprocessRunner(system) as runner:
        orders_repo = runner.get(Orders).repository
        new_order_id = create_new_order()
        self.assertEqual(orders_repo[new_order_id].id, new_order_id)
def test_singlethreaded_runner_with_multiapp_system(self):
    """Orders are reserved and paid synchronously in a single thread."""
    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    with system as runner:
        # Create a new Order aggregate.
        order_id = create_new_order()
        # In single-threaded mode the follow-on processing has completed
        # by the time the command returns, so check reserved and paid.
        orders_repo = runner.get(Orders).repository
        assert orders_repo[order_id].is_reserved
        assert orders_repo[order_id].is_paid
def test_singlethreaded_runner_with_single_pipe(self):
    """A single pipe in a single thread creates the reservation synchronously."""
    system = System(
        Orders | Reservations,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    with system as runner:
        order_id = create_new_order()
        orders_app = runner.get(Orders)
        self.assertEqual(orders_app.repository[order_id].id, order_id)
        # The reservation policy has already run, so the reservation exists.
        reservation_id = Reservation.create_reservation_id(order_id)
        reservations_app = runner.get(Reservations)
        self.assertEqual(
            reservations_app.repository[reservation_id].order_id, order_id
        )
def test_singlethreaded_runner_with_direct_query(self):
    """System using direct queries still reserves and pays new orders."""
    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
        use_direct_query_if_available=True,
    )
    with system as runner:
        orders_app = runner.get(Orders)
        # Create a new Order aggregate.
        order_id = create_new_order()
        # Check the order has been reserved and paid.
        assert orders_app.repository[order_id].is_reserved
        assert orders_app.repository[order_id].is_paid
def test_clocked_multithreaded_runner_with_multiapp_system(self):
    """Run the multi-app system under a clocked multi-threaded runner.

    Creates a batch of orders and polls until each is paid, with a
    retry budget shared across the batch.
    """
    system = System(
        Orders | Reservations | Orders,
        Orders | Payments | Orders,
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    self.set_db_uri()
    clock_speed = 10
    with MultiThreadedRunner(system, clock_speed=clock_speed) as runner:
        started = time()

        # Create a batch of new orders.
        num_orders = 10
        order_ids = []
        for i in range(num_orders):
            order_id = create_new_order()
            order_ids.append(order_id)

        # The Orders app is the same for every order, so look it up once
        # instead of once per order inside the loop (loop-invariant).
        app = runner.get(Orders)

        # Poll until each order is paid.
        retries = 30 * num_orders
        num_completed = 0
        for order_id in order_ids:
            while not app.repository[order_id].is_paid:
                sleep(0.1)
                retries -= 1
                assert retries, (
                    "Failed set order.is_paid (after %s completed)"
                    % num_completed
                )
            num_completed += 1

        print(f"Duration: { time() - started :.4f}s")
def test_multipipeline_multiprocessing_multiapp(self):
    """Run the multi-app system over several pipelines in OS processes.

    Creates orders on each pipeline, waits for them to be reserved and
    paid, then reports throughput statistics computed from the event
    timestamps on the Order aggregates.
    """
    self.set_db_uri()
    system = System(
        (Orders, Reservations, Orders, Payments, Orders),
        setup_tables=True,
        infrastructure_class=self.infrastructure_class,
    )
    num_pipelines = 2
    pipeline_ids = range(num_pipelines)
    multiprocess_runner = MultiprocessRunner(system, pipeline_ids=pipeline_ids)

    num_orders_per_pipeline = 5
    order_ids = []

    self.close_connections_before_forking()

    # Start multiprocessing system.
    with multiprocess_runner:
        # The Orders app is loop-invariant: look it up once rather than
        # once per batch iteration (it was previously fetched inside the
        # creation loop).
        orders = multiprocess_runner.get(Orders)

        # Create some new orders on each pipeline.
        for _ in range(num_orders_per_pipeline):
            for pipeline_id in pipeline_ids:
                orders.change_pipeline(pipeline_id)
                order_id = create_new_order()
                order_ids.append(order_id)
                sleep(0.05)

        # Wait for orders to be reserved and paid, with a retry budget
        # shared across the whole batch.
        retries = 10 + 10 * num_orders_per_pipeline * len(pipeline_ids)
        for i, order_id in enumerate(order_ids):
            while not orders.repository[order_id].is_reserved:
                sleep(0.1)
                retries -= 1
                assert retries, "Failed set order.is_reserved {} ({})".format(
                    order_id, i
                )
            while retries and not orders.repository[order_id].is_paid:
                sleep(0.1)
                retries -= 1
            assert retries, "Failed set order.is_paid ({})".format(i)

        # Calculate timings from event timestamps.
        order_aggregates = [orders.repository[oid] for oid in order_ids]
        first_timestamp = min([o.__created_on__ for o in order_aggregates])
        last_timestamp = max([o.__last_modified__ for o in order_aggregates])
        duration = last_timestamp - first_timestamp
        rate = len(order_ids) / float(duration)
        period = 1 / rate
        print(
            "Orders system processed {} orders in {:.3f}s at rate of {:.1f} "
            "orders/s, {:.3f}s each".format(len(order_ids), duration, rate, period)
        )

        # Print min, average, max duration.
        durations = [
            o.__last_modified__ - o.__created_on__ for o in order_aggregates
        ]
        print("Min order processing time: {:.3f}s".format(min(durations)))
        print(
            "Mean order processing time: {:.3f}s".format(
                sum(durations) / len(durations)
            )
        )
        print("Max order processing time: {:.3f}s".format(max(durations)))
def cmd():
    """Create a new order, check it persisted, and record its ID."""
    new_id = create_new_order()
    assert new_id in orders.repository
    order_ids.append(new_id)