def test_simulate_with_stime_limit():
    def f(sim):
        sim.data.append('f')
        sim.schedule(2, g)

    def g(sim):
        sim.data.append('g')

    def init(sim):
        sim.schedule(1, f)
        sim.data.append('init')

    ret1 = simulate([], init, stime_limit=0.5)
    ret2 = simulate([], init, stime_limit=1)
    ret3 = simulate([], init, stime_limit=2)
    ret4 = simulate([], init, stime_limit=3)

    assert ret1.data == ['init']
    assert ret2.data == ['init', 'f']
    assert ret3.data == ['init', 'f']
    assert ret4.data == ['init', 'f', 'g']

    assert ret1.num_events == 0
    assert ret2.num_events == 1
    assert ret3.num_events == 1
    assert ret4.num_events == 2

    assert ret1.stime == 1
    assert ret2.stime == 3
    assert ret3.stime == 3
    assert ret4.stime == 3


def test_params_accessible_via_getattr_and_getitem():
    params = {'x': 10, 'y': 'hello'}

    def init(sim):
        assert sim.params.x == 10
        assert sim.params['x'] == 10
        assert sim.params.y == 'hello'

    simulate([], init=init, params=params)


def test_tree_traverse_with_trace_on_enter_and_on_leave():
    """Validate traversing the tree twice, writing the trace on enter and on leave.

    In this test we run the simulation twice. The first time, we use the
    standard traversal, adding node labels to the trace on node enter. The
    second time, we change the enter and leave implementations to add node
    labels to the trace when leaving a node instead of entering it. We check
    that the traversal time is the same (we still visit all nodes), but the
    trace differs. We also run the enter-trace traversal once more with a
    simulation time limit and check that it stops early. Besides that, we
    validate that the results co-exist and are not shared.
    """
    def trace_on_leave__enter(sim, node):
        assert not node.visited
        node.visited = True
        sim.schedule(node.delay, visit_next_child, args=[node])

    def trace_on_leave__leave(sim, node):
        sim.data.trace.append(node.label)
        if node.parent and node.parent.visited:
            sim.schedule(0, visit_next_child, args=(node.parent,))

    trace_on_enter_result = simulate(TreeData, init=start_from_root, handlers={
        'enter': enter_node,
        'leave': leave_node,
    })
    trace_on_leave_result = simulate(TreeData, init=start_from_root, handlers={
        'enter': trace_on_leave__enter,
        'leave': trace_on_leave__leave,
    })
    trace_stime_limit_result = simulate(TreeData, init=start_from_root, handlers={
        'enter': enter_node,
        'leave': leave_node,
    }, stime_limit=7)

    np.testing.assert_allclose(trace_on_enter_result.stime, 15.0, atol=0.1)
    np.testing.assert_allclose(trace_on_leave_result.stime, 15.0, atol=0.1)
    np.testing.assert_allclose(trace_stime_limit_result.stime, 10.0, atol=0.1)

    assert trace_on_enter_result.data.trace == ['A', 'B', 'C', 'D', 'E']
    assert trace_on_leave_result.data.trace == ['D', 'C', 'E', 'B', 'A']
    assert trace_stime_limit_result.data.trace == ['A', 'B', 'C', 'D']


def test_simulate_accept_classes_without_create():
    class ModelData:
        default_value = 'default value'

        def __init__(self):
            self.value = ModelData.default_value

    ret_for_default_value = simulate(ModelData)
    ModelData.default_value = 'new value'
    ret_for_updated_value = simulate(ModelData)

    assert ret_for_default_value.data.value == 'default value'
    assert ret_for_updated_value.data.value == 'new value'


def simulate_network(topology, failure_interval, repair_interval,
                     num_offline_till_repair=2, **kwargs):
    stime_limit = kwargs.get('stime_limit', defaults['stime_limit'])
    num_runs = kwargs.get('num_runs', defaults['num_runs'])
    log_level = kwargs.get('log_level', Logger.Level.ERROR)
    routing_mode = {
        'static': Network.STATIC,
        'dynamic': Network.DYNAMIC,
    }[kwargs.get('routing_mode', 'static')]
    cons_repair_interval = kwargs.get('cons_repair_interval', repair_interval)

    results = []
    for i_run in range(num_runs):
        results.append(
            simulate(ModelData, stime_limit=stime_limit, params={
                'topology': topology,
                'routing_mode': routing_mode,
                'failure_interval': failure_interval,
                'repair_interval': repair_interval,
                'num_offline_till_repair': num_offline_till_repair,
                'log_level': log_level,
                'cons_repair_interval': cons_repair_interval,
                'record_samples': kwargs.get('record_samples', False),
                'sample_interval': kwargs.get('sample_interval', 1),
            }))
    return _SimRet(results)


def test_schedule_accept_none_handler_by_changing_only_time():
    def init(sim):
        sim.schedule(5)

    ret = simulate([], init=init)
    assert ret.stime == 5


def test_random_source_provides_statistics():
    """Validate that `RandomSource` provides statistics."""
    intervals = (10, 12, 15, 17)
    data_size = (123, 453, 245, 321)

    class TestModel(Model):
        def __init__(self, sim):
            super().__init__(sim)
            self.source = RandomSource(
                sim,
                source_id=34,
                dest_addr=13,
                data_size=Mock(side_effect=data_size),
                interval=Mock(side_effect=(intervals + (1000,))),
            )
            self.network = DummyModel(sim, 'Network')
            self.source.connections['network'] = self.network

    ret = simulate(TestModel, stime_limit=sum(intervals))

    assert ret.data.source.arrival_intervals.as_tuple() == intervals
    assert ret.data.source.data_size_stat.as_tuple() == data_size

    # Also check that we cannot replace statistics:
    with pytest.raises(AttributeError):
        from pydesim import Intervals
        ret.data.source.arrival_intervals = Intervals()
    with pytest.raises(AttributeError):
        from pydesim import Statistic
        ret.data.source.data_size_stat = Statistic()

    # Check that the source records the number of packets being sent:
    assert ret.data.source.num_packets_sent == 4


def test_dcf_line_network_with_single_source(num_stations):
    sr = simulate(
        WirelessHalfDuplexLineNetwork,
        stime_limit=SIM_TIME_LIMIT,
        params=dict(
            num_stations=num_stations,
            active_sources=[0],
            payload_size=PAYLOAD_SIZE,
            source_interval=SOURCE_INTERVAL,
            mac_header_size=MAC_HEADER,
            phy_header_size=PHY_HEADER,
            ack_size=ACK_SIZE,
            preamble=PREAMBLE,
            bitrate=BITRATE,
            difs=DIFS,
            sifs=SIFS,
            slot=SLOT,
            cwmin=CWMIN,
            cwmax=CWMAX,
            distance=DISTANCE,
            connection_radius=CONNECTION_RADIUS,
            speed_of_light=SPEED_OF_LIGHT,
        ),
        loglevel=Logger.Level.INFO
    )

    client = sr.data.stations[0]
    server = sr.data.stations[-1]
    source_id = client.source.source_id

    expected_number_of_packets = floor(SIM_TIME_LIMIT / SOURCE_INTERVAL.mean())
    assert client.source.num_packets_sent == expected_number_of_packets
    assert (expected_number_of_packets - 1 <=
            server.sink.num_packets_received <=
            expected_number_of_packets)

    mean_payload = PAYLOAD_SIZE.mean()
    expected_service_time = (
        DIFS + CWMIN / 2 * SLOT +
        PREAMBLE + (mean_payload + MAC_HEADER + PHY_HEADER) / BITRATE +
        SIFS + PREAMBLE + (PHY_HEADER + ACK_SIZE) / BITRATE +
        2 * DISTANCE / SPEED_OF_LIGHT
    )
    expected_end_to_end_delay = expected_service_time * (num_stations - 1)
    assert_allclose(
        server.sink.source_delays[source_id].mean(),
        expected_end_to_end_delay,
        rtol=0.2
    )

    client_if = client.get_interface_to(server)
    assert client_if.queue.size_trace.timeavg() == 0
    assert_allclose(
        client_if.transmitter.busy_trace.timeavg(),
        expected_service_time / SOURCE_INTERVAL.mean(),
        rtol=0.2
    )


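# For reference, a standalone sketch of the expected DCF transaction time used
# in the assertions above. The helper name is ours and it is not called by the
# tests; it only spells out the components of one successful transmission:
# DIFS, mean initial backoff, DATA frame, SIFS, ACK, and two propagation delays.
def _expected_dcf_service_time(payload_mean, mac_header, phy_header, ack_size,
                               preamble, bitrate, difs, sifs, slot, cwmin,
                               distance, speed_of_light):
    backoff = cwmin / 2 * slot                     # mean initial backoff
    data = preamble + (payload_mean + mac_header + phy_header) / bitrate
    ack = preamble + (phy_header + ack_size) / bitrate
    propagation = 2 * distance / speed_of_light    # DATA + ACK propagation
    return difs + backoff + data + sifs + ack + propagation

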
def test_saturated_network_with_three_stations():
    sr = simulate(
        CollisionDomainSaturatedNetwork,
        stime_limit=SIM_TIME_LIMIT,
        params=dict(
            num_stations=3,
            payload_size=PAYLOAD_SIZE,
            mac_header_size=MAC_HEADER,
            phy_header_size=PHY_HEADER,
            ack_size=ACK_SIZE,
            preamble=PREAMBLE,
            bitrate=BITRATE,
            difs=DIFS,
            sifs=SIFS,
            slot=SLOT,
            cwmin=CWMIN,
            cwmax=CWMIN,  # the same here, no increase to calculate p_collision
            connection_radius=CONNECTION_RADIUS,
            speed_of_light=SPEED_OF_LIGHT,
            queue_capacity=None,
        ),
        loglevel=Logger.Level.WARNING
    )

    # Since all stations send data to station 0, we assign this station
    # and its interface to more descriptive variables:
    access_point = sr.data.stations[0]
    access_point_iface = access_point.interfaces[0]

    # Validate the collision probability:
    p_collision = access_point_iface.receiver.num_collisions / (
        access_point_iface.receiver.num_collisions +
        access_point_iface.receiver.num_received
    )
    assert_allclose(p_collision, 1 / CWMIN, rtol=0.1)


def test_large_collision_domain_network__smoke():
    """Validate that all stations are really in a single collision domain and
    run the model for some time.

    We do not actually test any meaningful properties, except the connections
    and that only the server receives data.
    """
    num_stations = randint(5, 15)
    source_interval = Exponential(uniform(1.0, 10.0))
    payload_size = Exponential(randint(10, 100))

    sr = simulate(
        CollisionDomainNetwork,
        stime_limit=500,
        params=dict(
            num_stations=num_stations,
            payload_size=payload_size,
            source_interval=source_interval,
            mac_header_size=MAC_HEADER,
            phy_header_size=PHY_HEADER,
            ack_size=ACK_SIZE,
            preamble=PREAMBLE,
            bitrate=BITRATE,
            difs=DIFS,
            sifs=SIFS,
            slot=SLOT,
            cwmin=CWMIN,
            cwmax=CWMAX,
            connection_radius=CONNECTION_RADIUS,
            speed_of_light=SPEED_OF_LIGHT,
            queue_capacity=None,
        ),
        loglevel=Logger.Level.WARNING
    )

    access_point = sr.data.stations[0]
    clients = sr.data.stations[1:]
    conn_man = sr.data.connection_manager

    # Test that connections are established between all stations:
    for i in range(num_stations):
        radio = sr.data.get_iface(i).radio
        peers = set(conn_man.get_peers(radio))
        assert len(peers) == num_stations - 1 and radio not in peers

    # Test that the number of packets received by any client sink is 0:
    for client in clients:
        assert client.sink.num_packets_received == 0

    # Test that the number of packets generated by the sources, minus the queue
    # sizes and the packets still in transceivers at the end of the simulation,
    # is almost equal to the number of packets received by the access point sink:
    num_packets_sent = [
        (cli.source.num_packets_sent -
         cli.interfaces[0].queue.size() -
         (1 if cli.interfaces[0].transmitter.state else 0))
        for cli in clients
    ]
    num_packets_received = access_point.sink.num_packets_received
    assert_allclose(sum(num_packets_sent), num_packets_received, rtol=0.05)


def test_schedule_negative_delays_not_allowed():
    def invalid_init(sim):
        sim.schedule(-1)

    def invalid_handler(sim):
        sim.schedule(-0.1)

    def valid_init(sim):
        sim.schedule(10, invalid_handler)

    with pytest.raises(ValueError) as excinfo1:
        simulate([], init=invalid_init)
    with pytest.raises(ValueError) as excinfo2:
        simulate([], init=valid_init)

    assert "negative delay" in str(excinfo1.value).lower()
    assert "negative delay" in str(excinfo2.value).lower()


def test_sim_provide_cancel_operation():
    def init(sim):
        eid = sim.schedule(1)
        sim.cancel(eid)

    ret = simulate([], init)
    assert ret.stime == 0
    assert ret.num_events == 0


def test_simulate_accepts_classes_with_create_method():
    class ModelData:
        default_value = 'correct value'

        def __init__(self, value='incorrect value'):
            self.value = value

        @classmethod
        def create(cls):
            return ModelData(cls.default_value)

    ret_for_default_value = simulate(ModelData)
    ModelData.default_value = 'some new value'
    ret_for_updated_value = simulate(ModelData)

    assert ret_for_default_value.data.value == 'correct value'
    assert ret_for_updated_value.data.value == 'some new value'


def test_mm1_multihop_tandem_model_with_cross_traffic(arrival, service,
                                                      stime_limit, num_stations):
    ret = simulate(QueueingTandemNetwork, stime_limit=stime_limit, params={
        'arrivals': [arrival for _ in range(num_stations)],
        'services': [service for _ in range(num_stations)],
        'queue_capacity': None,
        'num_stations': num_stations,
    })

    n = num_stations
    mean_service = service.mean()
    mean_arrival = arrival.mean()
    rho = mean_service / mean_arrival

    expected_node_delays = []
    for i in range(num_stations):
        server = ret.data.servers[i]
        queue = ret.data.queues[i]

        est_busy_rate = server.busy_trace.timeavg()
        est_system_size = ret.data.system_size_trace[i].timeavg()
        est_arrival_mean = queue.arrival_intervals.statistic().mean()
        est_service_mean = server.service_intervals.mean()
        est_departure_mean = server.departure_intervals.statistic().mean()
        est_system_wait = ret.data.system_wait_intervals[i].mean()
        est_queue_wait = queue.wait_intervals.mean()

        expected_busy_rate = rho * (i + 1)
        expected_service_mean = mean_service
        expected_system_size = expected_busy_rate / (1 - expected_busy_rate)
        expected_arrival_mean = mean_arrival / (i + 1)
        expected_departure_mean = expected_arrival_mean
        expected_node_delays.append(expected_system_size * expected_arrival_mean)

        assert_allclose(est_busy_rate, expected_busy_rate, rtol=0.25)
        assert_allclose(est_service_mean, expected_service_mean, rtol=0.25)
        assert_allclose(est_system_size, expected_system_size, rtol=0.25)
        assert_allclose(est_arrival_mean, expected_arrival_mean, rtol=0.25)
        assert_allclose(est_departure_mean, expected_departure_mean, rtol=0.25)
        assert_allclose(est_system_wait, expected_node_delays[-1], rtol=0.25)
        assert_allclose(est_queue_wait,
                        expected_node_delays[-1] - expected_service_mean,
                        rtol=0.25)

    est_delays = [ret.data.sources[i].delays.mean() for i in range(n)]
    expected_delays = [0.0] * n
    for i in range(n - 1, -1, -1):
        expected_delays[i] = expected_node_delays[i] + (
            expected_delays[i + 1] if i < n - 1 else 0)
    assert_allclose(est_delays, expected_delays, rtol=0.35)


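# A pure-Python sketch of the per-node expectations asserted in the loop above
# (the helper is ours and is not used by the test). With cross traffic, station
# i serves the flows of stations 0..i, so its load is (i + 1) * rho, the
# superposed arrival interval shrinks accordingly, and the node delay follows
# from Little's law (delay = system size * mean arrival interval).
def _expected_tandem_node_metrics(mean_arrival, mean_service, num_stations):
    rho = mean_service / mean_arrival
    metrics = []
    for i in range(num_stations):
        busy_rate = rho * (i + 1)
        system_size = busy_rate / (1 - busy_rate)
        arrival_mean = mean_arrival / (i + 1)
        node_delay = system_size * arrival_mean
        metrics.append({
            'busy_rate': busy_rate,
            'system_size': system_size,
            'arrival_mean': arrival_mean,
            'node_delay': node_delay,
        })
    return metrics

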
def test_tree_traverse_in_parallel():
    """Validate that events scheduled in parallel work properly, and that raw data
    can be used.

    In this test we schedule visiting all children once we enter a node, after
    the same delay. We do not come back to the parent, so the 'leave' event is
    not specified. This is like traversing the tree with an unlimited number of
    threads, starting a new thread for each child right after processing the
    parent. The total time is then just the maximum accumulated delay along a
    branch.

    Besides that, we define data as a `namedtuple`, not `TreeData`, and check
    that it is used properly and put into the result.

    To be sure the events are queued in the correct order, we add several nodes
    (F, G, H, I, J) to the tree:

    (A [d=1],
        (B [d=2],
            (C [d=3],
                (D [d=4],
                    (J [d=1]))),
            (E [d=5],
                (H [d=1],
                    (I [d=1]))),
            (F [d=2],
                (G [d=2]))
        )
    )

    The expected visiting order is:

    A -> B -> C -> E -> F -> G -> D -> H -> I -> J
    """
    tree = create_tree()
    node_f = TreeNode(2.0, 'F')
    node_g = TreeNode(2.0, 'G')
    node_h = TreeNode(1.0, 'H')
    node_i = TreeNode(1.0, 'I')
    node_j = TreeNode(1.0, 'J')
    tree.get('B').add_child(node_f)
    tree.get('D').add_child(node_j)
    tree.get('E').add_child(node_h)
    node_f.add_child(node_g)
    node_h.add_child(node_i)

    data_class = namedtuple('ModelData', ['tree', 'trace'])
    data = data_class(tree=tree, trace=[])

    def visit(sim, node):
        assert not node.visited
        node.visited = True
        sim.data.trace.append(node.label)
        if node.children:
            for child in node.children:
                sim.schedule(node.delay, visit, args=[child])
        else:
            sim.schedule(node.delay)

    ret = simulate(data, init=start_from_root, handlers={'enter': visit})

    np.testing.assert_allclose(ret.stime, 11.0, atol=0.1, rtol=0.01)
    assert ret.data.trace == ['A', 'B', 'C', 'E', 'F', 'G', 'D', 'H', 'I', 'J']
    assert ret.data == data  # also check that data is the same


def test_schedule_multiple_events():
    def handler(sim):
        sim.data.append(sim.stime)

    def init(sim):
        sim.schedule(1, handler)
        sim.schedule(2, handler)

    ret = simulate([], init=init)
    assert ret.data == [1, 2]


def test_simulate_calls_constructor_without_parameters_but_with_sim():
    with patch('pydesim.simulator.Simulator') as SimulatorMock:
        class SomeModel(Model):
            def __init__(self, sim):
                assert isinstance(sim, SimulatorMock)
                super().__init__(sim)
                assert sim.params.x == 10
                assert sim.params.y == 'hello'

        result = simulate(SomeModel, params={'x': 10, 'y': 'hello'})


def test_stime_is_readonly():
    def valid_handler(sim):
        sim.data.append('OK')

    def valid_init(sim):
        sim.schedule(1, sim.handlers.handler)

    with pytest.raises(AttributeError) as excinfo1:
        def invalid_init(sim):
            sim.stime = 10
        simulate([], init=invalid_init)

    with pytest.raises(AttributeError) as excinfo2:
        def invalid_handler(sim):
            sim.stime += 1
        simulate([], init=valid_init, handlers={'handler': invalid_handler})

    with pytest.raises(AttributeError) as excinfo3:
        def invalid_fin(sim):
            sim.stime -= 1
        simulate([], init=valid_init, fin=invalid_fin,
                 handlers={'handler': valid_handler})

    assert 'set attribute' in str(excinfo1.value)
    assert 'set attribute' in str(excinfo2.value)
    assert 'set attribute' in str(excinfo3.value)


def tandem_queue_network_with_fixed_service(arrivals, service, queue_capacity,
                                            stime_limit):
    num_stations = len(arrivals)
    sr = simulate(QueueingTandemNetworkWithFixedService, stime_limit=stime_limit,
                  params={
                      'arrivals': arrivals,
                      'service': service,
                      'queue_capacity': queue_capacity,
                      'num_stations': num_stations,
                  })

    simret_class = namedtuple('SimRet', ['nodes'])
    node_class = namedtuple('Node', [
        'delay', 'queue_size', 'system_size', 'busy', 'arrivals', 'departures',
        'service', 'num_served', 'num_arrived', 'num_dropped', 'drop_ratio',
        'queue_wait', 'system_wait',
    ])

    active_nodes = {i for i in range(num_stations) if arrivals[i] is not None}
    nodes = [
        node_class(
            delay=(sr.data.sources[i].delays if i in active_nodes else None),
            queue_size=sr.data.queues[i].size_trace,
            system_size=sr.data.system_size_trace[i],
            busy=sr.data.servers[i].busy_trace,
            arrivals=sr.data.queues[i].arrival_intervals.statistic(),
            departures=sr.data.servers[i].departure_intervals.statistic(),
            service=sr.data.servers[i].service_intervals,
            num_served=sr.data.servers[i].num_served,
            num_arrived=sr.data.queues[i].num_arrived,
            num_dropped=sr.data.queues[i].num_dropped,
            drop_ratio=sr.data.queues[i].drop_ratio,
            queue_wait=sr.data.queues[i].wait_intervals,
            system_wait=sr.data.system_wait_intervals[i],
        ) for i in range(num_stations)
    ]
    return simret_class(nodes=nodes)


def test_controlled_source_provides_statistics():
    """Validate that `ControlledSource` provides statistics."""
    intervals = (10, 12, 15, 17)
    data_size = (123, 453, 245, 321)

    class SourceController(Model):
        def __init__(self, sim, src):
            super().__init__(sim)
            self.iterator = iter(intervals)
            self.src = src
            self.sim.schedule(next(self.iterator), self.handle_timeout)

        def handle_timeout(self):
            self.src.get_next()
            try:
                interval = next(self.iterator)
            except StopIteration:
                pass
            else:
                self.sim.schedule(interval, self.handle_timeout)

    class TestModel(Model):
        def __init__(self, sim):
            super().__init__(sim)
            self.source = ControlledSource(
                sim,
                source_id=34,
                dest_addr=13,
                data_size=Mock(side_effect=data_size),
            )
            self.network = DummyModel(sim, 'Network')
            self.source.connections['network'] = self.network
            self.controller = SourceController(sim, self.source)

    ret = simulate(TestModel, stime_limit=sum(intervals))

    assert ret.data.source.data_size_stat.as_tuple() == data_size
    assert ret.data.source.arrival_intervals.as_tuple() == intervals

    # Also check that we cannot replace statistics:
    with pytest.raises(AttributeError):
        from pydesim import Intervals
        ret.data.source.arrival_intervals = Intervals()
    with pytest.raises(AttributeError):
        from pydesim import Statistic
        ret.data.source.data_size_stat = Statistic()

    # Check that the source records the number of packets being sent:
    assert ret.data.source.num_packets_sent == 4


def test_scheduled_methods_are_called_in_chain():
    def write_some_data(sim, value='first'):
        sim.data.append(value)
        if value == 'first':
            sim.schedule(3, write_some_data, args=('second',))
        elif value == 'second':
            sim.schedule(10, write_some_data, kwargs={'value': 'third'})

    def init(sim):
        sim.schedule(1, write_some_data)

    ret = simulate([], init=init)
    assert ret.stime == 14
    assert ret.data == ['first', 'second', 'third']


def test_simulate_executes_init_and_fin():
    """In this test we validate that `simulate()` calls the init and fin methods."""
    data = []

    def init(sim):
        sim.data.append(1)

    def fin(sim):
        sim.data.append('A')

    ret = simulate(data, init=init, fin=fin, handlers={}, stime_limit=1)
    assert ret.stime == 0
    assert ret.data == [1, 'A']


def test_wired_line_network_with_single_source(num_stations):
    sr = simulate(WiredLineNetwork, stime_limit=SIM_TIME_LIMIT, params=dict(
        num_stations=num_stations,
        payload_size=PAYLOAD_SIZE,
        source_interval=SOURCE_INTERVAL,
        header_size=HEADER_SIZE,
        bitrate=BITRATE,
        distance=DISTANCE,
        speed_of_light=SPEED_OF_LIGHT,
        active_sources=[0],
        preamble=PREAMBLE,
        ifs=IFS,
    ), loglevel=Logger.Level.ERROR)

    client = sr.data.stations[0]
    server = sr.data.stations[-1]
    source_id = client.source.source_id

    expected_interval_avg = SOURCE_INTERVAL.mean()
    expected_number_of_packets = floor(SIM_TIME_LIMIT / expected_interval_avg)
    assert client.source.num_packets_sent == expected_number_of_packets
    assert (expected_number_of_packets - 1 <=
            server.sink.num_packets_received <=
            expected_number_of_packets)

    expected_transmission_delay = (
        (PAYLOAD_SIZE.mean() + HEADER_SIZE) / BITRATE + PREAMBLE + IFS)
    expected_delay = (
        (DISTANCE / SPEED_OF_LIGHT + expected_transmission_delay) *
        (num_stations - 1))
    assert_allclose(server.sink.source_delays[source_id].mean(), expected_delay,
                    rtol=0.1)

    client_if = client.get_interface_to(server)
    assert client_if.queue.size_trace.timeavg() == 0

    expected_busy_ratio = expected_transmission_delay / expected_interval_avg
    assert_allclose(client_if.transceiver.tx_busy_trace.timeavg(),
                    expected_busy_ratio, rtol=0.1)


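# A sketch of the end-to-end delay expectation asserted above (the helper is
# ours and is not called by the test): each of the (num_stations - 1) hops adds
# one propagation delay plus one transmission delay, where transmission covers
# payload and header at the line bitrate plus preamble and inter-frame space.
def _expected_wired_line_delay(payload_mean, header_size, bitrate, preamble,
                               ifs, distance, speed_of_light, num_stations):
    transmission = (payload_mean + header_size) / bitrate + preamble + ifs
    propagation = distance / speed_of_light
    return (propagation + transmission) * (num_stations - 1)

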
def test_handlers_can_be_passed_and_accessed_via_sim_handlers_field():
    def f1(sim):
        sim.data.append(1)
        sim.schedule(0, sim.handlers.get('second'))

    def f2(sim):
        sim.data.append(2)
        sim.schedule(0, sim.handlers.third)

    def f3(sim):
        sim.data.append(3)

    def init(sim):
        sim.schedule(0, sim.handlers['first'])

    ret = simulate([], init=init, handlers=dict(first=f1, second=f2, third=f3))
    assert ret.data == [1, 2, 3]


def test_schedule_orders_events_by_time():
    def f(sim):
        sim.data.append(f'{int(sim.stime)}F')
        sim.schedule(1.0, g)

    def g(sim):
        sim.data.append(f'{int(sim.stime)}G')

    def h(sim):
        sim.data.append(f'{int(sim.stime)}H')

    def init(sim):
        sim.schedule(1.0, f)
        sim.schedule(4.0, f)
        sim.schedule(3.0, h)

    ret = simulate([], init)
    assert ret.data == ['1F', '2G', '3H', '4F', '5G']


def test_objective_mm1_model(mean_arrival, mean_service):
    ret = simulate(QueueingSystem, stime_limit=4000, params={
        'arrival_mean': mean_arrival,
        'service_mean': mean_service,
        'capacity': -1,
    })

    busy_rate = ret.data.server.busy_trace.timeavg()
    system_size = ret.data.system_size_trace.timeavg()
    est_arrival_mean = ret.data.source.intervals.statistic().mean()
    est_departure_mean = ret.data.sink.departures.statistic().mean()
    est_service_mean = ret.data.server.delays.mean()
    rho = mean_service / mean_arrival

    assert np.allclose(est_service_mean, mean_service, rtol=0.2)
    assert np.allclose(busy_rate, rho, rtol=0.2)
    assert np.allclose(system_size, rho / (1 - rho), atol=0.05, rtol=0.2)
    assert np.allclose(est_arrival_mean, mean_arrival, rtol=0.2)
    assert np.allclose(est_departure_mean, mean_arrival, rtol=0.2)


def test_tree_traverse_from_b_and_reverse_trace():
    """In this test we check that the init and finalize methods are really called.

    We start the traversal at node B instead of A (the root) and make sure that
    both the trace and the traversal time change. Moreover, we reverse the trace
    at the end by passing a finalization function which calls `list.reverse()`.
    """
    def start_from_b(sim):
        root = sim.data.tree.find('B')
        sim.schedule(0, sim.handlers['enter'], args=[root])

    def reverse_trace(sim):
        sim.data.trace.reverse()

    ret = simulate(TreeData, init=start_from_b, fin=reverse_trace, handlers={
        'enter': enter_node,
        'leave': leave_node,
    })

    np.testing.assert_allclose(ret.stime, 14.0, atol=0.1, rtol=0.01)
    assert ret.data.trace == ['E', 'D', 'C', 'B']


def test_cancel_tree_printing_before_deadline():
    """Validate that the cancel operation works properly.

    In this test we schedule printing all the nodes ('A'..'E') at one moment
    (t=5). However, we also launch the tree traversal (from t=0), and we cancel
    the scheduled print for every node we visit. This way printing is cancelled
    for nodes A, B, C and E. When a node is printed, we also cancel entering it,
    so the simulation stops at t=5, right after printing the nodes we did not
    reach (here, only D).
    """
    def enter_silent(sim, node):
        sim.cancel(sim.data.visit_evids[node.label])
        del sim.data.visit_evids[node.label]
        for child in node.children:
            eid = sim.schedule(node.delay, enter_silent, args=(child,))
            sim.data.enter_evids[child.label] = eid

    def visit_node(sim, node):
        sim.data.trace.append(node.label)
        sim.cancel(sim.data.enter_evids[node.label])
        del sim.data.enter_evids[node.label]

    def init(sim):
        def schedule_visit(node):
            eid = sim.schedule(5, visit_node, args=(node,))
            sim.data.visit_evids[node.label] = eid

        sim.data.tree.apply(schedule_visit)
        sim.schedule(0, enter_silent, args=(sim.data.tree,))

    data_class = namedtuple(
        'ModelData', ['tree', 'trace', 'visit_evids', 'enter_evids']
    )
    data = data_class(create_tree(), trace=[], visit_evids={}, enter_evids={})

    ret = simulate(data, init)

    assert ret.data.trace == ['D']
    assert ret.stime == 5


def test_mm1_single_hop_tandem_model(arrival, service, stime_limit):
    ret = simulate(QueueingTandemNetwork, stime_limit=stime_limit, params={
        'arrivals': [arrival],
        'services': [service],
        'queue_capacity': None,
        'num_stations': 1,
    })

    busy_rate = ret.data.servers[0].busy_trace.timeavg()
    system_size = ret.data.system_size_trace[0].timeavg()
    est_arrival_mean = ret.data.sources[0].intervals.statistic().mean()
    est_service_mean = ret.data.servers[0].service_intervals.mean()
    est_delay = ret.data.sources[0].delays.mean()
    est_departure_mean = ret.data.sink.arrival_intervals.statistic().mean()
    est_sys_wait = ret.data.system_wait_intervals[0].mean()
    est_queue_wait = ret.data.queues[0].wait_intervals.mean()

    mean_service = service.mean()
    mean_arrival = arrival.mean()
    rho = mean_service / mean_arrival
    expected_delay = mean_arrival * rho / (1 - rho)

    assert_allclose(est_service_mean, mean_service, rtol=0.25)
    assert_allclose(busy_rate, rho, rtol=0.25)
    assert_allclose(system_size, rho / (1 - rho), atol=0.05, rtol=0.25)
    assert_allclose(est_arrival_mean, mean_arrival, rtol=0.25)
    assert_allclose(est_departure_mean, mean_arrival, rtol=0.25)
    assert_allclose(est_delay, expected_delay, rtol=0.25)
    assert_allclose(est_sys_wait, expected_delay, rtol=0.25)
    assert_allclose(est_queue_wait, expected_delay - mean_service, rtol=0.25)

    assert ret.data.queues[0].drop_ratio == 0
    assert ret.data.queues[0].num_dropped == 0
    assert ret.data.queues[0].num_arrived > 0
    assert ret.data.servers[0].num_served > 0


def test_mm1_model(arrival, service, stime_limit):
    ret = simulate(QueueingSystem, stime_limit=stime_limit, params={
        'arrival': arrival,
        'service': service,
        'queue_capacity': None,
    })

    busy_rate = ret.data.server.busy_trace.timeavg()
    system_size = ret.data.system_size_trace.timeavg()
    est_arrival_mean = ret.data.source.intervals.statistic().mean()
    est_departure_mean = ret.data.sink.arrival_intervals.statistic().mean()
    est_service_mean = ret.data.server.service_intervals.mean()
    est_delay = ret.data.source.delays.mean()
    est_sys_wait = ret.data.system_wait_intervals.mean()
    est_queue_wait = ret.data.queue.wait_intervals.mean()

    mean_service = service.mean()
    mean_arrival = arrival.mean()
    rho = mean_service / mean_arrival
    expected_delay = mean_arrival * rho / (1 - rho)

    assert_allclose(est_service_mean, mean_service, rtol=0.25)
    assert_allclose(busy_rate, rho, rtol=0.25)
    assert_allclose(system_size, rho / (1 - rho), rtol=0.25)
    assert_allclose(est_arrival_mean, mean_arrival, rtol=0.25)
    assert_allclose(est_departure_mean, mean_arrival, rtol=0.25)
    assert_allclose(est_delay, expected_delay, rtol=0.25)
    assert_allclose(est_sys_wait, expected_delay, rtol=0.25)
    assert_allclose(est_queue_wait, expected_delay - mean_service, rtol=0.25)

    assert ret.data.queue.drop_ratio == 0
    assert ret.data.queue.num_dropped == 0
    assert ret.data.queue.num_arrived > 0
    assert ret.data.server.num_served > 0


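# A self-contained sketch of the M/M/1 expectations used in the assertions of
# the M/M/1 tests above (the helper is ours and is not called by the tests).
# Here `mean_arrival` and `mean_service` are mean intervals, so the utilization
# is rho = mean_service / mean_arrival and the mean sojourn time follows from
# Little's law: T = N * mean_arrival.
def _expected_mm1_metrics(mean_arrival, mean_service):
    rho = mean_service / mean_arrival      # server utilization
    system_size = rho / (1 - rho)          # mean number of packets in system
    delay = mean_arrival * system_size     # mean sojourn (system) time
    queue_wait = delay - mean_service      # mean waiting time before service
    return {
        'busy_rate': rho,
        'system_size': system_size,
        'delay': delay,
        'queue_wait': queue_wait,
    }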