def test_reset(sim_engine): sim_engine = sim_engine(diff_config={'exec_numMotes': 1}) def _callback(): pass trickle_timer = TrickleTimer(Imin, Imax, K, _callback) trickle_timer.start() # get ASN of 't' and one of the end of the interval original_event_at_t = None original_event_at_end_of_interval = None for event in sim_engine.events: if event[3] == trickle_timer.unique_tag_base + '_at_t': original_event_at_t = event elif event[3] == trickle_timer.unique_tag_base + '_at_i': original_event_at_end_of_interval = event u.run_until_asn(sim_engine, sim_engine.getAsn() + 1) # reset the timer trickle_timer.reset() # interval should be the minimum value by reset() assert trickle_timer.interval == Imin # events should be re-scheduled accordingly for event in sim_engine.events: if event[3] == trickle_timer.unique_tag_base + '_at_t': assert original_event_at_t is not event elif event[3] == trickle_timer.unique_tag_base + '_at_i': assert original_event_at_end_of_interval is not event
def test_redundancy_constant(sim_engine, num_consistency): sim_engine = sim_engine(diff_config={'exec_numMotes': 1}) result = {'is_callback_called': False} def _callback(): result['is_callback_called'] = True trickle_timer = TrickleTimer(Imin, Imax, K, _callback) # set one slotframe long to the interval (for test purpose) INITIAL_INTERVAL = 1010 # ms trickle_timer.start() trickle_timer.interval = INITIAL_INTERVAL trickle_timer._start_next_interval() for _ in range(num_consistency): trickle_timer.increment_counter() u.run_until_asn(sim_engine, sim_engine.settings.tsch_slotframeLength) if num_consistency < K: assert result['is_callback_called'] == True else: assert result['is_callback_called'] == False assert trickle_timer.interval == INITIAL_INTERVAL * 2
def test_app_upstream(sim_engine, app): """Test Application Upstream Traffic - objective : test if app generates and sends packets as expected - precondition: form a 2-mote linear network - precondition: app sends 5 packets during the simulation time - action : run the simulation for 10 seconds - expectation : each application sends five packets """ sim_engine = sim_engine( { 'exec_numMotes': 2, 'exec_numSlotframesPerRun': 11, 'sf_class': 'SFNone', 'conn_class': 'Linear', 'tsch_probBcast_ebProb': 0, 'app': app, 'app_pkPeriod': 2, 'app_pkPeriodVar': 0, 'app_pkLength': 90, 'app_burstTimestamp': 1, 'app_burstNumPackets': 5, }, force_initial_routing_and_scheduling_state=True, ) # give the network time to form u.run_until_asn(sim_engine, 1010) # the number of 'app.tx' is the same as the number of generated packets. logs = u.read_log_file(filter=['app.tx']) # five packets should be generated per application assert 5 <= len(logs)
def test_abort_on_responder(self, sim_engine): sim_engine = sim_engine(**COMMON_SIM_ENGINE_ARGS) install_sf(sim_engine.motes, SchedulingFunctionTwoStepForAbortion) root = sim_engine.motes[0] mote = sim_engine.motes[1] root.sf.issue_add_request(mote.get_mac_addr()) u.run_until_asn(sim_engine, 101) # mote should receive the request logs = u.read_log_file([SimLog.LOG_SIXP_RX['type']]) assert len(logs) == 1 assert logs[0]['_mote_id'] == mote.id # we should have two sixp.tx logs: one is for the request, the other is # for response logs = u.read_log_file([SimLog.LOG_SIXP_TX['type']]) assert len(logs) == 2 assert logs[0]['_mote_id'] == root.id assert logs[1]['_mote_id'] == mote.id assert logs[1]['packet']['app']['msgType'] == d.SIXP_MSG_TYPE_RESPONSE # abort the transaction on the responder assert len(mote.sixp.transaction_table) == 1 assert len(mote.tsch.txQueue) == 1 # handler should receive the "aborted" event assert mote.sf.received_aborted_event is False mote.sixp.abort_transaction(initiator_mac_addr=root.get_mac_addr(), responder_mac_addr=mote.get_mac_addr()) assert mote.sf.received_aborted_event is True assert len(mote.sixp.transaction_table) == 0 assert len(mote.tsch.txQueue) == 0
def test_interval_doubling(sim_engine): sim_engine = sim_engine(diff_config={'exec_numMotes': 1}) def _callback(): pass one_slotframe = sim_engine.settings.tsch_slotframeLength i_min = 1000 i_max = 2 trickle_timer = TrickleTimer(i_min, i_max, K, _callback) # set one slotframe long to the interval manually (for test purpose) INITIAL_INTERVAL = 1010 # ms trickle_timer.interval = INITIAL_INTERVAL trickle_timer._start_next_interval() assert trickle_timer.interval == INITIAL_INTERVAL # interval should be doubled u.run_until_asn(sim_engine, sim_engine.getAsn() + one_slotframe) assert trickle_timer.interval == INITIAL_INTERVAL * 2 # doubled interval will exceed the maximum interval. then, the resulting # interval should be the maximum value u.run_until_asn(sim_engine, sim_engine.getAsn() + one_slotframe * 2) assert trickle_timer.interval == i_min * pow(2, i_max)
def test_seqnum_increment(self, sim_engine, initial_seqnum): sim_engine = sim_engine(**COMMON_SIM_ENGINE_ARGS) # install a test SF install_sf(sim_engine.motes, SchedulingFunctionTwoStep) # for quick access mote_0 = sim_engine.motes[0] mote_1 = sim_engine.motes[1] # set initial SeqNum mote_0.sixp.seqnum_table[mote_1.get_mac_addr()] = initial_seqnum mote_1.sixp.seqnum_table[mote_0.get_mac_addr()] = initial_seqnum # execute one transaction mote_0.sf.issue_add_request(mote_1.get_mac_addr()) # wait a little bit u.run_until_asn(sim_engine, 500) # check the SeqNums both of the motes maintain if initial_seqnum == 255: expected_seqnum = 1 else: expected_seqnum = initial_seqnum + 1 assert mote_0.sixp.seqnum_table[ mote_1.get_mac_addr()] == expected_seqnum assert mote_1.sixp.seqnum_table[ mote_0.get_mac_addr()] == expected_seqnum
def test_dis_config(sim_engine, fixture_dis_mode): sim_engine = sim_engine( diff_config={ 'exec_numMotes': 2, 'rpl_extensions': [fixture_dis_mode], 'secjoin_enabled': False, 'app_pkPeriod': 0, 'tsch_keep_alive_interval': 0 }) root = sim_engine.motes[0] mote = sim_engine.motes[1] # give EB to mote eb = root.tsch._create_EB() mote.tsch._action_receiveEB(eb) # stop the trickle timer for this test root.rpl.trickle_timer.stop() # prepare sendPacket() for this test result = {'dis': None, 'dio': None} def sendPacket(self, packet): if packet['type'] == d.PKT_TYPE_DIS: dstIp = packet['net']['dstIp'] if fixture_dis_mode == 'dis_unicast': assert dstIp == root.get_ipv6_link_local_addr() else: assert dstIp == d.IPV6_ALL_RPL_NODES_ADDRESS result['dis'] = packet elif packet['type'] == d.PKT_TYPE_DIO: dstIp = packet['net']['dstIp'] if fixture_dis_mode == 'dis_unicast': assert dstIp == mote.get_ipv6_link_local_addr() else: assert dstIp == d.IPV6_ALL_RPL_NODES_ADDRESS result['dio'] = packet self.original_sendPacket(packet) mote.sixlowpan.original_sendPacket = mote.sixlowpan.sendPacket mote.sixlowpan.sendPacket = types.MethodType(sendPacket, mote.sixlowpan) root.sixlowpan.original_sendPacket = root.sixlowpan.sendPacket root.sixlowpan.sendPacket = types.MethodType(sendPacket, root.sixlowpan) mote.rpl.start() # run the simulation for a while u.run_until_asn(sim_engine, 1000) if fixture_dis_mode is None: assert result['dis'] is None assert result['dio'] is None else: assert result['dis'] is not None if fixture_dis_mode == 'dis_unicast': assert result['dio'] is not None else: # DIS is not sent immediately assert result['dio'] is None
def test_no_fragment_loss(self, sim_engine, app_pkLength, fragmentation, fragmentation_ff_discard_vrb_entry_policy): """ Test it with a basic case in which there is no fragment loss - objective : test if packets are delivered to the root (destination) - precondition: form a 3-mote linear topology - action : send packets from each motes except for the root - expectation : the root receives the packets """ sim_engine = sim_engine( { 'exec_numMotes': 3, 'exec_numSlotframesPerRun': 10000, 'sf_class': 'SFNone', 'conn_class': 'Linear', 'app_pkPeriod': 5, 'app_pkPeriodVar': 0, 'tsch_probBcast_ebProb': 0, 'sixlowpan_reassembly_buffers_num': 2, 'app_pkLength': app_pkLength, 'fragmentation': fragmentation, 'fragmentation_ff_discard_vrb_entry_policy': fragmentation_ff_discard_vrb_entry_policy }, force_initial_routing_and_scheduling_state=True, ) # run the simulation for 1000 timeslots (10 seconds) u.run_until_asn(sim_engine, 1000) # the root should receive packet from both of the two motes during 10 seconds. # - Packets are generated at every 5 seconds # - The first packet is generated within the first 5 seconds # - the minimum e2e latency of one fragment from the leaf is about 2 sec # - a packet is divided into two fragments at most in this test # - two fragments from the leaf need at least 4 sec to reach the root senders = [] for log in u.read_log_file(filter=['app.rx']): srcIp = log['packet']['net']['srcIp'] if srcIp not in senders: senders.append(srcIp) if len(senders) == 2: # root should receive packets from both of the two motes # if it reaches here, it means success return assert False
def test_getter(self, sim_engine): num_channels = 2 sim_engine = sim_engine( diff_config={ 'conn_class': 'Random', 'exec_numMotes': 2, 'conn_random_init_min_neighbors': 1, 'phy_numChans': num_channels, }) # PDR and RSSI should not change over time for src, dst in zip(sim_engine.motes[:-1], sim_engine.motes[1:]): for channel in d.TSCH_HOPPING_SEQUENCE[:num_channels]: pdr = [] rssi = [] for _ in range(100): pdr.append( sim_engine.connectivity.get_pdr(src_id=src.id, dst_id=dst.id, channel=channel)) rssi.append( sim_engine.connectivity.get_rssi(src_id=src.id, dst_id=dst.id, channel=channel)) # proceed the simulator u.run_until_asn(sim_engine, sim_engine.getAsn() + 1) # compare two consecutive PDRs and RSSIs. They should be always # the same value. Then, the following condition of 'i != j' # should always false assert sum([(i != j) for i, j in zip(pdr[:-1], pdr[1:])]) == 0 assert sum([(i != j) for i, j in zip(rssi[:-1], rssi[1:])]) == 0 # PDR and RSSI should be the same within the same slot, of course for src, dst in zip(sim_engine.motes[:-1], sim_engine.motes[1:]): for channel in d.TSCH_HOPPING_SEQUENCE[:num_channels]: pdr = [] rssi = [] for _ in range(100): pdr.append( sim_engine.connectivity.get_pdr(src_id=src.id, dst_id=dst.id, channel=channel)) rssi.append( sim_engine.connectivity.get_rssi(src_id=src.id, dst_id=dst.id, channel=channel)) # compare two consecutive PDRs and RSSIs; all the pairs should # be same (all comparison, i != j, should be False). assert sum([(i != j) for i, j in zip(pdr[:-1], pdr[1:])]) == 0 assert sum([(i != j) for i, j in zip(rssi[:-1], rssi[1:])]) == 0
def test_fragmentation_and_reassembly( self, sim_engine, app_pkLength, fragmentation, fragmentation_ff_discard_vrb_entry_policy): """Test fragmentation and reassembly themselves (w/o forwarding) - objective : test if a packet is divided to the expected number - precondition: form a 2-mote linear topology - precondition: app scheduled is done by hand (app_pkPeriod=0) - action : send a packet to the root - expectation : the number of fragments is the expected value """ sim_engine = sim_engine( diff_config={ 'exec_numMotes': 2, 'exec_numSlotframesPerRun': 20, 'sf_class': 'SFNone', 'conn_class': 'Linear', 'app_pkPeriod': 0, 'app_pkPeriodVar': 0, 'tsch_probBcast_ebProb': 0, 'tsch_max_payload_len': self.TSCH_MAX_PAYLOAD, 'tsch_tx_queue_size': self.TSCH_TX_QUEUE_SIZE, 'app_pkLength': app_pkLength, 'fragmentation': fragmentation, 'fragmentation_ff_discard_vrb_entry_policy': fragmentation_ff_discard_vrb_entry_policy }, force_initial_routing_and_scheduling_state=True) # send a packet from the leaf mote leaf = sim_engine.motes[1] # _send_a_single_packet() causes leaf to send a packet leaf.app._send_a_single_packet() # it's ready to test; run the simulation for long enough time u.run_until_asn(sim_engine, 1500) # check if fragment receptions happen the expected times logs = u.read_log_file(filter=['sixlowpan.pkt.rx']) assert (len([ log for log in logs if log['packet']['type'] == d.PKT_TYPE_FRAG ]) == math.ceil(float(app_pkLength) / self.TSCH_MAX_PAYLOAD))
def test_with_no_memory_for_fragment( self, sim_engine, sixlowpan_reassembly_buffers_num, fragmentation_ff_vrb_table_size, fragmentation, fragmentation_ff_discard_vrb_entry_policy): # We allocate no memory for PerHopReassembly and for FragmentForwarding # in order to see the stack behavior under the situation where it # cannot add an reassembly buffer nor VRB Table entry for an incoming # fragment. if ((sixlowpan_reassembly_buffers_num > 0) and (fragmentation_ff_vrb_table_size > 0)): # we skip this combination of parameters return sim_engine = sim_engine( diff_config={ 'exec_numMotes': 3, 'sf_class': 'SFNone', 'conn_class': 'Linear', 'app_pkPeriod': 5, 'app_pkPeriodVar': 0, 'tsch_probBcast_ebProb': 0, 'sixlowpan_reassembly_buffers_num': sixlowpan_reassembly_buffers_num, 'fragmentation_ff_vrb_table_size': fragmentation_ff_vrb_table_size, 'app_pkLength': 180, 'fragmentation': fragmentation, 'fragmentation_ff_discard_vrb_entry_policy': fragmentation_ff_discard_vrb_entry_policy }, force_initial_routing_and_scheduling_state=True, ) u.run_until_asn(sim_engine, 5000) # send an application packet from root to the other motes for test with # downward traffic sim_engine.motes[0].app._send_ack(sim_engine.motes[1].id, 180) sim_engine.motes[0].app._send_ack(sim_engine.motes[2].id, 180) u.run_until_asn(sim_engine, 10100)
def test_abort_on_initiator(self, sim_engine, fixture_msg_type): sim_engine = sim_engine(**COMMON_SIM_ENGINE_ARGS) install_sf(sim_engine.motes, SchedulingFunctionThreeStep) root = sim_engine.motes[0] mote = sim_engine.motes[1] assert len(root.sixp.transaction_table) == 0 assert len(root.tsch.txQueue) == 0 root.sf.issue_add_request(mote.get_mac_addr()) if fixture_msg_type == d.SIXP_MSG_TYPE_REQUEST: logs = u.read_log_file([SimLog.LOG_SIXP_TX['type']]) assert len(logs) == 1 assert logs[0]['_mote_id'] == root.id assert logs[0]['packet']['app']['msgType'] == ( d.SIXP_MSG_TYPE_REQUEST ) else: assert fixture_msg_type == d.SIXP_MSG_TYPE_CONFIRMATION u.run_until_asn(sim_engine, 102) # we should have two sixp.rx logs: one is for the request, the # other is for response logs = u.read_log_file([SimLog.LOG_SIXP_RX['type']]) assert len(logs) == 2 assert logs[0]['_mote_id'] == mote.id assert logs[1]['_mote_id'] == root.id # we should have three sixp.tx logs for each msg_type logs = u.read_log_file([SimLog.LOG_SIXP_TX['type']]) assert len(logs) == 3 assert logs[0]['_mote_id'] == root.id assert logs[1]['_mote_id'] == mote.id assert logs[2]['_mote_id'] == root.id assert logs[2]['packet']['app']['msgType'] == ( d.SIXP_MSG_TYPE_CONFIRMATION ) # abort the transaction on the initiator assert len(root.sixp.transaction_table) == 1 assert len(root.tsch.txQueue) == 1 root.sixp.abort_transaction( initiator_mac_addr = root.get_mac_addr(), responder_mac_addr = mote.get_mac_addr() ) assert len(root.sixp.transaction_table) == 0 assert len(root.tsch.txQueue) == 0
def test_run_until_asn(sim_engine, repeat4times): sim_engine = sim_engine(diff_config={ 'exec_numMotes': 1, 'exec_numSlotframesPerRun': 1, }) assert sim_engine.getAsn() == 0 for target_asn in range(1, 10, 5): u.run_until_asn( sim_engine, target_asn=target_asn, ) assert sim_engine.getAsn() == target_asn
def test_network_advertisement(sim_engine, fixture_adv_frame): sim_engine = sim_engine( diff_config={ 'exec_numMotes': 1, 'exec_numSlotframesPerRun': 100, # with 101 slots per slotframe, that's 10,100 slot total }) u.run_until_asn(sim_engine, 10000) logs = u.read_log_file(filter=['tsch.txdone']) # root should send more than one EB in a default simulation run assert len([l for l in logs if l['packet']['type'] == fixture_adv_frame]) > 0
def test_compute_battery_lifetime(sim_engine): # reproduce Issue #360 sim_engine = sim_engine( diff_config={ 'exec_numSlotframesPerRun': 1, 'exec_numMotes': 2, 'phy_numChans': 1, 'radio_stats_log_period_s': 60 }) root = sim_engine.motes[0] mote = sim_engine.motes[1] # set 0% of PDR to their links channel = d.TSCH_HOPPING_SEQUENCE[0] sim_engine.connectivity.matrix.set_pdr_both_directions(mote_id_1=root.id, mote_id_2=mote.id, channel=channel, pdr=0) # make up a radio activity of mote, which is supposed to consume # energy mote.radio.stats['tx_data'] = 100 # force mote to log radio stats (at ASN 0) mote.radio._log_stats() # stop the simulator at the last ASN u.run_until_asn(sim_engine, 101) # make mote synched mote.tsch.setIsSync(True) # confirm we have relevant logs logs = u.read_log_file(['radio.stats', 'tsch.synced']) logs = [log for log in logs if log['_mote_id'] == mote.id] assert len(logs) == 2 logs[0]['_type'] == 'radio.status' logs[0]['_asn'] == 0 logs[1]['_type'] == 'tsch.synced' logs[1]['_asn'] == 101 # run compute_kpis, which should end without raising an exception output = run_compute_kpis_py() # test done assert True
def test_exception_at_runtime(sim_engine, repeat4times): """test if an exception raised in a SimEngine thread is propagated here Run a simulation in one slotframe """ sim_engine = sim_engine() sim_engine.scheduleAtAsn( asn = 10, cb = _raiseException, uniqueTag = ('engine','_raiseException'), intraSlotOrder = 1, ) with pytest.raises(MyException): u.run_until_asn( sim_engine, target_asn = 20, # past the _raiseException event )
def test_concurrent_transactions(self, sim_engine): """6P must return RC_ERR_BUSY when it receives a request from a peer with whom it has already another transaction in process. """ sim_engine = sim_engine(**COMMON_SIM_ENGINE_ARGS) # for quick access initiator = sim_engine.motes[0] responder = sim_engine.motes[1] # trigger an ADD transaction, which will terminate by timeout on the # initiator.sfinitiator's side initiator.sixp.send_request( dstMac = responder.get_mac_addr(), command = d.SIXP_CMD_ADD, cellList = [], timeout_value = 200 ) # wait a little bit u.run_until_asn(sim_engine, 200) # now responder should have a transaction; issue a DELETE request, # which should cause RC_ERR_BUSY result = {'is_callback_called': False} def request_callback(event, packet): result['is_callback_called'] = True assert event == d.SIXP_CALLBACK_EVENT_PACKET_RECEPTION assert packet['type'] == d.PKT_TYPE_SIXP assert packet['app']['msgType'] == d.SIXP_MSG_TYPE_RESPONSE assert packet['app']['code'] == d.SIXP_RC_ERR_BUSY initiator.sixp.send_request( dstMac = responder.get_mac_addr(), command = d.SIXP_CMD_DELETE, cellList = [], callback = request_callback ) # wait a little bit u.run_until_asn(sim_engine, sim_engine.getAsn() + 200) assert result['is_callback_called'] is True
def test_schedule_inconsistency(self, sim_engine, initial_seqnum): sim_engine = sim_engine(**COMMON_SIM_ENGINE_ARGS) # for quick access mote_0 = sim_engine.motes[0] mote_1 = sim_engine.motes[1] # set initial SeqNum; mote_0 has zero, mote_1 has non-zero (1) mote_0.sixp.seqnum_table[mote_1.get_mac_addr()] = 0 mote_1.sixp.seqnum_table[mote_0.get_mac_addr()] = 1 # prepare assertion result = {'is_schedule_inconsistency_detected': False} def detect_schedule_inconsistency(self, peerMac): assert mote_0.is_my_mac_addr(peerMac) result['is_schedule_inconsistency_detected'] = True mote_1.sf.detect_schedule_inconsistency = types.MethodType( detect_schedule_inconsistency, mote_1.sf) # send a request which causes the responder to detect schedule # inconsistency. the initiator should receive RC_ERR_SEQNUM. result['is_rc_err_seqnum_received'] = False def request_callback(event, packet): assert event == d.SIXP_CALLBACK_EVENT_PACKET_RECEPTION assert packet['app']['msgType'] == d.SIXP_MSG_TYPE_RESPONSE assert packet['app']['code'] == d.SIXP_RC_ERR_SEQNUM result['is_rc_err_seqnum_received'] = True mote_0.sixp.send_request(dstMac=mote_1.get_mac_addr(), command=d.SIXP_CMD_COUNT, callback=request_callback) # wait a little bit u.run_until_asn(sim_engine, 500) assert result['is_schedule_inconsistency_detected'] is True assert result['is_rc_err_seqnum_received'] is True
def test_get_physical_channel(sim_engine): sim_engine = sim_engine(diff_config={ 'exec_numMotes': 1, 'tsch_slotframeLength': 101 }) mote = sim_engine.motes[0] minimal_cell = mote.tsch.get_cell(slot_offset=0, channel_offset=0, mac_addr=None, slotframe_handle=0) assert minimal_cell is not None for i in range(len(d.TSCH_HOPPING_SEQUENCE)): if i > 0: u.run_until_asn(sim_engine, (sim_engine.getAsn() + sim_engine.settings.tsch_slotframeLength)) assert (previous_channel != mote.tsch._get_physical_channel(minimal_cell)) else: pass previous_channel = mote.tsch._get_physical_channel(minimal_cell)
def test_transaciton_type(self, sim_engine, scheduling_function): sim_engine = sim_engine(**COMMON_SIM_ENGINE_ARGS) # install the test scheduling function to the motes install_sf(sim_engine.motes, scheduling_function) # trigger an ADD transaction sim_engine.motes[0].sf.issue_add_request(sim_engine.motes[1].id) u.run_until_asn(sim_engine, 1000) # trigger a DELETE transaction sim_engine.motes[0].sf.issue_delete_request(sim_engine.motes[1].id) u.run_until_asn(sim_engine, sim_engine.getAsn() + 1000) # trigger a RELOCATE transaction sim_engine.motes[0].sf.issue_relocate_request(sim_engine.motes[1].id) u.run_until_asn(sim_engine, sim_engine.getAsn() + 1000) # done assert True
def test_fragment_loss( self, sim_engine, fragmentation, fragmentation_ff_discard_vrb_entry_policy, target_datagram_offset, ): """ Test fragmentation with fragment loss - objective : test if a packet is lost there is a missing fragment - precondition: form a 3-mote linear topology - precondition: app scheduled is done by hand (app_pkPeriod=0) - precondition: a packet is divided into three fragments - action : send a packet from the leaf - action : drop one fragment of the packet - expectation : the root doesn't receive (reassemble) the packet """ sim_engine = sim_engine( { 'exec_numMotes': 3, 'exec_numSlotframesPerRun': 10, 'sf_class': 'SFNone', 'conn_class': 'Linear', 'app': 'AppPeriodic', 'app_pkPeriod': 0, 'app_pkPeriodVar': 0, 'app_pkLength': 270, 'tsch_probBcast_ebProb': 0, 'sixlowpan_reassembly_buffers_num': 2, 'tsch_max_payload_len': 90, 'fragmentation': fragmentation, 'fragmentation_ff_discard_vrb_entry_policy': fragmentation_ff_discard_vrb_entry_policy }, force_initial_routing_and_scheduling_state=True) # send a packet from the leaf mote leaf = sim_engine.motes[2] # _send_a_single_packet() causes leaf to send a packet leaf.app._send_a_single_packet() # retrieve fragments in its TX queue fragments = [] for frame in leaf.tsch.txQueue: if frame['type'] == d.PKT_TYPE_FRAG: fragments.append(frame) # make sure its TX queue has three fragments assert len(fragments) == 3 # remove one fragment from the TX queue for fragment in fragments: if fragment['net']['datagram_offset'] == target_datagram_offset: leaf.tsch.txQueue.remove(fragment) break # it's ready to test; run the simulation for long enough time u.run_until_asn(sim_engine, 1000) # the root should not receive a packet from the leaf # - the intermediate node should receive two fragments # - the number of fragment receptions by the root depends: # - if the 1st fragment is lost, the root receives one fragment # - if the 2nd or 3rd fragment is lost, the root receives two # - the root should not be able to 
reassemble a packet # - the root should not receive the packet logs = u.read_log_file(filter=['app.rx', 'sixlowpan.pkt.rx']) fragment_reception_count = 0 for log in logs: if ((log['_type'] == 'sixlowpan.pkt.rx') and (log['_mote_id'] == 1) and (log['packet']['type'] == d.PKT_TYPE_FRAG)): # count the fragment receptions by the intermediate node, whose # _mote_id is 1 fragment_reception_count += 1 elif log['_type'] == 'app.rx': # this should never happen; a packet never reaches the root assert False # again, the intermediate node should receive two fragments from the # leaf assert fragment_reception_count == 2
def test_e2e_latency( self, sim_engine, fragmentation, fragmentation_ff_discard_vrb_entry_policy, ): """Test end-to-end latency of a fragmented packet - objective : test if each forwarding technique shows expected end-to-end latency - precondition: form a 4-mote linear topology - precondition: a packet is divided into two fragments - action : send a packet from the leaf to the root - expectation : the root should receive the last fragment in slotframes of the expected number after the first one is tran - note : the intermediate node has a RX cell to the root just *after* a TX cell from the leaf """ # latency is expressed in the number of slotframes expected_e2e_latency = {'PerHopReassembly': 6, 'FragmentForwarding': 4} sim_engine = sim_engine( { 'exec_numMotes': 4, 'exec_numSlotframesPerRun': 10, 'sf_class': 'SFNone', 'conn_class': 'Linear', 'rpl_daoPeriod': 0, 'app_pkPeriod': 0, 'app_pkPeriodVar': 0, 'tsch_probBcast_ebProb': 0, 'app_pkLength': 180, 'fragmentation': fragmentation, 'fragmentation_ff_discard_vrb_entry_policy': fragmentation_ff_discard_vrb_entry_policy }, force_initial_routing_and_scheduling_state=True) sim_settings = SimEngine.SimSettings.SimSettings() # send a packet; its fragments start being forwarded at the next # timeslot where it has a dedicated TX cell leaf = sim_engine.motes[3] # _send_a_single_packet() causes leaf to send a packet leaf.app._send_a_single_packet() # run the simulation for long enough time u.run_until_asn(sim_engine, 1000) logs = u.read_log_file(filter=['app.rx', 'prop.transmission']) # asn_start: ASN the first fragment is transmitted by the leaf # asn_end : ASN the last fragment is received by the root asn_start = 0 asn_end = 0 for log in logs: if ((log['_type'] == 'sixlowpan.pkt.tx') and (log['packet']['srcMac'] == 3) and (log['packet']['type'] == d.PKT_TYPE_FRAG)): asn_start = log['_asn'] if log['_type'] == 'app.rx': # log 'app.rx' means the last fragment is received # by the root asn_end = log['_asn'] break e2e_latency = 
int( math.ceil( float(asn_end - asn_start) / sim_settings.tsch_slotframeLength)) assert e2e_latency == expected_e2e_latency[fragmentation]
def test_entry_expiration(self, sim_engine, fragmentation, fragmentation_ff_discard_vrb_entry_policy): """Test lifetime management on memory entries - objective : test if an expired memory entry is removed - precondition: form a 3-mote linear topology - action : inject a fragment to hop1 mote - action : wait until 50% of its expiration time - action : inject another fragment to hop1 mote - action : wait until expiration time of the first created entry - action : inject a fragment to hop1 mote to trigger memory housekeeping - expectation : the entry for the first fragment is removed """ sim_engine = sim_engine( diff_config={ 'exec_numMotes': 3, 'exec_numSlotframesPerRun': 60, 'app_pkPeriod': 0, 'app_pkPeriodVar': 0, 'tsch_probBcast_ebProb': 0, 'sixlowpan_reassembly_buffers_num': 2, 'fragmentation_ff_vrb_table_size': 2, 'fragmentation': fragmentation, 'fragmentation_ff_discard_vrb_entry_policy': fragmentation_ff_discard_vrb_entry_policy, }, force_initial_routing_and_scheduling_state=True) sim_settings = SimEngine.SimSettings.SimSettings() root = sim_engine.motes[0] hop1 = sim_engine.motes[1] hop2 = sim_engine.motes[2] # prepare three fragments: # fragment1_0: the first fragment of a packet # fragment2_0: the first fragment of a different packet # fragment2_1: the second fragment of the different packet fragment1_0 = { 'type': d.PKT_TYPE_FRAG, 'mac': { 'srcMac': hop2.id, 'dstMac': hop1.id, }, 'net': { 'srcIp': hop2.id, 'dstIp': root.id, 'hop_limit': d.IPV6_DEFAULT_HOP_LIMIT, 'datagram_size': 270, 'datagram_tag': 1, 'datagram_offset': 0, 'packet_length': 90 } } fragment2_0 = copy.copy(fragment1_0) fragment2_0['net'] = copy.deepcopy(fragment1_0['net']) fragment2_0['net']['datagram_tag'] = 2 fragment2_1 = copy.copy(fragment2_0) fragment2_1['net'] = copy.deepcopy(fragment2_0['net']) fragment2_1['net']['datagram_offset'] = 90 fragment2_1['net']['original_packet_type'] = d.PKT_TYPE_DATA, # compute the lifetime of an entry slots_per_sec = int(1.0 / 
sim_settings.tsch_slotDuration) if fragmentation == 'PerHopReassembly': memory_lifetime = d.SIXLOWPAN_REASSEMBLY_BUFFER_LIFETIME * slots_per_sec elif fragmentation == 'FragmentForwarding': memory_lifetime = d.SIXLOWPAN_VRB_TABLE_ENTRY_LIFETIME * slots_per_sec expiration_time = memory_lifetime + 1 # inject the first fragment assert get_memory_usage(hop1, fragmentation) == 0 hop1.sixlowpan.recvPacket(fragment1_0) assert get_memory_usage(hop1, fragmentation) == 1 # run the simulation until 50% of the lifetime u.run_until_asn(sim_engine, expiration_time / 2) # inject another fragment (the first fragment of a packet). hop1 # creates a new entry for this fragment (packet) hop1.sixlowpan.recvPacket(fragment2_0) assert get_memory_usage(hop1, fragmentation) == 2 # run the simulation until its expiration u.run_until_asn(sim_engine, expiration_time) # inject the other fragment (the second fragment of a packet). this # fragment doesn't cause hop1 to create a new entry hop1.sixlowpan.recvPacket(fragment2_1) # the memory should have only one entry for fragment2_0 and fragment2_1 assert get_memory_usage(hop1, fragmentation) == 1
def test_msf(self, sim_engine):
    """ Test Scheduling Function Traffic Adaptation

    - objective   : test if msf adjust the number of allocated cells in accordance with traffic
    - precondition: form a 2-mote linear network
    - precondition: the network is formed
    - action      : change traffic
    - expectation : MSF should trigger ADD/DELETE/RELOCATE accordingly

    NOTE: this test mutates module-level MSF constants
    (MSF_HOUSEKEEPINGCOLLISION_PERIOD, MSF_MIN_NUM_TX, MSF_MAX_NUMCELLS);
    only the housekeeping period is restored at the end.
    """
    # to make this test easy, change
    # MSF_HOUSEKEEPINGCOLLISION_PERIOD to 1 second
    msf_housekeeping_period_backup = d.MSF_HOUSEKEEPINGCOLLISION_PERIOD
    d.MSF_HOUSEKEEPINGCOLLISION_PERIOD = 1

    # the random seed is fixed so that cell allocation (and thus the
    # RELOCATE scenario below) is reproducible
    sim_engine = sim_engine(
        diff_config={
            'exec_randomSeed': 3413860673863013345,
            'app_pkPeriod': 0,
            'app_pkPeriodVar': 0,
            'exec_numMotes': 2,
            'exec_numSlotframesPerRun': 4000,
            'rpl_daoPeriod': 0,
            'tsch_keep_alive_interval': 0,
            'tsch_probBcast_ebProb': 0,
            'secjoin_enabled': False,
            'sf_class': 'MSF',
            'conn_class': 'Linear',
        })

    # for quick access
    root = sim_engine.motes[0]
    mote = sim_engine.motes[1]

    # disable DIO so the mote's join is driven only by the frames we
    # inject by hand below
    def do_nothing(self):
        pass
    mote.rpl._send_DIO = types.MethodType(do_nothing, mote)

    # get the mote joined: feed it a genuine EB from the root plus a
    # dummy EB with a worse join metric, then a DIO from the root
    eb = root.tsch._create_EB()
    eb_dummy = {
        'type': d.PKT_TYPE_EB,
        'mac': {
            'srcMac': '00-00-00-AA-AA-AA',  # dummy
            'dstMac': d.BROADCAST_ADDRESS,  # broadcast
            'join_metric': 1000
        }
    }
    mote.tsch._action_receiveEB(eb)
    mote.tsch._action_receiveEB(eb_dummy)
    dio = root.rpl._create_DIO()
    dio['mac'] = {
        'srcMac': root.get_mac_addr(),
        'dstMac': d.BROADCAST_ADDRESS
    }
    mote.sixlowpan.recvPacket(dio)

    # 1. test autonomous cell installation
    # 1.1 test Non-SHARED autonomous cell
    cells = [
        cell for cell in mote.tsch.get_cells(
            mac_addr=None,
            slotframe_handle=SchedulingFunctionMSF.SLOTFRAME_HANDLE)
        if cell.options == [d.CELLOPTION_TX, d.CELLOPTION_RX]
    ]
    assert len(cells) == 1
    # 1.2 test SHARED autonomous cell to root
    cells = [
        cell for cell in mote.tsch.get_cells(
            mac_addr=root.get_mac_addr(),
            slotframe_handle=SchedulingFunctionMSF.SLOTFRAME_HANDLE)
        if cell.options == [d.CELLOPTION_TX, d.CELLOPTION_RX, d.CELLOPTION_SHARED]
    ]
    assert len(cells) == 1

    # 2. test dedicated cell allocation
    # 2.1 decrease MSF_MIN_NUM_TX and MSF_MAX_NUMCELLS to speed up this test
    d.MSF_MIN_NUM_TX = 10
    d.MSF_MAX_NUMCELLS = 10

    # 2.2 confirm the mote doesn't have any dedicated cell to its parent
    cells = [
        cell for cell in mote.tsch.get_cells(
            mac_addr=root.get_mac_addr(),
            slotframe_handle=SchedulingFunctionMSF.SLOTFRAME_HANDLE)
        if cell.options == [d.CELLOPTION_TX]
    ]
    assert len(cells) == 0

    # 2.3 the mote should have triggered a 6P to allocate one
    # dedicated cell
    logs = u.read_log_file(filter=[SimLog.LOG_SIXP_TX['type']])
    assert len(logs) == 1
    packet = logs[0]['packet']
    assert packet['mac']['dstMac'] == root.get_mac_addr()
    assert packet['app']['msgType'] == d.SIXP_MSG_TYPE_REQUEST
    assert packet['app']['code'] == d.SIXP_CMD_ADD
    assert packet['app']['numCells'] == 1
    assert packet['app']['cellOptions'] == [d.CELLOPTION_TX]

    # in order to test the traffic adaptation mechanism of MSF,
    # disable the pending bit feature
    assert mote.tsch.pending_bit_enabled is True
    mote.tsch.pending_bit_enabled = False

    # wait until the managed cell is available
    u.run_until_asn(
        sim_engine,
        sim_engine.getAsn() + mote.settings.tsch_slotframeLength * 2)

    # mote should have one managed cell scheduled
    cells = [
        cell for cell in mote.tsch.get_cells(
            mac_addr=root.get_mac_addr(),
            slotframe_handle=SchedulingFunctionMSF.SLOTFRAME_HANDLE)
        if cell.options == [d.CELLOPTION_TX]
    ]
    assert len(cells) == 1

    # 2.4 send an application packet per slotframe
    mote.settings.app_pkPeriod = (mote.settings.tsch_slotframeLength / 2 * mote.settings.tsch_slotDuration)
    mote.app.startSendingData()

    # 2.5 run for 10 slotframes
    assert mote.sf.cell_utilization == 0.0
    u.run_until_asn(
        sim_engine,
        sim_engine.getAsn() + mote.settings.tsch_slotframeLength * 10)

    # 2.6 confirm the cell usage reaches 100%
    assert mote.sf.cell_utilization == 1.0

    # 2.7 one dedicated cell should be allocated in the next 2 slotframes
    u.run_until_asn(
        sim_engine,
        sim_engine.getAsn() + mote.settings.tsch_slotframeLength * 2)
    cells = [
        cell for cell in mote.tsch.get_cells(
            mac_addr=root.get_mac_addr(),
            slotframe_handle=SchedulingFunctionMSF.SLOTFRAME_HANDLE)
        if cell.options == [d.CELLOPTION_TX]
    ]
    assert len(cells) == 2
    # remember where the first dedicated cell sits; the root will drop
    # frames at this slot offset to force a RELOCATE below
    slot_offset = cells[0].slot_offset

    # adjust the packet interval
    mote.settings.app_pkPeriod = (mote.settings.tsch_slotframeLength / 3 * mote.settings.tsch_slotDuration)

    # 3. test cell relocation
    # 3.1 increase the following Rpl values in order to avoid invalidating
    # the root as a parent
    mote.rpl.of.MAX_NUM_OF_CONSECUTIVE_FAILURES_WITHOUT_ACK = 100
    mote.rpl.of.UPPER_LIMIT_OF_ACCEPTABLE_ETX = 100
    mote.rpl.of.MAXIMUM_STEP_OF_RANK = 100

    # 3.2 deny input frames over the dedicated cell on the side of the root
    def rxDone_wrapper(self, packet, channel):
        if ((packet is not None) and ((self.engine.getAsn() % mote.settings.tsch_slotframeLength) == slot_offset)):
            self.active_cell = None
            self.waitingFor = None
            # silently discard this packet
            return False
        else:
            return self.rxDone_original(packet, channel)
    root.tsch.rxDone_original = root.tsch.rxDone
    root.tsch.rxDone = types.MethodType(rxDone_wrapper, root.tsch)

    # 3.3 run for the next 20 slotframes
    asn_start = sim_engine.getAsn()
    u.run_until_asn(
        sim_engine,
        sim_engine.getAsn() + mote.settings.tsch_slotframeLength * 20)

    # 3.5 RELOCATE should have happened
    logs = [
        log for log in u.read_log_file(filter=['sixp.comp'], after_asn=asn_start)
        if ((log['_mote_id'] == mote.id) and (log['cmd'] == d.SIXP_CMD_RELOCATE))
    ]
    assert len(logs) == 1

    # 4. test dedicated cell deallocation
    # 4.1 stop application packet transmission
    mote.settings.app_pkPeriod = 0

    # 4.2 run for a while
    asn_start = sim_engine.getAsn()
    u.run_until_asn(
        sim_engine,
        sim_engine.getAsn() + mote.settings.tsch_slotframeLength * 20)

    # 4.3 DELETE should have happened
    logs = [
        log for log in u.read_log_file(filter=['sixp.comp'], after_asn=asn_start)
        if ((log['_mote_id'] == mote.id) and (log['cmd'] == d.SIXP_CMD_DELETE))
    ]
    assert len(logs) > 0

    # put the backup value to d.MSF_HOUSEKEEPINGCOLLISION_PERIOD
    d.MSF_HOUSEKEEPINGCOLLISION_PERIOD = msf_housekeeping_period_backup
def test_locked_slot_in_relocation_request(self, sim_engine): # MSF shouldn't select a slot offset out of the candidate cell # list which is in locked_slots sim_engine = sim_engine( diff_config={ 'exec_numMotes': 2, 'sf_class': 'MSF', 'conn_class': 'Linear', 'secjoin_enabled': False, 'app_pkPeriod': 0, 'rpl_daoPeriod': 0, 'rpl_extensions': [], 'tsch_keep_alive_interval': 0, 'tsch_probBcast_ebProb': 0 }) root = sim_engine.motes[0] mote = sim_engine.motes[1] u.get_join(root, mote) # wait for a while u.run_until_asn(sim_engine, 2 * sim_engine.settings.tsch_slotframeLength) # mote should have one dedicated cell cells = mote.tsch.get_cells(root.get_mac_addr(), mote.sf.SLOTFRAME_HANDLE) assert len(cells) == 2 cells = [cell for cell in cells if cell.options == [d.CELLOPTION_TX]] assert len(cells) == 1 cell = cells[0] # send a RELOCATE request to mote, which has the used slot # offset in both of the candidate cell list and the relocation # cell list target_slot_offset = cell.slot_offset + 1 if (cell.slot_offset + 1) == sim_engine.settings.tsch_slotframeLength: target_slot_offset = 1 # put target_slot_offset into locked_slots. target_slot_offset # is in the candidate cell list mote.sf.locked_slots.add(target_slot_offset) root.sixp.send_request(dstMac=mote.get_mac_addr(), command=d.SIXP_CMD_RELOCATE, cellOptions=[d.CELLOPTION_RX], numCells=1, relocationCellList=[{ 'slotOffset': cell.slot_offset, 'channelOffset': cell.channel_offset }], candidateCellList=[{ 'slotOffset': target_slot_offset, 'channelOffset': 0 }], callback=None) u.run_until_asn( sim_engine, sim_engine.getAsn() + 2 * sim_engine.settings.tsch_slotframeLength) logs = u.read_log_file(filter=[SimLog.LOG_SIXP_RX['type']]) assert len(logs) == 4 # including the first round-trip for ADD response = logs[-1]['packet'] assert response['app']['msgType'] == d.SIXP_MSG_TYPE_RESPONSE assert response['app']['code'] == d.SIXP_RC_SUCCESS assert len(response['app']['cellList']) == 0
def test_avg_hops(sim_engine, fragmentation, app_pkLength, pkt_loss_mode):
    """Test the avg_hops KPI computed by bin/compute_kpis.py.

    The leaf of a 10-mote linear topology sends two application packets;
    when pkt_loss_mode is 'with_pkt_loss' the second one is dropped at the
    root. avg_hops for the leaf must equal its hop distance (== leaf.id in
    a linear topology) in both cases.
    """
    sim_engine = sim_engine(
        diff_config={
            'exec_numSlotframesPerRun': 40,
            'exec_numMotes': 10,
            'fragmentation': fragmentation,
            'app': 'AppPeriodic',
            'app_pkPeriod': 0,
            'app_pkLength': app_pkLength,
            'tsch_probBcast_ebProb': 0,
            'rpl_daoPeriod': 0,
            'conn_class': 'Linear'
        },
        force_initial_routing_and_scheduling_state=True,
    )

    # in this test, the leaf sends two application packets. when pkt_loss_mode
    # is 'with_pkt_loss', the second application packet will be lost at the
    # root.

    # shorthands
    root = sim_engine.motes[0]
    leaf = sim_engine.motes[-1]
    sim_settings = SimSettings.SimSettings()

    # make the app send an application packet
    leaf.app._send_a_single_packet()

    # wait for some time
    u.run_until_asn(sim_engine, 2020)

    # the root should receive the first application packet
    logs = u.read_log_file([SimLog.LOG_APP_RX['type']])
    assert len(logs) == 1
    assert logs[0]['_mote_id'] == root.id
    assert logs[0]['packet']['net']['srcIp'] == leaf.get_ipv6_global_addr()
    assert logs[0]['packet']['net']['dstIp'] == root.get_ipv6_global_addr()
    assert logs[0]['packet']['type'] == d.PKT_TYPE_DATA

    # make the root not receive at 6LoWPAN layer anything if pkt_loss_mode is
    # 'with_pkt_loss'
    if pkt_loss_mode == 'with_pkt_loss':
        assert root.dagRoot is True

        def recvPacket(self, packet):
            # do nothing; ignore the incoming packet
            pass
        root.sixlowpan.recvPacket = types.MethodType(recvPacket, root.sixlowpan)
    elif pkt_loss_mode == 'without_pkt_loss':
        # do nothing
        pass
    else:
        # NOTE(fix): the original code raised NotImplemented() --
        # NotImplemented is a constant, not an exception class, and calling
        # it raises a confusing TypeError instead of the intended error
        raise NotImplementedError()

    # make the app send another application packet
    leaf.app._send_a_single_packet()

    # run the simulator until it ends
    u.run_until_end(sim_engine)

    # confirm the leaf sent two application packets
    logs = u.read_log_file([SimLog.LOG_APP_TX['type']])
    assert len(logs) == 2
    for i in range(2):
        assert logs[i]['_mote_id'] == leaf.id
        assert logs[i]['packet']['net']['srcIp'] == leaf.get_ipv6_global_addr()
        assert logs[i]['packet']['net']['dstIp'] == root.get_ipv6_global_addr()
        assert logs[i]['packet']['type'] == d.PKT_TYPE_DATA

    # run compute_kpis.py against the log file
    compute_kpis_path = os.path.join(os.path.dirname(__file__),
                                     '../bin',
                                     'compute_kpis.py')
    output = subprocess.check_output('{0} \'{1}\''.format(
        'python', compute_kpis_path), shell=True).split('\n')

    # remove blank lines
    output = [line for line in output if not re.match(r'^\s*$', line)]

    # confirm if compute_kpis.py referred the right log file
    # the first line of output has the log directory name
    assert (re.search(os.path.basename(sim_settings.getOutputFile()),
                      output[0]) is not None)

    # convert the body of the output, which is a JSON string, to an object
    json_string = '\n'.join(output[1:-1])
    kpis = json.loads(json_string)

    # the avg_hops should be the same number as leaf.id since we use a linear
    # topology here.
    assert kpis['null'][str(leaf.id)]['avg_hops'] == leaf.id
def test_dodag_parent(sim_engine, fixture_rank_value):
    """Test parent-set maintenance on DIO reception.

    A DIO from the current parent advertising a rank equal to or larger
    than the child's own rank must evict the parent from the child's
    parent set and make the child advertise INFINITE_RANK.
    """
    sim_engine = sim_engine(diff_config={
        'exec_numMotes': 3,
        'secjoin_enabled': False
    })

    root = sim_engine.motes[0]
    parent = sim_engine.motes[1]
    child = sim_engine.motes[2]

    # get them connected to the network
    eb = root.tsch._create_EB()
    eb_dummy = {
        'type': d.PKT_TYPE_EB,
        'mac': {
            'srcMac': '00-00-00-AA-AA-AA',  # dummy
            'dstMac': d.BROADCAST_ADDRESS,  # broadcast
            'join_metric': 1000
        }
    }
    parent.tsch._action_receiveEB(eb)
    parent.tsch._action_receiveEB(eb_dummy)
    child.tsch._action_receiveEB(eb)
    child.tsch._action_receiveEB(eb_dummy)
    dio = root.rpl._create_DIO()
    dio['mac'] = {'srcMac': root.get_mac_addr()}
    parent.rpl.action_receiveDIO(dio)
    dio = parent.rpl._create_DIO()
    dio['mac'] = {'srcMac': parent.get_mac_addr()}
    child.rpl.action_receiveDIO(dio)

    # make sure they are ready for the test
    assert parent.clear_to_send_EBs_DATA() is True
    assert child.clear_to_send_EBs_DATA() is True
    assert len(child.rpl.of.parents) == 1
    assert child.rpl.of.parents[0]['mac_addr'] == parent.get_mac_addr()

    # create a DIO of 'parent' for the test, advertising a rank that is
    # smaller than / equal to / larger than the child's own rank
    dio = parent.rpl._create_DIO()
    dio['mac'] = {'srcMac': parent.get_mac_addr()}
    if fixture_rank_value == 'smaller':
        dio['app']['rank'] = child.rpl.get_rank() - d.RPL_MINHOPRANKINCREASE
    elif fixture_rank_value == 'same':
        dio['app']['rank'] = child.rpl.get_rank()
    elif fixture_rank_value == 'larger':
        dio['app']['rank'] = child.rpl.get_rank() + d.RPL_MINHOPRANKINCREASE
    else:
        raise NotImplementedError()

    # process the global clock
    u.run_until_asn(sim_engine, 10)

    # give the dio to 'child'
    child.rpl.action_receiveDIO(dio)

    # see what happened
    if fixture_rank_value == 'smaller':
        # the parent should remain in the parent set of the child
        assert child.rpl.of.parents[0]['mac_addr'] == parent.get_mac_addr()
    else:
        # the parent should have been removed from the parent set of the child
        assert len(child.rpl.of.parents) == 0
        # the child should send a DIO having INFINITE_RANK (65535)
        logs = u.read_log_file(filter=[SimLog.LOG_RPL_DIO_TX['type']],
                               after_asn=sim_engine.getAsn() - 1)
        assert len(logs) == 1
        assert logs[0]['packet']['app']['rank'] == 65535
def test_vanilla_scenario(
        sim_engine,
        fixture_exec_numMotes,
        fixture_data_flow,
        fixture_secjoin_enabled,
        fixture_app_pkLength,
        fixture_fragmentation,
        fixture_ff_vrb_policy_missing_fragment,
        fixture_ff_vrb_policy_last_fragment,
        fixture_sf_class,
    ):
    """
    Let the network form, send data packets up and down.
    """
    # initialize the simulator; build the VRB discard policy list from the
    # two boolean fixtures
    fragmentation_ff_discard_vrb_entry_policy = []
    if fixture_ff_vrb_policy_missing_fragment:
        fragmentation_ff_discard_vrb_entry_policy += ['missing_fragment']
    if fixture_ff_vrb_policy_last_fragment:
        fragmentation_ff_discard_vrb_entry_policy += ['last_fragment']
    sim_engine = sim_engine(
        diff_config={
            'exec_numMotes': fixture_exec_numMotes,
            'exec_numSlotframesPerRun': 10000,
            'secjoin_enabled': fixture_secjoin_enabled,
            'app_pkLength': fixture_app_pkLength,
            'app_pkPeriod': 0,  # disable, will be send by test
            'rpl_daoPeriod': 60,
            'tsch_probBcast_ebProb': 0.33,
            'fragmentation': fixture_fragmentation,
            'fragmentation_ff_discard_vrb_entry_policy': fragmentation_ff_discard_vrb_entry_policy,
            'sf_class': fixture_sf_class,
            'conn_class': 'Linear',
        },
    )

    # === network forms

    # give the network time to form (20 minutes of simulated time at
    # 100 slots/second)
    u.run_until_asn(sim_engine, 20 * 60 * 100)

    # verify no packet was dropped
    #check_no_packet_drop()

    # verify that all nodes are sync'ed
    tsch_check_all_nodes_synced(sim_engine.motes)

    # verify that all nodes are join'ed
    secjoin_check_all_nodes_joined(sim_engine.motes)

    # verify neighbor tables
    check_neighbor_tables(sim_engine.motes)

    # verify that all nodes have acquired rank and preferred parent
    rpl_check_all_node_prefered_parent(sim_engine.motes)
    rpl_check_all_node_rank(sim_engine.motes)

    # verify that all nodes are sending EBs, DIOs and DAOs
    tsch_check_all_nodes_send_EBs(sim_engine.motes)
    rpl_check_all_nodes_send_DIOs(sim_engine.motes)
    rpl_check_all_motes_send_DAOs(sim_engine.motes)

    # verify that root has stored enough DAO information to compute source routes
    rpl_check_root_parentChildfromDAOs(sim_engine.motes)

    # verify that all nodes have a dedicated cell to their parent
    # (not applicable when no scheduling function is running)
    if fixture_sf_class != 'SFNone':
        tsch_all_nodes_check_dedicated_cell(sim_engine.motes)

    # === send data up/down

    # appcounter increments at each packet
    appcounter = 0

    # pick a "datamote" which will send/receive data
    datamote = sim_engine.motes[-1]  # pick furthest mote

    # get the DAG root
    dagroot = sim_engine.motes[sim_engine.DAGROOT_ID]

    # verify no packets yet received by root
    assert len(u.read_log_file([SimLog.LOG_APP_RX['type']])) == 0

    # send packets upstream (datamote->root)
    if fixture_data_flow.find("up") != -1:
        for _ in range(10):
            # inject data at the datamote
            datamote.app._send_a_single_packet()

            # give the data time to reach the root
            u.run_until_asn(sim_engine, sim_engine.getAsn() + 10000)

            # verify datamote got exactly one packet
            #count_num_app_rx(appcounter)

            # increment appcounter
            appcounter += 1

    # send data downstream (root->datamote)
    if fixture_data_flow.find("down") != -1:
        for _ in range(10):
            # inject data at the root
            dagroot.app._send_ack(datamote.id)

            # give the data time to reach the datamote
            u.run_until_asn(sim_engine, sim_engine.getAsn() + 10000)

            # verify datamote got exactly one packet
            #count_num_app_rx(appcounter)

            # increment appcounter
            appcounter += 1
def test_tx_with_two_slotframes(sim_engine):
    """Test TX over dedicated cells installed in two different slotframes.

    Two TX cells are installed for hop_1 (one per slotframe); two
    application packets sent back-to-back must go out on those two cells
    within a single slotframe cycle, and be received by the root on the
    matching slot offsets.
    """
    sim_engine = sim_engine(
        diff_config={
            'app_pkPeriod': 0,
            'exec_numMotes': 2,
            'exec_numSlotframesPerRun': 1000,
            'secjoin_enabled': False,
            'sf_class': 'SFNone',
            'conn_class': 'Linear',
            'rpl_extensions': [],
            'rpl_daoPeriod': 0
        })

    # shorthands
    root = sim_engine.motes[0]
    hop_1 = sim_engine.motes[1]

    # add one slotframe to the two motes (handle 1, 101 slots long)
    for mote in sim_engine.motes:
        mote.tsch.add_slotframe(1, 101)

    asn_at_end_of_simulation = (
        sim_engine.settings.tsch_slotframeLength *
        sim_engine.settings.exec_numSlotframesPerRun
    )

    u.run_until_everyone_joined(sim_engine)
    assert sim_engine.getAsn() < asn_at_end_of_simulation

    # put DIO to hop1
    dio = root.rpl._create_DIO()
    dio['mac'] = {'srcMac': root.get_mac_addr()}
    hop_1.rpl.action_receiveDIO(dio)

    # install one TX cells to each slotframe
    for i in range(2):
        hop_1.tsch.addCell(slotOffset=i + 1,
                           channelOffset=0,
                           neighbor=root.get_mac_addr(),
                           cellOptions=[d.CELLOPTION_TX],
                           slotframe_handle=i)
        root.tsch.addCell(slotOffset=i + 1,
                          channelOffset=0,
                          neighbor=hop_1.get_mac_addr(),
                          cellOptions=[d.CELLOPTION_RX],
                          slotframe_handle=i)

    # the first dedicated cell is scheduled at slot_offset 1, the other is at
    # slot_offset 2
    cell_in_slotframe_0 = hop_1.tsch.get_cells(root.get_mac_addr(), 0)[0]
    cell_in_slotframe_1 = hop_1.tsch.get_cells(root.get_mac_addr(), 1)[0]

    # run until the end of this slotframe
    slot_offset = sim_engine.getAsn() % 101
    u.run_until_asn(sim_engine, sim_engine.getAsn() + (101 - slot_offset - 1))

    # send two application packets, which will be sent over the dedicated cells
    hop_1.app._send_a_single_packet()
    hop_1.app._send_a_single_packet()

    # run for one slotframe
    asn = sim_engine.getAsn()
    assert (asn % 101) == 100  # the next slot is slotoffset 0
    u.run_until_asn(sim_engine, asn + 101)

    # check logs
    ## TX side (hop_1)
    logs = [
        log for log in u.read_log_file(filter=[SimLog.LOG_TSCH_TXDONE['type']],
                                       after_asn=asn)
        if log['_mote_id'] == hop_1.id
    ]
    assert len(logs) == 2
    assert (logs[0]['_asn'] % 101) == cell_in_slotframe_0.slot_offset
    assert (logs[1]['_asn'] % 101) == cell_in_slotframe_1.slot_offset

    ## RX side (root)
    logs = [
        log for log in u.read_log_file(filter=[SimLog.LOG_TSCH_RXDONE['type']],
                                       after_asn=asn)
        if log['_mote_id'] == root.id
    ]
    assert len(logs) == 2
    assert (logs[0]['_asn'] % 101) == cell_in_slotframe_0.slot_offset
    assert (logs[1]['_asn'] % 101) == cell_in_slotframe_1.slot_offset

    # confirm hop_1 has the minimal cell
    assert len(hop_1.tsch.get_cells(None)) == 1
    assert (hop_1.tsch.get_cells(None)[0].options == [
        d.CELLOPTION_TX, d.CELLOPTION_RX, d.CELLOPTION_SHARED
    ])
def test_tx_cell_selection(sim_engine, packet_type, destination, expected_cellOptions):
    """Verify which TSCH cell a mote picks to transmit a frame.

    Cell selection rules:

    - [CELLOPTION_TX] is used for a unicast packet toward a neighbor for
      which the sender owns a dedicated TX cell
    - [CELLOPTION_TX, CELLOPTION_RX, CELLOPTION_SHARED] is used otherwise

    With force_initial_routing_and_scheduling_state=True, each mote starts
    with one shared (TX/RX/SHARED) cell and one TX cell to its parent.
    """
    sim_engine = sim_engine(
        diff_config={
            'exec_numMotes': 3,
            'sf_class': 'SFNone',
            'conn_class': 'Linear',
            'app_pkPeriod': 0,
            'app_pkPeriodVar': 0,
            'tsch_probBcast_ebProb': 0,
        },
        force_initial_routing_and_scheduling_state=True)

    # the three motes of the linear topology, seen from the middle one
    parent = sim_engine.motes[0]
    mote = sim_engine.motes[1]
    child = sim_engine.motes[2]

    packet = {
        'type': packet_type,
        'app': {
            'rank': mote.rpl.get_rank(),
        },
        'net': {
            'srcIp': mote.get_ipv6_link_local_addr()
        },
    }

    # For packet_type == d.PKT_TYPE_DATA we check that the right cell is
    # chosen for fragments: a packet_length of 180 forces the packet to be
    # split into two fragments.
    if packet_type == d.PKT_TYPE_DATA:
        packet['net']['packet_length'] = 180

    # set destination IPv6 address and a corresponding neighbor entry
    if destination == 'broadcast':
        packet['net']['dstIp'] = d.IPV6_ALL_RPL_NODES_ADDRESS
    elif destination == 'parent':
        packet['net']['dstIp'] = parent.get_ipv6_link_local_addr()
        mote.sixlowpan.on_link_neighbor_list.append(parent.get_mac_addr())
    elif destination == 'child':
        packet['net']['dstIp'] = child.get_ipv6_link_local_addr()
        mote.sixlowpan.on_link_neighbor_list.append(child.get_mac_addr())

    # hand the packet to the 6LoWPAN layer of the sender
    mote.sixlowpan.sendPacket(packet)

    # give the simulator enough time to transmit (and retransmit) it
    u.run_until_asn(sim_engine, 1000)

    # as mentioned above, DATA packets show up in the logs as fragments
    expected_log_type = (
        d.PKT_TYPE_FRAG if packet_type == d.PKT_TYPE_DATA else packet_type
    )

    # collect every txdone entry that our mote produced for that packet type;
    # there can be more than one entry because of retransmissions
    matching_logs = [
        entry for entry in u.read_log_file(filter=['tsch.txdone'])
        if (mote.is_my_mac_addr(entry['packet']['mac']['srcMac'])
            and entry['packet']['type'] == expected_log_type)
    ]
    assert (len(matching_logs) > 0)

    # every transmission must have happened on a cell carrying the expected
    # cell options
    slotframe = mote.tsch.slotframes[0]
    for entry in matching_logs:
        cell = slotframe.get_cells_at_asn(entry['_asn'])[0]
        assert cell.options == expected_cellOptions