def test_send_single_packet(self):
    """Test fetching a single content object over the simulation bus."""
    self.icn_forwarder1.start_forwarder()
    self.icn_forwarder2.start_forwarder()
    self.simulation_bus.start_process()
    # Wire fwd1 -> fwd2 and route /test over that face; fwd2 holds the data.
    face_to_fwd2 = self.icn_forwarder1.linklayer.faceidtable.get_or_create_faceid(AddressInfo("icnfwd2", 0))
    self.icn_forwarder1.icnlayer.fib.add_fib_entry(Name("/test"), face_to_fwd2)
    self.icn_forwarder2.icnlayer.cs.add_content_object(Content("/test/data", "HelloWorld"), static=True)
    # Inject the interest at fwd1 and expect the content back from it.
    request = Interest("/test/data")
    self.fetchiface.send(self.encoder.encode(request), "icnfwd1")
    raw_reply, sender = self.fetchiface.receive()
    self.assertEqual(sender, "icnfwd1")
    reply = self.encoder.decode(raw_reply)
    self.assertEqual(reply, Content("/test/data", "HelloWorld"))
def test_simple_thunk_request_from_lower(self):
    """test receiving a thunk request from the network"""
    self.thunklayer.fib.add_fib_entry(Name("/dat"), [2])
    self.thunklayer.fib.add_fib_entry(Name("/fct"), [1])
    name = Name("/fct/f1")
    for component in ("_(/dat/data/d1)", "THUNK", "NFN"):
        name += component
    self.thunklayer.queue_from_lower.put([1, Interest(name)])
    # Drain the four thunk-probe interests the layer sends towards the network.
    probes = [self.thunklayer.queue_to_lower.get(timeout=2) for _ in range(4)]
    self.assertEqual(probes[0], [1, Interest(Name('/fct/f1/THUNK'))])
    rewritten = Name("/dat/data/d1")
    for component in ('/fct/f1(_)', 'THUNK', 'NFN'):
        rewritten += component
    self.assertEqual(probes[1], [1, Interest(rewritten)])
    original_form = Name("/fct/f1")
    for component in ('_(/dat/data/d1)', 'THUNK', 'NFN'):
        original_form += component
    self.assertEqual(probes[2], [1, Interest(original_form)])
    self.assertEqual(probes[3], [1, Interest(Name('/dat/data/d1/THUNK'))])
    # Answer each probe with a cost; the cheapest complete plan costs 6.
    for probe, cost in zip(probes, (3, 6, 9, 12)):
        self.thunklayer.queue_from_lower.put([1, Content(probe[1].name, str(cost))])
    result = self.thunklayer.queue_to_lower.get(timeout=2)
    self.assertEqual(result, [1, Content(name, str(6))])
def get_next_inner_computation(self, arg: str):
    """
    Handles the inner computation part from get_next.

    Transforms and encodes the name, forwards an interest for it through
    queue_to_lower, then blocks on get_content() for the result.

    :param arg: the name as a string
    :return: the content from the content object
    """
    print("[get_next - inner computation] starts here.")
    # Transform the raw expression, then encode the resulting name components.
    transformed = self.transform_inner(arg)
    encoded_name = self.encode_name_components(Name(transformed))
    print("[get_next - inner computation] after encoding:", encoded_name)
    self.queue_to_lower.put((self.packetid, Interest(encoded_name)))
    result = self.get_content(encoded_name)
    print("[get_next - inner computation] ends here with result:", result)
    return result
def test_ICNLayer_interest_to_app_layer_no_pit(self):
    """Test sending an interest message from APP with no PIT entry.

    The interest must be handed to the higher layer and a PIT entry created.
    """
    queue_to_higher = multiprocessing.Queue()
    queue_from_higher = multiprocessing.Queue()
    self.icn_layer.queue_to_higher = queue_to_higher
    self.icn_layer.queue_from_higher = queue_from_higher
    self.icn_layer._interest_to_app = True
    self.icn_layer.start_process()
    face_id = 1
    from_face_id = 2
    n = Name("/test/data")
    i = Interest(n)
    self.icn_layer.fib.add_fib_entry(n, [face_id], True)
    self.icn_layer.queue_from_lower.put([from_face_id, i])
    try:
        data = self.icn_layer.queue_to_higher.get(timeout=2.0)
    except Exception:  # was a bare except; a timeout (queue.Empty) means failure
        self.fail()
    self.assertEqual(data[1], i)
    self.assertEqual(self.icn_layer.pit.find_pit_entry(n).interest, i)
def main(args):
    """Start an ICN data repository configured from the parsed CLI args."""
    prefix = Name(args.icnprefix)
    log_level = logging.DEBUG
    # Select the wire-format encoder requested on the command line.
    encoder = NdnTlvEncoder() if args.format == "ndntlv" else SimpleStringEncoder(log_level=log_level)
    repo = ICNDataRepository(args.datapath, prefix, args.port, log_level, encoder=encoder, autoconfig=args.autoconfig)
    repo.start_repo()
    # Block until the link layer process terminates.
    repo.linklayer.process.join()
def compute_local(self, prepended_prefix: Name, ast: AST, interest: Interest) -> bool:
    """Decide whether the computation described by the AST should start locally.

    :param prepended_prefix: prefix prepended to the computation (unused here)
    :param ast: parsed NFN expression
    :param interest: the interest that triggered the computation
    :return: True if the computation should be executed locally
    """
    if not isinstance(ast, AST_FuncCall):
        # Only start if the outermost computation element is a function call.
        return False
    function_name = Name(ast._element)
    if not self.cs.find_content_object(function_name):
        return False  # function code not cached locally -> do not start computation
    # Removed an unused `pit = self.pit.get_container()` local.
    pit_entry = self.pit.find_pit_entry(interest.name)
    if not pit_entry:
        return True
    faceid = pit_entry.faceids[0]
    addr_info = self.faceidtable.get_address_info(faceid)
    if "rsu" in addr_info.address and len(function_name.string_components) > 1 \
            and "id" not in function_name.string_components[1]:
        # Do not start the computation if it comes from another RSU.
        # TODO: improve this check for IP addresses.
        return False
    return True
def setUp(self):
    """Create a fresh /tmp/repo directory and a fully wired BasicThunkLayer.

    Fix: `self.path` is now assigned *before* it is used in the cleanup step;
    previously the first run relied on a bare except to swallow the resulting
    AttributeError. Excepts are narrowed to OSError (filesystem errors).
    """
    self.path = "/tmp/repo"
    # Best-effort cleanup of leftovers from earlier runs.
    try:
        shutil.rmtree(self.path)
        os.remove("/tmp/repo")
    except OSError:
        pass
    try:
        os.stat(self.path)
    except OSError:
        os.mkdir(self.path)
    with open(self.path + "/d2", 'w+') as content_file:
        content_file.write("data2")
    # Build the synchronized data structures the thunk layer needs.
    factory = PiCNSyncDataStructFactory()
    factory.register("cs", ContentStoreMemoryExact)
    factory.register("fib", ForwardingInformationBaseMemoryPrefix)
    factory.register("pit", PendingInterestTableMemoryExact)
    factory.register("faceidtable", FaceIDDict)
    factory.register("thunkTable", ThunkList)
    factory.register("planTable", PlanTable)
    factory.create_manager()
    self.cs = factory.manager.cs()
    self.fib = factory.manager.fib()
    self.pit = factory.manager.pit()
    self.faceidtable = factory.manager.faceidtable()
    self.thunkTable = factory.manager.thunkTable()
    self.parser = DefaultNFNParser()
    self.planTable = factory.manager.planTable(self.parser)
    self.repo = SimpleFileSystemRepository("/tmp/repo", Name("/dat/data"), multiprocessing.Manager())
    self.thunklayer = BasicThunkLayer(self.cs, self.fib, self.pit, self.faceidtable,
                                      self.thunkTable, self.planTable, self.parser, self.repo)
    self.thunklayer.queue_to_higher = multiprocessing.Queue()
    self.thunklayer.queue_to_lower = multiprocessing.Queue()
    self.thunklayer.queue_from_higher = multiprocessing.Queue()
    self.thunklayer.queue_from_lower = multiprocessing.Queue()
    self.thunklayer.start_process()
def handle_ca(self, faceid: int, content: Content, to_lower: multiprocessing.Queue):
    """Unpack the received ca message and create interests for all available chunks."""
    # The ca name arrives packed; restore the plain name before table lookups.
    content.name = self.unpack(content.name)
    ca_entry = self._ca_table.get(content.name)
    ca_entry.received_all = True
    # Payload is a ';'-separated chunk-name list, or the literal "complete".
    chunks_str = content.content.split(";")
    request_entry = self.get_request_entry(content.name)
    if request_entry and not self.pass_through:
        if chunks_str[0] == "complete":
            # Neighbour has complete data
            ca_entry.completely_available = True
            if request_entry.md_complete:
                self.create_chunk_interests(faceid, request_entry, ca_entry, to_lower)
        else:
            ca_entry.chunks = [Name(chunk) for chunk in chunks_str]
            # Create interests for all chunks that are available from neighbour
            for chunk in ca_entry.chunks:
                # NOTE(review): logger.info is called print-style with two positional
                # arguments; stdlib logging would treat the second as a %-format arg.
                # Presumably self.logger is a project wrapper -- confirm.
                self.logger.info("TO NEIGHBOUR:", self.pack_ca(chunk, ca_entry))
                if chunk not in request_entry.requested_chunks and chunk not in [i.name for i in request_entry.chunks]:
                    request_entry.requested_chunks.append(chunk)
                    to_lower.put([faceid, Interest(self.pack_ca(chunk, ca_entry))])
            self._request_table.append(request_entry)
            # If there is no name in requested_md try to request the remaining chunks.
            # This is only necessary to get a NACK, so the simulation continues.
            if not request_entry.requested_md:
                for chunk in [i for i in request_entry.requested_chunks if i not in ca_entry.chunks]:
                    self.logger.info("TO ORIGINAL SOURCE:", chunk)
                    to_lower.put([faceid, Interest(chunk)])
                self._request_table.remove(request_entry)
    # Persist the updated availability entry.
    self._ca_table[content.name] = ca_entry
def test_fetching_a_lot_of_packets(self):
    """sending a lot of packets using a forwarder and chunking

    Bug fix: the fetch loop previously built `icn_name` from the stale `name`
    variable left over from the write loop (always "/f9"), so every iteration
    fetched the same object. It now uses the current `fname`.
    """
    self.ICNRepo.start_repo()
    self.forwarder.start_forwarder()
    self.add_face_and_forwadingrule()
    self.path = "/tmp/repo_unit_test"
    try:
        os.stat(self.path)
    except OSError:
        os.mkdir(self.path)
    # Write nine identical chunked files /f1 .. /f9.
    for i in range(1, 10):
        name = "/f" + str(i)
        with open(self.path + name, 'w+') as content_file:
            content_file.write(self.data3)
    time.sleep(2)
    # Fetch each file by its own ICN name and verify the payload.
    for i in range(1, 10):
        fname = "/f" + str(i)
        icn_name = "/test/data" + fname
        content = self.fetch.fetch_data(Name(icn_name), timeout=10)
        self.assertEqual(content, self.data3)
def test_chunk_single_metadata(self):
    """Chunk a three-chunk payload and verify the single metadata object."""
    name = Name("/test/data")
    payload = "A" * 4096 + "B" * 4096 + "C" * 4096
    md, content = self.chunkifyer.chunk_data(Content(name, payload))
    expected_md_names = ['/test/data']
    expected_md_data = ['mdo:/test/data/c0;/test/data/c1;/test/data/c2:']
    expected_chunk_names = ['/test/data/c0', '/test/data/c1', '/test/data/c2']
    expected_chunk_data = ["A" * 4096, "B" * 4096, "C" * 4096]
    for idx, mdo in enumerate(md):
        self.assertEqual(mdo.name.to_string(), expected_md_names[idx])
        self.assertEqual(mdo.content, expected_md_data[idx])
    for idx, chunk in enumerate(content):
        self.assertEqual(chunk.name.to_string(), expected_chunk_names[idx])
        self.assertEqual(chunk.content, expected_chunk_data[idx])
def test_get_forwarder_advertisement(self):
    """Test simple retrieval of the forwarder advertisement"""
    self.autoconflayer.start_process()
    # Solicit the forwarder advertisement on face 42.
    name = Name('/autoconfig/forwarders')
    self.faceidtable.add(42, AddressInfo(('127.13.37.42', 4567), 0))
    self.queue_from_lower.put([42, Interest(name)])
    # The advertisement must come back on the soliciting face.
    fid, packet = self.queue_to_lower.get()
    self.assertEqual(42, fid)
    self.assertIsInstance(packet, Content)
    self.assertEqual(name, packet.name)
    # Advertisement body: forwarder address plus route/prefix lines.
    lines: List[str] = [ln for ln in packet.content.split('\n') if len(ln) > 0]
    self.assertEqual(4, len(lines))
    self.assertEqual('udp4://127.0.1.1:1337', lines[0])
    self.assertIn('r:/global', lines)
    self.assertIn('pg:/test/repos', lines)
    self.assertIn('pl:/home', lines)
def test_ageing(self):
    """The ageing timer must keep re-sending routing interests to the peer."""
    waittime: float = 3.0
    peerfid = self.linklayer.faceidtable.get_or_create_faceid(AddressInfo(self.peer, 0))
    self.routinglayer._ageing_interval = 0.5
    self.routinglayer.start_process()
    # Collect everything sent downwards for a short while.
    deadline = datetime.utcnow() + timedelta(seconds=waittime)
    collected = []
    while datetime.utcnow() < deadline:
        try:
            collected.append(self.queue_to_lower.get(timeout=waittime / 10))
        except queue.Empty:
            pass
    # With a 0.5s interval we expect clearly more than 5 routing interests.
    routing_interest = Interest(Name('/routing'))
    matching = [pkt for pkt in collected if pkt[0] == peerfid and pkt[1] == routing_interest]
    self.assertGreater(len(matching), 5)
def test_ICNLayer_interest_forward_deduplication(self):
    """Test ICN layer with no CS and no PIT entry and deduplication.

    A second interest for the same name must be aggregated into the existing
    PIT entry instead of being forwarded again.
    """
    self.icn_layer.start_process()
    to_face_id = 1
    from_face_id_1 = 2
    from_face_id_2 = 3
    # Add entry to the fib
    name = Name("/test")
    interest1 = Interest("/test/data")
    interest2 = Interest("/test/data")
    self.icn_layer.fib.add_fib_entry(name, [to_face_id])
    # forward entry
    self.queue1_icn_routing_up.put([from_face_id_1, interest1])
    try:
        face_id, data = self.queue1_icn_routing_down.get(timeout=2.0)
    except Exception:  # was a bare except; a timeout (queue.Empty) means failure
        self.fail()
    # Duplicate interest from a second face: must NOT be forwarded again.
    self.queue1_icn_routing_up.put([from_face_id_2, interest2], block=True)
    self.assertTrue(self.queue1_icn_routing_down.empty())
    time.sleep(3)
    # check output
    self.assertEqual(face_id, to_face_id)
    self.assertEqual(data, interest1)
    time.sleep(0.3)  # sleep required, since there is no blocking get before the checks
    # check data structures
    self.assertEqual(self.icn_layer.cs.get_container_size(), 0)
    self.assertEqual(self.icn_layer.fib.get_container_size(), 1)
    self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
    self.assertEqual(self.icn_layer.fib.find_fib_entry(name).faceid, [to_face_id])
    self.assertEqual(self.icn_layer.fib.find_fib_entry(name).name, name)
    self.assertEqual(len(self.icn_layer.pit.find_pit_entry(interest1.name).faceids), 2)
    self.assertEqual(self.icn_layer.pit.find_pit_entry(interest1.name).faceids, [from_face_id_1, from_face_id_2])
    self.assertEqual(self.icn_layer.pit.find_pit_entry(interest1.name).name, interest1.name)
def test_chunk_multiple_metadata_reassemble(self):
    """Test chunking metadata with three metadata objects and 10 chunks and reassemble"""
    name = Name("/test/data")
    # Nine full 4096-byte chunks A..I plus a short 4000-byte J chunk.
    payload = "".join(ch * 4096 for ch in "ABCDEFGHI") + "J" * 4000
    content = Content(name, payload)
    md, chunked_content = self.chunkifyer.chunk_data(content)
    expected_md_names = ['/test/data', '/test/data/m1', '/test/data/m2']
    expected_md_data = [
        'mdo:/test/data/c0;/test/data/c1;/test/data/c2;/test/data/c3:/test/data/m1',
        'mdo:/test/data/c4;/test/data/c5;/test/data/c6;/test/data/c7:/test/data/m2',
        'mdo:/test/data/c8;/test/data/c9:'
    ]
    expected_chunk_names = ['/test/data/c' + str(i) for i in range(10)]
    expected_chunk_data = [ch * 4096 for ch in "ABCDEFGHI"] + ["J" * 4000]
    for idx, mdo in enumerate(md):
        self.assertEqual(mdo.name.to_string(), expected_md_names[idx])
        self.assertEqual(mdo.content, expected_md_data[idx])
    for idx, chunk in enumerate(chunked_content):
        self.assertEqual(chunk.name.to_string(), expected_chunk_names[idx])
        self.assertEqual(chunk.content, expected_chunk_data[idx])
    # Reassembling the chunks must reproduce the original content object.
    reassembled = self.chunkifyer.reassamble_data(md[0].name, chunked_content)
    self.assertEqual(content, reassembled)
def test_ICNLayer_content_to_app_layer(self):
    """get content to app layer

    A content object matching a local-app PIT entry must be delivered upwards.
    """
    queue_to_higher = multiprocessing.Queue()
    queue_from_higher = multiprocessing.Queue()
    self.icn_layer.queue_to_higher = queue_to_higher
    self.icn_layer.queue_from_higher = queue_from_higher
    self.icn_layer.start_process()
    face_id = -1
    from_face_id = 1
    n = Name("/test/data")
    self.icn_layer.pit.add_pit_entry(n, face_id, interest=None, local_app=True)
    self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
    c = Content(n, "HelloWorld")
    self.icn_layer.queue_from_lower.put([from_face_id, c])
    try:
        data = self.icn_layer.queue_to_higher.get(timeout=2.0)
    except Exception:  # was a bare except; a timeout (queue.Empty) means failure
        self.fail()
    self.assertEqual(data, [1, c])
def chunk_data(self, packet: Content) -> (List[Content], List[Content]):
    """Split content to chunks and generate metadata.

    :param packet: content object whose payload should be chunked
    :return: tuple (metadata objects, chunk content objects)
    """
    name = packet.name
    data = packet.content
    content_size = len(packet.content)
    # Fixed-size slices of the payload.
    chunks = [data[i:i + self._chunksize] for i in range(0, len(data), self._chunksize)]
    num_of_chunks = len(chunks)
    meta_data = []
    for i in range(0, num_of_chunks, self._num_of_names_in_metadata):
        endindex = min(i + self._num_of_names_in_metadata, num_of_chunks)
        md_num = int(i / self._num_of_names_in_metadata)
        # Index of the following metadata object; 0 marks the last one.
        # (Renamed from `next` to avoid shadowing the builtin.)
        next_md = 0
        if i + self._num_of_names_in_metadata < num_of_chunks:
            next_md = int(i / self._num_of_names_in_metadata) + 1
        meta_data.append(self.generate_meta_data(i, endindex, md_num, next_md, packet.name, content_size))
    content = []
    for i in range(0, num_of_chunks):
        chunk_name = Name(name.to_string() + "/c" + str(i))
        content.append(Content(chunk_name, chunks[i]))
    return meta_data, content
def _handle_forwarders(self, packet: Packet, addr_info: AddressInfo):
    """Process a forwarder advertisement and flush matching held interests."""
    if not isinstance(packet, Content):
        return
    # NOTE(review): comparing content[0] to 128 only matches if the payload is
    # bytes; for a str payload this branch can never trigger -- confirm type.
    if len(packet.content) > 0 and packet.content[0] == 128:
        self.logger.error(f'This implementation cannot handle the autoconfig binary wire format.')
        return
    # Parse the received packet:
    # Parse the first line containing the forwarder's ip:port.
    lines: List[str] = packet.content.split('\n')
    scheme, addr = lines[0].split('://', 1)
    if scheme != 'udp4':
        self.logger.error(f'Don\'t know how to handle scheme {scheme} in forwarder advertisement.')
        return
    host, port = addr.split(':')
    fwd_addr = AddressInfo((host, int(port)), addr_info.interface_id)
    fwd_fid = self._linklayer.faceidtable.get_or_create_faceid(fwd_addr)
    # Parse the following lines of type:value pairs, only process routes.
    for line in lines[1:]:
        if len(line.strip()) == 0:
            continue
        t, n = line.split(':')
        if t == 'r':
            # Forward every held interest whose name falls under this route,
            # then drop those interests from the held list.
            name: Name = Name(n)
            for interest in self._held_interests:
                if name.is_prefix_of(interest.name):
                    self.queue_to_lower.put([fwd_fid, interest])
            self._held_interests = [i for i in self._held_interests if not name.is_prefix_of(i.name)]
    # Only cancel the forwarder solicitation timer if there are not held interests left.
    if self._solicitation_timer is not None and len(self._held_interests) == 0:
        self._solicitation_timer.cancel()
        self._solicitation_timer = None
def parse_nfn_str(name: str) -> Name:
    """Parse an NFN expression string into a network name; None on failure."""
    expression = name.replace("'", "")
    parser = DefaultNFNParser()
    optimizer = BaseNFNOptimizer(None, None, None, None)
    if '/NFN' in expression:
        expression = expression.replace("/NFN", "")
    ast = parser.parse(expression)
    if ast is None:
        return None
    # Prefer data names from the AST; fall back to function names.
    names = optimizer._get_names_from_ast(ast)
    if names is None or names == []:
        names = optimizer._get_functions_from_ast(ast)
    if names is None or names == []:
        return None
    prepend_name = Name(names[0])
    if prepend_name is None:
        return None
    name_str = optimizer._set_prepended_name(ast, prepend_name, ast)
    if name_str is None:
        return None
    return parser.nfn_str_to_network_name(name_str)
def setUp(self):
    """Create the repo test directory with three data files and the fetch tool.

    Consistency fix: /f3 is now written from self.data3 instead of a duplicated
    literal of the same value. The bare excepts are narrowed to OSError.
    """
    self.data1 = "data1"
    self.data2 = 'A' * 5000
    self.data3 = 'B' * 5000 + 'C' * 5000 + 'DE' * 5000
    self.encoder = self.get_encoder()
    self.path = "/tmp/repo_unit_test"
    try:
        os.stat(self.path)
    except OSError:
        os.mkdir(self.path)
    with open(self.path + "/f1", 'w+') as content_file:
        content_file.write(self.data1)
    with open(self.path + "/f2", 'w+') as content_file:
        content_file.write(self.data2)
    with open(self.path + "/f3", 'w+') as content_file:
        content_file.write(self.data3)
    self.ICNRepo: ICNDataRepository = ICNDataRepository("/tmp/repo_unit_test", Name("/test/data"), 0,
                                                        encoder=self.get_encoder(), log_level=255)
    self.repo_port = self.ICNRepo.linklayer.interfaces[0].get_port()
    self.fetch = Fetch("127.0.0.1", self.repo_port, encoder=self.get_encoder())
def run_simulation(self):
    """Start forwarder, repo and client, then fetch content via autoconfig."""
    for component, delay in ((self.forwarder, 1.0), (self.repo, 1.0), (self.client, 5.0)):
        component.start_all()
        time.sleep(delay)
    # Send an interest with a fixed name, let autoconfig figure out where to get the data from
    name = Name('/test/prefix/repos/testrepo/testcontent')
    self.client.queue_from_higher.put([None, Interest(name)])
    try:
        data = self.client.queue_to_higher.get(timeout=20.0)
    except queue.Empty:
        self.fail()
    received = data[1]
    if isinstance(received, Content) and received.name == name and received.content == "testcontent":
        print("Simulation successful")
    else:
        print("Error")
def test_ICNLayer_interest_to_app_layer_cs(self):
    """Test sending an interest message from APP with a CS entry.

    The interest must be answered from the cache and never reach the app layer.
    """
    queue_to_higher = multiprocessing.Queue()
    queue_from_higher = multiprocessing.Queue()
    self.icn_layer.queue_to_higher = queue_to_higher
    self.icn_layer.queue_from_higher = queue_from_higher
    self.icn_layer._interest_to_app = True
    self.icn_layer.start_process()
    face_id = 1
    from_face_id = 2
    n = Name("/test/data")
    i = Interest(n)
    c = Content(n, "Hello World")
    self.icn_layer.fib.add_fib_entry(n, [face_id], True)
    self.icn_layer.cs.add_content_object(c)
    self.icn_layer.queue_from_lower.put([from_face_id, i])
    try:
        to_face_id, data = self.icn_layer.queue_to_lower.get(timeout=2.0)
    except Exception:  # was a bare except; a timeout (queue.Empty) means failure
        self.fail()
    self.assertEqual(to_face_id, from_face_id)
    self.assertEqual(data, c)
    self.assertTrue(self.icn_layer.queue_to_higher.empty())  # --> was answered by using Content from cache
def data_from_lower(self, to_lower: multiprocessing.Queue, to_higher: multiprocessing.Queue, data):
    """Dispatch a [fid, Packet] pair received from the lower layer.

    Routing interests are answered with a RIB dump, routing content updates
    the RIB; everything else is passed through to the higher layer.
    """
    self.logger.info(f'Received data from lower: {data}')
    if len(data) != 2:
        self.logger.warn('Expects [fid, Packet] from lower')
        return
    rcv_fid, packet = data
    now = datetime.utcnow()
    if packet.name == self._prefix:
        if isinstance(packet, Interest):
            self.logger.info('Received routing interest')
            # Serialize the RIB as "name:distance:remaining-seconds" lines;
            # -1 encodes "no timeout".
            output: str = ''
            for name, fid, dist, timeout in self.rib.entries():
                if timeout is None:
                    output = f'{output}{name}:{dist}:-1\n'
                else:
                    output = f'{output}{name}:{dist}:{int((timeout - now).total_seconds())}\n'
            content: Content = Content(self._prefix, output.encode('utf-8'))
            self.queue_to_lower.put([rcv_fid, content])
        elif isinstance(packet, Content):
            self.logger.info('Received routing content')
            rib: BaseRoutingInformationBase = self.rib
            # NOTE(review): the dump above is utf-8 bytes, but split() is called
            # with a str separator -- presumably decoded before reaching here; verify.
            lines: List[str] = [l for l in packet.content.split('\n') if len(l) > 0]
            for line in lines:
                # rsplit tolerates ':' characters inside the name component.
                name, dist, timeout = line.rsplit(':', 2)
                if timeout == '-1':
                    timeout = self._rib_maxage
                else:
                    timeout = timedelta(seconds=int(timeout))
                # One hop further away; entry age is capped at _rib_maxage.
                rib.insert(Name(name), rcv_fid, int(dist) + 1, now + min(timeout, self._rib_maxage))
        return
    # Not a routing packet: hand it to the higher layer unchanged.
    self.queue_to_higher.put(data)
def test_ICNLayer_interest_from_app_layer_pit(self):
    """Test sending an interest message from APP with a PIT entry --> interest not for higher layer"""
    queue_to_higher = multiprocessing.Queue()
    queue_from_higher = multiprocessing.Queue()
    self.icn_layer.queue_to_higher = queue_to_higher
    self.icn_layer.queue_from_higher = queue_from_higher
    self.icn_layer._interest_to_app = True
    self.icn_layer.start_process()
    face_id = 1
    from_face_id = 2
    n = Name("/test/data")
    i = Interest(n)
    self.icn_layer.fib.add_fib_entry(n, [face_id], True)
    # Pre-existing PIT entry NOT flagged as local app.
    self.icn_layer.pit.add_pit_entry(n, from_face_id, face_id, i, local_app=False)
    self.assertFalse(self.icn_layer.pit.find_pit_entry(n).local_app[0])
    self.icn_layer.queue_from_higher.put([0, i])
    try:
        to_face_id, data = self.icn_layer.queue_to_lower.get(timeout=2.0)
    except Exception:  # was a bare except; a timeout (queue.Empty) means failure
        self.fail()
    self.assertEqual(to_face_id, face_id)
    self.assertEqual(i, data)
    self.assertEqual(self.icn_layer.pit.find_pit_entry(n).interest, i)
    self.assertFalse(self.icn_layer.pit.find_pit_entry(n).local_app[0])  # Just forward, not from local app
def test_ICNForwarder_simple_find_content_one_node(self):
    """Test a simple forwarding scenario, getting content from a Node"""
    self.forwarder1.start_forwarder()
    # Install new content via the TCP management interface.
    testMgmtSock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    testMgmtSock1.connect(("127.0.0.1", self.forwarder1_port))
    testMgmtSock1.send("GET /icnlayer/newcontent/%2Ftest%2Fdata%2Fobject:HelloWorld HTTP/1.1\r\n\r\n".encode())
    data = testMgmtSock1.recv(1024)
    testMgmtSock1.close()
    time.sleep(3)
    self.assertEqual(data.decode(), "HTTP/1.1 200 OK \r\n Content-Type: text/html \r\n\r\n newcontent OK\r\n")
    # create test content
    name = Name("/test/data/object")
    test_content = Content(name, content="HelloWorld")
    cs_fwd1 = self.forwarder1.icnlayer.cs
    # NOTE(review): compares the CS entry's .content attribute against a whole
    # Content object -- this only passes if the entry stores the packet itself
    # (not the raw payload string). Confirm against the CS implementation.
    self.assertEqual(cs_fwd1.find_content_object(name).content, test_content)
    # create interest
    interest = Interest("/test/data/object")
    encoded_interest = self.encoder.encode(interest)
    # send interest
    self.testSock.sendto(encoded_interest, ("127.0.0.1", self.forwarder1_port))
    # receive content
    encoded_content, addr = self.testSock.recvfrom(8192)
    content = self.encoder.decode(encoded_content)
    self.assertEqual(content, test_content)
def test_ICNLayer_interest_forward_longest_match(self):
    """Test ICN layer with no CS and no PIT entry and longest match.

    The interest for /test/data must be forwarded via the FIB entry for /test.
    """
    self.icn_layer.start_process()
    to_face_id = 1
    from_face_id = 2
    # Add entry to the fib
    name = Name("/test")
    interest = Interest("/test/data")
    self.icn_layer.fib.add_fib_entry(name, [to_face_id], static=True)
    # forward entry
    self.queue1_icn_routing_up.put([from_face_id, interest])
    try:
        face_id, data = self.queue1_icn_routing_down.get(timeout=2.0)
    except Exception:  # was a bare except; a timeout (queue.Empty) means failure
        self.fail()
    # check output
    self.assertEqual(face_id, to_face_id)
    self.assertEqual(data, interest)
    # check data structures
    self.assertEqual(self.icn_layer.cs.get_container_size(), 0)
    self.assertEqual(self.icn_layer.fib.get_container_size(), 1)
    self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
    self.assertEqual(self.icn_layer.fib.find_fib_entry(name).faceid, [to_face_id])
    self.assertEqual(self.icn_layer.fib.find_fib_entry(name).name, name)
    self.assertEqual(self.icn_layer.pit.find_pit_entry(interest.name).faceids[0], from_face_id)
    self.assertEqual(self.icn_layer.pit.find_pit_entry(interest.name).name, interest.name)
def test_simple_thunk_request_from_lower_data_local_mdo_cached(self):
    """test receiving a thunk request from the network with some data local meta data cached"""
    self.thunklayer.fib.add_fib_entry(Name("/dat"), [2])
    self.thunklayer.fib.add_fib_entry(Name("/fct"), [1])
    # The function's meta data object is already cached locally.
    self.thunklayer.cs.add_content_object(Content(Name("/fct/f1"), "mdo:2:data"))
    name = Name("/fct/f1")
    for component in ("_(/dat/data/d1)", "THUNK", "NFN"):
        name += component
    self.thunklayer.queue_from_lower.put([1, Interest(name)])
    # Only three probes go out, since the function mdo is cached.
    probes = [self.thunklayer.queue_to_lower.get(timeout=2) for _ in range(3)]
    rewritten = Name("/dat/data/d1")
    for component in ('/fct/f1(_)', 'THUNK', 'NFN'):
        rewritten += component
    self.assertEqual(probes[0], [1, Interest(rewritten)])
    original_form = Name("/fct/f1")
    for component in ('_(/dat/data/d1)', 'THUNK', 'NFN'):
        original_form += component
    self.assertEqual(probes[1], [1, Interest(original_form)])
    # Answer the probes with costs; the cheapest plan costs 2.
    for probe, cost in zip(probes, (9, 9, 2)):
        self.thunklayer.queue_from_lower.put([1, Content(probe[1].name, str(cost))])
    result = self.thunklayer.queue_to_lower.get()
    self.assertEqual(result, [1, Content(name, str(2))])
def test_ready_computations(self):
    """Test if the list of ready computations is complete"""
    comp_names = [Name("/test"), Name("/data"), Name("/hello"), Name("/world")]
    for comp_id, comp_name in enumerate(comp_names):
        self.computationList.add_computation(comp_name, comp_id, Interest(comp_name))
    request_name = Name("/request")
    request_name2 = Name("/request2")
    # The first three computations await /request, the fourth awaits /request2.
    for entry in self.computationList.container[:3]:
        entry.add_name_to_await_list(request_name)
    self.computationList.container[3].add_name_to_await_list(request_name2)
    # Satisfying /request makes exactly the first three computations ready.
    self.computationList.push_data(Content(request_name))
    ready_comps = self.computationList.get_ready_computations()
    self.assertEqual(len(ready_comps), 3)
    self.assertEqual(ready_comps, self.computationList.container[:3])
def setup_faces_and_connections(self):
    """Start the three RSU forwarders and wire them in a chain rsu1 <-> rsu2 <-> rsu3.

    Each RSU gets its own version of /rsu/func/f1 whose busy-loop length differs,
    so the result string reveals which RSU executed the function.
    """
    self.rsu1.start_forwarder()
    self.rsu2.start_forwarder()
    self.rsu3.start_forwarder()
    self.simulation_bus.start_process()
    # setup rsu1: face towards rsu2, route /rsu over it, slowest variant tagged 'RSU1'.
    self.mgmt_client1.add_face("rsu2", None, 0)
    self.mgmt_client1.add_forwarding_rule(Name("/rsu"), 0)
    self.mgmt_client1.add_new_content(Name("/rsu/func/f1"), "PYTHON\nf\ndef f(a):\n for i in range(0,30000000):\n a.upper()\n return a.upper() + ' RSU1'")
    # setup rsu2: faces towards both neighbours, route /rsu via face 0.
    self.mgmt_client2.add_face("rsu1", None, 0)
    self.mgmt_client2.add_face("rsu3", None, 0)
    self.mgmt_client2.add_forwarding_rule(Name("/rsu"), 0)
    #self.mgmt_client2.add_forwarding_rule(Name("/rsu"), 1)
    self.mgmt_client2.add_new_content(Name("/rsu/func/f1"), "PYTHON\nf\ndef f(a):\n for i in range(0,60000000):\n a.upper()\n return a.upper() + ' RSU2'")
    # setup rsu3: face towards rsu2, route /rsu over it.
    self.mgmt_client3.add_face("rsu2", None, 0)
    self.mgmt_client3.add_forwarding_rule(Name("/rsu"), 0)
    self.mgmt_client3.add_new_content(Name("/rsu/func/f1"), "PYTHON\nf\ndef f(a):\n for i in range(0,50000000):\n a.upper()\n return a.upper() + ' RSU3'")
def entries(self) -> List[Tuple[Name, int, int, datetime]]:
    """Return all RIB entries as (name, faceid, distance, timeout) tuples."""
    result = []
    for raw_name, fid, dist, timeout in self._tree.collapse():
        # Wrap the raw component list back into a Name object.
        result.append((Name(raw_name), fid, dist, timeout))
    return result
def __iter__(self) -> Iterator[Tuple[Name, int, int, datetime]]:
    """Yield every RIB entry as a (name, faceid, distance, timeout) tuple."""
    for entry in self._tree.collapse():
        raw_name, fid, dist, timeout = entry
        # Wrap the raw component list back into a Name object.
        yield Name(raw_name), fid, dist, timeout