class BasicEncoder(object):
    """Abstract encoder for the BasicPacketEncoding layer.

    Subclasses translate between the wire format and the ``Packet`` data
    structure. The logger name and level are stored on the instance so the
    (unpicklable) logger can be recreated after unpickling.
    """

    def __init__(self, logger_name="BasicEncoder", log_level=255):
        self.__logger_name = logger_name
        self.__log_level = log_level
        self.logger = Logger(self.__logger_name, self.__log_level)

    def set_log_level(self, log_level):
        """Change the log level of the encoder's logger.

        FIX: also remember the new level so a logger recreated after
        unpickling (see __setstate__) uses it instead of the stale
        construction-time level.
        """
        self.__log_level = log_level
        self.logger.setLevel(log_level)

    @abc.abstractmethod
    def encode(self, packet: Packet):
        """encode a packet to wireformat"""

    @abc.abstractmethod
    def decode(self, wire_data) -> Packet:
        """decode a packet to Packet data structure"""

    def __getstate__(self):
        # Drop the logger before pickling: it holds OS handles and
        # cannot be pickled.
        d = dict(self.__dict__)
        if 'logger' in d:
            del d['logger']
        return d

    def __setstate__(self, d):
        self.__dict__.update(d)
        # need to store logger parameter and recreate logger here, since it cannot be pickled
        self.logger = Logger(self.__logger_name, self.__log_level)
def main(args):
    """Entry point: configure logging and run a PinnedNFN server."""
    # Translate the textual CLI log level into a numeric one; any
    # unrecognised value disables logging (255).
    log_level = {
        'error': logging.ERROR,
        'warning': logging.WARNING,
        'info': logging.INFO,
        'debug': logging.DEBUG,
    }.get(args.logging, 255)

    logger = Logger("Repo", log_level)

    # Info
    logger.info("Starting a PinnedNFN Server...")
    logger.info("UDP Port: " + str(args.port))
    logger.info("Log Level: " + args.logging)
    logger.info("Packet Format: Extended NDN Packet Format")

    # Packet encoder
    encoder = ExtendedNdnTlvEncoder(log_level)

    # Build the stack and block until the link-layer process terminates.
    server = PiCN.Playground.Heartbeats.Nodes.HeartbeatComputationStack(
        port=args.port, log_level=log_level, encoder=encoder)
    server.start_forwarder()
    server.link_layer.process.join()
def main(args):
    """Entry point: bring up a heartbeat forwarder node."""
    # Map the CLI logging choice onto a numeric level (255 = disabled).
    level_by_name = {
        'error': logging.ERROR,
        'warning': logging.WARNING,
        'info': logging.INFO,
        'debug': logging.DEBUG,
    }
    log_level = level_by_name.get(args.logging, 255)

    logger = Logger("ICNForwarder", log_level)

    # Info
    logger.info("Starting a Heartbeat Nodes...")
    logger.info("UDP Port: " + str(args.port))
    logger.info("Log Level: " + args.logging)
    logger.info("Packet Format: Extended NDN Packet Format")

    # Packet encoder
    encoder = ExtendedNdnTlvEncoder(log_level)

    # Start the forwarder stack and wait for the link layer to finish.
    forwarder = HeartbeatForwarderStack(args.port, log_level, encoder)
    forwarder.start_forwarder()
    forwarder.link_layer.process.join()
def main(args):
    """Entry point: start an assisted-sharing repository node."""
    # Map CLI log-level names to numeric levels (255 disables logging).
    log_level = {
        'error': logging.ERROR,
        'warning': logging.WARNING,
        'info': logging.INFO,
        'debug': logging.DEBUG,
    }.get(args.logging, 255)

    logger = Logger("Repo", log_level)

    # Info
    logger.info("Starting a Repo...")
    logger.info("UDP Port: " + str(args.port))
    logger.info("Log Level: " + args.logging)
    logger.info("Packet Format: " + args.format)

    # Packet encoder.
    # FIX: the fallback previously passed the SimpleStringEncoder *class*
    # instead of an instance, which breaks encoder.set_log_level() inside
    # the stack (cf. the NFN forwarder entry point, which instantiates it).
    if args.format == 'ndntlv':
        encoder = NdnTlvEncoder(log_level)
    else:
        encoder = SimpleStringEncoder(log_level)

    # Start the stack and block until the link-layer process terminates.
    forwarder = PiCN.Playground.AssistedSharing.RepoStack(
        args.port, log_level, encoder)
    forwarder.start_forwarder()
    forwarder.link_layer.process.join()
def main(args):
    """Entry point: start a CCN/ICN forwarder."""
    # Map CLI log-level names to numeric levels (255 disables logging).
    log_level = {
        'error': logging.ERROR,
        'warning': logging.WARNING,
        'info': logging.INFO,
        'debug': logging.DEBUG,
    }.get(args.logging, 255)

    logger = Logger("ICNForwarder", log_level)

    # Info
    logger.info("Starting a CCN Forwarder...")
    logger.info("UDP Port: " + str(args.port))
    logger.info("Log Level: " + args.logging)
    logger.info("Packet Format: " + args.format)

    # Packet encoder.
    # FIX: the fallback previously passed the SimpleStringEncoder *class*
    # instead of an instance, which breaks encoder.set_log_level() inside
    # the forwarder stack.
    if args.format == 'ndntlv':
        encoder = NdnTlvEncoder(log_level)
    else:
        encoder = SimpleStringEncoder(log_level)

    # Start the forwarder and block until the link-layer process exits.
    forwarder = PiCN.ProgramLibs.ICNForwarder.ICNForwarder(
        args.port, log_level, encoder, autoconfig=args.autoconfig)
    forwarder.start_forwarder()
    forwarder.linklayer.process.join()
def main(args):
    """Issue one interest per parameter value, each from its own thread."""
    threads = []
    logger = Logger("ExpLogger", 255)
    params = list(range(201, 701))
    # random.shuffle(params)  # kept disabled: issue interests in order

    # One shared UDP socket for all worker threads.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.settimeout(20000)
    sock.bind(("0.0.0.0", 0))

    for param in params:
        interest_name = Name("/the/prefix/bernoulli/" + str(param) + "/1/pNFN")
        # Argument order must match run_singel_interest's signature.
        worker_args = [args.format, args.ip, args.port, args.plain,
                       interest_name, logger, sock]
        worker = Thread(target=run_singel_interest, args=worker_args)
        time.sleep(0.5)  # stagger interest emission
        worker.start()
        threads.append(worker)

    for worker in threads:
        worker.join()
def __init__(self,
             port=8500,
             log_level=255,
             encoder: BasicEncoder = NdnTlvEncoder):
    """Publisher stack: repo layer over packet encoding and a UDP link layer.

    :param port: UDP port the link layer listens on
    :param log_level: numeric log level propagated to all layers
    :param encoder: encoder instance, or an encoder class (the declared
                    default is the NdnTlvEncoder *class*), which is
                    instantiated here
    """
    # debug level
    logger = Logger("Publisher", log_level)

    # packet encoder
    # FIX: the default value is the encoder class; calling
    # set_log_level() on the class (no instance) raises TypeError, so
    # instantiate it first.
    if isinstance(encoder, type):
        encoder = encoder()
    encoder.set_log_level(log_level)
    self.encoder = encoder

    # create datastruct
    synced_data_struct_factory1 = PiCNSyncDataStructFactory()
    synced_data_struct_factory1.register("face_id_table", FaceIDDict)
    synced_data_struct_factory1.create_manager()
    face_id_table = synced_data_struct_factory1.manager.face_id_table()

    # initialize layers (bottom-up: link, encoding, repo)
    self.link_layer = BasicLinkLayer([UDP4Interface(port)], face_id_table,
                                     log_level=log_level)
    self.packet_encoding_layer = BasicPacketEncodingLayer(
        self.encoder, log_level=log_level)
    self.repo_layer = RepoLayer(log_level=log_level)

    self.stack: LayerStack = LayerStack(
        [self.repo_layer, self.packet_encoding_layer, self.link_layer])
def __init__(self,
             replica_id,
             port=9500,
             log_level=255,
             encoder: BasicEncoder = NdnTlvEncoder):
    """Pinned-NFN server stack: computation layer over ICN forwarding.

    :param replica_id: identifier of this replica, handed to the pinned
                       computation layer
    :param port: UDP port the link layer listens on
    :param log_level: numeric log level propagated to all layers
    :param encoder: encoder instance, or an encoder class (the declared
                    default is the NdnTlvEncoder *class*), instantiated here
    """
    # debug level
    logger = Logger("Repo", log_level)

    # packet encoder
    # FIX: the default value is the encoder class; calling
    # set_log_level() on the class raises TypeError, so instantiate first.
    if isinstance(encoder, type):
        encoder = encoder()
    encoder.set_log_level(log_level)
    self.encoder = encoder

    # create datastruct
    synced_data_struct_factory = PiCNSyncDataStructFactory()
    synced_data_struct_factory.register("cs", ContentStoreMemoryExact)
    synced_data_struct_factory.register(
        "fib", ForwardingInformationBaseMemoryPrefix)
    synced_data_struct_factory.register("pit",
                                        PendingInterestTableMemoryExact)
    synced_data_struct_factory.register("face_id_table", FaceIDDict)
    synced_data_struct_factory.create_manager()
    cs = synced_data_struct_factory.manager.cs()
    fib = synced_data_struct_factory.manager.fib()
    pit = synced_data_struct_factory.manager.pit()
    face_id_table = synced_data_struct_factory.manager.face_id_table()

    # initialize layers
    self.link_layer = BasicLinkLayer([UDP4Interface(port)], face_id_table,
                                     log_level=log_level)
    self.packet_encoding_layer = BasicPacketEncodingLayer(
        self.encoder, log_level=log_level)
    self.icn_layer = BasicICNLayer(log_level=log_level)
    self.pinned_computation_layer = PinnedComputationLayer(
        replica_id, log_level=log_level)

    # tell icn_layer that there is a higher layer which might satisfy interests
    self.icn_layer._interest_to_app = True
    # TODO -- decide here if it should be forwarded upwards or not
    # self.icn_layer._interest_to_app = lambda interest: interest.name.components[-1] == b"pNFN"

    # setup stack
    self.stack: LayerStack = LayerStack([
        self.pinned_computation_layer, self.icn_layer,
        self.packet_encoding_layer, self.link_layer
    ])

    # set CS, FIB, PIT in forwarding layer
    self.icn_layer.cs = cs
    self.icn_layer.fib = fib
    self.icn_layer.pit = pit
def __init__(self,
             ip: str,
             port: Optional[int],
             log_level=255,
             encoder: BasicEncoder = None,
             autoconfig: bool = False,
             interfaces=None,
             session_keys: Optional[Dict] = None,
             name: str = None,
             polling_interval: float = 1.0,
             ping_interval: float = 2.0):
    """Fetch client with session support.

    :param session_keys: optional pre-established mapping of session name
                         to session key; when given, the session is
                         considered already established
    :param polling_interval: seconds between polls of the receive queue
    :param ping_interval: seconds between keep-alive pings
    """
    super().__init__(ip, port, log_level, encoder, autoconfig, interfaces,
                     name)

    self.ip = ip
    self._logger = Logger("FetchSession", log_level)
    self._pending_sessions: List[Name] = []
    # FIX: annotation was Dict[Name:Name] — a slice expression, not a
    # valid type parameterisation; the mapping is session name -> key.
    self._running_sessions: Dict[Name, Name] = dict(
    ) if session_keys is None else session_keys
    self._has_session: bool = session_keys is not None

    self._session_initiator = 'session_connector'
    self._session_identifier = 'sid'
    self._polling_interval = polling_interval
    self._ping_interval = ping_interval

    self._manager = Manager()
    self._mutex = self._manager.Lock()

    # Background processes: one polls the layer stack for incoming data,
    # the other emits periodic pings to keep the session alive.
    self.receive_process = Process(target=self._receive_session,
                                   args=(
                                       self.lstack.queue_to_higher,
                                       self._polling_interval,
                                       self._mutex,
                                   ))
    self.ping_process = Process(target=self._ping_messages,
                                args=(self._ping_interval, ))
    self.receive_process.start()
    self.ping_process.start()
def __init__(self,
             replica_id,
             port=9500,
             log_level=255,
             encoder: ExtendedNdnTlvEncoder = ExtendedNdnTlvEncoder):
    """Heartbeat computation server stack.

    :param replica_id: identifier of this replica, handed to the
                       heartbeat computation layer
    :param port: UDP port the link layer listens on
    :param log_level: numeric log level propagated to all layers
    :param encoder: encoder instance, or an encoder class (the declared
                    default is the ExtendedNdnTlvEncoder *class*),
                    instantiated here
    """
    # debug level
    logger = Logger("Server", log_level)

    # packet encoder
    # FIX: the default value is the encoder class; calling
    # set_log_level() on the class raises TypeError, so instantiate first.
    if isinstance(encoder, type):
        encoder = encoder()
    encoder.set_log_level(log_level)
    self.encoder = encoder

    # create datastruct
    synced_data_struct_factory = PiCNSyncDataStructFactory()
    synced_data_struct_factory.register("cs", ContentStoreMemoryExact)
    synced_data_struct_factory.register(
        "fib", ForwardingInformationBaseMemoryPrefix)
    synced_data_struct_factory.register("pit",
                                        PendingInterstTableMemoryExact)
    synced_data_struct_factory.register("face_id_table", FaceIDDict)
    synced_data_struct_factory.create_manager()
    cs = synced_data_struct_factory.manager.cs()
    fib = synced_data_struct_factory.manager.fib()
    pit = synced_data_struct_factory.manager.pit()
    face_id_table = synced_data_struct_factory.manager.face_id_table()

    # initialize layers
    self.link_layer = BasicLinkLayer([UDP4Interface(port)], face_id_table,
                                     log_level=log_level)
    self.packet_encoding_layer = HeartbeatPacketEncodingLayer(
        self.encoder, log_level=log_level)
    # interest_to_app=True: interests may be satisfied by the layer above
    self.heartbeat_network_layer = HeartbeatNetworkLayer(
        log_level=log_level, interest_to_app=True)
    self.heartbeat_computation_layer = HeartbeatComputationLayer(
        replica_id, log_level=log_level)

    # setup stack
    self.stack: LayerStack = LayerStack([
        self.heartbeat_computation_layer, self.heartbeat_network_layer,
        self.packet_encoding_layer, self.link_layer
    ])

    # set CS, FIB, PIT in forwarding layer
    self.heartbeat_network_layer.cs = cs
    self.heartbeat_network_layer.fib = fib
    self.heartbeat_network_layer.pit = pit
def __init__(self,
             port=9500,
             http_port=8080,
             log_level=255,
             encoder: BasicEncoder = NdnTlvEncoder,
             database_path="/tmp",
             flush_database=False,
             pem_path=None):
    """BeeSens repository stack: HTTP interface + persistent storage.

    :param port: UDP port the link layer listens on
    :param http_port: port of the HTTP interface layer
    :param encoder: encoder instance, or an encoder class (the declared
                    default is the NdnTlvEncoder *class*), instantiated here
    :param database_path: directory holding the persistent content store
    :param flush_database: when True, wipe the content store on startup
    :param pem_path: optional TLS certificate path for the HTTP interface
    """
    # debug level
    logger = Logger("Repo", log_level)

    # packet encoder
    # FIX: the default value is the encoder class; calling
    # set_log_level() on the class raises TypeError, so instantiate first.
    if isinstance(encoder, type):
        encoder = encoder()
    encoder.set_log_level(log_level)
    self.encoder = encoder

    # setup data structures
    synced_data_struct_factory1 = PiCNSyncDataStructFactory()
    synced_data_struct_factory1.register("face_id_table", FaceIDDict)
    synced_data_struct_factory1.register("cs", ContentStorePersistentExact)
    synced_data_struct_factory1.create_manager()
    face_id_table = synced_data_struct_factory1.manager.face_id_table()
    storage = synced_data_struct_factory1.manager.cs(
        db_path=database_path + "/beesens-cs.db")
    if flush_database:
        storage.delete_all()

    # initialize layers
    self.link_layer = BasicLinkLayer([UDP4Interface(port)], face_id_table,
                                     log_level=log_level)
    self.packet_encoding_layer = BasicPacketEncodingLayer(
        self.encoder, log_level=log_level)
    self.storage_layer = StorageLayer(log_level=log_level)
    self.interface_layer = InterfaceLayer(http_port=http_port,
                                          log_level=log_level,
                                          pem_path=pem_path,
                                          flush_database=flush_database)

    # setup stack
    self.stack: LayerStack = LayerStack([
        self.interface_layer, self.storage_layer,
        self.packet_encoding_layer, self.link_layer
    ])

    # pass cs to storage layer
    self.storage_layer.storage = storage
def __init__(self,
             database_path,
             port=9000,
             log_level=255,
             encoder: BasicEncoder = None,
             flush_database=False):
    """Push-repository stack: push-repo layer over encoding and UDP link.

    :param database_path: directory holding the persistent content store
    :param port: UDP port the link layer listens on
    :param encoder: packet encoder; defaults to SimpleStringEncoder
    :param flush_database: when True, wipe the content store on startup
    """
    # debug level
    logger = Logger("PushRepo", log_level)

    # packet encoder: fall back to the simple string encoder
    if encoder is None:
        self.encoder = SimpleStringEncoder(log_level=log_level)
    else:
        encoder.set_log_level(log_level=log_level)
        self.encoder = encoder

    # synchronized data structures: persistent CS and face-id table
    factory = PiCNSyncDataStructFactory()
    factory.register("cs", ContentStorePersistentExact)
    factory.register("faceidtable", FaceIDDict)
    factory.create_manager()
    cs = factory.manager.cs(db_path=database_path + "/pushrepo.db")
    if flush_database:
        cs.delete_all()
    faceidtable = factory.manager.faceidtable()

    # default interface
    interfaces = [UDP4Interface(port)]
    mgmt_port = interfaces[0].get_port()

    # layers, bottom-up
    self.linklayer = BasicLinkLayer(interfaces, faceidtable,
                                    log_level=log_level)
    self.packetencodinglayer = BasicPacketEncodingLayer(
        self.encoder, log_level=log_level)
    self.repolayer = PushRepositoryLayer(log_level=log_level)

    self.lstack: LayerStack = LayerStack(
        [self.repolayer, self.packetencodinglayer, self.linklayer])

    self.repolayer.cs = cs

    # mgmt
    self.mgmt = Mgmt(cs, None, None, self.linklayer, mgmt_port,
                     self.stop_forwarder, log_level=log_level)
def __init__(self, port=9000, log_level=255, encoder: BasicEncoder = None,
             interfaces: List[BaseInterface] = None,
             executors: BaseNFNExecutor = None, ageing_interval: int = 3,
             use_thunks=False):
    """NFN forwarder stack: NFN computation on top of chunking, timeout
    prevention, (optional) thunks, ICN forwarding, encoding and link layer.

    :param port: UDP port to listen on (also the management port when no
                 explicit interfaces are given)
    :param encoder: packet encoder; defaults to SimpleStringEncoder
    :param interfaces: pre-built interfaces; when given, ``port`` is used
                       as the management port
    :param executors: mapping of language tag to executor; defaults to
                      {"PYTHON": NFNPythonExecutor()}
    :param ageing_interval: ICN layer ageing interval in seconds
    :param use_thunks: enable the thunk layer and plan-based optimizer
    """
    # debug level
    logger = Logger("NFNForwarder", log_level)
    logger.info("Start PiCN NFN Forwarder on port " + str(port))

    # packet encoder
    if encoder is None:
        self.encoder = SimpleStringEncoder(log_level=log_level)
    else:
        encoder.set_log_level(log_level)
        self.encoder = encoder

    # setup data structures (all shared across processes via the factory)
    synced_data_struct_factory = PiCNSyncDataStructFactory()
    synced_data_struct_factory.register("cs", ContentStoreMemoryExact)
    synced_data_struct_factory.register(
        "fib", ForwardingInformationBaseMemoryPrefix)
    synced_data_struct_factory.register("pit",
                                        PendingInterstTableMemoryExact)
    synced_data_struct_factory.register("faceidtable", FaceIDDict)
    synced_data_struct_factory.register("computation_table",
                                        NFNComputationList)
    synced_data_struct_factory.register("timeoutprevention_dict",
                                        TimeoutPreventionMessageDict)
    if use_thunks:
        # thunk bookkeeping is only needed when the thunk layer is active
        synced_data_struct_factory.register("thunktable", ThunkList)
        synced_data_struct_factory.register("plantable", PlanTable)
    synced_data_struct_factory.create_manager()

    cs = synced_data_struct_factory.manager.cs()
    fib = synced_data_struct_factory.manager.fib()
    pit = synced_data_struct_factory.manager.pit()
    faceidtable = synced_data_struct_factory.manager.faceidtable()

    self.parser = DefaultNFNParser()
    if use_thunks:
        thunktable = synced_data_struct_factory.manager.thunktable()
        plantable = synced_data_struct_factory.manager.plantable(
            self.parser)

    #setup chunkifier
    self.chunkifier = SimpleContentChunkifyer()

    # default interface; explicit interfaces keep `port` as mgmt port
    if interfaces is not None:
        self.interfaces = interfaces
        mgmt_port = port
    else:
        interfaces = [UDP4Interface(port)]
        mgmt_port = interfaces[0].get_port()

    # initialize layers
    self.linklayer = BasicLinkLayer(interfaces, faceidtable,
                                    log_level=log_level)
    self.packetencodinglayer = BasicPacketEncodingLayer(
        self.encoder, log_level=log_level)
    self.icnlayer = BasicICNLayer(log_level=log_level,
                                  ageing_interval=ageing_interval)
    self.chunklayer = BasicChunkLayer(self.chunkifier, log_level=log_level)

    # setup nfn: the ICN layer forwards interests up to the NFN layer
    self.icnlayer._interest_to_app = True
    if executors is None:
        self.executors = {"PYTHON": NFNPythonExecutor()}
    else:
        self.executors = executors
    self.r2cclient = TimeoutR2CHandler()
    comp_table = synced_data_struct_factory.manager.computation_table(
        self.r2cclient, self.parser)
    self.nfnlayer = BasicNFNLayer(cs, fib, pit, faceidtable, comp_table,
                                  self.executors, self.parser,
                                  self.r2cclient, log_level=log_level)

    if use_thunks:
        # thunk layer + plan-based optimizer replace the default optimizer
        self.thunk_layer = BasicThunkLayer(cs, fib, pit, faceidtable,
                                           thunktable, plantable,
                                           self.parser,
                                           log_level=log_level)
        self.nfnlayer.optimizer = ThunkPlanExecutor(
            cs, fib, pit, faceidtable, plantable)

    timeoutprevention_dict = synced_data_struct_factory.manager.timeoutprevention_dict(
    )
    self.timeoutpreventionlayer = BasicTimeoutPreventionLayer(
        timeoutprevention_dict, comp_table, pit=pit, log_level=log_level)

    # assemble the layer stack, top to bottom
    if use_thunks:
        self.lstack: LayerStack = LayerStack([
            self.nfnlayer, self.chunklayer, self.timeoutpreventionlayer,
            self.thunk_layer, self.icnlayer, self.packetencodinglayer,
            self.linklayer
        ])
    else:
        self.lstack: LayerStack = LayerStack([
            self.nfnlayer, self.chunklayer, self.timeoutpreventionlayer,
            self.icnlayer, self.packetencodinglayer, self.linklayer
        ])

    # hand CS, FIB, PIT to the forwarding layer
    self.icnlayer.cs = cs
    self.icnlayer.fib = fib
    self.icnlayer.pit = pit

    # mgmt
    self.mgmt = Mgmt(self.icnlayer.cs, self.icnlayer.fib,
                     self.icnlayer.pit, self.linklayer, mgmt_port,
                     self.stop_forwarder, log_level=log_level)
def main(args):
    """Entry point: start an NFN forwarder with the selected optimizer."""
    # Translate the CLI logging choice into a numeric level (255 = off).
    log_level = {
        'error': logging.ERROR,
        'warning': logging.WARNING,
        'info': logging.INFO,
        'debug': logging.DEBUG,
    }.get(args.logging, 255)

    logger = Logger("NFNForwarder", log_level)

    # Info
    logger.info("Starting a NFN Forwarder...")
    logger.info("UDP Port: " + str(args.port))
    logger.info("Log Level: " + args.logging)
    logger.info("Packet Format: " + args.format)

    # Packet encoder
    if args.format == 'ndntlv':
        encoder = NdnTlvEncoder(log_level)
    else:
        encoder = SimpleStringEncoder(log_level)

    # Build the forwarder according to the chosen optimizer.
    if args.optimizer == "Edge":
        # Edge nodes use a short ageing interval and tighter timeouts.
        forwarder = PiCN.ProgramLibs.NFNForwarder.NFNForwarder(
            args.port, log_level, encoder, ageing_interval=1)
        logger.info("Edge Computing Node")
        forwarder.icnlayer.pit.set_pit_timeout(2)
        forwarder.icnlayer.cs.set_cs_timeout(30)
        forwarder.nfnlayer.optimizer = EdgeComputingOptimizer(
            forwarder.icnlayer.cs, forwarder.icnlayer.fib,
            forwarder.icnlayer.pit, forwarder.linklayer.faceidtable)
    elif args.optimizer == "MapReduce":
        forwarder = PiCN.ProgramLibs.NFNForwarder.NFNForwarder(
            args.port, log_level, encoder)
        logger.info("Using MapReduce Optimizer")
        forwarder.nfnlayer.optimizer = MapReduceOptimizer(
            forwarder.icnlayer.cs, forwarder.icnlayer.fib,
            forwarder.icnlayer.pit, forwarder.linklayer.faceidtable)
    elif args.optimizer == "Thunks":
        forwarder = PiCN.ProgramLibs.NFNForwarder.NFNForwarder(
            args.port, log_level, encoder, use_thunks=True)
        logger.info("Using Thunks for Planning and Optimizing")
    else:
        forwarder = PiCN.ProgramLibs.NFNForwarder.NFNForwarder(
            args.port, log_level, encoder)

    forwarder.start_forwarder()
    forwarder.linklayer.process.join()
def __init__(self,
             port=9000,
             log_level=255,
             encoder: ExtendedNdnTlvEncoder = None,
             interfaces: List[BaseInterface] = None):
    """Heartbeat-capable ICN forwarder stack.

    :param port: UDP port to listen on (also mgmt port when explicit
                 interfaces are given)
    :param encoder: extended NDN-TLV encoder; created here when None
    :param interfaces: pre-built interfaces, or None for a default UDP one
    """
    # debug level
    logger = Logger("ICNForwarder", log_level)

    # packet encoder
    if encoder is None:
        # FIX: this branch previously assigned an empty tuple (), which
        # breaks the packet encoding layer; fall back to a real encoder.
        self.encoder = ExtendedNdnTlvEncoder(log_level)
    else:
        encoder.set_log_level(log_level)
        self.encoder = encoder

    # setup data structures
    synced_data_struct_factory = PiCNSyncDataStructFactory()
    synced_data_struct_factory.register("cs", ContentStoreMemoryExact)
    synced_data_struct_factory.register(
        "fib", ForwardingInformationBaseMemoryPrefix)
    synced_data_struct_factory.register("pit",
                                        PendingInterstTableMemoryExact)
    synced_data_struct_factory.register("face_id_table", FaceIDDict)
    synced_data_struct_factory.create_manager()
    cs = synced_data_struct_factory.manager.cs()
    fib = synced_data_struct_factory.manager.fib()
    # long PIT timeout: heartbeats keep pending interests alive
    pit = synced_data_struct_factory.manager.pit(pit_timeout=60)
    face_id_table = synced_data_struct_factory.manager.face_id_table()

    # default interface
    if interfaces is not None:
        self.interfaces = interfaces
        mgmt_port = port
    else:
        interfaces = [UDP4Interface(port)]
        mgmt_port = interfaces[0].get_port()

    # initialize layers
    self.link_layer = BasicLinkLayer(interfaces, face_id_table,
                                     log_level=log_level)
    self.packet_encoding_layer = HeartbeatPacketEncodingLayer(
        self.encoder, log_level=log_level)
    self.icn_layer = HeartbeatNetworkLayer(log_level=log_level)

    self.lstack: LayerStack = LayerStack(
        [self.icn_layer, self.packet_encoding_layer, self.link_layer])

    self.icn_layer.cs = cs
    self.icn_layer.fib = fib
    self.icn_layer.pit = pit

    # mgmt
    self.mgmt = Mgmt(cs, fib, pit, self.link_layer, mgmt_port,
                     self.stop_forwarder, log_level=log_level)
def __init__(self, foldername: Optional[str], prefix: Name, port=9000,
             log_level=255, encoder: BasicEncoder = None,
             autoconfig: bool = False, autoconfig_routed: bool = False,
             interfaces: List[BaseInterface] = None, use_thunks=False):
    """Data repository stack with session support.

    :param foldername: If None, use an in-memory repository. Else, use a
                       file system repository.
    :param prefix: name prefix the repository serves content under
    :param port: UDP port to listen on (also mgmt port when explicit
                 interfaces are given)
    :param encoder: packet encoder; defaults to SimpleStringEncoder
    :param autoconfig: insert an autoconfig layer below the chunk layer
    :param autoconfig_routed: register the repo prefix globally
    :param use_thunks: enable the thunk layer
    """
    logger = Logger("ICNRepoSession", log_level)
    logger.info("Start PiCN Data Repository with Sessions")

    # packet encoder
    if encoder is None:
        self.encoder = SimpleStringEncoder(log_level=log_level)
    else:
        encoder.set_log_level(log_level)
        self.encoder = encoder

    # chunkifyer
    self.chunkifyer = SimpleContentChunkifyer()

    # repo: in-memory or file-system backed, depending on foldername
    manager = multiprocessing.Manager()
    if foldername is None:
        self.repo: BaseRepository = SimpleMemoryRepository(prefix, manager,
                                                           logger)
    else:
        self.repo: BaseRepository = SimpleFileSystemRepository(foldername,
                                                               prefix,
                                                               manager,
                                                               logger)

    # initialize layers
    synced_data_struct_factory = PiCNSyncDataStructFactory()
    synced_data_struct_factory.register("faceidtable", FaceIDDict)
    if use_thunks:
        synced_data_struct_factory.register("thunktable", ThunkList)
        synced_data_struct_factory.register("plantable", PlanTable)
    synced_data_struct_factory.create_manager()
    faceidtable = synced_data_struct_factory.manager.faceidtable()
    if use_thunks:
        self.parser = DefaultNFNParser()
        thunktable = synced_data_struct_factory.manager.thunktable()
        plantable = synced_data_struct_factory.manager.plantable(self.parser)

    # explicit interfaces keep `port` as mgmt port
    if interfaces is not None:
        self.interfaces = interfaces
        mgmt_port = port
    else:
        interfaces = [UDP4Interface(port)]
        mgmt_port = interfaces[0].get_port()

    self.linklayer = BasicLinkLayer(interfaces, faceidtable,
                                    log_level=log_level)
    self.packetencodinglayer = BasicPacketEncodingLayer(self.encoder,
                                                        log_level=log_level)
    self.chunklayer = BasicChunkLayer(self.chunkifyer, log_level=log_level)
    self.repolayer = SessionRepositoryLayer(self.repo, log_level=log_level)

    if use_thunks:
        # CS/FIB/PIT are not needed by the repo-side thunk layer
        self.thunklayer = BasicThunkLayer(None, None, None, faceidtable,
                                          thunktable, plantable,
                                          self.parser, self.repo,
                                          log_level=log_level)
        logger.info("Using Thunks")

    # assemble the layer stack, top to bottom
    if use_thunks:
        self.lstack: LayerStack = LayerStack([
            self.repolayer, self.chunklayer, self.thunklayer,
            self.packetencodinglayer, self.linklayer
        ])
    else:
        self.lstack: LayerStack = LayerStack([
            self.repolayer, self.chunklayer, self.packetencodinglayer,
            self.linklayer
        ])

    if autoconfig:
        self.autoconfiglayer = AutoconfigRepoLayer(
            name=prefix.string_components[-1], addr='127.0.0.1',
            linklayer=self.linklayer, repo=self.repo,
            register_global=autoconfig_routed, log_level=log_level)
        self.lstack.insert(self.autoconfiglayer, below_of=self.chunklayer)

    # mgmt
    self.mgmt = Mgmt(None, None, None, self.linklayer, mgmt_port,
                     self.start_repo, repo_path=foldername,
                     repo_prfx=prefix, log_level=log_level)
def __init__(self, port=9000, log_level=255, encoder: BasicEncoder = None,
             routing: bool = False, peers=None, autoconfig: bool = False,
             interfaces: List[BaseInterface] = None,
             ageing_interval: int = 3, node_name: str = None):
    """ICN forwarder stack with optional routing and autoconfig layers.

    :param port: UDP port to listen on (also mgmt port when explicit
                 interfaces are given)
    :param encoder: packet encoder; defaults to SimpleStringEncoder
    :param routing: insert a routing layer (RIB) below the ICN layer
    :param peers: peer list handed to the routing layer
    :param autoconfig: insert an autoconfig server layer below the ICN layer
    :param ageing_interval: ICN layer ageing interval in seconds
    :param node_name: node name attached to FIB and PIT for logging
    """
    # debug level
    logger = Logger("ICNForwarder", log_level)
    # FIXME: Why isn't this self.logger???
    self._node_name = node_name

    # packet encoder
    if encoder is None:
        self.encoder = SimpleStringEncoder(log_level=log_level)
    else:
        encoder.set_log_level(log_level=log_level)
        self.encoder = encoder

    # setup data structures
    synced_data_struct_factory = PiCNSyncDataStructFactory()
    synced_data_struct_factory.register("cs", ContentStoreMemoryExact)
    synced_data_struct_factory.register(
        "fib", ForwardingInformationBaseMemoryPrefix)
    synced_data_struct_factory.register("pit",
                                        PendingInterestTableMemoryExact)
    synced_data_struct_factory.register("rib", TreeRoutingInformationBase)
    synced_data_struct_factory.register("faceidtable", FaceIDDict)
    synced_data_struct_factory.create_manager()
    cs = synced_data_struct_factory.manager.cs()
    fib = synced_data_struct_factory.manager.fib()
    pit = synced_data_struct_factory.manager.pit()
    rib = None
    if routing:
        # RIB only materialised when the routing layer is enabled
        rib = synced_data_struct_factory.manager.rib()
    faceidtable = synced_data_struct_factory.manager.faceidtable()

    # default interface
    if interfaces is not None:
        self.interfaces = interfaces
        mgmt_port = port
    else:
        interfaces = [UDP4Interface(port)]
        mgmt_port = interfaces[0].get_port()

    # initialize layers
    self.linklayer = BasicLinkLayer(interfaces, faceidtable,
                                    log_level=log_level)
    self.packetencodinglayer = BasicPacketEncodingLayer(
        self.encoder, log_level=log_level)
    self.icnlayer = BasicICNLayer(log_level=log_level,
                                  ageing_interval=ageing_interval)

    self.lstack: LayerStack = LayerStack(
        [self.icnlayer, self.packetencodinglayer, self.linklayer])

    if autoconfig:
        self.autoconfiglayer: AutoconfigServerLayer = AutoconfigServerLayer(
            linklayer=self.linklayer, address='127.0.0.1',
            registration_prefixes=[(Name('/testnetwork/repos'), True)],
            log_level=log_level)
        self.lstack.insert(self.autoconfiglayer, below_of=self.icnlayer)
    if routing:
        self.routinglayer = BasicRoutingLayer(self.linklayer, peers=peers,
                                              log_level=log_level)
        self.lstack.insert(self.routinglayer, below_of=self.icnlayer)

    self.icnlayer.cs = cs
    self.icnlayer.fib = fib
    # ----- by Luc
    # FIXME: How to pass these parameters to __init__
    self.icnlayer.fib.logger = logger
    self.icnlayer.fib.node_name = self._node_name
    # ----- by Luc
    # FIXME: How to pass these parameters to __init__
    self.icnlayer.pit = pit
    self.icnlayer.pit.logger = logger
    self.icnlayer.pit.node_name = self._node_name
    # -----
    if autoconfig:
        self.autoconfiglayer.fib = fib
    if routing:
        self.routinglayer.rib = rib
        self.routinglayer.fib = fib

    # mgmt
    self.mgmt = Mgmt(cs, fib, pit, self.linklayer, mgmt_port,
                     self.stop_forwarder, log_level=log_level)
def main(args):
    """Entry point: start a push repository, merging CLI/config/defaults."""
    # note: set later according to cli/config arguments
    logger = Logger("ICNPushRepo", logging.DEBUG)
    logger.info("Starting a Push Repository...")

    # Parse Configuration file
    conf = None
    if args.config != "none":
        try:
            conf = ConfigParser(args.config)
            logger.info("Successfully parsed configuration file.")
        except CouldNotOpenConfigError:
            conf = None
            logger.warning(
                "Could not open configuration file. Proceed with command line arguments or default values."
            )
        except CouldNotParseError:
            logger.warning(
                "Could not parse configuration file. Proceed with command line arguments or default values."
            )
        except MalformedConfigurationError as e:
            logger.warning(
                "Invalid configuration file. Proceed with command line arguments or default values. Hint: "
                + str(e))

    # Precedence: command line argument, then config file, then default.
    if not args.port:
        args.port = conf.udp_port if conf and conf.udp_port else default_port
    if not args.format:
        args.format = conf.format if conf and conf.format else default_format
    if not args.logging:
        args.logging = conf.logging if conf and conf.logging else default_logging

    # Translate the logging choice into a numeric level (255 = off).
    log_level = {
        'error': logging.ERROR,
        'warning': logging.WARNING,
        'info': logging.INFO,
        'debug': logging.DEBUG,
    }.get(args.logging, 255)
    logger.setLevel(log_level)

    # Info
    logger.info("UDP Port: " + str(args.port))
    logger.info("Log Level: " + args.logging)
    logger.info("Packet Format: " + args.format)
    logger.info("Database: " + args.database_path)
    logger.info("Flush DB: " + str(args.flush_database))

    # Packet encoder
    if args.format == 'ndntlv':
        encoder = NdnTlvEncoder(log_level)
    else:
        encoder = SimpleStringEncoder(log_level)

    # Start the repository and wait on the link-layer process.
    forwarder = PiCN.ProgramLibs.ICNPushRepository.ICNPushRepository(
        args.database_path, args.port, log_level, encoder,
        args.flush_database)
    forwarder.start_forwarder()
    forwarder.linklayer.process.join()
def main(args):
    """Entry point: start an NFN server replica."""
    # Map CLI log-level names to numeric levels (255 disables logging).
    log_level = {
        'error': logging.ERROR,
        'warning': logging.WARNING,
        'info': logging.INFO,
        'debug': logging.DEBUG,
    }.get(args.logging, 255)

    logger = Logger("Repo", log_level)

    # Info
    logger.info("Starting an NFN server...")
    logger.info("Replica ID: " + str(args.id))
    logger.info("UDP Port: " + str(args.port))
    logger.info("Log Level: " + args.logging)
    logger.info("Packet Format: " + args.format)

    # Packet encoder.
    # FIX: the fallback previously passed the SimpleStringEncoder *class*
    # instead of an instance, which breaks encoder.set_log_level() inside
    # the stack.
    if args.format == 'ndntlv':
        encoder = NdnTlvEncoder(log_level)
    else:
        encoder = SimpleStringEncoder(log_level)

    # Start the stack and block until the link-layer process terminates.
    server = PiCN.Playground.ForwardingStrategy.NFNStack(
        replica_id=args.id,
        port=args.port,
        log_level=log_level,
        encoder=encoder)
    server.start_forwarder()
    server.link_layer.process.join()
def __setstate__(self, d): self.__dict__.update( d ) #need to store logger parameter and recreate logger here, since it cannot be pickled self.logger = Logger(self.__logger_name, self.__log_level)
def main(argv):
    """Entry point: configure and run one mobility-simulation experiment."""
    # NOTE(review): `argv` is unused; the body reads the module-level
    # `args` — confirm this is intentional.
    log_level = {
        'error': logging.ERROR,
        'warning': logging.WARNING,
        'info': logging.INFO,
        'debug': logging.DEBUG,
    }.get(args.logging, 255)

    logger = Logger("MobilitySimulationExec", log_level)

    # Info
    logger.info("Mobility Simulation config params...")
    logger.info("#run: " + str(args.run))
    logger.info("#mobiles: " + str(args.mobiles))
    logger.info("#stations: " + str(args.stations))
    logger.info("Log Level: " + args.logging)
    logger.info("Optimizer: " + str(args.optimizer))

    # Deterministic runs: seed the RNG with the run id.
    random.seed(args.run)

    # Named function definitions available in the network.
    named_functions = {
        "/rsu/func/f1": "PYTHON\nf\ndef f(a, b, c):\n return a+b+c",
        "/rsu/func/f2": "PYTHON\nf\ndef f(a, b, c):\n return a*b*c",
        "/rsu/func/f3": "PYTHON\nf\ndef f(a, b, c):\n return a-b-c",
        "/rsu/func/f4": "PYTHON\nf\ndef f(a, b, c):\n return a**b**c",
        "/rsu/func/f5": "PYTHON\nf\ndef f(a, b, c):\n return a/b/c"
    }
    function_names = [
        Name("/rsu/func/f1/_(1,2,3)/NFN"),
        Name("/rsu/func/f2/_(1,2,3)/NFN"),
        Name("/rsu/func/f3/_(1,2,3)/NFN"),
        Name("/rsu/func/f4/_(1,2,3)/NFN"),
        Name("/rsu/func/f5/_(1,2,3)/NFN")
    ]

    # Stationary infrastructure nodes.
    stationary_nodes_list = [
        StationaryNode(node_id=i, com_range=0.5)
        for i in range(args.stations)
    ]

    # Mobile nodes: even ids spawn at the left edge heading right, odd
    # ids at the right edge heading left; speed is a random even value
    # in [50, 120].
    mobile_nodes_list = []
    last_station = len(stationary_nodes_list) - 1
    for i in range(args.mobiles):
        speed = random.randrange(50, 121, 2)
        if i % 2 == 0:
            node = MobileNode(node_id=i, spawn_point=0, speed=speed,
                              direction=1)
        else:
            node = MobileNode(node_id=i, spawn_point=last_station,
                              speed=speed, direction=-1)
        mobile_nodes_list.append(node)

    # Only the optimizer differs between the two configurations.
    chosen_optimizer = ("EdgeComputingOptimizer"
                        if args.optimizer == "Edge"
                        else "ToDataFirstOptimizer")
    simulation = MobilitySimulation(run_id=args.run,
                                    mobile_nodes=mobile_nodes_list,
                                    stationary_nodes=stationary_nodes_list,
                                    stationary_node_distance=0.5,
                                    named_functions=named_functions,
                                    function_names=function_names,
                                    forwarder="NFNForwarder",
                                    optimizer=chosen_optimizer,
                                    use_distribution_helper=True)
    simulation.run()
def __init__(self, logger_name="PiCNProcess", log_level=255):
    """Base process state: no worker process yet, plus a named logger.

    Logger name and level are also stored so the logger can be recreated
    (e.g. after pickling).
    """
    self._process: multiprocessing.Process = None
    self.__logger_name = logger_name
    self.__log_level = log_level
    self.logger = Logger(logger_name, log_level)
def main(args):
    """Entry point: start a BeeSens repository."""
    # Map CLI log-level names to numeric levels (255 disables logging).
    log_level = {
        'error': logging.ERROR,
        'warning': logging.WARNING,
        'info': logging.INFO,
        'debug': logging.DEBUG,
    }.get(args.logging, 255)

    logger = Logger("Repo", log_level)

    # Info
    logger.info("Starting a BeeSens Repo...")
    logger.info("UDP Port: " + str(args.port))
    logger.info("Web Port: " + str(args.web_port))
    logger.info("Database Path: " + str(args.database_path))
    logger.info("Empty Database: " + str(args.flush_database))
    logger.info("PEM Path: " + str(args.pem_path))
    logger.info("Log Level: " + args.logging)
    logger.info("Packet Format: " + args.format)

    # Packet encoder.
    # FIX: the fallback previously passed the SimpleStringEncoder *class*
    # instead of an instance, which breaks encoder.set_log_level() inside
    # the stack.
    if args.format == 'ndntlv':
        encoder = NdnTlvEncoder(log_level)
    else:
        encoder = SimpleStringEncoder(log_level)

    # Start the stack and block until the link-layer process terminates.
    forwarder = PiCN.Playground.BeeSensRepo.RepoStack(
        args.port,
        http_port=args.web_port,
        log_level=log_level,
        encoder=encoder,
        database_path=args.database_path,
        flush_database=args.flush_database,
        pem_path=args.pem_path)
    forwarder.start_forwarder()
    forwarder.link_layer.process.join()
def __init__(self, run_id: int, mobile_nodes: List[MobileNode],
             stationary_nodes: List[StationaryNode],
             stationary_node_distance: float, named_functions: dict,
             function_names: list, forwarder: str = "NFNForwarder",
             optimizer: str = "ToDataFirstOptimizer",
             use_distribution_helper: bool = False, log_level=logging.DEBUG):
    """ Configuration of the mobility simulation
    :param run_id the identifier of the simulation run
    :param mobile_nodes a list of mobile nodes part of the simulation
    :param stationary_nodes a list of stationary nodes forming the infrastructure
    :param stationary_node_distance the distance between the stationary nodes
    :param named_functions a dictionary of named function definitions used to be executed
    :param function_names a list of function names to be assigned to the mobile nodes
    :param forwarder the NFN forwarder to be used
    :param optimizer the NFN resolution strategy optimizer to be used in the simulation
    :param use_distribution_helper A flag indicating if the default distribution helper
           (ZipfMandelbrotDistribution) shall be used or not; default = False
    :param log_level the log level of the logger to be used; default: logging.DEBUG
    """
    self._run_id = run_id
    self._forwarder = forwarder
    self._optimizer = optimizer
    self._mobile_nodes = mobile_nodes
    self._stationary_nodes = stationary_nodes
    self._stationary_node_distance = stationary_node_distance
    self.logger = Logger("MobilitySimulation", log_level)
    # Face-id lookup matrices, indexed [stationary_node][mobile_node].
    self.to_car_faces = [[0] * len(self._mobile_nodes)
                         for i in range(len(self._stationary_nodes))
                         ]  # rsu, car -> faceid
    self.to_rsu_faces = [[0] * len(self._mobile_nodes)
                         for i in range(len(self._stationary_nodes))
                         ]  # rsu, car -> faceid

    # Cache the per-node mobility parameters in parallel lists.
    self._velocities = []
    self._heading_directions = []
    self._starting_points = []
    for node in self._mobile_nodes:
        self._velocities.append(node.speed)
        self._heading_directions.append(node.direction)
        self._starting_points.append(node.spawn_point)

    self._is_running = False  # flag indicating if the simulation is running or not
    self._function_names = function_names  # list of function names to be invoked by the nodes
    self._named_functions = named_functions  # a dict of function definitions to be invoked
    self._chunk_size = 8192
    # Simulated link layer shared by all nodes (string-encoded packets).
    self._simulation_bus = SimulationBus(
        packetencoder=SimpleStringEncoder())
    self._stationary_node_name_prefix = Name("/rsu")
    self._mobile_node_to_computation = [0] * len(
        mobile_nodes)  # index which mobile node issues which computation

    if use_distribution_helper:
        # TODO in the future support more distribution types, e.g., uniform, gaussian, etc.
        # Assign computations to mobile nodes following a Zipf-Mandelbrot
        # distribution over the available function names.
        dist_array = ZipfMandelbrotDistribution.create_zipf_mandelbrot_distribution(
            len(self._function_names), 0.7, 0.7)
        for i in range(0, len(mobile_nodes)):
            self._mobile_node_to_computation[i] = ZipfMandelbrotDistribution.\
                get_next_zipfmandelbrot_random_number(dist_array, len(self._function_names)) - 1
            # get_next_zipfmandelbrot_random_number(dist_array, len(self._function_names), run_id) - 1

    # Precompute contact times and wire up the simulated network.
    self._compute_rsu_connection_time()
    self._setup_simulation_network()
class FetchSessions(Fetch):
    """Fetch Tool for PiCN supporting sessions"""

    def __init__(self, ip: str, port: Optional[int], log_level=255,
                 encoder: BasicEncoder = None, autoconfig: bool = False,
                 interfaces=None, session_keys: Optional[Dict] = None,
                 name: str = None, polling_interval: float = 1.0,
                 ping_interval: float = 2.0):
        """Create a fetch tool that can establish and keep alive sessions.

        :param ip: address of the node to talk to
        :param port: port of the node (None with autoconfig)
        :param log_level: verbosity threshold (255 disables logging)
        :param encoder: packet encoder used on the stack
        :param autoconfig: use autoconfiguration instead of a fixed face
        :param interfaces: interfaces handed to the underlying stack
        :param session_keys: optional pre-existing session store (target Name -> session Name)
        :param name: name of this fetch instance
        :param polling_interval: seconds between polls of the receive queue
        :param ping_interval: seconds between keep-alive pings
        """
        super().__init__(ip, port, log_level, encoder, autoconfig, interfaces,
                         name)
        self.ip = ip
        self._logger = Logger("FetchSession", log_level)
        self._pending_sessions: List[Name] = []
        # Bugfix: was annotated Dict[Name:Name] (a slice expression, not a
        # valid generic parameterization); the intended type is Dict[Name, Name].
        self._running_sessions: Dict[Name, Name] = dict(
        ) if session_keys is None else session_keys
        self._has_session: bool = session_keys is not None
        self._session_initiator = 'session_connector'
        self._session_identifier = 'sid'
        self._polling_interval = polling_interval
        self._ping_interval = ping_interval
        self._manager = Manager()
        # Mutex serializes access to the queue between the receive loop
        # and synchronous fetch_data() calls.
        self._mutex = self._manager.Lock()
        self.receive_process = Process(target=self._receive_session,
                                       args=(
                                           self.lstack.queue_to_higher,
                                           self._polling_interval,
                                           self._mutex,
                                       ))
        self.ping_process = Process(target=self._ping_messages,
                                    args=(self._ping_interval, ))
        self.receive_process.start()
        self.ping_process.start()

    def handle_session(self, name: Name, packet: Packet) -> None:
        """Complete a session handshake and move the session from pending to running.

        :param name Name to be fetched
        :param packet Packet with session handshake
        """
        if isinstance(packet, Content):
            # Confirm the session id offered by the peer.
            target_name: Name = Name(
                f"/{self._session_identifier}/{packet.content}")
            session_confirmation: Content = Content(target_name,
                                                    packet.content, None)
            self.send_content(content=session_confirmation)
            self._running_sessions[name] = target_name
            self._pending_sessions.remove(name)
            self._has_session = True
        # NOTE(review): a Nack handshake leaves the name in _pending_sessions;
        # confirm whether pending entries should be dropped on failure.
        return None

    def get_session_name(self, name: Name) -> Optional[Name]:
        """Fetches the session name from the session store.

        Returns None otherwise
        :param name Name of repository to get session key for
        """
        if name in self._running_sessions:
            return self._running_sessions[name]
        else:
            return None

    def end_session(self, name: Name) -> None:
        """Terminates a session by deleting the associated id from the session store.

        :param name Name to terminate session with
        """
        self._logger.debug(
            f"Terminating session with repo {name} (session was {self})")
        content = Content(self.get_session_name(name), 'terminate')
        self.send_content(content)
        # Bugfix: an established session is no longer pending, so the
        # unconditional remove() raised ValueError here; guard it instead.
        if name in self._pending_sessions:
            self._pending_sessions.remove(name)
        del self._running_sessions[name]
        self._has_session = False if not self._running_sessions else True
        return None

    def _ping_messages(self, ping_interval: float = 2.0) -> None:
        """Background loop: ping every running session to keep it alive."""
        # Bugfix: the f-strings interpolated the function object `time.time`
        # instead of its value; call it so the timestamp is logged.
        self._logger.debug(f"--> Starting ping messages at {time.time()}")
        while True:
            if self._has_session:
                for repo in self._running_sessions:
                    self._logger.debug(
                        f"Sending ping to {repo} at {time.time()}")
                    content = Content(self.get_session_name(repo), 'ping')
                    self.send_content(content)
            time.sleep(ping_interval)
        return None

    def _receive_session(self, queue: Queue, polling_interval: float,
                         mutex: Lock) -> None:
        """Background loop: drain the receive queue while no fetch is active."""
        while True:
            self._logger.debug(f"--> : Waiting for mutex in loop ...")
            mutex.acquire(blocking=True)
            packet = None
            if not queue.empty():
                packet = queue.get()[1]
            mutex.release()
            if isinstance(packet, Content):
                print(f"--> : Receive loop got: {packet.content}")
            elif isinstance(packet, Nack):
                self._logger.debug(
                    f"--> One time receive got Nack: {packet.reason}")
            elif packet is None:
                self._logger.debug(f"--> : No packet in queue")
            else:
                self._logger.debug(
                    f"--> : Whoops, we just cleared a non content object from the queue! {packet}"
                )
            time.sleep(polling_interval)
        return None

    def fetch_data(self, name: Name, timeout: float = 4.0,
                   use_session: bool = True) -> Optional[str]:
        """Fetch data from the server

        :param name Name to be fetched
        :param timeout Timeout to wait for a response.
            Use 0 for infinity
        :param use_session Set to False if sessions shouldn't be used even if they are available.
        """
        if name in self._running_sessions and use_session:
            # Create interest with session
            interest: Interest = Interest(self._running_sessions.get(name))
        else:
            # Create normal interest
            interest: Interest = Interest(name)

        # Hold the mutex so the background receive loop does not steal
        # the response off the queue.
        self._mutex.acquire(blocking=True)
        self.send_interest(interest)
        packet = self.receive_packet(timeout)
        self._mutex.release()

        if self._session_initiator in interest.name.to_string(
        ):  # Check if we need to handle session initiation
            new_name = Name(name.components[:-1])
            self._pending_sessions.append(new_name)
            self.handle_session(new_name, packet)

        if isinstance(packet, Content):
            self._logger.debug(
                f"--> One time receive got content: {packet.content}")
            return packet.content
        elif isinstance(packet, Nack):
            self._logger.debug(
                f"--> One time receive got nack: {packet.reason}")
            return f"Received Nack: {str(packet.reason.value)}"
        return None

    def send_interest(self, interest: Interest) -> None:
        """Push an interest down the stack (face id is None with autoconfig)."""
        if self.autoconfig:
            self.lstack.queue_from_higher.put([None, interest])
        else:
            self.lstack.queue_from_higher.put([self.fid, interest])
        return None

    def receive_packet(self, timeout: float) -> Packet:
        """Blockingly read one packet from the stack; timeout 0 waits forever."""
        if timeout == 0:
            packet = self.lstack.queue_to_higher.get()[1]
        else:
            packet = self.lstack.queue_to_higher.get(timeout=timeout)[1]
        return packet

    def send_content(self, content: Union[Content, Tuple[Name, str]]) -> None:
        """Push a content object (or a (name, payload) pair) down the stack."""
        if isinstance(content, Content):
            c = content
        else:
            c = Content(content[0], content[1], None)
        if self.autoconfig:
            self.lstack.queue_from_higher.put([None, c])
        else:
            self.lstack.queue_from_higher.put([self.fid, c])
        return None

    def stop_fetch(self):
        """Close everything"""
        # Bugfix: the ping process was previously left running on shutdown.
        self.receive_process.terminate()
        self.ping_process.terminate()
        self.lstack.stop_all()
        self.lstack.close_all()

    def __repr__(self):
        headers = ['Target', 'Session ID']
        data = [[k, v] for k, v in self._running_sessions.items()]
        return f"Running sessions for <<{self.name}>>:\n{tabulate(data, headers=headers, showindex=True, tablefmt='fancy_grid')}"
def __init__(self, logger_name="BasicEncoder", log_level=255): self.__logger_name = logger_name self.__log_level = log_level self.logger = Logger(self.__logger_name, self.__log_level)
class MobilitySimulation(object):
    """This simulation setup is used to simulate a set of mobile nodes within
    a NFN based infrastructure"""

    def __init__(self, run_id: int, mobile_nodes: List[MobileNode],
                 stationary_nodes: List[StationaryNode],
                 stationary_node_distance: float, named_functions: dict,
                 function_names: list, forwarder: str = "NFNForwarder",
                 optimizer: str = "ToDataFirstOptimizer",
                 use_distribution_helper: bool = False,
                 log_level=logging.DEBUG):
        """ Configuration of the mobility simulation
        :param run_id the identifier of the simulation run
        :param mobile_nodes a list of mobile nodes part of the simulation
        :param stationary_nodes a list of stationary nodes forming the infrastructure
        :param stationary_node_distance the distance between the stationary nodes
        :param named_functions a dictionary of named function definitions used to be executed
        :param function_names a list of function names to be assigned to the mobile nodes
        :param forwarder the NFN forwarder to be used
        :param optimizer the NFN resolution strategy optimizer to be used in the simulation
        :param use_distribution_helper A flag indicating if the default distribution helper
               (ZipfMandelbrotDistribution) shall be used or not; default = False
        :param log_level the log level of the logger to be used; default: logging.DEBUG
        """
        self._run_id = run_id
        self._forwarder = forwarder
        self._optimizer = optimizer
        self._mobile_nodes = mobile_nodes
        self._stationary_nodes = stationary_nodes
        self._stationary_node_distance = stationary_node_distance
        self.logger = Logger("MobilitySimulation", log_level)
        # Face-id lookup matrices, indexed [stationary_node][mobile_node].
        self.to_car_faces = [[0] * len(self._mobile_nodes)
                             for _ in range(len(self._stationary_nodes))
                             ]  # rsu, car -> faceid
        self.to_rsu_faces = [[0] * len(self._mobile_nodes)
                             for _ in range(len(self._stationary_nodes))
                             ]  # rsu, car -> faceid

        # Cache per-node mobility parameters in parallel lists.
        self._velocities = []
        self._heading_directions = []
        self._starting_points = []
        for node in self._mobile_nodes:
            self._velocities.append(node.speed)
            self._heading_directions.append(node.direction)
            self._starting_points.append(node.spawn_point)

        self._is_running = False  # flag indicating if the simulation is running or not
        self._function_names = function_names  # list of function names to be invoked by the nodes
        self._named_functions = named_functions  # a dict of function definitions to be invoked
        self._chunk_size = 8192
        # Simulated link layer shared by all nodes (string-encoded packets).
        self._simulation_bus = SimulationBus(
            packetencoder=SimpleStringEncoder())
        self._stationary_node_name_prefix = Name("/rsu")
        self._mobile_node_to_computation = [0] * len(
            mobile_nodes)  # index which mobile node issues which computation

        if use_distribution_helper:
            # TODO in the future support more distribution types, e.g., uniform, gaussian, etc.
            # Assign computations to mobile nodes following a Zipf-Mandelbrot
            # distribution over the available function names.
            dist_array = ZipfMandelbrotDistribution.create_zipf_mandelbrot_distribution(
                len(self._function_names), 0.7, 0.7)
            for i in range(0, len(mobile_nodes)):
                self._mobile_node_to_computation[i] = ZipfMandelbrotDistribution.\
                    get_next_zipfmandelbrot_random_number(dist_array, len(self._function_names)) - 1

        self._compute_rsu_connection_time()
        self._setup_simulation_network()

    ###########################
    # METHODS
    ###########################

    def _compute_rsu_connection_time(self):
        """this method computes the connection time of a mobile node to a
        stationary node based on velocity of the mobile node and the
        communication range of the stationary node.

        Further enhancements of this simulation should include physical and
        MAC layer related communication conditions (e.g., propagation delay,
        fading, etc.)
        """
        self._contact_time = []  # per mobile node, in nanoseconds
        for mobile_node in self._mobile_nodes:
            speed_in_ms = mobile_node.speed / 3.6  # km/h -> m/s
            distance_in_m = 1000 * self._stationary_node_distance
            # time = distance / speed, scaled to ns to match time.time_ns()
            self._contact_time.append(distance_in_m / speed_in_ms * 1e9)

    def _setup_stationary_nodes(self):
        """configure the NFN com. stack at the stationary nodes"""
        for node in self._stationary_nodes:
            # install the NFN forwarder and the mgmt client tool at the stationary node
            if self._forwarder == "NFNForwarder":
                node.nfn_forwarder = NFNForwarder(
                    0,
                    encoder=SimpleStringEncoder(),
                    interfaces=[
                        self._simulation_bus.add_interface(
                            f"rsu{node.node_id}")
                    ],
                    ageing_interval=10)
            elif self._forwarder == "NFNForwarderData":
                node.nfn_forwarder = NFNForwarderData(
                    0,
                    encoder=SimpleStringEncoder(),
                    interfaces=[
                        self._simulation_bus.add_interface(
                            f"rsu{node.node_id}")
                    ],
                    chunk_size=self._chunk_size,
                    num_of_forwards=1,
                    ageing_interval=10)
            else:
                self.logger.error(
                    "Forwarder: " + self._forwarder +
                    " is not supported! Use 'NFNForwarder' or 'NFNForwarderData'!"
                )

            # install the optimizer
            if self._optimizer == "ToDataFirstOptimizer":
                node.nfn_forwarder.nfnlayer.optimizer = ToDataFirstOptimizer(
                    node.nfn_forwarder.icnlayer.cs,
                    node.nfn_forwarder.icnlayer.fib,
                    node.nfn_forwarder.icnlayer.pit,
                    node.nfn_forwarder.linklayer.faceidtable)
            elif self._optimizer == "EdgeComputingOptimizer":
                node.nfn_forwarder.nfnlayer.optimizer = EdgeComputingOptimizer(
                    node.nfn_forwarder.icnlayer.cs,
                    node.nfn_forwarder.icnlayer.fib,
                    node.nfn_forwarder.icnlayer.pit,
                    node.nfn_forwarder.linklayer.faceidtable)

            # install the mgmt client tool at the node
            node.mgmt_tool = MgmtClient(
                node.nfn_forwarder.mgmt.mgmt_sock.getsockname()[1])
            node.nfn_forwarder.icnlayer.cs.set_cs_timeout(60)

    def _setup_connections_for_stationary_nodes(self):
        """Configure the FIB links between neighboring stationary nodes.

        Every RSU gets a "/nL" route to its left neighbour and a "/nR" route
        to its right neighbour; the edge RSUs only get the one that exists.
        Bugfixes vs. the original: the loop counter was reset with
        ``loop_variable = +1`` instead of incremented, and the last RSU
        addressed ``rsu{idx - 2}`` instead of its actual left neighbour.
        """
        last_index = len(self._stationary_nodes) - 1
        for idx, node in enumerate(self._stationary_nodes):
            if idx == 0:
                # first RSU: only a right neighbour (rsu1)
                faceid_right = node.nfn_forwarder.linklayer.faceidtable.get_or_create_faceid(
                    AddressInfo("rsu" + str(1), 0))
                node.nfn_forwarder.icnlayer.fib.add_fib_entry(
                    Name("/nR"), [faceid_right])
            elif idx == last_index:
                # last RSU: only a left neighbour (rsu{idx-1})
                faceid_left = node.nfn_forwarder.linklayer.faceidtable.get_or_create_faceid(
                    AddressInfo("rsu" + str(idx - 1), 0))
                node.nfn_forwarder.icnlayer.fib.add_fib_entry(
                    Name("/nL"), [faceid_left])
            else:
                faceid_node_left = node.nfn_forwarder.linklayer.faceidtable.get_or_create_faceid(
                    AddressInfo("rsu" + str(idx - 1), 0))
                faceid_node_right = node.nfn_forwarder.linklayer.faceidtable.get_or_create_faceid(
                    AddressInfo("rsu" + str(idx + 1), 0))
                node.nfn_forwarder.icnlayer.fib.add_fib_entry(
                    Name("/nL"), [faceid_node_left])
                node.nfn_forwarder.icnlayer.fib.add_fib_entry(
                    Name("/nR"), [faceid_node_right])

    def _assign_named_functions_to_stationary_execution_nodes(self):
        """configure executables to the stationary nodes"""
        for node in self._stationary_nodes:
            for func_name, func_def in self._named_functions.items():
                node.nfn_forwarder.icnlayer.cs.add_content_object(
                    Content(Name(func_name), func_def), static=True)

    def _setup_mobile_nodes(self):
        """configure the mobile nodes"""
        for node in self._mobile_nodes:
            node.forwarder = ICNForwarder(
                0,
                encoder=SimpleStringEncoder(),
                routing=True,
                interfaces=[
                    self._simulation_bus.add_interface(f"car{node.node_id}")
                ])
            node.fetch = Fetch(
                f"car{node.node_id}",
                None,
                255,
                SimpleStringEncoder(),
                interfaces=[
                    self._simulation_bus.add_interface(f"ftcar{node.node_id}")
                ])
            node.mgmt_tool = MgmtClient(
                node.forwarder.mgmt.mgmt_sock.getsockname()[1])

            for stationary_node in self._stationary_nodes:
                # face on the car's forwarder pointing at the RSU
                car_face_id = node.forwarder.linklayer.faceidtable.get_or_create_faceid(
                    AddressInfo(f"rsu{stationary_node.node_id}", 0))
                self.to_rsu_faces[stationary_node.node_id][
                    node.node_id] = car_face_id
                # Bugfix: to_car_faces is consumed by the *RSU* forwarder (see
                # reconnect_car/run), so the face must live in the RSU's face
                # table and point at this car's interface. The original created
                # it in the car's own table addressing "car{rsu_id}".
                rsu_face_id = stationary_node.nfn_forwarder.linklayer.faceidtable.get_or_create_faceid(
                    AddressInfo(f"car{node.node_id}", 0))
                self.to_car_faces[stationary_node.node_id][
                    node.node_id] = rsu_face_id

    def _setup_simulation_network(self):
        """configure a network according to the configuration"""
        self.logger.debug("Setup simulation network ...")
        # setup stationary nodes
        self._setup_stationary_nodes()
        self.logger.debug("\t setup stationary nodes done")
        # setup connections
        self._setup_connections_for_stationary_nodes()
        self.logger.debug("\t setup connections between stationary nodes done")
        # assign functions to stationary nodes
        self._assign_named_functions_to_stationary_execution_nodes()
        self.logger.debug("\t assign named functions to stationary nodes done")
        # setup mobile nodes
        self._setup_mobile_nodes()
        self.logger.debug("\t setup mobile nodes done")
        # start node
        self.start_nodes()
        self.logger.debug("\t setup complete -> start nodes")

    def reconnect_car(self, mobile_node_number, new_rsu_number):
        """Hand a mobile node over from its current RSU to a new one.

        Rewires the car's default route and the RSU-side routes/PIT entries,
        then re-issues the car's computation interest via the new RSU.
        """
        if len(self._stationary_nodes) <= new_rsu_number or new_rsu_number < 0:
            self.logger.error(
                f"{time.time():.5f} --- Cannot reconnect mobile node with id {mobile_node_number} "
                f"to stationary node with id {new_rsu_number}, not part of this simulation"
            )
            return
        connected_rsu = self.connected_rsu[mobile_node_number]
        # point the car's "/rsu" route at the new RSU
        self._mobile_nodes[
            mobile_node_number].forwarder.icnlayer.fib.remove_fib_entry(
                self._stationary_node_name_prefix)
        self._mobile_nodes[
            mobile_node_number].forwarder.icnlayer.fib.add_fib_entry(
                self._stationary_node_name_prefix,
                [self.to_rsu_faces[new_rsu_number][mobile_node_number]])
        # drop the car's route and pending interests at the old RSU
        self._stationary_nodes[
            connected_rsu].nfn_forwarder.icnlayer.fib.remove_fib_entry(
                Name(f"/car/car{mobile_node_number}"))
        self._stationary_nodes[
            connected_rsu].nfn_forwarder.icnlayer.pit.remove_pit_entry_by_fid(
                self.to_car_faces[connected_rsu][mobile_node_number])
        # register the car at the new RSU
        self._stationary_nodes[
            new_rsu_number].nfn_forwarder.icnlayer.fib.add_fib_entry(
                Name(f"/car/car{mobile_node_number}"),
                [self.to_car_faces[new_rsu_number][mobile_node_number]])
        self.connected_rsu[mobile_node_number] = connected_rsu + \
            self._heading_directions[mobile_node_number]
        self._car_send_interest(
            self._mobile_nodes[mobile_node_number], self._function_names[
                self._mobile_node_to_computation[mobile_node_number]])

    def _car_send_interest(self, mobile_node, name):
        """Best-effort fetch of the given NFN name from a mobile node."""
        try:
            mobile_node.fetch.fetch_data(
                name, timeout=1
            )  # if trouble reduce timeout to 0.1. Parse result from log
        except Exception:
            # best-effort: results are parsed from the log, failures ignored
            pass

    def start_nodes(self):
        """Start all forwarders and the simulation bus."""
        for stationary_node in self._stationary_nodes:
            stationary_node.nfn_forwarder.start_forwarder()
        for mobile_node in self._mobile_nodes:
            mobile_node.forwarder.start_forwarder()
        self._simulation_bus.start_process()

    def stop_nodes(self):
        """Stop all forwarders and the simulation bus (only after run() ended)."""
        if not self._is_running:
            for stationary_node in self._stationary_nodes:
                stationary_node.nfn_forwarder.stop_forwarder()
            for mobile_node in self._mobile_nodes:
                mobile_node.forwarder.stop_forwarder()
            self._simulation_bus.stop_process()
        else:
            # Bugfix: the original message claimed the simulation was "not
            # started yet", but this branch is taken while it is still running.
            self.logger.error(
                "Simulation still running -- stop the run loop before cleaning resources!"
            )

    def run(self):
        """run the experiment, hand over the cars"""
        self._is_running = True
        self.connected_rsu = []
        self.logger.debug("Start Simulation")
        for i in range(0, len(self._mobile_nodes)):
            # connect each car to its spawn-point RSU and issue its computation
            self.connected_rsu.append(self._starting_points[i])
            self._mobile_nodes[i].forwarder.icnlayer.fib.add_fib_entry(
                self._stationary_node_name_prefix,
                [self.to_rsu_faces[self.connected_rsu[i]][i]])
            # Bugfix: use the same "/car/car{i}" name that reconnect_car
            # removes/re-adds; the original registered a bare "car{i}" entry
            # that was never cleaned up on handover.
            self._stationary_nodes[self.connected_rsu[
                i]].nfn_forwarder.icnlayer.fib.add_fib_entry(
                    Name(f"/car/car{i}"),
                    [self.to_car_faces[self.connected_rsu[i]][i]])
            self._car_send_interest(
                self._mobile_nodes[i],
                self._function_names[self._mobile_node_to_computation[i]])

        # Bugfix: connection_time is compared against time.time_ns() and the
        # contact times are in nanoseconds; the original seeded it with
        # time.time() (seconds), causing immediate spurious reconnects.
        self.connection_time = [time.time_ns()] * len(self._mobile_nodes)
        steps = 5 * len(self._mobile_nodes)
        while (self._is_running):
            time_ns = time.time_ns()
            for i in range(0, len(self._mobile_nodes)):
                if time_ns - self.connection_time[i] > self._contact_time[i]:
                    self.logger.info("Car " + str(i) + " reconnects from " +
                                     str(self.connected_rsu[i]) + " to " +
                                     str(self.connected_rsu[i] +
                                         self._heading_directions[i]))
                    new_rsu_number = self.connected_rsu[
                        i] + self._heading_directions[i]
                    self.reconnect_car(i, new_rsu_number)
                    self.connection_time[i] = time.time_ns()
                    steps -= 1
            if steps <= 0:
                self._is_running = False
        self.logger.debug("Simulation Terminated!")
        self.stop_nodes()