def __init__(self, port=8500, log_level=255, encoder: BasicEncoder = None):
    """Assemble the repo layer stack (repo -> packet encoding -> link layer).

    :param port: UDP port the link layer binds to
    :param log_level: numeric log level forwarded to all layers (255 = off)
    :param encoder: packet encoder *instance*; a fresh NdnTlvEncoder is
                    created when omitted
    """
    # debug level
    logger = Logger("Publisher", log_level)

    # packet encoder. Fix: the previous default was the NdnTlvEncoder class
    # object itself, on which encoder.set_log_level(log_level) crashes;
    # instantiate a default encoder instead (same convention as the other
    # stacks in this code base that use `encoder: BasicEncoder = None`).
    if encoder is None:
        encoder = NdnTlvEncoder(log_level)
    else:
        encoder.set_log_level(log_level)
    self.encoder = encoder

    # create datastruct (shared across layer processes via the manager)
    synced_data_struct_factory1 = PiCNSyncDataStructFactory()
    synced_data_struct_factory1.register("face_id_table", FaceIDDict)
    synced_data_struct_factory1.create_manager()
    face_id_table = synced_data_struct_factory1.manager.face_id_table()

    # initialize layers
    self.link_layer = BasicLinkLayer([UDP4Interface(port)], face_id_table,
                                     log_level=log_level)
    self.packet_encoding_layer = BasicPacketEncodingLayer(
        self.encoder, log_level=log_level)
    self.repo_layer = RepoLayer(log_level=log_level)

    # stack layout, top to bottom
    self.stack: LayerStack = LayerStack(
        [self.repo_layer, self.packet_encoding_layer, self.link_layer])
def main(args):
    """Issue one interest per parameter value, each from its own thread."""
    logger = Logger("ExpLogger", 255)
    worker_threads = []

    # one UDP socket shared by all workers
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.settimeout(20000)
    sock.bind(("0.0.0.0", 0))

    for param in range(201, 701):
        interest_name = Name("/the/prefix/bernoulli/" + str(param) + "/1/pNFN")
        worker = Thread(target=run_singel_interest,
                        args=[args.format, args.ip, args.port, args.plain,
                              interest_name, logger, sock])
        time.sleep(0.5)  # stagger the interests
        worker.start()
        worker_threads.append(worker)

    # wait for all workers to finish
    for worker in worker_threads:
        worker.join()
def main(args):
    """Start an assisted-sharing repo from parsed command line arguments.

    :param args: argparse namespace with `logging`, `port` and `format`
    """
    # Log Level: map the textual level; anything unknown disables logging (255)
    log_level = {'error': logging.ERROR, 'warning': logging.WARNING,
                 'info': logging.INFO, 'debug': logging.DEBUG}.get(args.logging, 255)
    logger = Logger("Repo", log_level)

    # Info
    logger.info("Starting a Repo...")
    logger.info("UDP Port: " + str(args.port))
    logger.info("Log Level: " + args.logging)
    logger.info("Packet Format: " + args.format)

    # Packet encoder -- BUGFIX: SimpleStringEncoder was previously passed as
    # a class, not an instance; the stack calls encoder.set_log_level() on it.
    encoder = NdnTlvEncoder(log_level) if args.format == 'ndntlv' \
        else SimpleStringEncoder(log_level)

    # Start and block until the link layer process terminates
    forwarder = PiCN.Playground.AssistedSharing.RepoStack(
        args.port, log_level, encoder)
    forwarder.start_forwarder()
    forwarder.link_layer.process.join()
def main(args):
    """Start a heartbeat-capable ICN forwarder from command line arguments."""
    # translate the textual log level; anything unknown disables logging (255)
    levels = {'error': logging.ERROR, 'warning': logging.WARNING,
              'info': logging.INFO, 'debug': logging.DEBUG}
    log_level = levels.get(args.logging, 255)
    logger = Logger("ICNForwarder", log_level)

    # startup banner
    logger.info("Starting a Heartbeat Nodes...")
    logger.info("UDP Port: " + str(args.port))
    logger.info("Log Level: " + args.logging)
    logger.info("Packet Format: Extended NDN Packet Format")

    # heartbeat nodes always speak the extended NDN-TLV format
    encoder = ExtendedNdnTlvEncoder(log_level)

    # bring the forwarder up and block until its link layer terminates
    forwarder = HeartbeatForwarderStack(args.port, log_level, encoder)
    forwarder.start_forwarder()
    forwarder.link_layer.process.join()
def main(args):
    """Start a pinned-NFN heartbeat computation server."""
    # translate the textual log level; anything unknown disables logging (255)
    levels = {'error': logging.ERROR, 'warning': logging.WARNING,
              'info': logging.INFO, 'debug': logging.DEBUG}
    log_level = levels.get(args.logging, 255)
    logger = Logger("Repo", log_level)

    # startup banner
    logger.info("Starting a PinnedNFN Server...")
    logger.info("UDP Port: " + str(args.port))
    logger.info("Log Level: " + args.logging)
    logger.info("Packet Format: Extended NDN Packet Format")

    # this server always speaks the extended NDN-TLV format
    encoder = ExtendedNdnTlvEncoder(log_level)

    # bring the server up and block until its link layer terminates
    server = PiCN.Playground.Heartbeats.Nodes.HeartbeatComputationStack(
        port=args.port, log_level=log_level, encoder=encoder)
    server.start_forwarder()
    server.link_layer.process.join()
def main(args):
    """Start a BeeSens repository from parsed command line arguments."""
    # Log Level: map the textual level; anything unknown disables logging (255)
    log_level = {'error': logging.ERROR, 'warning': logging.WARNING,
                 'info': logging.INFO, 'debug': logging.DEBUG}.get(args.logging, 255)
    logger = Logger("Repo", log_level)

    # Info
    logger.info("Starting a BeeSens Repo...")
    logger.info("UDP Port: " + str(args.port))
    logger.info("Web Port: " + str(args.web_port))
    logger.info("Database Path: " + str(args.database_path))
    logger.info("Empty Database: " + str(args.flush_database))
    logger.info("PEM Path: " + str(args.pem_path))
    logger.info("Log Level: " + args.logging)
    logger.info("Packet Format: " + args.format)

    # Packet encoder -- BUGFIX: SimpleStringEncoder was previously passed as
    # a class, not an instance; the stack calls encoder.set_log_level() on it.
    encoder = NdnTlvEncoder(log_level) if args.format == 'ndntlv' \
        else SimpleStringEncoder(log_level)

    # Start and block until the link layer process terminates
    forwarder = PiCN.Playground.BeeSensRepo.RepoStack(
        args.port, http_port=args.web_port, log_level=log_level,
        encoder=encoder, database_path=args.database_path,
        flush_database=args.flush_database, pem_path=args.pem_path)
    forwarder.start_forwarder()
    forwarder.link_layer.process.join()
def main(args):
    """Start an NFN server replica (forwarding-strategy playground)."""
    # Log Level: map the textual level; anything unknown disables logging (255)
    log_level = {'error': logging.ERROR, 'warning': logging.WARNING,
                 'info': logging.INFO, 'debug': logging.DEBUG}.get(args.logging, 255)
    logger = Logger("Repo", log_level)

    # Info
    logger.info("Starting an NFN server...")
    logger.info("Replica ID: " + str(args.id))
    logger.info("UDP Port: " + str(args.port))
    logger.info("Log Level: " + args.logging)
    logger.info("Packet Format: " + args.format)

    # Packet encoder -- BUGFIX: SimpleStringEncoder was previously passed as
    # a class, not an instance; the stack calls encoder.set_log_level() on it.
    encoder = NdnTlvEncoder(log_level) if args.format == 'ndntlv' \
        else SimpleStringEncoder(log_level)

    # Start and block until the link layer process terminates
    server = PiCN.Playground.ForwardingStrategy.NFNStack(
        replica_id=args.id, port=args.port, log_level=log_level,
        encoder=encoder)
    server.start_forwarder()
    server.link_layer.process.join()
def main(args):
    """Start a plain CCN/ICN forwarder from parsed command line arguments."""
    # Log Level: map the textual level; anything unknown disables logging (255)
    log_level = {'error': logging.ERROR, 'warning': logging.WARNING,
                 'info': logging.INFO, 'debug': logging.DEBUG}.get(args.logging, 255)
    logger = Logger("ICNForwarder", log_level)

    # Info
    logger.info("Starting a CCN Forwarder...")
    logger.info("UDP Port: " + str(args.port))
    logger.info("Log Level: " + args.logging)
    logger.info("Packet Format: " + args.format)

    # Packet encoder -- BUGFIX: SimpleStringEncoder was previously passed as
    # a class, not an instance; the forwarder calls encoder.set_log_level().
    encoder = NdnTlvEncoder(log_level) if args.format == 'ndntlv' \
        else SimpleStringEncoder(log_level)

    # Start and block until the link layer process terminates
    forwarder = PiCN.ProgramLibs.ICNForwarder.ICNForwarder(
        args.port, log_level, encoder, autoconfig=args.autoconfig)
    forwarder.start_forwarder()
    forwarder.linklayer.process.join()
def main(args):
    """Start an NFN forwarder, optionally wiring in a special optimizer."""
    # translate the textual log level; anything unknown disables logging (255)
    levels = {'error': logging.ERROR, 'warning': logging.WARNING,
              'info': logging.INFO, 'debug': logging.DEBUG}
    log_level = levels.get(args.logging, 255)
    logger = Logger("NFNForwarder", log_level)

    # startup banner
    logger.info("Starting a NFN Forwarder...")
    logger.info("UDP Port: " + str(args.port))
    logger.info("Log Level: " + args.logging)
    logger.info("Packet Format: " + args.format)

    # pick the packet encoder matching the requested wire format
    if args.format == 'ndntlv':
        encoder = NdnTlvEncoder(log_level)
    else:
        encoder = SimpleStringEncoder(log_level)

    if args.optimizer == "Edge":
        # edge nodes age fast and keep short PIT/CS timeouts
        forwarder = PiCN.ProgramLibs.NFNForwarder.NFNForwarder(
            args.port, log_level, encoder, ageing_interval=1)
        logger.info("Edge Computing Node")
        forwarder.icnlayer.pit.set_pit_timeout(2)
        forwarder.icnlayer.cs.set_cs_timeout(30)
        forwarder.nfnlayer.optimizer = EdgeComputingOptimizer(
            forwarder.icnlayer.cs, forwarder.icnlayer.fib,
            forwarder.icnlayer.pit, forwarder.linklayer.faceidtable)
    elif args.optimizer == "MapReduce":
        forwarder = PiCN.ProgramLibs.NFNForwarder.NFNForwarder(
            args.port, log_level, encoder)
        logger.info("Using MapReduce Optimizer")
        forwarder.nfnlayer.optimizer = MapReduceOptimizer(
            forwarder.icnlayer.cs, forwarder.icnlayer.fib,
            forwarder.icnlayer.pit, forwarder.linklayer.faceidtable)
    elif args.optimizer == "Thunks":
        forwarder = PiCN.ProgramLibs.NFNForwarder.NFNForwarder(
            args.port, log_level, encoder, use_thunks=True)
        logger.info("Using Thunks for Planning and Optimizing")
    else:
        # default forwarder, default optimizer
        forwarder = PiCN.ProgramLibs.NFNForwarder.NFNForwarder(
            args.port, log_level, encoder)

    forwarder.start_forwarder()
    forwarder.linklayer.process.join()
def __init__(self, replica_id, port=9500, log_level=255, encoder: BasicEncoder = None):
    """Pinned-NFN computation stack: computation -> ICN -> encoding -> link.

    :param replica_id: id of this computation server replica
    :param port: UDP port the link layer binds to
    :param log_level: numeric log level forwarded to all layers (255 = off)
    :param encoder: packet encoder instance; a fresh NdnTlvEncoder is
                    created when omitted
    """
    # debug level
    logger = Logger("Repo", log_level)

    # packet encoder. Fix: the previous default was the NdnTlvEncoder class
    # object itself, on which encoder.set_log_level() crashes; instantiate a
    # default encoder instead (matches the `encoder = None` stacks here).
    if encoder is None:
        encoder = NdnTlvEncoder(log_level)
    else:
        encoder.set_log_level(log_level)
    self.encoder = encoder

    # create datastruct (shared across layer processes via the manager)
    synced_data_struct_factory = PiCNSyncDataStructFactory()
    synced_data_struct_factory.register("cs", ContentStoreMemoryExact)
    synced_data_struct_factory.register(
        "fib", ForwardingInformationBaseMemoryPrefix)
    synced_data_struct_factory.register("pit", PendingInterestTableMemoryExact)
    synced_data_struct_factory.register("face_id_table", FaceIDDict)
    synced_data_struct_factory.create_manager()
    cs = synced_data_struct_factory.manager.cs()
    fib = synced_data_struct_factory.manager.fib()
    pit = synced_data_struct_factory.manager.pit()
    face_id_table = synced_data_struct_factory.manager.face_id_table()

    # initialize layers
    self.link_layer = BasicLinkLayer([UDP4Interface(port)], face_id_table,
                                     log_level=log_level)
    self.packet_encoding_layer = BasicPacketEncodingLayer(
        self.encoder, log_level=log_level)
    self.icn_layer = BasicICNLayer(log_level=log_level)
    self.pinned_computation_layer = PinnedComputationLayer(
        replica_id, log_level=log_level)

    # tell icn_layer that there is a higher layer which might satisfy interests
    self.icn_layer._interest_to_app = True
    # TODO -- decide here if it should be forwarded upwards or not
    # self.icn_layer._interest_to_app = lambda interest: interest.name.components[-1] == b"pNFN"

    # setup stack (top to bottom)
    self.stack: LayerStack = LayerStack([
        self.pinned_computation_layer, self.icn_layer,
        self.packet_encoding_layer, self.link_layer
    ])

    # set CS, FIB, PIT in forwarding layer
    self.icn_layer.cs = cs
    self.icn_layer.fib = fib
    self.icn_layer.pit = pit
def __init__(self, replica_id, port=9500, log_level=255, encoder: ExtendedNdnTlvEncoder = None):
    """Heartbeat computation server stack.

    :param replica_id: id of this server replica
    :param port: UDP port the link layer binds to
    :param log_level: numeric log level forwarded to all layers (255 = off)
    :param encoder: extended NDN-TLV encoder instance; a fresh one is
                    created when omitted
    """
    # debug level
    logger = Logger("Server", log_level)

    # packet encoder. Fix: the previous default was the ExtendedNdnTlvEncoder
    # class object itself, on which encoder.set_log_level() crashes;
    # instantiate a default encoder instead.
    if encoder is None:
        encoder = ExtendedNdnTlvEncoder(log_level)
    else:
        encoder.set_log_level(log_level)
    self.encoder = encoder

    # create datastruct (shared across layer processes via the manager)
    synced_data_struct_factory = PiCNSyncDataStructFactory()
    synced_data_struct_factory.register("cs", ContentStoreMemoryExact)
    synced_data_struct_factory.register(
        "fib", ForwardingInformationBaseMemoryPrefix)
    # (sic) "PendingInterstTableMemoryExact" is the actual class name
    synced_data_struct_factory.register("pit", PendingInterstTableMemoryExact)
    synced_data_struct_factory.register("face_id_table", FaceIDDict)
    synced_data_struct_factory.create_manager()
    cs = synced_data_struct_factory.manager.cs()
    fib = synced_data_struct_factory.manager.fib()
    pit = synced_data_struct_factory.manager.pit()
    face_id_table = synced_data_struct_factory.manager.face_id_table()

    # initialize layers
    self.link_layer = BasicLinkLayer([UDP4Interface(port)], face_id_table,
                                     log_level=log_level)
    self.packet_encoding_layer = HeartbeatPacketEncodingLayer(
        self.encoder, log_level=log_level)
    self.heartbeat_network_layer = HeartbeatNetworkLayer(
        log_level=log_level, interest_to_app=True)
    self.heartbeat_computation_layer = HeartbeatComputationLayer(
        replica_id, log_level=log_level)

    # setup stack (top to bottom)
    self.stack: LayerStack = LayerStack([
        self.heartbeat_computation_layer, self.heartbeat_network_layer,
        self.packet_encoding_layer, self.link_layer
    ])

    # set CS, FIB, PIT in forwarding layer
    self.heartbeat_network_layer.cs = cs
    self.heartbeat_network_layer.fib = fib
    self.heartbeat_network_layer.pit = pit
def __init__(self, port=9500, http_port=8080, log_level=255,
             encoder: BasicEncoder = None, database_path="/tmp",
             flush_database=False, pem_path=None):
    """BeeSens repo stack: HTTP interface -> storage -> encoding -> link.

    :param port: UDP port the link layer binds to
    :param http_port: port of the HTTP interface layer
    :param log_level: numeric log level forwarded to all layers (255 = off)
    :param encoder: packet encoder instance; a fresh NdnTlvEncoder is
                    created when omitted
    :param database_path: directory for the persistent content store
                          (file "beesens-cs.db" is created/opened there)
    :param flush_database: if True, delete all stored content on startup
    :param pem_path: certificate path handed to the HTTP interface layer
    """
    # debug level
    logger = Logger("Repo", log_level)

    # packet encoder. Fix: the previous default was the NdnTlvEncoder class
    # object itself, on which encoder.set_log_level() crashes; instantiate a
    # default encoder instead.
    if encoder is None:
        encoder = NdnTlvEncoder(log_level)
    else:
        encoder.set_log_level(log_level)
    self.encoder = encoder

    # setup data structures (shared across layer processes via the manager)
    synced_data_struct_factory1 = PiCNSyncDataStructFactory()
    synced_data_struct_factory1.register("face_id_table", FaceIDDict)
    synced_data_struct_factory1.register("cs", ContentStorePersistentExact)
    synced_data_struct_factory1.create_manager()
    face_id_table = synced_data_struct_factory1.manager.face_id_table()
    storage = synced_data_struct_factory1.manager.cs(
        db_path=database_path + "/beesens-cs.db")
    if flush_database:
        storage.delete_all()  # start with an empty store when requested

    # initialize layers
    self.link_layer = BasicLinkLayer([UDP4Interface(port)], face_id_table,
                                     log_level=log_level)
    self.packet_encoding_layer = BasicPacketEncodingLayer(
        self.encoder, log_level=log_level)
    self.storage_layer = StorageLayer(log_level=log_level)
    self.interface_layer = InterfaceLayer(http_port=http_port,
                                          log_level=log_level,
                                          pem_path=pem_path,
                                          flush_database=flush_database)

    # setup stack (top to bottom)
    self.stack: LayerStack = LayerStack([
        self.interface_layer, self.storage_layer,
        self.packet_encoding_layer, self.link_layer
    ])

    # pass cs to storage layer
    self.storage_layer.storage = storage
def __init__(self, database_path, port=9000, log_level=255, encoder: BasicEncoder = None, flush_database=False):
    """Build the push-repository stack (repo -> packet encoding -> link layer).

    :param database_path: directory for the persistent content store
                          (file "pushrepo.db" is created/opened inside it)
    :param port: UDP port the link layer binds to
    :param log_level: numeric log level forwarded to all layers (255 = off)
    :param encoder: packet encoder instance; defaults to SimpleStringEncoder
    :param flush_database: if True, delete all stored content on startup
    """
    # debug level
    logger = Logger("PushRepo", log_level)
    # packet encoder
    if encoder is None:
        self.encoder = SimpleStringEncoder(log_level=log_level)
    else:
        encoder.set_log_level(log_level=log_level)
        self.encoder = encoder
    # setup data structures (shared across layer processes via the manager)
    synced_data_struct_factory = PiCNSyncDataStructFactory()
    synced_data_struct_factory.register("cs", ContentStorePersistentExact)
    synced_data_struct_factory.register("faceidtable", FaceIDDict)
    synced_data_struct_factory.create_manager()
    cs = synced_data_struct_factory.manager.cs(db_path=database_path +
                                               "/pushrepo.db")
    if flush_database:
        cs.delete_all()  # start with an empty store when requested
    faceidtable = synced_data_struct_factory.manager.faceidtable()
    # default interface
    interfaces = [UDP4Interface(port)]
    mgmt_port = interfaces[0].get_port()
    # initialize layers
    self.linklayer = BasicLinkLayer(interfaces, faceidtable,
                                    log_level=log_level)
    self.packetencodinglayer = BasicPacketEncodingLayer(self.encoder,
                                                        log_level=log_level)
    self.repolayer = PushRepositoryLayer(log_level=log_level)
    # stack layout, top to bottom
    self.lstack: LayerStack = LayerStack([
        self.repolayer, self.packetencodinglayer, self.linklayer
    ])
    # hand the persistent content store to the repo layer
    self.repolayer.cs = cs
    # mgmt interface; a push repo has no FIB/PIT, hence the None arguments
    self.mgmt = Mgmt(cs, None, None, self.linklayer, mgmt_port,
                     self.stop_forwarder, log_level=log_level)
def __init__(self, ip: str, port: Optional[int], log_level=255,
             encoder: BasicEncoder = None, autoconfig: bool = False,
             interfaces=None, session_keys: Optional[Dict] = None,
             name: str = None, polling_interval: float = 1.0,
             ping_interval: float = 2.0):
    """Fetch tool with session support on top of the base fetch stack.

    :param ip: address of the remote peer
    :param port: UDP port of the remote peer
    :param log_level: numeric log level (255 = off)
    :param encoder: packet encoder instance; forwarded to the base class
    :param session_keys: pre-established sessions; when given, the instance
                         starts in the "has session" state
    :param polling_interval: seconds between polls for incoming session data
    :param ping_interval: seconds between session keep-alive pings
    """
    super().__init__(ip, port, log_level, encoder, autoconfig, interfaces, name)
    self.ip = ip
    self._logger = Logger("FetchSession", log_level)
    self._pending_sessions: List[Name] = []
    # annotation fixed: `Dict[Name:Name]` was a slice, not a valid generic
    self._running_sessions: Dict[Name, Name] = dict(
    ) if session_keys is None else session_keys
    self._has_session: bool = True if session_keys is not None else False
    # well-known name components used by the session protocol
    self._session_initiator = 'session_connector'
    self._session_identifier = 'sid'
    self._polling_interval = polling_interval
    self._ping_interval = ping_interval
    # multiprocessing primitives shared with the worker processes below
    self._manager = Manager()
    self._mutex = self._manager.Lock()
    # background process polling the stack's upward queue for session traffic
    self.receive_process = Process(target=self._receive_session, args=(
        self.lstack.queue_to_higher,
        self._polling_interval,
        self._mutex,
    ))
    # background process sending periodic keep-alive pings
    self.ping_process = Process(target=self._ping_messages,
                                args=(self._ping_interval, ))
    self.receive_process.start()
    self.ping_process.start()
def __init__(self, port=9000, log_level=255, encoder: BasicEncoder = None,
             interfaces: List[BaseInterface] = None,
             executors: BaseNFNExecutor = None, ageing_interval: int = 3,
             use_thunks=False):
    """NFN forwarder: ICN forwarding plus named-function computation layers.

    :param port: UDP port the link layer binds to
    :param log_level: numeric log level forwarded to all layers (255 = off)
    :param encoder: packet encoder instance; defaults to SimpleStringEncoder
    :param interfaces: pre-created interfaces; default is one UDP4 interface
    :param executors: mapping of execution environment tag -> executor;
                      defaults to the Python executor under "PYTHON"
    :param ageing_interval: ageing interval (seconds) of the ICN layer
    :param use_thunks: enable the thunk-based planning/optimizing layers
    """
    # debug level
    logger = Logger("NFNForwarder", log_level)
    logger.info("Start PiCN NFN Forwarder on port " + str(port))
    # packet encoder
    if encoder is None:
        self.encoder = SimpleStringEncoder(log_level=log_level)
    else:
        encoder.set_log_level(log_level)
        self.encoder = encoder
    # setup data structures (shared across layer processes via the manager)
    synced_data_struct_factory = PiCNSyncDataStructFactory()
    synced_data_struct_factory.register("cs", ContentStoreMemoryExact)
    synced_data_struct_factory.register(
        "fib", ForwardingInformationBaseMemoryPrefix)
    # (sic) "PendingInterstTableMemoryExact" is the actual class name
    synced_data_struct_factory.register("pit", PendingInterstTableMemoryExact)
    synced_data_struct_factory.register("faceidtable", FaceIDDict)
    synced_data_struct_factory.register("computation_table",
                                        NFNComputationList)
    synced_data_struct_factory.register("timeoutprevention_dict",
                                        TimeoutPreventionMessageDict)
    if use_thunks:
        synced_data_struct_factory.register("thunktable", ThunkList)
        synced_data_struct_factory.register("plantable", PlanTable)
    synced_data_struct_factory.create_manager()
    cs = synced_data_struct_factory.manager.cs()
    fib = synced_data_struct_factory.manager.fib()
    pit = synced_data_struct_factory.manager.pit()
    faceidtable = synced_data_struct_factory.manager.faceidtable()
    self.parser = DefaultNFNParser()
    if use_thunks:
        thunktable = synced_data_struct_factory.manager.thunktable()
        plantable = synced_data_struct_factory.manager.plantable(self.parser)
    #setup chunkifier
    self.chunkifier = SimpleContentChunkifyer()
    # default interface
    if interfaces is not None:
        self.interfaces = interfaces
        mgmt_port = port
    else:
        interfaces = [UDP4Interface(port)]
        mgmt_port = interfaces[0].get_port()
    # initialize layers
    self.linklayer = BasicLinkLayer(interfaces, faceidtable,
                                    log_level=log_level)
    self.packetencodinglayer = BasicPacketEncodingLayer(
        self.encoder, log_level=log_level)
    self.icnlayer = BasicICNLayer(log_level=log_level,
                                  ageing_interval=ageing_interval)
    self.chunklayer = BasicChunkLayer(self.chunkifier, log_level=log_level)
    # setup nfn: let unsatisfied interests bubble up to the NFN layer
    self.icnlayer._interest_to_app = True
    if executors is None:
        self.executors = {"PYTHON": NFNPythonExecutor()}
    else:
        self.executors = executors
    self.r2cclient = TimeoutR2CHandler()
    comp_table = synced_data_struct_factory.manager.computation_table(
        self.r2cclient, self.parser)
    self.nfnlayer = BasicNFNLayer(cs, fib, pit, faceidtable, comp_table,
                                  self.executors, self.parser,
                                  self.r2cclient, log_level=log_level)
    if use_thunks:
        self.thunk_layer = BasicThunkLayer(cs, fib, pit, faceidtable,
                                           thunktable, plantable,
                                           self.parser, log_level=log_level)
        self.nfnlayer.optimizer = ThunkPlanExecutor(
            cs, fib, pit, faceidtable, plantable)
    timeoutprevention_dict = synced_data_struct_factory.manager.timeoutprevention_dict(
    )
    self.timeoutpreventionlayer = BasicTimeoutPreventionLayer(
        timeoutprevention_dict, comp_table, pit=pit, log_level=log_level)
    # stack layout, top to bottom; the thunk layer (when enabled) sits
    # between timeout prevention and the ICN layer
    if use_thunks:
        self.lstack: LayerStack = LayerStack([
            self.nfnlayer, self.chunklayer, self.timeoutpreventionlayer,
            self.thunk_layer, self.icnlayer, self.packetencodinglayer,
            self.linklayer
        ])
    else:
        self.lstack: LayerStack = LayerStack([
            self.nfnlayer, self.chunklayer, self.timeoutpreventionlayer,
            self.icnlayer, self.packetencodinglayer, self.linklayer
        ])
    # set CS, FIB, PIT in the forwarding layer
    self.icnlayer.cs = cs
    self.icnlayer.fib = fib
    self.icnlayer.pit = pit
    # mgmt
    self.mgmt = Mgmt(self.icnlayer.cs, self.icnlayer.fib, self.icnlayer.pit,
                     self.linklayer, mgmt_port, self.stop_forwarder,
                     log_level=log_level)
def __init__(self, port=9000, log_level=255, encoder: ExtendedNdnTlvEncoder = None,
             interfaces: List[BaseInterface] = None):
    """Heartbeat-aware ICN forwarder stack.

    :param port: UDP port the link layer binds to
    :param log_level: numeric log level forwarded to all layers (255 = off)
    :param encoder: extended NDN-TLV encoder instance; a fresh one is
                    created when omitted
    :param interfaces: pre-created interfaces; default is one UDP4 interface
    """
    # debug level
    logger = Logger("ICNForwarder", log_level)

    # packet encoder. Fix: when no encoder was given, self.encoder used to be
    # set to an empty tuple `()`, which cannot encode anything; fall back to
    # a fresh ExtendedNdnTlvEncoder as the type hint indicates.
    if encoder is None:
        self.encoder = ExtendedNdnTlvEncoder(log_level)
    else:
        encoder.set_log_level(log_level)
        self.encoder = encoder

    # setup data structures (shared across layer processes via the manager)
    synced_data_struct_factory = PiCNSyncDataStructFactory()
    synced_data_struct_factory.register("cs", ContentStoreMemoryExact)
    synced_data_struct_factory.register(
        "fib", ForwardingInformationBaseMemoryPrefix)
    # (sic) "PendingInterstTableMemoryExact" is the actual class name
    synced_data_struct_factory.register("pit", PendingInterstTableMemoryExact)
    synced_data_struct_factory.register("face_id_table", FaceIDDict)
    synced_data_struct_factory.create_manager()
    cs = synced_data_struct_factory.manager.cs()
    fib = synced_data_struct_factory.manager.fib()
    pit = synced_data_struct_factory.manager.pit(pit_timeout=60)
    face_id_table = synced_data_struct_factory.manager.face_id_table()

    # default interface
    if interfaces is not None:
        self.interfaces = interfaces
        mgmt_port = port
    else:
        interfaces = [UDP4Interface(port)]
        mgmt_port = interfaces[0].get_port()

    # initialize layers
    self.link_layer = BasicLinkLayer(interfaces, face_id_table,
                                     log_level=log_level)
    self.packet_encoding_layer = HeartbeatPacketEncodingLayer(
        self.encoder, log_level=log_level)
    self.icn_layer = HeartbeatNetworkLayer(log_level=log_level)

    # stack layout, top to bottom
    self.lstack: LayerStack = LayerStack(
        [self.icn_layer, self.packet_encoding_layer, self.link_layer])

    # set CS, FIB, PIT in the forwarding layer
    self.icn_layer.cs = cs
    self.icn_layer.fib = fib
    self.icn_layer.pit = pit

    # mgmt
    self.mgmt = Mgmt(cs, fib, pit, self.link_layer, mgmt_port,
                     self.stop_forwarder, log_level=log_level)
def __init__(self, logger_name="BasicEncoder", log_level=255):
    """Create the encoder's logger and remember its configuration.

    Name and level are kept on the instance because Logger objects cannot
    be pickled; __setstate__ re-creates the logger from these attributes.
    """
    self.__logger_name = logger_name
    self.__log_level = log_level
    self.logger = Logger(logger_name, log_level)
def __setstate__(self, d): self.__dict__.update( d ) #need to store logger parameter and recreate logger here, since it cannot be pickled self.logger = Logger(self.__logger_name, self.__log_level)
def main(argv):
    """Configure and run one mobility simulation.

    NOTE(review): the parameter is named `argv` but the body reads the
    module-level `args` -- presumably parsed by the caller; confirm.
    """
    # Log Level
    if args.logging == 'error':
        log_level = logging.ERROR
    elif args.logging == 'warning':
        log_level = logging.WARNING
    elif args.logging == 'info':
        log_level = logging.INFO
    elif args.logging == 'debug':
        log_level = logging.DEBUG
    else:
        log_level = 255  # disables logging
    logger = Logger("MobilitySimulationExec", log_level)
    # Info
    logger.info("Mobility Simulation config params...")
    logger.info("#run: " + str(args.run))
    logger.info("#mobiles: " + str(args.mobiles))
    logger.info("#stations: " + str(args.stations))
    logger.info("Log Level: " + args.logging)
    logger.info("Optimizer: " + str(args.optimizer))
    # seed with the run id so every run is reproducible
    random.seed(args.run)
    # named function definitions deployed at the road-side units
    named_functions = {
        "/rsu/func/f1": "PYTHON\nf\ndef f(a, b, c):\n return a+b+c",
        "/rsu/func/f2": "PYTHON\nf\ndef f(a, b, c):\n return a*b*c",
        "/rsu/func/f3": "PYTHON\nf\ndef f(a, b, c):\n return a-b-c",
        "/rsu/func/f4": "PYTHON\nf\ndef f(a, b, c):\n return a**b**c",
        "/rsu/func/f5": "PYTHON\nf\ndef f(a, b, c):\n return a/b/c"
    }
    # NFN names used to invoke the functions above
    function_names = [
        Name("/rsu/func/f1/_(1,2,3)/NFN"),
        Name("/rsu/func/f2/_(1,2,3)/NFN"),
        Name("/rsu/func/f3/_(1,2,3)/NFN"),
        Name("/rsu/func/f4/_(1,2,3)/NFN"),
        Name("/rsu/func/f5/_(1,2,3)/NFN")
    ]
    # create instances of stationary nodes
    stationary_nodes_list = []
    for i in range(0, args.stations):
        stationary_nodes_list.append(StationaryNode(node_id=i, com_range=0.5))
    # create instances of mobile nodes
    mobile_nodes_list = []
    for i in range(0, args.mobiles):
        # random even vehicle speed between 50 and 120 (randrange upper
        # bound 121 is exclusive)
        speed = random.randrange(50, 121, 2)
        # let vehicles spawn alternately from both sides of the simulation
        if (i % 2) == 0:
            mobile_nodes_list.append(
                MobileNode(node_id=i, spawn_point=0, speed=speed,
                           direction=1))
        else:
            mobile_nodes_list.append(
                MobileNode(node_id=i,
                           spawn_point=(len(stationary_nodes_list) - 1),
                           speed=speed,
                           direction=-1))
    # build the simulation; only the optimizer name differs between branches
    simulation = None
    if args.optimizer == "Edge":
        simulation = MobilitySimulation(run_id=args.run,
                                        mobile_nodes=mobile_nodes_list,
                                        stationary_nodes=stationary_nodes_list,
                                        stationary_node_distance=0.5,
                                        named_functions=named_functions,
                                        function_names=function_names,
                                        forwarder="NFNForwarder",
                                        optimizer="EdgeComputingOptimizer",
                                        use_distribution_helper=True)
    else:
        simulation = MobilitySimulation(run_id=args.run,
                                        mobile_nodes=mobile_nodes_list,
                                        stationary_nodes=stationary_nodes_list,
                                        stationary_node_distance=0.5,
                                        named_functions=named_functions,
                                        function_names=function_names,
                                        forwarder="NFNForwarder",
                                        optimizer="ToDataFirstOptimizer",
                                        use_distribution_helper=True)
    simulation.run()
def __init__(self, logger_name="PiCNProcess", log_level=255):
    """Initialize the process wrapper and its logger.

    Logger name and level are stored on the instance because Logger
    objects cannot be pickled and must be re-creatable.
    """
    # handle of the worker process; not created yet
    self._process: multiprocessing.Process = None
    self.__logger_name = logger_name
    self.__log_level = log_level
    self.logger = Logger(logger_name, log_level)
def __init__(self, port=9000, log_level=255, encoder: BasicEncoder = None,
             routing: bool = False, peers=None, autoconfig: bool = False,
             interfaces: List[BaseInterface] = None, ageing_interval: int = 3,
             node_name: str = None):
    """ICN forwarder stack with optional routing and autoconfig layers.

    :param port: UDP port the link layer binds to
    :param log_level: numeric log level forwarded to all layers (255 = off)
    :param encoder: packet encoder instance; defaults to SimpleStringEncoder
    :param routing: if True, add a routing layer (and a RIB) below the ICN layer
    :param peers: peer list handed to the routing layer
    :param autoconfig: if True, add an autoconfig server layer below the ICN layer
    :param interfaces: pre-created interfaces; default is one UDP4 interface
    :param ageing_interval: ageing interval (seconds) of the ICN layer
    :param node_name: name attached to the FIB/PIT for logging purposes
    """
    # debug level
    logger = Logger("ICNForwarder", log_level)
    # FIXME: Why isn't this self.logger???
    self._node_name = node_name
    # packet encoder
    if encoder is None:
        self.encoder = SimpleStringEncoder(log_level=log_level)
    else:
        encoder.set_log_level(log_level=log_level)
        self.encoder = encoder
    # setup data structures (shared across layer processes via the manager)
    synced_data_struct_factory = PiCNSyncDataStructFactory()
    synced_data_struct_factory.register("cs", ContentStoreMemoryExact)
    synced_data_struct_factory.register(
        "fib", ForwardingInformationBaseMemoryPrefix)
    synced_data_struct_factory.register("pit",
                                        PendingInterestTableMemoryExact)
    synced_data_struct_factory.register("rib", TreeRoutingInformationBase)
    synced_data_struct_factory.register("faceidtable", FaceIDDict)
    synced_data_struct_factory.create_manager()
    cs = synced_data_struct_factory.manager.cs()
    fib = synced_data_struct_factory.manager.fib()
    pit = synced_data_struct_factory.manager.pit()
    rib = None
    if routing:
        # the RIB is only materialized when routing is enabled
        rib = synced_data_struct_factory.manager.rib()
    faceidtable = synced_data_struct_factory.manager.faceidtable()
    # default interface
    if interfaces is not None:
        self.interfaces = interfaces
        mgmt_port = port
    else:
        interfaces = [UDP4Interface(port)]
        mgmt_port = interfaces[0].get_port()
    # initialize layers
    self.linklayer = BasicLinkLayer(interfaces, faceidtable,
                                    log_level=log_level)
    self.packetencodinglayer = BasicPacketEncodingLayer(
        self.encoder, log_level=log_level)
    self.icnlayer = BasicICNLayer(log_level=log_level,
                                  ageing_interval=ageing_interval)
    # base stack layout, top to bottom; optional layers are inserted below
    self.lstack: LayerStack = LayerStack(
        [self.icnlayer, self.packetencodinglayer, self.linklayer])
    if autoconfig:
        self.autoconfiglayer: AutoconfigServerLayer = AutoconfigServerLayer(
            linklayer=self.linklayer,
            address='127.0.0.1',
            registration_prefixes=[(Name('/testnetwork/repos'), True)],
            log_level=log_level)
        self.lstack.insert(self.autoconfiglayer, below_of=self.icnlayer)
    if routing:
        self.routinglayer = BasicRoutingLayer(self.linklayer,
                                              peers=peers,
                                              log_level=log_level)
        self.lstack.insert(self.routinglayer, below_of=self.icnlayer)
    self.icnlayer.cs = cs
    self.icnlayer.fib = fib
    # ----- by Luc
    # FIXME: How to pass these parameters to __init__
    self.icnlayer.fib.logger = logger
    self.icnlayer.fib.node_name = self._node_name
    # ----- by Luc
    # FIXME: How to pass these parameters to __init__
    self.icnlayer.pit = pit
    self.icnlayer.pit.logger = logger
    self.icnlayer.pit.node_name = self._node_name
    # -----
    if autoconfig:
        self.autoconfiglayer.fib = fib
    if routing:
        self.routinglayer.rib = rib
        self.routinglayer.fib = fib
    # mgmt
    self.mgmt = Mgmt(cs, fib, pit, self.linklayer, mgmt_port,
                     self.stop_forwarder, log_level=log_level)
def __init__(self, foldername: Optional[str], prefix: Name, port=9000,
             log_level=255, encoder: BasicEncoder = None,
             autoconfig: bool = False, autoconfig_routed: bool = False,
             interfaces: List[BaseInterface] = None, use_thunks=False):
    """Data repository stack with session support.

    :param foldername: If None, use an in-memory repository. Else, use a
                       file system repository rooted at this folder.
    :param prefix: name prefix the repository serves
    :param port: UDP port the link layer binds to
    :param log_level: numeric log level forwarded to all layers (255 = off)
    :param encoder: packet encoder instance; defaults to SimpleStringEncoder
    :param autoconfig: if True, add an autoconfig repo layer
    :param autoconfig_routed: register the repo prefix globally via autoconfig
    :param interfaces: pre-created interfaces; default is one UDP4 interface
    :param use_thunks: enable the thunk layer (planning/optimizing support)
    """
    logger = Logger("ICNRepoSession", log_level)
    logger.info("Start PiCN Data Repository with Sessions")

    # packet encoder
    if encoder is None:
        self.encoder = SimpleStringEncoder(log_level=log_level)
    else:
        encoder.set_log_level(log_level)
        self.encoder = encoder

    # chunkifyer
    self.chunkifyer = SimpleContentChunkifyer()

    # repo: in-memory or file-system backed, depending on foldername
    manager = multiprocessing.Manager()
    if foldername is None:
        self.repo: BaseRepository = SimpleMemoryRepository(
            prefix, manager, logger)
    else:
        self.repo: BaseRepository = SimpleFileSystemRepository(
            foldername, prefix, manager, logger)

    # initialize layers (thunk tables only when thunks are enabled)
    synced_data_struct_factory = PiCNSyncDataStructFactory()
    synced_data_struct_factory.register("faceidtable", FaceIDDict)
    if use_thunks:
        synced_data_struct_factory.register("thunktable", ThunkList)
        synced_data_struct_factory.register("plantable", PlanTable)
    synced_data_struct_factory.create_manager()
    faceidtable = synced_data_struct_factory.manager.faceidtable()
    if use_thunks:
        self.parser = DefaultNFNParser()
        thunktable = synced_data_struct_factory.manager.thunktable()
        plantable = synced_data_struct_factory.manager.plantable(self.parser)

    # default interface
    if interfaces is not None:
        self.interfaces = interfaces
        mgmt_port = port
    else:
        interfaces = [UDP4Interface(port)]
        mgmt_port = interfaces[0].get_port()

    self.linklayer = BasicLinkLayer(interfaces, faceidtable,
                                    log_level=log_level)
    self.packetencodinglayer = BasicPacketEncodingLayer(
        self.encoder, log_level=log_level)
    self.chunklayer = BasicChunkLayer(self.chunkifyer, log_level=log_level)
    self.repolayer = SessionRepositoryLayer(self.repo, log_level=log_level)

    if use_thunks:
        # no CS/FIB/PIT at a repo, hence the leading None arguments
        self.thunklayer = BasicThunkLayer(None, None, None, faceidtable,
                                          thunktable, plantable, self.parser,
                                          self.repo, log_level=log_level)
        logger.info("Using Thunks")

    # stack layout, top to bottom; the thunk layer (when enabled) sits
    # between the chunk layer and the packet encoding layer
    if use_thunks:
        self.lstack: LayerStack = LayerStack([
            self.repolayer, self.chunklayer, self.thunklayer,
            self.packetencodinglayer, self.linklayer
        ])
    else:
        self.lstack: LayerStack = LayerStack([
            self.repolayer, self.chunklayer, self.packetencodinglayer,
            self.linklayer
        ])

    if autoconfig:
        self.autoconfiglayer = AutoconfigRepoLayer(
            name=prefix.string_components[-1],
            addr='127.0.0.1',
            linklayer=self.linklayer,
            repo=self.repo,
            register_global=autoconfig_routed,
            log_level=log_level)
        self.lstack.insert(self.autoconfiglayer, below_of=self.chunklayer)

    # mgmt; start_repo is the command handler, no CS/FIB/PIT at a repo
    self.mgmt = Mgmt(None, None, None, self.linklayer, mgmt_port,
                     self.start_repo, repo_path=foldername,
                     repo_prfx=prefix, log_level=log_level)
def __init__(self, run_id: int, mobile_nodes: List[MobileNode],
             stationary_nodes: List[StationaryNode],
             stationary_node_distance: float, named_functions: dict,
             function_names: list, forwarder: str = "NFNForwarder",
             optimizer: str = "ToDataFirstOptimizer",
             use_distribution_helper: bool = False,
             log_level=logging.DEBUG):
    """
    Configuration of the mobility simulation
    :param run_id the identifier of the simulation run
    :param mobile_nodes a list of mobile nodes part of the simulation
    :param stationary_nodes a list of stationary nodes forming the infrastructure
    :param stationary_node_distance the distance between the stationary nodes
    :param named_functions a dictionary of named function definitions used to be executed
    :param function_names a list of function names to be assigned to the mobile nodes
    :param forwarder the NFN forwarder to be used
    :param optimizer the NFN resolution strategy optimizer to be used in the simulation
    :param use_distribution_helper A flag indicating if the default distribution helper
           (ZipfMandelbrotDistribution) shall be used or not; default = False
    :param log_level the log level of the logger to be used; default: logging.DEBUG
    """
    self._run_id = run_id
    self._forwarder = forwarder
    self._optimizer = optimizer
    self._mobile_nodes = mobile_nodes
    self._stationary_nodes = stationary_nodes
    self._stationary_node_distance = stationary_node_distance
    self.logger = Logger("MobilitySimulation", log_level)
    # face id matrices between road-side units (rsu) and cars
    self.to_car_faces = [[0] * len(self._mobile_nodes)
                         for i in range(len(self._stationary_nodes))
                         ]  # rsu, car -> faceid
    self.to_rsu_faces = [[0] * len(self._mobile_nodes)
                         for i in range(len(self._stationary_nodes))
                         ]  # rsu, car -> faceid
    # per-mobile-node movement parameters, extracted for easy access
    self._velocities = []
    self._heading_directions = []
    self._starting_points = []
    for node in self._mobile_nodes:
        self._velocities.append(node.speed)
        self._heading_directions.append(node.direction)
        self._starting_points.append(node.spawn_point)
    self._is_running = False  # flag indicating if the simulation is running or not
    self._function_names = function_names  # list of function names to be invoked by the nodes
    self._named_functions = named_functions  # a dict of function definitions to be invoked
    self._chunk_size = 8192
    # simulation-internal bus connecting all simulated nodes
    self._simulation_bus = SimulationBus(
        packetencoder=SimpleStringEncoder())
    self._stationary_node_name_prefix = Name("/rsu")
    self._mobile_node_to_computation = [0] * len(
        mobile_nodes)  # index which mobile node issues which computation
    if use_distribution_helper:
        # TODO in the future support more distribution types, e.g., uniform, gaussian, etc.
        dist_array = ZipfMandelbrotDistribution.create_zipf_mandelbrot_distribution(
            len(self._function_names), 0.7, 0.7)
        for i in range(0, len(mobile_nodes)):
            self._mobile_node_to_computation[i] = ZipfMandelbrotDistribution.\
                get_next_zipfmandelbrot_random_number(dist_array, len(self._function_names)) - 1
            # get_next_zipfmandelbrot_random_number(dist_array, len(self._function_names), run_id) - 1
    self._compute_rsu_connection_time()
    self._setup_simulation_network()
def main(args):
    """Start a push repository; CLI arguments win over config-file values."""
    # level is provisional; tightened below once arguments are resolved
    logger = Logger(
        "ICNPushRepo",
        logging.DEBUG)  # note: set later according to cli/config arguments
    logger.info("Starting a Push Repository...")

    # Parse Configuration file (best effort; fall back to CLI/defaults)
    conf = None
    if args.config != "none":
        try:
            conf = ConfigParser(args.config)
            logger.info("Successfully parsed configuration file.")
        except CouldNotOpenConfigError:
            conf = None
            logger.warning(
                "Could not open configuration file. Proceed with command line arguments or default values."
            )
        except CouldNotParseError:
            logger.warning(
                "Could not parse configuration file. Proceed with command line arguments or default values."
            )
        except MalformedConfigurationError as e:
            logger.warning(
                "Invalid configuration file. Proceed with command line arguments or default values. Hint: "
                + str(e))

    # precedence: command line argument > configuration file > default value
    if not args.port:
        args.port = conf.udp_port if conf and conf.udp_port else default_port
    if not args.format:
        args.format = conf.format if conf and conf.format else default_format
    if not args.logging:
        args.logging = conf.logging if conf and conf.logging else default_logging

    # Log Level: map the textual level; anything unknown disables logging (255)
    levels = {'error': logging.ERROR, 'warning': logging.WARNING,
              'info': logging.INFO, 'debug': logging.DEBUG}
    log_level = levels.get(args.logging, 255)
    logger.setLevel(log_level)

    # Info
    logger.info("UDP Port: " + str(args.port))
    logger.info("Log Level: " + args.logging)
    logger.info("Packet Format: " + args.format)
    logger.info("Database: " + args.database_path)
    logger.info("Flush DB: " + str(args.flush_database))

    # Packet encoder
    if args.format == 'ndntlv':
        encoder = NdnTlvEncoder(log_level)
    else:
        encoder = SimpleStringEncoder(log_level)

    # Start and block until the link layer process terminates
    forwarder = PiCN.ProgramLibs.ICNPushRepository.ICNPushRepository(
        args.database_path, args.port, log_level, encoder,
        args.flush_database)
    forwarder.start_forwarder()
    forwarder.linklayer.process.join()