def create_n_check_tmp_files(delete_first=False, create_ramdisk=False):
    """
    Create tmp dirs if not existing yet.

    Parameters
    ----------
    delete_first : bool, optional
        Treat an existing `PATH_TMP` as leftovers from a previous run and
        clean up instead of raising.
    create_ramdisk : bool, optional
        Mount a ramfs at `PATH_TMP` (requires sudo) if not already mounted.

    Raises
    ------
    AlreadyRunning
        If `PATH_TMP` exists and `delete_first` is False.
    """
    if os.path.exists(PATH_TMP):
        if delete_first:
            # NOTE(review): the stale marker is PATH_TMP but PATH_LOGS is
            # removed here — looks like a copy/paste slip; confirm target.
            try:
                shutil.rmtree(PATH_LOGS)
            except OSError:
                # best-effort cleanup: the dir may be missing or busy.
                # (was `except BaseException`, which also swallowed
                # KeyboardInterrupt/SystemExit)
                pass
            # umount ramdisk
            try_umount_ramdisk()
        else:
            # Ticket #12
            raise AlreadyRunning("Another instance of %s might be running! Did you invoke the cleanup script '%s'?" % (PROJECT_NAME, PATH_CLEANUP_SCRIPT))

    if not os.path.exists(PATH_TMP):
        log.debug("Creating directory '%s'!", PATH_TMP)
        os.makedirs(PATH_TMP)

    if create_ramdisk:
        if not os.path.ismount(PATH_TMP):
            log.info("creating ramdisk at '%s'", PATH_TMP)
            # TODO: better way for ramdisk creation?
            run_shell("sudo mount -t ramfs ramfs {}".format(PATH_TMP))
        else:
            log.info("ramdisk still exists at '%s' ... ", PATH_TMP)
def adjust_interfaces_to_number_of_links(self, node_id, interfaces):
    """
    For each connection, add an additional interface.

    Parameters
    ----------
    node_id
    interfaces : list<type>

    Returns
    -------
    Interfaces
    """
    adjusted = []
    for interface in interfaces:
        if isinstance(interface, (Interface.HubWiFi, Interface.Management)):
            # hub/management interfaces stay as-is: exactly one each
            adjusted.append(type(interface))
        else:
            log.debug(
                "connections for '%s':'%s'", node_id,
                singletons.network_backend.get_all_connections().get(node_id))
            # one interface of the same type per connection of this node
            adjusted.extend(
                type(interface)
                for _ in singletons.network_backend.get_all_connections()[node_id])
    return Interfaces.Interfaces.factory(adjusted)
def check(self):
    """
    Poll the registered file descriptors and append complete lines to the log file.

    Runs until `self.writer_thread` signals termination. Output is
    line-buffered per descriptor: bytes accumulate in
    `self.descriptor_buffers[fd]` and a line is written to `self.fh_logfile`
    (prefixed via `self.prefixes`) only once a newline is seen.
    """
    while True:
        if self.writer_thread.shall_terminate():
            log.debug("stopping '%s'", self.__class__.__name__)
            return

        # SelectorKey, int
        events = self.selector.select(timeout=SELECT_TIMEOUT)
        for key, mask in events:
            fd = key.fileobj
            prefix = self.prefixes.get(fd, "N/A")
            data = fd.read(1)
            if data:
                self.descriptor_buffers[fd] += data.decode('utf-8')
                # line-buffered
                try:
                    newline_idx = self.descriptor_buffers[fd].index("\n")
                # ValueError: substring not found # for "\n"
                except ValueError:
                    continue
                line = self.descriptor_buffers[fd][:newline_idx]
                self.fh_logfile.write('%s: %s\n' % (prefix, line))
                self.fh_logfile.flush()
                # BUGFIX: keep only the data *after* the newline. The old code
                # reassigned `[:newline_idx]`, i.e. kept the line it had just
                # written, so the buffer was never drained.
                self.descriptor_buffers[fd] = self.descriptor_buffers[fd][newline_idx + 1:]
def _exec(emu_node, commands):
    # Run per-node network-configuration shell commands (if any) inside the
    # node's virtualization layer and report progress to the EventSystem.
    if commands:
        log.debug("network config for node: %s:\n%s", emu_node.id, commands)
        emu_node.virtualization_layer.run_commands_eager_check_ret_val(StringIO(commands))
        # notify EventSystem
        # NOTE(review): indentation reconstructed — the progress update may
        # have been intended to run even when `commands` is empty; confirm.
        ev.update([emu_node.id], 1.0, add=True)
# NOTE(review): this chunk is the interior of an enclosing receive/step
# method whose header is outside this view; indentation reconstructed.
def handle_select(rlist, xlist):
    # Handle the outcome of a zmq.select() call: raise on errors reported
    # via xlist, and return True when a reset was requested (caller should
    # stop processing this cycle).
    if xlist:
        raise ZeroMQException("Unknown error occurred during a select() call")
    if self.reset_socket in rlist:
        # drain the reset request, then reset this component
        self.reset_socket.recv()
        self.reset()
        return True

if self.sub_socket in rlist:
    # a new distance matrix arrived: step the simulation, then sync
    local_distance_matrix = self.recv_distance_matrix()
    if config.is_debug():
        log.info("received distance matrix: %s", local_distance_matrix)
    log.info("step ...")
    singletons.simulation_manager.step(1, distance_matrix=local_distance_matrix)
    self.send_sync()
    # wait for sync reply or error
    rlist, _, xlist = zmq.select([self.reset_socket, self.svc], [], [])
    # rlist, _, xlist = zmq.select([self.svc], [], [])
    if handle_select(rlist, xlist):
        return True
    # self.sync()
    if config.is_debug():
        log.debug("stepped ...")
    if self.svc in rlist:
        self.recv_sync()
    return True
def reset_fd_state(self):
    """
    Unregister closed files from the selector and drop their per-fd state.

    Removes the per-descriptor line buffer and prefix entries of every
    closed file and unregisters it from `self.selector`.
    """
    # BUGFIX: iterate a snapshot — `selector.unregister()` mutates the map,
    # so iterating the live `.get_map().values()` view raised
    # "dictionary changed size during iteration".
    for selector_key in list(self.selector.get_map().values()):
        _file = selector_key.fileobj
        if _file.closed:
            log.debug("removing closed file '%s' from '%s'", _file, self.__class__.__name__)
            del self.descriptor_buffers[_file]
            del self.prefixes[_file]
            self.selector.unregister(_file)
def parse_core_config_file_positions(file_path):
    """
    Parse the core xml config file and return the positions of the nodes.

    Parameters
    ----------
    file_path : str
        Path to the core xml file

    Returns
    -------
    dict<int, (float, float)>
        Node positions, keyed by node id, as (lat, lon) read from each
        host's ``<point lat="..." lon="..." type="gps"/>`` element.
    """
    # Open XML document using minidom parser
    # TODO: #32: error handling!
    dom_tree = xml.dom.minidom.parse(file_path)
    scenario = dom_tree.documentElement

    version = scenario.getAttribute("version")
    log.debug("version: %s", version)
    check_version(version)

    # store for each node the position
    positions = OrderedDict()
    # NOTE: we choose our own node ids because the ones in the core topology
    # file may not start with 1
    for host_id, host in enumerate(scenario.getElementsByTagName("host"), 1):
        log.debug("parsing host: %s", host_id)
        for point in host.getElementsByTagName("point"):
            positions[host_id] = (
                float(point.getAttribute("lat")),
                float(point.getAttribute("lon")),
            )

    return positions
def create_n_connect_central_nodes(self, interfaces):
    """
    Create the central (hub) nodes and chain-connect emulation nodes
    across server boundaries.

    Parameters
    ----------
    interfaces

    Returns
    -------
    dict<int, CentralNode>

    Raises
    ------
    ValueError
        If more than one HubWiFi interface is supplied.
    """
    from miniworld.model.singletons.Singletons import singletons

    # create CentralNode s but only if there is a HubWiFi interface
    # TODO: REMOVE
    cnt = 0
    central_nodes_dict = {}
    # connect local devices
    for _if in filter(lambda x: is_central_node_interface(x), interfaces):
        # only a single hub interface is supported right now
        if cnt == 1:
            raise ValueError("Only one '%s' interface supported at the moment!" % HubWiFi)
        # TODO: REFACTOR!
        # TODO: #54: make amount of nodes configurable
        count_central_nodes = 1
        for i in range(0, count_central_nodes):
            central_node = self.network_backend_bootstrapper.central_node_type(
                self.network_backend_bootstrapper, id=i + 1)
            # central_node.id = self.get_br_name(central_node.id, central_node.interface)
            # TODO: #54 make configurable!
            log.debug("creating CentralNode with id: %s", central_node.id)
            # bridge device name doubles as the node id here
            central_node.start(switch=False, bridge_dev_name=central_node.id)
            central_nodes_dict[central_node.id] = central_node
            # remember new bridges
            self.event_monitor.add_new_bridge(central_node.id)
        cnt += 1

    # connect via server boundaries (overlay)
    node_ids = singletons.simulation_manager.get_emulation_node_ids()
    # pairwise chain: (n1,n2), (n2,n3), ...
    for x, y in zip(node_ids, node_ids[1:]):
        emulation_node_x = singletons.simulation_manager.get_emulation_node_for_idx(x)
        emulation_node_y = singletons.simulation_manager.get_emulation_node_for_idx(y)
        log.info("connecting %s<->%s", emulation_node_x, emulation_node_y)
        # NOTE(review): `self` is passed explicitly to what looks like a bound
        # method — confirm connection_across_servers really expects the
        # backend as its first positional argument.
        self.connection_across_servers(self, emulation_node_x, emulation_node_y)

    return central_nodes_dict
def reset(self):
    """
    Terminate all managed subprocesses and reset the log writer.

    Order matters: first stop the background return-code checker, then the
    LogWriter, then SIGTERM every subprocess and escalate to SIGKILL if it
    does not exit within the timeout. Finally clear the subprocess index
    and restart the LogWriter.
    """
    # TODO: #2 : DOC
    # first stop checking bg processes for return codes
    # then SIGTERM, then SIGKILL
    log.debug("'%s' reset ...", self.__class__.__name__)
    self.stop_bg_checker_thread()

    with self.lock:
        # stop LogWriter first
        if self.log_writer:
            self.log_writer.reset()

        # `log.warn` is a deprecated alias — use warning(); also prefer lazy
        # %-args over eager string formatting in log calls.
        log.warning("sending SIGTERM to all processes ...")
        # TODO: Ticket #2
        log.info("terminating %s subproccesses", len(self.subprocesses))

        TIMEOUT = 5  # seconds between SIGTERM and SIGKILL (hoisted out of the loop)
        for subproc in self.subprocesses:
            subproc_info = ' '.join(subproc.args) + (" (PID = %s)" % subproc.pid)
            log.debug("terminating '%s'", subproc_info)
            try:
                subproc.terminate()
                log.debug("waiting for %s to terminate (%s)", subproc_info, TIMEOUT)
                subproc.wait(timeout=TIMEOUT)
                log.debug("terminated %s", subproc_info)
            except subprocess.TimeoutExpired:
                log.warning(
                    "Subprocess: '%s' did not shutdown in time (%s). Killing ..." % (subproc_info, TIMEOUT))
                subproc.kill()
                # reap the killed process so no zombie is left behind
                subproc.wait()

        # clear subprocesses
        self.subprocesses = []
        log.debug("cleared subprocesses index ...")

        if self.log_writer:
            self.log_writer.start()
def check_error_codes(self):
    """
    Periodically poll the background subprocesses for error return codes.

    Runs until the background checker thread signals termination. Every ~5
    seconds each subprocess is polled; a process that exited with a code > 0
    is reported via the thread's exception handler and removed from the
    subprocess index.
    """
    # check processes every x seconds for return codes
    while True:
        if self.bg_checker_thread.shall_terminate():
            return
        self.bg_checker_thread.shall_terminate_event.wait(5.0)

        with self.lock:
            log.debug("checking bg processes return codes ...")
            failed = []
            for subproc in self.subprocesses:
                # BUGFIX: returncode is only set by poll()/wait(); reading the
                # attribute directly left it None for running processes and
                # `None > 0` raises TypeError on Python 3.
                returncode = subproc.poll()
                log.debug("%s => %s", subproc, returncode)
                if returncode is not None and returncode > 0:
                    exception = BackgroundProcessError(
                        "The subprocess '%s' exited with error code '%s'. See the log file for the output!" % (
                            subproc, returncode))
                    self.bg_checker_thread.exception_handler(exception)
                    # error is already logged; remember for removal below
                    failed.append(subproc)
            # BUGFIX: the old code did `del self.subprocesses[subproc_error]`
            # with subproc_error always None (TypeError), and would have
            # mutated the list while iterating it.
            for subproc in failed:
                self.subprocesses.remove(subproc)
def run_sub_process_popen(cmd, stdout=None, stderr=None, stdin=None, **kwargs):
    """
    Start `cmd` as a subprocess without using pipes.

    Parameters
    ----------
    cmd : str
        Command template; expanded via `fmt_cmd_template` and split with shlex.
    stdout, stderr : optional
        A file object, or the string "devnull" to discard the stream.
    stdin : optional
        A file object; defaults to the shared `devnull` handle.

    Returns
    -------
    (subprocess.Popen, str)
        The started process and the expanded command string.
    """
    cmd = fmt_cmd_template(cmd)
    symbol_devnull = "devnull"
    cmd_as_list = shlex.split(cmd)

    # note: do not use subprocess.PIPE! May cause deadlock!
    # see: http://thraxil.org/users/anders/posts/2008/03/13/Subprocess-Hanging-PIPE-is-your-enemy/
    if stdin is None:
        stdin = devnull
    # BUGFIX: compare strings with `==` — `is` relied on CPython interning of
    # the "devnull" literal and silently fails for computed/sliced strings.
    if stdout == symbol_devnull:
        stdout = devnull
    if stderr == symbol_devnull:
        stderr = devnull

    p = subprocess.Popen(cmd_as_list, close_fds=True,
                         stdout=stdout, stderr=stderr, stdin=stdin,
                         **kwargs)
    log.debug("started %s", ' '.join(p.args) + (" (PID = %s)" % p.pid))
    return p, cmd
def parse_core_config_file(file_path, include_interfaces=False):
    """
    Parse the core xml config file and return which nodes are connected with each other.

    Parameters
    ----------
    file_path : str
        Path to the core xml file
    include_interfaces : bool, optional
        If True, map (node_id, interface_idx) pairs instead of plain node ids.

    Returns
    -------
    dict<int, set<int>
        Which nodes are connected to each other (upper triangular matrix)
    dict<(int, int), (int, int)>
        If include_interfaces

    The file looks like this:
    <?xml version="1.0" encoding="UTF-8"?>
    <scenario compiled="true" name="chain3.xml" version="1.0" xmlns="nmfPlan" xmlns:CORE="coreSpecific">
      <network id="net0" name="net0">
        <type>ethernet</type>
        <alias domain="COREID">36433</alias>
        <member type="interface">n1/eth0</member>
        <member type="interface">n2/eth0</member>
        <member type="channel">net0/chan0</member>
        <channel id="net0/chan0" name="chan0">
          <type>ethernet</type>
          <member index="0" type="interface">n1/eth0</member>
          <member index="1" type="interface">n2/eth0</member>
        </channel>
      </network>
      ...
    """
    # Open XML document using minidom parser
    DOMTree = xml.dom.minidom.parse(file_path)
    scenario = DOMTree.documentElement

    version = scenario.getAttribute("version")
    log.debug("version: %s", version)
    check_version(version)

    # type:dict<int, list<int>>
    # store for each node the nodes which are connected to it
    connections = defaultdict(set) if not include_interfaces else {}

    networks = scenario.getElementsByTagName("network")
    for network in networks:
        log.debug("parsing network: %s", network.getAttribute("id"))
        for channel in network.getElementsByTagName("channel"):
            # the first "interface" member of a channel becomes the "current"
            # endpoint; each later member is connected to it
            cur_node_id = None
            cur_interface = None
            for member in channel.getElementsByTagName("member"):
                if (member.getAttribute("type") == "interface"):
                    log.debug(member.childNodes[0].data)
                    # split "n1/eth1" to 1, 1
                    node_id, interface = member.childNodes[0].data.split("/")
                    node_id = int(node_id[1:])
                    interface = int(interface.split('eth')[1])
                    # interface indices are shifted to be 1-based
                    interface += 1
                    if cur_node_id is None:
                        cur_node_id = node_id
                        cur_interface = interface
                    else:
                        if include_interfaces:
                            connections[(cur_node_id, cur_interface)] = (node_id, interface)
                        else:
                            connections[cur_node_id].add(node_id)
                        # NOTE(review): the reverse entry is only added when
                        # cur_node_id >= node_id — presumably to keep the
                        # matrix upper-triangular; confirm that the
                        # unconditional forward entry above is intended too.
                        if cur_node_id >= node_id:
                            if include_interfaces:
                                connections[(node_id, interface)] = (cur_node_id, cur_interface)
                            else:
                                connections[node_id].add(cur_node_id)

    return connections
def reset(self):
    """Log the reset and delegate the actual work to `stop()`."""
    cls_name = self.__class__.__name__
    log.debug("resetting %s" % cls_name)
    self.stop()
def fun(expecter, idx, cnt):
    """Progress callback: log how many clients have synced so far."""
    log.debug("%d/%d clients synced ...", idx, cnt)
def recv_sync(self):
    """Receive the sync reply from the server (debug-logged in debug mode)."""
    if config.is_debug():
        log.debug("syncing with server [done]")
    self.recv()