def stop_vpp_service(node):
    """Stop the VPP service unit on a given topology node.

    :param node: Node on which VPP should be stopped.
    :type node: dict
    """
    unit = Constants.VPP_UNIT
    DUTSetup.stop_service(node, unit)
def restart_honeycomb_and_vpp_on_duts(*nodes):
    """Restart the Honeycomb service on specified DUT nodes.

    Use the keyword "Check Honeycomb Startup State" to check when Honeycomb
    is fully restarted.

    :param nodes: List of nodes to restart Honeycomb on.
    :type nodes: list
    :raises HoneycombError: If Honeycomb failed to restart.
    """
    logger.console("\nRestarting Honeycomb service ...")

    failed_hosts = []
    for node in nodes:
        # Only DUT nodes run Honeycomb; skip everything else.
        if node['type'] != NodeType.DUT:
            continue
        ssh = SSH()
        ssh.connect(node)
        ret_code, _, _ = ssh.exec_command_sudo(
            "sudo service honeycomb restart ")
        if int(ret_code) != 0:
            failed_hosts.append(node['host'])
        # Even if the restart command failed, still attempt DUT setup;
        # only a setup failure skips the progress message.
        try:
            DUTSetup.setup_dut(node)
        except Exception as err:
            logger.debug(err)
            failed_hosts.append(node['host'])
            continue
        logger.info("Restart of Honeycomb and VPP on node {0} is "
                    "in progress ...".format(node['host']))
    if failed_hosts:
        raise HoneycombError('Node(s) {0} failed to restart Honeycomb'
                             ' and/or VPP.'.format(failed_hosts))
def restart_vpp_service(node):
    """Restart the VPP service unit on a given topology node.

    :param node: Node whose VPP service should be restarted.
    :type node: dict
    """
    unit = Constants.VPP_UNIT
    DUTSetup.restart_service(node, unit)
def verify_vpp_installed(node):
    """Check that a VPP installation is present on a topology node.

    :param node: Node to inspect for a VPP installation.
    :type node: dict
    """
    program = u"vpp"
    DUTSetup.verify_program_installed(node, program)
def start_vpp_service_on_all_duts(nodes):
    """Bring up the VPP service on every DUT in the topology.

    :param nodes: Topology nodes.
    :type nodes: dict
    """
    unit = Constants.VPP_UNIT
    DUTSetup.start_service_on_all_duts(nodes, unit)
def stop_vpp_service(node):
    """Stop the VPP service running on a node.

    :param node: VPP node.
    :type node: dict
    :raises RuntimeError: If VPP service fails to stop.
    """
    unit = Constants.VPP_UNIT
    DUTSetup.stop_service(node, unit)
def apply_config(self, filename=None, retries=60, restart_vpp=True):
    """Generate and apply VPP configuration for node.

    Use data from calls to this class to form a startup.conf file and
    replace /etc/vpp/startup.conf with it on node.

    :param filename: Startup configuration file name.
    :param retries: Number of times (default 60) to re-try waiting.
    :param restart_vpp: Whether to restart VPP.
    :type filename: str
    :type retries: int
    :type restart_vpp: bool
    :raises RuntimeError: If writing config file failed or restart of
        VPP failed or backup of VPP startup.conf failed.
    """
    self.dump_config(self._nodeconfig)

    ssh = SSH()
    ssh.connect(self._node)

    if filename is None:
        filename = self._vpp_startup_conf

    # Keep a copy of the current startup.conf so it can be restored later.
    if self._vpp_startup_conf_backup is not None:
        ret, _, _ = \
            ssh.exec_command('sudo cp {src} {dest}'.
                             format(src=self._vpp_startup_conf,
                                    dest=self._vpp_startup_conf_backup))
        if ret != 0:
            raise RuntimeError('Backup of config file failed on node '
                               '{name}'.format(name=self._hostname))

    # Bug fix: the tee target previously did not reference the 'filename'
    # format argument at all, so the generated config never reached the
    # requested file. Write it to {filename} as intended.
    ret, _, _ = \
        ssh.exec_command('echo "{config}" | sudo tee {filename}'.
                         format(config=self._vpp_config,
                                filename=filename))
    if ret != 0:
        raise RuntimeError(
            'Writing config file failed to node {name}'.format(
                name=self._hostname))

    if restart_vpp:
        DUTSetup.start_service(self._node, Constants.VPP_UNIT)

        # Sleep 1 second, up to <retries> times,
        # and verify if VPP is running by probing its CLI socket.
        for _ in range(retries):
            time.sleep(1)
            ret, stdout, _ = \
                ssh.exec_command('echo show pci | nc 0 5002 || '
                                 'echo "VPP not yet running"')
            if ret == 0 and 'VPP not yet running' not in stdout:
                break
        else:
            raise RuntimeError(
                'VPP failed to restart on node {name}'.format(
                    name=self._hostname))
def verify_vpp_on_all_duts(nodes):
    """Verify that VPP is installed on all DUT nodes.

    Starts the VPP service on each DUT and queries version and
    interface information to confirm VPP is responsive.

    :param nodes: Nodes in the topology.
    :type nodes: dict
    """
    duts = (n for n in nodes.values() if n['type'] == NodeType.DUT)
    for dut in duts:
        DUTSetup.start_service(dut, Constants.VPP_UNIT)
        VPPUtil.vpp_show_version_verbose(dut)
        VPPUtil.vpp_show_interfaces(dut)
def stop_vpp_service(node, node_key=None):
    """Stop VPP service on the specified topology node.

    When a node key is given, the registered PAPI and STATS socket
    entries for that node are removed from the topology as well.

    :param node: Topology node.
    :param node_key: Topology node key.
    :type node: dict
    :type node_key: str
    """
    DUTSetup.stop_service(node, Constants.VPP_UNIT)
    if not node_key:
        return
    for socket_type in (SocketType.PAPI, SocketType.STATS):
        Topology.del_node_socket_id(node, socket_type, node_key)
def restart_vpp_service(node, node_key=None):
    """Restart VPP service on the specified topology node.

    When a node key is given, fresh PAPI and STATS socket entries are
    registered for that node in the topology.

    :param node: Topology node.
    :param node_key: Topology node key.
    :type node: dict
    :type node_key: str
    """
    DUTSetup.restart_service(node, Constants.VPP_UNIT)
    if not node_key:
        return
    sockets = (
        (SocketType.PAPI, Constants.SOCKSVR_PATH),
        (SocketType.STATS, Constants.SOCKSTAT_PATH),
    )
    for sock_type, sock_path in sockets:
        Topology.add_new_socket(node, sock_type, node_key, sock_path)
def verify_vpp(node):
    """Verify that VPP is installed and started on the specified topology
    node.

    :param node: Topology node.
    :type node: dict
    :raises RuntimeError: If VPP service fails to start.
    """
    VPPUtil.verify_vpp_installed(node)
    try:
        # Check that vppctl responds.
        VPPUtil.verify_vpp_started(node)
        # Check that the PAPI interface responds.
        VPPUtil.show_log(node)
    finally:
        # Always collect service logs, even on failure.
        DUTSetup.get_service_logs(node, Constants.VPP_UNIT)
def stop_vpp_service(node, node_key=None):
    """Stop VPP service on the specified topology node.

    Disconnect possibly connected PAPI executor.

    :param node: Topology node.
    :param node_key: Topology node key.
    :type node: dict
    :type node_key: str
    """
    # Containers have a separate lifecycle, but better be safe.
    PapiSocketExecutor.disconnect_all_sockets_by_node(node)
    DUTSetup.stop_service(node, Constants.VPP_UNIT)
    if not node_key:
        return
    for socket_type in (SocketType.PAPI, SocketType.STATS):
        Topology.del_node_socket_id(node, socket_type, node_key)
def start_hoststack_test_program(node, namespace, core_list, program):
    """Start the specified HostStack test program.

    :param node: DUT node.
    :param namespace: Net Namespace to run program in.
    :param core_list: List of cpu's to pass to taskset to pin the test
        program to a different set of cores on the same numa node as VPP.
    :param program: Test program.
    :type node: dict
    :type namespace: str
    :type core_list: str
    :type program: dict
    :returns: Process ID
    :rtype: int
    :raises RuntimeError: If node subtype is not a DUT or startup failed.
    """
    if node[u"type"] != u"DUT":
        raise RuntimeError(u"Node type is not a DUT!")

    program_name = program[u"name"]
    # Make sure no stale instance of the program is still running.
    DUTSetup.kill_program(node, program_name, namespace)

    if namespace == u"default":
        shell_cmd = u"sh -c"
    else:
        shell_cmd = f"ip netns exec {namespace} sh -c"

    env_vars = f"{program[u'env_vars']} " if u"env_vars" in program else u""
    args = program[u"args"]
    program_path = program.get(u"path", u"")
    # NGINX used `worker_cpu_affinity` in configuration file
    taskset_cmd = u"" if program_name == u"nginx" else \
        f"taskset --cpu-list {core_list}"

    cmd = f"nohup {shell_cmd} \'{env_vars}{taskset_cmd} " \
        f"{program_path}{program_name} {args} >/tmp/{program_name}_" \
        f"stdout.log 2>/tmp/{program_name}_stderr.log &\'"
    try:
        exec_cmd_no_error(node, cmd, sudo=True)
        return DUTSetup.get_pid(node, program_name)[0]
    except RuntimeError as exc:
        stdout_log, stderr_log = \
            HoststackUtil.get_hoststack_test_program_logs(node, program)
        # Chain the original failure so debugging keeps the root cause;
        # the previous trailing 'return None' was unreachable and removed.
        raise RuntimeError(f"Start {program_name} failed!\nSTDERR:\n" \
            f"{stderr_log}\nSTDOUT:\n{stdout_log}") from exc
def restart_vpp_service(node, node_key=None):
    """Restart VPP service on the specified topology node.

    Disconnect possibly connected PAPI executor.

    :param node: Topology node.
    :param node_key: Topology node key.
    :type node: dict
    :type node_key: str
    """
    # Containers have a separate lifecycle, but better be safe.
    PapiSocketExecutor.disconnect_all_sockets_by_node(node)
    DUTSetup.restart_service(node, Constants.VPP_UNIT)
    if not node_key:
        return
    sockets = (
        (SocketType.PAPI, Constants.SOCKSVR_PATH),
        (SocketType.STATS, Constants.SOCKSTAT_PATH),
    )
    for sock_type, sock_path in sockets:
        Topology.add_new_socket(node, sock_type, node_key, sock_path)
def verify_vpp(node):
    """Verify that VPP is installed and started on the specified topology
    node. Adjust privileges so user can connect without sudo.

    :param node: Topology node.
    :type node: dict
    :raises RuntimeError: If VPP service fails to start.
    """
    DUTSetup.verify_program_installed(node, 'vpp')
    try:
        # Check that vppctl responds.
        VPPUtil.verify_vpp_started(node)
        # Adjust privileges.
        VPPUtil.adjust_privileges(node)
        # Check that the PAPI interface responds.
        VPPUtil.show_log(node)
        VPPUtil.vpp_show_version(node)
    finally:
        # Always collect service logs, even on failure.
        DUTSetup.get_service_logs(node, Constants.VPP_UNIT)
def enable_coredump_limit_vpp(self, node):
    """Enable coredump for VPP PID by setting no core limits on DUT
    if setting of core limit by this library is enabled.

    :param node: DUT Node in the topology.
    :type node: dict
    """
    # Guard clause instead of nested if; short-circuit order preserved.
    if node[u"type"] != NodeType.DUT or not self.is_core_limit_enabled():
        return
    # NOTE(review): DUTSetup.get_pid appears to return a list elsewhere
    # (callers index [0]); confirm enable_coredump_limit accepts that.
    vpp_pid = DUTSetup.get_pid(node, u"vpp")
    self.enable_coredump_limit(node, vpp_pid)
def enable_coredump_limit_vpp_on_all_duts(self, nodes):
    """Enable coredump for all VPP PIDs by setting no core limits
    on all DUTs if setting of core limit by this library is enabled.

    :param nodes: Nodes in the topology.
    :type nodes: dict
    """
    for node in nodes.values():
        # Same evaluation order as a combined 'and' condition.
        if node['type'] != NodeType.DUT:
            continue
        if not self.is_core_limit_enabled():
            continue
        self.enable_coredump_limit(node, DUTSetup.get_vpp_pid(node))
def tg_get_interface_driver(node, pci_addr):
    """Get interface driver from the TG node.

    :param node: Node to get interface driver on (must be TG node).
    :param pci_addr: PCI address of the interface.
    :type node: dict
    :type pci_addr: str
    :returns: Interface driver or None if not found.
    :rtype: str
    :raises RuntimeError: If PCI rescan or lspci command execution failed.
    """
    driver = DUTSetup.get_pci_dev_driver(node, pci_addr)
    return driver
def qemu_start(self):
    """Launch QEMU and block until the VM has booted.

    :returns: VM node info.
    :rtype: dict
    """
    command = OptionString()
    command.add(f"{Constants.QEMU_BIN_PATH}/qemu-system-{self._arch}")
    command.extend(self._params)
    try:
        # Hugepages must be available before the VM can be started.
        DUTSetup.check_huge_page(
            self._node, u"/dev/hugepages", int(self._opt.get(u"mem")))

        exec_cmd_no_error(
            self._node, command, timeout=300, sudo=True,
            message=f"QEMU: Start failed on {self._node[u'host']}!"
        )
        self._wait_until_vm_boot()
    except RuntimeError:
        # Clean up any partially started VMs before propagating.
        self.qemu_kill_all()
        raise
    return self._vm_info
def qemu_start(self):
    """Launch QEMU and block until the VM has booted.

    :returns: VM node info.
    :rtype: dict
    """
    start_cmd = OptionString()
    start_cmd.add('{bin_path}/qemu-system-{arch}'.format(
        bin_path=Constants.QEMU_BIN_PATH,
        arch=Topology.get_node_arch(self._node)))
    start_cmd.extend(self._params)

    fail_msg = ('QEMU: Start failed on {host}!'.
                format(host=self._node['host']))
    try:
        # Hugepages must be available before the VM can be started.
        DUTSetup.check_huge_page(
            self._node, '/dev/hugepages', self._opt.get('mem'))

        exec_cmd_no_error(
            self._node, start_cmd, timeout=300, sudo=True,
            message=fail_msg)
        self._wait_until_vm_boot()
    except RuntimeError:
        # Clean up any partially started VMs before propagating.
        self.qemu_kill_all()
        raise
    return self._vm_info
def qemu_start(self):
    """Start QEMU and wait until VM boot.

    .. note:: First set at least node to run QEMU on.

    :returns: VM node info.
    :rtype: dict
    """
    # Qemu binary path, assembled from configured install path and binary.
    bin_path = ('{qemu_path}{qemu_bin}'.
                format(qemu_path=self._qemu_opt.get('qemu_path'),
                       qemu_bin=self._qemu_opt.get('qemu_bin')))
    # Memory and huge pages: file-backed memory object on the huge page
    # mount, shared so vhost-user interfaces can map it.
    mem = ('-object memory-backend-file,id=mem,size={mem_size}M,'
           'mem-path={path},share=on -m {mem_size} -numa node,memdev=mem'.
           format(mem_size=self._qemu_opt.get('mem_size'),
                  path=self._qemu_opt.get('huge_mnt')))
    # Drive option; image locking is disabled only on QEMU > 2.10,
    # where the file.locking property exists.
    drive = ('-drive file={disk_image},format=raw,cache=none,if=virtio'
             '{locking}'.
             format(disk_image=self._qemu_opt.get('disk_image'),
                    locking=',file.locking=off'\
                    if self._qemu_version_is_greater('2.10') else ''))
    # SSH forwarding: host port forwarded to guest port 22.
    ssh = ('-net user,hostfwd=tcp::{ssh_fwd_port}-:22'.
           format(ssh_fwd_port=self._qemu_opt.get('ssh_fwd_port')))
    # Setup QMP via unix socket.
    qmp = ('-qmp unix:{qmp_sock},server,nowait'.
           format(qmp_sock=self._qemu_opt.get('qmp_sock')))
    # Setup QGA via chardev (unix socket) and isa-serial channel.
    qga = ('-chardev socket,path={qga_sock},server,nowait,id=qga0 '
           '-device isa-serial,chardev=qga0'.
           format(qga_sock=self._qemu_opt.get('qga_sock')))
    # Setup serial console on a localhost TCP socket.
    serial = ('-chardev socket,host=127.0.0.1,port={serial_port},id=gnc0,'
              'server,nowait -device isa-serial,chardev=gnc0'.
              format(serial_port=self._qemu_opt.get('serial_port')))
    # Graphic setup: headless, no display or VGA device.
    graphic = '-monitor none -display none -vga none'
    # PID file so the QEMU process can be tracked/killed later.
    pid = ('-pidfile {pid_file}'.
           format(pid_file=self._qemu_opt.get('pid_file')))

    # By default check only if hugepages are available.
    # If 'huge_allocate' is set to true try to allocate as well.
    DUTSetup.check_huge_page(self._node, self._qemu_opt.get('huge_mnt'),
                             self._qemu_opt.get('mem_size'),
                             allocate=self._qemu_opt.get('huge_allocate'))

    # Run QEMU: assemble the full command line from the parts above.
    cmd = ('{bin_path} {smp} {mem} {ssh} {options} {drive} {qmp} {serial} '
           '{qga} {graphic} {pid}'.
           format(bin_path=bin_path, smp=self._qemu_opt.get('smp'),
                  mem=mem, ssh=ssh, options=self._qemu_opt.get('options'),
                  drive=drive, qmp=qmp, serial=serial, qga=qga,
                  graphic=graphic, pid=pid))
    try:
        ret_code, _, _ = self._ssh.exec_command_sudo(cmd, timeout=300)
        if int(ret_code) != 0:
            raise RuntimeError('QEMU start failed on {host}'.
                               format(host=self._node['host']))
        # Wait until VM boot
        self._wait_until_vm_boot()
    except (RuntimeError, SSHTimeout):
        # On any failure, kill leftover QEMU processes and remove sockets.
        self.qemu_kill_all()
        self.qemu_clear_socks()
        raise
    logger.trace('QEMU started successfully.')
    # Update interface names in VM node dict
    self._update_vm_interfaces()
    # Return VM node dict
    return self._vm_info