def ext_host_status_overview(self, results, index, cfg):
    super(ExtHostImpl, self).ext_host_status_overview(results, index, cfg)
    if results[index]["ext_host_ssh_ping"]:
        results[index]["ext_host_needed_sudos_to_add"] = \
            P4STA_utils.check_needed_sudos(
                {"sudo_rights": results[index]["ext_host_sudo_rights"]},
                ["/usr/bin/pkill", "/usr/bin/killall", "/sbin/rmmod",
                 "/sbin/modprobe",
                 "/home/" + cfg["ext_host_user"]
                 + "/p4sta/externalHost/dpdkExtHost/build/receiver"],
                dynamic_mode_inp=results[index]["list_of_path_possibilities"])
        try:
            answer = P4STA_utils.execute_ssh(
                cfg["ext_host_user"], cfg["ext_host_ssh"],
                "[ -d '/home/" + cfg["ext_host_user"]
                + "/p4sta/externalHost/dpdkExtHost/dpdk-19.11/build' ] "
                "&& echo '1'")
            print(answer)
            if answer[0] == "1":
                results[index]["custom_checks"] = [[
                    True, "DPDK", "is installed"]]
            else:
                results[index]["custom_checks"] = [[
                    False, "DPDK", "no installation found"]]
        except Exception:
            results[index]["custom_checks"] = [[
                False, "DPDK",
                "An error occurred while checking the DPDK installation path"]]
        try:
            answer = P4STA_utils.execute_ssh(
                cfg["ext_host_user"], cfg["ext_host_ssh"],
                "cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages")
            try:
                num = int(answer[0])
                if num < 500:
                    results[index]["custom_checks"].append([
                        False, "DPDK Hugepages",
                        "The number of configured hugepages ("
                        + str(num) + ") is too low or zero!"])
                else:
                    results[index]["custom_checks"].append([
                        True, "DPDK Hugepages",
                        "The number of configured hugepages is "
                        + str(num) + "!"])
            except Exception:
                results[index]["custom_checks"].append([
                    False, "DPDK Hugepages",
                    "An error occurred while checking the hugepages: "
                    + str(answer)])
        except Exception:
            results[index]["custom_checks"].append(
                [False, "DPDK Hugepages", "ERROR"])
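# Illustrative only: a hedged sketch of the shape results[index] is expected
# to have after the checks above ran against a reachable DPDK external host.
# The concrete values below are made up for illustration.
example_result = {
    "ext_host_ssh_ping": True,
    "ext_host_needed_sudos_to_add": [],  # filled by check_needed_sudos
    "custom_checks": [
        [True, "DPDK", "is installed"],
        [True, "DPDK Hugepages",
         "The number of configured hugepages is 1024!"],
    ],
}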
def stamper_status_overview(self, results, index, cfg): # instances inheriting from abstract_target could implement additional # checks in the following way: # res["custom_checks"] = [[True=green/False=red, # "ipv4_forwarding"(=label to check), # "=1"(=text indicating result of check)]] res = {} res["stamper_ssh_ping"] = (os.system("timeout 1 ping " + cfg["stamper_ssh"] + " -c 1") == 0) res["stamper_sudo_rights"], list_of_path_possibilities = \ P4STA_utils.check_sudo( cfg["stamper_user"], cfg["stamper_ssh"], dynamic_mode=True) print("Target sudo path possibilities:") print(list_of_path_possibilities) if res["stamper_ssh_ping"]: res["stamper_compile_status_color"], res["p4_compile_status"] = \ self.check_if_p4_compiled(cfg) else: res["stamper_compile_status_color"], res["p4_compile_status"] = ( False, "P4-Stamper is not reachable at SSH IP!") # needed sudos = defined in target_config.json + dynamic sudos needed_sudos = self.target_cfg["status_check"]["needed_sudos_to_add"] \ + self.needed_dynamic_sudos(cfg) res["stamper_needed_sudos_to_add"] = P4STA_utils.check_needed_sudos( {"sudo_rights": res["stamper_sudo_rights"]}, needed_sudos, dynamic_mode_inp=list_of_path_possibilities) # store in results list (no return possible for a thread) results[index] = res
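# A hedged sketch (not part of the core code) of how a target implementation
# inheriting from the abstract target class could append its own entry to
# "custom_checks" as described in the comment above; it mirrors the
# IPv4-forwarding check used by the bmv2 target further below. The class name
# "MyTargetImpl" and the base class name "AbstractTarget" are assumptions for
# illustration.
class MyTargetImpl(AbstractTarget):
    def stamper_status_overview(self, results, index, cfg):
        super(MyTargetImpl, self).stamper_status_overview(results, index, cfg)
        answer = P4STA_utils.execute_ssh(
            cfg["stamper_user"], cfg["stamper_ssh"],
            "cat /proc/sys/net/ipv4/ip_forward")
        results[index]["custom_checks"] = [
            [answer[0] == "1", "ipv4_forwarding", "=" + answer[0]]]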
def loadgen_status_overview(self, host, results, index): def check_iface(user, ip, iface, namespace=""): ipv4, mac, prefix, up_state, iface_found = \ P4STA_utils.fetch_interface(user, ip, iface, namespace) if ipv4 == "" or ipv4 == []: ipv4 = "n/a" if mac == "" or mac == []: mac = "device not found" return ipv4, mac, prefix, up_state def check_namespaces(user, ip): namespaces = P4STA_utils.execute_ssh(user, ip, "ip netns list") answer = [] for ns in namespaces: if ns != "": ns_name = ns.split(" ")[0] answer.append([str(ns)] + P4STA_utils.execute_ssh( user, ip, "sudo ip netns exec " + str(ns_name) + " ifconfig")) return answer res = {} res["ssh_ping"] = (os.system("timeout 1 ping " + host["ssh_ip"] + " -c 1") == 0) if res["ssh_ping"]: if "namespace_id" in host: res["fetched_ipv4"], res["fetched_mac"], \ res["fetched_prefix"], res["up_state"] = check_iface( host['ssh_user'], host['ssh_ip'], host['loadgen_iface'], host["namespace_id"]) else: res["fetched_ipv4"], res["fetched_mac"], \ res["fetched_prefix"], res["up_state"] = check_iface( host['ssh_user'], host['ssh_ip'], host['loadgen_iface'], "") res["sudo_rights"], list_of_path_possibilities = \ P4STA_utils.check_sudo( host['ssh_user'], host['ssh_ip'], dynamic_mode=True) print("Loadgen sudo path possibilities:") print(list_of_path_possibilities) res["needed_sudos_to_add"] = P4STA_utils.check_needed_sudos( {"sudo_rights": res["sudo_rights"]}, self.loadgen_cfg["status_check"]["needed_sudos_to_add"], dynamic_mode_inp=list_of_path_possibilities) res["ip_routes"] = P4STA_utils.execute_ssh(host['ssh_user'], host['ssh_ip'], "ip route") res["namespaces"] = check_namespaces(host['ssh_user'], host['ssh_ip']) else: res["sudo_rights"] = ["not reachable"] res["needed_sudos_to_add"] = [] res["fetched_ipv4"], res["fetched_mac"] = ("", "") res["fetched_prefix"], res["up_state"] = ("", "down") res["ip_routes"] = [] res["namespaces"] = [] results[index] = res
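# A hedged sketch of how such an overview method is typically driven: because
# a thread cannot return a value, the caller pre-allocates a results list and
# each thread writes to its own index. "loadgen_obj" and "hosts" are assumed
# to come from the surrounding management code.
import threading

def collect_loadgen_status(loadgen_obj, hosts):
    results = [None] * len(hosts)
    threads = [threading.Thread(target=loadgen_obj.loadgen_status_overview,
                                args=(host, results, i))
               for i, host in enumerate(hosts)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return results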
def stop_external(self, file_id): self.cfg = P4STA_utils.read_current_cfg() P4STA_utils.execute_ssh(self.cfg["ext_host_user"], self.cfg["ext_host_ssh"], "sudo killall external_host_python_receiver") input = [ "ssh", "-o ConnectTimeout=5", self.cfg["ext_host_user"] + "@" + self.cfg["ext_host_ssh"], "cd /home/" + self.cfg["ext_host_user"] + "/p4sta/externalHost/python; ./check_extH_status.sh; exit" ] time.sleep(0.2) c = 0 while True: # wait until exthost stopped time.sleep(0.3) c = c + 1 res = subprocess.run(input, stdout=subprocess.PIPE).stdout result = res.decode() if result.find("1") > -1 or c > 59: # if 1 is found by check_extH_status.sh at external host # external Host has finished saving csv files break subprocess.run([ "scp", self.cfg["ext_host_user"] + "@" + self.cfg["ext_host_ssh"] + ":/home/" + self.cfg["ext_host_user"] + "/p4sta/externalHost/python/raw_packet_counter_" + file_id + ".csv", P4STA_utils.get_results_path(file_id) ]) subprocess.run([ "scp", self.cfg["ext_host_user"] + "@" + self.cfg["ext_host_ssh"] + ":/home/" + self.cfg["ext_host_user"] + "/p4sta/externalHost/python/packet_sizes_" + file_id + ".csv", P4STA_utils.get_results_path(file_id) ]) subprocess.run([ "scp", self.cfg["ext_host_user"] + "@" + self.cfg["ext_host_ssh"] + ":/home/" + self.cfg["ext_host_user"] + "/p4sta/externalHost/python/timestamp1_list_" + file_id + ".csv", P4STA_utils.get_results_path(file_id) ]) subprocess.run([ "scp", self.cfg["ext_host_user"] + "@" + self.cfg["ext_host_ssh"] + ":/home/" + self.cfg["ext_host_user"] + "/p4sta/externalHost/python/timestamp2_list_" + file_id + ".csv", P4STA_utils.get_results_path(file_id) ]) P4STA_utils.execute_ssh( self.cfg["ext_host_user"], self.cfg["ext_host_ssh"], "cd /home/" + self.cfg["ext_host_user"] + "/p4sta/externalHost/python; rm *.csv") return True
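# A hedged sketch generalizing the polling loop above: run a remote status
# command over SSH until it reports "1" or a maximum number of tries is
# reached. Names, timeout and interval are illustrative; the real code inlines
# this logic together with the scp transfers. subprocess and time are assumed
# to be imported at module level as in the code above.
def wait_for_remote_flag(user, ip, command, tries=60, interval=0.3):
    for _ in range(tries):
        time.sleep(interval)
        out = subprocess.run(
            ["ssh", "-o ConnectTimeout=5", user + "@" + ip, command],
            stdout=subprocess.PIPE).stdout.decode()
        if out.find("1") > -1:
            return True
    return False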
def start_stamper_software(self, cfg): try: input = "ip route get " + cfg["stamper_ssh"] output = subprocess.run(input, stdout=subprocess.PIPE, shell=True).stdout.decode("utf-8").replace( "\n", "").split(" ") route_iface = output[output.index("dev") + 1] # Assumes that all bmv2 management SSH IPs are in /24 subnet # should be 10.99.66.0/24 ip_splitted = cfg["ext_host_ssh"].split(".") subprocess.run("sudo ip route add " + ".".join(ip_splitted[:3]) + ".0/24 via " + cfg["stamper_ssh"] + " dev " + route_iface, stdout=subprocess.PIPE, shell=True) except Exception: print(traceback.format_exc()) print("ERROR: Adding ip route to bmv2 server failed. " "Mininet hosts could not be reachable.") lines_check = P4STA_utils.execute_ssh( cfg["stamper_user"], cfg["stamper_ssh"], "cat " + cfg["bmv2_dir"] + "/LICENSE") if len(lines_check) > 100: subprocess.run([ self.realPath + "/scripts/start_mininet.sh", "/home/" + cfg["stamper_user"] + "/p4sta/stamper/bmv2/scripts/netgen.py", cfg["stamper_user"], cfg["stamper_ssh"], project_path ]) else: print("BMV2 DIR NOT FOUND AT BMV2 TARGET: " + str(cfg["bmv2_dir"])) return "BMV2 DIR NOT FOUND AT BMV2 TARGET: " + str(cfg["bmv2_dir"])
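# Hedged illustration of the parsing above: "ip route get <stamper_ssh>"
# prints a single routing entry; splitting it on spaces and taking the token
# after "dev" yields the local egress interface used for the added /24 route.
# The sample line is made up.
sample = "10.99.66.2 via 192.168.0.1 dev eno1 src 192.168.0.10 uid 1000"
tokens = sample.split(" ")
route_iface = tokens[tokens.index("dev") + 1]  # -> "eno1"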
def ext_host_status_overview(self, results, index, cfg): def check_iface(user, ip, iface, namespace=""): ipv4, mac, prefix, up_state, iface_found = \ P4STA_utils.fetch_interface(user, ip, iface, namespace) if ipv4 == "" or ipv4 == []: ipv4 = "n/a" if mac == "" or mac == []: mac = "device not found" return ipv4, mac, prefix, up_state res = {"ext_host_ssh_ping": (os.system( "timeout 1 ping " + cfg["ext_host_ssh"] + " -c 1") == 0)} if res["ext_host_ssh_ping"]: res["ext_host_sudo_rights"], list_of_path_possibilities = \ P4STA_utils.check_sudo( cfg["ext_host_user"], cfg["ext_host_ssh"], dynamic_mode=True) print("Ext Host sudo path possibilities:") print(list_of_path_possibilities) res["list_of_path_possibilities"] = list_of_path_possibilities iface_status = check_iface( cfg["ext_host_user"], cfg["ext_host_ssh"], cfg["ext_host_if"]) res["ext_host_fetched_ipv4"] = iface_status[0] res["ext_host_fetched_mac"] = iface_status[1] res["ext_host_fetched_prefix"] = iface_status[2] res["ext_host_up_state"] = iface_status[3] # store in results list (no return possible for a thread) results[index] = res
def loadgen_status_overview(self, host, results, index):
    super(LoadGeneratorImpl, self).loadgen_status_overview(host, results,
                                                           index)
    answer = P4STA_utils.execute_ssh(host["ssh_user"], host["ssh_ip"],
                                     "iperf3 -v")
    version = ""
    try:
        for line in answer:
            if line.find("iperf") > -1:
                version = line
    except Exception:
        pass
    if version != "":
        version = re.sub('[^0-9,.]', '', version)
        ver_split = version.split(".")
        if len(ver_split) == 3 and ver_split[0] == "3":
            if float(".".join(ver_split[1:3])) >= 1.3:
                results[index]["custom_checks"] = [[True, "iPerf3", version]]
            else:
                results[index]["custom_checks"] = [[
                    False, "iPerf3",
                    version + " [version older than 3.1.3 will not work]"]]
        elif len(ver_split) == 2 and ver_split[0] == "3":
            if float(ver_split[1]) > 7:
                results[index]["custom_checks"] = [[True, "iPerf3", version]]
            else:
                results[index]["custom_checks"] = [[
                    False, "iPerf3",
                    version + " [version older than 3.1.3 will not work]"]]
    else:
        answer = P4STA_utils.execute_ssh(host["ssh_user"], host["ssh_ip"],
                                         "which iperf3")
        if answer[0] != "":
            results[index]["custom_checks"] = [[
                False, "iPerf3",
                "version not detected but installed at " + answer[0]]]
        else:
            results[index]["custom_checks"] = [[
                False, "iPerf3", "installation not found"]]
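# A hedged alternative sketch for the version comparison above: parsing the
# "iperf 3.x.y" line into an integer tuple and comparing against (3, 1, 3)
# avoids the separate float-based handling of two- and three-part version
# strings. This is not the shipped implementation, only an illustration;
# "iperf3_new_enough" is a made-up helper name.
def iperf3_new_enough(version_line, minimum=(3, 1, 3)):
    match = re.search(r"(\d+)\.(\d+)(?:\.(\d+))?", version_line)
    if not match:
        return False
    parts = tuple(int(g) if g else 0 for g in match.groups())
    return parts >= minimum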
def port_lists(self): temp = {} real_ports = [] # stores all real port name possibilities nr_ports = self.target_cfg["nr_ports"] for i in range(1, nr_ports + 1): for z in range(0, 4): real_ports.append(str(i) + "/" + str(z)) real_ports.append("bf_pci0") if "p4_ports" in self.target_cfg: logical_ports = self.target_cfg["p4_ports"] else: P4STA_utils.log_error( "Error reading key 'p4_ports' from target_config.json") logical_ports = [-1 for i in range(nr_ports * 4 + 1)] temp["real_ports"] = real_ports temp["logical_ports"] = logical_ports return temp
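# Illustrative only: for a hypothetical target_config.json with nr_ports = 2,
# the method above would return real port names plus the CPU port and the
# logical port list taken from the config, roughly:
# {
#     "real_ports": ["1/0", "1/1", "1/2", "1/3",
#                    "2/0", "2/1", "2/2", "2/3", "bf_pci0"],
#     "logical_ports": [...]   # from target_config.json key "p4_ports"
# }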
def start_external(self, file_id, multi=1, tsmax=(2**32 - 1)): self.cfg = P4STA_utils.read_current_cfg() cmd = "cd /home/" + self.cfg[ "ext_host_user"] + "/p4sta/externalHost/dpdkExtHost/; touch " \ "receiver_stop; sleep 0.5; rm receiver_stop; " \ "sudo build/receiver 0" if self.cfg["selected_target"] == "bmv2": # if mininet # load vfio module cmd = "sudo rmmod vfio-pci; sudo rmmod vfio_iommu_type1; " \ "sudo rmmod vfio; sudo modprobe vfio-pci; " + cmd cmd += " --vdev=eth_af_packet42,iface=" + self.cfg[ "ext_host_if"] + ",blocksz=4096,framesz=2048,framecnt=512," \ "qpairs=1,qdisc_bypass=0" cmd += " -- --name " + file_id + " > foo.out 2> foo.err < /dev/null &" print(cmd) res = P4STA_utils.execute_ssh(self.cfg["ext_host_user"], self.cfg["ext_host_ssh"], cmd) print("started DPDK-based external host") print(res) errors = () return errors
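# Hedged illustration of the command assembled above for the bmv2/Mininet
# case: the af_packet vdev lets the DPDK receiver read from a kernel
# interface instead of a vfio-bound NIC. "veth_ext" and the file id are
# made-up example values; output redirection is omitted here.
# sudo build/receiver 0 --vdev=eth_af_packet42,iface=veth_ext,blocksz=4096,
#     framesz=2048,framecnt=512,qpairs=1,qdisc_bypass=0 -- --name 123456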
def check_if_p4_compiled(self, cfg): all_files = P4STA_utils.execute_ssh( cfg["stamper_user"], cfg["stamper_ssh"], "ls /home/" + cfg["stamper_user"] + "/p4sta/stamper/bmv2/data") found_jsons = [] for item in all_files: if item.endswith("json") and item.find(cfg["program"]) > -1: found_jsons.append(item) if len(found_jsons) > 0: return True, "Found compiled " + " ".join( found_jsons ) + " in /home/" + cfg["stamper_user"] + "/p4sta/stamper/bmv2/data" else: return False, "No compiled " + cfg[ "program"] + ".json found in /home/" + cfg[ "stamper_user"] + "/p4sta/stamper/bmv2/data"
def stamper_status_overview(self, results, index, cfg): super(TargetImpl, self).stamper_status_overview(results, index, cfg) try: answer = P4STA_utils.execute_ssh( cfg["stamper_user"], cfg["stamper_ssh"], "cat /proc/sys/net/ipv4/ip_forward") if answer[0] == "0": results[index]["custom_checks"] = [[ False, "IPv4 forwarding", "0 (disabled)" ]] elif answer[0] == "1": results[index]["custom_checks"] = [[ True, "IPv4 forwarding", "1 (enabled)" ]] else: results[index]["custom_checks"] = [[ False, "IPv4 forwarding", "error" ]] except Exception: print(traceback.format_exc())
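# If the check above reports "0 (disabled)", one way to enable forwarding on
# the stamper host from the management server is sketched below, reusing the
# same SSH helper. This is a hedged usage hint, not part of the status check.
P4STA_utils.execute_ssh(cfg["stamper_user"], cfg["stamper_ssh"],
                        "sudo sysctl -w net.ipv4.ip_forward=1")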
def stamper_status(self, cfg): pid = self.get_pid(cfg) print(pid) if pid > 0: dev_status = "Yes! PID: " + str(pid) running = True try: lines_pm = P4STA_utils.execute_ssh( cfg["stamper_user"], cfg["stamper_ssh"], "cat /home/" + cfg["stamper_user"] + "/p4sta/stamper/bmv2/data/mn.log") except Exception: lines_pm = ["Error while reading mininet output"] else: dev_status = "not running" running = False lines_pm = [ "No portmanager available", "Are you sure you selected a target before?" ] return lines_pm, running, dev_status
def reset_p4_registers(self, cfg): interface = grpc_interface.TofinoInterface(cfg["stamper_ssh"], device_id=0) if type(interface) == str: print("error resetting registers ..") print(interface) return interface interface.bind_p4_name(cfg["program"]) try: # clear counters cntr_port_list = self.get_used_ports_list(cfg) for name in [ "Ingress.ingress_counter", "Ingress.ingress_stamped_counter", "Egress.egress_counter", "Egress.egress_stamped_counter" ]: print("CLEAR COUNTER: " + "pipe.Switch" + name) try: interface.clear_indirect_counter("pipe.Switch" + name, id_list=cntr_port_list) except Exception: P4STA_utils.log_error(traceback.format_exc()) # clear indirect registers for name in [ "delta_register", "min_register", "max_register", "delta_register_high", "delta_register_pkts", "multi_counter_register" ]: print("CLEAR REGISTER: " + name) try: interface.clear_register("pipe.SwitchIngress." + name) except Exception: P4STA_utils.log_error(traceback.format_exc()) except Exception: P4STA_utils.log_error(traceback.format_exc()) finally: interface.teardown()
def execute_ssh(self, cfg, arg): # input = ["ssh", cfg["stamper_user"] + "@" + cfg["stamper_ssh"], arg] # res = subprocess.Popen(input, stdout=subprocess.PIPE).stdout # return res.read().decode().split("\n") return P4STA_utils.execute_ssh(cfg["stamper_user"], cfg["stamper_ssh"], arg)
def start_external(self, file_id, multi=1, tsmax=(2**32 - 1)): self.cfg = P4STA_utils.read_current_cfg() ext_py_dir = self.host_cfg["real_path"] errors = () # check pip3 modules answer = P4STA_utils.execute_ssh( self.cfg["ext_host_user"], self.cfg["ext_host_ssh"], "python3 -c 'import pkgutil; print(1 if pkgutil.find_loader" "(\"setproctitle\") else 0)'") if answer[0] == "0": errors = errors + ( "Python Module 'setproctitle' not found at external host -> " "'pip3 install setproctitle'", ) return errors answer = P4STA_utils.execute_ssh( self.cfg["ext_host_user"], self.cfg["ext_host_ssh"], "mkdir -p /home/" + self.cfg["ext_host_user"] + "/p4sta/externalHost/python; " "sudo killall external_host_python_receiver") input = [ "scp", ext_py_dir + "/pythonRawSocketExtHost.py", self.cfg["ext_host_user"] + "@" + self.cfg["ext_host_ssh"] + ":/home/" + self.cfg["ext_host_user"] + "/p4sta/externalHost/python" ] res = subprocess.run(input, stdout=subprocess.PIPE, timeout=3).stdout input = [ "scp", ext_py_dir + "/check_extH_status.sh", self.cfg["ext_host_user"] + "@" + self.cfg["ext_host_ssh"] + ":/home/" + self.cfg["ext_host_user"] + "/p4sta/externalHost/python" ] res = subprocess.run(input, stdout=subprocess.PIPE, timeout=3).stdout args = "chmod +x /home/" + self.cfg["ext_host_user"] + \ "/p4sta/externalHost/python/pythonRawSocketExtHost.py; " \ "chmod +x /home/" + self.cfg["ext_host_user"] + \ "/p4sta/externalHost/python/check_extH_status.sh;" \ " rm -f /home/" + self.cfg["ext_host_user"] + \ "/p4sta/externalHost/python/pythonRawSocketExtHost.log" print(args) res = P4STA_utils.execute_ssh(self.cfg["ext_host_user"], self.cfg["ext_host_ssh"], args) print("now start python extHost") call = "sudo ./pythonRawSocketExtHost.py --name " + file_id + \ " --interface " + self.cfg["ext_host_if"] + " --multi " + str( multi) + " --tsmax " + str(tsmax) args = "cd /home/" + self.cfg["ext_host_user"] + \ "/p4sta/externalHost/python/; nohup " + call + \ " > foo.out 2> foo.err < /dev/null &" print(args) res = P4STA_utils.execute_ssh(self.cfg["ext_host_user"], self.cfg["ext_host_ssh"], args) time.sleep(2) # wait for the ext-host to succeed/fail # check if interface is not found or other crash input = [ "ssh", self.cfg["ext_host_user"] + "@" + self.cfg["ext_host_ssh"], "cd /home/" + self.cfg["ext_host_user"] + "/p4sta/externalHost/python; cat pythonRawSocketExtHost.log; " "exit" ] res = subprocess.run(input, stdout=subprocess.PIPE, timeout=3).stdout result = res.decode("utf-8") if result.find("Errno 19") > -1: errors = errors + ("Interface " + str(self.cfg["ext_host_if"]) + " not found at external host: " + result, ) elif result.find("Exception") > -1: errors = errors + ("An exception occurred: " + result, ) elif result.find("Started") == -1: errors = errors + ("Ext host not started properly", ) return errors
def run_loadgens(self, file_id, duration, l4_selected, packet_size_mtu, results_path, loadgen_rate_limit, loadgen_flows, loadgen_server_groups): self.cfg = P4STA_utils.read_current_cfg() loadgen_flows = int(loadgen_flows) def check_ns(host): if "namespace_id" in host: return "sudo ip netns exec " + str(host["namespace_id"]) else: return "" def thread_join(thrs): for thread in thrs: thread.start() for thread in thrs: thread.join() def check_iperf_server(server_ssh_user, server_ip, start_port, ns_option): # ssh into server and check locally if port is open def check_port_open(server_ssh_user, server_ip, port, ns_option): answer_list = P4STA_utils.execute_ssh( server_ssh_user, server_ip, str(ns_option) + " if lsof -Pi :" + str(port) + " -sTCP:LISTEN -t >/dev/null; then echo running; fi " + ns_option_end) return "running" in answer_list def check_thrd(server_ssh_user, server_ip, port, ns_option): print_str = "check iPerf3 Server Port " + str( port) + " at " + server["ssh_ip"] + \ " with Namespace " + ns_option + " => " time.sleep(0.5) if not check_port_open(server_ssh_user, server_ip, port, ns_option): time.sleep(1.5) if not check_port_open(server_ssh_user, server_ip, port, ns_option): print_str += "[fail]" raise Exception("iPerf3 Server Port " + str(port) + " at " + server["ssh_ip"] + " not open.") else: print_str += "[ok]" print(print_str) else: print_str += "[ok]" print(print_str) ns_option_end = "" if ns_option != "": ns_option = ns_option + " bash -c '" ns_option_end = "'" thrds = [] for add in range(loadgen_flows): x = threading.Thread(target=check_thrd, args=(server_ssh_user, server_ip, start_port + add, ns_option)) thrds.append(x) thread_join(thrds) # iperf -s threads at one (!) host def start_servers(ssh_user, ssh_ip, start_port, flows, ns_option, server_dict): def start_server(ssh_user, ssh_ip, start_port, ns_option): P4STA_utils.execute_ssh( ssh_user, ssh_ip, str(ns_option) + " iperf3 -s -p " + str(start_port)) port = int(start_port) for fl in range(flows): print("iperf3 server flow " + str(fl) + " start at " + str(ssh_ip) + " port " + str(port) + " with NS option " + str(ns_option)) server_dict["open_iperf_ports"].append(port) thread = threading.Thread(target=start_server, args=(ssh_user, ssh_ip, port, ns_option)) thread.start() port = port + 1 def stop_all_servers(servers): for server in servers: # no need to define namespace because # sudo pkill kills all processes running on host print("Trying to stop all iPerf3 instances at server " + server["ssh_ip"]) P4STA_utils.execute_ssh(server["ssh_user"], server["ssh_ip"], "sudo pkill iperf3") # iperf -c threads at one (!) host def start_clients(ssh_user, ssh_ip, dst_ip, start_port, flows, ns_option, duration, flag, start_json_id): def start_client(ssh_user, ssh_ip, dst_ip, port, ns_option, duration, json_id, flag): exec = str(ns_option) + " iperf3 -c " + str( dst_ip) + " -T s" + str(json_id) + " -p " + str( port) + " " + flag + " -J --logfile s" + str( json_id) + ".json -t " + str(duration) print(exec) P4STA_utils.execute_ssh(ssh_user, ssh_ip, exec) threads = [] port = int(start_port) json_id = int(start_json_id) for fl in range(flows): print("iperf3 client " + str(fl) + " start at " + str(ssh_ip) + " connect to " + str(dst_ip) + " port " + str(port) + " with NS option " + str(ns_option)) thread = threading.Thread(target=start_client, args=(ssh_user, ssh_ip, dst_ip, port, ns_option, duration, json_id, flag)) threads.append(thread) port = port + 1 json_id = json_id + 1 thread_join(threads) # [1,2,3,4,5,6,7...] 
        # with y = 3 => 1,2,3,1,2,3,1,2,3
        def custom_modulo(x, y):
            if x % y == 0:
                return y
            else:
                return x % y

        # first kill all running iperf instances
        for loadgen_grp in self.cfg["loadgen_groups"]:
            if loadgen_grp["use_group"] == "checked":
                stop_all_servers(loadgen_grp["loadgens"])

        iperf_server_groups = []
        iperf_client_groups = []
        for loadgen_grp in self.cfg["loadgen_groups"]:
            if loadgen_grp["use_group"] == "checked":
                if loadgen_grp["group"] in loadgen_server_groups:
                    iperf_server_groups.append(loadgen_grp)
                else:
                    iperf_client_groups.append(loadgen_grp)

        num_clients = sum([len(x["loadgens"]) for x in iperf_client_groups])
        print("num_clients")
        print(num_clients)

        if loadgen_rate_limit > 0 and num_clients > 0:
            limit_per_host_in_bit_s = int(
                (loadgen_rate_limit * 1000000)
                / (loadgen_flows * num_clients))
            limit_str = "-b " + str(limit_per_host_in_bit_s)
        elif loadgen_rate_limit > 0 and num_clients == 0:
            limit_str = "-b " + str(
                int((loadgen_rate_limit * 1000000) / loadgen_flows))
        else:
            limit_str = ""

        if l4_selected == "tcp":
            # normally MSS = MTU - 40; here -16 more because
            # 16 byte tstamps are added
            mss = int(packet_size_mtu) - 56
            flag = "-M " + str(mss)
            flag = flag + " " + limit_str
        else:
            # timestamps in payload! no need for extra 16 byte space
            mss = int(packet_size_mtu) - 40
            if limit_str == "":
                # 100G option allows to use maximum speed
                flag = "-u -b 100G --length " + str(mss)
            else:
                flag = "-u " + limit_str + " --length " + str(mss)
        print("iperf flags: " + flag)

        # case where only one group and one DUT port is used
        if len(iperf_client_groups) == 0 and len(iperf_server_groups) == 1:
            iperf_client_groups = copy.deepcopy(iperf_server_groups)
            # move first entry to end
            first_loadgen = iperf_client_groups[0]["loadgens"][0]
            del (iperf_client_groups[0]["loadgens"][0])
            iperf_client_groups[0]["loadgens"].append(first_loadgen)
            counter = 1
            for client in iperf_client_groups[0]["loadgens"]:
                client["id"] = counter
                counter += 1
            num_clients = 1
            print("num_clients updated because only one loadgen group is "
                  "used")
            print(num_clients)
        # case where all groups are servers
        elif len(iperf_client_groups) == 0 and len(iperf_server_groups) == \
                len(self.cfg["loadgen_groups"]):
            iperf_client_groups = copy.deepcopy(iperf_server_groups)
            first_group = iperf_client_groups[0]
            del (iperf_client_groups[0])
            iperf_client_groups.append(first_group)
            counter = 1
            for client_grp in iperf_client_groups:
                client_grp["group"] = counter
                counter = counter + 1

        for server_group in iperf_server_groups:
            for server in server_group["loadgens"]:
                server["open_iperf_ports"] = []

        start_port = 5101
        for server_group in iperf_server_groups:
            for server in server_group["loadgens"]:
                for i in range(num_clients):
                    start_servers(server["ssh_user"], server["ssh_ip"],
                                  str(start_port), loadgen_flows,
                                  check_ns(server), server_dict=server)
                    start_port = start_port + loadgen_flows

        start_port = 5101
        check_threads = list()
        for server_group in iperf_server_groups:
            for server in server_group["loadgens"]:
                for i in range(num_clients):
                    x = threading.Thread(target=check_iperf_server,
                                         args=(server["ssh_user"],
                                               server["ssh_ip"], start_port,
                                               check_ns(server)))
                    check_threads.append(x)
                    start_port = start_port + loadgen_flows
        thread_join(check_threads)

        print("iperf server groups")
        print(iperf_server_groups)
        print("iperf client groups")
        print(iperf_client_groups)

        threads = list()
        json_id = 1
        for client_group in iperf_client_groups:
            for client in client_group["loadgens"]:
                client["num_started_flows"] = 0
                for server_group in iperf_server_groups:
                    for server in server_group["loadgens"]:
                        start_client = False
                        do_break = False
                        # if more clients in client grp than server grp
                        # connect remaining clients to first servers again
                        if len(client_group["loadgens"]) >= len(
                                server_group["loadgens"]):
                            c_mod = custom_modulo(
                                client["id"], len(server_group["loadgens"]))
                            if server["id"] == c_mod \
                                    and client["loadgen_ip"] != \
                                    server["loadgen_ip"]:
                                start_client = True
                                do_break = True
                        else:
                            c_mod = custom_modulo(
                                server["id"], len(client_group["loadgens"]))
                            if client["id"] == c_mod \
                                    and client["loadgen_ip"] != \
                                    server["loadgen_ip"]:
                                start_client = True
                                do_break = False
                        if start_client:
                            if len(server["open_iperf_ports"]) > 0:
                                start_port = server["open_iperf_ports"][0]
                                for i in range(loadgen_flows):
                                    if start_port + i \
                                            in server["open_iperf_ports"]:
                                        server["open_iperf_ports"].remove(
                                            start_port + i)
                                print("select port range starting at "
                                      + str(start_port)
                                      + " to connect from "
                                      + client["loadgen_ip"] + " to "
                                      + server["loadgen_ip"])
                                x = threading.Thread(
                                    target=start_clients,
                                    args=(client["ssh_user"],
                                          client["ssh_ip"],
                                          server["loadgen_ip"], start_port,
                                          loadgen_flows, check_ns(client),
                                          duration, flag, json_id))
                                threads.append(x)
                                json_id = json_id + loadgen_flows
                                client["num_started_flows"] = \
                                    client["num_started_flows"] \
                                    + loadgen_flows
                                if do_break:
                                    break
                            else:
                                print(
                                    "No available port found in server "
                                    "dict: " + str(server)
                                    + " from server group id "
                                    + str(server_group["group"]))
        thread_join(threads)

        # get jsons from clients
        json_id = 1
        for client_grp in iperf_client_groups:
            for client in client_grp["loadgens"]:
                for f in range(client["num_started_flows"]):
                    exc = "scp " + client["ssh_user"] + "@" \
                          + client["ssh_ip"] + ":s" + str(json_id + f) \
                          + ".json " + results_path + "/iperf3_s" \
                          + str(json_id + f) + "_" + str(file_id) + ".json"
                    print(exc)
                    subprocess.run(exc, shell=True)
                json_id = json_id + client["num_started_flows"]

        # delete in second loop because
        # e.g. in case of docker one rm -f deletes all jsons for all clients
        for client_grp in iperf_client_groups:
            for client in client_grp["loadgens"]:
                P4STA_utils.execute_ssh(client["ssh_user"],
                                        client["ssh_ip"], "rm -f s*.json")

        for server_group in iperf_server_groups:
            stop_all_servers(server_group["loadgens"])
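# Worked example for the client/server pairing above (illustrative): with
# three servers in a group, custom_modulo maps client ids 1..6 onto server
# ids 1,2,3,1,2,3 - i.e. custom_modulo(3, 3) == 3 instead of 0, so the
# 1-based server ids are matched without a zero case.
# [custom_modulo(x, 3) for x in range(1, 7)]  ->  [1, 2, 3, 1, 2, 3]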
def get_server_install_script(self, user_name, ip, target_specific_dict={}): print("INSTALLING TOFINO STAMPER TARGET:") print(target_specific_dict) sde_path = "" if "sde" in target_specific_dict and len( target_specific_dict["sde"]) > 1: sde_path = target_specific_dict["sde"] else: lines = P4STA_utils.execute_ssh(user_name, ip, "cat $HOME/.bashrc") for line in lines: if line.find("export SDE_INSTALL") > -1: lrep = line.replace("\n", "") start = lrep.find("=") + 1 sde_path = lrep[start:] print("/**************************/") print("Found $SDE_INSTALL at target " + ip + " = " + sde_path) print("/**************************/") if sde_path == "": print("\033[1;33m/**********************************" "****************************/") print("WARNING: SDE Path not found on Tofino, using " "/opt/bf-sde-9.3.0") print("/*********************************************" "*****************/\033[0m") sde_path = "/opt/bf-sde-9.3.0" # create install_tofino.sh add_sudo_rights_str = "#!/bin/bash\nadd_sudo_rights() {\ncurrent_" \ "user=$USER\n if (sudo -l | grep -q " \ "'(ALL : ALL) SETENV: NOPASSWD: '$1); then\n " \ " echo 'visudo entry already exists';" \ "\n else\n sleep 0.1\n echo " \ "$current_user' ALL=(ALL:ALL) NOPASSWD:" \ "SETENV:'$1 | " \ "sudo EDITOR='tee -a' visudo; \n fi\n}\n" with open(dir_path + "/scripts/install_tofino.sh", "w") as f: f.write(add_sudo_rights_str) for sudo in self.target_cfg["status_check"]["needed_sudos_to_add"]: if sudo.find("run_switchd.sh") > -1: f.write("add_sudo_rights " + sde_path + "/run_switchd.sh\n") else: f.write("add_sudo_rights $(which " + sudo + ")\n") os.chmod(dir_path + "/scripts/install_tofino.sh", 0o775) lst = [] lst.append('echo "====================================="') lst.append('echo "Installing Barefoot Tofino stamper target on ' + ip + '"') lst.append('echo "====================================="') lst.append('echo "START: Copying Tofino files on remote server:"') lst.append('cd ' + dir_path + "/scripts") lst.append('if ssh -o ConnectTimeout=2 -o StrictHostKeyChecking=no ' + user_name + '@' + ip + ' "echo \'ssh to ' + ip + ' ***worked***\';"; [ $? 
-eq 255 ]; then') lst.append(' echo "====================================="') lst.append(' echo "\033[0;31m ERROR: Failed to connect to Stamper ' 'server with IP: ' + ip + ' \033[0m"') lst.append(' echo "====================================="') lst.append('else') lst.append( ' chmod +x stop_switchd.sh start_switchd.sh switchd_status.sh') lst.append(' ssh -o ConnectTimeout=2 -o StrictHostKeyChecking=no ' + user_name + '@' + ip + ' \"echo "SSH to stamper device **' '*__worked__***\"; mkdir -p /home/' + user_name + '/p4sta/stamper/tofino1/"') lst.append(" echo ") lst.append(' scp install_tofino.sh ' + user_name + '@' + ip + ':/home/' + user_name + '/p4sta/stamper/tofino1/') lst.append(' ssh -o ConnectTimeout=2 -o StrictHostKeyChecking=no ' + user_name + '@' + ip + ' "cd /home/' + user_name + '/p4sta/stamper/tofino1/; chmod +x install_tofino.sh;"') lst.append( ' ssh -t -o ConnectTimeout=2 -o StrictHostKeyChecking=no ' + user_name + '@' + ip + ' "cd /home/' + user_name + '/p4sta/stamper/tofino1; ./install_tofino.sh ' + ';"') lst.append(' echo "Downloading bfruntime.proto from Tofino Target"') # important to use dir_path because THIS file is stored # there but target could be other tofino lst.append(' cd ' + dir_path + '/bfrt_grpc') lst.append(' scp ' + user_name + '@' + ip + ':' + sde_path + '/install/share/bf_rt_shared/proto/bfruntime.proto proto/ ') lst.append(' echo "Building python3 stub at management server from ' 'bfruntime.proto for Tofino Target"') lst.append(' source ../../../pastaenv/bin/activate; python3 -m ' 'grpc_tools.protoc -I ./proto --python_out=. ' '--grpc_python_out=. ./proto/bfruntime.proto; deactivate') lst.append(' echo "FINISHED setting up Barefoot Tofino target"') lst.append(' echo "====================================="') lst.append(' echo "\033[0;31m IMPORTANT NOTE: P4 source code must be ' 'compiled manually on Tofino after compiling Intel/Barefoot' ' SDE\033[0m"') lst.append(' echo "====================================="') lst.append(' echo ""') lst.append(' echo "====================================="') lst.append(' echo "\033[0;31m IMPORTANT NOTE: Please stop the install' '.sh script in the CLI and restart P4STA with ./run.sh in o' 'rder to load the freshly compiled grpc files correctly. ' '\033[0m"') lst.append(' echo "====================================="') lst.append('fi') return lst
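# Illustrative only: for an example user "p4sta" and the default SDE path
# above, the generated add_sudo_rights helper would append a visudo entry
# along the lines of:
#   p4sta ALL=(ALL:ALL) NOPASSWD:SETENV:/opt/bf-sde-9.3.0/run_switchd.sh
# (user name and path are example values).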
def deploy(self, cfg): try: # client id is chosen randomly by TofinoInterface interface = grpc_interface.TofinoInterface(cfg["stamper_ssh"], device_id=0) if type(interface) == str: # error case interface = grpc_interface.TofinoInterface(cfg["stamper_ssh"], device_id=0) if type(interface) == str: total_error = interface.replace("\n", "") else: total_error = "" else: total_error = "" if len(total_error) < 2: total_error = interface.bind_p4_name(cfg["program"]) except Exception: total_error = total_error + str(traceback.format_exc()).replace( "\n", "") if len(total_error) < 2 and interface is not None: try: interface.delete_ports() interface.clear_multicast_groups() ignore_list = [ "multi_counter_register", "ingress_counter", "egress_counter", "ingress_stamped_counter", "egress_stamped_counter", "pipe.SwitchIngress.delta_register", "pipe.SwitchIngress.delta_register_high", "pipe.SwitchIngress.delta_register_pkts", "pipe.SwitchIngress.min_register", "pipe.SwitchIngress.max_register" ] interface.clear_all_tables(ignore_list) hosts = [] for loadgen_grp in cfg["loadgen_groups"]: for host in loadgen_grp["loadgens"]: if host["p4_port"] != "320": hosts.append(host) for dut in cfg["dut_ports"]: if dut["use_port"] == "checked": hosts.append(dut) if cfg["ext_host_real"] != "bf_pci0" and cfg[ "ext_host"] != "320": hosts.append({ "p4_port": cfg["ext_host"], "real_port": cfg["ext_host_real"], "speed": cfg["ext_host_speed"], "fec": cfg["ext_host_fec"], "an": cfg["ext_host_an"] }) interface.set_ports(hosts) self.deploy_tables(cfg, interface) mcast_inp = [{ "node_id": 1, "group_id": 1, "port": int(cfg["ext_host"]) }] node_id = 2 # add to group with own group id +1 # (because ext host already blocks ID 1) for loadgen_grp in cfg["loadgen_groups"]: for host in loadgen_grp["loadgens"]: mcast_inp.append({ "node_id": node_id, "group_id": loadgen_grp["group"] + 1, "port": int(host["p4_port"]) }) node_id = node_id + 1 # same DUT port in group for duplication feature start_group = 50 for dut in cfg["dut_ports"]: if "dataplane_duplication" in dut and dut[ "dataplane_duplication"].isdigit() and int( dut["dataplane_duplication"]) > 0: try: duplication_scale = int( dut["dataplane_duplication"]) if duplication_scale < 0: raise ValueError except ValueError: P4STA_utils.log_error( "Input Dataplane Duplication for DUT port " + str(dut["real_port"]) + " is not a valid number - " "no duplication activated.") duplication_scale = 0 for i in range(duplication_scale): mcast_inp.append({ "node_id": node_id, "group_id": start_group + dut["id"], "port": int(dut["p4_port"]) }) node_id = node_id + 1 interface.set_multicast_groups(mcast_inp) except Exception: total_error = total_error + str( traceback.format_exc()).replace("\n", "") finally: interface.teardown() # set traffic shaping thrift = pd_fixed_api.PDFixedConnect(cfg["stamper_ssh"]) def set_shape(host, key=""): try: if key + "shape" in host and host[key + "shape"] != "": if (type(host[key + "p4_port"]) == str and host[key + "p4_port"].isdigit()) or type( host[key + "p4_port"] == int): if (type(host[key + "shape"]) == str and host[key + "shape"].isdigit()) or type( host[key + "shape"] == int): if int(host[key + "shape"]) == 0: thrift.disable_port_shaping( int(host[key + "p4_port"])) elif 2147483647 > int(host[key + "shape"]) > 0: # set in kbit/s thrift.set_port_shaping_rate( int(host[key + "p4_port"]), int(host[key + "shape"]) * 1000) thrift.enable_port_shaping( int(host[key + "p4_port"])) print("Limit port " + str(host[key + "p4_port"]) + " to " + str(host[key + "shape"]) + " 
Mbit/s outgoing (from stamper).") else: raise Exception( "wrong limit (" + str(host[key + "shape"]) + ") for p4_port: " + str(host[key + "p4_port"]) + ". Allowed from 0 (disable) to " "2147483646") else: raise Exception("wrong type for shape: " + str(host[key + "shape"])) else: raise Exception("wrong type for p4_port: " + str(host[key + "p4_port"])) except Exception: print(traceback.format_exc()) if thrift.error: total_error = total_error + "\n" + thrift.error_message else: for loadgen_grp in cfg["loadgen_groups"]: for host in loadgen_grp["loadgens"]: set_shape(host) for dut in cfg["dut_ports"]: set_shape(dut) cfg["ext_host_p4_port"] = cfg["ext_host"] set_shape(cfg, key="ext_host_") print("Set port shaping finished.") if total_error != "": return total_error
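# Worked example for the shaping above (illustrative values): a configured
# "shape" of 500 is interpreted as 500 Mbit/s and handed to the fixed API in
# kbit/s, i.e. set_port_shaping_rate(p4_port, 500 * 1000) == 500000 kbit/s.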