def save_config_as_file(request):
    print("SAVE CONFIG:")
    saved, cfg = updateCfg(request)
    time_created = time.strftime('%d.%m.%Y-%H:%M:%S', time.localtime())
    file_name = cfg["selected_target"] + "_" + str(time_created) + ".json"
    P4STA_utils.write_config(cfg, file_name)
    return HttpResponseRedirect("/")

def create_new_cfg_from_template(request):
    print("CREATE CONFIG:")
    path = globals.core_conn.root.get_template_cfg_path(
        request.POST["selected_cfg_template"])
    with open(path, "r") as f:
        cfg = json.load(f)
    P4STA_utils.write_config(cfg)
    return HttpResponseRedirect("/")

def main():
    global selected_run_id
    global core_conn
    global project_path
    core_conn = rpyc.connect('localhost', 6789)
    project_path = core_conn.root.get_project_path()
    P4STA_utils.set_project_path(project_path)
    selected_run_id = core_conn.root.getLatestMeasurementId()

def render_loadgens(request, file_id, duration=10):
    try:
        process_loadgens = rpyc.timed(
            globals.core_conn.root.process_loadgens, duration * 2)
        results = process_loadgens(file_id)
        results.wait()
        results = results.value
        if results is not None:
            output, total_bits, error, total_retransmits, total_byte, \
                custom_attr, to_plot = results
            output = P4STA_utils.flt(output)
            custom_attr = P4STA_utils.flt(custom_attr)
        else:
            error = True
            output = [
                "Sorry, an error occurred!",
                "The core returned NONE from loadgens, which is a result"
                " of an internal error in the loadgen module."
            ]
            total_bits = total_retransmits = total_byte = 0
            custom_attr = {"l4_type": "", "name_list": [], "elems": {}}
        cfg = P4STA_utils.read_result_cfg(file_id)

        return render(
            request, "middlebox/output_loadgen.html", {
                "cfg": cfg,
                "output": output,
                "total_gbits": analytics.find_unit_bit_byte(total_bits, "bit"),
                "cachebuster": str(time.time()).replace(".", ""),
                "total_retransmits": total_retransmits,
                "total_gbyte": analytics.find_unit_bit_byte(total_byte,
                                                            "byte"),
                "error": error,
                "custom_attr": custom_attr,
                "filename": file_id,
                "time": time.strftime("%H:%M:%S %d.%m.%Y",
                                      time.localtime(int(file_id)))
            })
    except Exception as e:
        print(e)
        return render(request, "middlebox/timeout.html", {
            "inside_ajax": True,
            "error": ("render loadgen error: " + str(e))
        })

def download_all_zip(request):
    # first check if cached results are already available
    # this step is not necessary for download_external_results because
    # it's only possible to trigger inside the ext host results view
    # which already generated the graphs
    cfg = P4STA_utils.read_result_cfg(globals.selected_run_id)
    folder = P4STA_utils.get_results_path(globals.selected_run_id)
    fid = str(globals.selected_run_id)

    # ext host
    files = get_ext_host_zip_list()
    trigger_generation = False
    for file in files:
        if not os.path.isfile(file[0]):
            trigger_generation = True
    if trigger_generation:
        # trigger generation of graphs
        _ = analytics.main(
            str(globals.selected_run_id), cfg["multicast"],
            P4STA_utils.get_results_path(globals.selected_run_id))

    # stamper
    files.append([
        folder + "/stamper_" + fid + ".json",
        "results/" + fid + "/stamper_" + fid + ".json"
    ])
    files.append([
        folder + "/output_stamperice_" + fid + ".txt",
        "results/" + fid + "/output_stamperice_" + fid + ".txt"
    ])

    # loadgen
    files.extend([[
        "results/" + fid + "/generated/loadgen_1.svg",
        "results/" + fid + "/generated/loadgen_1.svg"
    ], [
        "results/" + fid + "/generated/loadgen_2.svg",
        "results/" + fid + "/generated/loadgen_2.svg"
    ], [
        "results/" + fid + "/generated/loadgen_3.svg",
        "results/" + fid + "/generated/loadgen_3.svg"
    ]])

    zip_file = pack_zip(files, fid, "stamper_and_ext_host_")
    try:
        os.remove("create_graphs.sh")
    except Exception:
        pass
    return zip_file

def dygraph(request):
    if request.is_ajax():
        cfg = P4STA_utils.read_result_cfg(globals.selected_run_id)
        try:
            extH_results = analytics.main(
                str(globals.selected_run_id), cfg["multicast"],
                P4STA_utils.get_results_path(globals.selected_run_id))
            # list for "Dygraph" javascript graph
            graph_list = []
            counter = 1
            adjusted_latency_list, unit = analytics.find_unit(
                extH_results["latency_list"])
            for latency in adjusted_latency_list:
                graph_list.append([counter, latency])
                counter = counter + 1

            timestamp1_list = analytics.read_csv(
                P4STA_utils.get_results_path(globals.selected_run_id),
                "timestamp1_list", str(globals.selected_run_id))
            timestamp2_list = analytics.read_csv(
                P4STA_utils.get_results_path(globals.selected_run_id),
                "timestamp2_list", str(globals.selected_run_id))

            # time in ms when packet was timestamped but starting at 0 ms
            time_throughput = []
            if len(timestamp1_list) > 0 and len(timestamp1_list) == len(
                    timestamp2_list):
                for i in range(0, len(timestamp1_list)):
                    time_throughput.append(
                        int(round((timestamp2_list[i] - timestamp2_list[0])
                                  / 1000000)))

            return render(
                request, "middlebox/dygraph.html", {
                    "latencies": graph_list,
                    "time_throughput": time_throughput,
                    "unit": unit
                })
        except Exception:
            print(traceback.format_exc())
            return render(
                request, "middlebox/timeout.html", {
                    "inside_ajax": True,
                    "error": ("render external error: "
                              + str(traceback.format_exc()))
                })

def get_ext_host_zip_list():
    globals.core_conn.root.external_results(globals.selected_run_id)
    fid = str(globals.selected_run_id)
    files = [
        "results/" + fid + "/generated/latency.svg",
        "results/" + fid + "/generated/latency_sec.svg",
        "results/" + fid + "/generated/latency_bar.svg",
        "results/" + fid + "/generated/latency_sec_y0.svg",
        "results/" + fid + "/generated/latency_y0.svg",
        "results/" + fid + "/generated/ipdv.svg",
        "results/" + fid + "/generated/ipdv_sec.svg",
        "results/" + fid + "/generated/pdv.svg",
        "results/" + fid + "/generated/pdv_sec.svg",
        "results/" + fid + "/generated/speed.svg",
        "results/" + fid + "/generated/packet_rate.svg",
        "results/" + fid + "/generated/speed_upscaled.svg",
        "results/" + fid + "/generated/packet_rate_upscaled.svg",
    ]
    folder = P4STA_utils.get_results_path(fid)
    for i in range(0, len(files)):
        name = files[i][files[i][16:].find("/") + 17:]
        files[i] = [
            files[i], "results/" + fid + "/" + name[:-4] + fid + ".svg"
        ]
    files.append([
        folder + "/timestamp1_list_" + fid + ".csv",
        "results/" + fid + "/timestamp1_list_" + fid + ".csv"
    ])
    files.append([
        folder + "/timestamp2_list_" + fid + ".csv",
        "results/" + fid + "/timestamp2_list_" + fid + ".csv"
    ])
    files.append([
        folder + "/packet_sizes_" + fid + ".csv",
        "results/" + fid + "/packet_sizes_" + fid + ".csv"
    ])
    files.append([
        folder + "/raw_packet_counter_" + fid + ".csv",
        "results/" + fid + "/raw_packet_counter_" + fid + ".csv"
    ])
    files.append([
        folder + "/output_external_host_" + fid + ".txt",
        "results/" + fid + "/output_external_host_" + fid + ".txt"
    ])
    files.append(["analytics/analytics.py", "analytics/analytics.py"])
    files.append(["analytics/README.MD", "analytics/README.MD"])

    f = open("create_graphs.sh", "w+")
    f.write("#!/bin/bash\n")
    f.write("python3 analytics/analytics.py --id " + fid)
    f.close()
    os.chmod("create_graphs.sh", 0o777)  # make run script executable
    files.append(["create_graphs.sh", "create_graphs.sh"])
    files.append(
        [folder + "/config_" + fid + ".json", "data/config_" + fid + ".json"])
    return files

def download_loadgen_results(request):
    file_id = str(globals.selected_run_id)
    cfg = P4STA_utils.read_result_cfg(globals.selected_run_id)
    files = [
        ["results/" + file_id + "/generated/loadgen_1.svg", "loadgen_1.svg"],
        ["results/" + file_id + "/generated/loadgen_2.svg", "loadgen_2.svg"],
        ["results/" + file_id + "/generated/loadgen_3.svg", "loadgen_3.svg"]
    ]
    folder = P4STA_utils.get_results_path(globals.selected_run_id)
    files.append([
        folder + "/output_loadgen_" + file_id + ".txt",
        "output_loadgen_" + file_id + ".txt"
    ])
    zip_file = pack_zip(files, file_id, cfg["selected_loadgen"] + "_")
    return zip_file

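# pack_zip() is used by download_all_zip, download_loadgen_results and
# download_stamper_results but is defined elsewhere in this module. Based
# purely on how it is called here (a list of [source_path, archive_name]
# pairs, a run id and a file name prefix, with the return value handed back
# as the HTTP response), a minimal sketch of such a helper could look like
# the hypothetical _pack_zip_sketch below. This is an illustrative
# assumption, not the project's actual implementation.
def _pack_zip_sketch(files, file_id, prefix):
    import io
    import zipfile
    from django.http import HttpResponse

    buffer = io.BytesIO()
    with zipfile.ZipFile(buffer, "w", zipfile.ZIP_DEFLATED) as zf:
        for source_path, archive_name in files:
            # skip files that were never generated instead of failing
            if os.path.isfile(source_path):
                zf.write(source_path, arcname=archive_name)
    response = HttpResponse(buffer.getvalue(),
                            content_type="application/zip")
    response["Content-Disposition"] = \
        'attachment; filename="' + prefix + file_id + '.zip"'
    return response
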
def open_selected_config(request):
    print("OPEN SELECTED CONFIG:")
    cfg = P4STA_utils.read_current_cfg(request.POST["selected_cfg_file"])

    # check if old style cfg is used and convert to new style
    if "dut1_real" in cfg and "loadgen_clients" in cfg:
        print("Old CFG structure -> converting to new style....")
        cfg["dut_ports"] = [{"id": 1}, {"id": 2}]
        cfg["dut_ports"][0]["p4_port"] = cfg.pop("dut1")
        cfg["dut_ports"][0]["real_port"] = cfg.pop("dut1_real")
        cfg["dut_ports"][0]["stamp_outgoing"] = cfg.pop("dut_1_outgoing_stamp")
        cfg["dut_ports"][0]["use_port"] = "checked"
        cfg["dut_ports"][1]["p4_port"] = cfg.pop("dut2")
        cfg["dut_ports"][1]["real_port"] = cfg.pop("dut2_real")
        cfg["dut_ports"][1]["stamp_outgoing"] = cfg.pop("dut_2_outgoing_stamp")
        cfg["dut_ports"][1]["use_port"] = cfg.pop("dut_2_use_port")

        # remove all remaining old style dut1/dut2 keys
        to_del = []
        for key, value in cfg.items():
            if key.find("dut1") > -1 or key.find("dut2") > -1:
                to_del.append(key)
        for key in to_del:
            cfg.pop(key)

        cfg["loadgen_groups"] = [{
            "group": 1,
            "loadgens": [],
            "use_group": "checked"
        }, {
            "group": 2,
            "loadgens": [],
            "use_group": "checked"
        }]
        for host in cfg["loadgen_servers"]:
            cfg["loadgen_groups"][0]["loadgens"].append(host)
        cfg.pop("loadgen_servers")
        for host in cfg["loadgen_clients"]:
            cfg["loadgen_groups"][1]["loadgens"].append(host)
        cfg.pop("loadgen_clients")

    P4STA_utils.write_config(cfg)
    return HttpResponseRedirect("/")

def get_stamper_startup_log(request):
    try:
        log = globals.core_conn.root.get_stamper_startup_log()
        log = P4STA_utils.flt(log)
        return render(request, "middlebox/stamper_startup_log.html",
                      {"log": log})
    except Exception as e:
        return render(request, "middlebox/timeout.html", {
            "inside_ajax": True,
            "error": ("stamper Log: " + str(e))
        })

def page_analyze_return(request, saved):
    if globals.selected_run_id is None:
        globals.selected_run_id = \
            globals.core_conn.root.getLatestMeasurementId()
    if globals.selected_run_id is not None:
        id_int = int(globals.selected_run_id)
        cfg = P4STA_utils.read_result_cfg(globals.selected_run_id)
        id_ex = time.strftime('%H:%M:%S %d.%m.%Y', time.localtime(id_int))
        id_list = []
        found = globals.core_conn.root.getAllMeasurements()
        for f in found:
            time_created = time.strftime('%H:%M:%S %d.%m.%Y',
                                         time.localtime(int(f)))
            id_list.append([f, time_created])
        # drop the currently selected measurement from the selectable list
        id_list_final = []
        for f in range(0, len(id_list)):
            if id_list[f][1] != id_ex:
                id_list_final.append(id_list[f])
        error = False
    else:
        cfg = P4STA_utils.read_current_cfg()
        saved = False
        id_int = 0
        id_list_final = []
        id_ex = "no data sets available"
        error = True

    return render(
        request, "middlebox/page_analyze.html", {
            **cfg,
            **{
                "id": [id_int, id_ex],
                "id_list": id_list_final,
                "saved": saved,
                "ext_host_real": cfg["ext_host_real"],
                "error": error
            }
        })

def setup_ssh_checker(request):
    ssh_works = False
    # ping returns exit code 0 on success => ping_works is True
    ping_works = (os.system(
        "timeout 1 ping " + request.POST["ip"] + " -c 1") == 0)
    if ping_works:
        answer = P4STA_utils.execute_ssh(request.POST["user"],
                                         request.POST["ip"],
                                         "echo ssh_works")
        answer = list(answer)
        if len(answer) > 0 and answer[0] == "ssh_works":
            ssh_works = True
    return JsonResponse({"ping_works": ping_works, "ssh_works": ssh_works})

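# The reachability check above builds a shell command by concatenating the
# POSTed IP into an os.system() string. As a purely illustrative alternative
# (not part of P4STA), the same one-second ping probe can be expressed with
# subprocess.run and an argument list, which avoids passing request data
# through a shell:
def _ping_reachable_sketch(ip):
    import subprocess
    try:
        # -c 1: single echo request; timeout=1 aborts after one second
        completed = subprocess.run(["ping", "-c", "1", ip], timeout=1,
                                   stdout=subprocess.DEVNULL,
                                   stderr=subprocess.DEVNULL)
        return completed.returncode == 0
    except subprocess.TimeoutExpired:
        return False
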
def ping(request):
    if request.is_ajax():
        try:
            output = globals.core_conn.root.ping()
            output = P4STA_utils.flt(output)
            return render(request, "middlebox/output_ping.html",
                          {"output": output})
        except Exception as e:
            return render(request, "middlebox/timeout.html", {
                "inside_ajax": True,
                "error": ("ping error: " + str(e))
            })

def stamper_status_wrapper(request, html_file):
    stamper_status = rpyc.timed(globals.core_conn.root.stamper_status, 40)
    stamper_status_job = stamper_status()
    try:
        stamper_status_job.wait()
        result = stamper_status_job.value
        cfg, lines_pm, running, dev_status = result
        cfg = P4STA_utils.flt(cfg)  # cfg contains host status information
        lines_pm = P4STA_utils.flt(lines_pm)
        return render(
            request, html_file, {
                "dev_status": dev_status,
                "dev_is_running": running,
                "pm": lines_pm,
                "cfg": cfg
            })
    except Exception as e:
        print(e)
        return render(request, "middlebox/timeout.html", {
            "inside_ajax": True,
            "error": ("stamper status error " + str(e))
        })

def download_stamper_results(request):
    folder = P4STA_utils.get_results_path(globals.selected_run_id)
    fid = str(globals.selected_run_id)
    files = [[
        folder + "/stamper_" + fid + ".json",
        "results/stamper_" + fid + ".json"
    ], [
        folder + "/output_stamperice_" + fid + ".txt",
        "results/output_stamperice_" + fid + ".txt"
    ]]
    return pack_zip(files, fid, "stamperice_results_")

def status_overview(request):
    try:
        status_job = rpyc.timed(globals.core_conn.root.status_overview, 60)()
        status_job.wait()
        cfg = status_job.value
        cfg = P4STA_utils.flt(cfg)
        return render(request, "middlebox/output_status_overview.html", cfg)
    except Exception as e:
        print(e)
        return render(request, "middlebox/timeout.html", {
            "inside_ajax": True,
            "error": ("stamper status error: " + str(e))
        })

def stamper_results(request):
    if request.is_ajax():
        try:
            sw = globals.core_conn.root.stamper_results(
                globals.selected_run_id)
            sw = P4STA_utils.flt(sw)
            if "error" in sw:
                raise Exception(sw["error"])
            return render(request, "middlebox/output_stamper_results.html",
                          sw)
        except Exception as e:
            print(e)
            return render(
                request, "middlebox/timeout.html", {
                    "inside_ajax": True,
                    "error": ("render stamper results error: " + str(e))
                })

def get_all_targets():
    targets = P4STA_utils.flt(globals.core_conn.root.get_all_targets())
    return targets

def setup_devices(request):
    if request.method == "POST":
        print(request.POST)
        setup_devices_cfg = {}
        if request.POST.get("enable_stamper") == "on":
            setup_devices_cfg["stamper_user"] = request.POST["stamper_user"]
            setup_devices_cfg["stamper_ssh_ip"] = request.POST["stamper_ip"]
            setup_devices_cfg["selected_stamper"] = request.POST[
                "selected_stamper"]
            target_cfg = globals.core_conn.root.get_target_cfg(
                setup_devices_cfg["selected_stamper"])
            setup_devices_cfg["target_specific_dict"] = {}
            if "config" in target_cfg \
                    and "stamper_specific" in target_cfg["config"]:
                for cfg in target_cfg["config"]["stamper_specific"]:
                    if cfg["target_key"] in request.POST:
                        setup_devices_cfg["target_specific_dict"][
                            cfg["target_key"]] = request.POST[
                            cfg["target_key"]]
        if request.POST.get("enable_ext_host") == "on" \
                and "ext_host_user" in request.POST:
            setup_devices_cfg["ext_host_user"] = request.POST["ext_host_user"]
            setup_devices_cfg["ext_host_ssh_ip"] = request.POST["ext_host_ip"]
            setup_devices_cfg["selected_extHost"] = request.POST[
                "selected_extHost"]
        setup_devices_cfg["selected_loadgen"] = request.POST[
            "selected_loadgen"]
        setup_devices_cfg["loadgens"] = []
        for i in range(1, 99):
            if ("loadgen_user_" + str(i)) in request.POST:
                loadgen = {
                    "loadgen_user": request.POST["loadgen_user_" + str(i)],
                    "loadgen_ssh_ip": request.POST["loadgen_ip_" + str(i)]
                }
                setup_devices_cfg["loadgens"].append(loadgen)

        print("===================================================")
        print("=== Setup Device Config from management UI: ======")
        print("===================================================")
        print(setup_devices_cfg)

        # only create install script if button is clicked
        if "create_setup_script_button" in request.POST:
            globals.core_conn.root.write_install_script(setup_devices_cfg)

        # now write config.json with new data
        if request.POST.get("enable_stamper") == "on":
            path = globals.core_conn.root.get_template_cfg_path(
                request.POST["selected_stamper"])
            cfg = globals.core_conn.root.open_cfg_file(path)
            cfg["stamper_ssh"] = request.POST["stamper_ip"]
            cfg["stamper_user"] = request.POST["stamper_user"]
            if request.POST.get("enable_ext_host") == "on" \
                    and "ext_host_user" in request.POST:
                cfg["ext_host_user"] = request.POST["ext_host_user"]
                cfg["ext_host_ssh"] = request.POST["ext_host_ip"]
                cfg["selected_extHost"] = request.POST["selected_extHost"]
            cfg["selected_loadgen"] = request.POST["selected_loadgen"]
            # add all loadgens to loadgen group 1 and 2
            cfg["loadgen_groups"] = [
                {"group": 1, "loadgens": [], "use_group": "checked"},
                {"group": 2, "loadgens": [], "use_group": "checked"}]
            grp1 = setup_devices_cfg["loadgens"][
                len(setup_devices_cfg["loadgens"]) // 2:]
            grp2 = setup_devices_cfg["loadgens"][
                :len(setup_devices_cfg["loadgens"]) // 2]
            id_c = 1
            for loadgen in grp1:
                cfg["loadgen_groups"][0]["loadgens"].append(
                    {"id": id_c, "loadgen_iface": "", "loadgen_ip": "",
                     "loadgen_mac": "", "real_port": "", "p4_port": "",
                     "ssh_ip": loadgen["loadgen_ssh_ip"],
                     "ssh_user": loadgen["loadgen_user"]})
                id_c = id_c + 1
            id_c = 1
            for loadgen in grp2:
                cfg["loadgen_groups"][1]["loadgens"].append(
                    {"id": id_c, "loadgen_iface": "", "loadgen_ip": "",
                     "loadgen_mac": "", "real_port": "", "p4_port": "",
                     "ssh_ip": loadgen["loadgen_ssh_ip"],
                     "ssh_user": loadgen["loadgen_user"]})
                id_c = id_c + 1

            if globals.core_conn.root.check_first_run():
                P4STA_utils.write_config(cfg)
            globals.core_conn.root.first_run_finished()
            return HttpResponseRedirect("/run_setup_script/")

        # cancel case
        globals.core_conn.root.first_run_finished()
        return HttpResponseRedirect("/")

    else:  # request the page
        print("### Setup Devices #####")
        params = {}
        params["stampers"] = P4STA_utils.flt(
            globals.core_conn.root.get_all_targets())
        params["stampers"].sort(key=lambda y: y.lower())
        params["extHosts"] = P4STA_utils.flt(
            globals.core_conn.root.get_all_extHost())
        params["extHosts"].sort(key=lambda y: y.lower())
        # bring python on position 1
        if "PythonExtHost" in params["extHosts"]:
            params["extHosts"].insert(0, params["extHosts"].pop(
                params["extHosts"].index("PythonExtHost")))
        params["loadgens"] = P4STA_utils.flt(
            globals.core_conn.root.get_all_loadGenerators())
        params["loadgens"].sort(key=lambda y: y.lower())
        params["isFirstRun"] = globals.core_conn.root.check_first_run()

        all_target_cfg = {}
        for stamper in params["stampers"]:
            # directly converting to json style because True
            # would be uppercase otherwise => JS needs "true"
            all_target_cfg[stamper] = P4STA_utils.flt(
                globals.core_conn.root.get_stamper_target_obj(
                    target_name=stamper).target_cfg)
        params["all_target_cfg"] = json.dumps(all_target_cfg)

        return render(request, "middlebox/setup_page.html", {**params})

def start_external(request):
    if request.is_ajax():
        print("start_external is ajax")
        try:
            cfg = P4STA_utils.read_current_cfg()
            ext_host_cfg = \
                globals.core_conn.root.get_current_extHost_obj().host_cfg
            print(ext_host_cfg)
            if "status_check" in ext_host_cfg \
                    and "needed_sudos_to_add" in ext_host_cfg["status_check"]:
                print("if true")
                sudos_ok = []
                indx_of_sudos_missing = []
                _nsta = ext_host_cfg["status_check"]["needed_sudos_to_add"]
                for found_sudo in globals.core_conn.root.check_sudo(
                        cfg["ext_host_user"], cfg["ext_host_ssh"]):
                    print("found sudo: " + str(found_sudo))
                    if found_sudo.find("Error checking sudo status") > -1:
                        return render(
                            request, "middlebox/timeout.html", {
                                "inside_ajax": True,
                                "error": "Error checking sudo status of "
                                         "ext host, is it reachable?"
                            })
                    i = 0
                    for needed_sudo in _nsta:
                        if found_sudo.find(needed_sudo) > -1:
                            sudos_ok.append(True)
                        elif i not in indx_of_sudos_missing:
                            indx_of_sudos_missing.append(i)
                        i = i + 1
                if len(sudos_ok) < len(_nsta):
                    error_msg = "Missing visudos: "
                    for i in indx_of_sudos_missing:
                        error_msg = error_msg + _nsta[i] + " | "
                    print(error_msg)
                    return render(request, "middlebox/timeout.html", {
                        "inside_ajax": True,
                        "error": error_msg
                    })

            new_id = globals.core_conn.root.set_new_measurement_id()
            print("\nSET NEW MEASUREMENT ID")
            print(new_id)
            print("###############################\n")

            stamper_running, errors = globals.core_conn.root.start_external()

            mtu_list = []
            for loadgen_grp in cfg["loadgen_groups"]:
                if loadgen_grp["use_group"] == "checked":
                    for host in loadgen_grp["loadgens"]:
                        if "namespace_id" in host \
                                and host["namespace_id"] != "":
                            host["mtu"] = globals.core_conn.root.fetch_mtu(
                                host['ssh_user'], host['ssh_ip'],
                                host['loadgen_iface'], host["namespace_id"])
                        else:
                            host["mtu"] = globals.core_conn.root.fetch_mtu(
                                host['ssh_user'], host['ssh_ip'],
                                host['loadgen_iface'])
                        mtu_list.append(int(host["mtu"]))

            return render(
                request, "middlebox/output_external_started.html", {
                    "running": stamper_running,
                    "errors": list(errors),
                    "cfg": cfg,
                    "min_mtu": min(mtu_list)
                })
        except Exception as e:
            print(e)
            return render(
                request, "middlebox/timeout.html", {
                    "inside_ajax": True,
                    "error": ("start external host error: " + str(e))
                })
    else:
        print("start_external request is not ajax! Do nothing.")

def page_run(request):
    cfg = P4STA_utils.read_current_cfg()
    return render(request, "middlebox/page_run.html", cfg)

def configure_page(request):
    if globals.core_conn.root.check_first_run():
        print("FIRST RUN! Redirect to /setup_devices")
        return HttpResponseRedirect("/setup_devices")

    saved = ""
    target_cfg = P4STA_utils.flt(globals.core_conn.root.get_target_cfg())
    if type(target_cfg) == dict and "error" in target_cfg:
        return render(request, "middlebox/timeout.html", {
            **target_cfg,
            **{"inside_ajax": False}
        })
    if request.method == "POST":
        saved, cfg = updateCfg(request)
        target_cfg = P4STA_utils.flt(globals.core_conn.root.get_target_cfg())
    else:
        cfg = P4STA_utils.read_current_cfg()
    cfg["target_cfg"] = target_cfg

    # The following config updates are only for UI representation
    targets_without_selected = []
    all_targets = get_all_targets()
    for target in all_targets:
        if cfg["selected_target"] != target:
            targets_without_selected.append(target)
    cfg["targets_without_selected"] = targets_without_selected
    cfg["all_available_targets"] = all_targets

    available_cfg_files = P4STA_utils.flt(
        globals.core_conn.root.get_available_cfg_files())
    final_sorted_by_target = []
    for target in sorted(all_targets):
        found = False
        final_sorted_by_target.append("###" + target)
        for elem in available_cfg_files:
            if elem.find(target) > -1:
                final_sorted_by_target.append(elem)
                found = True
        if not found:
            del final_sorted_by_target[-1]
    cfg["available_configs"] = final_sorted_by_target
    cfg["saved"] = saved

    loadgens_without_selected = \
        globals.core_conn.root.get_all_loadGenerators()
    if cfg["selected_loadgen"] in loadgens_without_selected:
        loadgens_without_selected.remove(cfg["selected_loadgen"])
    cfg["loadgens_without_selected"] = P4STA_utils.flt(
        loadgens_without_selected)

    exthosts_without_selected = globals.core_conn.root.get_all_extHost()
    if cfg["selected_extHost"] in exthosts_without_selected:
        exthosts_without_selected.remove(cfg["selected_extHost"])
    cfg["exthosts_without_selected"] = P4STA_utils.flt(
        exthosts_without_selected)

    # if field "p4_ports" in target config,
    # target uses separate hardware ports & p4 ports (e.g. tofino)
    # now only hw (front) ports are left but relabeled as
    # "ports" and p4-ports are ignored
    # ports_list in abstract_target creates mapping 1->1
    cfg["port_mapping"] = "p4_ports" in target_cfg

    cfg["cfg"] = cfg  # needed for dynamic target input_individual
    return render(request, "middlebox/page_config.html", cfg)

def updateCfg(request):
    print(request.POST)
    cfg = P4STA_utils.read_current_cfg()
    target_cfg = globals.core_conn.root.get_target_cfg()
    ports = globals.core_conn.root.get_ports()
    real_ports = ports["real_ports"]
    logical_ports = ports["logical_ports"]
    try:
        # if target has changed first request all port config again
        if cfg["selected_target"] != request.POST["target"]:
            cfg["selected_target"] = request.POST["target"]
            cfg = P4STA_utils.flt(cfg)
            P4STA_utils.write_config(cfg)
            target_cfg = globals.core_conn.root.get_target_cfg()
            ports = globals.core_conn.root.get_ports()
            real_ports = ports["real_ports"]
            logical_ports = ports["logical_ports"]

        cfg["selected_loadgen"] = request.POST["selected_loadgen"]
        cfg["selected_extHost"] = request.POST["selected_extHost"]

        # rebuild loadgen_groups list based on user choice
        if len(cfg["loadgen_groups"]) != int(
                request.POST["num_loadgen_groups"]):
            cfg["loadgen_groups"] = []
            for i in range(1, int(request.POST["num_loadgen_groups"]) + 1):
                cfg["loadgen_groups"].append({"group": i, "loadgens": []})

        for loadgen_grp in cfg["loadgen_groups"]:
            _grp = str(loadgen_grp["group"])
            num_servers = int(request.POST["num_grp_" + _grp])
            servers = []
            i = 1
            for j in range(1, num_servers + 1):
                s = {"id": j}
                # skip form indexes that are not present in the POST data
                _key = "s" + _grp + "_" + str(i) + "_real_port"
                while _key not in request.POST:
                    i += 1
                    if i == 99:
                        break
                    _key = "s" + _grp + "_" + str(i) + "_real_port"
                s["real_port"] = str(
                    request.POST["s" + _grp + "_" + str(i) + "_real_port"])
                try:
                    s["p4_port"] = logical_ports[real_ports.index(
                        s["real_port"])].strip("\n")
                except Exception as e:
                    print("FAILED: Finding p4_port: " + str(e))
                    s["p4_port"] = s["real_port"]
                s["ssh_ip"] = str(
                    request.POST["s" + _grp + "_" + str(i) + "_ssh_ip"])
                s["ssh_user"] = str(
                    request.POST["s" + _grp + "_" + str(i) + "_ssh_user"])
                s["loadgen_iface"] = str(
                    request.POST["s" + _grp + "_" + str(i)
                                 + "_loadgen_iface"])
                s["loadgen_mac"] = str(
                    request.POST["s" + _grp + "_" + str(i) + "_loadgen_mac"])
                s["loadgen_ip"] = str(
                    request.POST["s" + _grp + "_" + str(i)
                                 + "_loadgen_ip"]).split(" ")[0].split("/")[0]
                if "s" + _grp + "_" + str(i) + "_namespace" in request.POST:
                    s["namespace_id"] = str(
                        request.POST["s" + _grp + "_" + str(i)
                                     + "_namespace"])

                # read target specific config from webinterface
                for t_inp in target_cfg["inputs"]["input_table"]:
                    try:
                        if "s" + _grp + "_" + str(i) + "_" + \
                                t_inp["target_key"] in request.POST:
                            s[t_inp["target_key"]] = str(
                                request.POST["s" + _grp + "_" + str(i) + "_"
                                             + t_inp["target_key"]])
                        elif "restrict" not in t_inp \
                                or t_inp["restrict"] == "loadgen":
                            s[t_inp["target_key"]] = ""
                    except Exception as e:
                        print(traceback.format_exc())
                        print("\n#\nError parsing special target "
                              "config parameters: " + str(e))
                servers.append(s)
                i += 1

            if str(request.POST["add_to_grp_" + _grp]) == "1":
                s = {}
                s["id"] = num_servers + 1
                s["real_port"] = ""
                s["p4_port"] = ""
                s["loadgen_ip"] = ""
                s["ssh_ip"] = ""
                s["ssh_user"] = ""
                s["loadgen_iface"] = ""
                # add default values to target specific inputs
                for t_inp in target_cfg["inputs"]["input_table"]:
                    if "default_value" in t_inp:
                        s[t_inp["target_key"]] = t_inp["default_value"]
                    else:
                        s[t_inp["target_key"]] = ""
                servers.append(s)

            # set target specific default values
            for s in servers:
                for t_inp in target_cfg["inputs"]["input_table"]:
                    if "default_value" in t_inp and t_inp["target_key"] in s \
                            and s[t_inp["target_key"]] == "":
                        s[t_inp["target_key"]] = t_inp["default_value"]

            loadgen_grp["loadgens"] = servers

        cfg["dut_ports"] = []
        for loadgen_grp in cfg["loadgen_groups"]:
            cfg["dut_ports"].append({"id": loadgen_grp["group"]})

        try:
            # read target specific config from webinterface
            for t_inp in target_cfg["inputs"]["input_individual"]:
                if t_inp["target_key"] in request.POST:
                    cfg[t_inp["target_key"]] = str(
                        request.POST[t_inp["target_key"]])
                else:
                    cfg[t_inp["target_key"]] = ""
            for t_inp in target_cfg["inputs"]["input_table"]:
                for dut in cfg["dut_ports"]:
                    if "dut" + str(dut["id"]) + "_" + t_inp["target_key"] \
                            in request.POST:
                        dut[t_inp["target_key"]] = str(
                            request.POST["dut" + str(dut["id"]) + "_"
                                         + t_inp["target_key"]])
                    elif "restrict" not in t_inp \
                            or t_inp["restrict"] == "dut":
                        dut[t_inp["target_key"]] = ""
                    if "default_value" in t_inp \
                            and dut[t_inp["target_key"]] == "":
                        dut[t_inp["target_key"]] = t_inp["default_value"]
                if "ext_host_" + t_inp["target_key"] in request.POST:
                    cfg["ext_host_" + t_inp["target_key"]] = str(
                        request.POST["ext_host_" + t_inp["target_key"]])
                elif "restrict" not in t_inp \
                        or t_inp["restrict"] == "ext_host":
                    cfg["ext_host_" + t_inp["target_key"]] = ""
                if "default_value" in t_inp \
                        and ("ext_host_" + t_inp["target_key"]) in cfg \
                        and cfg["ext_host_" + t_inp["target_key"]] == "":
                    cfg["ext_host_" + t_inp["target_key"]] = \
                        t_inp["default_value"]
        except Exception as e:
            print("EXCEPTION: " + str(e))
            print(traceback.format_exc())

        cfg["ext_host_real"] = str(request.POST["ext_host_real"])
        try:
            cfg["ext_host"] = logical_ports[real_ports.index(
                cfg["ext_host_real"])].strip("\n")
        except Exception as e:
            print("FAILED: Finding Ext-Host Real Port: " + str(e))

        # check if second, third, ... dut port should be used or not
        for dut in cfg["dut_ports"]:
            if int(dut["id"]) == 1:
                dut["use_port"] = "checked"
            else:
                try:
                    if "dut_" + str(dut["id"]) + "_use_port" in request.POST:
                        dut["use_port"] = request.POST[
                            "dut_" + str(dut["id"]) + "_use_port"]
                    else:
                        dut["use_port"] = "checked"
                except Exception:
                    dut["use_port"] = "checked"
            for loadgen_grp in cfg["loadgen_groups"]:
                if loadgen_grp["group"] == dut["id"]:
                    loadgen_grp["use_group"] = dut["use_port"]
            try:
                if "dut_" + str(dut["id"]) + "_outgoing_stamp" \
                        in request.POST:
                    dut["stamp_outgoing"] = str(
                        request.POST["dut_" + str(dut["id"])
                                     + "_outgoing_stamp"])
                else:
                    dut["stamp_outgoing"] = "unchecked"
            except Exception:
                print(traceback.format_exc())
                dut["stamp_outgoing"] = "checked"
            if "dut" + str(dut["id"]) + "_real" in request.POST:
                dut["real_port"] = str(
                    request.POST["dut" + str(dut["id"]) + "_real"])
                try:
                    dut["p4_port"] = logical_ports[real_ports.index(
                        dut["real_port"])].strip("\n")
                except Exception:
                    dut["p4_port"] = ""
            else:
                dut["real_port"] = ""
                dut["p4_port"] = ""

        cfg["multicast"] = str(request.POST["multicast"])
        cfg["stamper_ssh"] = str(request.POST["stamper_ssh"])
        cfg["ext_host_ssh"] = str(request.POST["ext_host_ssh"])
        cfg["ext_host_user"] = str(request.POST["ext_host_user"])
        cfg["stamper_user"] = str(request.POST["stamper_user"])
        cfg["ext_host_if"] = str(request.POST["ext_host_if"])
        cfg["program"] = str(request.POST["program"])
        cfg["forwarding_mode"] = str(request.POST["forwarding_mode"])
        if "stamp_tcp" in request.POST:
            cfg["stamp_tcp"] = "checked"
        else:
            cfg["stamp_tcp"] = "unchecked"
        if "stamp_udp" in request.POST:
            cfg["stamp_udp"] = "checked"
        else:
            cfg["stamp_udp"] = "unchecked"

        # save config to file "database"
        print("write config")
        cfg = P4STA_utils.flt(cfg)
        P4STA_utils.write_config(cfg)
        print("finished config write")
        return True, cfg
    except Exception as e:
        print("EXCEPTION: " + str(e))
        print(traceback.format_exc())
        return False, cfg

def external_results(request):
    if request.is_ajax():
        cfg = P4STA_utils.read_result_cfg(globals.selected_run_id)
        try:
            extH_results = analytics.main(
                str(globals.selected_run_id), cfg["multicast"],
                P4STA_utils.get_results_path(globals.selected_run_id))
            ipdv_range = extH_results["max_ipdv"] - extH_results["min_ipdv"]
            pdv_range = extH_results["max_pdv"] - extH_results["min_pdv"]
            rate_jitter_range = extH_results["max_packets_per_second"] - \
                extH_results["min_packets_per_second"]
            latency_range = extH_results["max_latency"] - extH_results[
                "min_latency"]
            display = True
            time_created = time.strftime(
                '%H:%M:%S %d.%m.%Y',
                time.localtime(int(globals.selected_run_id)))

            return render(
                request, "middlebox/output_external_results.html", {
                    "display": display,
                    "filename": globals.selected_run_id,
                    "raw_packets": extH_results["num_raw_packets"],
                    "time": time_created,
                    "cfg": cfg,
                    "cachebuster": str(time.time()).replace(".", ""),
                    "processed_packets":
                        extH_results["num_processed_packets"],
                    "average_latency":
                        analytics.find_unit(extH_results["avg_latency"]),
                    "min_latency":
                        analytics.find_unit(extH_results["min_latency"]),
                    "max_latency":
                        analytics.find_unit(extH_results["max_latency"]),
                    "total_throughput": extH_results["total_throughput"],
                    "min_ipdv": analytics.find_unit(extH_results["min_ipdv"]),
                    "max_ipdv": analytics.find_unit(extH_results["max_ipdv"]),
                    "ipdv_range": analytics.find_unit(ipdv_range),
                    "min_pdv": analytics.find_unit(extH_results["min_pdv"]),
                    "max_pdv": analytics.find_unit(extH_results["max_pdv"]),
                    "ave_pdv": analytics.find_unit(extH_results["avg_pdv"]),
                    "pdv_range": analytics.find_unit(pdv_range),
                    "min_rate_jitter":
                        extH_results["min_packets_per_second"],
                    "max_rate_jitter":
                        extH_results["max_packets_per_second"],
                    "ave_packet_sec":
                        extH_results["avg_packets_per_second"],
                    "rate_jitter_range": rate_jitter_range,
                    "threshold": cfg["multicast"],
                    "ave_ipdv": analytics.find_unit(extH_results["avg_ipdv"]),
                    "latency_range": analytics.find_unit(latency_range),
                    "ave_abs_ipdv":
                        analytics.find_unit(extH_results["avg_abs_ipdv"]),
                    "latency_std_deviation": analytics.find_unit(
                        extH_results["latency_std_deviation"]),
                    "latency_variance": analytics.find_unit_sqr(
                        extH_results["latency_variance"])
                })
        except Exception:
            print(traceback.format_exc())
            return render(
                request, "middlebox/timeout.html", {
                    "inside_ajax": True,
                    "error": ("render external error: "
                              + str(traceback.format_exc()))
                })