def run():
    net_cleanup()
    # Symmetric topology: both paths at bw=1000 with 10 ms delay and no loss.
    net = Mininet(KiteTopo(bw_a=1000, bw_b=1000, delay_ms_a=10, delay_ms_b=10,
                           loss_a=0, loss_b=0),
                  link=TCLink, autoStaticArp=True, switch=OVSBridge, controller=None)
    net.start()
    setup_net(net, ip_tun=True, quic_tun=True, gdb=False, tcpdump=True, multipath=True)
    # CLI(net, script="../scripts/mininet/mininet_auto_run_udp")
    CLI(net)
    teardown_net(net)
    net.stop()
    net_cleanup()
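# net_cleanup() is called above but not defined in this snippet. A minimal
# sketch of what such a helper could do, assuming it simply shells out to
# Mininet's standard cleanup command ("mn -c") to clear stale interfaces,
# bridges and processes between runs; the real helper in this repository may
# do more (or less) than this.
import subprocess

def net_cleanup_sketch():
    # Clear leftover Mininet state from a previous (possibly crashed) run.
    subprocess.call(["sudo", "mn", "-c"])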
def run():
    currentValue = 50
    # Name of output file
    file_path = 'out.log'
    # Remove old log file
    if os.path.exists(file_path):
        os.remove(file_path)
    # x-axis label
    var_name = "sym_bw"
    # x-axis range
    var_range = [5, 300]
    # x-axis step length
    var_step_length = 20
    # Samples for each point
    repetitions = 10
    for i in range(var_range[0], var_range[1], var_step_length):
        # Rewrite the iperf script so it uses the new symmetric bandwidth value
        replaceSubStringInFile(str(currentValue) + "m", str(i * 2) + "m",
                               "../scripts/mininet/auto_iperf_udp.sh")
        currentValue = i * 2
        for j in range(repetitions):
            # Append log to file
            with open(file_path, "a") as myfile:
                myfile.write(var_name + "|" + str(i) + "\n")
            # Execute one mininet configuration
            net_cleanup()
            net = Mininet(KiteTopo(bw_a=i, bw_b=i, delay_ms_a=0, delay_ms_b=0,
                                   loss_a=0, loss_b=0,
                                   queue_size_a=1000, queue_size_b=1000),
                          link=TCLink, autoStaticArp=True, switch=OVSBridge,
                          controller=None)
            net.start()
            setup_net(net, ip_tun=True, quic_tun=True, gdb=False, tcpdump=False,
                      multipath=True)
            CLI(net, script="../scripts/mininet/auto_iperf_udp.sh")
            teardown_net(net)
            net.stop()
            # Clean up mininet
            net_cleanup()
    # Concatenate name, generate diagram
    name = "udp_" + var_name + str(var_range[0]) + "_" + str(var_range[1]) \
        + "_" + str(random.randint(5, 10000))
    print("Filename: " + name)
    os.system("python3 generate_diagram_udp.py " + name)
    # --OBS-- Only on systems with a GUI
    os.system("xdg-open " + name + ".jpg &")
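# replaceSubStringInFile() is used above to rewrite the bandwidth value inside
# auto_iperf_udp.sh, but its definition is not part of this snippet. A minimal
# sketch under the assumption that it does a plain in-place textual
# substitution; the argument order (old, new, path) follows the call site above.
def replace_substring_in_file_sketch(old, new, file_path):
    with open(file_path, "r") as f:
        content = f.read()
    with open(file_path, "w") as f:
        f.write(content.replace(old, new))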
print "experiment %d/%d" % (i, len(gen)) # for size in [1000, 10000, 1000000]: # for size in [1000, 10000, 50000, 1000000]: for size in [1000, 10000, 50000, 1000000]: elapsed_list = [] for c in MyTopo.CANDIDATES: if topo is None: topo = MyTopo(gemodel=args.gemodel, **v) net = Mininet(topo, link=TCLink, host=CPULimitedHost) net.start() else: try: net.stop() except: pass net_cleanup() topo = MyTopo(gemodel=args.gemodel, **v) net = Mininet(topo, link=TCLink, host=CPULimitedHost) net.start() # server.cmd("kill $HTTP_PID") # topo.change_parameters(**v) client = net.get("client") server = net.get("server") topo.disable_ipv6(client) topo.disable_ipv6(server) s1 = net.get("s1") s2 = net.get("s2") time.sleep(1) print client.cmd("ping %s -c 2" % topo.server_ip, verbose=True) MAX_FAILS = 5 # with high loss rates, fails can occur, such as impossible to perform handshake
def experimental_design(ranges, file_sizes, topo_opts, topo_names,
                        database_name='results.db'):
    generate_random_files(file_sizes)
    dir_path = path.dirname(path.abspath(__file__))
    filename = os.path.join(dir_path, "wsp_owd_8")
    nrows, ncols = 8, 139
    matrix = load_wsp(filename, nrows, ncols)
    gen = ParamsGenerator(ranges, matrix)
    vals = gen.generate_all_values()
    # vals = generate_variance_tests(ranges)
    conn = sqlite3.connect(os.path.join(dir_path, database_name))
    cursor = conn.cursor()
    sql_create_table = gen.generate_sql_create_table(
        additional_values=[('test_name', str), ('elapsed_time', float),
                           ('var_elapsed_time', float), ('file_size', int)])
    print sql_create_table
    cursor.execute(sql_create_table)
    conn.commit()
    setLogLevel('info')
    for i, v in enumerate(list(vals)[0:]):
        # Parameter values may come as single-element lists; unwrap them
        for key, value in v.items():
            if isinstance(value, list):
                v[key] = value[0]
        for setup_topo_opts, test_name in zip(topo_opts, topo_names):
            print "net config == " + str(setup_topo_opts)
            print "v == " + str(v)
            topo = KiteTopo(**v)
            net = Mininet(topo, link=TCLink, host=CPULimitedHost)
            net.start()
            setup_net(net, **setup_topo_opts)
            print "experiment %d/%d" % (i, len(gen))
            for size in file_sizes:
                print "file size %d" % size
                client = net['cl']
                server = net['web']

                def run():
                    now = datetime.datetime.now()
                    client.cmd('curl 10.3.0.2/random_%d --connect-timeout 5'
                               ' --output /dev/null' % size)
                    err = int(client.cmd("echo $?"))
                    if err != 0:
                        print("client returned err %d" % err)
                        return 0
                    elapsed_ms = (datetime.datetime.now() - now).total_seconds() * 1000
                    time.sleep(1)
                    print "elapsed: %f milliseconds for %s" % (elapsed_ms, test_name)
                    return elapsed_ms

                results = list(filter(lambda x: x, sorted(run() for _ in range(9))))
                avg = sum(results) / len(results) if results else 0
                median = results[int(len(results) / 2)] if results else 0
                # Dispersion here is the mean absolute deviation, not a true standard deviation
                std_dev = sum(abs(x - avg) for x in results) / len(results) if results else 0
                print "median = %dms, avg = %dms, std_dev = %dms" % (median, avg, std_dev)
                # ugly way to handle failed results...
                values_list = flatten([v[k] for k in sorted(v.keys())]) \
                    + [test_name, median, std_dev, size]
                sql_values_list = gen.generate_sql_insert(values_list)
                print sql_values_list
                cursor.execute(sql_values_list)
                conn.commit()
                print "committed"
            teardown_net(net)
            net.stop()
            net_cleanup()
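# Illustrative invocation of experimental_design(). The ranges, file sizes,
# topology options and names below are made-up placeholders for the sake of
# the example, not values taken from the repository.
if __name__ == '__main__':
    example_ranges = {'bw_a': [10, 100], 'bw_b': [10, 100],
                      'delay_ms_a': [5, 50], 'delay_ms_b': [5, 50]}
    example_file_sizes = [1000, 10000, 1000000]
    example_topo_opts = [dict(ip_tun=True, quic_tun=True, gdb=False,
                              tcpdump=False, multipath=True)]
    example_topo_names = ['multipath']
    experimental_design(example_ranges, example_file_sizes,
                        example_topo_opts, example_topo_names,
                        database_name='results.db')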