def run_test(ctx):
    """Measure goodput/rate quotients over a (rate x delay) grid.

    For every supported protocol, every netem rate and every netem delay,
    run ``num_iterations`` transfers between the mapago client and server
    through the netem middlebox (beta), average the achieved kbit/s and
    store the goodput/rate quotient. Raw results are persisted via
    ``shared.save_raw_data`` after each protocol and finally plotted.

    Args:
        ctx: test context carrying the host configuration (``ctx.config``).

    Raises:
        Exception: if one of the remote hosts is not reachable.
    """
    print('running test: {}'.format(os.path.basename(__file__)[:-3]))

    remoteHosts = ['beta', 'gamma']
    srv_params = {}
    clt_params = {}
    supported_protocols = [
        "tcp-throughput", "tcp-tls-throughput", "quic-throughput"
    ]

    num_iterations = 10
    # upper bound of client restarts per iteration (avoids wait() deadlocks)
    timeout_ctr_limit = 3

    sim_dur = shared.calc_simulation_time(supported_protocols, num_iterations,
                                          timeout_ctr_limit, analyzing_rates,
                                          analyzing_delay)
    print("simulation duration is: {}".format(sim_dur))

    iterations = list(range(num_iterations))

    # fail fast if one of the measurement hosts is down
    for host in remoteHosts:
        avail = shared.host_alive(ctx, host)
        if not avail:
            raise Exception("Host {} not available".format(host))

    # netem runs on beta, shaping both directions (towards alpha and gamma)
    beta_iface_to_alpha = ctx.config['beta']['netem-interfaces-to-alpha']
    beta_iface_to_gamma = ctx.config['beta']['netem-interfaces-to-gamma']
    interfaces = [beta_iface_to_alpha, beta_iface_to_gamma]

    srv_params['-uc-listen-addr'] = '192.186.23.3'
    srv_params['-port'] = '64321'

    clt_params['-control-addr'] = '192.186.23.3'
    clt_params['-control-protocol'] = 'tcp'
    clt_params['-streams'] = '1'
    clt_params['-addr'] = '192.186.25.2'
    clt_params['-deadline'] = '120'
    clt_params['-buffer-length'] = '1400'
    clt_params['-update-interval'] = '1'

    # goodput_rate_avg for all protocols
    total_goodput_rate_avg = {}

    # 1. iterate over protocols
    for protocol in supported_protocols:
        print("\n-------- analyzing: {} --------".format(protocol))
        visited_rate = []
        visited_delay = []
        quotients_all_rates_over_delays = []

        # 2. iterate over rate
        for rate in analyzing_rates:
            print("\n------ configuring rate to: {} --------".format(rate))

            # 3. determine bytes for transmission regarding rate
            clt_bytes = int(shared.calc_clt_bytes(rate))
            clt_params['-bytes'] = str(clt_bytes)

            quotients_single_rate_over_delays = []
            analyzed_delay_per_rate = []

            # 4. deepest for loop: iterate over delay
            for delay in analyzing_delay:
                print(
                    "\n------ configuring delay to: {} --------".format(delay))

                # holds results of ALL iterations per single (delay, rate)
                kbits_per_delay = []

                for iteration in iterations:
                    print(
                        "\n -------- {}. iteration -------".format(iteration))

                    # ensures we dont get stuck in a popen.wait(deadline)
                    # deadlock
                    timeout_ctr = 0

                    # reset queue at netem middlebox
                    shared.netem_reset(ctx, 'beta', interfaces=interfaces)

                    # 5. we know everything: so configure!
                    # NOTE(review): 'delay' is passed without a unit here,
                    # unlike the jitter variant which appends 'ms' — confirm
                    # shared.netem_configure applies the expected unit.
                    shared.netem_configure(
                        ctx,
                        'beta',
                        interfaces=interfaces,
                        netem_params={
                            'rate': '{}kbit'.format(rate),
                            'delay': '{}'.format(delay)
                        })

                    # ensure server is running "fresh" per iter
                    # => no saved crypto cookies
                    # note: using this we cant get "ssh" debug data due to
                    # the background cmd; a logging routine in mapago writing
                    # to a log file on the server could provide it
                    shared.mapago_reset(ctx, 'gamma')
                    shared.prepare_server(ctx, srv_params)

                    # ensures client mapago creation does not happen before
                    # server is ready
                    sleep(5)

                    clt_params['-module'] = '{}'.format(protocol)
                    print("\n starting module: {}".format(
                        clt_params['-module']))

                    msmt_results = []
                    # retry the client until it terminates or the retry
                    # budget is exhausted
                    while len(msmt_results) < 1 and timeout_ctr < timeout_ctr_limit:
                        print("\nIssueing prepare_client!\n")
                        msmt_results = shared.prepare_client(ctx, clt_params)

                        # check if client not terminated
                        if len(msmt_results) < 1:
                            print(
                                "\n!!!!!!Error!!!!!! Client NOT terminated! reissue until client terminates!"
                            )
                            timeout_ctr += 1

                    if timeout_ctr >= timeout_ctr_limit:
                        print("\nTimeout ctr limit reached! Iteration failed")
                        kbits_iter = 0
                    else:
                        kbits_iter = analyze_data(msmt_results, protocol,
                                                  clt_bytes)

                    kbits_per_delay.append(kbits_iter)

                # average the kbit/s results over all iterations
                kbits_per_delay_normalized = 0
                for kbits_iter in kbits_per_delay:
                    kbits_per_delay_normalized += kbits_iter
                kbits_per_delay_normalized = kbits_per_delay_normalized / num_iterations
                print("\n mean kbits per delay: {}".format(
                    kbits_per_delay_normalized))

                # 6. calculate for the single (delay, rate) tuple our
                # goodput_rate_quotient
                # i.e. rate = 5, delay = 2; rate = 5, delay = 5; ...
                goodput_rate_quotient_avg = kbits_per_delay_normalized / rate

                # 7. add to the list of quotients for this single rate
                quotients_single_rate_over_delays.append(
                    goodput_rate_quotient_avg)

                # 7.5 remember which delay was analyzed
                analyzed_delay_per_rate.append(delay)

            # 8. all quotients for a given SINGLE rate and all delays done:
            # store them alongside the visited rates/delays
            quotients_all_rates_over_delays.append(
                quotients_single_rate_over_delays)
            visited_rate.append(rate)
            visited_delay.append(analyzed_delay_per_rate)

        # 9. list of lists for a single protocol complete: add it
        total_goodput_rate_avg[protocol] = (visited_rate, visited_delay,
                                            quotients_all_rates_over_delays)

        # persist progress after every finished protocol
        shared.save_raw_data(
            os.path.basename(__file__)[:-3], total_goodput_rate_avg)

        print("\n visited_rate: ", visited_rate)
        print("\n visited_delay: ", visited_delay)
        print("\n total_goodput_rate_avg: ", total_goodput_rate_avg)
        print("\nsleeping")
        sleep(5)
        print("\n next protocol")

    '''
    QUIC thesis results:
    - These results were obtained in the context of the measurement
    - Used this line for verifying the result
    total_goodput_rate_avg = {"tcp-throughput": [[5, 50, 250, 500], [[0, 10, 50, 250], [0, 10, 50, 250], [0, 10, 50, 250], [0, 10, 50, 250]], [[0.8987872192411489, 0.7823267454919813, 0.8717731267708974, 0.9449281470690319], [0.8914808616112653, 0.9393961996215485, 0.9553501262186983, 0.9014234459920567], [0.9502326564667469, 0.9542850021635289, 0.9561571123219293, 0.9509310693945888], [0.9563853871451465, 0.9563302765309082, 0.9564144806993923, 0.9553549476575104]]], "tcp-tls-throughput": [[5, 50, 250, 500], [[0, 10, 50, 250], [0, 10, 50, 250], [0, 10, 50, 250], [0, 10, 50, 250]], [[0.7915266722574326, 0.7601227070334426, 0.7381980505336566, 0.8927837899522644], [0.915011005190392, 0.9349899435998139, 0.7538226523850817, 0.7739993436101752], [0.8963259250550947, 0.9095146161880139, 0.9074419480214088, 0.8893709701924917], [0.9146203613669235, 0.9309780058311868, 0.8878567577638666, 0.8991129060391729]]], "quic-throughput": [[5, 50, 250, 500], [[0, 10, 50, 250], [0, 10, 50, 250], [0, 10, 50, 250], [0, 10, 50, 250]], [[0.0, 0.0, 0.0, 0.0], [0.3006895781631222, 0.13806085691509123, 0.30098765980594716, 0.44843030517187144], [0.9044825487791093, 0.8881095657532313, 0.901846031337234, 0.8991592469774472], [0.9027135581514798, 0.9059463999240039, 0.9052832465014307, 0.8994225971623447]]]}
    '''

    plot_data(total_goodput_rate_avg)
def run_test(ctx):
    """Measure goodput/rate quotients over a (rate x delay+jitter) grid.

    Variant of the delay sweep that additionally configures netem jitter:
    for each delay the jitter is derived as ``delay / 100 * jitter_ratio``
    (``jitter_ratio`` is defined at module level). For every supported
    protocol, rate and delay the test runs ``num_iterations`` transfers
    through the netem middlebox (beta), averages the achieved kbit/s and
    stores the goodput/rate quotient. Raw results are persisted via
    ``shared.save_raw_data`` after each protocol and finally plotted.

    Args:
        ctx: test context carrying the host configuration (``ctx.config``).

    Raises:
        Exception: if one of the remote hosts is not reachable.
    """
    print('running test: {}'.format(os.path.basename(__file__)[:-3]))

    remoteHosts = ['beta', 'gamma']
    srv_params = {}
    clt_params = {}
    supported_protocols = [
        "tcp-throughput", "tcp-tls-throughput", "quic-throughput"
    ]

    print("rate: ", analyzing_rates)
    print("delay: ", analyzing_delay)

    num_iterations = 10
    # upper bound of client restarts per iteration (avoids wait() deadlocks)
    timeout_ctr_limit = 3

    sim_dur = shared.calc_simulation_time(supported_protocols, num_iterations,
                                          timeout_ctr_limit, analyzing_rates,
                                          analyzing_delay)
    print("simulation duration is: {}".format(sim_dur))

    iterations = list(range(num_iterations))

    # fail fast if one of the measurement hosts is down
    for host in remoteHosts:
        avail = shared.host_alive(ctx, host)
        if not avail:
            raise Exception("Host {} not available".format(host))

    # netem runs on beta, shaping both directions (towards alpha and gamma)
    beta_iface_to_alpha = ctx.config['beta']['netem-interfaces-to-alpha']
    beta_iface_to_gamma = ctx.config['beta']['netem-interfaces-to-gamma']
    interfaces = [beta_iface_to_alpha, beta_iface_to_gamma]

    srv_params['-uc-listen-addr'] = '192.186.23.3'
    srv_params['-port'] = '64321'

    clt_params['-control-addr'] = '192.186.23.3'
    clt_params['-control-protocol'] = 'tcp'
    clt_params['-streams'] = '1'
    clt_params['-addr'] = '192.186.25.2'
    clt_params['-deadline'] = '120'
    clt_params['-buffer-length'] = '1400'
    clt_params['-update-interval'] = '1'

    # goodput_rate_avg for all protocols
    total_goodput_rate_avg = {}

    # 1. iterate over protocols
    for protocol in supported_protocols:
        print("\n-------- analyzing: {} --------".format(protocol))
        visited_rate = []
        visited_delay = []
        quotients_all_rates_over_delays = []

        # 2. iterate over rate
        for rate in analyzing_rates:
            print("\n------ configuring rate to: {} --------".format(rate))

            # 3. determine bytes for transmission regarding rate
            clt_bytes = int(shared.calc_clt_bytes(rate))
            clt_params['-bytes'] = str(clt_bytes)

            quotients_single_rate_over_delays = []
            analyzed_delay_per_rate = []

            # 4. deepest for loop: iterate over delay
            for delay in analyzing_delay:
                # holds results of ALL iterations per single (delay, rate)
                kbits_per_delay = []

                # jitter scales with the configured delay
                jitter = (float(delay) / 100) * jitter_ratio
                print("\nsetting jitter to: ", jitter)
                print(
                    "\n------ configuring delay and jitter to: {}, {} --------"
                    .format(delay, jitter))

                for iteration in iterations:
                    print(
                        "\n -------- {}. iteration -------".format(iteration))

                    # ensures we dont get stuck in a popen.wait(deadline)
                    # deadlock
                    timeout_ctr = 0

                    # reset queue at netem middlebox
                    shared.netem_reset(ctx, 'beta', interfaces=interfaces)

                    # 5. we know everything: so configure!
                    shared.netem_configure(
                        ctx,
                        'beta',
                        interfaces=interfaces,
                        netem_params={
                            'rate': '{}kbit'.format(rate),
                            'delay+jitter': '{}ms {}ms'.format(delay, jitter)
                        })

                    # ensure server is running "fresh" per iter
                    # => no saved crypto cookies
                    # note: using this we cant get "ssh" debug data due to
                    # the background cmd; a logging routine in mapago writing
                    # to a log file on the server could provide it
                    shared.mapago_reset(ctx, 'gamma')
                    shared.prepare_server(ctx, srv_params)

                    # ensures client mapago creation does not happen before
                    # server is ready
                    sleep(5)

                    clt_params['-module'] = '{}'.format(protocol)
                    print("\n starting module: {}".format(
                        clt_params['-module']))

                    msmt_results = []
                    # retry the client until it terminates or the retry
                    # budget is exhausted
                    while len(msmt_results) < 1 and timeout_ctr < timeout_ctr_limit:
                        print("\nIssueing prepare_client!\n")
                        msmt_results = shared.prepare_client(ctx, clt_params)

                        # check if client not terminated
                        if len(msmt_results) < 1:
                            print(
                                "\n!!!!!!Error!!!!!! Client NOT terminated! reissue until client terminates!"
                            )
                            timeout_ctr += 1

                    if timeout_ctr >= timeout_ctr_limit:
                        print("\nTimeout ctr limit reached! Iteration failed")
                        kbits_iter = 0
                    else:
                        kbits_iter = analyze_data(msmt_results, protocol,
                                                  clt_bytes)

                    kbits_per_delay.append(kbits_iter)

                # average the kbit/s results over all iterations
                kbits_per_delay_normalized = 0
                for kbits_iter in kbits_per_delay:
                    kbits_per_delay_normalized += kbits_iter
                kbits_per_delay_normalized = kbits_per_delay_normalized / num_iterations
                print("\n mean kbits per delay: {}".format(
                    kbits_per_delay_normalized))

                # 6. calculate for the single (delay, rate) tuple our
                # goodput_rate_quotient
                goodput_rate_quotient_avg = kbits_per_delay_normalized / rate

                # 7. add to the list of quotients for this single rate
                quotients_single_rate_over_delays.append(
                    goodput_rate_quotient_avg)

                # 7.5 remember which delay was analyzed
                analyzed_delay_per_rate.append(delay)

            # 8. all quotients for a given SINGLE rate and all delays done:
            # store them alongside the visited rates/delays
            quotients_all_rates_over_delays.append(
                quotients_single_rate_over_delays)
            visited_rate.append(rate)
            visited_delay.append(analyzed_delay_per_rate)

        # 9. list of lists for a single protocol complete: add it
        total_goodput_rate_avg[protocol] = (visited_rate, visited_delay,
                                            quotients_all_rates_over_delays)

        # persist progress after every finished protocol
        shared.save_raw_data(
            os.path.basename(__file__)[:-3], total_goodput_rate_avg)

        print("\n visited_rate: ", visited_rate)
        print("\n visited_delay: ", visited_delay)
        print("\n total_goodput_rate_avg: ", total_goodput_rate_avg)
        print("\nsleeping")
        sleep(5)
        print("\n next protocol")

    '''
    QUIC thesis results:
    - These results were obtained in the context of the measurement
    - Used this line for verifying the result
    total_goodput_rate_avg = {"quic-throughput": [[5, 50, 250, 500], [[0, 10, 50, 250, 1000, 10000], [0, 10, 50, 250, 1000, 10000], [0, 10, 50, 250, 1000, 10000], [0, 10, 50, 250, 1000, 10000]], [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.80161227838563056, 0.8040652428144947, 0.8041416107786962, 0.7940765487525084, 0.7444791929425141, 0.0], [0.8949120673581473, 0.8681223437939265, 0.8227633821170063, 0.0, 0.0, 0.0], [0.9020158717586202, 0.8678721881786707, 0.7935316913337243, 0.0, 0.0, 0.0]]], "tcp-throughput": [[5, 50, 250, 500], [[0, 10, 50, 250, 1000, 10000], [0, 10, 50, 250, 1000, 10000], [0, 10, 50, 250, 1000, 10000], [0, 10, 50, 250, 1000, 10000]], [[0.8947504090914252, 0.6525793098184648, 0.7649036420047517, 0.8389996637955015, 0.7478415226187547, 0.4889345563833384], [0.9368367623192994, 0.7945618978868387, 0.8481709831094519, 0.8196944154301594, 0.622114906545979, 0.18108507164912754], [0.9544446957447422, 0.9316989097856575, 0.924426515051143, 0.8985641638385886, 0.8391914638969366, 0.0], [0.9563714427147644, 0.9561166758797403, 0.954858844154299, 0.9530127448208954, 0.8657837061661853, 0.0]]], "tcp-tls-throughput": [[5, 50, 250, 500], [[0, 10, 50, 250, 1000, 10000], [0, 10, 50, 250, 1000, 10000], [0, 10, 50, 250, 1000, 10000], [0, 10, 50, 250, 1000, 10000]], [[0.905965133648742, 0.625319284545624, 0.6197808646068161, 0.7395122680163873, 0.7086336896330845, 0.48219699140931727], [0.7789134555766118, 0.7740938538514139, 0.9094230363344891, 0.6078073032754268, 0.6734109346990803, 0.18402407325358308], [0.920490309867192, 0.7166749984938212, 0.7119296816954054, 0.8449847624341839, 0.7851837084388277, 0.0], [0.8985949229726562, 0.8379584627409254, 0.7977987060452126, 0.7697167567685701, 0.7220478414231848, 0.0]]]}
    '''

    plot_data(total_goodput_rate_avg)
def run_test(ctx):
    """Replay stub: print the expected runtime and plot archived results.

    This variant does not run any measurement. It prints the estimated
    simulation duration for the mean-good-burst sweep and then plots the
    hardcoded thesis results (goodput/rate quotients per mean PER,
    protocol, rate and mean loss-burst length).

    Args:
        ctx: test context (unused beyond the shared interface contract).
    """
    print('running test: {}'.format(os.path.basename(__file__)[:-3]))

    supported_protocols = [
        "quic-throughput", "tcp-tls-throughput", "tcp-throughput"
    ]

    print("rate: ", analyzing_rates)

    num_iterations = 10
    timeout_ctr_limit = 1

    sim_dur = shared.calc_simulation_time(supported_protocols, num_iterations,
                                          timeout_ctr_limit, analyzing_rates,
                                          analyzing_mean_loss_bursts)
    print("simulation duration for single per is: {}".format(sim_dur))
    print("simulation duration for {} good_bursts is: {}".format(
        len(analyzing_mean_good_bursts),
        len(analyzing_mean_good_bursts) * sim_dur[0]))

    # QUIC thesis results:
    # - These results were obtained in the context of the measurement
    # - Used for verifying the result via the plot below.
    # Keys: mean PER (%) -> protocol -> (rates, loss bursts per rate,
    # goodput/rate quotients per rate).
    total_goodput_rate_avg_over_mean_per = {
        "25": {
            "tcp-throughput": [
                [500, 250, 50, 5],
                [[2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16]],
                [[0.3616450580977332, 0.0, 0.0, 0.0],
                 [0.7234947141190292, 0.0, 0.0, 0.0],
                 [0.9278888981614432, 0.5996304493905685, 0.0, 0.0],
                 [0.8922310802098178, 0.2780643542771529, 0.0, 0.0]]],
            "tcp-tls-throughput": [
                [500, 250, 50, 5],
                [[2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16]],
                [[0.20886972200480453, 0.0, 0.0, 0.0],
                 [0.7356625688614528, 0.0, 0.0, 0.0],
                 [0.9169528368319088, 0.5024409361258316, 0.0, 0.0],
                 [0.6833873917879962, 0.15321988407061807, 0.0, 0.0]]],
            "quic-throughput": [
                [500, 250, 50, 5],
                [[2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16]],
                [[0.7481241153171786, 0.34033798138246135, 0.0, 0.0],
                 [0.897155362287013, 0.4813757475480597, 0.0, 0.0],
                 [0.7450584883114868, 0.869712028761885, 0.5534287329464594, 0.0],
                 [0.0, 0.0, 0.0, 0.0]]]
        },
        "50": {
            "tcp-throughput": [
                [500, 250, 50, 5],
                [[2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16]],
                [[0.8332938324305929, 0.0, 0.0, 0.0],
                 [0.9522346712010926, 0.37311149478163663, 0.0, 0.0],
                 [0.9346601075656121, 0.7139442966051202, 0.2279162701541853, 0.0],
                 [0.7352887142936739, 0.3036182771879675, 0.2731531575174177, 0.15353118933526913]]],
            "tcp-tls-throughput": [
                [500, 250, 50, 5],
                [[2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16]],
                [[0.8371767520031044, 0.0, 0.0, 0.0],
                 [0.9197378416767897, 0.45035891136918105, 0.0, 0.0],
                 [0.9250451750032288, 0.9165682532416928, 0.0, 0.0],
                 [0.5861799456745075, 0.7485430119229288, 0.27744199130139224, 0.1528655752716819]]],
            "quic-throughput": [
                [500, 250, 50, 5],
                [[2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16]],
                [[0.8958718303327223, 0.7262371290095782, 0.0, 0.0],
                 [0.899704914444364, 0.735718315752719, 0.11044326101851897, 0.0],
                 [0.8993799611784086, 0.8961340219135785, 0.3007580842235445, 0.13454097718073696],
                 [0.0, 0.0, 0.0, 0.0]]]
        },
        "100": {
            "tcp-throughput": [
                [500, 250, 50, 5],
                [[2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16]],
                [[0.9515523382062098, 0.41353804612863443, 0.0, 0.0],
                 [0.953372772177897, 0.7425003502736932, 0.2703997540235107, 0.0],
                 [0.7305014429039499, 0.730493209215142, 0.6045949998468568, 0.0],
                 [0.7585419095585217, 0.7093557385597014, 0.44193353159976345, 0.15301336038833968]]],
            "tcp-tls-throughput": [
                [500, 250, 50, 5],
                [[2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16]],
                [[0.9318681644972888, 0.571726416633851, 0.0, 0.0],
                 [0.9352538049424096, 0.9369171724827207, 0.6430219681774378, 0.0],
                 [0.9086096430202438, 0.8927189108374337, 0.700087880812091, 0.14565182529065013],
                 [0.9056127208435207, 0.4505387172922034, 0.3029285170292169, 0.30213224793107224]]],
            "quic-throughput": [
                [500, 250, 50, 5],
                [[2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16]],
                [[0.9000998879227554, 0.8885332409102622, 0.5087302354727139, 0.0],
                 [0.9031369912931142, 0.9019036503250342, 0.7435758681218976, 0.4418874201949945],
                 [0.8961946400188338, 0.9000579365061248, 0.883997947061482, 0.45129026826316043],
                 [0.0, 0.0, 0.0, 0.0]]]
        }
    }

    plot_data(total_goodput_rate_avg_over_mean_per)
def run_test(ctx):
    """Measure goodput/rate quotients under simple-Gilbert burst loss.

    Outer sweep over the mean packet error rate (``analyzing_mean_pers``).
    For every mean PER, protocol, netem rate and mean loss-burst length,
    ``num_iterations`` transfers are run through the netem middlebox (beta)
    configured with a simple Gilbert loss model; the averaged goodput is
    normalized by the configured rate and collected per mean PER. Results
    are persisted via ``shared.save_raw_data`` (interim per protocol, full
    map per mean PER) and finally plotted.

    Args:
        ctx: test context carrying the host configuration (``ctx.config``).

    Raises:
        Exception: if one of the remote hosts is not reachable.
    """
    print('running test: {}'.format(os.path.basename(__file__)[:-3]))

    remoteHosts = ['beta', 'gamma']
    srv_params = {}
    clt_params = {}
    supported_protocols = [
        "quic-throughput", "tcp-tls-throughput", "tcp-throughput"
    ]

    num_iterations = 10
    # single retry only: a stuck client immediately fails the iteration
    timeout_ctr_limit = 1

    sim_dur = shared.calc_simulation_time(supported_protocols, num_iterations,
                                          timeout_ctr_limit, analyzing_rates,
                                          analyzing_mean_loss_bursts)
    print("simulation duration for single per is: {}".format(sim_dur))
    # BUGFIX: the total duration was hardcoded as 4 * sim_dur[0] although the
    # message reports len(analyzing_mean_pers) sweeps; scale by the actual
    # number of analyzed mean PERs (as the good-burst variant does).
    print("simulation duration for {} pers is: {}".format(
        len(analyzing_mean_pers),
        len(analyzing_mean_pers) * sim_dur[0]))

    iterations = list(range(num_iterations))

    # fail fast if one of the measurement hosts is down
    for host in remoteHosts:
        avail = shared.host_alive(ctx, host)
        if not avail:
            raise Exception("Host {} not available".format(host))

    # netem runs on beta, shaping both directions (towards alpha and gamma)
    beta_iface_to_alpha = ctx.config['beta']['netem-interfaces-to-alpha']
    beta_iface_to_gamma = ctx.config['beta']['netem-interfaces-to-gamma']
    interfaces = [beta_iface_to_alpha, beta_iface_to_gamma]

    srv_params['-uc-listen-addr'] = '192.186.23.3'
    srv_params['-port'] = '64321'

    clt_params['-control-addr'] = '192.186.23.3'
    clt_params['-control-protocol'] = 'tcp'
    clt_params['-streams'] = '1'
    clt_params['-addr'] = '192.186.25.2'
    clt_params['-deadline'] = '90'
    clt_params['-buffer-length'] = '1400'
    clt_params['-update-interval'] = '1'

    # per-mean-PER map of per-protocol results
    total_goodput_rate_avg_over_mean_per = {}

    # 0. iterate over mean packet error rates
    for mean_per in analyzing_mean_pers:
        print("\n-------- analyzing mean_per: {} --------".format(mean_per))

        # grows {"quic": ...} -> {"quic": ..., "tcp": ..., "tls": ...}
        total_goodput_rate_avg = {}

        # 1. iterate over protocols
        for protocol in supported_protocols:
            print("\n-------- analyzing: {} --------".format(protocol))
            visited_rate = []
            visited_loss = []
            quotients_all_rates_over_losses = []

            # 2. iterate over rate
            for rate in analyzing_rates:
                print("\n------ configuring rate to: {} --------".format(rate))

                # 3. determine bytes for transmission regarding rate
                clt_bytes = int(shared.calc_clt_bytes(rate))
                clt_params['-bytes'] = str(clt_bytes)

                quotients_single_rate_over_losses = []
                analyzed_loss_per_rate = []

                # 4. deepest for-loop: iterate over mean_loss_burst
                for mean_loss_burst in analyzing_mean_loss_bursts:
                    print(
                        "\n------ configuring mean_loss_burst to: {} --------".
                        format(mean_loss_burst))

                    # holds results of ALL iterations per (loss, rate) tuple
                    kbits_per_loss = []

                    # simple Gilbert model: r = P(bad -> good) is the inverse
                    # of the mean burst length; p = P(good -> bad) is derived
                    # from the requested mean PER
                    r = 1 / mean_loss_burst
                    p_mean_per = mean_per / 100
                    print("configuring p_mean_per to: {}".format(p_mean_per))
                    p = (r * p_mean_per) / (1 - p_mean_per)
                    print("configuring p to: {}".format(p))
                    print("configuring r to: {}".format(r))
                    good_state_holding_time = 1 / p
                    print(
                        "handling on average {} packets in good before going to bad"
                        .format(good_state_holding_time))
                    print(
                        "handling on average {} packets in bad before going to good"
                        .format(mean_loss_burst))

                    for iteration in iterations:
                        print("\n -------- {}. iteration -------".format(
                            iteration))

                        # ensures we dont get stuck in a popen.wait(deadline)
                        # deadlock
                        timeout_ctr = 0

                        # reset queue at netem middlebox
                        shared.netem_reset(ctx, 'beta', interfaces=interfaces)

                        # 5. we know everything: so configure!
                        shared.netem_configure(
                            ctx,
                            'beta',
                            interfaces=interfaces,
                            netem_params={
                                'rate': '{}kbit'.format(rate),
                                'simpleGilbertLoss': '{}% {}%'.format(
                                    p * 100, r * 100)
                            })

                        # ensure server is running "fresh" per iter
                        # => no saved crypto cookies
                        shared.mapago_reset(ctx, 'gamma')
                        shared.prepare_server(ctx, srv_params)

                        # ensures client mapago creation does not happen
                        # before server is ready
                        sleep(5)

                        clt_params['-module'] = '{}'.format(protocol)
                        print("\n starting module: {}".format(
                            clt_params['-module']))

                        msmt_results = []
                        # retry the client until it terminates or the retry
                        # budget is exhausted
                        while len(msmt_results) < 1 and timeout_ctr < timeout_ctr_limit:
                            print("\nIssueing prepare_client!\n")
                            msmt_results = shared.prepare_client(
                                ctx, clt_params)

                            if len(msmt_results) < 1:
                                print(
                                    "\n!!!!!!Error!!!!!! Client NOT terminated! reissue until client terminates!"
                                )
                                timeout_ctr += 1

                        if timeout_ctr >= timeout_ctr_limit:
                            print(
                                "\nTimeout ctr limit reached! Iteration failed"
                            )
                            kbits_iter = 0
                        else:
                            kbits_iter = analyze_data(msmt_results, protocol,
                                                      clt_bytes)

                        # kbits results of each iteration
                        kbits_per_loss.append(kbits_iter)

                    # average the kbit/s results over all iterations
                    kbits_per_loss_normalized = 0
                    for kbits_iter in kbits_per_loss:
                        kbits_per_loss_normalized += kbits_iter
                    kbits_per_loss_normalized = kbits_per_loss_normalized / num_iterations
                    print("\n mean kbits per mean_loss_burst: {}".format(
                        kbits_per_loss_normalized))

                    # 6. goodput/rate quotient for this single
                    # (mean_loss_burst, rate) tuple
                    goodput_rate_quotient_avg = kbits_per_loss_normalized / rate

                    # 7. collect the quotient and the analyzed loss burst
                    # for this rate
                    quotients_single_rate_over_losses.append(
                        goodput_rate_quotient_avg)
                    analyzed_loss_per_rate.append(mean_loss_burst)

                # 8. all quotients for a given SINGLE rate and all losses
                # done: store them alongside the visited rates/losses
                quotients_all_rates_over_losses.append(
                    quotients_single_rate_over_losses)
                visited_rate.append(rate)
                visited_loss.append(analyzed_loss_per_rate)

            # 9. list of lists for a single protocol complete: add it
            total_goodput_rate_avg[protocol] = (
                visited_rate, visited_loss, quotients_all_rates_over_losses)

            # save the current version of total_goodput_rate_avg after
            # every finished protocol
            interim_msmt_result = os.path.basename(__file__)[:-3] + "Interim"
            shared.save_raw_data(interim_msmt_result, total_goodput_rate_avg)

            print("\n visited_rate: ", visited_rate)
            print("\n visited_loss: ", visited_loss)
            print("\n total_goodput_rate_avg: ", total_goodput_rate_avg)
            print("\nsleeping")
            sleep(5)
            print("\n next protocol")

        # persist the full per-mean-PER map after each mean PER sweep
        total_goodput_rate_avg_over_mean_per[mean_per] = total_goodput_rate_avg
        shared.save_raw_data(
            os.path.basename(__file__)[:-3],
            total_goodput_rate_avg_over_mean_per)

    '''
    QUIC thesis results:
    - These results were obtained in the context of the measurement
    - Used this line for verifying the result

    # mean-PER = 10%
    total_goodput_rate_avg_over_mean_per = {"10": {"tcp-tls-throughput": [[500, 250, 50, 5], [[2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16]], [[0.0, 0.0, 0.0, 0.0], [0.1796758624402684, 0.09681631223578252, 0.0, 0.22547596905631857], [0.8538951302118121, 0.3725531491798668, 0.44610610364628095, 0.30402388355520105], [0.4337156612991152, 0.11559839449667024, 0.1543480211862107, 0.15041069037363763]]], "tcp-throughput": [[500, 250, 50, 5], [[2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16]], [[0.0, 0.0, 0.0, 0.0], [0.18610042911192348, 0.0, 0.0, 0.13809595676961314], [0.7979374626906224, 0.6951387544139145, 0.7686291354056989, 0.1446643184561609], [0.45938173370385604, 0.4278040667088546, 0.5455886715930053, 0.3027597371671452]]], "quic-throughput": [[500, 250, 50, 5], [[2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16]], [[0.7386721300700039, 0.6911498543832674, 0.3453532850225947, 0.0], [0.8794329718993402, 0.6977864005464235, 0.5757086137437306, 0.594549969721013], [0.898514861402809, 0.7420622226960838, 0.7350185376835979, 0.7517594174868713], [0.0, 0.0, 0.0, 0.0]]]}}

    # mean-PER = 20%
    total_goodput_rate_avg_over_mean_per = {"20": {"tcp-tls-throughput": [[500, 250, 50, 5], [[2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.13424765923599835, 0.0, 0.2654570601010148, 0.15136688707590124]]], "tcp-throughput": [[500, 250, 50, 5], [[2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.08773547601322287, 0.09807980582160043, 0.0, 0.0], [0.2586507599749574, 0.26229089447621, 0.0, 0.31272853696807]]], "quic-throughput": [[500, 250, 50, 5], [[2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8, 16]], [[0.6552915924410667, 0.0, 0.0, 0.0], [0.5323270522190142, 0.0, 0.0, 0.0], [0.85228504351984, 0.5954838024532209, 0.6005059897673689, 0.30105901991509665], [0.0, 0.0, 0.0, 0.0]]]}}
    '''

    plot_data(total_goodput_rate_avg_over_mean_per)