def LoSC_Bouillard_optimizeByDelayBound_rate(arrivals, sc: ServiceCurve, weights, foi):
    """Optimize the Bouillard left-over service curve for `foi` by delay bound.

    Heuristic variant: cross flows are sorted by their token-bucket rate `r`
    (descending) before trying every split index `j`, keeping the candidate
    `beta` that yields the smallest non-negative delay bound.

    :param arrivals: list of token-bucket arrival curves (one per flow)
    :param sc: aggregate service curve of the GPS node
    :param weights: per-flow GPS weights, aligned with `arrivals`
    :param foi: index of the flow of interest in `arrivals`/`weights`
    :return: tuple (best left-over service curve or None, description string)
    """
    assert (
        arrivals and len(arrivals) and weights and len(weights)
        and len(arrivals) == len(weights) and weights[foi]
    ), 'pre-conditions failed for GPS.LoSC_Bouillard_optimizeByDelayBound(...)'
    # Work on shallow copies so the caller's lists are not reordered/mutated
    # (the original popped/appended on the caller's lists in place).
    arrivals = list(arrivals)
    weights = list(weights)
    # re-indexing: move the flow of interest to the end
    arr_foi = arrivals.pop(foi)
    weights_foi = weights.pop(foi)
    # heuristic sort key: token-bucket rate r, descending
    arrivals_weights = sorted(zip(arrivals, weights),
                              key=lambda x: x[0].r, reverse=True)
    arrivals = [a for a, _ in arrivals_weights]
    weights = [w for _, w in arrivals_weights]
    arrivals.append(arr_foi)
    weights.append(weights_foi)
    new_foi = len(arrivals) - 1

    beta_i = None
    min_delay = None  # in terms of delay
    best_j = None
    _iter = 1
    start = datetime.datetime.now()
    for j in range(new_foi):
        # BUGFIX: original test `_iter % 5 <= 5` was always true (a mod-5
        # residue is at most 4); report progress only every 5th iteration.
        if _iter % 5 == 0:
            clear_last_line()
            logging.debug(f"j: {_iter} of {new_foi}")
            percentage = round(_iter / new_foi * 100)
            print(
                f"calculating {'#'*percentage}{'-'*(abs(100-percentage))} {percentage}%"
            )
        beta_candidate = GPS.LoSC_Bouillard_new(arrivals, sc, weights, new_foi, j)
        delay_candidate = NC.delay_bound_token_bucket_rate_latency(
            arrivals[new_foi], beta_candidate)
        if min_delay is None or delay_candidate <= min_delay:
            # we ignore negative delay bounds as they are not reasonable
            if delay_candidate >= 0:
                beta_i = beta_candidate
                min_delay = delay_candidate
                best_j = j
        _iter += 1
    write(f"j: {_iter-1} of {new_foi}")
    duration = datetime.datetime.now() - start
    print_write(
        "total computation time: ",
        ":".join(str(round(float(i))).zfill(2) for i in str(duration).split(":")))
    return beta_i, f'best_j={best_j}'
def LoSC_Chang_optimizeByDelayBound(arrivals, sc: ServiceCurve, weights, foi, subsetlist):
    """Optimize Chang's left-over service curve for `foi` by delay bound.

    Iterates over the candidate subsets in `subsetlist` (skipping the empty
    set), computes Chang's left-over service curve for each, and keeps the
    one with the smallest non-negative delay bound for the flow of interest.

    :param arrivals: list of token-bucket arrival curves (one per flow)
    :param sc: aggregate service curve of the GPS node
    :param weights: per-flow GPS weights, aligned with `arrivals`
    :param foi: index of the flow of interest
    :param subsetlist: iterable of flow-index subsets to try (e.g. a powerset)
    :return: tuple (best beta or None, description, len(best subset), best subset)
    """
    global _v
    assert (
        arrivals and len(arrivals) and weights and len(weights)
        and len(arrivals) == len(weights) and weights[foi]
    ), 'pre-conditions failed for GPS.LoSC_Chang_optimizeByDelayBound(...)'
    beta_i = None
    min_delay = None  # in terms of delay
    best_m = None
    _iter = 1
    start = datetime.datetime.now()
    # progress is reported every `mod` subsets when verbose
    # mod = (((2 ** len(arrivals)) - 1) // 2500) if (((2 ** len(arrivals)) - 1) // 2500) != 0 \
    # else 1
    mod = 100000
    # hoisted: total number of non-empty subsets, invariant over the loop
    total = (2 ** len(arrivals)) - 1
    for M in subsetlist:
        if len(M) == 0:
            continue
        # BUGFIX: dropped pointless round() on an integer modulus
        if _iter % mod == 0 and _v:
            clear_last_line()
            logging.debug(f"M: {_iter} of {total}")
            percentage = round(_iter / total * 100)
            print(
                f"calculating {'#'*percentage}{'-'*(abs(100-percentage))} {percentage}%"
            )
        beta_candidate = GPS.LoSC_Chang(arrivals, sc, weights, foi, M)
        delay_candidate = NC.delay_bound_token_bucket_rate_latency(
            arrivals[foi], beta_candidate)
        if min_delay is None or delay_candidate <= min_delay:
            # we ignore negative delay bounds as they are not reasonable
            if delay_candidate >= 0:
                beta_i = beta_candidate
                min_delay = delay_candidate
                best_m = M
        _iter += 1
    write(f"M: {_iter-1} of {total}")
    duration = datetime.datetime.now() - start
    if _v:
        print_write(
            "total computation time: ",
            ":".join(str(round(float(i))).zfill(2) for i in str(duration).split(":")))
    # BUGFIX: the original raised TypeError on len(None) when no subset
    # produced a non-negative delay bound; return an explicit empty result.
    if best_m is None:
        return beta_i, 'best_m=None', 0, None
    return beta_i, f'best_m (len)={len(best_m)}', len(best_m), best_m
def LoSC_Bouillard_optimizeByDelayBound_new(arrivals, sc: ServiceCurve, weights, foi):
    """Optimize the Bouillard left-over service curve for `foi` by delay bound.

    Cross flows are ordered by an in-place selection procedure driven by
    `GPS.Bouillard_ti_new` (a dummy zero token bucket with weight 0 is
    prepended as a sentinel for the first selection step), then every split
    index `j` is tried and the candidate with the smallest non-negative delay
    bound is kept.

    :param arrivals: list of token-bucket arrival curves (one per flow)
    :param sc: aggregate service curve of the GPS node
    :param weights: per-flow GPS weights, aligned with `arrivals`
    :param foi: index of the flow of interest
    :return: tuple (best left-over service curve or None, description string)
    """
    assert (
        arrivals and len(arrivals) and weights and len(weights)
        and len(arrivals) == len(weights) and weights[foi]
    ), 'pre-conditions failed for GPS.LoSC_Bouillard_optimizeByDelayBound(...)'
    # Work on shallow copies so the caller's lists are not reordered/mutated
    # (the original popped/appended on the caller's lists in place).
    arrivals = list(arrivals)
    weights = list(weights)
    # re-indexing: move the flow of interest to the end
    arr_foi = arrivals.pop(foi)
    weights_foi = weights.pop(foi)
    # sentinel entry so Bouillard_ti_new can look at position i on the
    # first iteration; removed again after the selection loop
    arrivals.insert(0, TokenBucket(r=0, b=0))
    weights.insert(0, 0)
    for i in range(len(arrivals) - 1):
        ti_p1, ti_p1_ix = GPS.Bouillard_ti_new(weights, sc, arrivals, i)
        # selection sort step: swap the chosen flow into position i+1
        ai_p1 = arrivals[i + 1]
        wi_p1 = weights[i + 1]
        arrivals[i + 1] = arrivals[ti_p1_ix]
        arrivals[ti_p1_ix] = ai_p1
        weights[i + 1] = weights[ti_p1_ix]
        weights[ti_p1_ix] = wi_p1
    # after the loop "arrivals" and "weights" are already sorted
    arrivals.pop(0)
    weights.pop(0)
    arrivals.append(arr_foi)
    weights.append(weights_foi)
    new_foi = len(arrivals) - 1

    beta_i = None
    min_delay = None  # in terms of delay
    best_j = None
    _iter = 1
    start = datetime.datetime.now()
    for j in range(new_foi):
        # BUGFIX: original test `_iter % 5 <= 5` was always true (a mod-5
        # residue is at most 4); report progress only every 5th iteration.
        if _iter % 5 == 0:
            clear_last_line()
            logging.debug(f"j: {_iter} of {new_foi}")
            percentage = round(_iter / new_foi * 100)
            print(
                f"calculating {'#'*percentage}{'-'*(abs(100-percentage))} {percentage}%"
            )
        beta_candidate = GPS.LoSC_Bouillard_new(arrivals, sc, weights, new_foi, j)
        delay_candidate = NC.delay_bound_token_bucket_rate_latency(
            arrivals[new_foi], beta_candidate)
        if min_delay is None or delay_candidate <= min_delay:
            # we ignore negative delay bounds as they are not reasonable
            if delay_candidate >= 0:
                beta_i = beta_candidate
                min_delay = delay_candidate
                best_j = j
        _iter += 1
    write(f"j: {_iter-1} of {new_foi}")
    duration = datetime.datetime.now() - start
    print_write(
        "total computation time: ",
        ":".join(str(round(float(i))).zfill(2) for i in str(duration).split(":")))
    return beta_i, f'best_j={best_j}'
def LoSC_Bouillard_optimizeByDelayBound(arrivals, sc: ServiceCurve, weights, foi):
    """Optimize the Bouillard left-over service curve for `foi` by delay bound.

    Cross flows are sorted ascending by their `t_i` value (computed with
    `GPS.Bouillard_ti`), then every split index `j` is tried and the candidate
    with the smallest non-negative delay bound is kept.

    :param arrivals: list of token-bucket arrival curves (one per flow)
    :param sc: aggregate service curve of the GPS node
    :param weights: per-flow GPS weights, aligned with `arrivals`
    :param foi: index of the flow of interest
    :return: tuple (best left-over service curve or None, description string)
    """
    assert (
        arrivals and len(arrivals) and weights and len(weights)
        and len(arrivals) == len(weights) and weights[foi]
    ), 'pre-conditions failed for GPS.LoSC_Bouillard_optimizeByDelayBound(...)'
    # Work on shallow copies so the caller's lists are not reordered/mutated
    # (callers previously had to deepcopy defensively before calling this).
    arrivals = list(arrivals)
    weights = list(weights)
    # re-indexing: move the flow of interest to the end
    arr_foi = arrivals.pop(foi)
    weights_foi = weights.pop(foi)
    # sort cross flows ascending by their t_i value
    arrivals_ti_weight = []
    for i, a in enumerate(arrivals):
        ti = GPS.Bouillard_ti(weights, sc, arrivals, i)
        arrivals_ti_weight.append((a, ti, weights[i]))
    arrivals_ti_weight.sort(key=lambda x: x[1], reverse=False)
    arrivals = [a for a, _, _ in arrivals_ti_weight]
    weights = [w for _, _, w in arrivals_ti_weight]
    arrivals.append(arr_foi)
    weights.append(weights_foi)
    new_foi = len(arrivals) - 1

    beta_i = None
    min_delay = None  # in terms of delay
    best_j = None
    _iter = 1
    start = datetime.datetime.now()
    for j in range(new_foi):
        # BUGFIX: original test `_iter % 5 <= 5` was always true (a mod-5
        # residue is at most 4); report progress only every 5th iteration.
        if _iter % 5 == 0:
            clear_last_line()
            logging.debug(f"j: {_iter} of {new_foi}")
            percentage = round(_iter / new_foi * 100)
            print(
                f"calculating {'#'*percentage}{'-'*(abs(100-percentage))} {percentage}%"
            )
        # TODO(review): ask whether all flows are included or not; if all are
        # included the result has to match the PG result, else raise an error.
        beta_candidate = GPS.LoSC_Bouillard(arrivals, sc, weights, new_foi, j)
        delay_candidate = NC.delay_bound_token_bucket_rate_latency(
            arrivals[new_foi], beta_candidate)
        if min_delay is None or delay_candidate <= min_delay:
            # we ignore negative delay bounds as they are not reasonable
            if delay_candidate >= 0:
                beta_i = beta_candidate
                min_delay = delay_candidate
                best_j = j
        _iter += 1
    write(f"j: {_iter-1} of {new_foi}")
    duration = datetime.datetime.now() - start
    print_write(
        "total computation time: ",
        ":".join(str(round(float(i))).zfill(2) for i in str(duration).split(":")))
    return beta_i, f'best_j={best_j}'
def hetro_arr_analysis_OBDB(number_of_flows, weight_mode: WeightsMode):
    """Run the optimize-by-delay-bound (OBDB) analysis on a heterogeneous
    arrival scenario and print/log a comparison of the four left-over
    service-curve strategies: PG (General), Chang, Bouillard, and
    Burchard/Liebeherr.

    :param number_of_flows: number of flows to generate for the scenario
    :param weight_mode: GPS weight-assignment mode for the generated flows
    :return: None (results are emitted via print_write)
    """
    result = dict()
    t, alphas, alphas_weights, foi_index, beta = setupInputs(
        number_of_flows, weight_mode)

    def _foi_delay(losc):
        # delay bound of the flow of interest under the given left-over curve
        return NC.delay_bound_token_bucket_rate_latency(
            alpha=alphas[foi_index], beta=losc)

    # --- PG (General): closed-form left-over curve, no optimization ---
    result["PG (General)"] = {
        'LoSC': GPS.LoSC_PG(sc=beta, index=foi_index, weights=alphas_weights)
    }
    result["PG (General)"]['delay bound'] = _foi_delay(
        result['PG (General)']['LoSC'])

    # --- Chang: optimize over all non-empty subsets of flows ---
    print_write("Chang")
    print_write("-----\n\n")
    subsets = powerset_non_empty_generator(list(range(len(alphas))))
    result["Chang"] = {
        'LoSC': GPS.LoSC_Chang_optimizeByDelayBound(arrivals=alphas,
                                                    sc=beta,
                                                    weights=alphas_weights,
                                                    foi=foi_index,
                                                    subsetlist=subsets)
    }
    result["Chang"]['delay bound'] = _foi_delay(result['Chang']['LoSC'][0])
    print_write("--done\n\n")

    # --- Bouillard: optimize over split indices (mutates its inputs,
    #     hence the deep copies) ---
    print_write("Bouillard")
    print_write("---------\n\n")
    result['Bouillard'] = {
        'LoSC':
        GPS.LoSC_Bouillard_optimizeByDelayBound(
            arrivals=copy.deepcopy(alphas),
            sc=beta,
            weights=copy.deepcopy(alphas_weights),
            foi=foi_index)
    }
    result['Bouillard']['delay bound'] = _foi_delay(
        result['Bouillard']['LoSC'][0])
    print_write("--done\n\n")

    # --- Burchard/Liebeherr: optimize over subsets that contain the foi ---
    print_write("BL")
    print_write("--\n\n")
    # we change N\{i} to N such that always i in M to make its semantic consistent with chang
    # semantic
    _subset_BL = powerset_non_empty_generator(list(range(len(alphas))))
    subset_BL = filter_generator(lambda x: foi_index in x, _subset_BL)
    result["Burchard, Liebeherr"] = {
        'LoSC': GPS.LoSC_BL_optimizeByDelayBound(arrivals=alphas,
                                                 sc=beta,
                                                 weights=alphas_weights,
                                                 foi=foi_index,
                                                 subsetlist=subset_BL)
    }
    result["Burchard, Liebeherr"]['delay bound'] = _foi_delay(
        result['Burchard, Liebeherr']['LoSC'][0])
    print_write("--done\n\n")

    # --- summary report ---
    time.sleep(0.5)
    print_write()
    print_write("number of arrivals:", len(alphas))
    print_write('flow of interest:', alphas[foi_index])
    print_write('weights mode:', weight_mode)
    print_write('weight of the flow of interest:', alphas_weights[foi_index])
    print_write("distinct weights: ", list(set(alphas_weights)))
    for key, value in result.items():
        print_write(f'{key: <30}', ": ", value)
    print_write("\n\n\n")