def main():
    """Compare guard bandwidth analyses using consensus vs descriptor values.

    Usage: <consensus file> <descriptor file>

    Runs do_analysis() twice -- once with bandwidths taken from the
    consensus, once with bandwidths taken from the descriptors -- and
    plots the fraction of original bandwidth remaining at each cutoff.
    """
    # Print usage if corrupted CLI
    if len(sys.argv) < 3:
        print("usage: %s <consensus file> <descriptor file>" % sys.argv[0])
        print("example: %s cached-consensus cached-descriptors" % sys.argv[0])
        sys.exit(1)

    consensus_fname = sys.argv[1]
    descriptors_fname = sys.argv[2]

    # Initialize matplotlib/seaborn/etc.
    init_graph_style()

    # First pass: bandwidths from the consensus only.
    guards = util.parse_consensus(consensus_fname)
    guards.update_guard_attributes()
    consensus_guards_n, consensus_cutoffs = do_analysis(guards)

    # Second pass: re-parse, this time merging in descriptor bandwidths.
    guards = util.parse_consensus(consensus_fname, descriptors_fname)
    guards.update_guard_attributes()
    desc_guards_n, desc_cutoffs = do_analysis(guards)

    # Both passes must have walked the same cutoff grid; a bare assert
    # would be silently stripped under "python -O", so fail loudly.
    if consensus_cutoffs != desc_cutoffs:
        raise ValueError("cutoff mismatch between consensus and descriptor runs")

    # Change frequency of x-axis ticks
    fig, ax = plt.subplots()
    ax.set_xticks(xrange(0, THRESHOLD_MAX, 500))
    ax.set_title("Fraction of original bandwidth over different bw cutoffs")
    plt.xlabel("Bandwidth cutoff in kB/s")
    plt.ylabel("Fraction of original bandwidth")

    # One curve per bandwidth source, sharing the cutoff axis.
    speed = pd.Series(["descriptor bw", "consensus bw"])
    graphs = np.dstack((desc_guards_n, consensus_guards_n))
    sns.tsplot(graphs, condition=speed, time=desc_cutoffs)
    plt.show()
def main():
    """Plot guard-selection probability distributions before/after pruning.

    Usage: <consensus file> <speed threshold in kB>

    Draws a boxplot comparing three distributions: a uniform one
    (best case), the original guard probability distribution, and the
    distribution after removing guards slower than the threshold.
    """
    # Print usage if corrupted CLI
    if len(sys.argv) < 3:
        print("usage: %s <consensus file> <speed threshold in kB>" % sys.argv[0])
        print("example: %s cached-consensus 200" % sys.argv[0])
        sys.exit(1)

    # Get speed threshold and consensus filename from CLI
    speed_threshold = int(sys.argv[2])
    consensus_fname = sys.argv[1]

    # Initialize matplotlib/seaborn/etc.
    init_graph_style()

    # Parse the consensus
    guards = util.parse_consensus(consensus_fname)
    # Consensus parsing is done: calculate all the guard attributes.
    guards.update_guard_attributes()
    # Get the original probability distribution.
    original_prob_distr = guards.get_prob_distr()

    # Prune the guard list (remove slow guards)
    guards.prune_guards(speed_threshold)
    # Pruning is done: recalculate the guard attributes.
    guards.update_guard_attributes()
    # Get the pruned probability distribution.
    pruned_prob_distr = guards.get_prob_distr()

    # Create uniform distribution too (for comparison).
    # We use the size of the original probability distribution since
    # it's the best case scenario (more guards).
    size = len(original_prob_distr)
    if size == 0:
        # An empty consensus would otherwise crash with ZeroDivisionError.
        logging.error("No guards found in %s", consensus_fname)
        sys.exit(1)
    mean = 1 / float(size)
    uniform = [mean] * size
    logging.debug("Uniform distribution with mean = %s", mean)

    # Plot it!
    distrs = (uniform, original_prob_distr, pruned_prob_distr)
    labels = ("uniform probability distribution (best case) [%d guards]"
              % len(original_prob_distr),
              "original probability distribution [%d guards]"
              % len(original_prob_distr),
              "probability distribution after pruning at %s kB/s [%d guards]"
              % (speed_threshold, len(pruned_prob_distr)))
    sns.boxplot(distrs, names=labels, color="Set2")
    plt.show()
def main():
    """Entry point: read the CLI, parse the consensus, run the analysis."""
    # Too few arguments -> show usage and bail out.
    if len(sys.argv) < 3:
        print("usage: %s <consensus file> <speed threshold in kB>" % sys.argv[0])
        print("example: %s cached-consensus 200" % sys.argv[0])
        sys.exit(1)

    # argv[1] is the consensus file, argv[2] the bandwidth cutoff in kB.
    consensus_file = sys.argv[1]
    threshold_kb = int(sys.argv[2])

    # Parse the consensus and hand the resulting guards to the analysis.
    parsed_guards = util.parse_consensus(consensus_file)
    analysis(parsed_guards, threshold_kb)
def _parse_guards_from_cli(consensus_fname):
    """Parse the consensus named on the CLI, merging descriptor bandwidths
    when a descriptor file is given as argv[2].  Exits on extra arguments."""
    if len(sys.argv) == 2:
        return util.parse_consensus(consensus_fname)
    elif len(sys.argv) == 3:
        return util.parse_consensus(consensus_fname, sys.argv[2])
    log.error(
        "More arguments than needed. You don't know what you are doing! Exiting!"
    )
    sys.exit(1)


def main():
    """Plot CDFs of guard-selection probability versus guard bandwidth.

    Usage: <consensus file> [<descriptor file>]

    Compares three strategies on one log-x CDF plot: a single guard,
    three guards (averaged bandwidth), and a single guard with a
    2000 kB/s bandwidth cutoff.  Bandwidths come from the consensus,
    or from the descriptors when a descriptor file is given.
    """
    # Print usage if corrupted CLI
    if len(sys.argv) < 2:
        print("usage: %s <consensus file> [<descriptor file>]" % sys.argv[0])
        print("example: %s cached-consensus cached-descriptors" % sys.argv[0])
        sys.exit(1)

    consensus_fname = sys.argv[1]

    # Initialize matplotlib/seaborn/etc.
    init_graph_style()

    # First pass: full (unpruned) guard set.
    guards = _parse_guards_from_cli(consensus_fname)
    # Consensus parsing is done: calculate all the guard attributes.
    guards.update_guard_attributes()

    one_guard_bw, one_guard_probs = guards.get_probs_after_merge_duplicate_bw()
    three_guard_bw, three_guard_probs = sample_three_guards(
        one_guard_bw, one_guard_probs)

    # Second pass: same parse, but drop guards slower than 2000 kB/s.
    guards = _parse_guards_from_cli(consensus_fname)
    guards.prune_guards(2000)
    # Pruning is done: recalculate the guard attributes.
    guards.update_guard_attributes()
    one_guard_bw_2000, one_guard_probs_2000 = \
        guards.get_probs_after_merge_duplicate_bw()

    logging.debug("ORIGINAL")
    logging.debug("three bw: %s", three_guard_bw)
    logging.debug("three probs: %s", three_guard_probs)
    logging.debug("one bw: %s", one_guard_bw)
    logging.debug("one probs: %s", one_guard_probs)
    logging.debug("=========")

    # Bring all three distributions onto a common bandwidth axis so they
    # can share the x-axis of the plot.
    three_guard_bw, three_guard_probs = normalize_probs(
        three_guard_bw, three_guard_probs, one_guard_bw)
    three_guard_bw, three_guard_probs = normalize_probs(
        three_guard_bw, three_guard_probs, one_guard_bw_2000)
    one_guard_bw, one_guard_probs = normalize_probs(
        one_guard_bw, one_guard_probs, three_guard_bw)
    one_guard_bw, one_guard_probs = normalize_probs(
        one_guard_bw, one_guard_probs, one_guard_bw_2000)
    one_guard_bw_2000, one_guard_probs_2000 = normalize_probs(
        one_guard_bw_2000, one_guard_probs_2000, one_guard_bw)
    one_guard_bw_2000, one_guard_probs_2000 = normalize_probs(
        one_guard_bw_2000, one_guard_probs_2000, three_guard_bw)

    logging.debug("NORMALIZED")
    logging.debug("three bw: %s", three_guard_bw)
    logging.debug("three probs: %s", three_guard_probs)
    logging.debug("one bw: %s", one_guard_bw)
    logging.debug("one probs: %s", one_guard_probs)
    logging.debug("=========")

    one_guard_cdf = get_cdf_from_probs(one_guard_probs)
    one_guard_cdf_2000 = get_cdf_from_probs(one_guard_probs_2000)
    three_guard_cdf = get_cdf_from_probs(three_guard_probs)

    # Change frequency of x-axis ticks
    fig, ax = plt.subplots()
    ax.set_xscale('log')
    ax.set_yticks((0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1))
    plt.ylim(0, 1)

    # Title reflects which bandwidth source was used (see argv handling).
    if len(sys.argv) == 2:
        ax.set_title(
            "CDF of probabilities of guard bandwidth (%d samples) (consensus bandwidth)"
            % SAMPLES_N)
    else:
        ax.set_title(
            "CDF of probabilities of guard bandwidth (%d samples) (descriptor bandwidth)"
            % SAMPLES_N)
    plt.xlabel(
        "Guard bandwidth in kB/s (average guard bandwidth for 3 guards)")
    plt.ylabel("CDF of guard selection")

    speed = pd.Series([
        "one guard", "three guards",
        "one guard (guard bw cutoff at 2000 kB/s)"
    ])
    graphs = np.dstack((one_guard_cdf, three_guard_cdf, one_guard_cdf_2000))
    sns.tsplot(graphs, condition=speed, time=one_guard_bw)
    plt.show()
def main():
    """Plot expected client load on the biggest/median/smallest guard.

    Usage: <consensus file> [<descriptor file>]

    For every bandwidth cutoff in [0, THRESHOLD_MAX) stepped by
    THRESHOLD_STEP, prunes guards below the cutoff and records how many
    of USERS_N simultaneous clients the biggest, median and smallest
    remaining guard would be expected to serve; plots all three on a
    log-scale y axis.
    """
    # Print usage if corrupted CLI
    if len(sys.argv) < 2:
        print("usage: %s <consensus file> [<descriptor file>]" % sys.argv[0])
        print("example: %s cached-consensus cached-descriptors" % sys.argv[0])
        sys.exit(1)

    consensus_fname = sys.argv[1]

    # Initialize matplotlib/seaborn/etc.
    init_graph_style()

    if len(sys.argv) == 2:
        guards = util.parse_consensus(consensus_fname)
    elif len(sys.argv) == 3:
        guards = util.parse_consensus(consensus_fname, sys.argv[2])
    else:
        log.error(
            "More arguments than needed. You don't know what you are doing! Exiting!"
        )
        sys.exit(1)

    # Consensus parsing is done: calculate all the guard attributes.
    guards.update_guard_attributes()

    # List of cutoffs
    cutoffs = []
    # Expected number of simultaneous clients on biggest/median/smallest guards
    biggest_guard_clients = []
    median_guard_clients = []
    smallest_guard_clients = []

    # Referenced after the loop for the plot legend; initialized here so
    # an empty cutoff range fails with a clear error rather than NameError.
    biggest_guard = None

    # Calculate client counts for different speed thresholds.  Cutoffs
    # only grow, so pruning the same guard list repeatedly is cumulative.
    for cutoff in xrange(0, THRESHOLD_MAX, THRESHOLD_STEP):
        # Prune the guard list (remove slow guards)
        guards.prune_guards(cutoff)
        guards.update_guard_attributes()
        cutoffs.append(cutoff)

        smallest_guard = guards.get_smallest_guard()
        median_guard = guards.get_median_guard()
        biggest_guard = guards.get_biggest_guard()

        # guard_prob is the selection probability; scale it to a client count.
        biggest_guard_clients.append(biggest_guard.guard_prob * USERS_N)
        median_guard_clients.append(median_guard.guard_prob * USERS_N)
        smallest_guard_clients.append(smallest_guard.guard_prob * USERS_N)

    # Change frequency of x-axis ticks
    fig, ax = plt.subplots()
    ax.set_xticks(xrange(0, THRESHOLD_MAX, 500))
    # Typo fix: original title said "simulatenous".
    ax.set_title(
        "Expected number of clients on biggest/smallest guard (consensus bw) (%d simultaneous clients)"
        % USERS_N)
    plt.xlabel("Bandwidth cutoff in kB/s")
    plt.ylabel("Expected number of clients")
    ax.set_yscale('log')
    sns.set(style="darkgrid", context="talk")

    speed = pd.Series([
        "biggest guard (%s)" % biggest_guard.nickname,
        "median guard",
        "smallest guard",
    ])
    graphs = np.dstack(
        (biggest_guard_clients, median_guard_clients, smallest_guard_clients))
    sns.tsplot(graphs, condition=speed, time=cutoffs)
    plt.show()