def main():
    parser = argparse.ArgumentParser(
        description='Calculate meta information for real traces',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('network')
    parser.add_argument('traceroutes')
    parser.add_argument('output')
    parser.add_argument('--maxi', type=int, default=500)
    # for parallelization
    parser.add_argument('--lower-bound', '-lb', type=int, default=0, dest='lb')
    parser.add_argument('--upper-bound', '-ub', type=int, default=-1, dest='ub')
    arguments = parser.parse_args()

    g = helpers.load_as_inferred_links(arguments.network)
    traceroutes = helpers.load_from_json(arguments.traceroutes)
    # traceroutes = random.sample(traceroutes, arguments.maxi)

    # clamp the slice bounds to the valid range for parallel runs
    arguments.lb = arguments.lb if 0 <= arguments.lb <= len(traceroutes) else 0
    arguments.ub = (arguments.ub
                    if 0 <= arguments.ub <= len(traceroutes)
                    else len(traceroutes))

    result = filter(g, traceroutes[arguments.lb:arguments.ub])
    helpers.save_to_json(arguments.output, result)
def sh_gen(arguments):
    ba_graph = helpers.load_network(arguments.ba_graph)
    trace_count = int(arguments.route_count)
    node_ids = range(ba_graph.vcount())
    logger.info('Trace count: %d' % trace_count)

    random_pairs = [random.sample(node_ids, 2) for x in xrange(trace_count)]
    random_pairs = [(ba_graph.vs[x[0]]['name'], ba_graph.vs[x[1]]['name'])
                    for x in random_pairs]
    logger.info('Random pair count: %d' % len(random_pairs))

    shls = []
    results = []
    for s_name, t_name in random_pairs:
        # shortest path length in edges, +1 to count nodes
        shl = ba_graph.shortest_paths(s_name, t_name)[0][0] + 1
        # re-sample until a connected pair is found
        while shl == float('inf'):
            s, t = random.sample(node_ids, 2)
            s_name = ba_graph.vs[s]['name']
            t_name = ba_graph.vs[t]['name']
            shl = ba_graph.shortest_paths(s_name, t_name)[0][0] + 1
        logger.debug('From %s to %s SH: %d' % (s_name, t_name, shl))
        shls.append(shl)
        results.append([(s_name, t_name), shl])
    # result = zip(random_pairs, shls)
    helpers.save_to_json(arguments.sh_path_output, results)
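# Output shape, sketched under the assumption that helpers.save_to_json simply
# dumps the Python object as JSON: `results` is a list of
# [[source_name, target_name], node_count] entries, for example
#   [[["V12", "V87"], 4], [["V3", "V55"], 3]]
# The node names "V12", "V87", ... above are hypothetical.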
def upwalker_counter(g, meta, vf_g, arguments):
    N = g.vcount()
    progress = progressbar1.DummyProgressBar(end=10, width=15)
    if arguments.progressbar:
        progress = progressbar1.AnimatedProgressBar(end=len(meta), width=15)

    # trace_up_map = {}
    # random_up_map = {}
    trace_up_counter = []
    random_up_counter = []
    for m in meta:
        progress += 1
        progress.show_progress()

        trace = m[helpers.TRACE]
        trace_dir = vft.trace_to_string(g, trace)
        trace_up_count = trace_dir.count('U')
        trace_up_counter.append(trace_up_count)
        # trace_up_map[trace_up_count] = trace_up_map.get(trace_up_count, 0) + 1

        s, t = trace[0], trace[-1]
        s_idx, t_idx = vft.node_to_nodeid(g, s), vft.node_to_nodeid(g, t)
        random_vf_route = helpers.random_route_walk(vf_g, s_idx, t_idx + N,
                                                    len(trace), named=False,
                                                    weight_field='VFweight')
        random_vf_route = vft.vf_route_converter(random_vf_route, N)
        random_vf_dir = vft.trace_to_string(g, random_vf_route)
        random_vf_count = random_vf_dir.count('U')
        random_up_counter.append(random_vf_count)
        # random_up_map[random_vf_count] = random_up_map.get(random_vf_count, 0) + 1

    real_counter = collections.Counter(trace_up_counter)
    real_up = ' '.join(
        ['%s: %s' % (k, real_counter[k]) for k in sorted(list(real_counter))])
    random_counter = collections.Counter(random_up_counter)
    random_up = ' '.join([
        '%s: %s' % (k, random_counter[k]) for k in sorted(list(random_counter))
    ])

    logger.info('')
    logger.info('Real trace UP counter: %s' % real_up)
    logger.info('Random vf trace up counter: %s' % random_up)
    helpers.save_to_json(arguments.out, {
        'REAL': dict(real_counter),
        'RANDOM': dict(random_counter)
    })

    keys = sorted(set(list(real_counter) + list(random_counter)))
    logger.info('IDX;REAL;RANDOM')
    for k in keys:
        logger.info('%s;%d;%d' % (k, real_counter[k], random_counter[k]))
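# Saved output, sketched (assuming helpers.save_to_json writes plain JSON): a
# histogram of up-step counts ('U' moves) over traces, kept separately for the
# real traces and the random valley-free walks, e.g.
#   {"REAL": {"0": 120, "1": 45, "2": 7}, "RANDOM": {"0": 98, "1": 60}}
# The concrete numbers above are illustrative only; JSON turns the integer
# keys into strings.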
def ba_calc(arguments):
    ba_graph = helpers.load_network(arguments.ba_graph)
    sh_paths = helpers.load_from_json(arguments.point_pairs)
    out = arguments.out
    min_stretch = arguments.min_stretch
    max_stretch = arguments.max_stretch

    # clamp and order the slice bounds used for parallel runs
    max_c = len(sh_paths)
    arguments.lb = arguments.lb if 0 <= arguments.lb <= max_c else 0
    arguments.ub = arguments.ub if 0 <= arguments.ub <= max_c else max_c
    arguments.lb, arguments.ub = (min(arguments.lb, arguments.ub),
                                  max(arguments.lb, arguments.ub))
    sh_paths = sh_paths[arguments.lb:arguments.ub]

    vf_g_closeness = vft.convert_to_vf(ba_graph, vfmode=vft.CLOSENESS)

    results = [[] for x in xrange(min_stretch, max_stretch + 1)]
    for stretch in xrange(min_stretch, max_stretch + 1):
        logger.info('Calculate results with stretch %d' % stretch)
        result = ba_generator(ba_graph, sh_paths, stretch, vf_g_closeness,
                              arguments.progressbar)
        results[stretch - min_stretch] = result
    helpers.save_to_json(out, results)
def main():
    parser = argparse.ArgumentParser(
        description='Calculate meta information for real traces',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('network')
    parser.add_argument('meta')
    parser.add_argument('output', type=argparse.FileType('w'))
    # parser.add_argument('--vfmode', type=str, default='labeled', dest='vfmode',
    #                     choices=['labeled', 'closeness'])
    # for parallelization
    parser.add_argument('--lower-bound', '-lb', type=int, default=0, dest='lb')
    parser.add_argument('--upper-bound', '-ub', type=int, default=-1, dest='ub')
    parser.add_argument('--progressbar', action='store_true')
    parser.add_argument('--verbose', '-v', action='count', default=0)
    parser.add_argument('--with-prelabeled', action='store_true')
    parser.add_argument('--with-closeness', action='store_true')
    parser.add_argument('--with-degree', action='store_true')
    parser.add_argument('--with-lp-hard', action='store_true')
    parser.add_argument('--with-lp-soft', action='store_true')
    # parser.add_argument('--with-lp', action='store_true')
    # parser.add_argument('--with-vf', action='store_true')
    parser.add_argument('--try-per-trace', type=int, default=1,
                        dest='try_per_trace')
    arguments = parser.parse_args()

    arguments.verbose = min(len(helpers.LEVELS) - 1, arguments.verbose)
    logging.getLogger('compnet').setLevel(helpers.LEVELS[arguments.verbose])

    g = helpers.load_network(arguments.network)
    meta = helpers.load_from_json(arguments.meta)

    arguments.lb = arguments.lb if 0 <= arguments.lb <= len(meta) else 0
    arguments.ub = arguments.ub if 0 <= arguments.ub <= len(meta) else len(meta)

    flags = {
        FLAG_PRELABELED: arguments.with_prelabeled,
        FLAG_CLOSENESS: arguments.with_closeness,
        FLAG_DEGREE: arguments.with_degree,
        FLAG_LP_HARD: arguments.with_lp_hard,
        FLAG_LP_SOFT: arguments.with_lp_soft
    }
    # if arguments.vfmode == 'labeled': mode = vft.ORDER_PRELABELED
    # elif arguments.vfmode == 'closeness': mode = vft.ORDER_CLOSENESS
    # else: raise RuntimeError('Unhandled vfmode')

    meta = meta[arguments.lb:arguments.ub]
    # update meta in place
    purify(g, meta, flags, arguments.try_per_trace, arguments.progressbar)
    logger.info('Save to %s' % arguments.output)
    helpers.save_to_json(arguments.output, meta)
def wrap_watts_trace_gen(args):
    g = helpers.load_network(args.network)
    traceroutes = helpers.load_from_json(args.original_traceroutes)

    max_c = len(traceroutes)
    args.lb = args.lb if 0 <= args.lb <= max_c else 0
    args.ub = args.ub if 0 <= args.ub <= max_c else max_c
    args.lb, args.ub = (min(args.lb, args.ub), max(args.lb, args.ub))
    traceroutes = traceroutes[args.lb:args.ub]

    watts_traceroutes = watts_trace_gen(g, traceroutes, args.progressbar)
    helpers.save_to_json(args.traceroute_dest, watts_traceroutes)
def main():
    parser = argparse.ArgumentParser(
        description=('Filter out non vf and non lp traceroutes '
                     'from the given traceroute list'),
        parents=[argparse_general.commonParser],
        **argparse_general.commonParams)
    parser.add_argument('network')
    parser.add_argument('traceroutes')
    parser.add_argument(
        '--filter', default='sh+loop+ex+lp',
        help=('Possible values: sh (short), loop (AS number repetition), '
              'ex (non existent), vf (non valley free), '
              'lp (non local preferenced), or any combination joined with '
              'a + sign. Note that lp automatically implies vf+lp'))
    parser.add_argument('--lp-type', default='first',
                        choices=['first', 'all'], dest='first_edge')
    parser.add_argument('output')
    arguments = parser.parse_args()

    arguments.verbose = min(len(helpers.LEVELS) - 1, arguments.verbose)
    logging.getLogger('compnet').setLevel(helpers.LEVELS[arguments.verbose])

    arguments.first_edge = arguments.first_edge == 'first'
    if arguments.first_edge:
        logger.debug('LP only on the first edge')
    else:
        logger.debug('LP on all edges')

    g = helpers.load_network(arguments.network)
    traceroutes = helpers.load_from_json(arguments.traceroutes)

    arguments.lb = arguments.lb if 0 <= arguments.lb <= len(traceroutes) else 0
    arguments.ub = (arguments.ub
                    if 0 <= arguments.ub <= len(traceroutes)
                    else len(traceroutes))

    arguments.filter = arguments.filter.replace('lp', 'vf+lp')
    filters = arguments.filter.split('+')
    result = filter(g, traceroutes[arguments.lb:arguments.ub], filters,
                    arguments.first_edge)
    helpers.save_to_json(arguments.output, result)
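# Hypothetical invocation (the script name is an assumption, the flags are the
# ones defined above; --lower-bound/--upper-bound come from the shared parser):
#   python filter_traceroutes.py as_graph.net traces.json filtered.json \
#       --filter sh+loop+ex+lp --lp-type first
# Because 'lp' is rewritten to 'vf+lp' before splitting on '+', requesting the
# lp filter always applies the valley-free filter as well.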
def main():
    parser = argparse.ArgumentParser(
        description=('Generate test graphs'),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('graph_out', metavar='graph-out')
    parser.add_argument('trace_out', metavar='trace-out')
    parser.add_argument('--progressbar', action='store_true')
    parser.add_argument('--verbose', '-v', action='count', default=0)
    parser.add_argument('--node-count', '-nc', type=int, dest='node_count',
                        default=100)
    parser.add_argument('--network-type',
                        choices=[x for x in graphs_map.iterkeys()],
                        default='star')
    parser.add_argument('--trace-count', type=int, default=50)
    arguments = parser.parse_args()

    arguments.verbose = min(len(helpers.LEVELS) - 1, arguments.verbose)
    logging.getLogger('compnet').setLevel(helpers.LEVELS[arguments.verbose])
    show_progress = arguments.progressbar

    # g = graphs_map[arguments.network_type](arguments.node_count)
    # g = igraph.Graph.Barabasi(900, 9)
    # for n in g.vs:
    #     n['closeness'] = g.closeness(n)
    #     n['name'] = 'V%d' % n.index
    # g.save(arguments.graph_out)
    g = igraph.load(arguments.graph_out)

    pairs = [
        random.sample(xrange(0, g.vcount()), 2)
        for x in xrange(0, arguments.trace_count)
    ]
    pairs = [[g.vs[x[0]]['name'], g.vs[x[1]]['name']] for x in pairs]
    traces = []
    for p in pairs:
        trace = random.choice(g.get_all_shortest_paths(p[0], p[1]))
        trace = [g.vs[x]['name'] for x in trace]
        traces.append(trace)
    helpers.save_to_json(arguments.trace_out, traces)
def purify(g, out, count=1000, show_progress=False):
    logger.info('Started')
    nodes = range(0, g.vcount())
    endpoints = [random.sample(nodes, 2) for idx in range(0, count)]

    progress = progressbar1.DummyProgressBar(end=10, width=15)
    if show_progress:
        progress = progressbar1.AnimatedProgressBar(end=len(endpoints),
                                                    width=15)

    traces = []
    for endpoint in endpoints:
        progress += 1
        progress.show_progress()
        src, dst = endpoint
        trace = g.get_shortest_paths(src, dst)[0]
        if len(trace) > 0:
            traces.append([g.vs[x]['name'] for x in trace])
        logger.info('Len last trace: %d' % len(trace))
    helpers.save_to_json(out, traces)
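# Resulting structure, sketched: `traces` is a list of shortest paths given as
# node-name lists, one per connected random endpoint pair, e.g.
#   [["V4", "V17", "V2"], ["V9", "V2"]]
# (the names are illustrative; pairs with no path are silently skipped).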
def purify(g, meta, out, count=1000):
    results = list()
    results2 = list()
    results3 = list()
    all_vf = 0
    all_nonvf = 0
    all_vf_closeness = 0
    all_nonvf_closeness = 0

    short_results = list()
    short_results2 = list()
    short_results3 = list()
    all_short_vf = 0
    all_short_nonvf = 0
    all_short_vf_closeness = 0
    all_short_nonvf_closeness = 0

    long_results = list()
    long_results2 = list()
    long_results3 = list()
    all_long_vf = 0
    all_long_nonvf = 0
    all_long_vf_closeness = 0
    all_long_nonvf_closeness = 0

    # remove traces whose all_path statistics are already calculated
    logger.warn('[r]ONLY NOT FILLED PATHS[/]')
    meta = [x for x in meta if not helpers.ALL_PATH_COUNT in x]
    # traces with a maximum stretch
    logger.warn('[r]!!!ONLY WITH LOW STRETCH[/]')
    meta = [x for x in meta if x[helpers.STRETCH] < 4]
    # shorter meta records
    logger.warn('[r]!!!ONLY SHORT TRACES[/]')
    meta = [x for x in meta if len(x[helpers.TRACE]) < 5]

    meta_map = {tuple(x[helpers.TRACE]): x for x in meta}
    # traceroutes = [x for x in meta if x[TRACE_LEN] == x[SH_LEN]]
    logger.info('All trace count: %d' % len(meta))
    tr_count = min(len(meta), count)
    meta = random.sample(meta, tr_count)
    logger.info('Chosen trace count: %d' % len(meta))

    real_vf = [x for x in meta if x[helpers.IS_VF] == 1]
    real_nonvf = [x for x in meta if x[helpers.IS_VF] == 0]
    real_vf_closeness = [x for x in meta if x[helpers.IS_VF_CLOSENESS] == 1]
    real_nonvf_closeness = [x for x in meta if x[helpers.IS_VF_CLOSENESS] == 0]
    logger.info('Real vf: %f[%d]' %
                (len(real_vf) / float(len(meta)), len(real_vf)))
    logger.info('Real nonvf: %f[%d]' %
                (len(real_nonvf) / float(len(meta)), len(real_nonvf)))
    logger.info('Real vf closeness: %f[%d]' %
                (len(real_vf_closeness) / float(len(meta)),
                 len(real_vf_closeness)))
    logger.info('Real nonvf closeness: %f[%d]' %
                (len(real_nonvf_closeness) / float(len(meta)),
                 len(real_nonvf_closeness)))

    logger.info('Remove unknown traces. Trace count before: %d' % len(meta))
    traceroutes = [x[helpers.TRACE] for x in meta]
    traceroutes, ignored = vft.trace_clean(g, traceroutes)
    logger.info('Traceroutes after: %d. Ignored: %d' %
                (len(traceroutes), ignored))
    traceroutes = vft.trace_in_vertex_id(g, traceroutes)

    progress = progressbar1.AnimatedProgressBar(end=len(traceroutes), width=15)
    for trace in traceroutes:
        progress += 1
        progress.show_progress()
        for x in range(0, g.vcount()):
            g.vs[x]['traces'] = dict()
        s, t = trace[0], trace[-1]
        sh_path = g.get_all_shortest_paths(s, t, mode=i.OUT)
        all_path = helpers.dfs_mark(copy.deepcopy(g), s, t, len(trace))
        # if len(sh_path) != len(all_path):
        #     print len(sh_path)
        #     print len(all_path)
        #     print s, t

        # sanity check
        for x in all_path:
            if x[0] != s or x[-1] != t:
                logger.error('ALERT')
        if len(set([tuple(x) for x in all_path])) != len(all_path):
            logger.error('LENGTH ALERT')
            logger.error('%s' % len(all_path))
            logger.error('%s' % len(set([tuple(x) for x in all_path])))
            logger.error('%s' % sorted(all_path))

        long_path = [x for x in all_path if len(x) == len(trace)]
        short_path = [x for x in all_path if len(x) < len(trace)]

        named_trace = [g.vs[x]['name'] for x in trace]
        extra_meta = {
            helpers.ALL_PATH_COUNT: len(all_path),
            helpers.SAME_LONG_PATH_COUNT: len(long_path),
            helpers.SHORTER_PATH_COUNT: len(short_path)
        }
        meta_map[tuple(named_trace)].update(extra_meta)

        vf_count = sum([1 if vft.is_valley_free(g, x, vfmode=vft.PRELABELED)
                        else 0 for x in all_path])
        nonvf = len(all_path) - vf_count
        vf_closeness_count = sum(
            [1 if vft.is_valley_free(g, x, vfmode=vft.CLOSENESS) else 0
             for x in all_path])
        nonvf_closeness = len(all_path) - vf_closeness_count

        tmp = [1 if vft.is_valley_free(g, x, vfmode=vft.PRELABELED) else 0
               for x in short_path]
        short_vf_count = sum(tmp)
        short_nonvf = len(tmp) - short_vf_count
        tmp = [1 if vft.is_valley_free(g, x, vfmode=vft.CLOSENESS) else 0
               for x in short_path]
        short_vf_closeness_count = sum(tmp)
        short_nonvf_closeness = len(tmp) - short_vf_closeness_count

        tmp = [1 if vft.is_valley_free(g, x, vfmode=vft.PRELABELED) else 0
               for x in long_path]
        long_vf_count = sum(tmp)
        long_nonvf = len(tmp) - long_vf_count
        tmp = [1 if vft.is_valley_free(g, x, vfmode=vft.CLOSENESS) else 0
               for x in long_path]
        long_vf_closeness_count = sum(tmp)
        long_nonvf_closeness = len(tmp) - long_vf_closeness_count

        extra_meta = {
            helpers.ALL_PATH_VF_COUNT: vf_closeness_count,
            helpers.SAME_LONG_PATH_VF_COUNT: long_vf_closeness_count,
            helpers.SHORTER_PATH_VF_COUNT: short_vf_closeness_count
        }
        meta_map[tuple(named_trace)].update(extra_meta)

        all_vf += vf_count
        all_nonvf += nonvf
        all_vf_closeness += vf_closeness_count
        all_nonvf_closeness += nonvf_closeness
        all_long_vf += long_vf_count
        all_long_nonvf += long_nonvf
        all_long_vf_closeness += long_vf_closeness_count
        all_long_nonvf_closeness += long_nonvf_closeness
        all_short_vf += short_vf_count
        all_short_nonvf += short_nonvf
        all_short_vf_closeness += short_vf_closeness_count
        all_short_nonvf_closeness += short_nonvf_closeness

        results.append(vf_count / float(len(all_path)))
        results3.append(vf_closeness_count / float(len(all_path)))
        if len(all_path) > 1:
            results2.append(vf_count / float(len(all_path)))
        long_results.append(long_vf_count / float(len(long_path)))
        long_results3.append(long_vf_closeness_count / float(len(long_path)))
        if len(long_path) > 1:
            long_results2.append(long_vf_count / float(len(long_path)))
        if len(short_path) > 0:
            short_results.append(short_vf_count / float(len(short_path)))
            short_results3.append(
                short_vf_closeness_count / float(len(short_path)))
        else:
            pass
            # short_results.append(0)
            # short_results3.append(0)
        if len(short_path) > 1:
            short_results2.append(short_vf_count / float(len(short_path)))

    # save modified meta
    meta_mod = [x for x in meta_map.itervalues()]
    helpers.save_to_json(out, meta_mod)

    # print results
    print 'ALL'
    print 'VF count: %d' % all_vf
    print 'VF CLOSENESS count: %d' % all_vf_closeness
    print 'Non vf count: %d' % all_nonvf
    print 'Non vf CLOSENESS count: %d' % all_nonvf_closeness
    print 'VF perc: %f' % (all_vf / float(all_vf + all_nonvf))
    print 'VF CLOSENESS perc: %f' % (
        all_vf_closeness / float(all_vf_closeness + all_nonvf_closeness))
    print 'Mean VF prob: %f' % np.mean(results)
    print 'Mean VF CLOSENESS prob: %f' % np.mean(results3)
    print 'Mean VF2 prob: %f' % np.mean(results2)
    print '=========='
    print 'SHORT'
    print 'VF count: %d' % all_short_vf
    print 'VF CLOSENESS count: %d' % all_short_vf_closeness
    print 'Non vf count: %d' % all_short_nonvf
    print 'Non vf CLOSENESS count: %d' % all_short_nonvf_closeness
    if all_short_vf + all_short_nonvf > 0:
        print 'VF perc: %f' % (
            all_short_vf / float(all_short_vf + all_short_nonvf))
    if all_short_vf_closeness + all_short_nonvf_closeness > 0:
        print 'VF CLOSENESS perc: %f' % (
            all_short_vf_closeness /
            float(all_short_vf_closeness + all_short_nonvf_closeness))
    print 'Mean VF prob: %f' % np.mean(short_results)
    print 'Mean VF CLOSENESS prob: %f' % np.mean(short_results3)
    print 'Mean VF2 prob: %f' % np.mean(short_results2)
    print '=-----------------'
    print 'LONG'
    print 'VF count: %d' % all_long_vf
    print 'VF CLOSENESS count: %d' % all_long_vf_closeness
    print 'Non vf count: %d' % all_long_nonvf
    print 'Non vf CLOSENESS count: %d' % all_long_nonvf_closeness
    print 'VF perc: %f' % (all_long_vf / float(all_long_vf + all_long_nonvf))
    print 'VF CLOSENESS perc: %f' % (
        all_long_vf_closeness /
        float(all_long_vf_closeness + all_long_nonvf_closeness))
    print 'Mean VF prob: %f' % np.mean(long_results)
    print 'Mean VF CLOSENESS prob: %f' % np.mean(long_results3)
    print 'Mean VF2 prob: %f' % np.mean(long_results2)
def purify(g, meta_original, out, count=1000, try_per_race=1,
           show_progress=False, with_lp=True):
    empty = 0

    # remove traces whose random paths are already calculated
    logger.warn('[r]ONLY NOT FILLED PATHS[/]')
    meta_filled = [
        x for x in meta_original if helpers.RANDOM_WALK_RUN_COUNT not in x
    ]
    # Filter if interested only in routes of stretch 1
    # meta_filled = [x for x in meta_original
    #                if x[helpers.TRACE_LEN] - x[helpers.SH_LEN] == 1]
    ## traces with a maximum stretch
    # logger.warn('[r]!!!ONLY WITH STRETCH[/]')
    # meta = [x for x in meta if x[helpers.STRETCH] > -1]
    ## shorter meta records
    # logger.warn('[r]!!!ONLY SHORT TRACES[/]')
    # meta = [x for x in meta if len(x[helpers.TRACE]) < 5]
    # meta_map = {tuple(x[helpers.TRACE]): x for x in meta_filled}

    logger.info('All trace count: %d' % len(meta_filled))
    tr_count = min(len(meta_filled), count)
    meta_random = random.sample(meta_filled, tr_count)
    logger.info('Chosen subset count: %d' % len(meta_random))

    # real_vf_degree = [x for x in meta_random if x[helpers.IS_VF_DEGREE] == 1]
    # real_nonvf_degree = [x for x in meta_random if x[helpers.IS_VF_DEGREE] == 0]
    # assert len(real_nonvf_degree) == tr_count - len(real_vf_degree)
    # real_vf_prelabeled = [x for x in meta_random if x[helpers.IS_VF_PRELABELED] == 1]
    # real_nonvf_prelabeled = [x for x in meta_random if x[helpers.IS_VF_PRELABELED] == 0]
    # assert len(real_nonvf_prelabeled) == tr_count - len(real_vf_prelabeled)
    # real_vf_closeness = [x for x in meta_random if x[helpers.IS_VF_CLOSENESS] == 1]
    # real_nonvf_closeness = [x for x in meta_random if x[helpers.IS_VF_CLOSENESS] == 0]
    # assert len(real_nonvf_closeness) == tr_count - len(real_vf_closeness)
    # logger.info('Real vf degree: %f[%d]' % (len(real_vf_degree) / float(tr_count),
    #                                         len(real_vf_degree)))
    # logger.info('Real nonvf degree: %f[%d]' % (len(real_nonvf_degree) / float(tr_count),
    #                                            len(real_nonvf_degree)))
    # logger.info('Real vf prelabeled: %f[%d]' % (len(real_vf_prelabeled) / float(tr_count),
    #                                             len(real_vf_prelabeled)))
    # logger.info('Real nonvf prelabeled: %f[%d]' % (len(real_nonvf_prelabeled) / float(tr_count),
    #                                                len(real_nonvf_prelabeled)))
    # logger.info('Real vf closeness: %f[%d]' % (len(real_vf_closeness) / float(tr_count),
    #                                            len(real_vf_closeness)))
    # logger.info('Real nonvf closeness: %f[%d]' % (len(real_nonvf_closeness) / float(tr_count),
    #                                               len(real_nonvf_closeness)))
    # traceroutes = [x[helpers.TRACE] for x in meta_random]
    # traceroutes = vft.trace_in_vertex_id(g, traceroutes)

    # accept both meta dicts and bare traces: wrap raw traces in minimal dicts
    try:
        meta_random[0][helpers.TRACE]
    except Exception:
        meta_random = [{helpers.TRACE: x} for x in meta_random]

    progress = progressbar1.DummyProgressBar(end=10, width=15)
    if show_progress:
        progress = progressbar1.AnimatedProgressBar(end=len(meta_random),
                                                    width=15)

    # build one stretch value per trace, following the empirical stretch
    # distribution of the sample, and shuffle it so that each random walk
    # gets a target length drawn from the same distribution
    stretch_list = []
    max_stretch = max(
        [x[helpers.TRACE_LEN] - x[helpers.SH_LEN] for x in meta_random])
    for stretch in range(0, max_stretch + 1):
        metas = [
            x for x in meta_random
            if x[helpers.TRACE_LEN] - x[helpers.SH_LEN] == stretch
        ]
        stretch_list.extend(list(repeat(stretch, len(metas))))
    # print(stretch_list)
    random.shuffle(stretch_list)

    strx_array = []
    for idx, trace_meta in enumerate(meta_random):
        progress += 1
        progress.show_progress()
        # print(trace_meta[helpers.TRACE])
        shl = trace_meta[helpers.SH_LEN]
        trace = vft.trace_in_vertex_id(g, [trace_meta[helpers.TRACE], ])
        if len(trace) != 1:
            print 'PROBLEM'
            print trace_meta
            continue
        trace = trace[0]
        # print(trace)

        random_walk_closeness_route_vf = 0
        random_walk_closeness_route_lp_soft = 0
        random_walk_closeness_route_lp_hard = 0
        random_walk_degree_route_vf = 0
        random_walk_degree_route_lp_soft = 0
        random_walk_degree_route_lp_hard = 0
        random_walk_prelabeled_route_vf = 0
        random_walk_prelabeled_route_lp_soft = 0
        random_walk_prelabeled_route_lp_hard = 0

        s, t = trace[0], trace[-1]
        for counter in xrange(0, try_per_race):
            # random_path = helpers.random_route_walk(g, s, t, len(trace))  # Modified
            random_path = helpers.random_route_walk(
                g, s, t, shl + stretch_list[idx])  # Modified
            if len(random_path) == 0:
                empty += 1
            if vft.is_valley_free(g, random_path, vfmode=vft.CLOSENESS):
                random_walk_closeness_route_vf += 1
                if (len(random_path) == shl + 1):
                    strx_array.append(1)
                if with_lp:
                    lp_soft = vft.is_local_preferenced(g, random_path,
                                                       first_edge=True,
                                                       vfmode=vft.CLOSENESS)
                    lp_hard = vft.is_local_preferenced(g, random_path,
                                                       first_edge=False,
                                                       vfmode=vft.CLOSENESS)
                    if lp_soft:
                        random_walk_closeness_route_lp_soft += 1
                    if lp_hard:
                        random_walk_closeness_route_lp_hard += 1
            else:
                if (len(random_path) == shl + 1):
                    strx_array.append(0)
            # if vft.is_valley_free(g, random_path, vfmode=vft.DEGREE):
            #     random_walk_degree_route_vf += 1
            #     if with_lp:
            #         lp_soft = vft.is_local_preferenced(g, random_path,
            #                                            first_edge=True,
            #                                            vfmode=vft.DEGREE)
            #         lp_hard = vft.is_local_preferenced(g, random_path,
            #                                            first_edge=False,
            #                                            vfmode=vft.DEGREE)
            #         if lp_soft:
            #             random_walk_degree_route_lp_soft += 1
            #         if lp_hard:
            #             random_walk_degree_route_lp_hard += 1
            # if vft.is_valley_free(g, random_path, vfmode=vft.PRELABELED):
            #     random_walk_prelabeled_route_vf += 1
            #     if with_lp:
            #         lp_soft = vft.is_local_preferenced(g, random_path,
            #                                            first_edge=True,
            #                                            vfmode=vft.PRELABELED)
            #         lp_hard = vft.is_local_preferenced(g, random_path,
            #                                            first_edge=False,
            #                                            vfmode=vft.PRELABELED)
            #         if lp_soft:
            #             random_walk_prelabeled_route_lp_soft += 1
            #         if lp_hard:
            #             random_walk_prelabeled_route_lp_hard += 1

            # sanity check
            # if random_path[0] != s or random_path[-1] != t:
            #     logger.error('ALERT')
            if len(random_path) != len(set(random_path)):
                logger.error('LENGTH ERROR')

        extra_meta = {
            helpers.RANDOM_WALK_RUN_COUNT: try_per_race,
            helpers.RANDOM_WALK_VF_CLOSENESS_ROUTE:
            random_walk_closeness_route_vf,
            helpers.RANDOM_WALK_VF_DEGREE_ROUTE: random_walk_degree_route_vf,
            helpers.RANDOM_WALK_VF_PRELABELED_ROUTE:
            random_walk_prelabeled_route_vf,
        }
        if with_lp:
            extra_meta.update({
                helpers.RANDOM_WALK_LP_SOFT_CLOSENESS_ROUTE:
                random_walk_closeness_route_lp_soft,
                helpers.RANDOM_WALK_LP_HARD_CLOSENESS_ROUTE:
                random_walk_closeness_route_lp_hard,
                helpers.RANDOM_WALK_LP_SOFT_DEGREE_ROUTE:
                random_walk_degree_route_lp_soft,
                helpers.RANDOM_WALK_LP_HARD_DEGREE_ROUTE:
                random_walk_degree_route_lp_hard,
                helpers.RANDOM_WALK_LP_SOFT_PRELABELED_ROUTE:
                random_walk_prelabeled_route_lp_soft,
                helpers.RANDOM_WALK_LP_HARD_PRELABELED_ROUTE:
                random_walk_prelabeled_route_lp_hard
            })
        trace_meta.update(extra_meta)

    ## save modified meta
    # all meta_* get only references from meta_original
    helpers.save_to_json(out, meta_random)
    # meta_mod = [x for x in meta_map.itervalues()]
    # helpers.save_to_json(out, meta_mod)

    # calculate results
    # real_vf = [x[helpers.IS_VF_CLOSENESS] for x in meta_random]
    # real_vf_ratio = np.mean(real_vf)
    random_walk_vf_ratio_per_element = [
        x[helpers.RANDOM_WALK_VF_CLOSENESS_ROUTE] /
        x[helpers.RANDOM_WALK_RUN_COUNT] for x in meta_random
    ]
    random_walk_vf_ratio = np.mean(random_walk_vf_ratio_per_element)

    # print results
    logger.info('')
    logger.info('Empty: %d' % empty)
    logger.info('Tested trace count: %d' % len(meta_random))
    # logger.info('VF ratio in tested traces: %f' % real_vf_ratio)
    logger.info('VF ratio in random walks: %f' % random_walk_vf_ratio)
    logger.info('VF ratio in random walks for path stretch 1: %f' %
                np.mean(strx_array))
                     (len_component, (len(all_long_paths) - len(component_paths))))
        logger.debug(
            'REAL: %f[%d/%d] ALL: %f[%d/%d]' %
            (np.mean(real_path_vf), sum(real_path_vf), len(real_path_vf),
             np.mean(all_path_vf), sum(all_path_vf), len(all_path_vf)))
        real_connected_pairs_pertime.append(active_pair_counter)
        trace_count_per_pontpair_pertime.append(trace_count_per_pontpair)
        vf_trace_count_per_pontpair_pertime.append(vf_trace_count_per_pontpair)
        top_nodes_trace_pertime.append(len(current_top_nodes))
        current_top_nodes = set(current_top_nodes)
        top_nodes_pertime.append(len(current_top_nodes))

    # logger.info('Trace count: %d' % len(user_traces))
    helpers.save_to_json('traces_user_SQ8t7_%d.json' % user, user_traces)
    # print 'saved %s' % user
    # np.savetxt('user-small%s.csv' % user, pair_activity_matrix, delimiter=';')

trace_count_per_pontpair_pertime = np.matrix(
    trace_count_per_pontpair_pertime)
vf_trace_count_per_pontpair_pertime = np.matrix(
    vf_trace_count_per_pontpair_pertime)

print
tmp = [
    100 * x / float(len(functionally_connected_pairs))
    if len(functionally_connected_pairs) > 0 else 0
    for x in real_connected_pairs_pertime
]
        traceroutes = foodweb.get_traceroutes(arguments.traceroutes_input)
    elif arguments.type == Networks.weibo:
        pass
    elif arguments.type == Networks.text:
        traceroutes = text.get_traceroutes(arguments.traceroutes_input)
    elif arguments.type == Networks.wiki:
        traceroutes = wiki.get_traceroutes(arguments.traceroutes_input)
    elif arguments.type == Networks.metabolic:
        traceroutes = metabolic.get_traceroutes(arguments.traceroutes_input)
    elif arguments.type == Networks.wordnavi:
        traceroutes = wordnavi.get_traceroutes(arguments.traceroutes_input)
    else:
        raise RuntimeError('Unknown network type')

    msg = 'Save traceroutes to {trace}'.format(trace=arguments.json_traces)
    logger.info(msg)
    helpers.save_to_json(arguments.json_traces, traceroutes)

    if "network" in arguments.convert:
        # label the network with the CAIDA labeling tool
        # first load the previously converted traceroutes
        traceroutes = helpers.load_from_json(arguments.json_traces)
        logger.info('Trace count: {c}'.format(c=len(traceroutes)))
        # to increase accuracy, add the reversed traces as well
        if arguments.type == Networks.airport:
            traceroutes.extend([[y for y in reversed(x)] for x in traceroutes])
        # convert with the CAIDA labeling tools
        logger.info('Caida labeling the graph')
        edge_list = helpers.caida_labeling(arguments.caida_folder, traceroutes,
                                           arguments.network_cliques)
def main():
    parser = argparse.ArgumentParser(
        description=('Synthetic route generator'),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--progressbar', action='store_true')
    parser.add_argument('--verbose', '-v', action='count', default=0)
    parser.add_argument('network')
    parser.add_argument('meta')
    parser.add_argument('all_trace_out', metavar='all-trace-out')
    parser.add_argument('syntetic_out', metavar='syntetic-out')
    parser.add_argument('--trace-count', '-tc', type=int, dest='trace_count',
                        default=5000)
    parser.add_argument('--random-sample', dest='random_sample',
                        action='store_true')
    parser.add_argument('--closeness-error', '-ce', type=float,
                        dest='closeness_error', default=0.0)
    parser.add_argument('--core-limit-percentile', '-cl', type=int,
                        dest='core_limit', default=0)
    parser.add_argument('--toggle-node-error-mode', action='store_true')
    arguments = parser.parse_args()

    arguments.verbose = min(len(helpers.LEVELS) - 1, arguments.verbose)
    logging.getLogger('compnet').setLevel(helpers.LEVELS[arguments.verbose])
    show_progress = arguments.progressbar

    g = helpers.load_network(arguments.network)
    g = g.simplify()
    meta = helpers.load_from_json(arguments.meta)
    if arguments.random_sample:
        random.shuffle(meta)
        meta = meta[:arguments.trace_count]
    N = g.vcount()

    cl = sorted([x['closeness'] for x in g.vs], reverse=True)
    logger.info('Min closeness: %s' % np.min(cl))
    logger.info('Max closeness: %s' % np.max(cl))
    logger.info('Mean closeness: %s' % np.mean(cl))
    logger.info('10%% closeness: %s' % np.percentile(cl, 10))
    logger.info('90%% closeness: %s' % np.percentile(cl, 90))
    logger.info('Core limit: [r]%d%%[/]' % arguments.core_limit)
    change_probability = 100 * arguments.closeness_error
    logger.info('Change probability: [r]%6.2f%%[/]' % change_probability)
    core_limit = np.percentile(cl, arguments.core_limit)
    logger.info('Core limit in closeness: [bb]%f[/]' % core_limit)

    if arguments.toggle_node_error_mode:
        logger.info('[r]Node error mode[/]')
        msg = ("If given node's closeness >= core_limit then the new ",
               "closeness in this node is ",
               # "rand(closeness_error ... old closeness)"
               "OLD_CLOSENESS * +/- closeness_error%")
        logger.info(''.join(msg))
        logger.info('Minimum node closeness: [g]%f[/]' %
                    arguments.closeness_error)
        for n in g.vs:
            if n['closeness'] < core_limit:
                continue
            # sign = -1 if random.uniform(-1, 1) < 0 else 1
            # n['closeness'] = n['closeness'] * (1 + sign * arguments.closeness_error)
            new_closeness = random.uniform(arguments.closeness_error,
                                           n['closeness'])
            n['closeness'] = new_closeness

    g_labeled = vft.label_graph_edges(g, vfmode=vft.CLOSENESS)
    peer_edge_count = len([x for x in g_labeled.es if x['dir'] == LinkDir.P])
    logger.info('Peer edge count: %d' % peer_edge_count)

    changed_edges = []
    if not arguments.toggle_node_error_mode:
        msg = ("If the closeness values of the endpoints of a given edge are ",
               "larger than the core_limit and ",
               "random(0,1) < closeness_error then change the direction ",
               "of this edge")
        logger.info(''.join(msg))
        changed_u = changed_d = 0
        changed_edges = []
        changed_edgess = []
        for edge in g_labeled.es:
            s, t = edge.source, edge.target
            s_cl = g_labeled.vs[s]['closeness']
            t_cl = g_labeled.vs[t]['closeness']
            if (s_cl < core_limit or t_cl < core_limit):
                continue
            if random.uniform(0, 1) > arguments.closeness_error:
                continue
            # if abs(s_cl - t_cl) / min(s_cl, t_cl) > 0.02: continue
            new_edge_dir = LinkDir.U if random.uniform(0, 1) > 0.5 else LinkDir.D
            if new_edge_dir != edge['dir']:
                if edge['dir'] == LinkDir.U:
                    changed_u += 1
                else:
                    changed_d += 1
                edge['dir'] = new_edge_dir
                changed_edges.append(edge)
                changed_edgess.append((edge.source, edge.target))
            # if edge['dir'] == LinkDir.U:
            #     changed_u += 1
            #     changed_edgess.append((edge.source, edge.target))
            #     edge['dir'] = LinkDir.D
            #     changed_edges.append(edge)
            # elif edge['dir'] == LinkDir.D:
            #     changed_d += 1
            #     changed_edgess.append((edge.source, edge.target))
            #     edge['dir'] = LinkDir.U
            #     changed_edges.append(edge)
        logger.info('E count: %d' % g_labeled.ecount())
        logger.info('Changed U: %d' % changed_u)
        logger.info('Changed D: %d' % changed_d)
        logger.info('Changed: %d' % (changed_d + changed_u))

    changed_e = [(g_labeled.vs[e.source]['name'],
                  g_labeled.vs[e.target]['name']) for e in changed_edges]
    changed_e = changed_e + [(x[1], x[0]) for x in changed_e]
    changed_e = set(changed_e)

    vf_g_closeness = vft.convert_to_vf(g, vfmode=vft.CLOSENESS,
                                       labeled_graph=g_labeled)
    # e_colors = []
    # for e in vf_g_closeness.es:
    #     if e.source < N and e.target < N: col = 'grey'
    #     elif e.source < N and e.target >= N: col = 'blue'
    #     elif e.source >= N and e.target >= N: col = 'red'
    #     else: col = 'cyan'
    #     e_colors.append(col)
    # igraph.plot(vf_g_closeness, "/tmp/closeness.pdf",
    #             vertex_label=vf_g_closeness.vs['name'],
    #             vertex_size=0.2,
    #             edge_color=e_colors)

    pairs = [(g.vs.find(x[helpers.TRACE][0]).index,
              g.vs.find(x[helpers.TRACE][-1]).index,
              tuple(x[helpers.TRACE])) for x in meta]
    # pairs = list(set(pairs))
    # random.shuffle(pairs)
    # visited = set()
    # pairs2 = []
    # for x in pairs:
    #     k = (x[0], x[1])
    #     if k in visited: continue
    #     visited.add(k)
    #     visited.add((k[1], k[0]))
    #     pairs2.append(x)
    # pairs = pairs2
    traces = [x[2] for x in pairs]

    stretches = []
    syntetic_traces = []
    sh_traces = []
    base_traces = []
    original_traces = []
    bad = 0
    progress = progressbar1.DummyProgressBar(end=10, width=15)
    if show_progress:
        progress = progressbar1.AnimatedProgressBar(end=len(pairs), width=15)

    for s, t, trace_original in pairs:
        progress += 1
        progress.show_progress()
        trace_original_idx = [g.vs.find(x).index for x in trace_original]
        logger.debug('Original trace: %s -- %s -- %s',
                     [g.vs[x]['name'] for x in trace_original_idx],
                     vft.trace_to_string(g, trace_original_idx, vft.CLOSENESS),
                     [g.vs[x]['closeness'] for x in trace_original_idx])

        sh_routes = g.get_all_shortest_paths(s, t)
        sh_len = len(sh_routes[0])
        sh_routes_named = [[g.vs[y]['name'] for y in x] for x in sh_routes]
        sh_trace_name = random.choice(sh_routes_named)
        base_trace_name = random.choice(sh_routes_named)

        candidates = vf_g_closeness.get_all_shortest_paths(s + N, t + N)
        candidates = [vft.vf_route_converter(x, N) for x in candidates]
        # candidates = []
        if len(candidates) == 0:
            candidates = vft.get_shortest_vf_route(g_labeled, s, t, mode='vf',
                                                   vf_g=vf_g_closeness,
                                                   _all=True,
                                                   vfmode=vft.CLOSENESS)
        if len(candidates) == 0:
            s_name, t_name = g.vs[s]['name'], g.vs[t]['name']
            logger.debug("!!!No synthetic route from %s to %s" %
                         (s_name, t_name))
            continue

        logger.debug('Candidates from %s to %s:' %
                     (g.vs[s]['name'], g.vs[t]['name']))
        for c in candidates:
            logger.debug('%s -- %s -- %s' %
                         ([g.vs[x]['name'] for x in c],
                          vft.trace_to_string(g_labeled, c, vft.PRELABELED),
                          [g.vs[x]['closeness'] for x in c]))

        chosen_one = random.choice(candidates)
        chosen_one_name = [g.vs[x]['name'] for x in chosen_one]
        # print chosen_one
        # print trace_original
        # pretty_plotter.pretty_plot(g, trace_original_idx,
        #                            chosen_one, changed_edgess,
        #                            spec_color=(0, 0, 0, 155))
        hop_stretch = len(chosen_one) - sh_len
        stretches.append(hop_stretch)

        trace_original_e = zip(trace_original, trace_original[1:])
        chosen_one_e = zip(chosen_one_name, chosen_one_name[1:])
        trace_affected = any([x in changed_e for x in trace_original_e])
        chosen_affected = any([x in changed_e for x in chosen_one_e])
        logger.debug('Trace affected: %s' % trace_affected)
        logger.debug('Chosen affected: %s' % chosen_affected)
        # if hop_stretch > 2:
        #     logger.debug('Base: %s' % trace_to_string(g_labeled, base_trace_name))
        #     logger.debug('SH: %s' % trace_to_string(g_labeled, sh_trace_name))
        #     logger.debug('Trace: %s' % trace_to_string(g_labeled, trace_original))
        #     logger.debug('Syntetic: %s' % trace_to_string(g_labeled, chosen_one_name))
        if trace_affected or chosen_affected or hop_stretch > 2:
            # pretty_plotter.pretty_plot_all(g, traces,
            #                                chosen_one, changed_edgess,
            #                                spec_color=(0, 0, 0, 255))
            bad += 1

        syntetic_traces.append(chosen_one_name)
        sh_traces.append(sh_trace_name)
        base_traces.append(base_trace_name)
        original_traces.append(trace_original)
        logger.debug('From %s to %s chosen one %s' %
                     (g.vs[s]['name'], g.vs[t]['name'], chosen_one_name))

    result = zip(base_traces, sh_traces, original_traces, syntetic_traces)
    helpers.save_to_json(arguments.all_trace_out, result)
    helpers.save_to_json(arguments.syntetic_out, syntetic_traces)

    print 'Bad: %d' % bad
    c = collections.Counter(stretches)
    trace_count = len(syntetic_traces)
    logger.info('Stretch dist:')
    for k in c:
        logger.info('\t%d: %5.2f%%[%d]' %
                    (k, 100 * c[k] / float(trace_count), c[k]))
    logger.info('Valid route count: %d' % trace_count)
    logger.info('Route count parameter: %d' % arguments.trace_count)
    logger.info('Generated valid pair count: %d' % len(pairs))
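# Saved output, sketched (assuming helpers.save_to_json dumps plain JSON):
# `all_trace_out` holds a list of 4-element entries
#   [base_trace, sh_trace, original_trace, synthetic_trace]
# where each element is a list of node names, and `syntetic_out` holds only
# the synthetic traces. Pairs without a valley-free candidate are skipped, so
# these lists can be shorter than the number of input pairs.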
def purify(g, meta_original, out, count=1000, try_per_race=1,
           show_progress=False):
    empty = 0

    # remove traces with already calculated random paths
    logger.warn('[r]ONLY NOT FILLED PATHS[/]')
    meta_filled = [
        x for x in meta_original
        if helpers.RANDOM_NONVF_WALK_RUN_COUNT not in x
    ]
    logger.info('All trace count: %d' % len(meta_filled))
    tr_count = min(len(meta_filled), count)
    meta_random = random.sample(meta_filled, tr_count)
    logger.info('Chosen subset count: %d' % len(meta_random))

    real_vf_degree = [x for x in meta_random if x[helpers.IS_VF_DEGREE] == 1]
    real_nonvf_degree = [
        x for x in meta_random if x[helpers.IS_VF_DEGREE] == 0
    ]
    assert len(real_nonvf_degree) == tr_count - len(real_vf_degree)
    real_vf_prelabeled = [
        x for x in meta_random if x[helpers.IS_VF_PRELABELED] == 1
    ]
    real_nonvf_prelabeled = [
        x for x in meta_random if x[helpers.IS_VF_PRELABELED] == 0
    ]
    assert len(real_nonvf_prelabeled) == tr_count - len(real_vf_prelabeled)
    real_vf_closeness = [
        x for x in meta_random if x[helpers.IS_VF_CLOSENESS] == 1
    ]
    real_nonvf_closeness = [
        x for x in meta_random if x[helpers.IS_VF_CLOSENESS] == 0
    ]
    assert len(real_nonvf_closeness) == tr_count - len(real_vf_closeness)

    logger.info('Real vf degree: %f[%d]' %
                (len(real_vf_degree) / float(tr_count), len(real_vf_degree)))
    logger.info('Real nonvf degree: %f[%d]' %
                (len(real_nonvf_degree) / float(tr_count),
                 len(real_nonvf_degree)))
    logger.info('Real vf prelabeled: %f[%d]' %
                (len(real_vf_prelabeled) / float(tr_count),
                 len(real_vf_prelabeled)))
    logger.info('Real nonvf prelabeled: %f[%d]' %
                (len(real_nonvf_prelabeled) / float(tr_count),
                 len(real_nonvf_prelabeled)))
    logger.info('Real vf closeness: %f[%d]' %
                (len(real_vf_closeness) / float(tr_count),
                 len(real_vf_closeness)))
    logger.info('Real nonvf closeness: %f[%d]' %
                (len(real_nonvf_closeness) / float(tr_count),
                 len(real_nonvf_closeness)))

    progress = progressbar1.DummyProgressBar(end=10, width=15)
    if show_progress:
        progress = progressbar1.AnimatedProgressBar(end=len(meta_random),
                                                    width=15)

    for trace_meta in meta_random:
        progress += 1
        progress.show_progress()
        trace = vft.trace_in_vertex_id(g, [trace_meta[helpers.TRACE], ])
        if len(trace) != 1:
            logger.error('PROBLEM')
            logger.error('%s' % trace_meta)
            continue
        trace = trace[0]

        random_nonvf_walk_closeness_route_count = 0
        random_nonvf_walk_closeness_route_len = []
        random_nonvf_walk_degree_route_count = 0
        random_nonvf_walk_degree_route_len = []
        random_nonvf_walk_prelabeled_route_count = 0
        random_nonvf_walk_prelabeled_route_len = []
        random_nonvf_walk_lp_soft_closeness_route_count = 0
        random_nonvf_walk_lp_soft_degree_route_count = 0
        random_nonvf_walk_lp_soft_prelabeled_route_count = 0
        random_nonvf_walk_lp_hard_closeness_route_count = 0
        random_nonvf_walk_lp_hard_degree_route_count = 0
        random_nonvf_walk_lp_hard_prelabeled_route_count = 0

        s, t = trace[0], trace[-1]
        for counter in xrange(0, try_per_race):
            isvf, random_path = helpers.random_nonvf_route(
                g, s, t, len(trace), vfmode=vft.CLOSENESS)
            assert len(random_path) > 0
            if isvf:
                random_nonvf_walk_closeness_route_count += 1
                lp_soft = vft.is_local_preferenced(g, trace, first_edge=True,
                                                   vfmode=vft.CLOSENESS)
                lp_hard = vft.is_local_preferenced(g, trace, first_edge=False,
                                                   vfmode=vft.CLOSENESS)
                if lp_soft:
                    random_nonvf_walk_lp_soft_closeness_route_count += 1
                if lp_hard:
                    random_nonvf_walk_lp_hard_closeness_route_count += 1
            random_nonvf_walk_closeness_route_len.append(len(random_path))

            isvf, random_path = helpers.random_nonvf_route(g, s, t, len(trace),
                                                           vfmode=vft.DEGREE)
            assert len(random_path) > 0
            if isvf:
                random_nonvf_walk_degree_route_count += 1
                lp_soft = vft.is_local_preferenced(g, trace, first_edge=True,
                                                   vfmode=vft.DEGREE)
                lp_hard = vft.is_local_preferenced(g, trace, first_edge=False,
                                                   vfmode=vft.DEGREE)
                if lp_soft:
                    random_nonvf_walk_lp_soft_degree_route_count += 1
                if lp_hard:
                    random_nonvf_walk_lp_hard_degree_route_count += 1
            random_nonvf_walk_degree_route_len.append(len(random_path))

            isvf, random_path = helpers.random_nonvf_route(
                g, s, t, len(trace), vfmode=vft.PRELABELED)
            assert len(random_path) > 0
            if isvf:
                random_nonvf_walk_prelabeled_route_count += 1
                lp_soft = vft.is_local_preferenced(g, trace, first_edge=True,
                                                   vfmode=vft.PRELABELED)
                lp_hard = vft.is_local_preferenced(g, trace, first_edge=False,
                                                   vfmode=vft.PRELABELED)
                if lp_soft:
                    random_nonvf_walk_lp_soft_prelabeled_route_count += 1
                if lp_hard:
                    random_nonvf_walk_lp_hard_prelabeled_route_count += 1
            random_nonvf_walk_prelabeled_route_len.append(len(random_path))

            # sanity check
            # if random_path[0] != s or random_path[-1] != t:
            #     logger.error('ALERT')
            if len(random_path) != len(set(random_path)):
                logger.error('LENGTH ERROR')

        extra_meta = {
            helpers.RANDOM_NONVF_WALK_RUN_COUNT: try_per_race,
            helpers.RANDOM_NONVF_WALK_VF_CLOSENESS_ROUTE:
            random_nonvf_walk_closeness_route_count,
            helpers.RANDOM_NONVF_WALK_VF_CLOSENESS_ROUTE_LEN:
            random_nonvf_walk_closeness_route_len,
            helpers.RANDOM_NONVF_WALK_VF_DEGREE_ROUTE:
            random_nonvf_walk_degree_route_count,
            helpers.RANDOM_NONVF_WALK_VF_DEGREE_ROUTE_LEN:
            random_nonvf_walk_degree_route_len,
            helpers.RANDOM_NONVF_WALK_VF_PRELABELED_ROUTE:
            random_nonvf_walk_prelabeled_route_count,
            helpers.RANDOM_NONVF_WALK_VF_PRELABELED_ROUTE_LEN:
            random_nonvf_walk_prelabeled_route_len,
            helpers.RANDOM_NONVF_WALK_LP_SOFT_DEGREE_ROUTE:
            random_nonvf_walk_lp_soft_degree_route_count,
            helpers.RANDOM_NONVF_WALK_LP_SOFT_CLOSENESS_ROUTE:
            random_nonvf_walk_lp_soft_closeness_route_count,
            helpers.RANDOM_NONVF_WALK_LP_SOFT_PRELABELED_ROUTE:
            random_nonvf_walk_lp_soft_prelabeled_route_count,
            helpers.RANDOM_NONVF_WALK_LP_HARD_DEGREE_ROUTE:
            random_nonvf_walk_lp_hard_degree_route_count,
            helpers.RANDOM_NONVF_WALK_LP_HARD_CLOSENESS_ROUTE:
            random_nonvf_walk_lp_hard_closeness_route_count,
            helpers.RANDOM_NONVF_WALK_LP_HARD_PRELABELED_ROUTE:
            random_nonvf_walk_lp_hard_prelabeled_route_count
        }
        trace_meta.update(extra_meta)

    ## save modified meta
    # all meta_* get only references from meta_original
    helpers.save_to_json(out, meta_original)

    # calculate results
    real_vf = [x[helpers.IS_VF_CLOSENESS] for x in meta_random]
    real_vf_ratio = np.mean(real_vf)
    random_nonvf_walk_vf_ratio_per_element = [
        x[helpers.RANDOM_NONVF_WALK_VF_CLOSENESS_ROUTE] /
        x[helpers.RANDOM_NONVF_WALK_RUN_COUNT] for x in meta_random
    ]
    random_nonvf_walk_vf_ratio = np.mean(
        random_nonvf_walk_vf_ratio_per_element)

    # print results
    logger.info('')
    logger.info('Empty: %d' % empty)
    logger.info('Tested trace count: %d' % len(meta_random))
    logger.info('VF ratio in tested traces: %f' % real_vf_ratio)
    logger.info('VF ratio in random walks: %f' % random_nonvf_walk_vf_ratio)