Example #1
def ba_calc(arguments):
    ba_graph = helpers.load_network(arguments.ba_graph)
    sh_paths = helpers.load_from_json(arguments.point_pairs)
    out = arguments.out

    min_stretch = arguments.min_stretch
    max_stretch = arguments.max_stretch

    max_c = len(sh_paths)
    arguments.lb = arguments.lb if 0 <= arguments.lb <= max_c else 0
    arguments.ub = arguments.ub if 0 <= arguments.ub <= max_c else max_c

    arguments.lb, arguments.ub = (min(arguments.lb, arguments.ub),
                                  max(arguments.lb, arguments.ub))

    sh_paths = sh_paths[arguments.lb:arguments.ub]
    vf_g_closeness = vft.convert_to_vf(ba_graph, vfmode=vft.CLOSENESS)

    results = [[] for x in xrange(min_stretch, max_stretch + 1)]

    for stretch in xrange(min_stretch, max_stretch + 1):
        logger.info('Calculate results with stretch %d' % stretch)
        result = ba_generator(ba_graph, sh_paths, stretch, vf_g_closeness,
                              arguments.progressbar)
        results[stretch - min_stretch] = result

    helpers.save_to_json(out, results)
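
The bound handling at the top of ba_calc (clamp out-of-range values into [0, max_c], then order them so the slice always works) is repeated in Example #4 and, without the reordering step, in Example #3. A minimal standalone sketch of that pattern; the clamp_bounds name is hypothetical:

def clamp_bounds(lb, ub, max_c):
    # Out-of-range bounds fall back to the full range [0, max_c].
    lb = lb if 0 <= lb <= max_c else 0
    ub = ub if 0 <= ub <= max_c else max_c
    # Return in ascending order so callers can slice directly.
    return min(lb, ub), max(lb, ub)

# clamp_bounds(-1, 10, 5) -> (0, 5); clamp_bounds(4, 2, 5) -> (2, 4)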
Example #2
def upwalk(upwalk_f, out_folder_path):
    data = helpers.load_from_json(upwalk_f)
    keys = sorted(set(data['RANDOM']) | set(data['REAL']))
    with open('{}/upwalk'.format(out_folder_path), 'w') as f:
        f.write('IDX;REAL;RANDOM\n')
        for k in keys:
            f.write('{};{};{}\n'.format(k, data['REAL'].get(k, 0),
                                        data['RANDOM'].get(k, 0)))
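
A sketch of what upwalk produces, with a made-up input dictionary: the union of the REAL and RANDOM keys becomes the IDX column, and a count missing on either side is written as 0.

# Illustrative stand-in for the JSON loaded from upwalk_f.
sample = {'REAL': {'1': 12, '2': 7}, 'RANDOM': {'2': 3, '3': 9}}
keys = sorted(set(sample['REAL']) | set(sample['RANDOM']))
for k in keys:
    print('{};{};{}'.format(k, sample['REAL'].get(k, 0),
                            sample['RANDOM'].get(k, 0)))
# 1;12;0
# 2;7;3
# 3;9;0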
Example #3
def main():
    parser = argparse.ArgumentParser(description='Calculate meta information for real traces', formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('network')
    parser.add_argument('meta')
    parser.add_argument('output', type=argparse.FileType('w'))

    # parser.add_argument('--vfmode', type=str, default='labeled', dest='vfmode',
    #                     choices=['labeled', 'closeness'])

    # for parallelization
    parser.add_argument('--lower-bound', '-lb', type=int, default=0, dest='lb')
    parser.add_argument('--upper-bound', '-ub', type=int, default=-1, dest='ub')

    parser.add_argument('--progressbar', action='store_true')
    parser.add_argument('--verbose', '-v', action='count', default=0)

    parser.add_argument('--with-prelabeled', action='store_true')
    parser.add_argument('--with-closeness', action='store_true')
    parser.add_argument('--with-degree', action='store_true')

    parser.add_argument('--with-lp-hard', action='store_true')
    parser.add_argument('--with-lp-soft', action='store_true')
    # parser.add_argument('--with-lp', action='store_true')
    # parser.add_argument('--with-vf', action='store_true')

    parser.add_argument('--try-per-trace',
                        type=int, default=1, dest='try_per_trace')

    arguments = parser.parse_args()

    arguments.verbose = min(len(helpers.LEVELS) - 1, arguments.verbose)
    logging.getLogger('compnet').setLevel(helpers.LEVELS[arguments.verbose])

    g = helpers.load_network(arguments.network)

    meta = helpers.load_from_json(arguments.meta)

    arguments.lb = arguments.lb if 0 <= arguments.lb <= len(meta) else 0
    arguments.ub = arguments.ub if 0 <= arguments.ub <= len(meta) else len(meta)

    flags = {
        FLAG_PRELABELED: arguments.with_prelabeled,
        FLAG_CLOSENESS: arguments.with_closeness,
        FLAG_DEGREE: arguments.with_degree,
        FLAG_LP_HARD: arguments.with_lp_hard,
        FLAG_LP_SOFT: arguments.with_lp_soft
    }

    # if arguments.vfmode == 'labeled': mode = vft.ORDER_PRELABELED
    # elif arguments.vfmode == 'closeness': mode = vft.ORDER_CLOSENESS
    # else: raise RuntimeError('Unhandled vfmode')

    meta = meta[arguments.lb:arguments.ub]
    # purify() updates meta in place
    purify(g, meta, flags, arguments.try_per_trace, arguments.progressbar)
    logger.info('Save to %s' % arguments.output)
    helpers.save_to_json(arguments.output, meta)
Example #4
def wrap_watts_trace_gen(args):
    g = helpers.load_network(args.network)
    traceroutes = helpers.load_from_json(args.original_traceroutes)
    max_c = len(traceroutes)
    args.lb = args.lb if 0 <= args.lb <= max_c else 0
    args.ub = args.ub if 0 <= args.ub <= max_c else max_c

    args.lb, args.ub = (min(args.lb, args.ub), max(args.lb, args.ub))
    traceroutes = traceroutes[args.lb:args.ub]

    watts_traceroutes = watts_trace_gen(g, traceroutes, args.progressbar)

    helpers.save_to_json(args.traceroute_dest, watts_traceroutes)
Example #5
def main():
    parser = argparse.ArgumentParser(description='SANDBOX mode. Write something useful here', formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('network')
    parser.add_argument('meta')
    parser.add_argument('--top-node-ratio',
                        type=float, default=.1, dest='top_node_ratio')

    parser.add_argument('--progressbar', action='store_true')
    parser.add_argument('--verbose', '-v', action='count', default=0)

    arguments = parser.parse_args()

    arguments.verbose = min(len(helpers.LEVELS) - 1, arguments.verbose)
    logging.getLogger('compnet').setLevel(helpers.LEVELS[arguments.verbose])

    g = helpers.load_network(arguments.network)
    meta = helpers.load_from_json(arguments.meta)
    logger.info('Graph loaded from: %s' % arguments.network)
    logger.info('Graph vertex count: %d' % g.vcount())

    end = int(g.vcount() * arguments.top_node_ratio)
    logger.info('Top node count: %d' % end)

    try:
        nodes = [(x.index, x['closeness']) for x in g.vs]
    except KeyError:
        logger.info('Calculate closeness values')
        progress = progressbar1.DummyProgressBar(end=10, width=15)
        if arguments.progressbar:
            progress = progressbar1.AnimatedProgressBar(end=g.vcount(),
                                                        width=15)
        for n in g.vs:
            progress += 1
            progress.show_progress()
            closeness = g.closeness(n)
            n['closeness'] = closeness
        g.save('%s_with_closeness.gml' % arguments.network)
        nodes = [(x.index, x['closeness']) for x in g.vs]

    nodes = sorted(nodes, reverse=True, key=lambda x: x[1])
    top_nodes = set([x[0] for x in nodes[:end]])

    purify(g, meta, top_nodes, arguments.progressbar)
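
The try/except branch above fills the closeness cache one vertex at a time. Assuming python-igraph's vectorized Graph.closeness() (which returns one value per vertex when called without arguments), the same caching step can be sketched as a standalone helper; the cached_closeness name and GML path are hypothetical:

import igraph

def cached_closeness(g, gml_path):
    # Fill the 'closeness' vertex attribute once and persist the graph.
    if 'closeness' not in g.vs.attributes():
        g.vs['closeness'] = g.closeness()  # one call for every vertex
        g.save(gml_path)
    return [(v.index, v['closeness']) for v in g.vs]

# e.g. cached_closeness(igraph.Graph.Erdos_Renyi(n=50, p=0.1), '/tmp/g.gml')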
Example #6
def ba_merge(arguments):
    STRETCH, TRACE_COUNT, VF_COUNT, LP_COUNT = range(4)

    out_file = arguments.out
    results = dict()

    for piece in arguments.res:
        result_piece = helpers.load_from_json(piece)
        for res in result_piece:
            stretch = res[0]
            try:
                container = results[stretch]
            except KeyError:
                container = [stretch, 0, 0, 0]
                results[stretch] = container

            for idx in [TRACE_COUNT, VF_COUNT, LP_COUNT]:
                container[idx] += res[idx]

    results_list = []
    for idx in sorted(results):
        results_list.append(results[idx])

    helpers.save_to_json_file(out_file, results_list)
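
For clarity, an in-memory restatement of the same aggregation with illustrative numbers: each row is [stretch, trace_count, vf_count, lp_count], rows sharing a stretch value are summed column-wise, and the merged rows come out sorted by stretch. The merge_pieces name is hypothetical:

from collections import defaultdict

def merge_pieces(pieces):
    # Same column-wise aggregation as ba_merge, but over in-memory lists.
    totals = defaultdict(lambda: [0, 0, 0])
    for piece in pieces:
        for stretch, traces, vf, lp in piece:
            for i, v in enumerate((traces, vf, lp)):
                totals[stretch][i] += v
    return [[s] + totals[s] for s in sorted(totals)]

piece_a = [[0, 10, 4, 6], [1, 5, 2, 3]]
piece_b = [[0, 7, 1, 6], [2, 4, 4, 0]]
print(merge_pieces([piece_a, piece_b]))
# [[0, 17, 5, 12], [1, 5, 2, 3], [2, 4, 4, 0]]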
Example #7
    elif arguments.type == Networks.wiki:
        traceroutes = wiki.get_traceroutes(arguments.traceroutes_input)
    elif arguments.type == Networks.metabolic:
        traceroutes = metabolic.get_traceroutes(arguments.traceroutes_input)
    elif arguments.type == Networks.wordnavi:
        traceroutes = wordnavi.get_traceroutes(arguments.traceroutes_input)
    else:
        raise RuntimeError('Unknown network type')
    msg = 'Save traceroutes to {trace}'.format(trace=arguments.json_traces)
    logger.info(msg)
    helpers.save_to_json(arguments.json_traces, traceroutes)

if "network" in arguments.convert:
    # label network with caida labeling tool
    # first load previously converted traceroutes
    traceroutes = helpers.load_from_json(arguments.json_traces)
    logger.info('Trace count: {c}'.format(c=len(traceroutes)))
    # to increase accuracy
    if arguments.type == Networks.airport:
        traceroutes.extend([[y for y in reversed(x)] for x in traceroutes])

    # convert with caida labeling tools
    logger.info('Caida labeling the graph')
    edge_list = helpers.caida_labeling(arguments.caida_folder, traceroutes,
                                       arguments.network_cliques)

    # save to given output file using caida output format
    logger.info('Save to file %s' % arguments.network_output)
    with open(arguments.network_output, 'w') as f:
        for e in edge_list:
            f.write('%s|%s|%s\n' % (e[0], e[1], e[2]))
Example #8
def main():
    parser = argparse.ArgumentParser(
        description=('Synthetic route generator'),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('--progressbar', action='store_true')
    parser.add_argument('--verbose', '-v', action='count', default=0)

    parser.add_argument('network')
    parser.add_argument('meta')
    parser.add_argument('all_trace_out', metavar='all-trace-out')
    parser.add_argument('syntetic_out', metavar='syntetic-out')
    parser.add_argument('--trace-count',
                        '-tc',
                        type=int,
                        dest='trace_count',
                        default=5000)
    parser.add_argument('--random-sample',
                        dest='random_sample',
                        action='store_true')

    parser.add_argument('--closeness-error',
                        '-ce',
                        type=float,
                        dest='closeness_error',
                        default=0.0)

    parser.add_argument('--core-limit-percentile',
                        '-cl',
                        type=int,
                        dest='core_limit',
                        default=0)

    parser.add_argument('--toggle-node-error-mode', action='store_true')

    arguments = parser.parse_args()

    arguments.verbose = min(len(helpers.LEVELS) - 1, arguments.verbose)
    logging.getLogger('compnet').setLevel(helpers.LEVELS[arguments.verbose])

    show_progress = arguments.progressbar

    g = helpers.load_network(arguments.network)
    g = g.simplify()

    meta = helpers.load_from_json(arguments.meta)

    if arguments.random_sample:
        random.shuffle(meta)

    meta = meta[:arguments.trace_count]

    N = g.vcount()

    cl = sorted([x['closeness'] for x in g.vs], reverse=True)
    logger.info('Min closeness: %s' % np.min(cl))
    logger.info('Max closeness: %s' % np.max(cl))
    logger.info('Mean closeness: %s' % np.mean(cl))
    logger.info('10%% closeness: %s' % np.percentile(cl, 10))
    logger.info('90%% closeness: %s' % np.percentile(cl, 90))

    logger.info('Core limit: [r]%d%%[/]' % arguments.core_limit)
    change_probability = 100 * arguments.closeness_error
    logger.info('Change probability: [r]%6.2f%%[/]' % change_probability)

    core_limit = np.percentile(cl, arguments.core_limit)
    logger.info('Core limit in closeness: [bb]%f[/]' % core_limit)

    if arguments.toggle_node_error_mode:
        logger.info('[r]Node error mode[/]')
        msg = ("If a given node's closeness >= core_limit then its new "
               "closeness is drawn uniformly from "
               "[closeness_error, old closeness]")
        logger.info(msg)
        logger.info('Minimum node closeness: [g]%f[/]' %
                    arguments.closeness_error)
        for n in g.vs:
            if n['closeness'] < core_limit:
                continue
            # sign = -1 if random.uniform(-1, 1) < 0 else 1
            # n['closeness'] = n['closeness'] * (1 + sign * arguments.closeness_error)

            new_closeness = random.uniform(arguments.closeness_error,
                                           n['closeness'])
            n['closeness'] = new_closeness

    g_labeled = vft.label_graph_edges(g, vfmode=vft.CLOSENESS)
    peer_edge_count = len([x for x in g_labeled.es if x['dir'] == LinkDir.P])
    logger.info('Peer edge count: %d' % peer_edge_count)

    changed_edges = []

    if not arguments.toggle_node_error_mode:
        msg = ("If the closeness values of the endpoints in given edge is ",
               "larger than the core_limit and ",
               "random(0,1) < closeness_error then change the direction ",
               "for this edge")
        logger.info(''.join(msg))

        changed_u = changed_d = 0
        changed_edges = []
        changed_edgess = []
        for edge in g_labeled.es:
            s, t = edge.source, edge.target
            s_cl = g_labeled.vs[s]['closeness']
            t_cl = g_labeled.vs[t]['closeness']

            if (s_cl < core_limit or t_cl < core_limit): continue
            if random.uniform(0, 1) > arguments.closeness_error: continue
            # if abs(s_cl - t_cl) / min(s_cl, t_cl) > 0.02: continue

            new_edge_dir = LinkDir.U if random.uniform(0, 1) > 0.5 else LinkDir.D

            if new_edge_dir != edge['dir']:
                if edge['dir'] == LinkDir.U:
                    changed_u += 1
                else:
                    changed_d += 1

                edge['dir'] = new_edge_dir
                changed_edges.append(edge)
                changed_edgess.append((edge.source, edge.target))

            # if edge['dir'] == LinkDir.U:
            #     changed_u += 1
            #     changed_edgess.append((edge.source, edge.target))
            #     edge['dir'] = LinkDir.D
            #     changed_edges.append(edge)
            # elif edge['dir'] == LinkDir.D:
            #     changed_d += 1
            #     changed_edgess.append((edge.source, edge.target))
            #     edge['dir'] = LinkDir.U
            #     changed_edges.append(edge)
        logger.info('E count: %d' % g_labeled.ecount())
        logger.info('Changed U: %d' % changed_u)
        logger.info('Changed D: %d' % changed_d)
        logger.info('Changed: %d' % (changed_d + changed_u))

    changed_e = [(g_labeled.vs[e.source]['name'],
                  g_labeled.vs[e.target]['name']) for e in changed_edges]
    changed_e = changed_e + [(x[1], x[0]) for x in changed_e]

    changed_e = set(changed_e)

    vf_g_closeness = vft.convert_to_vf(g,
                                       vfmode=vft.CLOSENESS,
                                       labeled_graph=g_labeled)

    # e_colors = []
    # for e in vf_g_closeness.es:
    #     if e.source < N and e.target < N: col = 'grey'
    #     elif e.source < N and e.target >= N: col = 'blue'
    #     elif e.source >= N and e.target >= N: col = 'red'
    #     else: col = 'cyan'
    #     e_colors.append(col)
    # igraph.plot(vf_g_closeness, "/tmp/closeness.pdf",
    #             vertex_label=vf_g_closeness.vs['name'],
    #             vertex_size=0.2,
    #             edge_color=e_colors)

    pairs = [(g.vs.find(x[helpers.TRACE][0]).index,
              g.vs.find(x[helpers.TRACE][-1]).index, tuple(x[helpers.TRACE]))
             for x in meta]

    # pairs = list(set(pairs))

    # random.shuffle(pairs)

    # visited = set()
    # pairs2 = []
    # for x in pairs:
    #     k = (x[0], x[1])
    #     if k in visited: continue
    #     visited.add(k)
    #     visited.add((k[1], k[0]))
    #     pairs2.append(x)

    # pairs = pairs2

    traces = [x[2] for x in pairs]

    stretches = []
    syntetic_traces = []
    sh_traces = []
    base_traces = []
    original_traces = []
    bad = 0

    progress = progressbar1.DummyProgressBar(end=10, width=15)
    if show_progress:
        progress = progressbar1.AnimatedProgressBar(end=len(pairs), width=15)

    for s, t, trace_original in pairs:
        progress += 1
        progress.show_progress()

        trace_original_idx = [g.vs.find(x).index for x in trace_original]
        logger.debug('Original trace: %s -- %s -- %s',
                     [g.vs[x]['name'] for x in trace_original_idx],
                     vft.trace_to_string(g, trace_original_idx, vft.CLOSENESS),
                     [g.vs[x]['closeness'] for x in trace_original_idx])

        sh_routes = g.get_all_shortest_paths(s, t)
        sh_len = len(sh_routes[0])

        sh_routes_named = [[g.vs[y]['name'] for y in x] for x in sh_routes]
        sh_trace_name = random.choice(sh_routes_named)
        base_trace_name = random.choice(sh_routes_named)

        candidates = vf_g_closeness.get_all_shortest_paths(s + N, t + N)
        candidates = [vft.vf_route_converter(x, N) for x in candidates]
        # candidates = []

        if len(candidates) == 0:
            candidates = vft.get_shortest_vf_route(g_labeled,
                                                   s,
                                                   t,
                                                   mode='vf',
                                                   vf_g=vf_g_closeness,
                                                   _all=True,
                                                   vfmode=vft.CLOSENESS)

        if len(candidates) == 0:
            s_name, t_name = g.vs[s]['name'], g.vs[t]['name']
            logger.debug("!!!No syntetic route from %s to %s" %
                         (s_name, t_name))
            continue

        logger.debug('Candidates from %s to %s:' %
                     (g.vs[s]['name'], g.vs[t]['name']))

        for c in candidates:
            logger.debug('%s -- %s -- %s' %
                         ([g.vs[x]['name'] for x in c],
                          vft.trace_to_string(g_labeled, c, vft.PRELABELED),
                          [g.vs[x]['closeness'] for x in c]))

        chosen_one = random.choice(candidates)
        chosen_one_name = [g.vs[x]['name'] for x in chosen_one]

        # print chosen_one
        # print trace_original
        # pretty_plotter.pretty_plot(g, trace_original_idx,
        #                            chosen_one, changed_edgess,
        #                            spec_color=(0, 0, 0, 155))

        hop_stretch = len(chosen_one) - sh_len
        stretches.append(hop_stretch)

        trace_original_e = zip(trace_original, trace_original[1:])
        chosen_one_e = zip(chosen_one_name, chosen_one_name[1:])
        trace_affected = any([x in changed_e for x in trace_original_e])
        chosen_affected = any([x in changed_e for x in chosen_one_e])
        logger.debug('Trace affected: %s' % trace_affected)
        logger.debug('Chosen affected: %s' % chosen_affected)

        # if hop_stretch > 2:
        #     logger.debug('Base: %s' % trace_to_string(g_labeled, base_trace_name))
        #     logger.debug('SH: %s' % trace_to_string(g_labeled, sh_trace_name))
        #     logger.debug('Trace: %s' % trace_to_string(g_labeled, trace_original))
        #     logger.debug('Syntetic: %s' % trace_to_string(g_labeled, chosen_one_name))

        if trace_affected or chosen_affected or hop_stretch > 2:
            # pretty_plotter.pretty_plot_all(g, traces,
            #                                chosen_one, changed_edgess,
            #                                spec_color=(0, 0, 0, 255))
            bad += 1

        syntetic_traces.append(chosen_one_name)
        sh_traces.append(sh_trace_name)
        base_traces.append(base_trace_name)
        original_traces.append(trace_original)
        logger.debug('From %s to %s chosen one %s' %
                     (g.vs[s]['name'], g.vs[t]['name'], chosen_one_name))

    result = zip(base_traces, sh_traces, original_traces, syntetic_traces)
    helpers.save_to_json(arguments.all_trace_out, result)
    helpers.save_to_json(arguments.syntetic_out, syntetic_traces)

    print 'Bad: %d' % bad

    c = collections.Counter(stretches)
    trace_count = len(syntetic_traces)
    logger.info('Stretch dist:')
    for k in c:
        logger.info('\t%d: %5.2f%%[%d]' %
                    (k, 100 * c[k] / float(trace_count), c[k]))
    logger.info('Valid route count: %d' % trace_count)
    logger.info('Route count parameter: %d' % arguments.trace_count)
    logger.info('Generated valid pair count: %d' % len(pairs))
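
The trace_affected / chosen_affected checks in the loop above rely on changed_e holding every flipped edge in both orientations. A small self-contained illustration with made-up node names:

changed = [('a', 'b')]  # flipped edges, one orientation each
changed_e = set(changed + [(t, s) for s, t in changed])  # add reversed pairs

trace = ['c', 'b', 'a', 'd']
trace_edges = list(zip(trace, trace[1:]))  # [('c','b'), ('b','a'), ('a','d')]
print(any(e in changed_e for e in trace_edges))  # True: ('b', 'a') was flipped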
Example #9
def main():
    parser = argparse.ArgumentParser(description='Display statistical information',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('network')
    parser.add_argument('metadata')
    parser.add_argument('out', type=str, help='folder path for results')

    parser.add_argument('--stretch-stat',
                        dest='stretch_stat',
                        action='store_true')

    parser.add_argument('--eye-stat',
                        dest='eye_stat',
                        action='store_true')

    parser.add_argument('--eye-stat-basic',
                        dest='eye_stat_basic',
                        action='store_true')

    parser.add_argument('--ba-stat',
                        dest='ba_stat',
                        nargs='+')

    parser.add_argument('--degree-dist',
                        dest='degree_dist',
                        action='store_true')

    parser.add_argument('--simple-load',
                        dest='simple_load',
                        action='store_true')

    parser.add_argument('--load2d')

    parser.add_argument('--stats',
                        dest='stats',
                        action='store_true')

    parser.add_argument('--upwalk')

    parser.add_argument('--verbose', '-v', action='count', default=0)

    arguments = parser.parse_args()

    arguments.verbose = min(len(helpers.LEVELS) - 1, arguments.verbose)
    logging.getLogger('compnet').setLevel(helpers.LEVELS[arguments.verbose])

    g = helpers.load_network(arguments.network)
    meta = helpers.load_from_json(arguments.metadata)

    out_folder_path = arguments.out
    if out_folder_path.endswith('/'):
        out_folder_path = out_folder_path[:-1]

    # print 'WIKI MODE!!!!!'
    # meta = [x for x in meta if x[helpers.SH_LEN] == 4 and x[helpers.TRACE_LEN] < 10]

    # print 'ONLY WITH RANDOM NONVF WALK'
    # meta = [x for x in meta if helpers.RANDOM_NONVF_WALK_RUN_COUNT in x]

    if arguments.stretch_stat:
        logger.info('Generate stretch statistics')
        stretch_stat(meta, out_folder_path)

    if arguments.eye_stat:
        logger.info('Generate Eye statistics')
        eye_stat(meta, out_folder_path)

    if arguments.eye_stat_basic:
        logger.info('Generate Basic Eye statistics')
        eye_stat_basic(meta, out_folder_path)

    if arguments.ba_stat:
        logger.info('Generate Barabasi-Albert statistics')
        ba_stat(meta, arguments.ba_stat, out_folder_path)

    if arguments.degree_dist:
        logger.info('Generate degree distributions')
        degree_distribution_stat(g, out_folder_path)

    if arguments.load2d:
        logger.info('Generate load stat')
        tr = helpers.load_from_json(arguments.load2d)
        load2d(g, tr, out_folder_path)

    if arguments.simple_load:
        logger.info('Generate simple load based on meta')
        simple_load(g, meta, out_folder_path)

    if arguments.stats:
        logger.info('Stat gen')
        stats(g, meta)
        # stat_printer(statistic)

    if arguments.upwalk:
        logger.info('Upwalk')
        upwalk(arguments.upwalk, out_folder_path)