Example #1
def main(args):
    lines = get_log_lines(args.files)

    # Split into sorted lists for each node.
    nodes = get_nodes(lines)
    node_to_lines = {}
    node_to_runs = {}
    for n in nodes:
        node_to_lines[n] = sorted(filter(lambda l: l.addr == n, lines))
        # Split by Manager lines.
        runs = []
        current_idx = 0
        start_idx = 0
        while current_idx < len(node_to_lines[n]):
            if node_to_lines[n][current_idx].protocol == "Manager":
                runs.append(node_to_lines[n][start_idx:current_idx])
                start_idx = current_idx
            current_idx += 1
        runs.append(node_to_lines[n][start_idx:current_idx])
        node_to_runs[n] = filter(lambda r: len(r) > 100, runs)
    
    # Use 20 as the anchor
    runs = []
    for i, r in enumerate(node_to_runs[20]):
        run = {}
        run[20] = i
        start = r[0].timestamp
        for n in nodes:
            if n == 20:
                continue
            # Match runs on other nodes whose first timestamp is within 90s of the anchor run.
            for j, other in enumerate(node_to_runs[n]):
                if abs((start - other[0].timestamp).total_seconds()) < 90:
                    run[n] = j
        runs.append(run)
    rows = []
    for i, r in enumerate(runs):
        row = [i]
        for n in nodes:
            idx = r.get(n)
            timestamp = node_to_runs[n][idx][0].timestamp if idx is not None else None
            row.append("%s, %s" % (idx, timestamp))
        rows.append(row)

    header = ["run"]
    for n in nodes:
        header.append(n)
    print tabulate.tabulate(rows, headers=header)

    # Merge runs.
    if args.o:
        for i in xrange(len(runs)):
            run = []
            for n, idx in runs[i].iteritems():
                run.extend(node_to_runs[n][idx])
            with open('run-%s.log' % i, 'w') as f:
                for line in sorted(run):
                    f.write(line.original)
                    f.write("\n")
    return
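For reference, a minimal self-contained sketch of the ±90-second window match used above to align runs across nodes; the timestamps are made up and no log objects are involved:

from datetime import datetime, timedelta

anchor_start = datetime(2020, 1, 1, 12, 0, 0)  # first timestamp of the anchor run
candidate_starts = [anchor_start + timedelta(seconds=s) for s in (30, 150, -60)]

# A candidate run matches the anchor when its first timestamp is within 90 seconds.
matched = [j for j, t in enumerate(candidate_starts)
           if abs((anchor_start - t).total_seconds()) < 90]
print(matched)  # [0, 2]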
Example #2
def compute_reduced_graph(set_links):
    node_indices = utils.get_nodes(set_links)

    graph = TransitiveGraph(len(node_indices))

    for arg1, arg2, relation in set_links:
        node_index1 = node_indices[arg1]
        node_index2 = node_indices[arg2]
        graph.add_edge(node_index1, node_index2)

    closure_matrix = graph.transitive_closure()

    indirect_links = set()

    for from_node, to_nodes in enumerate(closure_matrix):
        for to_node, reachable in enumerate(to_nodes):
            if from_node != to_node and reachable == 1:
                for indirect_node, indirect_reachable in enumerate(closure_matrix[to_node]):
                    if indirect_node != to_node and indirect_reachable == 1:
                        indirect_links.add((from_node, indirect_node))

    reduced_links = []

    for arg1, arg2, relation in set_links:
        node_index1 = node_indices[arg1]
        node_index2 = node_indices[arg2]

        if (node_index1, node_index2) not in indirect_links:
            reduced_links.append((arg1, arg2, relation))

    return reduced_links
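A hypothetical call, assuming `utils.get_nodes` maps every argument in the links to an index and `TransitiveGraph.transitive_closure()` returns a reachability matrix; the link ('A', 'C', 'before') would be dropped because it is already implied via 'B':

links = [('A', 'B', 'before'), ('B', 'C', 'before'), ('A', 'C', 'before')]
reduced = compute_reduced_graph(links)
# Expected: [('A', 'B', 'before'), ('B', 'C', 'before')]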
Example #3
def shortest_path_a_star(start_node, end_node, input_data_path, output_file):
    """Main function for A* shortest path.

    `start_node` and `end_node` are ('key', 'value') pairs.
    `input_data_path` is a path to a directory with the node and edge layers.
    `output_file` is a path to the output shapefile.
    """
    # Read graph
    G = nx.Graph(nx.read_shp(input_data_path, strict=False,
                             geom_attrs=True))  # Read and convert to Graph
    graph_summary(G)

    # Get start and end node
    start = get_nodes(G, start_node[0], start_node[1])[0]
    end = get_nodes(G, end_node[0], end_node[1])[0]
    print("Start node:")
    print_node(G, start)
    print("End node:")
    print_node(G, end)

    # Find shortest path
    shortest_path = nx.astar_path(G,
                                  start,
                                  end,
                                  heuristic=calculate_distance,
                                  weight='length')
    fids = nodes_from_path(G, shortest_path, key=start_node[0])
    print('Shortest path: ' + ' - '.join(['%d' % fid for fid in fids]))
    shortest_path_length = nx.astar_path_length(G,
                                                start,
                                                end,
                                                heuristic=calculate_distance,
                                                weight='length')
    print('Shortest path length: %f' % shortest_path_length)

    return shortest_path
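A hypothetical invocation; the attribute name 'nodeID' and the paths are placeholders for illustration:

path = shortest_path_a_star(('nodeID', 17), ('nodeID', 42),
                            'data/road_network', 'output/shortest_path.shp')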
Example #4
def stats_mode(args):
    first_completed = []
    time_to_completion = []
    num_packets_sent = []
    num_adv_sent = []
    num_req_sent = []
    num_data_sent = []

    for f in args.files:
        lines = utils.get_log_lines([f])
        lines = utils.sync_timings(lines)
        nodes = utils.get_nodes(lines)
        version = utils.get_version(lines)
        t_min = utils.get_t_min(lines)
        total_pages = utils.get_total_pages(lines, version)
        start_times = utils.get_start_times(lines, nodes, t_min)
        completion_times = utils.get_completion_times(lines, nodes, total_pages, version)
        final_times = utils.get_final_times(lines, nodes, total_pages, version)
        time_taken = utils.get_time_taken(nodes, start_times, final_times)
        packets_sent = utils.get_packets_sent(lines, nodes, start_times, final_times)

        num_adv_sent.append(sum(v[0] for v in packets_sent.values()))
        num_req_sent.append(sum(v[1] for v in packets_sent.values()))
        num_data_sent.append(sum(v[2] for v in packets_sent.values()))
        num_packets_sent.append(sum(sum(v) for v in packets_sent.values()))
        time_to_completion.append(max(time_taken.values()).total_seconds())
        first_completed.append(min(v.total_seconds() for v in time_taken.values() if v.total_seconds()))
    avg_time_to_completion = sum(time_to_completion) / len(time_to_completion)
    print "Average Time to Completion:", avg_time_to_completion
    avg_first_completed = sum(first_completed) / len(first_completed)
    print "Average Time for first node:", avg_first_completed
    print "Average Delta:", avg_time_to_completion - avg_first_completed

    avg_packets_sent = float(sum(num_packets_sent)) / len(num_packets_sent)
    avg_adv_sent = float(sum(num_adv_sent)) / len(num_adv_sent)
    avg_req_sent = float(sum(num_req_sent)) / len(num_req_sent)
    avg_data_sent = float(sum(num_data_sent)) / len(num_data_sent)

    print "Average Packets Sent:", avg_packets_sent
    print "Total ADV Sent:", avg_adv_sent
    print "Total REQ Sent:", avg_req_sent
    print "Total DATA Sent:", avg_data_sent

    print "Average ADV Sent %:", 100 * avg_adv_sent / avg_packets_sent
    print "Average REQ Sent %:", 100 * avg_req_sent / avg_packets_sent
    print "Average DATA Sent %:", 100 * avg_data_sent / avg_packets_sent
Example #5
    def __init__(self,
                 port,
                 pvt_key=None,
                 version=settings.VERSION,
                 node=True):
        # TODO use private variables
        self.tx_pool = []
        self.version = version
        self.port = port
        self.nodes = utils.get_nodes(port)
        self.ledger = {}

        self.pvt_key = utils.new_rsa_key(pvt_key)
        self.pub_key = self.pvt_key.publickey().exportKey().decode()
        self.address = utils.pub_2_address(self.pub_key)

        if node:
            self.init_chain()
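A hypothetical instantiation, assuming the enclosing class is a peer/node class (the name Node and the ports are made up):

node = Node(port=5000)                # generates a fresh RSA key pair and initialises the chain
client = Node(port=5001, node=False)  # skips init_chain(), keeping only the keys and tx pool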
Example #6
def check_mode(args):
    for f in args.files:
        lines = utils.get_log_lines([f])
        lines = utils.sync_timings(lines)
        nodes = utils.get_nodes(lines)
        version = utils.get_version(lines)
        t_min = utils.get_t_min(lines)
        total_pages = utils.get_total_pages(lines, version)
        start_times = utils.get_start_times(lines, nodes, t_min)
        completion_times = utils.get_completion_times(lines, nodes, total_pages, version)
        final_times = utils.get_final_times(lines, nodes, total_pages, version)
        time_taken = utils.get_time_taken(nodes, start_times, final_times)
        packets_sent = utils.get_packets_sent(lines, nodes, start_times, final_times)

        # utils.get_stats(lines)
        all_nodes_completed = time_taken.values() and min(time_taken.values()).total_seconds() == 0
        all_nodes_exists = nodes == set([2,3,4,5,6,7,8,9,10,11,20])
        if not all_nodes_completed:
            print "Not all nodes completed:", f.name
        elif not all_nodes_exists:
            print "Not all nodes exist:", f.name, nodes
Example #7
def scatter_mode(args):
    for f in args.files:
        lines = utils.get_log_lines([f])
        lines = utils.sync_timings(lines)
        nodes = utils.get_nodes(lines)
        version = utils.get_version(lines)
        t_min = utils.get_t_min(lines)
        total_pages = utils.get_total_pages(lines, version)
        start_times = utils.get_start_times(lines, nodes, t_min)
        completion_times = utils.get_completion_times(lines, nodes, total_pages, version)
        final_times = utils.get_final_times(lines, nodes, total_pages, version)
        time_taken = utils.get_time_taken(nodes, start_times, final_times)
        packets_sent = utils.get_packets_sent(lines, nodes, start_times, final_times)


        all_nodes_completed = time_taken.values() and min(time_taken.values()).total_seconds() == 0
        all_nodes_exists = nodes == set([2,3,4,5,6,7,8,9,10,11,20])
        if not all_nodes_completed:
            continue
        # elif not all_nodes_exists:
        #     continue
        elif len(nodes) < 7:
            continue

        if args.l:
            for n in nodes:
                if not time_taken.get(n) or time_taken[n].total_seconds() == 0 or packets_sent[n][2] < 100:
                    continue
                print time_taken[n].total_seconds(), 100 * float(packets_sent[n][0] + packets_sent[n][1]) / sum(packets_sent[n]), 1
        else:
            adv_sent = sum(v[0] for v in packets_sent.values())
            req_sent = sum(v[1] for v in packets_sent.values())
            data_sent = sum(v[2] for v in packets_sent.values())
            total_sent = sum(sum(v) for v in packets_sent.values())        
            completion_time = max(time_taken.values()).total_seconds()
            print completion_time, 100 * float(adv_sent + req_sent) / total_sent, 1
Example #8
    try:
        status = utils.get_resource_status(
            'pod',
            label=args.hubble_ui_labels,
            must_exist=False,
        )
        if status is None:
            namespace.hubble_ui_ns = args.hubble_ui_ns
        else:
            namespace.hubble_ui_ns = status[0]
    except RuntimeError:
        namespace.hubble_ui_ns = args.hubble_ui_ns

    log.debug('Fetching nodes to determine cluster size...')
    nodes = utils.get_nodes()
    if not nodes:
        log.error('No nodes found')
    if len(nodes) > 20 and (not args.nodes and args.since == defaults.since
                            and args.size_limit == defaults.size_limit):
        log.warning(
            ('Detected a large cluster (> 20) with {} nodes. Consider '
             'setting one or more of the following to decrease the size '
             'of the sysdump as many nodes will slow down the collection '
             'and increase the size of the resulting '
             'archive:\n').format(len(nodes)) +
            '  --nodes       (Nodes to collect from)          \n'
            '  --since       (How far back in time to collect)\n'
            '  --size-limit  (Size limit of Cilium logs)      \n')

        choice = prompt('Continue [y/N] (default is N)? ').strip().lower()
Example #9
#############################################
nodes = 800  # Number of nodes in the final simulation
speed = 1.5  # Maximum speed of nodes
infection_range = 20  # Infection range around infected nodes
width = 900  # Image width
height = 900  # Image height
rec_time_min = 300  # Minimum time before a node either dies or recovers
rec_time_max = 2400  # Maximum time before a node either dies or recovers
#############################################
### ==---==--==    SETTINGS    ==--==--== ###
#############################################

tot = nodes
nodes = utils.get_nodes(nodes, [width, height],
                        speed=speed,
                        irange=infection_range,
                        rec_max=rec_time_max,
                        rec_min=rec_time_min)

screen = pygame.display.set_mode([width, height])
pygame.display.set_caption("Realtime")

frame = 0
data = []

running = True
while running:
    frame += 1
    screen.fill([0, 0, 0])

    if frame % 10 == 0:
Example #10
def get_graph(request):
    data = {'nodes': utils.get_nodes(),
            'lines': utils.get_lines()}
    return HttpResponse(content=json.dumps(data), content_type='application/json')
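get_graph looks like a Django view; a sketch of how it might be wired up (Django 2+ style, module layout assumed):

# urls.py (hypothetical)
from django.urls import path
from . import views

urlpatterns = [
    path('graph/', views.get_graph, name='graph'),
]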
Example #11
for line in t1.readlines():
    fri_traj_dictionary[line.split()[0]] = line[(len(line.split()[0]) + 1):-1]
t1.close()

test = '../data/test_undirected.txt'  # test positive
train = '../data/train_undirected.txt'  # train positive
test_un = '../data/test_negative.txt'  # test negative
train_un = '../data/train_negative.txt'  # train negative

fri_traj_train = '../data/fri_traj_train.txt'
fri_traj_test = '../data/fri_traj_test.txt'

f_t_train = open(fri_traj_train, 'w')
train_positive = open(train)
for line in train_positive.readlines():
    node_l, node_r = utils.get_nodes(line)
    new_line = fri_traj_dictionary[node_l] + ' ' + fri_traj_dictionary[
        node_r] + ' 1\n'
    f_t_train.write(new_line)
train_positive.close()
f_t_train.close()

f_t_train = open(fri_traj_train, 'a')
train_negative = open(train_un)
for line in train_negative.readlines():
    node_l, node_r = utils.get_nodes(line)
    new_line = fri_traj_dictionary[node_l] + ' ' + fri_traj_dictionary[
        node_r] + ' 0\n'
    f_t_train.write(new_line)
train_negative.close()
f_t_train.close()
Example #12
f = open(BK_file)
for line in f.readlines():
    total_len += 1
    if line.split('\n')[0] in dictionary:
        print 'ok'
    dictionary[line.split('\n')[0]] = '0'
f.close()

# undirected
fe = open(BK_edges, 'w')
ff = open(BK_file)
for line in ff.readlines():
    if dictionary[line.split('\n')[0]] == '0':
        fe.write(line)
        dictionary[line.split('\n')[0]] = '1'
        line_l, line_r = utils.get_nodes(line)
        new_line = line_r + ' ' + line_l
        dictionary[new_line] = '1'
    else:
        pass
fe.close()
ff.close()

# calculate the degree of every node
fs = open(sorted_nodes)
dictionary_node = {}
for line in fs.readlines():
    dictionary_node[line.split('\n')[0]] = '0'

#
fe = open(BK_edges)
Example #13
def list_nodes():
    nodes = get_nodes()
    return jsonify({
        "bitcoin": [node.to_dict(True) for node in nodes],
        "lightning": [],
    })
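list_nodes looks like a Flask view; a hypothetical registration (the URL and app object are assumptions):

from flask import Flask

app = Flask(__name__)
app.add_url_rule('/nodes', view_func=list_nodes)  # GET /nodes returns the JSON payload above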
Example #14
def shortest_path_a_star(start_node, end_node, input_data_path, output_file):
    """Main function for A* shortest path.

    `start_node` and `end_node` are ('key', 'value') pairs.
    `input_data_path` is a path to a directory with the node and edge layers.
    `output_file` is a path to the output shapefile.
    """
    # Read graph
    G = nx.Graph(nx.read_shp(input_data_path, strict=False, geom_attrs=True)) # Read and convert to Graph
    graph_summary(G)

    # Get start and end node
    start = get_nodes(G, start_node[0], start_node[1])[0]
    end = get_nodes(G, end_node[0], end_node[1])[0]
    print("Start node:")
    print_node(G, start)
    print("End node:")
    print_node(G, end)

    # Get landmark node
    landmark_nodes = get_nodes(G, 'landmark', 1)
    # for landmark_node in landmark_nodes:
    #     print(G.node[landmark_node]['nodeID'], G.node[landmark_node]['landmark'])

    # Remove landmark that has bigger distance than the starting point to end
    #TODO: ????

    # Get transit node
    current_node = start
    unvisited_landmarks = deepcopy(landmark_nodes)
    path = [start]
    finish = False
    while not finish:
        current_distance_to_end = calculate_distance(current_node, end)

        # order distance
        # get distance for all unvisited landmarks from the current node
        # landmark_distance_dict = {node: calculate_distance(current_node, node) for node in unvisited_landmarks}
        landmark_distance_dict = {node: calculate_distance(current_node, node) + calculate_distance(node, end) for node in unvisited_landmarks}
        # sort the distance
        sorted_landmark_distance_dict = sorted(landmark_distance_dict.items(), key=operator.itemgetter(1))
        # No more landmarks, it means finish
        if len(sorted_landmark_distance_dict) == 0:
            finish = True
        for landmark_distance in sorted_landmark_distance_dict:
            # get landmark to end distance
            landmark_end_distance = calculate_distance(landmark_distance[0], end)
            # compare the `current_node to end distance` with the `landmark to end distance`
            # TODO: ????
            if current_distance_to_end < landmark_end_distance or calculate_distance(current_node, landmark_distance[0]) > current_distance_to_end:
                # The landmark is farther from the end than the current node,
                # or farther from the current node than the end itself, so skip it.
                finish = True
                continue
            else:
                path.append(landmark_distance[0])
                current_node = landmark_distance[0]
                unvisited_landmarks.remove(current_node)
                finish = False
                break
    print('Path')
    path.append(end)
    for landmark_node in path:
        print(G.node[landmark_node]['nodeID'], G.node[landmark_node]['landmark'], landmark_node)
    
    # Build full path from the path using A*
    full_path = []
    for i in range(len(path) - 1):
        shortest_landmark_path = nx.astar_path(G, path[i], path[i+1], heuristic=calculate_distance, weight='length')
        full_path.extend(shortest_landmark_path[:-1])

    # Adding end node
    full_path.append(end)
    # print('Full path')
    # for node in full_path:
    #     print(G.node[node]['nodeID'], G.node[node]['landmark'], node)

    # Remove duplicated nodes from the path; this works because the path is contiguous
    if len(full_path) != len(set(full_path)):
        unduplicate_path = []
        skip = False
        current_node = None
        for node in full_path:
            if not skip:
                if full_path.count(node) == 1:
                    # Always add nodes with a single occurrence
                    unduplicate_path.append(node)
                else:
                    # Add the first node that has more than one occurrence
                    unduplicate_path.append(node)
                    # Mark skip as true for the next nodes
                    skip = True
                    # Store the first duplicate node
                    current_node = node
            else:
                if node == current_node:
                    # Found another current_node
                    # Remove the skip flag
                    skip = False
                    current_node = None
                else:
                    # Always skip until found another current_node
                    pass

        full_path = unduplicate_path

    return full_path
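A minimal, self-contained illustration of the de-duplication idea above, using made-up node ids; it mirrors the loop rather than calling it:

full_path = [10, 11, 12, 11, 13]  # 11 appears twice, so the 12 in between is a detour
unduplicate_path = []
skip = False
current_node = None
for node in full_path:
    if not skip:
        unduplicate_path.append(node)
        if full_path.count(node) > 1:
            skip = True           # drop everything until this node appears again
            current_node = node
    elif node == current_node:
        skip = False              # second occurrence reached; resume normal copying
        current_node = None

print(unduplicate_path)  # [10, 11, 13]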
Example #15
# '\t' replaced by ' '
f = open(file_G)
f_n = open(file_N, 'w')
for line in f.readlines():
    new_line = line.replace('\t', ' ')
    f_n.write(new_line)
f_n.close()
f.close()

# social_edges process
file_social = '../data/Brightkite_edges_regular.txt'
file_social_filter = '../data/Brightkite_edges.txt'
f_s = open(file_social)
f_s_f = open(file_social_filter, 'w')
for line in f_s.readlines():
    line_l, line_r = utils.get_nodes(line)
    if utils.key_in_dic(line_l, user_dicitonary) and utils.key_in_dic(
            line_r, user_dicitonary):
        f_s_f.write(line)
    else:
        pass
f_s.close()
f_s_f.close()

# sub-graph which is connected
BK_file = '../data/Brightkite_edges.txt'
BK_edges = '../data/edges_undirected.txt'
nodes_file = '../data/vec_all.txt'
sorted_nodes = '../data/sorted_nodes.txt'
nodes_degree = '../data/nodes_degree.txt'