Example #1
    def __init__(self, file_name):
        output_cap, server_fee, deploy_cost, cus_list, tol_demand, graph \
            = read_graph(file_name)

        self.p = server_fee
        self.q = output_cap
        self.o = deploy_cost
        self.cus_list = cus_list
        self.tol_demand = tol_demand
        self.graph = graph

        self.n = graph.number_of_nodes()
        self.m = graph.number_of_edges()
        self.level_num = len(output_cap)
        self.cus_num = len(cus_list)

        # Add "super source"
        source = self.n
        self.graph.add_node(source, demand=self.tol_demand)
        for i in range(self.n):
            self.graph.add_edge(source, i, cost=0, capacity=1000000)
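            # Zero-cost, high-capacity edges from the super source let the
            # total demand enter the network at any node.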

        # 0-1 decision variables: x[j, k] == 1 if node j hosts a server of level k
        self.x = {}
        # Integer decision variables: f[i, j] is the flow on edge (i, j)
        self.f = {}
        self.c = {}  # c[i, j]: unit cost of edge (i, j)
        self.u = {}  # u[i, j]: capacity of edge (i, j)
        self.b = {}  # b[i]: demand of node i
Example #2
def check_allpaths(graph_filename, paths_filename):
    g = read_graph(graph_filename)
    for line_no, path in iter_allpaths(g, paths_filename):
        success, error = check_path(g, path)
        if success:
            continue
        print("%d: %s" % (line_no, error))
Example #3
    def __init__(self, file_name="./case_example_3/mid/case0.txt"):
        output_cap, server_fee, deploy_cost, cus_list, total_demand, G = read_graph(file_name)
        self.output_cap = output_cap
        self.server_fee = server_fee
        self.deploy_cost = deploy_cost
        self.cus_list = cus_list
        self.total_demand = total_demand
        self.G = G

        # all_pairs_shortest_path_length returns an iterator in networkx >= 2.0
        self.spl = dict(nx.all_pairs_shortest_path_length(self.G))

        self.compute_node_features()
Example #4
    def __init__(self, f):
        self.open_cost, self.cus_list, self.tol_demand, self.graph = read_graph(f)
        self.n = self.graph.number_of_nodes()
        self.m = self.graph.number_of_edges()
        self.cus_num = len(self.cus_list)

        # Add "super source"
        source = self.n
        self.graph.add_node(source, demand=self.tol_demand)
        for i in range(self.n):
            self.graph.add_edge(source, i, cost=0, capacity=1000000)

        # Open-server decision variables: x[j] == 1 if node j hosts a server
        self.x = {}
        self.f = {}  # Decision variables: f[i, j] is the flow on edge (i, j)
        self.c = {}  # c[i, j]: cost of edge (i, j)
        self.u = {}  # u[i, j]: capacity of edge (i, j)
        self.b = {}  # b[i]: demand of node i
Example #5
import networkx as nx

from read_graph import read_graph
from common.pipeline import Pipeline
from common.feature_generators import *

# Loading PPI graph
Graph = read_graph(directed=False, threshold=600)

pipeline = Pipeline(NeighbouringConductance(range=3))
_ = pipeline.apply(Graph)
Example #6
def configs_for_allpaths(graph_filename, paths_filename):
    g = read_graph(graph_filename)
    for line_no, path in iter_allpaths(g, paths_filename):
        print("%d: %r %r" % (line_no, path, path_to_configs(g, path)))
Example #7
import networkx as nx

from read_graph import read_graph
from common.pipeline import Pipeline
from common.feature_generators import *

# Loading PPI graph
Graph, node_names = read_graph(directed=False)

pipeline = Pipeline(ExpectedDegree(default_recomputing=False, default_dump=True))
_ = pipeline.apply(Graph)
Example #8
# 1. Put all vertices into the heap, assigning distance labels
# 2. Extract a vertex from the heap
# 3. For each neighbor:
#    3.1 If the label plus the edge weight to the neighbor is less than the
#        neighbor's distance in the heap, update the distance in the heap
# 4. Save the distances for the vertex
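
# A minimal sketch of the heap relaxation described above, using the standard
# library's heapq; this is an illustration, not the dejkstra module's actual
# implementation, and the adjacency format {node: [(neighbor, weight), ...]}
# is an assumption.
import heapq

def dijkstra_sketch(adj, start):
    dist = {start: 0}
    heap = [(0, start)]                # 1. seed the heap with the start label
    while heap:
        d, node = heapq.heappop(heap)  # 2. extract the closest vertex
        if d > dist.get(node, float('inf')):
            continue                   # stale entry: already relaxed
        for neighbor, weight in adj.get(node, ()):
            new_d = d + weight
            if new_d < dist.get(neighbor, float('inf')):  # 3.1 relax the edge
                dist[neighbor] = new_d
                heapq.heappush(heap, (new_d, neighbor))
    return dist                        # 4. distances from start to each vertex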

from read_graph import read_graph
from dejkstra_simple import dejkstra_simple
from dejkstra import dejkstra
from print_pathes import print_pathes
import time

EXAMPLE_STR = 'examples/8000_0.5.txt'
N_TIMES = 10

graph_data = read_graph(EXAMPLE_STR)  # renamed so the builtin input() is not shadowed
acc_simple = 0.0
acc_heaped = 0.0

for _ in range(N_TIMES):
    start_time_s = time.time()
    pathes_s = dejkstra_simple(graph_data)
    acc_simple += time.time() - start_time_s

    start_time = time.time()
    pathes = dejkstra(graph_data)
    acc_heaped += time.time() - start_time

print('Simple %s' % (acc_simple / N_TIMES))
print('Heaped %s' % (acc_heaped / N_TIMES))
Example #9
            _, node = queue.get()

            neighbors = graph[node]  # get node list connected to node
            for edge in neighbors:
                _, node_id, node_w = edge.a, edge.b, edge.w
                # if not processed yet, update cost
                if not self.processed.get(node_id):
                    queue.put((self.cost[node] + node_w, node_id))
                    # update cost if cost is less
                    if self.cost[node] + node_w < self.cost[node_id]:
                        self.cost[node_id] = self.cost[node] + node_w
                        self.from_[node_id] = node

            # after update all its neighbors, mark node as processed
            self.processed[node] = True

        return self.cost


if __name__ == "__main__":
    from read_graph import read_graph

    filename = "/home/buxizhizhoum/1-Work/2-Codes/algorithm/graph/weighted_graph/test_graph.txt"

    graph = read_graph(filename)

    path_obj = ShortestPath(graph, start=0)
    print(path_obj.find_path())
    print(path_obj.from_)
    # path_obj.show_path(end=3)
Example #10
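        # Works whose remaining duration equals the time step finish now.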
        done_mask = end_moments == dt
        done_works = chosen_works[done_mask]
        all_done_works = done_works if len(all_done_works) == 0 \
            else np.append(all_done_works, done_works, axis=0)

        chosen_works = chosen_works[np.invert(done_mask)]
        happened_events = get_available_events(matrix, all_done_works)
        available_works = get_available_works(
            matrix, happened_events,
            np.append(all_done_works, chosen_works, axis=0))
        new_works = choose_works(matrix, available_works, reserve_time,
                                 workers_number - len(chosen_works), type)
        chosen_works = chosen_works if len(new_works) == 0 else np.append(
            chosen_works, new_works, axis=0)
        end_moments = end_moments[np.invert(done_mask)] - dt
        end_moments = np.append(end_moments, get_duration(new_works, matrix))

        works_story.append(chosen_works)
        t_story.append(t)

        print_result_latex(available_works, chosen_works, done_works,
                           end_moments, happened_events, matrix, t)

    plot_diagram(t_story, works_story, workers_number)


if __name__ == '__main__':
    matrix, early_moments, late_moments, reserve_time = read_graph(
        'input_data', True)
    solve(np.array(matrix), np.array(reserve_time), 3, 3)
Example #11
    for distance in range(1, max_distance + 1):
        # Results for this distance are written to the following file
        distance_output_file_name = 'distances/threshold=%d_distance=%d.txt' % (
            weight_threshold, distance)
        distance_files[distance] = open(distance_output_file_name, 'w')
    for uid in graph:
        closer_uid_set = set()
        last_uid_set = set([uid])
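        # Layered BFS: last_uid_set is the frontier at the previous distance
        # and closer_uid_set everything already visited, so next_uid_set
        # collects exactly the nodes at the current distance.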
        for distance in range(1, max_distance + 1):
            next_uid_set = set()
            for uid1 in last_uid_set:
                for uid2 in graph[uid1]:
                    if uid2 != uid and uid2 not in closer_uid_set:
                        next_uid_set.add(uid2)
            for uid_next in next_uid_set:
                if uid < uid_next:
                    distance_files[distance].write('%s\t%s\n' %
                                                   (uid, uid_next))
            closer_uid_set.update(next_uid_set)
            last_uid_set = next_uid_set
    for distance in range(1, max_distance + 1):
        distance_files[distance].close()


if __name__ == '__main__':
    graph = read_graph("10_graph.txt", 30)
    cal_distances(graph, 30, 6)
    for weight_threshold in range(10, 81, 5):
        graph = read_graph("10_graph.txt", weight_threshold)
        cal_distances(graph, weight_threshold, 3)
Example #12
from read_graph import read_graph
from common.pipeline import Pipeline
from common.feature_generators import Degree
from common.feature_generators import ExpectedDegree
from common.feature_generators import ClusteringCoefficient
from common.feature_generators import ClosenessCentrality
from common.feature_generators import BetweennessCentrality
from common.feature_generators import HITS
from common.feature_generators import PageRank
from common.feature_generators import Log10Wrapper
from common.feature_generators import NormalizeWrapper
from common.feature_generators import NeighbouringConductance
from common.feature_generators import FeatureSelector
from common.feature_generators import ExternalFeature
from validation import compute_correlations
from prediction import get_labels, train_model, get_and_save_metrics

# Loading PPI graph
print("\n######### Loading Graph #########")
Graph = read_graph(directed=False)
print("Loaded graph:\n\t{} nodes\n\t{} edges".format(Graph.number_of_nodes(),
                                                     Graph.number_of_edges()))

#########################
# Computing node features
#########################

print("\n######### Computing/retrieving node features #########")

# The pipeline object takes as an argument the sequence of feature generator objects we want
pipeline = Pipeline(
    Log10Wrapper(Degree(default_dump=True, default_recomputing=False))(),
    Log10Wrapper(ExpectedDegree(default_dump=True,
                                default_recomputing=False))(),
    ClusteringCoefficient(), ClosenessCentrality(), BetweennessCentrality(),
)
Example #13
from read_graph import read_graph
from centrality import centrality
from build_heap import build_heap
from down_heap import down_heapify

def order_centrality(G):

    ctrs = {}
    for actor in G.keys():
        ctrs[actor] = centrality(G, actor)

    # print ctrs
    elems = list(ctrs.values())  # list() so the heap can be built in Python 3
    G = build_heap(elems)

    # Pop the heap root once to obtain the minimum centrality
    min_ctr = G[0]
    G[0] = G[len(G) - 1]
    G.pop()
    down_heapify(G, 0)

    return min_ctr

# test
G = read_graph('file.tsv')
print(order_centrality(G))
Example #14
        cur_configs = new_configs

    return sum(cur_configs.values())

def time(g, count=100):
    """ Benchmark the algorithm. """

    result = timeit.Timer(stmt=lambda: count_paths(g)).timeit(number=count)
    return "%s seconds for %s iterations, giving %s seconds per iteration" % (
        result, count, result / count)


if __name__ == "__main__":
    f = sys.stdin
    timing = None

    if len(sys.argv) > 1:
        f = open(sys.argv[1])

    if len(sys.argv) > 2:
        timing = int(sys.argv[2])

    g = read_graph(f)

    if timing:
        print(time(g, timing))
    else:
        print(count_paths(g))
    
Example #15
import networkx as nx
import time

from read_graph import read_graph

file_name = "./case_example/low/case0.txt"
#file_name = "./case_example/high/topo2400.txt"

output_cap, server_fee, deploy_cost, cus_list, total_demand, G = read_graph(
    file_name)

for i in range(G.number_of_nodes()):
    G.nodes[i]['demand'] = -G.nodes[i]['demand']  # G.node in networkx < 2.0

# Super sink
sink = G.number_of_nodes()
G.add_node(sink, demand=total_demand)
for i in range(G.number_of_nodes() - 1):
    if G.nodes[i]['demand'] != 0:
        G.add_edge(i, sink, capacity=G.nodes[i]['demand'], cost=0)
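# Each demand is now enforced by the capacity of that node's edge into the
# super sink, so the per-node demand attributes can be cleared below.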

for i in range(G.number_of_nodes() - 1):
    G.nodes[i]['demand'] = 0

for i in range(G.number_of_nodes() - 1):
    G.add_node(i, demand=-total_demand)

    start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
    nx.max_flow_min_cost(G, i, sink, weight='cost')
    #nx.capacity_scaling(G, weight='cost')
    end_time = time.perf_counter()