def main():
    '''
    Adjacency matrix and modifications
    '''
    # # A = input_matrix()
    # A = input_sparse_matrix_data("../data/random_graph_data.txt", "All")
    # # print(A.toarray())
    # # Modifications
    # A_mod = A + sp.eye(A.shape[0])  # Adding self-loops
    # # print(A_mod.toarray())
    # norm_adj = symnormalise(A_mod)  # Normalization using D^(-1/2) A D^(-1/2)
    # adj = sparse_mx_to_torch_sparse_tensor(norm_adj).to(
    #     'cuda')  # SciPy to Torch sparse tensor
    # As = sparse_mx_to_torch_sparse_tensor(A).to(
    #     'cuda')  # SciPy to Torch sparse tensor
    # A = sparse_mx_to_torch_sparse_tensor(A).to_dense().to(
    #     'cuda')  # SciPy to dense Torch tensor
    graph_data = GraphLoader(
        "/home/lisali/GCN_Partitioning/data/random_graph_data.txt", "All")
    A, As, adj = graph_data.get_data()

    # ### plot
    # fig = plt.figure(num=1, figsize=(500, 500))
    # plt.ion()
    # fig1 = fig.add_subplot(1, 2, 1)
    # fig1.set_title('original matrix')
    # plt.imshow(As.to('cpu').toarray())
    # fig2 = fig.add_subplot(1, 2, 2)
    # fig2.set_title('adjacency matrix')
    # plt.imshow(adj.to('cpu').toarray())
    # plt.show()

    '''
    Declare input size and feature tensor
    '''
    N = A.shape[0]  # number of nodes
    d = 100         # feature dimension

    torch.manual_seed(100)
    x = torch.randn(N, d)
    x = x.to('cuda')

    '''
    Model definition
    '''
    gl = [d, 64, 16]  # graph-convolution layer widths
    ll = [16, 5]      # fully-connected layer widths

    model = GCN(gl, ll, dropout=0.5).to('cuda')
    optimizer = optim.Adam(model.parameters(), lr=1e-3, weight_decay=5e-6)
    print(model)

    # check_grad(model, x, adj, A, As)

    # Train
    Train(model, x, adj, As, optimizer)

    # Test the best partition
    Test(model, x, adj, As)
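
# The commented-out preprocessing above references symnormalise() and
# sparse_mx_to_torch_sparse_tensor(). Below is a minimal sketch of what such
# helpers conventionally look like, assuming SciPy sparse input; the names
# come from the comments above, not from this repo's actual implementation.
import numpy as np
import scipy.sparse as sp
import torch


def symnormalise(M):
    """Symmetric normalisation: D^(-1/2) M D^(-1/2)."""
    d = np.asarray(M.sum(axis=1)).flatten()  # degree of each node
    d_inv_sqrt = np.power(d, -0.5)           # inf where degree == 0
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.0   # isolated nodes stay zero
    D_inv_sqrt = sp.diags(d_inv_sqrt)
    return D_inv_sqrt @ M @ D_inv_sqrt


def sparse_mx_to_torch_sparse_tensor(M):
    """Convert a SciPy sparse matrix to a torch sparse COO tensor."""
    M = M.tocoo().astype(np.float32)
    indices = torch.from_numpy(np.vstack((M.row, M.col)).astype(np.int64))
    values = torch.from_numpy(M.data)
    return torch.sparse_coo_tensor(indices, values, torch.Size(M.shape))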
def main():
    graph = GraphLoader.load(graph_path)
    # GraphLoader.export_pajek(graph, pajek_export_path)

    graph_analyzer = GraphAnalyser(graph)
    graph_analyzer.print_stats()
    graph_analyzer.connected_components()
    graph_analyzer.degree_plot()
    graph_analyzer.degree_rank_log_regression()
    graph_analyzer.hill_plot()
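
# A hypothetical sketch of what degree_rank_log_regression() could do,
# assuming the graph is a networkx graph: fit a line to log(degree) vs.
# log(rank) to estimate a power-law slope. The actual GraphAnalyser
# implementation is not shown here and may differ.
import numpy as np


def degree_rank_log_regression(graph):
    """Estimate the slope of the degree-rank relation on a log-log scale."""
    degrees = np.array(sorted((d for _, d in graph.degree()), reverse=True))
    ranks = np.arange(1, len(degrees) + 1)
    mask = degrees > 0  # log is undefined at degree 0
    slope, intercept = np.polyfit(np.log(ranks[mask]),
                                  np.log(degrees[mask]), 1)
    return slope, intercept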
def __init__(self):
    self.graph = GraphLoader().construct_graph()
    self.graph_state = None
    self.prev_graph_state = None
import argparse

from graph_loader import GraphLoader

parser = argparse.ArgumentParser(description='DBLP Data Loader')
parser.add_argument('--uri', default="bolt://localhost:7687",
                    help='URI for the Neo4j database')
parser.add_argument('--user', default='neo4j',
                    help='Username for the Neo4j database')
parser.add_argument('--password',
                    help='Password for the Neo4j database')
args = parser.parse_args()

if __name__ == "__main__":
    user = args.user
    password = args.password
    uri = args.uri

    graph_loader = GraphLoader(uri, user, password)
    graph_loader.evolve_conference_paper_reviews()
    graph_loader.load_evolve_journal_paper_reviews()
    graph_loader.evolve_authors_affiliations()
    graph_loader.close()
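
# A minimal sketch of how this GraphLoader's connection handling could look,
# using the official neo4j Python driver. The evolve_* method bodies and any
# Cypher queries are placeholders; only the driver/session API shown here is
# taken from the real library.
from neo4j import GraphDatabase


class GraphLoader:
    def __init__(self, uri, user, password):
        # One driver per loader; sessions are opened per query.
        self._driver = GraphDatabase.driver(uri, auth=(user, password))

    def run(self, cypher, **params):
        # Open a short-lived session and return the query results as dicts.
        with self._driver.session() as session:
            return session.run(cypher, **params).data()

    def close(self):
        self._driver.close()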
#!/usr/bin/env python3
from graph_loader import GraphLoader
from permutation_matrix import PermutationMatrix
from graph import Graph

number_of_vertices = 16

# Clebsch graph
clebsch_file_path = 'data/clebsch.txt'
graph_loader_clebsch = GraphLoader(number_of_vertices, clebsch_file_path)
clebsch_graph = graph_loader_clebsch.load()
clebsch_matrix = clebsch_graph.adjacency_matrix

# Shrikhande graph
shrikhande_file_path = 'data/shrikhande.txt'
graph_loader_shrikhande = GraphLoader(number_of_vertices, shrikhande_file_path)
shrikhande_graph = graph_loader_shrikhande.load()
shrikhande_matrix = shrikhande_graph.adjacency_matrix


def contains_same_edge(A, B, tol):
    m, n = A.shape
    for i in range(0, n):
        for j in range(0, i):
            # both matrices have an entry ~ 1 (an edge) at the same position
            if A[i, j] > 1 - tol and B[i, j] > 1 - tol:
                return True
    return False


# Keep relabelling the Clebsch graph until it shares no edge with the
# Shrikhande graph.
while contains_same_edge(clebsch_matrix, shrikhande_matrix, 0.1):
    clebsch_matrix = PermutationMatrix.permute_matrix(clebsch_matrix)
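
# A hypothetical sketch of what PermutationMatrix.permute_matrix() could look
# like: relabel the vertices by conjugating the adjacency matrix with a random
# permutation matrix P (A -> P A P^T). The repo's actual implementation is not
# shown here.
import numpy as np


def permute_matrix(A):
    n = A.shape[0]
    P = np.eye(n)[np.random.permutation(n)]  # random n x n permutation matrix
    return P @ A @ P.T                       # same graph, relabelled vertices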
    (33, 34), (34, 35), (35, 36), (36, 37), (37, 38), (38, 39),
    (39, 40), (40, 41), (41, 42), (42, 43), (43, 44), (44, 0),
)

file_path = 'data/45_12_3_3.txt'
graph_loader = GraphLoader(n, file_path)
regular_graph = graph_loader.load()

cycle_matrix = Graph.create_adjacency_matrix(n, A1)
regular_graph_matrix = regular_graph.adjacency_matrix


def contains_same_edge(A, B, tol):
    # Works on any square 2-D array supporting .shape and [i, j] indexing,
    # e.g. NumPy arrays or matrices.
    m, n = A.shape
    for i in range(0, n):
        for j in range(0, i):
            # both matrices have an entry ~ 1 (an edge) at the same position
            if A[i, j] > 1 - tol and B[i, j] > 1 - tol:
                return True
    return False
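
# For NumPy arrays, the nested scan above can be collapsed into a single
# vectorised check over the strict lower triangle; this is equivalent to
# contains_same_edge() under the same tolerance.
import numpy as np


def contains_same_edge_vec(A, B, tol):
    lower = np.tril(np.ones(A.shape, dtype=bool), k=-1)  # entries with j < i
    return bool(np.any((A > 1 - tol) & (B > 1 - tol) & lower))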