def run_simulation(n, A1, clebsch_graph, iterations, perturbation_scale=0.1):
    correct_count = 0
    epsilon_vector = np.zeros(n)  # no small parameter
    # graph deconvolver with new components
    graph_deconvolver = GraphDeconvolver(n, A1, clebsch_graph.adjacency_list)
    for i in range(iterations):
        # perturb matrix (introduce noise to the weights)
        clebsch_matrix = perturb_matrix(clebsch_graph.adjacency_matrix, perturbation_scale)
        # convolved graphs
        A_matrix = Graph.create_adjacency_matrix(n, A1) + clebsch_matrix
        status, problem_value, A1_star, A2_star = graph_deconvolver.deconvolve(
            A_matrix, epsilon_vector)
        print('Problem status: ', status)
        cycle = is_cycle(Graph.create_adjacency_list(n, A1_star))
        print('Is cycle: ', cycle)
        if cycle:
            correct_count += 1
    return correct_count
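# perturb_matrix is used throughout these simulations but its definition is not
# part of this excerpt. The sketch below shows one plausible reading (add
# zero-mean Gaussian noise to the weights of a symmetric adjacency matrix); the
# name perturb_matrix_sketch and its body are assumptions for illustration, not
# the project's actual implementation.
import numpy as np

def perturb_matrix_sketch(adjacency_matrix, scale):
    n = adjacency_matrix.shape[0]
    noise = np.random.normal(0.0, scale, size=(n, n))
    noise = (noise + noise.T) / 2.0  # keep the perturbation symmetric
    np.fill_diagonal(noise, 0.0)     # leave the diagonal untouched
    return adjacency_matrix + noise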
def run_simulation(n, A1, clebsch_graph, iterations, perturbation_scale=0.1, epsilon=0):
    correct_count = 0
    epsilon_vector = epsilon * np.ones(n)
    for i in range(iterations):
        # perturb matrix (introduce noise to the weights)
        clebsch_matrix = perturb_matrix(clebsch_graph.adjacency_matrix, perturbation_scale)
        # rebuild the adjacency list: perturbing the matrix does not update the
        # graph's internal adjacency list representation
        A2 = Graph.create_adjacency_list(n, clebsch_matrix)
        # graph deconvolver with new components
        graph_deconvolver = GraphDeconvolver(n, A1, A2)
        # convolved graphs
        A_matrix = Graph.create_adjacency_matrix(n, A1) + clebsch_matrix
        status, problem_value, A1_star, A2_star = graph_deconvolver.deconvolve(
            A_matrix, epsilon_vector)
        if status == 'optimal':
            print('Problem status: ', status)
            cycle = is_cycle(Graph.create_adjacency_list(n, A1_star))
            print('Is cycle: ', cycle)
            if cycle:
                correct_count += 1
    return correct_count
def run_simulation(n, cycle, iterations, perturbation_scale=0.1, epsilon=0):
    correct_count = 0
    graph_denoiser = GraphDenoiser(n, cycle)
    A_matrix = Graph.create_adjacency_matrix(n, cycle)
    # vector of small parameters for spectral hull
    epsilon_vector = epsilon * np.ones(n)
    # adjacency matrix to compare against to check the graph is denoised correctly
    #test_clebsch_adjacency_matrix = Graph.create_adjacency_matrix(n, cycle)
    for i in range(iterations):
        # perturb matrix (introduce noise to the weights)
        A_matrix_noisy = perturb_matrix(A_matrix, perturbation_scale)
        status, problem_value, A_recovered = graph_denoiser.denoise(
            A_matrix_noisy, epsilon_vector)
        if status == 'optimal':
            print('Problem status: ', status)
            cycle_recovered = is_cycle(Graph.create_adjacency_list(n, A_recovered))
            print('Is cycle: ', cycle_recovered)
            # check for clebsch graph
            #cycle_recovered = np.allclose(test_clebsch_adjacency_matrix, A_recovered, atol=1e-3)
            #print('Is clebsch graph: ', cycle_recovered)
            if cycle_recovered:
                correct_count += 1
    return correct_count
def run_simulation(n, cycle, iterations, perturbation_scale=0.1):
    correct_count = 0
    graph_denoiser = GraphDenoiser(n, cycle)
    A_matrix = Graph.create_adjacency_matrix(n, cycle)
    for i in range(iterations):
        # perturb matrix (introduce noise to the weights)
        A_matrix_noisy = perturb_matrix(A_matrix, perturbation_scale)
        # epsilon vector from the cumulative mean shift in the eigenvalue distribution
        epsilon_vector = calculate_epsilon_vector(A_matrix, A_matrix_noisy)
        print()
        status, problem_value, A_recovered = graph_denoiser.denoise(
            A_matrix_noisy, epsilon_vector)
        print('Problem status: ', status)
        cycle_recovered = is_cycle(Graph.create_adjacency_list(n, A_recovered))
        print('Is cycle: ', cycle_recovered)
        if cycle_recovered:
            correct_count += 1
    return correct_count
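# calculate_epsilon_vector is not defined in this excerpt. The comment above
# ("cumulative mean shift in the eigenvalue distribution") suggests it derives
# per-eigenvalue bounds for the spectral hull from the observed perturbation.
# A minimal illustrative sketch, assuming the bound is simply the absolute
# shift of each sorted eigenvalue; calculate_epsilon_vector_sketch is a
# hypothetical stand-in, not the project's actual definition.
import numpy as np

def calculate_epsilon_vector_sketch(clean_matrix, noisy_matrix):
    clean_eigenvalues = np.linalg.eigvalsh(clean_matrix)
    noisy_eigenvalues = np.linalg.eigvalsh(noisy_matrix)
    return np.abs(noisy_eigenvalues - clean_eigenvalues)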
def run_simulation(n, cycle, iterations, perturbation_scale=0.1, epsilon=0):
    correct_count = 0
    graph_denoiser = GraphDenoiser(n, cycle)
    A_matrix = Graph.create_adjacency_matrix(n, cycle)
    # vector of small parameters for spectral hull
    epsilon_vector = epsilon * np.ones(n)
    for i in range(iterations):
        # perturb matrix (introduce noise to the weights)
        A_matrix_noisy = perturb_matrix(A_matrix, perturbation_scale)
        status, problem_value, A_recovered = graph_denoiser.denoise(
            A_matrix_noisy, epsilon_vector)
        if status == 'optimal':
            print('Problem status: ', status)
            cycle_recovered = is_cycle(Graph.create_adjacency_list(n, A_recovered))
            print('Is cycle: ', cycle_recovered)
            if cycle_recovered:
                correct_count += 1
    return correct_count
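# is_cycle is used by all of these simulations but is not shown here. A minimal
# sketch of the usual check, assuming the adjacency list is a collection of
# (u, v) edge pairs on vertices 0..n-1 (as in the edge tuples later in this
# section): a graph is a single cycle iff every vertex has degree 2 and one
# walk from vertex 0 visits all n vertices. is_cycle_sketch is illustrative
# only, not the project's actual implementation.
from collections import defaultdict

def is_cycle_sketch(n, edges):
    neighbours = defaultdict(set)
    for u, v in edges:
        neighbours[u].add(v)
        neighbours[v].add(u)
    if any(len(neighbours[v]) != 2 for v in range(n)):
        return False
    # walk around from vertex 0; a single cycle returns to 0 after n steps
    seen = {0}
    previous, current = None, 0
    while True:
        nxt = next(w for w in neighbours[current] if w != previous)
        if nxt == 0:
            break
        seen.add(nxt)
        previous, current = current, nxt
    return len(seen) == n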
def mean_shift_eigenvalue_distribution(graph_matrix, sigma):
    n = graph_matrix.shape[0]
    means = np.zeros(n)
    original_eigenvalues = eigvalsh(graph_matrix)
    iterations = 1000
    for iteration in range(iterations):
        # shift of each (sorted) eigenvalue under one random perturbation
        eigenvalue_shifts = eigvalsh(perturb_matrix(graph_matrix, sigma)) - original_eigenvalues
        # running mean of the shifts over the iterations so far
        means += (eigenvalue_shifts - means) / (1.0 + iteration)
    return np.cumsum(means)
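# Example usage (a sketch, assuming np, eigvalsh and perturb_matrix are in
# scope as they are in the function above): estimate the average eigenvalue
# drift of a small cycle graph under noise with sigma = 0.1. The demo function
# name and the inline 6-node cycle matrix are purely for illustration.
def _demo_mean_shift():
    n_demo = 6
    cycle_matrix = np.zeros((n_demo, n_demo))
    for u in range(n_demo):
        cycle_matrix[u, (u + 1) % n_demo] = 1.0
        cycle_matrix[(u + 1) % n_demo, u] = 1.0
    print(mean_shift_eigenvalue_distribution(cycle_matrix, 0.1))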
def run_simulation(n, multipartite_graph_matrix, iterations, partitions, perturbation_scale=0.1, epsilon=0):
    correct_count = 0
    graph_denoiser = GraphDenoiser(
        n, Graph.create_adjacency_list(n, multipartite_graph_matrix))
    # vector of small parameters for spectral hull
    epsilon_vector = epsilon * np.ones(n)
    for i in range(iterations):
        A_matrix_noisy = perturb_matrix(multipartite_graph_matrix, perturbation_scale)
        status, problem_value, A_recovered = graph_denoiser.denoise(
            A_matrix_noisy, epsilon_vector)
        # check the degree sequence first, before the (complete) multipartite check;
        # check_degree_sequence can't handle weighted edges, so round the recovery
        correct_degree_sequence = check_degree_sequence(partitions, np.round(A_recovered))
        if correct_degree_sequence:
            adjacency_table = Graph.create_adjacency_table(n, A_recovered)
            multipartite_graph_recognizer = Multipartite_graph_recognizer(
                partitions, adjacency_table)
            ok = multipartite_graph_recognizer.check()
            print('multipartite:', multipartite_graph_recognizer.match)
        else:
            print('incorrect degree sequence')
            ok = False
        if ok:
            correct_count += 1
    return correct_count
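# check_degree_sequence is not shown in this excerpt. A minimal sketch of the
# idea, assuming `partitions` is a list of block sizes of a complete
# multipartite graph on n vertices: every vertex in a block of size p must
# have degree n - p. check_degree_sequence_sketch and its assumed signature
# are illustrative, not the project's actual implementation.
def check_degree_sequence_sketch(partitions, adjacency_matrix):
    degrees = sorted(adjacency_matrix.sum(axis=1).tolist())
    n_total = sum(partitions)
    expected = sorted(n_total - block_size
                      for block_size in partitions
                      for _ in range(block_size))
    return degrees == expected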
def run_simulation(n, A1, multipartite_graph_matrix, iterations, partitions, perturbation_scale=0.1):
    correct_count = 0
    for i in range(iterations):
        # perturb matrix (introduce noise to the weights)
        graph_matrix = perturb_matrix(multipartite_graph_matrix, perturbation_scale)
        # rebuild the adjacency list: perturbing the matrix does not update the
        # graph's internal adjacency list representation
        A2 = Graph.create_adjacency_list(n, graph_matrix)
        # graph deconvolver with new components
        graph_deconvolver = GraphDeconvolver(n, A1, A2)
        # convolved graphs
        A_matrix = Graph.create_adjacency_matrix(n, A1) + graph_matrix
        status, problem_value, A1_star, A2_star = graph_deconvolver.deconvolve(A_matrix)
        # check the degree sequence first, before the (complete) multipartite check;
        # check_degree_sequence can't handle weighted edges, so round the recovery
        correct_degree_sequence = check_degree_sequence(partitions, np.round(A2_star))
        if correct_degree_sequence:
            adjacency_table = Graph.create_adjacency_table(n, A2_star)
            #print('p=', partitions, '\ng=', adjacency_table)
            multipartite_graph_recognizer = Multipartite_graph_recognizer(
                partitions, adjacency_table)
            ok = multipartite_graph_recognizer.check()
            print('multipartite:', multipartite_graph_recognizer.match)
        else:
            print('incorrect degree sequence')
            ok = False
        np.set_printoptions(suppress=True)
        print('Problem status: ', status)
        cycle = is_cycle(Graph.create_adjacency_list(n, A1_star))
        print('Is cycle: ', cycle)
        print('A2 correct: ', ok)
        if cycle and ok:
            correct_count += 1
    return correct_count
    (10, 12), (10, 13), (11, 13), (11, 14), (12, 14),
)

#A_matrix = bicycle(n)
#A = Graph.create_adjacency_list(n,A_matrix)

graph_denoiser = GraphDenoiser(n, A)

# add gaussian noise to cycle
A_matrix = Graph.create_adjacency_matrix(n, A)
A_matrix_noisy = perturb_matrix(A_matrix, 0.2)

epsilon_vector = np.zeros(n)
status, problem_value, A_recovered = graph_denoiser.denoise(
    A_matrix_noisy, epsilon_vector)

np.set_printoptions(precision=3, suppress=True)
print('Problem status: ', status)
print('Norm value: ', problem_value)
print('Recovered A: \n', A_recovered)
print('is cycle graph: ',
      is_cycle(Graph.create_adjacency_list(n, A_recovered)))


def set_visualiser_attributes(graph_visualiser):
    graph_visualiser.A.node_attr['shape'] = 'circle'