def _print_score(self, graph=None):
    """Compute partition-quality metrics for *graph* and optionally print them.

    Args:
        graph: graph to score; defaults to ``self.G`` when ``None``.

    Returns:
        list: ``[waste, cut_ratio, edges_cut, steps, mod, loneliness,
        max_perm]`` as produced by the ``utils`` scoring helpers.
    """
    # PEP 8: compare to None with identity, not equality.
    if graph is None:
        graph = self.G
    # waste, cut_ratio
    x = utils.score(graph, self.assignments, self.num_partitions)
    edges_cut, steps = utils.base_metrics(graph, self.assignments)
    mod = utils.modularity_wavg(graph, self.assignments, self.num_partitions)
    loneliness = utils.loneliness_score_wavg(graph,
                                             self.loneliness_score_param,
                                             self.assignments,
                                             self.num_partitions)
    max_perm = utils.run_max_perm(graph)
    if self.verbose > 1:
        print("{0:.5f}\t\t{1:.10f}\t{2}\t\t{3}\t\t\t{4}\t{5}\t{6}".format(
            x[0], x[1], edges_cut, steps, mod, loneliness, max_perm))
    return [x[0], x[1], edges_cut, steps, mod, loneliness, max_perm]
def get_graph_metrics(self):
    """Compute whole-graph partitioning metrics and append them to a CSV.

    Scores the complete graph ``self.G`` against the current
    ``self.assignments``: original waste/cut-ratio scoring, edges cut and
    communication volume, MaxPerm network permanence, and OSLOM community
    quality metrics.  When ``self.verbose > 0`` the config and metrics are
    printed; the row is always written to ``metrics.csv`` in
    ``self.OUTPUT_DIRECTORY``.

    Side effects: sets ``self.metrics_timestamp`` and
    ``self.metrics_filename``; writes graph files for OSLOM and appends one
    CSV row.
    """
    self.metrics_timestamp = datetime.datetime.now().strftime('%H%M%S')
    # NOTE: renamed from `f` so it is not shadowed by the fieldname loop
    # variables further down.
    base_name, _ = os.path.splitext(os.path.basename(self.DATA_FILENAME))
    self.metrics_filename = base_name + "-" + self.metrics_timestamp
    graph_metrics = {
        "file": self.metrics_timestamp,
        "num_partitions": self.num_partitions,
        "num_iterations": self.num_iterations,
        "prediction_model_cut_off": self.prediction_model_cut_off,
        "restream_batches": self.restream_batches,
        "use_virtual_nodes": self.use_virtual_nodes,
        "virtual_edge_weight": self.virtual_edge_weight,
    }
    # First 8 entries are config, the rest are computed metrics (the split
    # is relied on by the printing code below).
    graph_fieldnames = [
        "file",
        "num_partitions",
        "num_iterations",
        "prediction_model_cut_off",
        "restream_batches",
        "use_virtual_nodes",
        "virtual_edge_weight",
        "edges_cut",
        "waste",
        "cut_ratio",
        "total_communication_volume",
        "network_permanence",
        "Q",
        "NQ",
        "Qds",
        "intraEdges",
        "interEdges",
        "intraDensity",
        "modularity degree",
        "conductance",
        "expansion",
        "contraction",
        "fitness",
        "QovL",
    ]
    if self.verbose > 0:
        print("Complete graph with {} nodes".format(
            self.G.number_of_nodes()))

    file_oslom = utils.write_graph_files(
        self.OUTPUT_DIRECTORY,
        "{}-all".format(self.metrics_filename),
        self.G,
        quiet=True)

    # original scoring algorithm
    scoring = utils.score(self.G, self.assignments, self.num_partitions)
    graph_metrics.update({
        "waste": scoring[0],
        "cut_ratio": scoring[1],
    })

    # edges cut and communication volume
    edges_cut, steps = utils.base_metrics(self.G, self.assignments)
    graph_metrics.update({
        "edges_cut": edges_cut,
        "total_communication_volume": steps,
    })

    # MaxPerm
    max_perm = utils.run_max_perm(self.G)
    graph_metrics.update({"network_permanence": max_perm})

    # Community Quality metrics (OSLOM output written above)
    community_metrics = utils.run_community_metrics(
        self.OUTPUT_DIRECTORY, "{}-all".format(self.metrics_filename),
        file_oslom)
    graph_metrics.update(community_metrics)

    if self.verbose > 0:
        print("\nConfig")
        print("-------\n")
        for fieldname in graph_fieldnames[:8]:
            print("{}: {}".format(fieldname, graph_metrics[fieldname]))
        print("\nMetrics")
        print("-------\n")
        for fieldname in graph_fieldnames[8:]:
            print("{}: {}".format(fieldname, graph_metrics[fieldname]))

    # write metrics to CSV
    csv_file = os.path.join(self.OUTPUT_DIRECTORY, "metrics.csv")
    utils.write_metrics_csv(csv_file, graph_fieldnames, graph_metrics)
def _print_score(self, graph=None):
    """Compute the extended metric vector for *graph*, printing when verbose.

    Args:
        graph: graph to score; defaults to ``self.G`` when ``None``.

    Returns:
        list of 12 values: ``[waste, cut_ratio, edges_cut, steps, Qds,
        conductance, max_perm, rbse, nmi_score, fscore,
        |fscore - fscore_relabelled|, loneliness]``.  A zero vector of the
        same length is returned when metric computation is disabled.
    """
    if not self.compute_metrics_enabled:
        # Metrics disabled: return a placeholder vector of the same shape.
        return [0.0] * 12
    if graph is None:
        graph = self.G

    # waste, cut_ratio
    x = utils.score(graph, self.assignments, self.num_partitions)
    edges_cut, steps, cut_edges = utils.base_metrics(
        graph, self.assignments)

    # Non-overlapping community quality metrics (Infomap-based; a
    # Louvain-based variant exists in utils as an alternative).
    q_qds_conductance = utils.infomapModularityComQuality(
        graph, self.assignments, self.num_partitions)

    loneliness = utils.loneliness_score_wavg(graph,
                                             self.loneliness_score_param,
                                             self.assignments,
                                             self.num_partitions)
    # Weighted-average MaxPerm over partitions (not the whole-graph variant).
    max_perm = utils.wavg_max_perm(graph, self.assignments,
                                   self.num_partitions)
    rbse_list = utils.ratherBeSomewhereElseList(graph, self.assignments,
                                                self.num_partitions)
    rbse = utils.ratherBeSomewhereElseMetric(rbse_list)

    # NMI between prediction-model assignments and the current assignments,
    # restricted to nodes that have actually been assigned (partition >= 0).
    nmi_assignments = self.assignments.tolist()
    pred_assignments = self.assignments_prediction_model.tolist()
    pred_nmi_assignments = []
    actual_nmi_assignments = []
    if self.use_virtual_nodes:
        # Drop virtual nodes appended past the original node count so NMI
        # is computed over real nodes only.
        if len(nmi_assignments) > self.initial_number_of_nodes:
            nmi_assignments = nmi_assignments[:self.initial_number_of_nodes]
    for i, partition in enumerate(nmi_assignments):
        if partition >= 0:
            pred_nmi_assignments.append(pred_assignments[i])
            actual_nmi_assignments.append(partition)
    nmi_score = normalized_mutual_info_score(pred_nmi_assignments,
                                             actual_nmi_assignments)

    # F-scores comparing the prediction model to the actual assignments;
    # the relabelled variant matches partitions before scoring.
    fscore, fscore_relabelled = utils.fscores2(
        self.assignments_prediction_model, self.assignments,
        self.num_partitions)

    if self.verbose > 1:
        print(
            "{0:.5f}\t\t{1:.10f}\t{2}\t\t{3}\t\t\t{4:.5f}\t{5:.5f}\t{6}\t{7:.5f}\t{8:.10f}\t{9}\t{10}\t{11:.5f}"
            .format(x[0], x[1], edges_cut, steps, q_qds_conductance[1],
                    q_qds_conductance[2], max_perm, rbse, nmi_score, fscore,
                    abs(fscore - fscore_relabelled), loneliness))

    return [
        x[0], x[1], edges_cut, steps, q_qds_conductance[1],
        q_qds_conductance[2], max_perm, rbse, nmi_score, fscore,
        abs(fscore - fscore_relabelled), loneliness
    ]