def predict(self, node_pairs):
        balance_theory_triads = triads.triadic_enumeration(
            self.graph,
            set(['021C', '111D', '111U', '030C', '201', '120C', '210'])
        )
        assesed_triads = set()
        for triad_type, triad_list in balance_theory_triads.items():
            for current_triad in triad_list:
                current_triad_sorted = tuple(sorted(current_triad))
                if current_triad_sorted not in assesed_triads:
                    assesed_triads.add(current_triad_sorted)
                    edges = triads.get_missing_edges_for_bt(
                        self.graph.subgraph(current_triad)
                        )
                    for edge in edges:
                        add_or_update_edge(
                            self.predicted_graph,
                            edge,
                            'Endogenous Social Theory',
                            f'BalanceTheory.{triad_type}',
                            1.0
                        )

        def __repr__(self):
            return self.__str__()

        def __str__(self):
            return 'BalanceTheory'
    def predict(self, node_pairs):
        """Predict missing Balance-Theory edges, gated by actor similarity.

        Like the endogenous Balance-Theory predictor, but a triad is only
        completed when every pair of its three nodes exceeds the configured
        similarity threshold (exogenous attribute data gates the rule).

        :param node_pairs: unused here; kept for the common predictor interface.
        """
        balance_theory_triads = triads.triadic_enumeration(
            self.graph,
            {'021C', '111D', '111U', '030C', '201', '120C', '210'}
        )
        # Node triples already handled, stored order-independently.
        assessed_triads = set()
        for triad_type, triad_list in balance_theory_triads.items():
            for current_triad in triad_list:
                current_triad_sorted = tuple(sorted(current_triad))
                if current_triad_sorted in assessed_triads:
                    continue
                assessed_triads.add(current_triad_sorted)
                u = self.graph.nodes[current_triad_sorted[0]]
                v = self.graph.nodes[current_triad_sorted[1]]
                w = self.graph.nodes[current_triad_sorted[2]]
                # All three pairwise similarities must exceed the threshold.
                if not all(
                    _get_similarity(a, b, self.weightings) > self.threshold
                    for a, b in ((u, v), (v, w), (u, w))
                ):
                    continue
                edges = triads.get_missing_edges_for_bt(
                    self.graph.subgraph(current_triad)
                )
                for edge in edges:
                    add_or_update_edge(
                        self.predicted_graph,
                        edge,
                        'Exogenous Social Theory',
                        f'BalanceTheory.{triad_type}',
                        1.0
                    )

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return 'BalanceTheory'
 def _add_classification_to_predicted_graph(self, node_pairs, train_c_df, test_c_df, predicted_graph_train, predicted_graph_test):
     """Add ML-classifier predictions with probability >= 0.5 as edges.

     Depending on ``self.validation``, either the test results are written
     to ``predicted_graph_test`` or the train results to
     ``predicted_graph_train``. One classifier column per edge attribute.

     :param node_pairs: DataFrame column of (u, v) node pairs, concatenated
         with the classification scores so rows can be mapped back to edges.
     """
     columns_header = self._create_classifiers_columnsheader()
     if self.validation:
         c_df, target_graph = test_c_df, predicted_graph_test
     else:
         c_df, target_graph = train_c_df, predicted_graph_train
     c_df = pd.concat([node_pairs, c_df], axis=1)
     for _, row in c_df.iterrows():
         node_pair = row['node_pairs']
         for col in columns_header:
             # 0.5 is the usual decision boundary of a binary classifier.
             if row[col] >= 0.5:
                 add_or_update_edge(
                     graph=target_graph,
                     edge=(node_pair[0], node_pair[1]),
                     method_name='ML-Classification',
                     method_specified=col,
                     score=row[col]
                 )
 def _add_topology_to_predicted_graph(self, train_t_df, test_t_df, predicted_graph_train, predicted_graph_test):
     """Add topology-based link-prediction scores above threshold as edges.

     Depending on ``self.validation``, either the test scores are written
     to ``predicted_graph_test`` or the train scores to
     ``predicted_graph_train``. Each topology metric column uses its own
     threshold from ``self._get_topology_thresholds``.
     """
     columns_header = self._get_topology_columnsheader()
     if self.validation:
         t_df, target_graph = test_t_df, predicted_graph_test
     else:
         t_df, target_graph = train_t_df, predicted_graph_train
     thresholds = self._get_topology_thresholds(t_df, columns_header)
     for _, row in t_df.iterrows():
         node_pair = row['node_pairs']
         for col in columns_header:
             if row[col] > thresholds[col]:
                 add_or_update_edge(
                     graph=target_graph,
                     edge=(node_pair[0], node_pair[1]),
                     method_name='Topology',
                     method_specified=str(col),
                     score=row[col])
    def predict(self, node_pairs):
        """Predict edges that would span structural holes between communities.

        Nodes with the lowest Burt constraint inside each Girvan-Newman
        community are treated as potential brokers. Each broker is connected
        to the counterpart from another community that yields the lowest
        combined constraint, provided that improves on the broker's
        current constraint.

        :param node_pairs: unused here; kept for the common predictor interface.
        """
        # Community-detection notes (kept from the original author):
        # kernighan_lin_bisection: undirected graphs only
        # k-clique: requires k (smallest community)
        # fluid communities: requires the number of communities
        # girvan_newman: suitable here

        # Calculate constraints and replace NaN values by 1, so nodes
        # without a defined constraint are treated as maximally constrained
        # and never selected as brokers — TODO confirm this interpretation.
        constraints = structuralholes.constraint(self.graph)
        constraints_cleared = {key: (val if not np.isnan(val) else 1)
                               for key, val in constraints.items()}

        # First-level structural communities.
        communities = next(community.girvan_newman(self.graph))

        # Per community: the nodes whose constraint lies at or below the
        # configured percentile (the community's candidate brokers).
        com_brokers = {}
        for com_index, com in enumerate(communities):
            community_constraints = {
                node: constraints_cleared[node] for node in com}
            # NOTE(review): 'interpolation' was renamed to 'method' in
            # NumPy 1.22 and removed in 2.0 — migrate when NumPy is bumped.
            nth_percentile_node_constraint = np.percentile(
                list(community_constraints.values()),
                self.precentile_constraints,
                axis=0,
                interpolation='nearest'
            )
            com_brokers[com_index] = [
                node for node, constraint in community_constraints.items()
                if constraint <= nth_percentile_node_constraint
            ]

        # Combine brokers with top brokers from other communities and
        # predict the edge with the lowest resulting constraint.
        for com_index, brokers in com_brokers.items():
            # Loop-invariant: counterparts depend only on the community.
            possible_counterparts = self._get_combinations(com_brokers, com_index)
            for broker in brokers:
                results = self._get_combination_results(self.graph, broker, possible_counterparts)
                if not results:
                    continue
                chosen_counterpart = min(results, key=results.get)
                # Only predict if the new tie lowers the broker's constraint.
                if results[chosen_counterpart] < constraints_cleared[broker]:
                    add_or_update_edge(
                        graph=self.predicted_graph,
                        edge=(broker, chosen_counterpart),
                        method_name='Endogenous Social Theory',
                        method_specified='StructuralHoleTheory',
                        score=1.0
                    )
    def predict(self, node_pairs):
        """Predict reciprocal edges (Social Exchange Theory).

        For every directed edge u->v without a counterpart v->u, predict
        the reciprocating edge v->u with score 1.0.

        :param node_pairs: unused here; kept for the common predictor interface.
        """
        for edge in self.graph.edges:
            reverse_edge = (edge[1], edge[0])
            if not self.graph.has_edge(*reverse_edge):
                add_or_update_edge(
                    self.predicted_graph,
                    reverse_edge,
                    'Endogenous Social Theory',
                    'SocialExchangeTheory',
                    1.0
                )

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return 'SocialExchangeTheory'
    def predict(self, node_pairs):
        """Predict reciprocal edges between sufficiently similar actors.

        For every directed edge u->v without a counterpart v->u, predict
        v->u — but only when the attribute similarity of the two endpoints
        exceeds the threshold (Resource Dependence Theory).

        :param node_pairs: unused here; kept for the common predictor interface.
        """
        for edge in self.graph.edges:
            reverse_edge = (edge[1], edge[0])
            target_attrs = self.graph.nodes[edge[1]]
            source_attrs = self.graph.nodes[edge[0]]
            if not self.graph.has_edge(*reverse_edge) and \
                    _get_similarity(target_attrs, source_attrs, self.weightings) > self.threshold:
                add_or_update_edge(
                    self.predicted_graph,
                    reverse_edge,
                    'Exogenous Social Theory',
                    'ResourceDependenceTheory',
                    1.0
                )

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return 'ResourceDependenceTheory'
    def predict(self, node_pairs):
        """Predict edges between similar, not-yet-connected node pairs.

        Homophily: every ordered pair (u, v) of distinct nodes whose
        attribute similarity exceeds the threshold gets a predicted edge,
        unless the edge already exists.

        :param node_pairs: unused here; kept for the common predictor interface.
        """
        for u in self.graph.nodes:
            # Hoisted out of the inner loop: u's attributes do not change.
            u_attrs = self.graph.nodes[u]
            for v in self.graph.nodes:
                if u == v or self.graph.has_edge(u, v):
                    continue
                v_attrs = self.graph.nodes[v]
                if _get_similarity(u_attrs, v_attrs, self.weightings) > self.threshold:
                    add_or_update_edge(
                        graph=self.predicted_graph,
                        edge=(u, v),
                        method_name='Exogenous Social Theory',
                        method_specified='HomophilyTheories',
                        score=1.0
                    )

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return 'HomophilyTheories'
    def predict(self, node_pairs):
        """Predict ties from central nodes to distant central nodes, on
        similarity-filtered personal subgraphs (exogenous Collective Action).

        For every node u, a personal subgraph keeps only nodes similar
        enough to u; Katz centrality is computed on it. Nodes whose own
        centrality reaches the configured percentile get predicted edges to
        the most central nodes reachable within ``self.max_distance`` (but
        farther than one hop) in their personal subgraph.

        :param node_pairs: unused here; kept for the common predictor interface.
        """
        node_centralities = {}    # u -> u's centrality in its own subgraph
        graph_centralities = {}   # u -> full centrality dict of u's subgraph
        subgraphs = {}            # u -> similarity-filtered copy of the graph
        for u in self.graph.nodes:
            u_attrs = self.graph.nodes[u]
            subgraphs[u] = self.graph.copy()
            for v in self.graph.nodes:
                if u == v:
                    continue
                v_attrs = self.graph.nodes[v]
                # Drop nodes that are not similar enough to u.
                if _get_similarity(u_attrs, v_attrs, self.weightings) <= self.threshold:
                    subgraphs[u].remove_node(v)
            # Katz centrality of u's subgraph, ordered ascending by value.
            local_centrality_ordered = dict(sorted(
                nx.katz_centrality(subgraphs[u]).items(),
                key=itemgetter(1)
            ))
            graph_centralities[u] = local_centrality_ordered
            node_centralities[u] = local_centrality_ordered[u]

        # Order nodes ascending by their own centrality.
        global_centrality_ordered = dict(sorted(
            node_centralities.items(), key=itemgetter(1)
        ))

        # NOTE(review): 'interpolation' was renamed to 'method' in NumPy
        # 1.22 and removed in 2.0 — migrate when NumPy is bumped.
        nth_percentile_centrality = np.percentile(
            list(global_centrality_ordered.values()),
            self.precentile_centrality,
            axis=0,
            interpolation='nearest'
        )

        # Keep the nodes at or above the centrality percentile.
        most_central_nodes = {
            k: v for k, v in global_centrality_ordered.items() if v >= nth_percentile_centrality
        }

        for central_node in most_central_nodes:
            # Shortest paths within the node's own subgraph (undirected view).
            shortest_paths = nx.single_source_shortest_path_length(
                subgraphs[central_node].to_undirected(as_view=True),
                central_node
            )
            # Drop the node itself, direct neighbours, and anything too far.
            for destination, length in list(shortest_paths.items()):
                if length <= 1 or length > self.max_distance:
                    del shortest_paths[destination]
            if not shortest_paths:
                continue
            distant_nodes = {
                key: graph_centralities[central_node][key] for key in shortest_paths
            }
            distant_nodes_ordered = dict(sorted(
                distant_nodes.items(), key=itemgetter(1)
            ))
            # Percentile cut-off selecting the most central distant nodes.
            nth_percentile_distant_nodes = np.percentile(
                list(distant_nodes_ordered.values()),
                self.precentile_distant_nodes,
                axis=0,
                interpolation='nearest'
            )
            for distant_node, distant_centrality in distant_nodes_ordered.items():
                if distant_centrality >= nth_percentile_distant_nodes:
                    add_or_update_edge(
                        self.predicted_graph,
                        (central_node, distant_node),
                        'Exogenous Social Theory',
                        'CollectiveActionTheory',
                        1.0
                    )

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return 'CollectiveActionTheory'
    def predict(self, node_pairs):
        """Predict ties from central nodes to distant central nodes
        (endogenous Collective Action Theory).

        Katz centrality is computed on the whole graph; nodes whose
        centrality reaches the configured percentile get predicted edges to
        the most central nodes reachable within ``self.max_distance`` (but
        farther than one hop).

        :param node_pairs: unused here; kept for the common predictor interface.
        """
        # nx.katz_centrality sometimes fails to converge on this data,
        # so the numpy-based solver is used instead (original author note).
        centrality = nx.katz_centrality_numpy(self.graph)

        # Order nodes ascending by centrality.
        centrality_ordered = dict(sorted(
            centrality.items(), key=itemgetter(1)
        ))

        # NOTE(review): 'interpolation' was renamed to 'method' in NumPy
        # 1.22 and removed in 2.0 — migrate when NumPy is bumped.
        nth_percentile_centrality = np.percentile(
            list(centrality_ordered.values()),
            self.precentile_centrality,
            axis=0,
            interpolation='nearest'
        )

        # Keep the nodes at or above the centrality percentile.
        most_central_nodes = {
            k: v for k, v in centrality_ordered.items() if v >= nth_percentile_centrality
        }

        # Shortest paths are computed on an undirected view of the graph.
        undirected_graph = self.graph.to_undirected(as_view=True)
        for central_node in most_central_nodes:
            shortest_paths = nx.single_source_shortest_path_length(
                undirected_graph, central_node
            )
            # Drop the node itself, direct neighbours, and anything too far.
            for destination, length in list(shortest_paths.items()):
                if length <= 1 or length > self.max_distance:
                    del shortest_paths[destination]
            if not shortest_paths:
                continue
            distant_nodes = {
                key: centrality_ordered[key] for key in shortest_paths
            }
            distant_nodes_ordered = dict(sorted(
                distant_nodes.items(), key=itemgetter(1)
            ))
            # Percentile cut-off selecting the most central distant nodes.
            nth_percentile_distant_nodes = np.percentile(
                list(distant_nodes_ordered.values()),
                self.precentile_distant_nodes,
                axis=0,
                interpolation='nearest'
            )
            for distant_node, distant_centrality in distant_nodes_ordered.items():
                if distant_centrality >= nth_percentile_distant_nodes:
                    add_or_update_edge(
                        self.predicted_graph,
                        (central_node, distant_node),
                        'Endogenous Social Theory',
                        'CollectiveActionTheory',
                        1.0
                    )

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return 'CollectiveActionTheory'