Example #1
    def __init__(self):
        self.NODE_LEVEL = {
            "attractor_basin":
            FeatureMeta(AttractorBasinCalculator, {"ab"}),
            "average_neighbor_degree":
            FeatureMeta(AverageNeighborDegreeCalculator, {"avg_nd"}),
            "betweenness_centrality":
            FeatureMeta(BetweennessCentralityCalculator, {"betweenness"}),
            "bfs_moments":
            FeatureMeta(BfsMomentsCalculator, {"bfs"}),
            "closeness_centrality":
            FeatureMeta(ClosenessCentralityCalculator, {"closeness"}),
            "communicability_betweenness_centrality":
            FeatureMeta(CommunicabilityBetweennessCentralityCalculator,
                        {"communicability"}),
            "eccentricity":
            FeatureMeta(EccentricityCalculator, {"ecc"}),
            "fiedler_vector":
            FeatureMeta(FiedlerVectorCalculator, {"fv"}),
            "flow":
            FeatureMeta(FlowCalculator, {}),
            "general":
            FeatureMeta(GeneralCalculator, {"gen"}),
            "hierarchy_energy":
            FeatureMeta(HierarchyEnergyCalculator, {"hierarchy"}),
            "k_core":
            FeatureMeta(KCoreCalculator, {"kc"}),
            "load_centrality":
            FeatureMeta(LoadCentralityCalculator, {"load_c"}),
            "louvain":
            FeatureMeta(LouvainCalculator, {"lov"}),
            "motif3":
            FeatureMeta(nth_nodes_motif(3), {"m3"}),
            "page_rank":
            FeatureMeta(PageRankCalculator, {"pr"}),
            "motif4":
            FeatureMeta(nth_nodes_motif(4), {"m4"}),
        }

        self.MOTIFS = {
            "motif3": FeatureMeta(nth_nodes_motif(3), {"m3"}),
            "motif4": FeatureMeta(nth_nodes_motif(4), {"m4"})
        }
Example #2
                        help='The dataset to use.')
    parser.add_argument('--prefix',
                        type=str,
                        default="",
                        help='The prefix of the products dir name.')

    args = parser.parse_args()
    # args.cuda = not args.no_cuda and torch.cuda.is_available()
    if not torch.cuda.is_available():
        args.cuda = None
    return args


NEIGHBOR_FEATURES = {
    "first_neighbor_histogram":
    FeatureMeta(nth_neighbor_calculator(1), {"fnh", "first_neighbor"}),
    "second_neighbor_histogram":
    FeatureMeta(nth_neighbor_calculator(2), {"snh", "second_neighbor"}),
}


def accuracy(output, labels):
    preds = output.max(1)[1].type_as(labels)
    correct = preds.eq(labels).double()
    correct = correct.sum()
    return correct / len(labels)


def get_features():
    # if config["feat_type"] == "neighbors":
    #     feature_meta = NEIGHBOR_FEATURES
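For context, the `accuracy` helper above turns raw class scores into predictions via argmax and compares them to integer labels. A minimal self-contained check (the tensors below are made up for illustration):

import torch

# Toy 3-sample, 2-class scores; the argmax predictions are [1, 0, 1].
output = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
labels = torch.tensor([1, 0, 0])

preds = output.max(1)[1].type_as(labels)              # tensor([1, 0, 1])
acc = preds.eq(labels).double().sum() / len(labels)
print(acc)  # tensor(0.6667, dtype=torch.float64)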
Example #3
import networkx as nx

from features_infra.feature_calculators import NodeFeatureCalculator, FeatureMeta


class BetweennessCentralityCalculator(NodeFeatureCalculator):
    def __init__(self, *args, normalized=False, **kwargs):
        super(BetweennessCentralityCalculator, self).__init__(*args, **kwargs)
        self._is_normalized = normalized

    def _calculate(self, include: set):
        self._features = nx.betweenness_centrality(
            self._gnx, normalized=self._is_normalized)

    def is_relevant(self):
        return True


feature_entry = {
    "betweenness_centrality":
    FeatureMeta(BetweennessCentralityCalculator, {"betweenness"}),
}

if __name__ == "__main__":
    from measure_tests.specific_feature_test import test_specific_feature
    test_specific_feature(BetweennessCentralityCalculator,
                          is_max_connected=True)
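For reference, `_calculate` above simply wraps `nx.betweenness_centrality`; it can be sanity-checked on a toy graph (made up for illustration):

import networkx as nx

# Path 0-1-2-3: only the two middle nodes lie on shortest paths between other pairs.
g = nx.path_graph(4)
print(nx.betweenness_centrality(g, normalized=False))
# {0: 0.0, 1: 2.0, 2: 2.0, 3: 0.0}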
Example #4
        }
        max_b_u = float(max(b_u.values()))

        for node in self._gnx:
            # the delta determines whether this node is to be considered
            if (b_u[node] / max_b_u) <= self._threshold:
                self._features[node] = 0
                continue

            udists = undirected_dists[node]
            dists = directed_dists[node]

            # pair up corresponding values from two dictionaries that share the same keys
            # saving the data as np.array type
            num, denom = map(np.array,
                             zip(*((udists[n], dists[n]) for n in dists)))

            num = num[denom != 0]
            denom = denom[denom != 0]

            self._features[node] = np.sum(num / denom) / float(b_u[node])


feature_entry = {
    "flow": FeatureMeta(FlowCalculator, {}),
}

if __name__ == "__main__":
    from measure_tests.specific_feature_test import test_specific_feature
    test_specific_feature(FlowCalculator, is_max_connected=True)
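The `map(np.array, zip(*(...)))` idiom in `_calculate` above pairs matching values from two dictionaries with shared keys and transposes them into two aligned arrays. A standalone illustration with made-up numbers:

import numpy as np

udists = {"a": 1, "b": 4, "c": 2}   # toy undirected distances
dists = {"a": 1, "b": 2, "c": 0}    # toy directed distances

# Build (undirected, directed) pairs keyed by the directed dict, then transpose.
num, denom = map(np.array, zip(*((udists[n], dists[n]) for n in dists)))

num = num[denom != 0]               # drop entries with a zero directed distance
denom = denom[denom != 0]
print(np.sum(num / denom))          # 1.0 + 2.0 = 3.0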
Example #5
import networkx as nx

from features_infra.feature_calculators import NodeFeatureCalculator, FeatureMeta


class CommunicabilityBetweennessCentralityCalculator(NodeFeatureCalculator):
    def _calculate(self, include: set):
        self._features = nx.communicability_betweenness_centrality(self._gnx)

    def is_relevant(self):
        return not self._gnx.is_directed()


feature_entry = {
    "communicability_betweenness_centrality":
    FeatureMeta(CommunicabilityBetweennessCentralityCalculator,
                {"communicability"}),
}

if __name__ == "__main__":
    from measure_tests.specific_feature_test import test_specific_feature
    test_specific_feature(CommunicabilityBetweennessCentralityCalculator,
                          is_max_connected=True)
Example #6
    def _calculate(self, include: set):
        for node in self._gnx:
            # calculate BFS distances
            distances = nx.single_source_shortest_path_length(self._gnx, node)
            # distances.pop(node)
            # if not distances:
            #     self._features[node] = [0., 0.]
            #     continue
            node_dist = Counter(distances.values())
            dists, weights = zip(*node_dist.items())
            # This was in the previous version
            # instead of the above commented fix
            adjusted_dists = np.asarray([x + 1 for x in dists])
            weights = np.asarray(weights)
            self._features[node] = [
                self.weighted_avg_and_std(adjusted_dists, weights)
            ]

    def _get_feature(self, element):
        return list(self._features[element])


feature_entry = {
    "bfs_moments": FeatureMeta(BfsMomentsCalculator, {"bfs"}),
}

if __name__ == "__main__":
    from measure_tests.specific_feature_test import test_specific_feature

    test_specific_feature(BfsMomentsCalculator, is_max_connected=True)
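The `weighted_avg_and_std` method is not shown in this snippet; a plausible implementation, assuming it returns the weighted mean and (biased) weighted standard deviation of the shifted BFS-distance histogram, would be:

import numpy as np

def weighted_avg_and_std(values, weights):
    # Weighted mean, then the variance of the values around that mean.
    average = np.average(values, weights=weights)
    variance = np.average((values - average) ** 2, weights=weights)
    return float(average), float(np.sqrt(variance))

# Toy histogram: shifted distances and how many nodes sit at each distance.
print(weighted_avg_and_std(np.array([1, 2, 3]), np.array([1, 2, 1])))  # (2.0, ~0.707)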
Example #7
    def _calculate_dep(self, include: set):
        # Working on every connected component by itself
        self._features = dict(
            zip(self._gnx, alg_connectivity.fiedler_vector(self._gnx)))

    def _calculate(self, include: set):
        self._features = {}

        for graph in nx.connected_component_subgraphs(self._gnx):
            if len(graph) < 2:
                self._features.update(zip(graph.nodes(), [0.] * len(graph)))
            else:
                self._features.update(
                    zip(graph.nodes(),
                        map(float, alg_connectivity.fiedler_vector(graph))))

    def is_relevant(self):
        # Fiedler vector also works only on connected undirected graphs
        # so if gnx is not connected we shall expect an exception: networkx.exception.NetworkXError
        # return (not self._gnx.is_directed()) and (nx.is_connected(self._gnx.to_undirected()))
        return not self._gnx.is_directed()


feature_entry = {
    "fiedler_vector": FeatureMeta(FiedlerVector, {"fv"}),
}

if __name__ == "__main__":
    from measure_tests.specific_feature_test import test_specific_feature
    test_specific_feature(FiedlerVector, is_max_connected=True)
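Note that `nx.connected_component_subgraphs` was removed in NetworkX 2.4, so on current versions `_calculate` above needs the documented replacement; a drop-in sketch:

import networkx as nx

def connected_component_subgraphs(gnx):
    # Equivalent of the call removed in NetworkX 2.4 (copies preserve the old behaviour).
    return (gnx.subgraph(c).copy() for c in nx.connected_components(gnx))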
Example #8
from features_algorithms.vertices.eccentricity import EccentricityCalculator
from features_algorithms.vertices.fiedler_vector import FiedlerVector
from features_algorithms.vertices.flow import FlowCalculator
from features_algorithms.vertices.general import GeneralCalculator
from features_algorithms.vertices.hierarchy_energy import HierarchyEnergyCalculator
from features_algorithms.vertices.k_core import KCoreCalculator
from features_algorithms.vertices.load_centrality import LoadCentralityCalculator
from features_algorithms.vertices.louvain import LouvainCalculator
# from features_algorithms.vertices.neighbor_nodes_histogram import nth_neighbor_calculator
from features_algorithms.vertices.motifs import nth_nodes_motif
from features_algorithms.vertices.page_rank import PageRankCalculator
from features_infra.feature_calculators import FeatureMeta, FeatureCalculator

NODE_FEATURES = {
    "attractor_basin":
    FeatureMeta(AttractorBasinCalculator, {"ab"}),
    "average_neighbor_degree":
    FeatureMeta(AverageNeighborDegreeCalculator, {"avg_nd"}),
    "betweenness_centrality":
    FeatureMeta(BetweennessCentralityCalculator, {"betweenness"}),
    "bfs_moments":
    FeatureMeta(BfsMomentsCalculator, {"bfs"}),
    "closeness_centrality":
    FeatureMeta(ClosenessCentralityCalculator, {"closeness"}),
    "communicability_betweenness_centrality":
    FeatureMeta(CommunicabilityBetweennessCentralityCalculator,
                {"communicability"}),
    "eccentricity":
    FeatureMeta(EccentricityCalculator, {"ecc"}),
    "fiedler_vector":
    FeatureMeta(FiedlerVector, {"fv"}),
Example #9
import os
import sys

sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath('../..'))
sys.path.append(os.path.abspath('../../..'))
sys.path.append(os.path.abspath('src'))
sys.path.append(os.path.abspath('src/accelerated_graph_features'))

from features_infra.feature_calculators import NodeFeatureCalculator, FeatureMeta
from features_algorithms.accelerated_graph_features.src import node_page_rank


class PageRankCalculator(NodeFeatureCalculator):
    def __init__(self, *args, alpha=0.9, **kwargs):
        super(PageRankCalculator, self).__init__(*args, **kwargs)
        self._alpha = alpha

    def is_relevant(self):
        # Undirected graphs will be converted to a directed
        #       graph with two directed edges for each undirected edge.
        return True

    def _calculate(self, include: set):
        self._features = node_page_rank(self._gnx, dumping=self._alpha)


feature_entry = {
    "page_rank": FeatureMeta(PageRankCalculator, {"pr"}),
}
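For comparison, the non-accelerated path would typically use networkx's own PageRank, where `alpha` is the damping factor; the `dumping` keyword of `node_page_rank` presumably plays the same role, but that is an assumption about the accelerated API. A toy check of the networkx side:

import networkx as nx

# Made-up directed graph; the result is a node -> score dict that sums to 1.0.
g = nx.DiGraph([(0, 1), (1, 2), (2, 0), (2, 1)])
print(nx.pagerank(g, alpha=0.9))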
Example #10
from features_infra.feature_calculators import NodeFeatureCalculator, FeatureMeta


class GeneralCalculator(NodeFeatureCalculator):
    def is_relevant(self):
        return True

    def _calculate(self, include: set):
        if self._gnx.is_directed():
            self._features = {
                node: (in_deg, out_deg)
                for (node, out_deg), (_, in_deg) in zip(
                    self._gnx.out_degree(), self._gnx.in_degree())
            }
        else:
            self._features = {node: deg for node, deg in self._gnx.degree()}


feature_entry = {
    "general": FeatureMeta(GeneralCalculator, {"gen"}),
}

if __name__ == "__main__":
    from measure_tests.specific_feature_test import test_specific_feature
    test_specific_feature(GeneralCalculator, is_max_connected=True)
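A quick look at what `_calculate` above produces on a made-up directed graph: each node maps to an `(in_deg, out_deg)` tuple, and the `zip` works because both degree views iterate nodes in the same order.

import networkx as nx

g = nx.DiGraph([(0, 1), (0, 2), (2, 1)])

features = {
    node: (in_deg, out_deg)
    for (node, out_deg), (_, in_deg) in zip(g.out_degree(), g.in_degree())
}
print(features)  # {0: (0, 2), 1: (2, 0), 2: (1, 1)}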
Example #11
import os
import sys

sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath('../..'))
sys.path.append(os.path.abspath('../../..'))
sys.path.append(os.path.abspath('src'))
sys.path.append(os.path.abspath('src/accelerated_graph_features'))

from features_infra.feature_calculators import NodeFeatureCalculator, FeatureMeta
from features_algorithms.accelerated_graph_features.src import k_core


class KCoreCalculator(NodeFeatureCalculator):
    def is_relevant(self):
        return True

    def _calculate(self, include: set):
        self._features = k_core(self._gnx)


feature_entry = {
    "k_core": FeatureMeta(KCoreCalculator, {"kc"}),
}
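The accelerated `k_core` call above presumably returns a per-node core number, mirroring `nx.core_number` (that mapping is an assumption about the accelerated API). The pure-networkx equivalent on a toy graph:

import networkx as nx

g = nx.Graph([(0, 1), (0, 2), (1, 2), (2, 3)])  # a triangle plus one pendant node
print(nx.core_number(g))  # {0: 2, 1: 2, 2: 2, 3: 1}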
Example #12
        self._level = level
        self._gpu = gpu
        self._device = device
        self._print_name += "_%d" % (self._level,)

    def is_relevant(self):
        return True

    @classmethod
    def print_name(cls, level=None):
        print_name = super(MotifsNodeCalculator, cls).print_name()
        if level is None:
            return print_name
        return "%s_%d_C_kernel" % (print_name, level)

    def _calculate(self, include=None):
        self._features = motif(self._gnx, level=self._level, gpu=self._gpu, cudaDevice=self._device)

    def _get_feature(self, element):
        return np.array(self._features[element])


def nth_nodes_motif(motif_level, gpu, device):
    return partial(MotifsNodeCalculator, level=motif_level, gpu=gpu, device=device)


feature_node_entry = {
    "motif3_c": FeatureMeta(nth_nodes_motif(3, gpu=False, device=2), {"m3_c"}),
    "motif4_c": FeatureMeta(nth_nodes_motif(4, gpu=False, device=2), {"m4_c"}),
}
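`nth_nodes_motif` is a factory built on `functools.partial`: the returned object behaves like the calculator class with `level`, `gpu`, and `device` pre-bound, so `FeatureMeta` can later instantiate it the same way it would a plain class. A stripped-down illustration with a stand-in class:

from functools import partial

class ToyCalculator:  # stand-in for MotifsNodeCalculator
    def __init__(self, graph, level=3, gpu=False, device=0):
        self.graph, self.level, self.gpu, self.device = graph, level, gpu, device

def nth_toy_motif(level, gpu, device):
    return partial(ToyCalculator, level=level, gpu=gpu, device=device)

factory = nth_toy_motif(4, gpu=False, device=2)
calc = factory("some-graph")                # same call signature as ToyCalculator(graph)
print(calc.level, calc.gpu, calc.device)    # 4 False 2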
Example #13
import networkx as nx

from features_infra.feature_calculators import NodeFeatureCalculator, FeatureMeta


class LoadCentralityCalculator(NodeFeatureCalculator):
    def is_relevant(self):
        return True

    def _calculate(self, include: set):
        self._features = nx.load_centrality(self._gnx)


feature_entry = {
    "load_centrality": FeatureMeta(LoadCentralityCalculator, {"load_c"}),
}

if __name__ == "__main__":
    from measure_tests.specific_feature_test import test_specific_feature
    test_specific_feature(LoadCentralityCalculator, is_max_connected=True)
Example #14
    def __init__(self):
        self.NODE_LEVEL = {
            "attractor_basin":
            FeatureMeta(AttractorBasinCalculator, {"ab"}),  # Directed
            "average_neighbor_degree":
            FeatureMeta(AverageNeighborDegreeCalculator, {"avg_nd"}),  # Any
            "betweenness_centrality":
            FeatureMeta(BetweennessCentralityCalculator,
                        {"betweenness"}),  # Any
            "bfs_moments":
            FeatureMeta(BfsMomentsCalculator, {"bfs"}),  # Any
            "closeness_centrality":
            FeatureMeta(ClosenessCentralityCalculator, {"closeness"}),  # Any
            "communicability_betweenness_centrality":
            FeatureMeta(CommunicabilityBetweennessCentralityCalculator,
                        {"communicability"}),  # Undirected
            "eccentricity":
            FeatureMeta(EccentricityCalculator, {"ecc"}),  # Any
            "fiedler_vector":
            FeatureMeta(FiedlerVectorCalculator,
                        {"fv"}),  # Undirected (due to a code limitation)
            "flow":
            FeatureMeta(FlowCalculator, {}),  # Directed
            # General - degree features: directed graphs get (in_deg, out_deg) per vertex, undirected graphs get the plain degree.
            "general":
            FeatureMeta(GeneralCalculator, {"gen"}),  # Any
            "hierarchy_energy":
            FeatureMeta(HierarchyEnergyCalculator,
                        {"hierarchy"}),  # Directed (but works for any)
            "k_core":
            FeatureMeta(KCoreCalculator, {"kc"}),  # Any
            "load_centrality":
            FeatureMeta(LoadCentralityCalculator, {"load_c"}),  # Any
            "louvain":
            FeatureMeta(LouvainCalculator, {"lov"}),  # Undirected
            "motif3":
            FeatureMeta(nth_nodes_motif(3), {"m3"}),  # Any
            "page_rank":
            FeatureMeta(PageRankCalculator,
                        {"pr"}),  # Directed (but works for any)
            "motif4":
            FeatureMeta(nth_nodes_motif(4), {"m4"}),  # Any
            # new
            "eigenvector_centrality":
            FeatureMeta(EigenvectorCentralityCalculator, {"eigenvector"}),
            "clustering_coefficient":
            FeatureMeta(ClusteringCoefficientCalculator, {"clustering"}),
            "square_clustering_coefficient":
            FeatureMeta(SquareClusteringCoefficientCalculator,
                        {"square_clustering"}),
            "generalized_degree":
            FeatureMeta(GeneralizedDegreeCalculator, {"generalized_degree"}),
            "all_pairs_shortest_path_length":
            FeatureMeta(AllPairsShortestPathLengthCalculator,
                        {"all_pairs_shortest_path_length"}),
            "all_pairs_shortest_path":
            FeatureMeta(AllPairsShortestPathCalculator,
                        {"all_pairs_shortest_path"}),
        }

        self.MOTIFS = {
            "motif3": FeatureMeta(nth_nodes_motif(3), {"m3"}),
            "motif4": FeatureMeta(nth_nodes_motif(4), {"m4"})
        }
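Since the registry above annotates each feature with the graph types it supports, those annotations can be mirrored by a plain name-based filter; a minimal sketch (the groupings restate the comments above, and the helper name is made up):

# Restating the per-feature annotations from the registry above.
DIRECTED_ONLY = {"attractor_basin", "flow"}
UNDIRECTED_ONLY = {
    "communicability_betweenness_centrality", "fiedler_vector", "louvain",
}

def relevant_feature_names(node_level, is_directed):
    # Hypothetical helper: drop the features whose annotation rules them out.
    excluded = UNDIRECTED_ONLY if is_directed else DIRECTED_ONLY
    return [name for name in node_level if name not in excluded]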
Example #15
import networkx as nx

from features_infra.feature_calculators import EdgeFeatureCalculator, FeatureMeta


class EdgeCurrentFlowCalculator(EdgeFeatureCalculator):
    def _calculate(self, include: set):
        self._features = nx.edge_current_flow_betweenness_centrality(self._gnx)

    def is_relevant(self):
        return True


feature_entry = {
    "edge_current_flow": FeatureMeta(EdgeCurrentFlowCalculator, {"e_flow"}),
}

if __name__ == '__main__':
    pass
Example #16
        for node in self._gnx:
            out_dist = ab_out_dist.get(node, {})
            in_dist = ab_in_dist.get(node, {})

            self._features[node] = self._default_val
            denominator = sum((dist / avg_out[m]) * (self._alpha ** (-m)) for m, dist in out_dist.items())
            if 0 != denominator:
                numerator = sum((dist / avg_in[m]) * (self._alpha ** (-m)) for m, dist in in_dist.items())
                self._features[node] = numerator / denominator

    @staticmethod
    def _calculate_average_per_dist(num_nodes, count_dist):
        # aggregate the per-node counters in "count_dist" into one total count per unique distance in "all_dist_count"
        all_dist_count = {}
        for counter in count_dist.values():
            for dist, occurrences in counter.items():
                all_dist_count[dist] = all_dist_count.get(dist, 0) + occurrences

        # average count per distance over all nodes
        return {dist: float(count) / num_nodes for dist, count in all_dist_count.items()}


feature_entry = {
    "attractor_basin": FeatureMeta(AttractorBasinCalculator, {"ab"}),
}


if __name__ == "__main__":
    from measure_tests.specific_feature_test import test_specific_feature
    test_specific_feature(AttractorBasinCalculator, is_max_connected=True)
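The `_calculate_average_per_dist` helper above collapses the per-node distance histograms into one average count per distance; its logic can be traced on made-up data:

from collections import Counter

count_dist = {                 # toy per-node histograms: distance -> nodes reached
    "a": Counter({1: 2, 2: 1}),
    "b": Counter({1: 1}),
}
num_nodes = 2

all_dist_count = {}
for counter in count_dist.values():
    for dist, occurrences in counter.items():
        all_dist_count[dist] = all_dist_count.get(dist, 0) + occurrences

print({dist: float(count) / num_nodes for dist, count in all_dist_count.items()})
# {1: 1.5, 2: 0.5}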