Example #1
    def fit(self, graph: nx.classes.graph.Graph):
        """
        Fitting a Role2vec model.

        Arg types:
            * **graph** *(NetworkX graph)* - The graph to be embedded.
        """
        self._set_seed()
        self._check_graph(graph)
        walker = RandomWalker(self.walk_length, self.walk_number)
        walker.do_walks(graph)

        hasher = WeisfeilerLehmanHashing(
            graph=graph,
            wl_iterations=self.wl_iterations,
            attributed=False,
            erase_base_features=self.erase_base_features)

        node_features = hasher.get_node_features()
        documents = self._create_documents(walker.walks, node_features)

        model = Doc2Vec(documents,
                        vector_size=self.dimensions,
                        window=0,
                        min_count=self.min_count,
                        dm=0,
                        workers=self.workers,
                        sample=self.down_sampling,
                        iter=self.epochs,
                        alpha=self.learning_rate,
                        seed=self.seed)

        self._embedding = [
            model.docvecs[str(i)] for i, _ in enumerate(documents)
        ]
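A minimal usage sketch for the method above, assuming karateclub's Role2Vec class with its default constructor arguments and a connected graph whose nodes are labeled 0..n-1; the toy graph below is an assumption, not taken from the source:

import networkx as nx
from karateclub import Role2Vec

graph = nx.newman_watts_strogatz_graph(100, 5, 0.3)  # connected toy graph, nodes 0..99

model = Role2Vec()
model.fit(graph)                   # runs the fit() shown above
embedding = model.get_embedding()  # one row per node, one column per dimension

get_embedding() then stacks the per-node Doc2Vec vectors collected in self._embedding.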
Example #2
    def fit(self, graphs: List[nx.classes.graph.Graph]):
        """
        Fitting a GL2Vec model.

        Arg types:
            * **graphs** *(List of NetworkX graphs)* - The graphs to be embedded.
        """
        self._set_seed()
        self._check_graphs(graphs)
        graphs = [self._create_line_graph(graph) for graph in graphs]
        documents = [
            WeisfeilerLehmanHashing(graph, self.wl_iterations, False,
                                    self.erase_base_features)
            for graph in graphs
        ]
        documents = [
            TaggedDocument(words=doc.get_graph_features(), tags=[str(i)])
            for i, doc in enumerate(documents)
        ]

        model = Doc2Vec(documents,
                        vector_size=self.dimensions,
                        window=0,
                        min_count=self.min_count,
                        dm=0,
                        sample=self.down_sampling,
                        workers=self.workers,
                        iter=self.epochs,
                        alpha=self.learning_rate,
                        seed=self.seed)

        self._embedding = [
            model.docvecs[str(i)] for i, _ in enumerate(documents)
        ]
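As above, a hedged usage sketch assuming karateclub's GL2Vec class with default arguments; the input is a list of small connected graphs with nodes labeled 0..n-1 (the toy graphs are an assumption):

import networkx as nx
from karateclub import GL2Vec

graphs = [nx.newman_watts_strogatz_graph(30, 4, 0.2) for _ in range(20)]

model = GL2Vec()
model.fit(graphs)                  # converts each graph to its line graph, then embeds it
embedding = model.get_embedding()  # one row per input graph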
Example #3
    def fit(self, graphs):
        """
        Fitting a Graph2Vec model.

        Arg types:
            * **graphs** *(List of NetworkX graphs)* - The graphs to be embedded.
        """
        documents = [
            WeisfeilerLehmanHashing(graph, self.wl_iterations, self.attributed)
            for graph in graphs
        ]
        documents = [
            TaggedDocument(words=doc.extracted_features, tags=[str(i)])
            for i, doc in enumerate(documents)
        ]

        model = Doc2Vec(documents,
                        vector_size=self.dimensions,
                        window=0,
                        min_count=self.min_count,
                        dm=0,
                        sample=self.down_sampling,
                        workers=self.workers,
                        epochs=self.epochs,
                        alpha=self.learning_rate)

        self._embedding = [
            model.docvecs[str(i)] for i, _ in enumerate(documents)
        ]
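A similar sketch for this example, assuming karateclub's top-level Graph2Vec class and its default constructor arguments:

import networkx as nx
from karateclub import Graph2Vec

graphs = [nx.newman_watts_strogatz_graph(30, 4, 0.2) for _ in range(20)]

model = Graph2Vec()
model.fit(graphs)                  # WL hashing per graph, then Doc2Vec over the feature documents
embedding = model.get_embedding()  # one row per graph, one column per dimension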
Example #4
    def fit(self, graphs: List[nx.classes.graph.Graph]):
        """
        Fitting a graph level embedding model.

        Arg types:
            * **graphs** *(List of NetworkX graphs)* - The graphs to be embedded.
        """
        self._set_seed()
        graphs = self._check_graphs(graphs)
        documents = [WeisfeilerLehmanHashing(graph, self.wl_iterations, self.attributed, self.erase_base_features) for
                     graph in graphs]
        documents = [TaggedDocument(words=doc.get_graph_features(), tags=[str(i)]) for i, doc in enumerate(documents)]

        model = Doc2Vec(documents,
                        vector_size=self.dimensions,
                        window=0,
                        min_count=self.min_count,
                        dm=0,
                        sample=self.down_sampling,
                        workers=self.workers,
                        epochs=self.epochs,
                        alpha=self.learning_rate,
                        seed=self.seed)

        self._embedding = model
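The class defining this variant is not shown, so here is a standalone sketch of just the Doc2Vec step it performs, with dummy word lists standing in for the WeisfeilerLehmanHashing features (assumes gensim < 4.0, where per-document vectors are read through model.docvecs; gensim 4.x renames this to model.dv):

from gensim.models.doc2vec import Doc2Vec, TaggedDocument

# dummy "WL feature" documents, one per graph, tagged by index as in the code above
documents = [
    TaggedDocument(words=["feat_a", "feat_b", "feat_c"], tags=[str(i)])
    for i in range(10)
]

model = Doc2Vec(documents, vector_size=16, window=0, min_count=1, dm=0, epochs=5)
vectors = [model.docvecs[str(i)] for i in range(len(documents))]  # one vector per graph

Because this version stores the whole Doc2Vec model in self._embedding rather than a list of vectors, downstream code would read the per-graph vectors back from the model in the same way.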