Code Example #1
    def config_predict_data(self, index):
        node_mask = gf.index_to_mask(index, self.graph.num_nodes)
        labels = self.graph.label
        cache = self.cache

        batch_mask, batch_y = [], []
        batch_feat, batch_adj = [], []
        batch_nodes = []
        for cluster in range(self.num_clusters):
            nodes = np.array(cache.cluster_member[cluster])
            mask = node_mask[nodes]
            y = labels[nodes][mask]
            if len(y) == 0:
                continue
            batch_feat.append(cache.batch_feat[cluster])
            batch_adj.append(cache.batch_adj[cluster])
            batch_mask.append(mask)
            batch_y.append(y)
            batch_nodes.append(nodes[mask])

        batch_inputs = tuple(zip(batch_feat, batch_adj))
        sequence = MiniBatchSequence(inputs=batch_inputs,
                                     y=batch_y,
                                     out_index=batch_mask,
                                     node_ids=batch_nodes,
                                     device=self.data_device)
        return sequence
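
The loop above keeps, for each graph cluster, only the nodes that were actually requested. As a point of reference, the same masking pattern can be reproduced with plain NumPy; the sketch below is a minimal stand-alone illustration, where the toy `cluster_member`, `labels` and the local `index_to_mask` helper are assumptions made for the example rather than GraphGallery code.

    import numpy as np

    def index_to_mask(index, size):
        # Boolean mask with True at the requested positions
        # (the role gf.index_to_mask plays above).
        mask = np.zeros(size, dtype=bool)
        mask[index] = True
        return mask

    # Toy setup: 10 nodes split into 3 clusters, predicting for nodes 1, 4, 7, 9.
    num_nodes = 10
    cluster_member = [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
    labels = np.arange(num_nodes) % 2
    index = np.array([1, 4, 7, 9])

    node_mask = index_to_mask(index, num_nodes)
    for nodes in map(np.asarray, cluster_member):
        mask = node_mask[nodes]      # which nodes of this cluster were requested
        if not mask.any():           # clusters with no requested nodes are skipped
            continue
        print(nodes[mask], labels[nodes][mask])
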
Code Example #2
    def predict(self, index):
        cache = self.cache

        node_mask = gf.index_to_mask(index, self.graph.num_nodes)
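        # Remember where each queried node id belongs in the final logit
        # array, so per-cluster outputs can be scattered back in query order.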
        orders_dict = {idx: order for order, idx in enumerate(index)}
        batch_mask, orders = [], []
        batch_x, batch_adj = [], []
        for cluster in range(self.cfg.process.num_clusters):
            nodes = cache.cluster_member[cluster]
            mask = node_mask[nodes]
            batch_nodes = np.asarray(nodes)[mask]
            if batch_nodes.size == 0:
                continue
            batch_x.append(cache.batch_x[cluster])
            batch_adj.append(cache.batch_adj[cluster])
            batch_mask.append(mask)
            orders.append([orders_dict[n] for n in batch_nodes])

        batch_data = tuple(zip(batch_x, batch_adj))

        logit = np.zeros((index.size, self.graph.num_node_classes),
                         dtype=self.floatx)
        batch_data, batch_mask = gf.astensors(batch_data,
                                              batch_mask,
                                              device=self.device)

        model = self.model
        for order, inputs, mask in zip(orders, batch_data, batch_mask):
            output = model.predict_step_on_batch(inputs, out_weight=mask)
            logit[order] = output

        return logit
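
The `orders_dict` / `logit[order] = output` bookkeeping is what puts the per-cluster outputs back into the order of the query `index`. A minimal NumPy sketch of that scatter step, with made-up cluster outputs standing in for the model call:

    import numpy as np

    index = np.array([7, 2, 9, 4])        # predictions wanted in exactly this order
    num_classes = 3

    # Position of each queried node id in the output array.
    orders_dict = {idx: order for order, idx in enumerate(index)}

    # Pretend two clusters each produced outputs for a subset of the queried nodes.
    cluster_outputs = [
        (np.array([2, 7]), np.full((2, num_classes), 0.1)),
        (np.array([4, 9]), np.full((2, num_classes), 0.9)),
    ]

    logit = np.zeros((index.size, num_classes))
    for batch_nodes, output in cluster_outputs:
        order = [orders_dict[n] for n in batch_nodes]
        logit[order] = output             # scatter rows back into query order

    print(logit)                          # row i corresponds to index[i]
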
Code Example #3
File: clustergcn.py  Project: future1111/GraphGallery
    def predict(self, index):

        mask = gf.index_to_mask(index, self.graph.num_nodes)

        orders_dict = {idx: order for order, idx in enumerate(index)}
        batch_idx, orders = [], []
        batch_x, batch_adj = [], []
        for cluster in range(self.cache.n_clusters):
            nodes = self.cache.cluster_member[cluster]
            batch_mask = mask[nodes]
            batch_nodes = np.asarray(nodes)[batch_mask]
            if batch_nodes.size == 0:
                continue
            batch_x.append(self.cache.batch_x[cluster])
            batch_adj.append(self.cache.batch_adj[cluster])
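            # Local positions (within this cluster) of the requested nodes.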
            batch_idx.append(np.where(batch_mask)[0])
            orders.append([orders_dict[n] for n in batch_nodes])

        batch_data = tuple(zip(batch_x, batch_adj, batch_idx))

        logit = np.zeros((index.size, self.graph.num_node_classes),
                         dtype=self.floatx)
        batch_data = gf.astensors(batch_data, device=self.device)

        model = self.model
        model.eval()
        with torch.no_grad():
            for order, inputs in zip(orders, batch_data):
                output = model(inputs).detach().cpu().numpy()
                logit[order] = output

        return logit
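
Example #3 is the PyTorch variant of the same prediction logic: inference runs under `model.eval()` and `torch.no_grad()` so dropout and batch-norm layers behave deterministically and no autograd graph is built. For reference, that inference pattern in isolation, with a toy stand-in model rather than GraphGallery's:

    import torch
    import torch.nn as nn

    model = nn.Linear(16, 3)            # stand-in for the graph model
    x = torch.randn(5, 16)

    model.eval()                        # dropout/batch-norm switch to eval behaviour
    with torch.no_grad():               # skip autograd bookkeeping during inference
        out = model(x).detach().cpu().numpy()

    print(out.shape)                    # (5, 3)
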
Code Example #4
    def train_sequence(self, index, batch_size=np.inf):

        mask = gf.index_to_mask(index, self.graph.num_nodes)
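        # Grow the index set via get_indice_graph until the induced
        # subgraph covers at least cache.K nodes.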
        index = get_indice_graph(self.cache.A, index, batch_size)
        while index.size < self.cache.K:
            index = get_indice_graph(self.cache.A, index)

        structure_inputs = self.cache.A[index][:, index]
        feature_inputs = self.cache.X[index]
        mask = mask[index]
        labels = self.graph.node_label[index[mask]]

        sequence = FullBatchSequence([feature_inputs, structure_inputs, mask],
                                     labels,
                                     device=self.device)
        return sequence
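
Here `get_indice_graph` repeatedly enlarges the training index set until the induced subgraph contains at least `cache.K` nodes, after which the cached adjacency `A` and features `X` are sliced down to that subgraph. The sketch below shows a one-hop neighbourhood expansion of that flavour with SciPy; it is an illustrative assumption about what such an expansion does, not GraphGallery's actual `get_indice_graph`.

    import numpy as np
    import scipy.sparse as sp

    def expand_by_neighbors(adj, index):
        # Union of `index` and all of its one-hop neighbours in `adj`
        # (illustrative only; the real helper may expand differently).
        neighbors = adj[index].nonzero()[1]
        return np.union1d(index, neighbors)

    # Toy chain graph on 6 nodes.
    adj = sp.csr_matrix(np.eye(6, k=1) + np.eye(6, k=-1))
    X = np.random.rand(6, 4)

    index = np.array([0])
    K = 4
    while index.size < K:
        index = expand_by_neighbors(adj, index)

    sub_A = adj[index][:, index]        # adjacency restricted to the subgraph
    sub_X = X[index]                    # features restricted to the subgraph
    print(index, sub_A.shape, sub_X.shape)
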
Code Example #5
File: lgcn.py  Project: MemMeta/GraphGallery
    def train_sequence(self, index, batch_size=np.inf):
        cache = self.cache
        mask = gf.index_to_mask(index, self.graph.num_nodes)
        index = get_indice_graph(cache.A, index, batch_size)
        while index.size < self.cfg.model.K:
            index = get_indice_graph(cache.A, index)

        A = cache.A[index][:, index]
        X = cache.X[index]
        mask = mask[index]
        labels = self.graph.node_label[index[mask]]

        sequence = FullBatchSequence([X, A],
                                     labels,
                                     out_weight=mask,
                                     device=self.device)
        return sequence
Code Example #6
    def train_sequence(self, index):
        node_mask = gf.index_to_mask(index, self.graph.num_nodes)
        labels = self.graph.node_label
        cache = self.cache

        batch_mask, batch_y = [], []
        batch_x, batch_adj = [], []
        for cluster in range(self.cfg.process.num_clusters):
            nodes = cache.cluster_member[cluster]
            mask = node_mask[nodes]
            y = labels[nodes][mask]
            if y.size == 0:
                continue
            batch_x.append(cache.batch_x[cluster])
            batch_adj.append(cache.batch_adj[cluster])
            batch_mask.append(mask)
            batch_y.append(y)

        batch_inputs = tuple(zip(batch_x, batch_adj))
        sequence = MiniBatchSequence(batch_inputs,
                                     batch_y,
                                     out_weight=batch_mask,
                                     device=self.device)
        return sequence
Code Example #7
File: clustergcn.py  Project: future1111/GraphGallery
    def train_sequence(self, index):

        mask = gf.index_to_mask(index, self.graph.num_nodes)
        labels = self.graph.node_label

        batch_idx, batch_labels = [], []
        batch_x, batch_adj = [], []
        for cluster in range(self.cache.n_clusters):
            nodes = self.cache.cluster_member[cluster]
            batch_mask = mask[nodes]
            y = labels[nodes][batch_mask]
            if y.size == 0:
                continue
            batch_x.append(self.cache.batch_x[cluster])
            batch_adj.append(self.cache.batch_adj[cluster])
            batch_idx.append(np.where(batch_mask)[0])
            batch_labels.append(y)

        batch_data = tuple(zip(batch_x, batch_adj, batch_idx))

        sequence = MiniBatchSequence(batch_data,
                                     batch_labels,
                                     device=self.device)
        return sequence
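
The two training variants mirror the prediction code above: Example #6 keeps each cluster's boolean mask and passes it to MiniBatchSequence as out_weight, whereas Example #7 converts the mask to local positions with np.where(batch_mask)[0] and packs those indices into the batch inputs alongside the features and adjacency.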