def forward(self, g, node_feat, edge_feat=None):
    if self.embed_size > 0:
        dgl_warning(
            "The embedding for node feature is used, and input node_feat is "
            "ignored, due to the provided embed_size.",
            norepeat=True,
        )
        # Use the learnable embedding table as node features.
        h = self.embed.weight
    else:
        h = node_feat
    return self.sgc(g, h)

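
# --- Illustrative usage sketch (not part of the source above) ---
# All five forward() methods in this section share the same fallback: when the
# model was constructed with embed_size > 0, a learnable embedding table is
# used in place of the input features. The minimal, self-contained module below
# mirrors that pattern; the class name EmbedFallbackSGC and its constructor
# arguments are assumptions made for this sketch only.
import torch.nn as nn
import dgl
from dgl.nn import SGConv

class EmbedFallbackSGC(nn.Module):
    def __init__(self, num_nodes, in_size, out_size, embed_size=-1):
        super().__init__()
        self.embed_size = embed_size
        if embed_size > 0:
            # Learnable per-node features; input node_feat will be ignored.
            self.embed = nn.Embedding(num_nodes, embed_size)
            in_size = embed_size
        self.sgc = SGConv(in_size, out_size, k=2)

    def forward(self, g, node_feat, edge_feat=None):
        h = self.embed.weight if self.embed_size > 0 else node_feat
        return self.sgc(g, h)

# Toy invocation; self-loops avoid zero-in-degree errors in SGConv.
g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 0])))
model = EmbedFallbackSGC(num_nodes=3, in_size=4, out_size=2, embed_size=8)
logits = model(g, node_feat=None)  # node_feat ignored because embed_size > 0
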
def forward(self, graph, node_feat, edge_feat=None):
    if self.embed_size > 0:
        dgl_warning(
            "The embedding for node feature is used, and input node_feat is "
            "ignored, due to the provided embed_size.",
            norepeat=True,
        )
        h = self.embed.weight
    else:
        h = node_feat
    # Stack of graph conv layers, followed by a single output MLP.
    for i in range(self.num_layers):
        h = self.conv_list[i](graph, h)
    h = self.out_mlp(h)
    return h

def forward(self, graph, node_feat, edge_feat=None):
    if self.embed_size > 0:
        dgl_warning(
            "The embedding for node feature is used, and input node_feat is "
            "ignored, due to the provided embed_size."
        )
        h = self.embed.weight
    else:
        h = node_feat
    # Dropout is applied to the input features and after every hidden layer;
    # the final layer emits raw scores with no activation.
    h = self.dropout(h)
    for i, layer in enumerate(self.layers):
        h = layer(graph, h, edge_feat)
        if i != len(self.layers) - 1:
            h = self.activation(h)
            h = self.dropout(h)
    return h

def forward(self, graph, node_feat, edge_feat=None):
    if self.embed_size > 0:
        dgl_warning(
            "The embedding for node feature is used, and input node_feat is "
            "ignored, due to the provided embed_size.",
            norepeat=True,
        )
        h = self.embed.weight
    else:
        h = node_feat
    # Hidden layers: concatenate attention heads.
    for i in range(self.num_layers - 1):
        h = self.gat_layers[i](graph, h).flatten(1)
    # Output projection: average attention heads.
    logits = self.gat_layers[-1](graph, h).mean(1)
    return logits

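
# --- Illustrative shape note (assumed numbers, not library source) ---
# dgl.nn.GATConv returns a (num_nodes, num_heads, out_feats) tensor. The GAT
# forward above concatenates heads on hidden layers via .flatten(1), giving
# (num_nodes, num_heads * out_feats), and averages heads on the final layer
# via .mean(1), giving per-node logits of shape (num_nodes, out_feats).
import torch
import dgl
from dgl.nn import GATConv

g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 0])))
x = torch.randn(3, 4)
hidden = GATConv(4, 8, num_heads=2)
out = GATConv(16, 7, num_heads=2)
h = hidden(g, x).flatten(1)   # (3, 2, 8) -> (3, 16)
logits = out(g, h).mean(1)    # (3, 2, 7) -> (3, 7)
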
def forward(self, g, node_feat, edge_feat=None):
    if self.embed_size > 0:
        dgl_warning(
            "The embedding for node feature is used, and input node_feat is "
            "ignored, due to the provided embed_size.",
            norepeat=True,
        )
        h = self.embed.weight
    else:
        h = node_feat
    # Optionally treat edge_feat as scalar edge weights for the convolution.
    edge_weight = edge_feat if self.use_edge_weight else None
    for i, layer in enumerate(self.layers):
        h = layer(g, h, edge_weight=edge_weight)
        if i != len(self.layers) - 1:
            h = self.act(h)
            h = self.dropout(h)
    return h
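
# --- Illustrative usage sketch (assumed tensors, not library source) ---
# The GCN-style forward above passes edge_feat through as per-edge scalar
# weights whenever self.use_edge_weight is set. The snippet below shows the
# underlying dgl.nn.GraphConv call that this relies on.
import torch
import dgl
from dgl.nn import GraphConv

g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 0])))
conv = GraphConv(4, 2)
x = torch.randn(3, 4)
w = torch.rand(g.num_edges())   # one non-negative weight per edge
h = conv(g, x, edge_weight=w)   # weighted neighborhood aggregation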