# Required imports (module level; assumed from the calls below):
import torch
import torch.nn.functional as F
from dgl.nn.pytorch import RelGraphConv


def build_output_layer(self):
    # Final RGCN layer: maps the second-to-last hidden size to the output
    # size, with tanh to bound the outputs.
    print('Building an OUTPUT layer of {}x{}'.format(
        self.hidden_dimensions[-2], self.hidden_dimensions[-1]))
    return RelGraphConv(self.hidden_dimensions[-2],
                        self.hidden_dimensions[-1],
                        self.num_rels,
                        regularizer='basis',
                        num_bases=self.num_bases,
                        dropout=self.feat_drop,
                        activation=torch.tanh)
def build_input_layer(self):
    # First RGCN layer: maps raw input features to the first hidden size.
    print('Building an INPUT layer of {}x{}'.format(
        self.in_dim, self.hidden_dimensions[0]))
    return RelGraphConv(self.in_dim,
                        self.hidden_dimensions[0],
                        self.num_rels,
                        regularizer='basis',
                        num_bases=self.num_bases,
                        dropout=self.feat_drop,
                        activation=F.leaky_relu)
def build_hidden_layer(self, i):
    # i-th intermediate RGCN layer: maps hidden size i to hidden size i + 1.
    print('Building a HIDDEN layer of {}x{}'.format(
        self.hidden_dimensions[i], self.hidden_dimensions[i + 1]))
    return RelGraphConv(self.hidden_dimensions[i],
                        self.hidden_dimensions[i + 1],
                        self.num_rels,
                        regularizer='basis',
                        num_bases=self.num_bases,
                        dropout=self.feat_drop,
                        activation=F.leaky_relu)
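

# --- Composition sketch (assumption, not in the original file) ---
# The three builders above are typically stacked input -> hidden* -> output.
# `build_model` below is a hypothetical helper showing one plausible wiring;
# it reuses only attributes already referenced above.
def build_model(self):
    layers = [self.build_input_layer()]
    # One hidden layer per adjacent pair of hidden sizes, stopping before
    # the final pair, which the output layer consumes.
    for i in range(len(self.hidden_dimensions) - 2):
        layers.append(self.build_hidden_layer(i))
    layers.append(self.build_output_layer())
    return torch.nn.ModuleList(layers)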
def build_gnn_output_layer(self):
    # Output RGCN layer for the Dueling DQN variant; the activation is
    # configurable rather than hard-coded.
    print(f'Building an OUTPUT layer for RGCN Dueling DQN of '
          f'{self.gnn_hidden_dimensions[-2]}x{self.gnn_hidden_dimensions[-1]} '
          f'(activation: {self.gnn_activation[-1]})')
    return RelGraphConv(self.gnn_hidden_dimensions[-2],
                        self.gnn_hidden_dimensions[-1],
                        self.num_rels,
                        regularizer='basis',
                        num_bases=self.num_bases,
                        dropout=self.feat_drop,
                        activation=self.gnn_activation[-1])
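

# --- Usage sketch (assumption; `net`, `g`, `feats`, and `etypes` are
# illustrative names, not from the original code) ---
# A layer built above is applied as layer(graph, node_feats, edge_types),
# following the DGL RelGraphConv API:
#
#     import dgl
#     layer = net.build_input_layer()
#     g = dgl.graph(([0, 1], [1, 2]))        # toy 3-node graph, 2 edges
#     etypes = torch.tensor([0, 1])          # relation id per edge (< num_rels)
#     feats = torch.randn(3, net.in_dim)     # initial node features
#     h = layer(g, feats, etypes)            # -> (3, hidden_dimensions[0])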