Example #1
A two-layer ARMA graph network for node classification; its hyperparameters are read from a module-level argparse namespace (args).
import torch
import torch.nn.functional as F
from torch_geometric.nn import ARMAConv


class Net(torch.nn.Module):
    def __init__(self, dataset):
        super(Net, self).__init__()
        # Two ARMA layers: input features -> hidden -> class scores.
        # `args` is the script's module-level argparse namespace.
        self.conv1 = ARMAConv(dataset.num_features,
                              args.hidden,
                              args.num_stacks,
                              args.num_layers,
                              args.shared_weights,
                              dropout=args.skip_dropout)
        self.conv2 = ARMAConv(args.hidden,
                              dataset.num_classes,
                              args.num_stacks,
                              args.num_layers,
                              args.shared_weights,
                              dropout=args.skip_dropout)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        self.conv2.reset_parameters()

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = F.relu(self.conv1(x, edge_index))
        # Dropout between the two ARMA layers (rate taken from `args`).
        x = F.dropout(x, p=args.dropout, training=self.training)
        x = self.conv2(x, edge_index)
        return F.log_softmax(x, dim=1)
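
A minimal usage sketch, assuming the benchmark-style setup where `args` is an argparse namespace defined in the same script. The Planetoid/Cora dataset, its path, and every hyperparameter value below are illustrative assumptions, not taken from the example itself.

import argparse

from torch_geometric.datasets import Planetoid

# Hypothetical hyperparameter values; the original script parses these
# from the command line.
args = argparse.Namespace(hidden=16, num_stacks=2, num_layers=1,
                          shared_weights=False, dropout=0.5,
                          skip_dropout=0.75)

dataset = Planetoid(root='/tmp/Cora', name='Cora')  # assumed dataset and path
data = dataset[0]
model = Net(dataset)

optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
model.train()
for epoch in range(200):
    optimizer.zero_grad()
    out = model(data)
    # The model returns log-probabilities, so NLL loss pairs with log_softmax.
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()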
Example #2
A functionally identical network whose hyperparameters are passed explicitly to the constructor, avoiding the global argparse state of Example #1.
import torch
import torch.nn.functional as F
from torch_geometric.nn import ARMAConv


class ARMA_Net(torch.nn.Module):
    def __init__(self, features_num, num_class, hidden, num_stacks, num_layers,
                 shared_weights, dropout, skip_dropout):
        super(ARMA_Net, self).__init__()
        # Same two-layer ARMA architecture as Example #1, but every
        # hyperparameter is an explicit constructor argument.
        self.dropout = dropout
        self.conv1 = ARMAConv(features_num,
                              hidden,
                              num_stacks,
                              num_layers,
                              shared_weights,
                              dropout=skip_dropout)
        self.conv2 = ARMAConv(hidden,
                              num_class,
                              num_stacks,
                              num_layers,
                              shared_weights,
                              dropout=skip_dropout)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        self.conv2.reset_parameters()

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = F.relu(self.conv1(x, edge_index))
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.conv2(x, edge_index)
        return F.log_softmax(x, dim=-1)

    def __repr__(self):
        return self.__class__.__name__
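
Because ARMA_Net takes its hyperparameters as constructor arguments, it can be instantiated directly without any global setup. A minimal inference sketch; the dataset, path, and hyperparameter values are again assumptions for illustration.

from torch_geometric.datasets import Planetoid

dataset = Planetoid(root='/tmp/Cora', name='Cora')  # assumed dataset and path
data = dataset[0]

# All hyperparameter values here are illustrative assumptions.
model = ARMA_Net(features_num=dataset.num_features,
                 num_class=dataset.num_classes,
                 hidden=16,
                 num_stacks=2,
                 num_layers=1,
                 shared_weights=False,
                 dropout=0.5,
                 skip_dropout=0.75)

model.eval()
with torch.no_grad():
    log_probs = model(data)          # shape: [num_nodes, num_classes]
    pred = log_probs.argmax(dim=-1)  # predicted class per node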