Example #1
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import ChebConv, global_mean_pool


class GNN(nn.Module):
    def __init__(self, args):
        super(GNN, self).__init__()
        self.args = args
        self.d = args.n_node_features
        num_nodes = args.n_nodes

        K = 3  # Chebyshev filter size shared by all ChebConv layers

        self.conv1 = ChebConv(self.d, 16, K)
        self.conv2 = ChebConv(16, 16, K)
        self.conv3 = ChebConv(16, 1, K)
        # output heads: activ1 maps to a per-node policy (pi) and activ2 to a
        # scalar value (v); note that forward() below builds the policy from
        # conv3 instead, so activ1 is defined but unused there
        self.activ1 = nn.Linear(16, num_nodes)
        self.activ2 = nn.Linear(16, 1)
        # parameter groups, typically used to apply weight decay only to the
        # first convolution
        self.reg_params = self.conv1.parameters()
        self.non_reg_params = self.conv2.parameters()

    def forward(self, data):
        x, edges, weights = data.x, data.edge_index, data.weight
        weights = weights.float()

        x = self.conv1(x, edges, weights)
        x = F.relu(x)
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edges, weights)
        x = F.relu(x)

        c = self.conv3(x, edges, weights)

        # per-node policy: softmax over all nodes of the graph
        choice = F.softmax(c, dim=0).view(self.args.n_nodes)

        # graph-level value head: mean-pool node embeddings, then a linear layer
        v = global_mean_pool(x, torch.zeros(data.num_nodes, dtype=torch.long).to(self.args.device))
        value = self.activ2(v)

        # print("value: ", value.squeeze())

        return choice, value.squeeze()
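
A minimal usage sketch for the model above. The SimpleNamespace args and the small random ring graph are illustrative assumptions; only n_nodes, n_node_features and device are actually read by GNN.

import torch
from types import SimpleNamespace
from torch_geometric.data import Data

args = SimpleNamespace(n_nodes=5, n_node_features=3, device=torch.device('cpu'))
model = GNN(args).to(args.device)

# a 5-node directed ring with unit edge weights
x = torch.randn(args.n_nodes, args.n_node_features)
edge_index = torch.tensor([[0, 1, 2, 3, 4],
                           [1, 2, 3, 4, 0]], dtype=torch.long)
data = Data(x=x, edge_index=edge_index, weight=torch.ones(edge_index.size(1)))

choice, value = model(data.to(args.device))
print(choice.shape, value.shape)  # torch.Size([5]) torch.Size([])
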
Example #2
import torch
import torch.nn.functional as F
from torch_geometric.nn import ChebConv


class ChebNet(torch.nn.Module):
    def __init__(self, num_features, num_classes):
        super(ChebNet, self).__init__()
        self.conv1 = ChebConv(num_features, 16, K=2)
        self.conv2 = ChebConv(16, num_classes, K=2)

        self.reg_params = self.conv1.parameters()
        self.non_reg_params = self.conv2.parameters()

    def forward(self, data):
        x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
        x = F.relu(self.conv1(x, edge_index, edge_weight))
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edge_index, edge_weight)
        return F.log_softmax(x, dim=1)
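
A hedged training sketch for ChebNet on a Planetoid dataset. The dataset root, learning rate, weight decay and epoch count are illustrative choices, not taken from the original snippet.

import torch
import torch.nn.functional as F
from torch_geometric.datasets import Planetoid

dataset = Planetoid(root='/tmp/Cora', name='Cora')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
data = dataset[0].to(device)

model = ChebNet(dataset.num_node_features, dataset.num_classes).to(device)
# weight decay only on the first layer, mirroring reg_params/non_reg_params
optimizer = torch.optim.Adam([
    dict(params=model.reg_params, weight_decay=5e-4),
    dict(params=model.non_reg_params, weight_decay=0),
], lr=0.01)

model.train()
for epoch in range(200):
    optimizer.zero_grad()
    out = model(data)  # per-node log-probabilities
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()

model.eval()
pred = model(data).argmax(dim=1)
test_acc = (pred[data.test_mask] == data.y[data.test_mask]).float().mean().item()
print(f'test accuracy: {test_acc:.3f}')
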
Example #3
import torch
import torch.nn.functional as F
from torch_geometric.nn import ChebConv
# DFA and DFALayer come from a direct feedback alignment library (the names
# match LightOn's tinydfa); the exact import path is project-specific.


class DFAChebNet(torch.nn.Module):
    def __init__(self, num_features, num_classes, training_method='dfa'):
        super(DFAChebNet, self).__init__()
        self.conv1 = ChebConv(num_features, 16, K=2)
        self.dfa_1 = DFALayer()
        self.conv2 = ChebConv(16, num_classes, K=2)

        self.dfa = DFA(dfa_layers=[self.dfa_1], no_training=training_method != 'dfa')

        self.reg_params = self.conv1.parameters()
        self.non_reg_params = self.conv2.parameters()

    def forward(self, data):
        x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
        x = F.relu(self.conv1(x, edge_index, edge_weight))
        x = F.dropout(x, p=0., training=self.training)  # p=0.: dropout effectively disabled
        x = self.dfa_1(x)
        x = self.conv2(x, edge_index, edge_weight)
        return F.log_softmax(self.dfa(x), dim=1)
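
DFA and DFALayer are external to PyTorch Geometric, so the sketch below only shows the outer training loop and treats their behavior as given; the Planetoid setup, learning rate and epoch count are illustrative assumptions, as above.

import torch
import torch.nn.functional as F
from torch_geometric.datasets import Planetoid

dataset = Planetoid(root='/tmp/Cora', name='Cora')
data = dataset[0]

# training_method='dfa' keeps the DFA feedback active; any other value sets
# no_training=True on the DFA wrapper (see __init__ above)
model = DFAChebNet(dataset.num_node_features, dataset.num_classes, training_method='dfa')
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

model.train()
for epoch in range(100):
    optimizer.zero_grad()
    out = model(data)
    # with DFA, the output error is fed back to dfa_1 through a fixed random
    # projection instead of the true backward pass
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()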