logits_list = F.leaky_relu(alpha, negative_slope=self.negative_slope)
        # print(logits_list.size())

        # softmax over the attention logits, grouped by destination node
        alpha = softmax(logits_list, adj_list[0], nodes_ft.size()[0])
        if self.training and self.dropout > 0:
            alpha = F.dropout(alpha, p=self.dropout, training=True)
        # print('alpha.size() = ',alpha.size(), adj_list.size())

        # weight each edge's source-node message by its attention coefficient,
        # then sum the weighted messages per destination node
        vals = egde_j_node_state * alpha.view(edge_num, self.heads, 1)
        out_node_state = scatter_('add', vals, adj_list[0], dim_size=nodes_ft.size()[0])

        if self.concat:
            out_node_state = out_node_state.view(-1, self.heads * self.out_channels)
        else:
            out_node_state = out_node_state.mean(dim=1)

        if self.bias is not None:
            # print('out_node_state size = ', out_node_state.size(), 'bias size = ', self.bias.size())
            out_node_state = out_node_state + self.bias

        # print('out_node_state size = ', out_node_state.size())
        return out_node_state
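
# For reference, a minimal sketch of the edge-wise softmax used above: the
# `softmax` helper normalizes the attention logits over all edges that share
# the same destination node (similar to torch_geometric.utils.softmax). The
# function name and the stability trick below are assumptions, not part of this repo.
def grouped_softmax(logits, index, num_nodes):
    # logits: [edge_num, heads]; index: [edge_num] destination node of each edge
    logits = (logits - logits.max()).exp()  # subtract the global max for numerical stability
    denom = logits.new_zeros((num_nodes, logits.size(1)))
    denom = denom.scatter_add(0, index.unsqueeze(-1).expand_as(logits), logits)
    return logits / (denom[index] + 1e-16)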

if __name__ == '__main__':
    dataset = Cora()
    net = GatConv(
        dataset.node_feature_size,
        dataset.label_size,
    )
    net(dataset.all_node_feature, dataset.edge_index)
        model_res_list.append(res)

    print(sum(model_res_list) / float(test_time))
    # print(ratio ,' ', sum(model_res_list) / float(test_time))

if __name__ == '__main__':
    from model.HGCN import HGCN

    torch.cuda.set_device(3)
    model_ = HGCN

    ratio_list = [
        [0.01, 0.1, 0.1],
        [0.02, 0.1, 0.1],
        [0.03, 0.1, 0.1],
        [0.05, 0.1, 0.1],
        [0.07, 0.1, 0.1],
        [0.1, 0.1, 0.1],
        [0.2, 0.1, 0.1],
        [0.3, 0.1, 0.1],
        [0.4, 0.1, 0.1],
        [0.5, 0.1, 0.1],
        [0.6, 0.1, 0.1],
        [0.7, 0.1, 0.1],
        [0.8, 0.1, 0.1],
    ]

    for dataset in [Cora(), Citeseer(), Pubmed()]:
        print(dataset.dataset_name)
        for ratio in ratio_list:
            ratio_test(model_, dataset, ratio)
Example #3
import os.path as osp

import torch
import torch.nn.functional as F
from torch_geometric.datasets import Planetoid
import torch_geometric.transforms as T
from torch_geometric.nn import GATConv
from data_loader import Cora
from model_component.utils.mask import index_2_mask

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

data = Cora()
data.get_data_loader([2000, 300, 400], mode='numerical', shuffle=True)
nodes = data.data_loader.all_node_feature.to(device)
edges = data.data_loader.edge_index.to(device)
node_num = nodes.size()[0]

all_label = data.data_loader.all_node_label
train_index = data.data_loader.train_index
test_index = data.data_loader.test_index
valid_index = data.data_loader.valid_index

train_mask = index_2_mask(node_num, train_index)
test_mask = index_2_mask(node_num, test_index)
valid_mask = index_2_mask(node_num, valid_index)




class GatNet(torch.nn.Module):
    def __init__(self, node_feature_size, label_feature_size,
                 first_heads=8, size_first_layer=8, dropout=0.6):
        # NOTE: the original __init__ is missing from this listing; the class name,
        # the defaults above and conv1 below are reconstructed from forward() and
        # the surviving conv2 arguments, so treat them as assumptions.
        super(GatNet, self).__init__()
        self.p = dropout
        self.first_heads = first_heads
        self.size_first_layer = size_first_layer
        self.conv1 = GatConv(node_feature_size, self.size_first_layer,
                             heads=self.first_heads, dropout=self.p)
        self.conv2 = GatConv(self.size_first_layer * self.first_heads,
                             label_feature_size,
                             heads=1,
                             dropout=self.p)

    def forward(self, nodes, edges):
        edges = add_self_loops(nodes.size()[0], edges)  # add self-loops so each node also attends to itself
        nodes = F.dropout(nodes, p=self.p, training=self.training)
        x = F.elu(self.conv1(nodes, edges))

        x = F.dropout(x, p=self.p, training=self.training)
        x = self.conv2(x, edges)
        return F.log_softmax(x, dim=1)


data = Cora()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = GatNet(1433, 7).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.002, weight_decay=5e-4)
data.get_data_loader([140, 500, 1000], mode='numerical', shuffle=False)
# data.get_data_loader([2000, 300, 400], mode='numerical', shuffle=True)
nodes = data.data_loader.all_node_feature.to(device)
edges = data.data_loader.edge_index.to(device)

all_label = data.data_loader.all_node_label
train_index = data.data_loader.train_index
test_index = data.data_loader.test_index
valid_index = data.data_loader.valid_index
# print(all_label)
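
# The training loop for this example is not shown in the listing above; what
# follows is a minimal sketch, assuming standard full-batch training of GatNet
# with nll_loss on the train split. The epoch count and print interval are
# assumptions, not values from the original example.
labels = all_label.to(device)

def train_epoch():
    model.train()
    optimizer.zero_grad()
    out = model(nodes, edges)  # log-probabilities from GatNet
    loss = F.nll_loss(out[train_index], labels[train_index])
    loss.backward()
    optimizer.step()
    return loss.item()

@torch.no_grad()
def accuracy(index):
    model.eval()
    pred = model(nodes, edges).argmax(dim=1)
    return (pred[index] == labels[index]).float().mean().item()

for epoch in range(200):
    loss = train_epoch()
    if (epoch + 1) % 20 == 0:
        print(epoch + 1, loss, accuracy(valid_index))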

Example #5
    def forward(self, node_feature, neighbor_nodes_feature):
        x = self.gat_conv(node_feature,
                          neighbor_nodes_feature)  #[1, att_out_size]
        x = torch.mm(x, self.weight)  # project the attention output onto the label space
        # print(x)
        # out = F.softmax(x)
        # print(res)
        return x


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Net(1433, 7, 32).to(device)
# optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=5e-4)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

data = Cora()
train_data_loader, valid_data_loader, test_data_loader = data.get_data_loader(
    [4, 2, 4])
criterion = torch.nn.CrossEntropyLoss()
# criterion = torch.nn.functional.nll_loss()


def train():
    for epoch in range(100):
        total_loss = 0
        for i, batch_data in enumerate(train_data_loader, 0):
            center_node_index, center_node_feature, center_node_label, neighbor_nodes_feature = batch_data
            # print(center_node_feature[0].size(), neighbor_nodes_feature[0].size())

            res = model(center_node_feature, neighbor_nodes_feature[0])
            # print(center_node_label, res)
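            # The listing is truncated here; a plausible continuation of the
            # mini-batch loop (an assumption, not part of the original example):
            loss = criterion(res, center_node_label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        print('epoch', epoch, 'loss', total_loss / (i + 1))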