# Exemplo n.º 1 (Example 1 — scraped-snippet separator; stray "0" vote-count artifact commented out)
from torch_sparse import spspmm, spmm
import matplotlib.pyplot as plt


# --- Hyperparameters ---
learning_rate = 0.1
weight_decay = 5e-4  # L2 regularization strength, passed to Adam below
epochs = 200


# --- Model, loss and optimizer ---
# NOTE(review): GHNN_Net, CoraData, torch, nn, optim, np and sp are not
# imported in this snippet — they presumably come from the original file's
# (unseen) import block.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = GHNN_Net().to(device)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)


# --- Load the Cora data and convert everything to torch tensors on `device` ---
dataset = CoraData().data
# Row-normalize the feature matrix so every row sums to 1.
x = dataset.x / dataset.x.sum(1, keepdims=True) 
tensor_x = torch.from_numpy(x).to(device)
tensor_y = torch.from_numpy(dataset.y).to(device)
tensor_train_mask = torch.from_numpy(dataset.train_mask).to(device)
tensor_val_mask = torch.from_numpy(dataset.val_mask).to(device)
tensor_test_mask = torch.from_numpy(dataset.test_mask).to(device)
# Normalized Laplacian of the graph. The .row/.col/.data accesses below imply
# this is a scipy COO matrix — TODO confirm what CoraData.normalization returns.
normalized_Laplacian = CoraData.normalization(dataset.adjacency)  
indices = torch.from_numpy(np.asarray([normalized_Laplacian.row,
                                       normalized_Laplacian.col]).astype('int64')).long()
values = torch.from_numpy(normalized_Laplacian.data.astype(np.float32))
# NOTE(review): torch.sparse.FloatTensor is a legacy constructor; the modern
# equivalent is torch.sparse_coo_tensor. The (2708, 2708) shape is hard-coded —
# presumably the Cora node count.
Laplacian_tensor_ = torch.sparse.FloatTensor(indices, values,
                                            (2708, 2708)).to(device)

# 2708x2708 identity matrix in COO format (sp.identity(2708, format="coo")
# would be the idiomatic one-liner).
identity_list_ = [1 for i in range(2708)]
identity_coo_ = sp.spdiags(identity_list_, diags=[0], m=2708, n=2708, format="coo")
# Exemplo n.º 2 (Example 2 — scraped-snippet separator; stray "0" vote-count artifact commented out)
INPUT_DIM = 1433  # input feature dimension (Cora bag-of-words vocabulary size)
# Note: the number of neighbor-sampling hops must match the number of GCN layers.
HIDDEN_DIM = [156, 7]  # hidden units per layer (7 is presumably the class count — confirm)
NUM_NEIGHBORS_LIST = [15, 10]  # number of neighbors sampled at each hop
assert len(HIDDEN_DIM) == len(NUM_NEIGHBORS_LIST)  # one sampling fan-out per layer
# NOTE(review): name is misspelled ("BTACH" -> "BATCH"); kept as-is because
# unseen code in the original file may reference it by this name.
BTACH_SIZE = 32  # batch size
EPOCHS = 50
NUM_BATCH_PER_EPOCH = 40  # number of mini-batches per epoch
LEARNING_RATE = 0.01  # learning rate
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Record type for the Cora dataset fields.
# NOTE(review): not used in the visible part of this snippet — presumably
# consumed by CoraData elsewhere in the original file.
Data = namedtuple(
    "Data",
    ['x', 'y', 'adjacency_dict', 'train_mask', 'val_mask', 'test_mask'])

data = CoraData().data
x = data.x / data.x.sum(1, keepdims=True)  # row-normalize features so each row sums to 1

train_index = np.where(data.train_mask)[0]
train_label = data.y[train_index]
test_index = np.where(data.test_mask)[0]
model = GraphSage(input_dim=INPUT_DIM,
                  hidden_dim=HIDDEN_DIM,
                  num_neighbors_list=NUM_NEIGHBORS_LIST).to(DEVICE)
print(model)
criterion = nn.CrossEntropyLoss().to(DEVICE)
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=5e-4)


def train():
    """Run the GraphSage training loop.

    NOTE(review): this function appears truncated by the snippet scrape —
    only the switch to training mode survives; the sampling/optimization
    loop that presumably followed is missing from this chunk.
    """
    model.train()
# Exemplo n.º 3 (Example 3 — scraped-snippet separator; stray "0" vote-count artifact commented out)
# Hyperparameter definitions
learning_rate = 0.1
weight_decay = 5e-4
epochs = 200

# Model definition: Model, Loss, Optimizer
# NOTE(review): GraphHeat_Net, CoraData, torch, nn, optim, np and
# chebyshev_polynomials are not imported in this snippet — they presumably
# come from the original file's (unseen) import block.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = GraphHeat_Net().to(device)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.Adam(model.parameters(),
                       lr=learning_rate,
                       weight_decay=weight_decay)

# Load the data and convert it to torch.Tensor
dataset = CoraData().data
x = dataset.x / dataset.x.sum(1, keepdims=True)  # row-normalize so each row sums to 1
tensor_x = torch.from_numpy(x).to(device)
tensor_y = torch.from_numpy(dataset.y).to(device)
tensor_train_mask = torch.from_numpy(dataset.train_mask).to(device)
tensor_val_mask = torch.from_numpy(dataset.val_mask).to(device)
tensor_test_mask = torch.from_numpy(dataset.test_mask).to(device)
normalize_adjacency = CoraData.normalization(dataset.adjacency)  # normalized adjacency matrix
# The .row/.col/.data accesses imply a scipy COO matrix — TODO confirm.
# The (2708, 2708) shape is hard-coded, presumably the Cora node count.
indices = torch.from_numpy(
    np.asarray([normalize_adjacency.row,
                normalize_adjacency.col]).astype('int64')).long()
values = torch.from_numpy(normalize_adjacency.data.astype(np.float32))
# NOTE(review): torch.sparse.FloatTensor is a legacy constructor; the modern
# equivalent is torch.sparse_coo_tensor.
tensor_adjacency = torch.sparse.FloatTensor(indices, values,
                                            (2708, 2708)).to(device)

# Order-3 Chebyshev polynomial expansion of the adjacency — exact semantics
# depend on the unseen chebyshev_polynomials helper; verify against its definition.
heatkernel = chebyshev_polynomials(dataset.adjacency, 3)