def run_(data, dataset, edge_index, train_node, writer, runs=10, lr=0.01, weight_decay=0.005):
    """Train a ``Net`` on *data* for 1000 epochs, then evaluate once.

    Relies on module-level names: ``Net``, ``device``, ``train``, ``evaluate``.
    NOTE(review): ``runs`` and ``train_node`` are accepted but unused here —
    kept for interface compatibility with callers; confirm whether an outer
    per-run loop was intended.

    Args:
        data: graph data object; must expose ``num_class``.
        dataset: dataset passed to the ``Net`` constructor.
        edge_index: edge index tensor forwarded to ``train``/``evaluate``.
        train_node: unused (kept for signature compatibility).
        writer: logging writer forwarded to ``train`` (e.g. TensorBoard).
        runs: unused (kept for signature compatibility).
        lr: learning rate for Adam.
        weight_decay: L2 penalty for Adam.

    Returns:
        (loss, acc) from the final ``evaluate`` call.
    """
    model = Net(dataset)
    model.to(device).reset_parameters()
    # BUG FIX: lr / weight_decay were previously ignored (hard-coded
    # 0.01 / 0.0005). NOTE(review): the old hard-coded weight decay
    # (0.0005) differs from the signature default (0.005); callers that
    # relied on the default now get 0.005 — confirm which value is intended.
    optimizer = Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    # Per-class positive weights for the loss; use `device` rather than an
    # unconditional .cuda() so CPU-only runs do not crash. (The dead
    # `self_loop` tensor — only used by commented-out calls — is removed;
    # it also called .cuda() unconditionally.)
    criterion = nn.BCEWithLogitsLoss(torch.ones(data.num_class).to(device))
    for epoch in range(1000):
        train(model, optimizer, data, edge_index, criterion, writer, epoch)
        if torch.cuda.is_available():
            # Make per-epoch timing/logging accurate on GPU.
            torch.cuda.synchronize()
    loss, acc = evaluate(model, data, edge_index, criterion)
    print('w/o Val Loss: {:.4f}, Test Accuracy: {:.3f}'.format(loss, acc))
    return loss, acc
def run_(data, dataset, edge_index, train_node, runs=10, lr=0.01, weight_decay=0.005):
    """Train/evaluate a ``Net`` for ``runs`` independent runs and report
    mean/std test accuracy.

    Relies on module-level names: ``Net``, ``device``, ``train``,
    ``evaluate``, ``tensor``. NOTE(review): this is a second definition of
    ``run_`` — it shadows the earlier one at import time; consider renaming
    at the call sites. ``train_node`` is unused (kept for interface
    compatibility).

    Args:
        data: graph data object forwarded to ``train``/``evaluate``.
        dataset: dataset passed to the ``Net`` constructor.
        edge_index: edge index tensor forwarded to ``train``/``evaluate``.
        train_node: unused (kept for signature compatibility).
        runs: number of independent training runs to aggregate over.
        lr: learning rate for Adam.
        weight_decay: L2 penalty for Adam.
    """
    model = Net(dataset)
    losses, accs = [], []
    # BUG FIX: `runs` was previously ignored — losses/accs collected a single
    # element, so the "± {std}" summary below printed nan. One training run
    # per iteration, re-initialising parameters and optimizer each time.
    # NOTE(review): reconstructed outer loop — confirm against the original
    # (pre-mangling) layout.
    for _ in range(runs):
        model.to(device).reset_parameters()
        # BUG FIX: lr / weight_decay were previously ignored (hard-coded
        # 0.01 / 0.0005). NOTE(review): the old hard-coded weight decay
        # (0.0005) differs from the signature default (0.005) — confirm
        # which value is intended.
        optimizer = Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
        for epoch in range(200):
            train(model, optimizer, data, edge_index)
            if torch.cuda.is_available():
                # Make per-epoch timing/logging accurate on GPU.
                torch.cuda.synchronize()
        loss, acc = evaluate(model, data, edge_index)
        losses.append(loss)
        accs.append(acc)
        print('Val Loss: {:.4f}, Test Accuracy: {:.3f}'.format(loss, acc))
    losses, accs = tensor(losses), tensor(accs)
    print(
        '::::::No Edge::::::\n Val Loss: {:.4f}, Mean Test Accuracy: {:.3f} ± {:.3f}'
        .format(losses.mean().item(), accs.mean().item(), accs.std().item()))