Example #1
class DAEGC(nn.Module):
    def __init__(self,
                 num_features,
                 hidden_size,
                 embedding_size,
                 alpha,
                 num_clusters,
                 v=1):
        super(DAEGC, self).__init__()
        self.num_clusters = num_clusters
        self.v = v

        # get pretrain model
        self.gat = GAT(num_features, hidden_size, embedding_size, alpha)
        self.gat.load_state_dict(
            torch.load(args.pretrain_path, map_location='cpu'))

        # cluster layer
        self.cluster_layer = Parameter(
            torch.Tensor(num_clusters, embedding_size))
        torch.nn.init.xavier_normal_(self.cluster_layer.data)

    def forward(self, x, adj, M):
        A_pred, z = self.gat(x, adj, M)
        q = self.get_Q(z)

        return A_pred, z, q

    def get_Q(self, z):
        q = 1.0 / (1.0 + torch.sum(
            torch.pow(z.unsqueeze(1) - self.cluster_layer, 2), 2) / self.v)
        q = q.pow((self.v + 1.0) / 2.0)
        q = (q.t() / torch.sum(q, 1)).t()
        return q
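DAEGC's clustering loss also needs a target distribution P obtained by sharpening Q. That helper is not part of this excerpt; a minimal sketch, assuming the standard DEC-style formulation p_ij = q_ij^2 / f_j with per-node renormalization:

def target_distribution(q):
    # sharpen soft assignments: square them, divide by cluster frequency f_j,
    # then renormalize each row to sum to 1
    weight = q**2 / q.sum(0)
    return (weight.t() / weight.sum(1)).t()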
Example #2
def build_classifier(gnn, input_dim, hidden_dim, num_labels, args):
    if gnn == 'GCN':
        return GCN(input_dim, hidden_dim, num_labels, args.num_layers)
    elif gnn == 'GAT':
        return GAT(input_dim, hidden_dim, num_labels, args.num_layers,
                   args.num_heads, args.merge, args.dropout)
    else:
        raise NotImplementedError(
            "%s is not implemented yet or doesn't exist." % gnn)
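A minimal call sketch (the dimensions and argparse fields here are hypothetical, and GCN/GAT are assumed importable from the project):

import argparse

args = argparse.Namespace(num_layers=2, num_heads=8, merge='cat', dropout=0.6)
clf = build_classifier('GAT', input_dim=1433, hidden_dim=64, num_labels=7, args=args)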
Example #3
def init_model():
    if args.model == "GCN":
        model = GCN(**model_args)
    elif args.model == "GAT":
        model_args["num_heads"] = 8
        model_args["n_units"] = 8
        model_args["dropout"] = 0.6
        model_args["activation"] = "elu"
        model = GAT(**model_args)
    else:
        model_args["n_layers"] = args.hidden_layers
        if args.model == "JKNetConCat":
            model = JKNetConCat(**model_args)
        elif args.model == "JKNetMaxpool":
            model = JKNetMaxpool(**model_args)
        else:
            raise ValueError(
                "Model should be GCN, GAT, JKNetConCat or JKNetMaxpool.")

    optimizer = th.optim.Adam(model.parameters(),
                              lr=args.lr,
                              weight_decay=args.weight_decay)
    return model, optimizer
Example #4
def pretrain(dataset):
    model = GAT(
        num_features=args.input_dim,
        hidden_size=args.hidden_size,
        embedding_size=args.embedding_size,
        alpha=args.alpha,
    ).to(device)
    print(model)
    optimizer = Adam(model.parameters(),
                     lr=args.lr,
                     weight_decay=args.weight_decay)

    # data process
    dataset = utils.data_preprocessing(dataset)
    adj = dataset.adj.to(device)
    adj_label = dataset.adj_label.to(device)
    M = utils.get_M(adj).to(device)

    # data and label
    x = torch.Tensor(dataset.x).to(device)
    y = dataset.y.cpu().numpy()

    for epoch in range(args.max_epoch):
        model.train()
        A_pred, z = model(x, adj, M)
        loss = F.binary_cross_entropy(A_pred.view(-1), adj_label.view(-1))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        with torch.no_grad():
            _, z = model(x, adj, M)
            kmeans = KMeans(n_clusters=args.n_clusters,
                            n_init=20).fit(z.data.cpu().numpy())
            acc, nmi, ari, f1 = eva(y, kmeans.labels_, epoch)
        if epoch % 5 == 0:
            torch.save(model.state_dict(),
                       f"./pretrain/predaegc_{args.name}_{epoch}.pkl")
Example #5
    parser.add_argument('--lr', type=float, default=0.005, help='Initial learning rate.')
    parser.add_argument('--weight_decay', type=float, default=5e-4, help='Weight decay (L2 loss on parameters).')
    parser.add_argument('--hidden', type=int, default=8, help='Number of hidden units.')
    parser.add_argument('--n_heads', type=int, default=8, help='Number of attention heads.')
    parser.add_argument('--dropout', type=float, default=0.6, help='Dropout rate (1 - keep probability).')
    parser.add_argument('--alpha', type=float, default=0.2, help='Alpha for the leaky_relu.')
    parser.add_argument('--patience', type=int, default=100, help='Patience for early stopping.')

    args = parser.parse_args()
    args.use_cuda = torch.cuda.is_available()

    # load data
    adj, features, labels, idx_train, idx_val, idx_test = load_data()

    model = GAT(n_input=features.shape[1], n_hidden=args.hidden,
                n_classes=int(labels.max()) + 1, dropout=args.dropout,
                alpha=args.alpha, n_heads=args.n_heads)

    if args.use_cuda:
        model.cuda()
        features = features.cuda()
        adj = adj.cuda()
        labels = labels.cuda()
        idx_train = idx_train.cuda()
        idx_val = idx_val.cuda()
        idx_test = idx_test.cuda()

    optimizer = optim.Adam(model.parameters(), lr=args.lr,
                           weight_decay=args.weight_decay)
Example #6
def main():
    # check cuda
    device = f'cuda:{args.gpu}' if torch.cuda.is_available() and args.gpu >= 0 else 'cpu'
    # load data
    dataset = DglNodePropPredDataset(name=args.dataset)
    evaluator = Evaluator(name=args.dataset)

    split_idx = dataset.get_idx_split()
    g, labels = dataset[0]  # g: DGLGraph; labels: tensor of shape (num_nodes, num_tasks)

    if args.dataset == 'ogbn-arxiv':
        if args.model == 'gat':
            g = dgl.add_reverse_edges(g, copy_ndata=True)
            g = g.add_self_loop()
        else:
            g = dgl.to_bidirected(g, copy_ndata=True)

        feat = g.ndata['feat']
        feat = (feat - feat.mean(0)) / feat.std(0)
        g.ndata['feat'] = feat

    g = g.to(device)
    feats = g.ndata['feat']
    labels = labels.to(device)

    # load masks for train / validation / test
    train_idx = split_idx["train"].to(device)
    valid_idx = split_idx["valid"].to(device)
    test_idx = split_idx["test"].to(device)

    n_features = feats.size()[-1]
    n_classes = dataset.num_classes

    # load model
    if args.model == 'mlp':
        model = MLP(n_features, args.hid_dim, n_classes, args.num_layers,
                    args.dropout)
    elif args.model == 'linear':
        model = MLPLinear(n_features, n_classes)
    elif args.model == 'gat':
        model = GAT(in_feats=n_features,
                    n_classes=n_classes,
                    n_hidden=args.hid_dim,
                    n_layers=args.num_layers,
                    n_heads=args.n_heads,
                    activation=F.relu,
                    dropout=args.dropout,
                    attn_drop=args.attn_drop)
    else:
        raise NotImplementedError(f'Model {args.model} is not supported.')

    model = model.to(device)
    print(f'Model parameters: {sum(p.numel() for p in model.parameters())}')

    if args.pretrain:
        print('---------- Before ----------')
        model.load_state_dict(
            torch.load(f'base/{args.dataset}-{args.model}.pt'))
        model.eval()

        if args.model == 'gat':
            y_soft = model(g, feats).exp()
        else:
            y_soft = model(feats).exp()

        y_pred = y_soft.argmax(dim=-1, keepdim=True)
        valid_acc = evaluate(y_pred, labels, valid_idx, evaluator)
        test_acc = evaluate(y_pred, labels, test_idx, evaluator)
        print(f'Valid acc: {valid_acc:.4f} | Test acc: {test_acc:.4f}')

        print('---------- Correct & Smoothing ----------')
        cs = CorrectAndSmooth(num_correction_layers=args.num_correction_layers,
                              correction_alpha=args.correction_alpha,
                              correction_adj=args.correction_adj,
                              num_smoothing_layers=args.num_smoothing_layers,
                              smoothing_alpha=args.smoothing_alpha,
                              smoothing_adj=args.smoothing_adj,
                              scale=args.scale)

        mask_idx = torch.cat([train_idx, valid_idx])
        if args.model != 'gat':
            y_soft = cs.correct(g, y_soft, labels[mask_idx], mask_idx)
        y_soft = cs.smooth(g, y_soft, labels[mask_idx], mask_idx)
        y_pred = y_soft.argmax(dim=-1, keepdim=True)
        valid_acc = evaluate(y_pred, labels, valid_idx, evaluator)
        test_acc = evaluate(y_pred, labels, test_idx, evaluator)
        print(f'Valid acc: {valid_acc:.4f} | Test acc: {test_acc:.4f}')
    else:
        if args.model == 'gat':
            opt = optim.RMSprop(model.parameters(), lr=args.lr)
        else:
            opt = optim.Adam(model.parameters(), lr=args.lr)

        best_acc = 0
        best_model = copy.deepcopy(model)

        # training
        print('---------- Training ----------')
        for i in range(args.epochs):
            if args.model == 'gat':
                adjust_learning_rate(opt, args.lr, i)

            model.train()
            opt.zero_grad()

            if args.model == 'gat':
                logits = model(g, feats)
            else:
                logits = model(feats)

            train_loss = F.nll_loss(logits[train_idx],
                                    labels.squeeze(1)[train_idx])
            train_loss.backward()

            opt.step()

            model.eval()
            with torch.no_grad():
                if args.model == 'gat':
                    logits = model(g, feats)
                else:
                    logits = model(feats)

                y_pred = logits.argmax(dim=-1, keepdim=True)

                train_acc = evaluate(y_pred, labels, train_idx, evaluator)
                valid_acc = evaluate(y_pred, labels, valid_idx, evaluator)

                print(
                    f'Epoch {i} | Train loss: {train_loss.item():.4f} | Train acc: {train_acc:.4f} | Valid acc {valid_acc:.4f}'
                )

                if valid_acc > best_acc:
                    best_acc = valid_acc
                    best_model = copy.deepcopy(model)

        # testing & saving model
        print('---------- Testing ----------')
        best_model.eval()

        if args.model == 'gat':
            logits = best_model(g, feats)
        else:
            logits = best_model(feats)

        y_pred = logits.argmax(dim=-1, keepdim=True)
        test_acc = evaluate(y_pred, labels, test_idx, evaluator)
        print(f'Test acc: {test_acc:.4f}')

        if not os.path.exists('base'):
            os.makedirs('base')

        torch.save(best_model.state_dict(),
                   f'base/{args.dataset}-{args.model}.pt')
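adjust_learning_rate is defined elsewhere in this script; a sketch of a typical linear warm-up schedule for the GAT branch (the 50-epoch warm-up length is an assumption):

def adjust_learning_rate(optimizer, lr, epoch):
    # linearly warm up the learning rate over the first 50 epochs
    if epoch <= 50:
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr * epoch / 50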
Example #7
from visualize_dataset import show_pred

seed = 2020
random.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed)

plt.rcParams['font.sans-serif'] = ['simhei']  # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly

# data
train_loader, test_loader = get_loader('PEMS04')

gcn = GCN(6, 6, 1)
chebnet = ChebNet(6, 6, 1, 1)
gat = GAT(6, 6, 1)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
models = [chebnet.to(device), gcn.to(device), gat.to(device)]

all_predict_values = []
epochs = 30
for i in range(len(models)):
    model = models[i]
    criterion = nn.MSELoss().to(device)
    optimizer = optim.Adam(params=model.parameters(), lr=3e-2)
    model.train()
    for epoch in range(epochs):
        epoch_loss, epoch_mae, epoch_rmse, epoch_mape = 0.0, 0.0, 0.0, 0.0
        num = 0
        start_time = time.time()
        for data in train_loader:  # data: {"graph": [B, N, N], "flow_x": [B, N, H, D], "flow_y": [B, N, 1, D]}
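            # --- assumed continuation: the source excerpt is truncated here ---
            # (the model call signature and metric helpers are not shown; this is a sketch)
            data = {k: v.to(device) for k, v in data.items()}
            optimizer.zero_grad()
            predict_value = model(data)  # assumed: model consumes the batch dict, returns [B, N, 1, D]
            loss = criterion(predict_value, data["flow_y"])
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            num += 1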
Example #8
File: train.py Project: yuneg11/GNN
    nfeatures = features.shape[1]
    nclass = len(labels.unique())
    sparse = (args.dataset == "citeseer")

    # Load model
    if args.model == "gcn":
        if sparse:
            model = SpGCN(nfeatures, args.hidden, nclass, args.dropout)
        else:
            model = GCN(nfeatures, args.hidden, nclass, args.dropout)
    elif args.model == "gat":
        if sparse:
            model = SpGAT(nfeatures, args.hidden, nclass, args.dropout,
                          args.alpha, args.nheads)
        else:
            model = GAT(nfeatures, args.hidden, nclass, args.dropout,
                        args.alpha, args.nheads)
    else:
        raise ValueError("Invalid model '{}'".format(args.model))

    # Move to device
    model.to(device)
    adj = adj.to(device)
    features = features.to(device)
    labels = labels.to(device)

    # Prepare training
    optimizer = optim.Adam(model.parameters(),
                           lr=args.lr,
                           weight_decay=args.weight_decay)
    early_stopping = EarlyStopping(args.patience)
    history = History()
Example #9
    model.eval()
    output = model(features, adj)
    loss = F.nll_loss(output[idx_test], labels[idx_test])
    acc = accuracy(output[idx_test], labels[idx_test])
    print('Test result\tloss:{:.4f}\tacc:{:.4f}'.format(loss, acc))


if __name__ == '__main__':
    seed = 2020
    hidden_dim = 16
    dropout = 0.5
    learning_rate = 0.01
    weight_decay = 5e-4
    epochs = 200

    set_seed(seed)

    adj, features, labels, idx_train, idx_val, idx_test = load_data()
    model = GAT(input_dim=features.shape[1],
                hidden_dim=hidden_dim,
                output_dim=labels.max().item() + 1,
                dropout=dropout,
                head_num=8)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=learning_rate,
                                 weight_decay=weight_decay)

    for e in range(epochs):
        train(e, model, optimizer, adj, features, labels, idx_train, idx_val)

    test(model, adj, features, labels, idx_test)
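set_seed is imported from the project's utilities; a typical implementation (an assumption, with the usual random/numpy/torch imports) is:

def set_seed(seed):
    # make runs reproducible across python, numpy and torch
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)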
Example #10
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
cudnn.enabled = False
cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.cuda.empty_cache()

# Load data
adj, features, labels, idx_train, idx_val, idx_test = load_data(path=".",
                                                                dataset="cora")

model = GAT(
    nfeat=features.shape[1],
    nhid=args.hidden,
    nhead=args.nb_heads,
    nclass=int(labels.max()) + 1,
    p_dropout=args.dropout,
)

optimizer = optim.Adam(model.parameters(),
                       lr=args.lr,
                       weight_decay=args.weight_decay)

if args.cuda:
    model.cuda()
    features = features.cuda()
    adj = adj.cuda()
    labels = labels.cuda()
    idx_train = idx_train.cuda()
    idx_val = idx_val.cuda()
Example #11
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools

from model import GAT
from comments import build_karate_club_graph

embed = nn.Embedding(34, 5)  # 34 nodes with embedding dim equal to 5
inputs = embed.weight
labeled_nodes = torch.tensor([0, 33])  # only the instructor and the president nodes are labeled
labels = torch.tensor([0, 1])  # their labels are different

edge_index = torch.from_numpy(build_karate_club_graph()).long()

net = GAT(4, 5, 5, 2)

optimizer = torch.optim.Adam(itertools.chain(net.parameters(),
                                             embed.parameters()),
                             lr=0.01)
all_logits = []
for epoch in range(5000):
    logits = net(inputs, edge_index)
    # we save the logits for visualization later
    all_logits.append(logits.detach())
    logp = F.log_softmax(logits, 1)
    # we only compute loss for labeled nodes
    loss = F.nll_loss(logp[labeled_nodes], labels)

    optimizer.zero_grad()
    loss.backward()
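    # the excerpt is truncated here; presumably the loop closes with the
    # parameter update (an assumed continuation):
    optimizer.step()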
Example #12
File: train.py Project: 63days/GAT
def main(args):
    device = torch.device('cuda' if args.cuda else 'cpu')

    adj, features, labels, idx_train, idx_val, idx_test = load_data(
        args.dataset)

    if args.model == 'gcn':
        model = GCN(nfeat=features.size(1),
                    nhid=args.hidden,
                    nclass=labels.max().item() + 1,
                    dropout=args.dropout)

        print('Model: GCN')

    elif args.model == 'gat':
        model = GAT(nfeat=features.size(1),
                    nhid=args.hidden,
                    nclass=labels.max().item() + 1,
                    dropout=args.dropout,
                    alpha=args.alpha,
                    nheads=args.n_heads)
        print('Model: GAT')

    elif args.model == 'spgcn':
        model = SpGCN(nfeat=features.size(1),
                      nhid=args.hidden,
                      nclass=labels.max().item() + 1,
                      dropout=args.dropout)
        print('Model: SpGCN')

    elif args.model == 'spgat':
        model = SpGAT(nfeat=features.size(1),
                      nhid=args.hidden,
                      nclass=labels.max().item() + 1,
                      dropout=args.dropout,
                      alpha=args.alpha,
                      nheads=args.n_heads)
        print('Model: SpGAT')

    optimizer = optim.Adam(model.parameters(),
                           lr=args.lr,
                           weight_decay=args.weight_decay)

    if args.cuda:
        adj = adj.cuda()
        features = features.cuda()
        labels = labels.cuda()
        idx_train = idx_train.cuda()
        idx_val = idx_val.cuda()
        idx_test = idx_test.cuda()
        model.cuda()
        print(device)

    def train(epoch):
        model.train()
        optimizer.zero_grad()
        output = model(features, adj)
        loss_train = F.nll_loss(output[idx_train], labels[idx_train])
        acc_train = accuracy(output[idx_train], labels[idx_train])
        loss_train.backward()
        optimizer.step()

        if not args.fastmode:
            model.eval()
            output = model(features, adj)

        loss_val = F.nll_loss(output[idx_val], labels[idx_val])
        acc_val = accuracy(output[idx_val], labels[idx_val])
        #         print('Epoch: {:04d}'.format(epoch + 1),
        #               'loss_train: {:.4f}'.format(loss_train.item()),
        #               'acc_train: {:.4f}'.format(acc_train.item()),
        #               'loss_val: {:.4f}'.format(loss_val.item()),
        #               'acc_val: {:.4f}'.format(acc_val.item()))
        pbar.set_description(
            '| epoch: {:4d} | loss_train: {:.4f} | acc_train: {:.4f} |'
            ' loss_val: {:.4f} | acc_val: {:.4f}'.format(
                epoch + 1, loss_train.item(), acc_train.item(),
                loss_val.item(), acc_val.item()))
        return loss_train.item(), loss_val.item()

    def test():
        model.eval()
        output = model(features, adj)
        loss_test = F.nll_loss(output[idx_test], labels[idx_test])
        acc_test = accuracy(output[idx_test], labels[idx_test])
        print("Test set results:", "loss= {:.4f}".format(loss_test.item()),
              "accuracy= {:.4f}".format(acc_test.item()))

    losses = {}
    pbar = tqdm(range(args.epochs))
    for epoch in pbar:
        loss_train, loss_val = train(epoch)

        if epoch % 10 == 0:
            if len(losses) == 0:
                losses['train'] = [loss_train]
                losses['val'] = [loss_val]

            else:
                losses['train'].append(loss_train)
                losses['val'].append(loss_val)

    f, ax = plt.subplots()

    train_loss = ax.plot(losses['train'], label='Train Loss')
    val_loss = ax.plot(losses['val'], label='Validation Loss')

    ax.legend()
    ax.set_xlabel('Epoch / 10')
    ax.set_ylabel('Loss')

    plt.savefig('results/loss_{}_{}.png'.format(args.model, args.dataset),
                dpi=300)

    print('Optimization Finished!')

    test()
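The accuracy helper comes from the project's utils module; the common pyGAT-style version (a sketch, not taken from this excerpt) is:

def accuracy(output, labels):
    # fraction of nodes whose argmax prediction matches the label
    preds = output.max(1)[1].type_as(labels)
    correct = preds.eq(labels).double().sum()
    return correct / len(labels)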
Example #13
def test(checkpoint_path, class_num, args):

    model = GAT(args.feat_dim, args.embed_dim, class_num, args.alpha,
                args.dropout, args.nheads, args.use_cuda)

    checkpoint = torch.load(checkpoint_path)
    model.load_state_dict(checkpoint['graph_state_dict'])
    if args.use_cuda:
        model.cuda()
    model.eval()

    for key in building.keys():
        node_num = test_dataset[key]['node_num']
        old_feature_map, adj_lists = collectGraph_test(
            test_dataset[key]['feature_path'], node_num, args.feat_dim,
            args.num_sample, args.suffix)
        old_feature_map = torch.FloatTensor(old_feature_map)
        if args.use_cuda:
            old_feature_map = old_feature_map.cuda()

        batch_num = int(math.ceil(node_num / float(args.batch_size)))
        new_feature_map = torch.FloatTensor()
        for batch in tqdm(range(batch_num)):
            start_node = batch * args.batch_size
            end_node = min((batch + 1) * args.batch_size, node_num)
            batch_nodes = range(start_node, end_node)
            batch_neighbors = [adj_lists[node] for node in batch_nodes]
            new_feature, _ = model(old_feature_map, batch_nodes,
                                   batch_neighbors)
            new_feature = F.normalize(new_feature, p=2, dim=1)
            new_feature_map = torch.cat(
                (new_feature_map, new_feature.cpu().detach()), dim=0)
        new_feature_map = new_feature_map.numpy()
        old_similarity = np.dot(old_feature_map.cpu().numpy(),
                                old_feature_map.cpu().numpy().T)
        new_similarity = np.dot(new_feature_map, new_feature_map.T)
        mAP_old = building[key].evalRetrieval(old_similarity, retrieval_result)
        mAP_new = building[key].evalRetrieval(new_similarity, retrieval_result)
        print(time.strftime('%Y-%m-%d %H:%M:%S'), 'eval {}'.format(key))
        print('base feature: {}, new feature: {}'.format(
            old_feature_map.size(), new_feature_map.shape))
        print('base mAP: {:.4f}, new mAP: {:.4f}, improve: {:.4f}'.format(
            mAP_old, mAP_new, mAP_new - mAP_old))

        ## directly update node's features by mean pooling features of its neighbors.
        meanAggregator = model.attentions[0]
        mean_feature_map = torch.FloatTensor()
        for batch in tqdm(range(batch_num)):
            start_node = batch * args.batch_size
            end_node = min((batch + 1) * args.batch_size, node_num)
            batch_nodes = range(start_node, end_node)
            batch_neighbors = [adj_lists[node] for node in batch_nodes]
            mean_feature = meanAggregator.meanAggregate(
                old_feature_map, batch_nodes, batch_neighbors)
            mean_feature = F.normalize(mean_feature, p=2, dim=1)
            mean_feature_map = torch.cat(
                (mean_feature_map, mean_feature.cpu().detach()), dim=0)
        mean_feature_map = mean_feature_map.numpy()
        mean_similarity = np.dot(mean_feature_map, mean_feature_map.T)
        mAP_mean = building[key].evalRetrieval(mean_similarity,
                                               retrieval_result)
        print('mean aggregation mAP: {:.4f}'.format(mAP_mean))
        print()
Example #14
def train(args):
    ## load training data
    print "loading training data ......"
    node_num, class_num = removeIsolated(args.suffix)
    label, feature_map, adj_lists = collectGraph_train(node_num, class_num,
                                                       args.feat_dim,
                                                       args.num_sample,
                                                       args.suffix)
    label = torch.LongTensor(label)
    feature_map = torch.FloatTensor(feature_map)

    model = GAT(args.feat_dim, args.embed_dim, class_num, args.alpha,
                args.dropout, args.nheads, args.use_cuda)

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.learning_rate,
                                 weight_decay=args.weight_decay)
    scheduler = StepLR(optimizer,
                       step_size=args.step_size,
                       gamma=args.learning_rate_decay)

    ## train
    np.random.seed(2)
    random.seed(2)
    rand_indices = np.random.permutation(node_num)
    train_nodes = rand_indices[:args.train_num]
    val_nodes = rand_indices[args.train_num:]

    if args.use_cuda:
        model.cuda()
        label = label.cuda()
        feature_map = feature_map.cuda()

    epoch_num = args.epoch_num
    batch_size = args.batch_size
    iter_num = int(math.ceil(args.train_num / float(batch_size)))
    check_loss = []
    val_accuracy = []
    check_step = args.check_step
    train_loss = 0.0
    iter_cnt = 0
    for e in range(epoch_num):
        model.train()
        scheduler.step()

        random.shuffle(train_nodes)
        for batch in range(iter_num):
            batch_nodes = train_nodes[batch * batch_size:(batch + 1) *
                                      batch_size]
            batch_label = label[batch_nodes].squeeze()
            batch_neighbors = [adj_lists[node] for node in batch_nodes]
            _, logit = model(feature_map, batch_nodes, batch_neighbors)
            loss = F.nll_loss(logit, batch_label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            iter_cnt += 1
            train_loss += loss.cpu().item()
            if iter_cnt % check_step == 0:
                check_loss.append(train_loss / check_step)
                print(time.strftime('%Y-%m-%d %H:%M:%S'),
                      "epoch: {}, iter: {}, loss: {:.4f}".format(
                          e, iter_cnt, train_loss / check_step))
                train_loss = 0.0

        ## validation
        model.eval()

        group = int(math.ceil(len(val_nodes) / float(batch_size)))
        val_cnt = 0
        for batch in range(group):
            batch_nodes = val_nodes[batch * batch_size:(batch + 1) *
                                    batch_size]
            batch_label = label[batch_nodes].squeeze()
            batch_neighbors = [adj_lists[node] for node in batch_nodes]
            _, logit = model(feature_map, batch_nodes, batch_neighbors)
            batch_predict = np.argmax(logit.cpu().detach().numpy(), axis=1)
            val_cnt += np.sum(batch_predict == batch_label.cpu().numpy())
        val_accuracy.append(val_cnt / float(len(val_nodes)))
        print(time.strftime('%Y-%m-%d %H:%M:%S'),
              "Epoch: {}, Validation Accuracy: {:.4f}".format(
                  e, val_cnt / float(len(val_nodes))))
        print("******" * 10)

    checkpoint_path = 'checkpoint/checkpoint_{}.pth'.format(
        time.strftime('%Y%m%d%H%M'))
    torch.save(
        {
            'train_num': args.train_num,
            'epoch_num': args.epoch_num,
            'batch_size': args.batch_size,
            'learning_rate': args.learning_rate,
            'embed_dim': args.embed_dim,
            'num_sample': args.num_sample,
            'graph_state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }, checkpoint_path)

    vis = visdom.Visdom(env='GraphAttention', port='8099')
    vis.line(X=np.arange(1, len(check_loss) + 1) * check_step,
             Y=np.array(check_loss),
             opts=dict(title=time.strftime('%Y-%m-%d %H:%M:%S'),
                       xlabel='itr.',
                       ylabel='loss'))
    vis.line(X=np.arange(1, len(val_accuracy) + 1),
             Y=np.array(val_accuracy),
             opts=dict(title=time.strftime('%Y-%m-%d %H:%M:%S'),
                       xlabel='epoch',
                       ylabel='accuracy'))

    return checkpoint_path, class_num
Example #15
args.cuda = not args.no_cuda and torch.cuda.is_available()

random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# Load data
adj, features, labels, idx_train, idx_val, idx_test = load_data()

if args.fastGAT == 0:
    # Model and optimizer
    model = GAT(nfeat=features.shape[1],
                nhid=args.hidden,
                nclass=int(labels.max()) + 1,
                dropout=args.dropout,
                nheads=args.nb_heads,
                alpha=args.alpha)
    optimizer = optim.Adam(model.parameters(),
                           lr=args.lr,
                           weight_decay=args.weight_decay)
else:
    # Model and optimizer
    model = FastGAT(nfeat=features.shape[1],
                    nhid=args.hidden,
                    nclass=int(labels.max()) + 1,
                    dropout=args.dropout,
                    nheads=args.nb_heads,
                    alpha=args.alpha)
    optimizer = optim.Adam(model.parameters(),
                           lr=args.lr,
Example #16
def main(args):
    # load and preprocess dataset
    data = CoraGraphDataset()

    g = data[0]
    if args.gpu < 0:
        cuda = False
    else:
        cuda = True
        g = g.int().to(args.gpu)

    features = g.ndata['feat']
    labels = g.ndata['label']
    train_mask = g.ndata['train_mask']
    val_mask = g.ndata['val_mask']
    test_mask = g.ndata['test_mask']
    num_feats = features.shape[1]
    n_classes = data.num_classes
    n_edges = g.number_of_edges()

    print("""----Data statistics------'
      #Edges %d
      #Classes %d
      #Train samples %d
      #Val samples %d
      #Test samples %d""" %
          (n_edges, n_classes,
           train_mask.int().sum().item(),
           val_mask.int().sum().item(),
           test_mask.int().sum().item()))

    # add self loop
    g = dgl.remove_self_loop(g)
    g = dgl.add_self_loop(g)
    n_edges = g.number_of_edges()
    # create model
    heads = ([args.num_heads] * args.num_layers) + [args.num_out_heads]
    model = GAT(args.num_layers,
                num_feats,
                args.num_hidden,
                n_classes,
                heads,
                F.elu,
                args.in_drop,
                args.attn_drop,
                args.negative_slope,
                args.residual)
    print(model)
    if args.early_stop:
        stopper = EarlyStopping(patience=100)
    if cuda:
        model.cuda()
    loss_fcn = torch.nn.CrossEntropyLoss()

    # use optimizer
    optimizer = torch.optim.Adam(
        model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    # initialize graph
    dur = []
    for epoch in range(args.epochs):
        model.train()
        if epoch >= 3:
            t0 = time.time()
        # forward
        logits = model(g, features)
        loss = loss_fcn(logits[train_mask], labels[train_mask])

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if epoch >= 3:
            dur.append(time.time() - t0)

        train_acc = accuracy(logits[train_mask], labels[train_mask])

        if args.fastmode:
            val_acc = accuracy(logits[val_mask], labels[val_mask])
        else:
            val_acc = evaluate(model, g, features, labels, val_mask)
            if args.early_stop:
                if stopper.step(val_acc, model):
                    break

        print("Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | TrainAcc {:.4f} |"
              " ValAcc {:.4f} | ETputs(KTEPS) {:.2f}".
              format(epoch, np.mean(dur), loss.item(), train_acc,
                     val_acc, n_edges / np.mean(dur) / 1000))

    print()
    if args.early_stop:
        model.load_state_dict(torch.load('es_checkpoint.pt'))
    acc = evaluate(model, g, features, labels, test_mask)
    print("Test Accuracy {:.4f}".format(acc))