Example #1
import torch
import torch.nn as nn
from torch.nn import Parameter

# `GAT` (the encoder) and the global `args` namespace are defined elsewhere in this repo.
class DAEGC(nn.Module):
    def __init__(self,
                 num_features,
                 hidden_size,
                 embedding_size,
                 alpha,
                 num_clusters,
                 v=1):
        super(DAEGC, self).__init__()
        self.num_clusters = num_clusters
        self.v = v

        # load the pretrained GAT encoder
        self.gat = GAT(num_features, hidden_size, embedding_size, alpha)
        self.gat.load_state_dict(
            torch.load(args.pretrain_path, map_location='cpu'))

        # cluster layer
        self.cluster_layer = Parameter(
            torch.Tensor(num_clusters, embedding_size))
        torch.nn.init.xavier_normal_(self.cluster_layer.data)

    def forward(self, x, adj, M):
        A_pred, z = self.gat(x, adj, M)
        q = self.get_Q(z)

        return A_pred, z, q

    def get_Q(self, z):
        # Soft cluster assignments: Student's t-distribution kernel between
        # node embeddings and cluster centers, as in DEC.
        q = 1.0 / (1.0 + torch.sum(
            torch.pow(z.unsqueeze(1) - self.cluster_layer, 2), 2) / self.v)
        q = q.pow((self.v + 1.0) / 2.0)
        q = (q.t() / torch.sum(q, 1)).t()
        return q
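
In DAEGC-style training, q is typically matched against a sharpened target distribution with a KL-divergence loss, as in DEC. A minimal sketch, assuming the standard DEC formulation (the helper below is illustrative and not part of the snippet above; F is torch.nn.functional):

def target_distribution(q):
    # Sharpen assignments: square q, normalize per cluster, then per node.
    weight = q ** 2 / q.sum(0)
    return (weight.t() / weight.sum(1)).t()

# p = target_distribution(q.detach())
# kl_loss = F.kl_div(q.log(), p, reduction='batchmean')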
Example #2
    if loss_values[-1] < best:
        best = loss_values[-1]
        best_epoch = epoch
        patience_counter = 0
    else:
        patience_counter += 1

    if patience_counter == args.patience:
        break

    # drop checkpoints older than the current best epoch
    files = glob.glob('*.pkl')
    for file in files:
        epoch_nb = int(file.split('.')[0])
        if epoch_nb < best_epoch:
            os.remove(file)

# drop checkpoints newer than the best epoch
files = glob.glob('*.pkl')
for file in files:
    epoch_nb = int(file.split('.')[0])
    if epoch_nb > best_epoch:
        os.remove(file)

print("Optimization Finished!")
print("Total time elapsed: {:.4f}s".format(time.time() - start_time))

# Restore best model
print('Loading {}th epoch'.format(best_epoch))
model.load_state_dict(torch.load('{}.pkl'.format(best_epoch)))

# Testing
run_test(model, features, adj, idx_test, labels)
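
The cleanup logic above assumes the training loop saves one checkpoint per epoch, named by the epoch number, so int(file.split('.')[0]) recovers the epoch. A minimal sketch of that convention, assuming a train(epoch) helper that returns the epoch's loss (both names are illustrative):

for epoch in range(args.epochs):
    loss_values.append(train(epoch))
    # one checkpoint per epoch; the loops above prune all but the best
    torch.save(model.state_dict(), '{}.pkl'.format(epoch))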
Example #3
    else:
        bad_counter += 1

    if bad_counter == args.patience:
        break

    # drop checkpoints older than the current best epoch
    files = glob.glob('*' + save_name)
    for file in files:
        epoch_nb = int(file.split('.')[0])
        if epoch_nb < best_epoch:
            os.remove(file)

# drop checkpoints newer than the best epoch
files = glob.glob('*' + save_name)
for file in files:
    epoch_nb = int(file.split('.')[0])
    if epoch_nb > best_epoch:
        os.remove(file)

print("Optimization Finished!")
print("Total time elapsed: {:.4f}s".format(time.time() - t_total))

# Restore best model
print('Loading {}th epoch'.format(best_epoch))
model.load_state_dict(torch.load(('{}' + save_name).format(best_epoch)))

# Testing
compute_test()

print('OK.')
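
Like Example #2, this variant assumes per-epoch checkpoints whose filenames combine the epoch number with a configurable suffix; the assumed save call would look something like:

torch.save(model.state_dict(), '{}{}'.format(epoch, save_name))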
Example #4
def main(args):
    # relies on module-level imports (time, numpy as np, torch, dgl,
    # torch.nn.functional as F) and repo-local helpers: GAT, EarlyStopping,
    # accuracy, evaluate
    # load and preprocess dataset
    data = CoraGraphDataset()

    g = data[0]
    if args.gpu < 0:
        cuda = False
    else:
        cuda = True
        g = g.int().to(args.gpu)

    features = g.ndata['feat']
    labels = g.ndata['label']
    train_mask = g.ndata['train_mask']
    val_mask = g.ndata['val_mask']
    test_mask = g.ndata['test_mask']
    num_feats = features.shape[1]
    n_classes = data.num_labels
    n_edges = data.graph.number_of_edges()

    print("""----Data statistics------
      #Edges %d
      #Classes %d
      #Train samples %d
      #Val samples %d
      #Test samples %d""" %
          (n_edges, n_classes,
           train_mask.int().sum().item(),
           val_mask.int().sum().item(),
           test_mask.int().sum().item()))

    # add self loop
    g = dgl.remove_self_loop(g)
    g = dgl.add_self_loop(g)
    n_edges = g.number_of_edges()
    # create model
    heads = ([args.num_heads] * args.num_layers) + [args.num_out_heads]
    model = GAT(args.num_layers,
                num_feats,
                args.num_hidden,
                n_classes,
                heads,
                F.elu,
                args.in_drop,
                args.attn_drop,
                args.negative_slope,
                args.residual)
    print(model)
    if args.early_stop:
        stopper = EarlyStopping(patience=100)
    if cuda:
        model.cuda()
    loss_fcn = torch.nn.CrossEntropyLoss()

    # use optimizer
    optimizer = torch.optim.Adam(
        model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    # initialize graph
    dur = []
    for epoch in range(args.epochs):
        model.train()
        if epoch >= 3:
            t0 = time.time()
        # forward
        logits = model(g, features)
        loss = loss_fcn(logits[train_mask], labels[train_mask])

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if epoch >= 3:
            dur.append(time.time() - t0)

        train_acc = accuracy(logits[train_mask], labels[train_mask])

        if args.fastmode:
            val_acc = accuracy(logits[val_mask], labels[val_mask])
        else:
            val_acc = evaluate(model, g, features, labels, val_mask)
            if args.early_stop:
                if stopper.step(val_acc, model):
                    break

        print("Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | TrainAcc {:.4f} |"
              " ValAcc {:.4f} | ETputs(KTEPS) {:.2f}".
              format(epoch, np.mean(dur), loss.item(), train_acc,
                     val_acc, n_edges / np.mean(dur) / 1000))

    print()
    if args.early_stop:
        model.load_state_dict(torch.load('es_checkpoint.pt'))
    acc = evaluate(model, g, features, labels, test_mask)
    print("Test Accuracy {:.4f}".format(acc))
Example #5
def test(checkpoint_path, class_num, args):
    # `building`, `test_dataset`, `retrieval_result`, and collectGraph_test()
    # are module-level globals defined elsewhere in this script.
    model = GAT(args.feat_dim, args.embed_dim, class_num, args.alpha,
                args.dropout, args.nheads, args.use_cuda)

    checkpoint = torch.load(checkpoint_path)
    model.load_state_dict(checkpoint['graph_state_dict'])
    if args.use_cuda:
        model.cuda()
    model.eval()

    for key in building.keys():
        node_num = test_dataset[key]['node_num']
        old_feature_map, adj_lists = collectGraph_test(
            test_dataset[key]['feature_path'], node_num, args.feat_dim,
            args.num_sample, args.suffix)
        old_feature_map = torch.FloatTensor(old_feature_map)
        if args.use_cuda:
            old_feature_map = old_feature_map.cuda()

        batch_num = int(math.ceil(node_num / float(args.batch_size)))
        new_feature_map = torch.FloatTensor()
        for batch in tqdm(range(batch_num)):
            start_node = batch * args.batch_size
            end_node = min((batch + 1) * args.batch_size, node_num)
            batch_nodes = range(start_node, end_node)
            batch_neighbors = [adj_lists[node] for node in batch_nodes]
            new_feature, _ = model(old_feature_map, batch_nodes,
                                   batch_neighbors)
            new_feature = F.normalize(new_feature, p=2, dim=1)
            new_feature_map = torch.cat(
                (new_feature_map, new_feature.cpu().detach()), dim=0)
        new_feature_map = new_feature_map.numpy()
        old_similarity = np.dot(old_feature_map.cpu().numpy(),
                                old_feature_map.cpu().numpy().T)
        new_similarity = np.dot(new_feature_map, new_feature_map.T)
        mAP_old = building[key].evalRetrieval(old_similarity, retrieval_result)
        mAP_new = building[key].evalRetrieval(new_similarity, retrieval_result)
        print(time.strftime('%Y-%m-%d %H:%M:%S'), 'eval {}'.format(key))
        print('base feature: {}, new feature: {}'.format(
            old_feature_map.size(), new_feature_map.shape))
        print('base mAP: {:.4f}, new mAP: {:.4f}, improve: {:.4f}'.format(
            mAP_old, mAP_new, mAP_new - mAP_old))

        # Baseline: update each node's features by mean-pooling its neighbors' features.
        meanAggregator = model.attentions[0]
        mean_feature_map = torch.FloatTensor()
        for batch in tqdm(range(batch_num)):
            start_node = batch * args.batch_size
            end_node = min((batch + 1) * args.batch_size, node_num)
            batch_nodes = range(start_node, end_node)
            batch_neighbors = [adj_lists[node] for node in batch_nodes]
            mean_feature = meanAggregator.meanAggregate(
                old_feature_map, batch_nodes, batch_neighbors)
            mean_feature = F.normalize(mean_feature, p=2, dim=1)
            mean_feature_map = torch.cat(
                (mean_feature_map, mean_feature.cpu().detach()), dim=0)
        mean_feature_map = mean_feature_map.numpy()
        mean_similarity = np.dot(mean_feature_map, mean_feature_map.T)
        mAP_mean = building[key].evalRetrieval(mean_similarity,
                                               retrieval_result)
        print('mean aggregation mAP: {:.4f}'.format(mAP_mean))
        print("")