Example #1
0
def train_twostage(model_ts):
    """Train a two-stage link-prediction model on the global training graph.

    Runs 300 epochs of BCE-with-logits training using negative sampling
    and per-epoch edge dropout. Every 10 epochs prints training loss,
    test cross-entropy, and test AUC on a fixed negatively-sampled test
    edge set.

    Args:
        model_ts: model taking (features, normalized_adj, edge_pairs)
            and returning per-edge logits.

    Relies on module-level globals: args, adj_train, adj_test,
    bin_adj_train, features_train, n.
    """
    optimizer_ts = optim.Adam(model_ts.parameters(),
                              lr=args.lr,
                              weight_decay=args.weight_decay)
    # Positive edges as (num_edges, 2) index pairs from the sparse adjacency.
    edges = adj_train.indices().t()
    edges_test = adj_test.indices().t()
    # Fixed evaluation set: one sampled negative per positive test edge,
    # drawn once so the reported metrics are comparable across epochs.
    edges_test_eval, labels_test_eval = negative_sample(
        edges_test, 1, bin_adj_train)
    for t in range(300):
        # Re-normalize the adjacency each epoch after random edge dropout.
        adj_input = make_normalized_adj(edge_dropout(edges, args.edge_dropout),
                                        n)
        edges_eval, labels = negative_sample(edges, args.negsamplerate,
                                             bin_adj_train)
        preds = model_ts(features_train, adj_input, edges_eval)
        loss = torch.nn.BCEWithLogitsLoss()(preds, labels)
        optimizer_ts.zero_grad()
        loss.backward()
        if t % 10 == 0:
            # Evaluation only — no gradients needed; no_grad avoids
            # building an autograd graph for the forward pass.
            with torch.no_grad():
                preds_test_eval = model_ts(features_train, adj_input,
                                           edges_test_eval)
                test_ce = torch.nn.BCEWithLogitsLoss()(preds_test_eval,
                                                       labels_test_eval)
                test_auc = sklearn.metrics.roc_auc_score(
                    labels_test_eval.long().numpy(),
                    nn.Sigmoid()(preds_test_eval).numpy())
            print(t, loss.item(), test_ce.item(), test_auc)
        optimizer_ts.step()
 def train_twostage(model_ts, train_instances, test_instances, features, algoname):
     """Train a two-stage model over multiple graph instances.

     Each optimization step samples one training instance at random and
     performs a BCE-with-logits update with negative sampling and edge
     dropout. Every 10 steps prints the mean test cross-entropy and AUC
     over all test instances, and records per-instance AUCs into the
     module-level `aucs[algoname]` array.

     Args:
         model_ts: model taking (features, adj, edge_pairs) -> logits.
         train_instances: instance ids to train on.
         test_instances: instance ids to evaluate on.
         features: dict mapping instance id -> feature matrix.
         algoname: key into the module-level `aucs` results dict.

     Relies on module-level globals: args, adj_train, adj_all,
     bin_adj_train, bin_adj_all, aucs, np.
     """
     optimizer_ts = optim.Adam(model_ts.parameters(),
                        lr=args.lr, weight_decay=args.weight_decay)
     edges = {}
     edges_eval = {}
     labels_eval = {}
     # Build, once, a fixed evaluation edge set (1 negative per positive)
     # for every instance so metrics are comparable across steps.
     for i in train_instances + test_instances:
         edges[i] = adj_train[i].indices().t()
         edges_eval_i, labels_eval_i = negative_sample(adj_all[i].indices().t(), 1, bin_adj_all[i])
         edges_eval[i] = edges_eval_i
         labels_eval[i] = labels_eval_i

     def get_evaluation(instances):
         """Return (mean BCE, mean AUC) over `instances`; records per-instance AUCs."""
         test_ce = 0
         test_auc = 0
         with torch.no_grad():  # evaluation only — skip autograd bookkeeping
             for i in instances:
                 preds_test_eval = model_ts(features[i], adj_train[i], edges_eval[i])
                 test_ce += torch.nn.BCEWithLogitsLoss()(preds_test_eval, labels_eval[i])
                 test_auc_i = sklearn.metrics.roc_auc_score(labels_eval[i].long().numpy(), nn.Sigmoid()(preds_test_eval).numpy())
                 # BUG FIX: record the per-instance AUC (test_auc_i), not the
                 # running sum (test_auc) which was stale/accumulated.
                 aucs[algoname][test_instances.index(i)] = test_auc_i
                 test_auc += test_auc_i
         return test_ce/len(instances), test_auc/len(instances)

     for t in range(150):
         # Stochastic over instances: one random training graph per step.
         i = np.random.choice(train_instances)
         adj_input = make_normalized_adj(edge_dropout(edges[i], args.edge_dropout), bin_adj_train[i].shape[0])
         edges_eval_i, labels_i = negative_sample(edges[i], args.negsamplerate, bin_adj_train[i])
         preds = model_ts(features[i], adj_input, edges_eval_i)
         loss = torch.nn.BCEWithLogitsLoss()(preds, labels_i)
         optimizer_ts.zero_grad()
         loss.backward()
         if t % 10 == 0:
             test_ce, test_auc = get_evaluation(test_instances)
             print(t, loss.item(), test_ce.item(), test_auc)
         optimizer_ts.step()
def train_twostage(model_ts, lr=0.005, weight_decay=5e-4, epochs=300,
                   edge_dropout_rate=0.2, negsamplerate=1):
    """Train a two-stage link-prediction model on the global training graph.

    Generalized from the original hard-coded version: the optimizer and
    sampling hyperparameters are now keyword arguments whose defaults
    reproduce the previous behavior exactly.

    Args:
        model_ts: model taking (features, normalized_adj, edge_pairs)
            and returning per-edge logits.
        lr: Adam learning rate (default 0.005, as before).
        weight_decay: Adam L2 penalty (default 5e-4, as before).
        epochs: number of optimization steps (default 300, as before).
        edge_dropout_rate: fraction of edges dropped per epoch (default 0.2).
        negsamplerate: negatives sampled per positive edge (default 1).

    Relies on module-level globals: adj_train, bin_adj_train,
    features_train, n.
    """
    optimizer_ts = optim.Adam(model_ts.parameters(),
                              lr=lr,
                              weight_decay=weight_decay)
    # Positive edges as (num_edges, 2) index pairs.
    edges = adj_train.indices().t()
    for t in range(epochs):
        # Re-normalize the adjacency each epoch after random edge dropout.
        adj_input = make_normalized_adj(edge_dropout(edges, edge_dropout_rate), n)
        edges_eval, labels = negative_sample(edges, negsamplerate, bin_adj_train)
        preds = model_ts(features_train, adj_input, edges_eval)
        loss = torch.nn.BCEWithLogitsLoss()(preds, labels)
        optimizer_ts.zero_grad()
        loss.backward()
        optimizer_ts.step()
Example #4
0
        adj_test, features, labels = load_nofeatures(
            args.dataset, '_test_{:.2f}'.format(train_pct), n)
        adj_valid, features, labels = load_nofeatures(
            args.dataset, '_valid_{:.2f}'.format(train_pct), n)

# Canonicalize sparse adjacency tensors (merge duplicate indices) before use.
adj_test = adj_test.coalesce()
adj_valid = adj_valid.coalesce()
adj_train = adj_train.coalesce()
n = adj_train.shape[0]  # number of nodes
K = args.K
# Dense binary (0/1) adjacency matrices for each split.
bin_adj_test = (adj_test.to_dense() > 0).float()
bin_adj_train = (adj_train.to_dense() > 0).float()
m_train = bin_adj_train.sum()  # number of (directed) training edges
bin_adj_valid = (adj_valid.to_dense() > 0).float()
# Union of train/test/valid edges as a single binary adjacency.
bin_adj_all = (bin_adj_train + bin_adj_test + bin_adj_valid > 0).float()
adj_all = make_normalized_adj(bin_adj_all.nonzero(), n)
# NOTE(review): features_test is not defined above this line in this
# snippet — presumably set elsewhere; verify before running standalone.
nfeat = features_test.shape[1]

# Alternate setup: load a single whole graph (no train/test edge split).
adj_all, features_test, labels, idx_train, idx_val, idx_test = load_data(
    'data/{}/'.format(args.dataset), '{}'.format(args.dataset))
adj_all = adj_all.coalesce()  # canonicalize sparse indices
adj_test = adj_all  # test on the same (full) graph
bin_adj_all = (adj_all.to_dense() > 0).float()  # dense 0/1 adjacency
n = adj_all.shape[0]  # number of nodes
K = args.K
nfeat = features_test.shape[1]  # input feature dimension

##############################################################################
#INITIALIZE MODELS
##############################################################################