def test_twostage(model_ts, test_instances_eval, algoname):
    for test_i in test_instances_eval:
        # predict probability that all unobserved edges exist
        n = adj_train[test_i].shape[0]
        indices = torch.tensor(np.arange(n))
        # build an (n**2, 2) tensor listing every (i, j) node pair:
        # column 0 is the source node, column 1 the target node
        to_pred = torch.zeros(n**2, 2)
        to_pred[:, 1] = indices.repeat(n)
        for i in range(n):
            to_pred[i * n:(i + 1) * n, 0] = i
        to_pred = to_pred.long()
        preds = model_ts(features_train[test_i], adj_train[test_i], to_pred)
        preds = nn.Sigmoid()(preds).view(n, n)
        # keep observed edges as-is; fill unobserved entries with predictions
        preds = bin_adj_train[test_i] + (1 - bin_adj_train[test_i]) * preds
        if args.objective == 'modularity':
            r = greedy_modularity_communities(preds, K)
            loss = loss_fn(None, r, None, None, bin_adj_all[test_i], test_object[test_i], args).item()
            vals[algoname + '-agglomerative'][test_instances.index(test_i)] = loss
            r = partition(preds, K)
            loss = loss_fn(None, r, None, None, bin_adj_all[test_i], test_object[test_i], args).item()
            vals[algoname + '-recursive'][test_instances.index(test_i)] = loss
            # row-normalize the predicted adjacency before the spectral baseline
            degrees = preds.sum(dim=1)
            preds = torch.diag(1. / degrees) @ preds
            mod_pred = make_modularity_matrix(preds)
            r = baseline_spectral(mod_pred, K)
            loss = loss_fn(None, r, None, None, bin_adj_all[test_i], test_object[test_i], args).item()
            vals[algoname + '-spectral'][test_instances.index(test_i)] = loss
        elif args.objective == 'kcenter':
            print('making dists')
            if args.use_igraph:
                dist_ts = make_dists_igraph(preds)
            else:
                dist_ts = make_all_dists(preds, 100)
                # replace the unreachable-pair sentinel (100) with the diameter
                diameter = dist_ts[dist_ts < 100].max()
                dist_ts[dist_ts == 100] = diameter
            print(test_i)
            dist_ts = dist_ts.float()
            diameter = dist_ts.max()
            x = gonzalez_kcenter(dist_ts, K)
            loss = obj_test[test_i](x)
            vals[algoname + '-gonzalez'][test_instances.index(test_i)] = loss.item()
            x = greedy_kcenter(dist_ts, diameter, K)
            loss = obj_test[test_i](x)
            vals[algoname + '-greedy'][test_instances.index(test_i)] = loss.item()
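# A minimal sketch (not part of the original pipeline): the to_pred
# construction in test_twostage enumerates every (i, j) node pair with a
# Python loop; torch.cartesian_prod builds the same (n**2, 2) index tensor
# in one vectorized call. _all_pairs_indices is a hypothetical helper.
def _all_pairs_indices(n):
    # rows come out as (0, 0), (0, 1), ..., (0, n-1), (1, 0), ...:
    # column 0 is the source node i, column 1 the target node j,
    # matching the loop-built to_pred above
    idx = torch.arange(n)
    return torch.cartesian_prod(idx, idx)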
# try to load cached pairwise distances; recompute and cache them on a miss
try:
    dist_ts = torch.load('{}_twostage_dist.pt'.format(args.dataset))
except:
    print('making dists')
    if args.use_igraph:
        print('using igraph')
        dist_ts = make_dists_igraph(preds)
    else:
        print('using networkx')
        dist_ts = make_all_dists(preds, 100)
        # replace the unreachable-pair sentinel (100) with the diameter
        diameter = dist_ts[dist_ts < 100].max()
        dist_ts[dist_ts == 100] = diameter
    print('made dists')
    torch.save(dist_ts, '{}_twostage_dist.pt'.format(args.dataset))
dist_ts = dist_ts.float()
diameter = dist_ts.max()
x = gonzalez_kcenter(dist_ts, K)
print('gonzalez ts', obj_train_hardmax(x), obj_test(x))
print(dist_ts.type(), diameter.type())
x = greedy_kcenter(dist_ts, diameter, K)
print('greedy ts', obj_train_hardmax(x), obj_test(x))

##############################################################################
#TRAIN END-TO-END GCN
##############################################################################
if run_gcne2e:
    print('just GCN')
    optimizer_gcn = optim.Adam(model_gcn.parameters(), lr=args.lr,
                               weight_decay=args.weight_decay)
    if args.objective == 'modularity':
for idx, i in enumerate(test_instances):
    if args.objective == 'modularity':
        preds = bin_adj_train[i]
        r = greedy_modularity_communities(preds, K)
        vals['train-agglomerative'][idx] = loss_fn(None, r, None, None, bin_adj_all[i], test_object[i], args).item()
        r = partition(preds, K)
        vals['train-recursive'][idx] = loss_fn(None, r, None, None, bin_adj_all[i], test_object[i], args).item()
        # row-normalize the training adjacency before the spectral baseline
        degrees = preds.sum(dim=1)
        preds = torch.diag(1. / degrees) @ preds
        mod_pred = make_modularity_matrix(preds)
        r = baseline_spectral(mod_pred, K)
        vals['train-spectral'][idx] = loss_fn(None, r, None, None, bin_adj_all[i], test_object[i], args).item()
    elif args.objective == 'kcenter':
        # the enclosing loop already yields (idx, i) for each test instance
        x = gonzalez_kcenter(dist_train[i], K)
        vals['train-gonzalez'][idx] = test_object[i](x).item()
        x = greedy_kcenter(dist_train[i], diameter, K)
        vals['train-greedy'][idx] = test_object[i](x).item()

for algo in algs:
    if 'train' in algo:
        print(algo, np.mean(vals[algo]), np.std(vals[algo]))
print()
for algo in algs:
    print(algo, np.mean(vals[algo]), np.std(vals[algo]))

with open('results_distributional_{}_{}_{}.pickle'.format(
        args.dataset, args.objective, args.K), 'wb') as f:
    pickle.dump((vals, aucs), f)
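# A minimal sketch (assuming the (vals, aucs) layout pickled above) for
# reloading results in a later analysis session; load_results is a
# hypothetical helper, not part of the original experiment script.
def load_results(dataset, objective, K):
    fname = 'results_distributional_{}_{}_{}.pickle'.format(dataset, objective, K)
    with open(fname, 'rb') as f:
        vals, aucs = pickle.load(f)
    return vals, aucs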