import numpy as np
import torch
import torch.nn as nn

# NOTE: adj_train, features_train, bin_adj_train, bin_adj_all, test_object,
# obj_test, test_instances, vals, K, loss_fn, args and the clustering /
# k-center helpers (greedy_modularity_communities, partition,
# make_modularity_matrix, baseline_spectral, make_dists_igraph,
# make_all_dists, gonzalez_kcenter, greedy_kcenter) are module-level
# objects defined elsewhere in the project.
def test_twostage(model_ts, test_instances_eval, algoname):
        # Two-stage baseline: complete each test graph with the link predictor,
        # then run classical clustering / k-center heuristics on the result.
        for test_i in test_instances_eval:
            # predict the probability that each unobserved edge exists
            n = adj_train[test_i].shape[0]
            indices = torch.tensor(np.arange(n))
            to_pred = torch.zeros(n**2, 2)
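            # column 0 holds the source node, column 1 the target node, covering all n*n pairs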
            to_pred[:, 1] = indices.repeat(n)
            for i in range(n):
                to_pred[i * n:(i + 1) * n, 0] = i
            to_pred = to_pred.long()
            preds = model_ts(features_train[test_i], adj_train[test_i],
                             to_pred)
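            # squash logits to probabilities; observed training edges stay fixed at 1,
            # predicted probabilities fill in the unobserved entries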
            preds = nn.Sigmoid()(preds).view(n, n)
            preds = bin_adj_train[test_i] + (1 - bin_adj_train[test_i]) * preds

            if args.objective == 'modularity':
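                # agglomerative modularity clustering on the completed adjacency,
                # scored against the full adjacency bin_adj_all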
                r = greedy_modularity_communities(preds, K)
                loss = loss_fn(None, r, None, None, bin_adj_all[test_i],
                               test_object[test_i], args).item()
                vals[algoname +
                     '-agglomerative'][test_instances.index(test_i)] = loss
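                # recursive partitioning heuristic, scored the same way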
                r = partition(preds, K)
                loss = loss_fn(None, r, None, None, bin_adj_all[test_i],
                               test_object[test_i], args).item()
                vals[algoname +
                     '-recursive'][test_instances.index(test_i)] = loss
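                # degree-normalize the predictions, build the modularity matrix,
                # and run the spectral clustering baseline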
                degrees = preds.sum(dim=1)
                preds = torch.diag(1. / degrees) @ preds
                mod_pred = make_modularity_matrix(preds)
                r = baseline_spectral(mod_pred, K)
                loss = loss_fn(None, r, None, None, bin_adj_all[test_i],
                               test_object[test_i], args).item()
                vals[algoname +
                     '-spectral'][test_instances.index(test_i)] = loss
            elif args.objective == 'kcenter':
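                # all-pairs shortest-path distances on the predicted graph;
                # the fallback reports unreachable pairs as 100, which are then capped at the diameter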
                print('making dists')
                if args.use_igraph:
                    dist_ts = make_dists_igraph(preds)
                else:
                    dist_ts = make_all_dists(preds, 100)
                    diameter = dist_ts[dist_ts < 100].max()
                    dist_ts[dist_ts == 100] = diameter
                print(test_i)
                dist_ts = dist_ts.float()
                diameter = dist_ts.max()
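                # Gonzalez farthest-first heuristic for k-center, scored with the test objective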
                x = gonzalez_kcenter(dist_ts, K)
                loss = obj_test[test_i](x)
                vals[algoname +
                     '-gonzalez'][test_instances.index(test_i)] = loss.item()
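                # greedy k-center baseline, scored with the test objective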
                x = greedy_kcenter(dist_ts, diameter, K)
                loss = obj_test[test_i](x)
                vals[algoname +
                     '-greedy'][test_instances.index(test_i)] = loss.item()
Example #2
    if args.objective == 'modularity':
        # ... (preceding lines of this branch are not shown in this excerpt)
        degrees = preds.sum(dim=1)
        preds = torch.diag(1. / degrees) @ preds
        mod_pred = make_modularity_matrix(preds)
        r = baseline_spectral(mod_pred, K)
        print('spectral',
              loss_fn(None, r, None, None, bin_adj_all, test_object, args))
    elif args.objective == 'kcenter':
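        # load cached two-stage distances if present; otherwise compute them
        # (igraph or networkx) and save them for reuse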
        try:
            dist_ts = torch.load('{}_twostage_dist.pt'.format(args.dataset))
            print('loaded ts dists from {}'.format(
                '{}_twostage_dist.pt'.format(args.dataset)))
        except:
            print('making dists')
            if args.use_igraph:
                print('using igraph')
                dist_ts = make_dists_igraph(preds)
            else:
                print('using networkx')
                dist_ts = make_all_dists(preds, 100)
                diameter = dist_ts[dist_ts < 100].max()
                dist_ts[dist_ts == 100] = diameter
            print('made dists')
            torch.save(dist_ts, '{}_twostage_dist.pt'.format(args.dataset))
        dist_ts = dist_ts.float()
        diameter = dist_ts.max()
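        # compare the Gonzalez and greedy k-center solutions on the train (hardmax) and test objectives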
        x = gonzalez_kcenter(dist_ts, K)
        print('gonzalez ts', obj_train_hardmax(x), obj_test(x))
        print(dist_ts.type(), diameter.type())
        x = greedy_kcenter(dist_ts, diameter, K)
        print('greedy ts', obj_train_hardmax(x), obj_test(x))