Example #1
def compute_nystrom(ds_name, use_node_labels, embedding_dim,
                    community_detection_method, kernels):
    if ds_name == "SYNTHETIC":
        graphs, labels = generate_synthetic()
    else:
        graphs, labels = load_data(ds_name, use_node_labels)

    print('computing communities ...')
    communities, subgraphs = compute_communities(graphs, use_node_labels,
                                                 community_detection_method)

    print("Number of communities: ", len(communities))
    lens = []
    for community in communities:
        lens.append(community.number_of_nodes())

    print("Average size: %.2f" % np.mean(lens))
    Q = []
    for idx, k in enumerate(kernels):
        model = Nystrom(k, n_components=embedding_dim)
        model.fit(communities)
        Q_t = model.transform(communities)
        Q_t = np.vstack([np.zeros(embedding_dim), Q_t])
        Q.append(Q_t)

    return Q, subgraphs, labels, Q_t.shape
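A minimal sketch of the zero-row trick above, with hypothetical sizes; reading row 0 as a padding embedding is an assumption, not something the snippet states.

import numpy as np

embedding_dim = 4
Q_t = np.random.rand(3, embedding_dim)           # embeddings for 3 communities
Q_t = np.vstack([np.zeros(embedding_dim), Q_t])  # row 0 becomes an all-zero padding row
assert Q_t.shape == (4, embedding_dim)           # matches the returned Q_t.shape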
Example #2
    def fit(self, X, y, batch_size=100):
        '''
        Arguments:
        X: (array) The training data; must have two dimensions.
        y: (array) The training labels; must be one-hot encoded with two
        dimensions. To produce training labels in this format, one might use
        sklearn.preprocessing.LabelBinarizer.
        batch_size: (int) The mini-batch size for computing the stochastic gradients.
        '''
        mapper = Nystrom(kernel=self.kernel, gamma=self.gamma, degree=self.degree,
                         coef0=self.coef0, n=self.n, k=self.k, rand_svd=self.rand_svd)
        mapper.fit(X)
        self._Xrep = mapper._Xrep
        clf = TFClassifier(loss=self.loss, alpha=self.alpha, optimizer=self.optimizer)
        clf.fit(mapper.transform(X), y, batch_size=batch_size)
        self.dual_coef_ = np.dot(mapper.A, clf.coef_.T).T
        self.intercept_ = clf.intercept_
        return
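A minimal usage sketch for this fit(); `est` stands for an instance of the (unnamed) estimator class the method belongs to, so that name is an illustrative assumption. The docstring's suggestion of sklearn.preprocessing.LabelBinarizer is shown producing the one-hot labels.

import numpy as np
from sklearn.preprocessing import LabelBinarizer

X = np.random.rand(200, 10)               # training data, a two-dimensional array
y = LabelBinarizer().fit_transform(
    np.random.randint(0, 3, size=200))    # one-hot labels, shape (200, 3)
# est.fit(X, y, batch_size=100)           # est: hypothetical estimator instance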
Example #3
def compute_nystrom(use_node_labels, embedding_dim, community_detection_method, kernels):
    graphs_reg, labels_reg, graphs_gen, labels_gen, graphs_mal, labels_mal = load()
    graphs = graphs_reg + graphs_gen + graphs_mal
    labels = np.concatenate((labels_reg, labels_gen, labels_mal), axis=0)
    communities, subgraphs = compute_communities(graphs, use_node_labels, community_detection_method)

    print("Number of communities: ", len(communities))
    lens = []
    for community in communities:
        lens.append(community.number_of_nodes())

    print("Average size: %.2f" % np.mean(lens))
    Q = []
    for idx, k in enumerate(kernels):
        model = Nystrom(k, n_components=embedding_dim)
        model.fit(communities)
        Q_t = model.transform(communities)
        Q_t = np.vstack([np.zeros(embedding_dim), Q_t])
        Q.append(Q_t)

    return Q, subgraphs, labels, Q_t.shape
Example #4
def test_ensemble_nystrom_full_prec_three_learner():
    # test that keeping all dimensions makes the Nystrom kernel matrix equal the exact kernel
    n_sample = 150
    n_feat = n_sample
    input_val1 = torch.DoubleTensor(
        np.random.normal(size=[n_sample, n_feat])).double()
    input_val2 = input_val1
    # input_val2  = torch.DoubleTensor(np.random.normal(size=[n_sample - 1, n_feat] ) ).double()
    # get exact gaussian kernel
    kernel = GaussianKernel(sigma=10.0)
    kernel_mat = kernel.get_kernel_matrix(input_val1, input_val2)

    # nystrom method
    approx = Nystrom(n_feat, kernel=kernel)
    approx.setup(input_val1)
    feat = approx.get_feat(input_val1)
    approx_kernel_mat = approx.get_kernel_matrix(input_val1, input_val2)

    # ensembled nystrom method
    approx_ensemble = EnsembleNystrom(n_feat, n_learner=3, kernel=kernel)
    approx_ensemble.setup(input_val1)
    feat_ensemble = approx_ensemble.get_feat(input_val1)
    assert feat_ensemble.size(0) == n_sample
    assert feat_ensemble.size(1) == n_feat
    approx_kernel_mat_ensemble = approx_ensemble.get_kernel_matrix(
        input_val1, input_val2)
    print("single learner ensembled nystrom test passed!")
Example #5
def test_ensemble_nystrom_full_prec_one_learner():
    # test that keeping all dimensions makes the Nystrom kernel matrix equal the exact kernel
    n_sample = 150
    n_feat = n_sample
    input_val1 = torch.DoubleTensor(
        np.random.normal(size=[n_sample, n_feat])).double()
    input_val2 = input_val1
    # input_val2  = torch.DoubleTensor(np.random.normal(size=[n_sample - 1, n_feat] ) ).double()
    # get exact gaussian kernel
    kernel = GaussianKernel(sigma=10.0)
    kernel_mat = kernel.get_kernel_matrix(input_val1, input_val2)

    # nystrom method
    approx = Nystrom(n_feat, kernel=kernel)
    approx.setup(input_val1)
    feat = approx.get_feat(input_val1)
    approx_kernel_mat = approx.get_kernel_matrix(input_val1, input_val2)

    # ensembled nystrom method
    approx_ensemble = EnsembleNystrom(n_feat, n_learner=1, kernel=kernel)
    approx_ensemble.setup(input_val1)
    feat_ensemble = approx_ensemble.get_feat(input_val1)
    approx_kernel_mat_ensemble = approx_ensemble.get_kernel_matrix(
        input_val1, input_val2)
    np.testing.assert_array_almost_equal(
        np.sum(feat.cpu().numpy()**2), np.sum(feat_ensemble.cpu().numpy()**2))

    np.testing.assert_array_almost_equal(
        np.sum(approx_kernel_mat.cpu().numpy()**2),
        np.sum(approx_kernel_mat_ensemble.cpu().numpy()**2))
    print("single learner ensembled nystrom test passed!")
Example #6
def compute_nystrom(ds_name, pct_data, use_node_labels, embedding_dim, community_detection_method, kernels, seed):
    communities_load_path = "communities_dump_" + ds_name + "_balance_42.pkl"
    nystrom_load_path = "nystrom_dump_" + ds_name + "_balance_42.pkl"

    if os.path.exists(nystrom_load_path):
        print('loading Nystrom results from ', nystrom_load_path)
        return pkl.load(open(nystrom_load_path, 'rb'))
    if os.path.exists(communities_load_path):
        print("loading preprocessed communities data from", communities_load_path)
        communities, subgraphs = pkl.load(open(communities_load_path, 'rb'))
    else:
        if ds_name == "SYNTHETIC":
            graphs, labels = generate_synthetic()
        else:
            graphs, labels = load_data(
                dataset=ds_name, pct_data=pct_data, seed=seed)
        communities, subgraphs = compute_communities(
            graphs, use_node_labels, community_detection_method)

    print("Number of communities: ", len(communities))
    print("dumping communities to", communities_load_path)
    lens = []
    for community in communities:
        lens.append(community.number_of_nodes())

    print("Average size: %.2f" % np.mean(lens))
    sys.stdout.flush()
    Q = []

    for idx, k in enumerate(kernels):
        model = Nystrom(k, n_components=embedding_dim)
        model.fit(communities)
        Q_t = model.transform(communities)
        Q_t = np.vstack([np.zeros(embedding_dim), Q_t])
        Q.append(Q_t)

    print("Dumping Nystrom output to", nystrom_load_path)
    pkl.dump((Q, subgraphs, labels, Q_t.shape), open(nystrom_load_path, 'wb'))
    return Q, subgraphs, labels, Q_t.shape
Example #7
    def __init__(self, X, kern, Xm):

        super(PITC, self).__init__("PITC")
        M = np.shape(Xm)[0]
        self.M = M

        start = time.time()
        X_split = np.array_split(X, M)
        self.kern = kern
        kern_blocks = np.zeros((M), dtype=object)

        for t in range(M):
            nyst = Nystrom(X_split[t], kern, Xm, False)
            size = np.shape(X_split[t])[0]
            kern_blocks[t] = kern.K(
                X_split[t],
                X_split[t]) - nyst.precon + (kern.noise) * np.identity(size)

        self.blocks = kern_blocks
        blocked = block_diag(*kern_blocks)

        self.nyst = Nystrom(X, kern, Xm, False)
        self.precon = self.nyst.precon + blocked
        self.duration = time.time() - start
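block_diag above presumably comes from scipy.linalg (the snippet's imports are not shown). A minimal illustration of how per-split blocks assemble into one block-diagonal matrix:

import numpy as np
from scipy.linalg import block_diag

b1 = np.ones((2, 2))
b2 = 2 * np.ones((3, 3))
B = block_diag(b1, b2)    # 5x5 matrix: b1 and b2 on the diagonal, zeros elsewhere
assert B.shape == (5, 5)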
Example #8
    def __init__(self, X, kern, Xm):

        super(FITC, self).__init__("FITC")
        M = np.shape(Xm)[0]
        N = np.shape(X)[0]
        self.kern = kern
        start = time.time()

        k = kern.K(X, X)
        self.nyst = Nystrom(X, kern, Xm, False)
        self.diag = np.diag(k - self.nyst.precon +
                            (kern.noise) * np.identity(N))

        self.precon = self.nyst.precon + np.diag(self.diag)
        self.duration = time.time() - start
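Read next to Example #7: FITC keeps only the diagonal of the residual k - nyst.precon, whereas PITC keeps whole diagonal blocks of it. A small numeric sketch of the FITC correction, with hypothetical 2x2 values:

import numpy as np

k = np.array([[2.0, 0.5], [0.5, 2.0]])       # exact kernel (hypothetical)
precon = np.array([[1.0, 0.4], [0.4, 1.0]])  # Nystrom preconditioner (hypothetical)
diag = np.diag(k - precon)                   # residual variances on the diagonal
fitc_precon = precon + np.diag(diag)         # diagonal correction only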
Example #9
    def setup(self, X, n_landmark=None):
        '''
        X is in the shape of [n_sample, n_dimension]
        call setup() once before using Nystrom
        '''
        if self.n_feat > X.size(0):
            self.n_feat = X.size(0)
            self.n_feat_per_learner = self.n_feat // self.n_learner

        self.learners = []
        np.random.seed(self.rand_seed)
        perm = np.random.permutation(np.arange(X.size(0)))
        # perm = np.arange(X.size(0) )
        for i in range(self.n_learner):
            self.learners.append(
                Nystrom(self.n_feat_per_learner, self.kernel, self.rand_seed))
            start_idx = i * self.n_feat_per_learner
            end_idx = min((i + 1) * self.n_feat_per_learner, X.size(0))
            self.learners[-1].setup(X[perm[start_idx:end_idx], :])
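A worked sketch of the landmark split performed by setup(), with hypothetical sizes: each learner receives a disjoint slice of one random permutation of the sample indices.

import numpy as np

n_feat, n_learner, n_sample = 6, 3, 10
n_per = n_feat // n_learner                # 2 landmarks per learner
perm = np.random.permutation(n_sample)
slices = [perm[i * n_per:(i + 1) * n_per] for i in range(n_learner)]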
Example #10
def test_ensemble_nystrom_low_prec():
    # test that keeping all dimensions makes the Nystrom kernel matrix equal the exact kernel
    n_sample = 150
    n_feat = n_sample
    input_val1 = torch.DoubleTensor(
        np.random.normal(size=[n_sample, n_feat])).double()
    input_val2 = input_val1
    # input_val2  = torch.DoubleTensor(np.random.normal(size=[n_sample - 1, n_feat] ) ).double()
    # get exact gaussian kernel
    kernel = GaussianKernel(sigma=10.0)
    kernel_mat = kernel.get_kernel_matrix(input_val1, input_val2)

    # setup quantizer
    quantizer = Quantizer(4,
                          torch.min(input_val1),
                          torch.max(input_val1),
                          rand_seed=2,
                          use_cuda=False)

    # nystrom method
    approx = Nystrom(n_feat, kernel=kernel)
    approx.setup(input_val1)
    feat = approx.get_feat(input_val1)
    approx_kernel_mat = approx.get_kernel_matrix(input_val1, input_val2,
                                                 quantizer, quantizer)

    # ensembled nystrom method
    approx_ensemble = EnsembleNystrom(n_feat, n_learner=1, kernel=kernel)
    approx_ensemble.setup(input_val1)
    feat_ensemble = approx_ensemble.get_feat(input_val1)
    approx_kernel_mat_ensemble = approx_ensemble.get_kernel_matrix(
        input_val1,
        input_val2,
        quantizer,
        quantizer,
        consistent_quant_seed=True)
    approx_kernel_mat_ensemble = approx_ensemble.get_kernel_matrix(
        input_val1,
        input_val2,
        quantizer,
        quantizer,
        consistent_quant_seed=True)

    print("single learner ensembled nystrom quantizerd version test passed!")
Example #11
    warnings.filterwarnings('ignore')

    models = dict()
    for algorithm in args.algorithms:
        for i in range(args.iterations):
            if algorithm == 'cssp':
                m = args.columns
                model = CSSP(k, m)
            elif algorithm == 'kmeans':
                model = KMeans(k)
            elif algorithm == 'kasp':
                gamma = args.gamma
                model = KASP(k, gamma)
            elif algorithm == 'ncut':
                model = NCut(k)
            elif algorithm == 'nystrom':
                m = args.columns
                model = Nystrom(k, m)
            model.fit(X)
            if algorithm not in models:
                models[algorithm] = []
            models[algorithm].append(model)

    max_algo_name = max([len(algo) for algo in models.keys()])
    print('Algorithm |   Time   | Accuracy')
    print('----------+----------+----------')
    for name, model in models.items():
        t = sum([m.time for m in model]) / args.iterations
        acc = sum([m.accuracy(Y) for m in model]) / args.iterations
        print('{:<{nl}} | {:<8} | {:<6}'.format(name, round(t, 3), round(acc, 3), nl=max(max_algo_name, 9)))
Example #12
    lr_t = learning_rate
    min_avg_loss = np.inf
    avg_obj = 0.

    for t in range(1, n_epochs + 1):

        for batch_num in range(1, len(Xtr) // batch_size + 1):
            Xb, yb = get_next_batch(batch_size)
            di_obj, _ = sess.run([objective, train_step],
                                 feed_dict={
                                     x: Xb,
                                     y: yb,
                                     lr: lr_t
                                 })
            avg_obj = ((batch_num - 1) * avg_obj + di_obj) / batch_num

        print('Epoch %d with average KDI %f' % (t, avg_obj))
        if t % lr_epochs == 0:
            lr_t = lr_t * 0.1

    print('Fitting a predictor on the optimized Nystrom mapping.')
    kmap = Nystrom(kernel='rbf', gamma=gamma, n=n)
    kmap.fit(sess.run(kernel_func.X_rep))
    sess.close()

    clf = TFClassifier(loss='logistic', alpha=0.)
    clf.fit(kmap.transform(Xtr), ytr)
    Accuracy = clf.score(kmap.transform(Xte), yte)

    print('Accuracy for feature dimensionality %d: %f' % (n, Accuracy))
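The learning-rate schedule in this snippet is a plain step decay; a self-contained sketch with assumed values for learning_rate and lr_epochs:

learning_rate, lr_epochs = 0.1, 5   # assumed values for illustration
lr_t = learning_rate
for t in range(1, 16):
    if t % lr_epochs == 0:
        lr_t *= 0.1                 # 0.1 -> 0.01 -> 0.001 at epochs 5, 10, 15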
Example #13
def mpgk_aa(Gs, h, n_clusters, limit):
    N = len(Gs)
    # use_node_labels is presumably a module-level flag in the original source
    if use_node_labels:
        d = Gs[0].node[list(Gs[0].nodes())[0]]['label'].size
    else:
        d = Gs[0].node[list(Gs[0].nodes())[0]]['attributes'].size

    idx = np.zeros(N + 1, dtype=np.int64)
    nbrs = dict()
    ndata = []
    for i in range(N):
        n = Gs[i].number_of_nodes()
        idx[i + 1] = idx[i] + n

        nodes = list(Gs[i].nodes())
        M = np.zeros((n, d))
        nodes2idx = dict()
        for j in range(idx[i], idx[i + 1]):
            if use_node_labels:
                M[j - idx[i], :] = Gs[i].node[nodes[j - idx[i]]]['label']
            else:
                M[j - idx[i], :] = Gs[i].node[nodes[j - idx[i]]]['attributes']
            nodes2idx[nodes[j - idx[i]]] = j

        ndata.append(M)

        for node in nodes:
            nbrs[nodes2idx[node]] = list()
            for neighbor in Gs[i].neighbors(node):
                nbrs[nodes2idx[node]].append(nodes2idx[neighbor])

    graph_hists = list()
    X = np.vstack(ndata)

    for it in range(1, h + 1):
        print("Iteration:", it)
        hists, nbrs_hists = compute_histograms(X, nbrs, n_clusters, limit)
        X = np.zeros((X.shape[0], 200))

        ny = Nystrom(n_components=150)
        ny.fit(hists)
        X[:, :150] = ny.transform(hists)

        ny = Nystrom(n_components=50)
        ny.fit(nbrs_hists)
        X[:, 150:] = ny.transform(nbrs_hists)

        graph_hists.append(list())
        for i in range(N):
            d = dict()
            for j in range(idx[i], idx[i + 1]):
                for n in hists[j]:
                    if n in d:
                        d[n] += hists[j][n]
                    else:
                        d[n] = hists[j][n]
            graph_hists[it - 1].append(d)

    K = np.zeros((N, N))
    for it in range(h):
        for i in range(N):
            for j in range(i, N):
                for n in graph_hists[it][i]:
                    if n in graph_hists[it][j]:
                        K[i, j] += min(graph_hists[it][i][n],
                                       graph_hists[it][j][n])
                K[j, i] = K[i, j]

    return K
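The nested loops that build K compute a histogram-intersection kernel over the per-graph histograms; a compact sketch for two dict-valued histograms with hypothetical bins:

h1 = {'a': 2, 'b': 1}
h2 = {'a': 1, 'c': 4}
k_ij = sum(min(v, h2[n]) for n, v in h1.items() if n in h2)  # = min(2, 1) = 1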
Example #14
    val_loader = torch.utils.data.DataLoader(val_data,
                                             batch_size=args.minibatch,
                                             shuffle=False)

    # setup gaussian kernel
    n_input_feat = X_train.shape[1]
    kernel = GaussianKernel(sigma=args.kernel_sigma)
    if args.approx_type == "exact":
        print("exact kernel mode")
        # raise Exception("SGD based exact kernel is not implemented yet!")
        kernel_approx = kernel
        quantizer = None
    elif args.approx_type == "nystrom":
        print("fp nystrom mode")
        kernel_approx = Nystrom(args.n_feat,
                                kernel=kernel,
                                rand_seed=args.random_seed)
        kernel_approx.setup(X_train)
        quantizer = None
    elif args.approx_type == "ensemble_nystrom":
        print("ensembled nystrom mode with ", args.n_ensemble_nystrom,
              "learner")
        kernel_approx = EnsembleNystrom(args.n_feat,
                                        n_learner=args.n_ensemble_nystrom,
                                        kernel=kernel,
                                        rand_seed=args.random_seed)
        kernel_approx.setup(X_train)
        if args.do_fp_feat:
            quantizer = None
        else:
            # decide on the range of representation from training sample based features
Example #15
# Create the metric function
def geodesic(_, source=None, dest=None):
    if source is None and dest is None:
        return np.vstack([full.get_row(i) for i in range(len(pts))])
    elif dest is None:
        return full.get_rows(source).T
    else:
        return full.get_rows(source)[:, dest].T


metric_func = geodesic

## END GEODESICS SETUP

t0 = time()
nys = Nystrom(l_nys, func=metric_func)
nys.fit(X)
t1 = time()
print("-------------------------------")
print("Done unreg Nystrom: rel. error %f " %
      nys.score_rows(full, chunk_size=chunk_size))
print("Timings:")
print("Fit: %f" % (t1 - t0))

t0 = time()
nys = Nystrom(l_nys, func=metric_func, rcond=1e-4)
nys.fit(X)
t1 = time()
print("-------------------------------")
print("Done reg Nystrom: rel. error %f " %
      nys.score_rows(full, chunk_size=chunk_size))
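Here rcond presumably acts as a pseudo-inverse cutoff, as in numpy's pinv; that reading is an assumption, since the Nystrom class itself is not shown. A minimal illustration of the effect of such a cutoff:

import numpy as np

A = np.diag([1.0, 1e-6])
print(np.linalg.pinv(A))               # inverts the tiny singular value too
print(np.linalg.pinv(A, rcond=1e-4))   # treats it as zero instead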