from sklearn import metrics
from sklearn.model_selection import KFold

# TransductiveBaggingClassifier is provided by the surrounding project.


def compute_scores(adj_matrix, labels, n_folds, n_bagging_models, sample_ratio,
                   n_kfold_trials, n_jobs=1):
    clf = TransductiveBaggingClassifier(1, -1, n_bagging_models, sample_ratio,
                                        n_jobs)
    predicted_vs_true_tuples = []
    for i in range(n_kfold_trials):
        for train_idxs, test_idxs in KFold(n_splits=n_folds,
                                           shuffle=True).split(labels):
            predict_labels = labels.copy()
            predict_labels[test_idxs] = -1
            clf.fit(adj_matrix, predict_labels)
            predicted = clf.transduction_
            predicted_vs_true_tuples.append(
                (predicted[test_idxs], labels[test_idxs]))

    all_scores = []
    for label_index in range(labels.shape[1]):
        scores = []
        for pred_labels, true_labels in predicted_vs_true_tuples:
            try:
                scores.append(
                    metrics.roc_auc_score(true_labels[:, label_index],
                                          pred_labels[:, label_index]))
            except ValueError as e:
                print("Error computing roc_auc_score, label={}: {}".format(
                    label_index, e))

        all_scores.append(scores)

    return all_scores
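
A minimal usage sketch for compute_scores (not from the original project): the
toy graph, labels, and parameter values below are illustrative assumptions, and
compute_scores / TransductiveBaggingClassifier are assumed to be importable
from the surrounding module.

import numpy as np
import networkx as nx

# Two disjoint triangles, one per class; every node carries one binary label.
g = nx.Graph()
g.add_edges_from([(0, 1), (1, 2), (0, 2), (3, 4), (4, 5), (3, 5)])
adj_matrix = nx.to_numpy_matrix(g).A
labels = np.array([0, 0, 0, 1, 1, 1], ndmin=2).T

all_scores = compute_scores(adj_matrix, labels, n_folds=3, n_bagging_models=6,
                            sample_ratio=0.5, n_kfold_trials=2)
for label_index, scores in enumerate(all_scores):
    print("label {}: mean AUC = {:.3f}".format(label_index, np.mean(scores)))
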
Example #3

def test_transduction_bagging2(self):
    clf = TransductiveBaggingClassifier(1, 1000, 6, 0.5)
    g = nx.Graph()
    # Two 4-cliques, {0, 1, 2, 3} and {4, 5, 6, 7}, joined by the edge (3, 4).
    g.add_edges_from([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3),
                      (3, 4),
                      (4, 5), (4, 6), (4, 7), (5, 6), (5, 7), (6, 7)])
    # Nodes 0 and 7 are unlabelled (-1); the rest carry a single binary label.
    labels = np.array([-1, 0, 0, 0, 1, 1, 1, -1], ndmin=2).T
    adjacency_matrix = nx.to_numpy_matrix(g).A
    clf.fit(adjacency_matrix, labels)
    # Each unlabelled node should take the label of its own clique.
    np.testing.assert_array_equal(
        clf.transduction_,
        np.array([0, 0, 0, 0, 1, 1, 1, 1], ndmin=2).T)

Example #4

def test_transduction_bagging(self):
    clf = TransductiveBaggingClassifier(1, 1000, 6, 0.5)
    g = nx.Graph()
    # Two connected components of three nodes each: the triangle {0, 1, 2}
    # and the path 3-4-5.
    g.add_edges_from([(0, 1), (1, 2), (0, 2), (3, 4), (4, 5)])
    # Two label columns; nodes 2 and 5 are unlabelled (-1) in both.
    labels = np.array([[0, 0, -1, 1, 1, -1],
                       [1, 1, -1, 0, 0, -1]]).T
    adjacency_matrix = nx.to_numpy_matrix(g).A
    clf.fit(adjacency_matrix, labels)
    # Each unlabelled node should inherit the labels of its component.
    np.testing.assert_array_equal(
        clf.transduction_,
        np.array([[0, 0, 0, 1, 1, 1],
                  [1, 1, 1, 0, 0, 0]]).T)
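
Both test methods take self, so they read as unittest.TestCase methods. A
minimal, hypothetical harness sketch (class name and layout are assumptions,
not from the original project) showing where they would sit:

import unittest


class TransductionBaggingTests(unittest.TestCase):
    # Hypothetical container class: the two methods shown above
    # (test_transduction_bagging, test_transduction_bagging2) would be
    # pasted here unchanged.
    pass


if __name__ == '__main__':
    unittest.main()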