Example 1
    # Requires in the enclosing test module (module paths assumed from the rlscore sources):
    #   import numpy as np
    #   from rlscore.learner.kron_rls import KronRLS
    #   from rlscore.learner.query_rankrls import QueryRankRLS
    def test_conditional_ranking(self):

        regparam = 0.001

        K_train1, K_train2, Y_train, K_test1, K_test2, Y_test, X_train1, X_train2, X_test1, X_test2 = (
            self.generate_xortask()
        )
        train_rows, train_columns = Y_train.shape
        trainlabelcount = train_rows * train_columns

        # Kronecker product kernel: one training "pair" per (row object, column object),
        # enumerated column-major to match the order="F" reshape of Y_train below.
        K_Kron_train_x = np.kron(K_train2, K_train1)

        # Train linear Conditional Ranking Kronecker RLS
        params = {}
        params["X1"] = X_train1
        params["X2"] = X_train2
        params["Y"] = Y_train
        params["regparam"] = regparam
        linear_kron_condrank_learner = KronRLS(**params)
        linear_kron_condrank_learner.solve_linear_conditional_ranking(regparam)

        # Train an ordinary RankRLS for reference
        params = {}
        params["X"] = K_Kron_train_x
        params["kernel"] = "PrecomputedKernel"
        params["Y"] = Y_train.reshape((trainlabelcount, 1), order="F")
        # Query ids: use the row index as the qid, so the labels from each row of
        # Y_train form one ranking query. This matches the column-major (order="F")
        # reshape above, where entry (i, j) lands at position i + j * train_rows.
        qids = []
        for j in range(Y_train.shape[1]):
            for i in range(Y_train.shape[0]):
                qids.append(i)
        params["qids"] = qids
        rankrls_learner = QueryRankRLS(**params)
        rankrls_learner.solve(regparam)
        K_test_x = np.kron(K_test2, K_test1)
        ordrankrls_testpred = rankrls_learner.predict(K_test_x)
        condrank_testpred = linear_kron_condrank_learner.predict(X_test1, X_test2)
        print(
            "\n\nMeanabsdiff: conditional ranking vs rankrls "
            + str(np.mean(np.abs(condrank_testpred - ordrankrls_testpred)))
            + "\n"
        )
        np.testing.assert_almost_equal(condrank_testpred, ordrankrls_testpred)
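The equivalence tested above rests on the vec-trick: flattening Y_train column-major (order="F") matches vec(.), for which vec(A X B^T) = (B kron A) vec(X). That is why ordinary RankRLS trained on np.kron(K_train2, K_train1) can reproduce the Kronecker learner. A minimal numpy sketch of the identity (the names A, B, X here are illustrative, not from the example):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((3, 3))   # factor acting on rows
B = rng.standard_normal((4, 4))   # factor acting on columns
X = rng.standard_normal((3, 4))   # matrix being vectorized

# vec(A @ X @ B.T) == np.kron(B, A) @ vec(X), with column-major vec
lhs = np.kron(B, A) @ X.flatten(order="F")
rhs = (A @ X @ B.T).flatten(order="F")
np.testing.assert_allclose(lhs, rhs)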
Example 2
import numpy as np
from rlscore.learner.query_rankrls import QueryRankRLS  # import path as in the next example
from rlscore.reader import read_qids
from rlscore.reader import read_sparse
from rlscore.measure import cindex
train_labels = np.loadtxt("./examples/data/rank_train.labels")
test_labels = np.loadtxt("./examples/data/rank_test.labels")
train_qids = read_qids("./examples/data/rank_train.qids")
test_features = read_sparse("./examples/data/rank_test.features")
train_features = read_sparse("./examples/data/rank_train.features")
test_qids = read_qids("./examples/data/rank_test.qids")
kwargs = {}
kwargs["Y"] = train_labels
kwargs["X"] = train_features
kwargs["qids"] = train_qids
kwargs["regparam"] = 1
learner = QueryRankRLS(**kwargs)
P = learner.predict(test_features)
from rlscore.measure.measure_utilities import UndefinedPerformance
from rlscore.measure.measure_utilities import qids_to_splits
test_qids = qids_to_splits(test_qids)
perfs = []
for query in test_qids:
    try:
        perf = cindex(test_labels[query], P[query])
        perfs.append(perf)
    except UndefinedPerformance:
        pass
test_perf = np.mean(perfs)
print "test set performance: %f" % test_perf
Example 3
import numpy as np
from rlscore.learner.query_rankrls import QueryRankRLS
from rlscore.utilities.reader import read_qids
from rlscore.utilities.reader import read_sparse
from rlscore.measure import cindex
train_labels = np.loadtxt("./legacy_tests/data/rank_train.labels")
test_labels = np.loadtxt("./legacy_tests/data/rank_test.labels")
train_qids = read_qids("./legacy_tests/data/rank_train.qids")
test_features = read_sparse("./legacy_tests/data/rank_test.features")
train_features = read_sparse("./legacy_tests/data/rank_train.features")
test_qids = read_qids("./legacy_tests/data/rank_test.qids")
kwargs = {}
kwargs["Y"] = train_labels
kwargs["X"] = train_features
kwargs["qids"] = train_qids
kwargs["regparam"] = 1
learner = QueryRankRLS(**kwargs)  # training happens at construction time
P = learner.predict(test_features)
from rlscore.measure.measure_utilities import UndefinedPerformance
from rlscore.measure.measure_utilities import qids_to_splits
test_qids = qids_to_splits(test_qids)
perfs = []
for query in test_qids:
    try:
        perf = cindex(test_labels[query], P[query])
        perfs.append(perf)
    except UndefinedPerformance:
        pass
test_perf = np.mean(perfs)
print("test set performance: %f" %test_perf)