Example No. 1
    def test_compute_CD(self):
        avranks = [1.9, 3.2, 2.8, 3.3]
        cd = scoring.compute_CD(avranks, 30)
        np.testing.assert_almost_equal(cd, 0.856344)

        cd = scoring.compute_CD(avranks, 30, test="bonferroni-dunn")
        np.testing.assert_almost_equal(cd, 0.798)

        @contextlib.contextmanager
        def mock_module(name):
            if name not in sys.modules:
                try:
                    sys.modules[name] = Mock()
                    yield
                finally:
                    del sys.modules[name]
            else:
                yield

        # Do what you will, just don't crash
        with mock_module("matplotlib"), \
                mock_module("matplotlib.pyplot"), \
                mock_module("matplotlib.backends.backend_agg"):
            scoring.graph_ranks(avranks, "abcd", cd)
            scoring.graph_ranks(avranks, "abcd", cd, cdmethod=0)
Example No. 2
    def test_compute_CD(self):
        avranks = [1.9, 3.2, 2.8, 3.3]
        cd = scoring.compute_CD(avranks, 30)
        np.testing.assert_almost_equal(cd, 0.856344)

        cd = scoring.compute_CD(avranks, 30, test="bonferroni-dunn")
        np.testing.assert_almost_equal(cd, 0.798)
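
The two expected values in this test can be checked by hand against the critical-difference formula from Demšar (2006), CD = q_alpha * sqrt(k(k+1) / (6N)). A minimal sketch follows; the critical values (2.569 for the Nemenyi test and 2.394 for Bonferroni-Dunn, both at alpha = 0.05 with k = 4 algorithms) are taken from the usual published table and are assumptions here, not values read out of the scoring module.

import math

k = 4    # number of compared algorithms, len(avranks)
N = 30   # number of datasets

q_nemenyi = 2.569          # q_0.05 for k = 4, Nemenyi test (assumed table value)
q_bonferroni_dunn = 2.394  # q_0.05 for k = 4, Bonferroni-Dunn test (assumed table value)

factor = math.sqrt(k * (k + 1) / (6.0 * N))
print(q_nemenyi * factor)          # ~0.85633, matches the asserted 0.856344 to about 4 decimals
print(q_bonferroni_dunn * factor)  # ~0.798, matches the second assertion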
Example No. 3
    def test_compute_CD(self):
        avranks = [1.9, 3.2, 2.8, 3.3]
        cd = scoring.compute_CD(avranks, 30)
        np.testing.assert_almost_equal(cd, 0.856344)

        cd = scoring.compute_CD(avranks, 30, test="bonferroni-dunn")
        np.testing.assert_almost_equal(cd, 0.798)

        # Do what you will, just don't crash
        scoring.graph_ranks(avranks, "abcd", cd)
        scoring.graph_ranks(avranks, "abcd", cd, cdmethod=0)
Example No. 4
    
    # Two ways to aggregate the rank information:
    # Either aggregate it per evaluator and then average the ranks over the 4 to 6 evaluators,
    # or rank irrespective of evaluator and aggregate across the 60 x (4 to 6) evaluators.
    # Hansen et al. discuss this kind of evaluation. (A small illustration of the second
    # approach follows this example.)
    use_approach = 0
    
    if use_approach == 0:
        mat = scipy.io.loadmat("./utils/data_from_util_scripts/n_means.mat")
        arr = mat['n_means']
        arr = arr[0]
        for cnt, i in enumerate(arr):
            print(cnt)
            num_datasets = i[0][0]
            avranks = i[0][1:]
            algs = fn_names[cnt]
            cd = compute_CD(avranks, num_datasets)
            print(cd)
            graph_ranks("cd_plt"+str(cnt)+".pdf", avranks, algs, cd=cd, width=6)

    else:
        arr = []
        mat = scipy.io.loadmat("./utils/data_from_util_scripts/friedman_raw_input.mat")
        arr1 = mat['all_friedman_input']
        for i in arr1[0]:
            j = np.apply_along_axis(rankdata, 1, i)
            x = np.mean(j, axis=0)
            x = np.hstack((j.shape[0], x))
            arr.append(x)

        for cnt, i in enumerate(arr):
            print(cnt)
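
The loop above is cut off in this listing, but the second aggregation approach itself is easy to see in isolation: rank each evaluator's row of scores, average the ranks per algorithm, and prepend the number of rows, exactly as the else branch builds its arr entries. The scores below are invented purely for illustration.

import numpy as np
from scipy.stats import rankdata

# Made-up matrix: 3 evaluators (rows) x 4 algorithms (columns).
scores = np.array([
    [0.71, 0.64, 0.69, 0.62],
    [0.55, 0.58, 0.57, 0.51],
    [0.80, 0.77, 0.79, 0.70],
])

ranks = np.apply_along_axis(rankdata, 1, scores)  # rank within each row
avranks = np.mean(ranks, axis=0)                  # average rank per algorithm
row = np.hstack((ranks.shape[0], avranks))        # [num_rows, avrank_1, ..., avrank_k]

print(row)  # roughly [3., 3.33, 2.67, 3., 1.]: 3 rows, then the four average ranks

Note that scipy's rankdata assigns rank 1 to the smallest value, so when higher scores are better the scores are usually negated (or the ranks reversed) before averaging; the original script presumably handles that upstream of this excerpt.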