def test_p_value_union():
    """Test if the p-value is calculated correctly.

    Runs a within-subjects comparison on pickled example results, then
    replaces the surrogate CMI distribution for one target with constant
    values to verify that significance and p-values come out as expected.
    """
    data = Data()
    data.generate_mute_data(100, 5)
    path = os.path.join(os.path.dirname(__file__), 'data/')
    # Use context managers so the file handles are closed deterministically
    # (pickle.load(open(...)) leaked them).
    with open(path + 'mute_results_0.p', 'rb') as f:
        res_0 = pickle.load(f)
    with open(path + 'mute_results_1.p', 'rb') as f:
        res_1 = pickle.load(f)
    comp_settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'n_perm_max_stat': 50,
        'n_perm_min_stat': 50,
        'n_perm_omnibus': 200,
        'n_perm_max_seq': 50,
        'n_perm_comp': 6,
        'alpha_comp': 0.2,
        'tail_comp': 'one_bigger',
        'stats_type': 'independent'
        }
    comp = NetworkComparison()
    comp.compare_within(comp_settings, res_0, res_1, data, data)

    # Replace the surrogate CMI by all zeros for source 0 and all ones for
    # source 1. Set the CMI difference to 0.5 for both sources. Check if this
    # results in one significant and one non-significant result with the
    # correct p-values.
    comp._initialise(comp_settings)
    comp._create_union(res_0, res_1)
    comp._calculate_cmi_diff_within(data, data)
    comp._create_surrogate_distribution_within(data, data)
    target = 1
    source = 0

    # All-zero surrogates with a positive CMI difference must be significant
    # with the smallest attainable p-value, 1 / n_perm_comp.
    comp.cmi_surr[target] = np.zeros((1, comp_settings['n_perm_comp']))
    comp.cmi_diff[target] = np.array([0.5])
    comp._p_value_union()
    p = comp.pvalue
    s = comp.significance
    assert s[target][source], (
        'The significance was not determined correctly: {0}'.format(s[target]))
    assert p[target][source] == 1 / comp_settings['n_perm_comp'], (
        'The p-value was not calculated correctly: {0}'.format(p[target]))

    # All-one surrogates exceed the difference in every permutation, so the
    # result must be non-significant with a p-value of 1.
    comp.cmi_surr[target] = np.ones((1, comp_settings['n_perm_comp']))
    comp.cmi_diff[target] = np.array([0.5])
    comp._p_value_union()
    p = comp.pvalue
    s = comp.significance
    assert not s[target][source], (
        'The significance was not determined correctly: {0}'.format(s[target]))
    assert p[target][source] == 1.0, (
        'The p-value was not calculated correctly: {0}'.format(p[target]))
# Ejemplo n.º 2
def test_p_value_union():
    """Test if the p-value is calculated correctly.

    After a regular within-subjects comparison, the surrogate CMI for one
    target is overwritten with constant arrays to check the resulting
    significance flags and p-values against known expected values.
    """
    data = Data()
    data.generate_mute_data(100, 5)
    path = os.path.join(os.path.dirname(__file__), 'data/')
    # Close the pickle files deterministically via context managers instead
    # of leaking the handles returned by open().
    with open(path + 'mute_results_0.p', 'rb') as f:
        res_0 = pickle.load(f)
    with open(path + 'mute_results_1.p', 'rb') as f:
        res_1 = pickle.load(f)
    comp_settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'n_perm_max_stat': 50,
        'n_perm_min_stat': 50,
        'n_perm_omnibus': 200,
        'n_perm_max_seq': 50,
        'n_perm_comp': 6,
        'alpha_comp': 0.2,
        'tail_comp': 'one_bigger',
        'stats_type': 'independent'
    }
    comp = NetworkComparison()
    comp.compare_within(comp_settings, res_0, res_1, data, data)

    # Replace the surrogate CMI by all zeros for source 0 and all ones for
    # source 1. Set the CMI difference to 0.5 for both sources. Check if this
    # results in one significant and one non-significant result with the
    # correct p-values.
    comp._initialise(comp_settings)
    comp._create_union(res_0, res_1)
    comp._calculate_cmi_diff_within(data, data)
    comp._create_surrogate_distribution_within(data, data)
    target = 1
    source = 0

    # Zero surrogates vs. a 0.5 difference: significant at p = 1/n_perm_comp.
    comp.cmi_surr[target] = np.zeros((1, comp_settings['n_perm_comp']))
    comp.cmi_diff[target] = np.array([0.5])
    comp._p_value_union()
    p = comp.pvalue
    s = comp.significance
    assert s[target][source], (
        'The significance was not determined correctly: {0}'.format(s[target]))
    assert p[target][source] == 1 / comp_settings['n_perm_comp'], (
        'The p-value was not calculated correctly: {0}'.format(p[target]))

    # One-valued surrogates vs. a 0.5 difference: non-significant, p = 1.
    comp.cmi_surr[target] = np.ones((1, comp_settings['n_perm_comp']))
    comp.cmi_diff[target] = np.array([0.5])
    comp._p_value_union()
    p = comp.pvalue
    s = comp.significance
    assert not s[target][source], (
        'The significance was not determined correctly: {0}'.format(s[target]))
    assert p[target][source] == 1.0, (
        'The p-value was not calculated correctly: {0}'.format(p[target]))
def test_p_value_union():
    """Test if the p-value is calculated correctly.

    Older variant of the test that loads pickled results via np.load and
    uses the tuple return value of _p_value_union().
    """
    dat = Data()
    dat.generate_mute_data(100, 5)
    # np.load needs allow_pickle=True to unpickle non-array objects; the
    # default changed to False in numpy 1.16.3 and would raise ValueError.
    res_0 = np.load(os.path.join(os.path.dirname(__file__),
                    'data/mute_res_0.pkl'), allow_pickle=True)
    res_1 = np.load(os.path.join(os.path.dirname(__file__),
                    'data/mute_res_1.pkl'), allow_pickle=True)
    comp_settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'n_perm_max_stat': 50,
        'n_perm_min_stat': 50,
        'n_perm_omnibus': 200,
        'n_perm_max_seq': 50,
        'n_perm_comp': 6,
        'alpha_comp': 0.2,
        'tail_comp': 'one_bigger',
        'stats_type': 'independent'
        }
    comp = NetworkComparison()
    # Return value is not needed; the comparison only primes internal state.
    comp.compare_within(comp_settings, res_0, res_1, dat, dat)

    # Replace the surrogate CMI by all zeros for source 0 and all ones for
    # source 1. Set the CMI difference to 0.5 for both sources. Check if this
    # results in one significant and one non-significant result with the
    # correct p-values.
    comp._initialise(comp_settings)
    comp._create_union(res_0, res_1)
    comp._calculate_cmi_diff_within(dat, dat)
    comp._create_surrogate_distribution_within(dat, dat)
    target = 1
    for p in range(comp_settings['n_perm_comp']):
        comp.cmi_surr[p][target] = np.array([0, 1])
    comp.cmi_diff[target] = np.array([0.5, 0.5])
    [p, s] = comp._p_value_union()
    assert (s[target] == np.array([True, False])).all(), (
                                    'The significance was not determined '
                                    'correctly: {0}'.format(s[target]))
    # Source 0 (surrogates all 0) gets the minimum p-value, source 1
    # (surrogates all 1) a p-value of 1.
    p_1 = 1 / comp_settings['n_perm_comp']
    p_2 = 1.0
    print(p[target])
    assert (p[target] == np.array([p_1, p_2])).all(), (
                                'The p-value was not calculated correctly: {0}'
                                .format(p[target]))
# Ejemplo n.º 4
def test_tails():
    """Test one- and two-tailed testing for all stats types.

    Exercises every combination of tail ('one'/'two') and stats type
    ('independent'/'dependent') for both within- and between-subjects
    comparisons, printing the resulting p-value adjacency matrices.
    """
    data = Data()
    data.generate_mute_data(100, 5)

    path = os.path.join(os.path.dirname(__file__), 'data/')

    def _load(fname):
        # Load pickled example results, closing the file handle afterwards
        # (pickle.load(open(...)) leaked the handle).
        with open(path + fname, 'rb') as f:
            return pickle.load(f)

    res_0 = _load('mute_results_0.p')
    res_1 = _load('mute_results_1.p')
    res_2 = _load('mute_results_2.p')
    res_3 = _load('mute_results_3.p')

    # comparison settings
    comp_settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'n_perm_max_stat': 50,
        'n_perm_min_stat': 50,
        'n_perm_omnibus': 200,
        'n_perm_max_seq': 50,
        'alpha_comp': 0.26,
        'n_perm_comp': 4
    }

    comp = NetworkComparison()
    for tail in ['two', 'one']:
        for stats_type in ['independent', 'dependent']:
            comp_settings['stats_type'] = stats_type
            comp_settings['tail_comp'] = tail
            c_within = comp.compare_within(comp_settings, res_0, res_1, data,
                                           data)
            c_between = comp.compare_between(comp_settings,
                                             network_set_a=np.array(
                                                 (res_0, res_1)),
                                             network_set_b=np.array(
                                                 (res_2, res_3)),
                                             data_set_a=np.array((data, data)),
                                             data_set_b=np.array((data, data)))
            adj_mat_within = c_within.get_adjacency_matrix('pvalue')
            adj_mat_within.print_matrix()
            adj_mat_between = c_between.get_adjacency_matrix('pvalue')
            adj_mat_between.print_matrix()
def test_plot_network_comparison():
    """Test results class for network comparison.

    Runs within- and between-subjects comparisons and checks that
    plot_network_comparison() produces a figure for both result objects.
    """
    data_0 = Data()
    data_0.generate_mute_data(500, 5)
    data_1 = Data(np.random.rand(5, 500, 5), 'psr')

    path = os.path.join(os.path.dirname(__file__), 'data/')
    # Context managers close the pickle files deterministically (the
    # original open() calls leaked the handles).
    with open(path + 'mute_results_0.p', 'rb') as f:
        res_0 = pickle.load(f)
    with open(path + 'mute_results_1.p', 'rb') as f:
        res_1 = pickle.load(f)

    # comparison settings
    comp_settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'n_perm_max_stat': 50,
        'n_perm_min_stat': 50,
        'n_perm_omnibus': 200,
        'n_perm_max_seq': 50,
        'alpha_comp': 0.26,
        'n_perm_comp': 200,
        'tail': 'two',
        'permute_in_time': True,
        'perm_type': 'random'
    }
    comp = NetworkComparison()

    comp_settings['stats_type'] = 'independent'
    res_within = comp.compare_within(comp_settings, res_0, res_1, data_0,
                                     data_1)
    comp_settings['stats_type'] = 'independent'
    res_between = comp.compare_between(
        comp_settings,
        network_set_a=np.array(list(it.repeat(res_0, 10))),
        network_set_b=np.array(list(it.repeat(res_1, 10))),
        data_set_a=np.array(list(it.repeat(data_0, 10))),
        data_set_b=np.array(list(it.repeat(data_1, 10))))

    # Plot both result types and close the figures to free resources.
    graph, fig = plot_network_comparison(res_between)
    plt.close(fig)
    graph, fig = plot_network_comparison(res_within)
    plt.close(fig)
def test_tails():
    """Test one- and two-tailed testing for all stats types.

    Duplicate variant of the tails test that prints the p-value adjacency
    matrices directly instead of calling print_matrix().
    """
    data = Data()
    data.generate_mute_data(100, 5)

    path = os.path.join(os.path.dirname(__file__), 'data/')

    def _load(fname):
        # Load pickled example results without leaking the file handle.
        with open(path + fname, 'rb') as f:
            return pickle.load(f)

    res_0 = _load('mute_results_0.p')
    res_1 = _load('mute_results_1.p')
    res_2 = _load('mute_results_2.p')
    res_3 = _load('mute_results_3.p')

    # comparison settings
    comp_settings = {
            'cmi_estimator': 'JidtKraskovCMI',
            'n_perm_max_stat': 50,
            'n_perm_min_stat': 50,
            'n_perm_omnibus': 200,
            'n_perm_max_seq': 50,
            'alpha_comp': 0.26,
            'n_perm_comp': 4}

    comp = NetworkComparison()
    for tail in ['two', 'one']:
        for stats_type in ['independent', 'dependent']:
            comp_settings['stats_type'] = stats_type
            comp_settings['tail_comp'] = tail
            c_within = comp.compare_within(
                comp_settings, res_0, res_1, data, data)
            c_between = comp.compare_between(
                comp_settings,
                network_set_a=np.array((res_0, res_1)),
                network_set_b=np.array((res_2, res_3)),
                data_set_a=np.array((data, data)),
                data_set_b=np.array((data, data)))
            print(c_within.get_adjacency_matrix('pvalue'))
            print(c_between.get_adjacency_matrix('pvalue'))
# Ejemplo n.º 7
def test_results_network_comparison():
    """Test results class for network comparison.

    Checks union network, comparison flags, absolute differences, and
    p-values returned by within- and between-subjects comparisons.
    """
    data_0 = Data()
    data_0.generate_mute_data(500, 5)
    data_1 = Data(np.random.rand(5, 500, 5), 'psr')

    path = os.path.join(os.path.dirname(__file__), 'data/')
    # Close the pickle files deterministically instead of leaking handles.
    with open(path + 'mute_results_0.p', 'rb') as f:
        res_0 = pickle.load(f)
    with open(path + 'mute_results_1.p', 'rb') as f:
        res_1 = pickle.load(f)

    # comparison settings
    comp_settings = {
            'cmi_estimator': 'JidtKraskovCMI',
            'n_perm_max_stat': 50,
            'n_perm_min_stat': 50,
            'n_perm_omnibus': 200,
            'n_perm_max_seq': 50,
            'alpha_comp': 0.26,
            'n_perm_comp': 200,
            'tail': 'two',
            'permute_in_time': True,
            'perm_type': 'random'
            }
    comp = NetworkComparison()

    comp_settings['stats_type'] = 'independent'
    res_within = comp.compare_within(
        comp_settings, res_0, res_1, data_0, data_1)
    comp_settings['stats_type'] = 'independent'
    res_between = comp.compare_between(
        comp_settings,
        network_set_a=np.array(list(it.repeat(res_0, 10))),
        network_set_b=np.array(list(it.repeat(res_1, 10))),
        data_set_a=np.array(list(it.repeat(data_0, 10))),
        data_set_b=np.array(list(it.repeat(data_1, 10))))
    s = 0
    t = [1, 2]
    test = ['Within', 'Between']
    for (i, res) in enumerate([res_within, res_between]):
        # Union network
        # TODO do we need the max_lag entry?
        assert (res.get_adjacency_matrix('union')[s, t] == 1).all(), (
            '{0}-test did not return correct union network links.'.format(
                test[i]))
        no_diff = np.extract(np.invert(res.get_adjacency_matrix('comparison')),
                             res.get_adjacency_matrix('union'))
        assert (no_diff == 0).all(), (
            '{0}-test did not return 0 in union network for no links.'.format(
                test[i]))
        # Comparison
        assert res.get_adjacency_matrix('comparison')[s, t].all(), (
            '{0}-test did not return correct comparison results.'.format(
                test[i]))
        no_diff = np.extract(np.invert(res.get_adjacency_matrix('comparison')),
                             res.get_adjacency_matrix('comparison'))
        assert (no_diff == 0).all(), (
            '{0}-test did not return 0 comparison for non-sign. links.'.format(
                test[i]))
        # Abs. difference
        assert (res.get_adjacency_matrix('diff_abs')[s, t] > 0).all(), (
            '{0}-test did not return correct absolute differences.'.format(
                test[i]))
        no_diff = np.extract(np.invert(res.get_adjacency_matrix('comparison')),
                             res.get_adjacency_matrix('diff_abs'))
        assert (no_diff == 0).all(), (
            '{0}-test did not return 0 difference for non-sign. links.'.format(
                test[i]))
        # p-value
        p_max = 1 / comp_settings['n_perm_comp']
        assert (res.get_adjacency_matrix('pvalue')[s, t] == p_max).all(), (
            '{0}-test did not return correct p-value for sign. links.'.format(
                test[i]))
        no_diff = np.extract(np.invert(res.get_adjacency_matrix('comparison')),
                             res.get_adjacency_matrix('pvalue'))
        assert (no_diff == 1).all(), (
            '{0}-test did not return p-vals of 1 for non-sign. links.'.format(
                test[i]))
# Ejemplo n.º 8
def test_assertions():
    """Test if input checks raise errors.

    Verifies that NetworkComparison rejects invalid settings and
    incompatible data sets with the expected exception types.
    """
    data = Data()
    data.generate_mute_data(100, 5)

    # Load previously generated example data
    path = os.path.join(os.path.dirname(__file__), 'data/')

    def _load(fname):
        # Close the pickle file deterministically (open() without a context
        # manager leaked the handle).
        with open(path + fname, 'rb') as f:
            return pickle.load(f)

    res_0 = _load('mute_results_0.p')
    res_1 = _load('mute_results_1.p')
    res_2 = _load('mute_results_2.p')
    res_3 = _load('mute_results_3.p')

    # comparison settings
    comp_settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'n_perm_max_stat': 50,
        'n_perm_min_stat': 50,
        'n_perm_omnibus': 200,
        'n_perm_max_seq': 50,
        'tail': 'two'
    }

    # no. permutations insufficient for requested alpha
    comp_settings['n_perm_comp'] = 6
    comp_settings['alpha_comp'] = 0.001
    comp_settings['stats_type'] = 'independent'
    comp = NetworkComparison()
    with pytest.raises(RuntimeError):
        comp._initialise(comp_settings)

    # data sets have unequal no. replications
    dat2 = Data()
    dat2.generate_mute_data(100, 3)
    comp_settings['stats_type'] = 'dependent'
    comp_settings['alpha_comp'] = 0.05
    comp_settings['n_perm_comp'] = 1000
    comp = NetworkComparison()
    with pytest.raises(AssertionError):
        comp.compare_within(comp_settings, res_0, res_1, data, dat2)

    # data sets have unequal no. realisations
    dat2 = Data()
    dat2.generate_mute_data(80, 5)
    comp_settings['stats_type'] = 'dependent'
    comp_settings['alpha_comp'] = 0.05
    comp_settings['n_perm_comp'] = 21
    comp = NetworkComparison()
    with pytest.raises(RuntimeError):
        comp.compare_within(comp_settings, res_0, res_1, data, dat2)

    # no. replications/subjects too small for dependent-samples test
    comp_settings['stats_type'] = 'dependent'
    comp_settings['n_perm_comp'] = 1000
    comp = NetworkComparison()
    with pytest.raises(RuntimeError):  # between
        comp.compare_between(comp_settings,
                             network_set_a=np.array((res_0, res_1)),
                             network_set_b=np.array((res_2, res_3)),
                             data_set_a=np.array((data, data)),
                             data_set_b=np.array((data, data)))
    with pytest.raises(RuntimeError):  # within
        comp.compare_within(comp_settings, res_0, res_1, dat2, dat2)

    # no. replications/subjects too small for independent-samples test
    comp_settings['stats_type'] = 'independent'
    comp = NetworkComparison()
    with pytest.raises(RuntimeError):  # between
        comp.compare_between(comp_settings,
                             network_set_a=np.array((res_0, res_1)),
                             network_set_b=np.array((res_2, res_3)),
                             data_set_a=np.array((data, data)),
                             data_set_b=np.array((data, data)))
    with pytest.raises(RuntimeError):  # within
        comp.compare_within(comp_settings, res_0, res_1, dat2, dat2)

    # add target to network that is not in the data object
    dat2 = Data(np.random.rand(2, 1000, 50), dim_order='psr')
    comp_settings['alpha_comp'] = 0.05
    comp_settings['n_perm_comp'] = 21
    comp = NetworkComparison()
    with pytest.raises(IndexError):
        comp.compare_within(comp_settings, res_0, res_2, dat2, dat2)
# Ejemplo n.º 9
def test_network_comparison_use_cases():
    """Run all intended use cases, within/between, dependent/independent."""
    data = Data()
    data.generate_mute_data(100, 5)

    path = os.path.join(os.path.dirname(__file__), 'data/')

    def _load(fname):
        # Load pickled example results, closing the file handle afterwards
        # (pickle.load(open(...)) leaked it).
        with open(path + fname, 'rb') as f:
            return pickle.load(f)

    res_0 = _load('mute_results_0.p')
    res_1 = _load('mute_results_1.p')
    res_2 = _load('mute_results_2.p')
    res_3 = _load('mute_results_3.p')
    res_4 = _load('mute_results_4.p')

    # comparison settings
    comp_settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'n_perm_max_stat': 50,
        'n_perm_min_stat': 50,
        'n_perm_omnibus': 200,
        'n_perm_max_seq': 50,
        'alpha_comp': 0.26,
        'n_perm_comp': 4,
        'tail': 'two'
    }

    comp = NetworkComparison()

    print('\n\nTEST 0 - independent within')
    comp_settings['stats_type'] = 'independent'
    comp.compare_within(comp_settings, res_0, res_1, data, data)

    print('\n\nTEST 1 - dependent within')
    comp_settings['stats_type'] = 'dependent'
    comp.compare_within(comp_settings, res_0, res_1, data, data)

    print('\n\nTEST 2 - independent between')
    comp_settings['stats_type'] = 'independent'
    comp.compare_between(comp_settings,
                         network_set_a=np.array((res_0, res_1)),
                         network_set_b=np.array((res_2, res_3)),
                         data_set_a=np.array((data, data)),
                         data_set_b=np.array((data, data)))

    print('\n\nTEST 3 - dependent between')
    comp_settings['stats_type'] = 'dependent'
    comp.compare_between(comp_settings,
                         network_set_a=np.array((res_0, res_1)),
                         network_set_b=np.array((res_2, res_3)),
                         data_set_a=np.array((data, data)),
                         data_set_b=np.array((data, data)))

    print('\n\nTEST 4 - independent within unbalanced')
    comp_settings['stats_type'] = 'independent'
    comp = NetworkComparison()
    comp.compare_within(comp_settings, res_0, res_1, data, data)

    print('\n\nTEST 5 - independent between unbalanced')
    comp_settings['stats_type'] = 'independent'
    comp = NetworkComparison()
    comp.compare_between(comp_settings,
                         network_set_a=np.array((res_0, res_1)),
                         network_set_b=np.array((res_2, res_3, res_4)),
                         data_set_a=np.array((data, data)),
                         data_set_b=np.array((data, data, data)))
# Ejemplo n.º 10
# Generate network comparison results shared by the tests below.
comp_settings = dict(
    cmi_estimator='JidtKraskovCMI',
    stats_type='independent',
    n_perm_max_stat=50,
    n_perm_min_stat=50,
    n_perm_omnibus=200,
    n_perm_max_seq=50,
    alpha_comp=0.26,
    n_perm_comp=200,
    tail='two',
    permute_in_time=True,
    perm_type='random',
)
comp = NetworkComparison()
res_within = comp.compare_within(comp_settings, res_0, res_1, data_0, data_1)


def test_export_networkx():
    """Test export to networkx DiGrap() object."""
    # raise AssertionError('Test not yet implemented.')
    # Test export of networx graph for network inference results.
    weights = 'binary'
    adj_matrix = res_0.get_adjacency_matrix(weights=weights, fdr=False)
    io.export_networkx_graph(adjacency_matrix=adj_matrix, weights=weights)

    # Test export of source graph
    for s in [True, False]:
        io.export_networkx_source_graph(results=res_0,
                                        target=1,
                                        sign_sources=s,
# Ejemplo n.º 11
def test_network_comparison_use_cases():
    """Run all intended use cases, within/between, dependent/independent."""
    dat = Data()
    dat.generate_mute_data(100, 5)

    # Load previously generated example data (pickled). allow_pickle=True is
    # required since numpy 1.16.3 to unpickle non-array objects from np.load.
    def _load(fname):
        return np.load(os.path.join(os.path.dirname(__file__), fname),
                       allow_pickle=True)

    res_0 = _load('data/mute_res_0.pkl')
    res_1 = _load('data/mute_res_1.pkl')
    res_2 = _load('data/mute_res_2.pkl')
    res_3 = _load('data/mute_res_3.pkl')
    res_4 = _load('data/mute_res_4.pkl')

    # comparison settings
    comp_settings = {
            'cmi_estimator': 'JidtKraskovCMI',
            'n_perm_max_stat': 50,
            'n_perm_min_stat': 50,
            'n_perm_omnibus': 200,
            'n_perm_max_seq': 50,
            'alpha_comp': 0.26,
            'n_perm_comp': 4,
            'tail': 'two'
            }

    comp = NetworkComparison()

    print('\n\nTEST 0 - independent within')
    comp_settings['stats_type'] = 'independent'
    comp.compare_within(comp_settings, res_0, res_1, dat, dat)

    print('\n\nTEST 1 - dependent within')
    comp_settings['stats_type'] = 'dependent'
    comp.compare_within(comp_settings, res_0, res_1, dat, dat)

    print('\n\nTEST 2 - independent between')
    comp_settings['stats_type'] = 'independent'
    comp.compare_between(comp_settings,
                         network_set_a=np.array((res_0, res_1)),
                         network_set_b=np.array((res_2, res_3)),
                         data_set_a=np.array((dat, dat)),
                         data_set_b=np.array((dat, dat)))

    print('\n\nTEST 3 - dependent between')
    comp_settings['stats_type'] = 'dependent'
    comp.compare_between(comp_settings,
                         network_set_a=np.array((res_0, res_1)),
                         network_set_b=np.array((res_2, res_3)),
                         data_set_a=np.array((dat, dat)),
                         data_set_b=np.array((dat, dat)))

    print('\n\nTEST 4 - independent within unbalanced')
    comp_settings['stats_type'] = 'independent'
    comp = NetworkComparison()
    comp.compare_within(comp_settings, res_0, res_1, dat, dat)

    print('\n\nTEST 5 - independent between unbalanced')
    comp_settings['stats_type'] = 'independent'
    comp = NetworkComparison()
    comp.compare_between(comp_settings,
                         network_set_a=np.array((res_0, res_1)),
                         network_set_b=np.array((res_2, res_3, res_4)),
                         data_set_a=np.array((dat, dat)),
                         data_set_b=np.array((dat, dat, dat)))
# Ejemplo n.º 12
def test_assertions():
    """Test if input checks raise errors.

    Duplicate variant: verifies that NetworkComparison rejects invalid
    settings and incompatible data sets with the expected exception types.
    """
    data = Data()
    data.generate_mute_data(100, 5)

    # Load previously generated example data
    path = os.path.join(os.path.dirname(__file__), 'data/')

    def _load(fname):
        # Load pickled example results without leaking the file handle.
        with open(path + fname, 'rb') as f:
            return pickle.load(f)

    res_0 = _load('mute_results_0.p')
    res_1 = _load('mute_results_1.p')
    res_2 = _load('mute_results_2.p')
    res_3 = _load('mute_results_3.p')

    # comparison settings
    comp_settings = {
            'cmi_estimator': 'JidtKraskovCMI',
            'n_perm_max_stat': 50,
            'n_perm_min_stat': 50,
            'n_perm_omnibus': 200,
            'n_perm_max_seq': 50,
            'tail': 'two'
            }

    # no. permutations insufficient for requested alpha
    comp_settings['n_perm_comp'] = 6
    comp_settings['alpha_comp'] = 0.001
    comp_settings['stats_type'] = 'independent'
    comp = NetworkComparison()
    with pytest.raises(RuntimeError):
        comp._initialise(comp_settings)

    # data sets have unequal no. replications
    dat2 = Data()
    dat2.generate_mute_data(100, 3)
    comp_settings['stats_type'] = 'dependent'
    comp_settings['alpha_comp'] = 0.05
    comp_settings['n_perm_comp'] = 1000
    comp = NetworkComparison()
    with pytest.raises(AssertionError):
        comp.compare_within(comp_settings, res_0, res_1, data, dat2)

    # data sets have unequal no. realisations
    dat2 = Data()
    dat2.generate_mute_data(80, 5)
    comp_settings['stats_type'] = 'dependent'
    comp_settings['alpha_comp'] = 0.05
    comp_settings['n_perm_comp'] = 21
    comp = NetworkComparison()
    with pytest.raises(RuntimeError):
        comp.compare_within(comp_settings, res_0, res_1, data, dat2)

    # no. replications/subjects too small for dependent-samples test
    comp_settings['stats_type'] = 'dependent'
    comp_settings['n_perm_comp'] = 1000
    comp = NetworkComparison()
    with pytest.raises(RuntimeError):   # between
        comp.compare_between(comp_settings,
                             network_set_a=np.array((res_0, res_1)),
                             network_set_b=np.array((res_2, res_3)),
                             data_set_a=np.array((data, data)),
                             data_set_b=np.array((data, data)))
    with pytest.raises(RuntimeError):   # within
        comp.compare_within(comp_settings, res_0, res_1, dat2, dat2)

    # no. replications/subjects too small for independent-samples test
    comp_settings['stats_type'] = 'independent'
    comp = NetworkComparison()
    with pytest.raises(RuntimeError):   # between
        comp.compare_between(comp_settings,
                             network_set_a=np.array((res_0, res_1)),
                             network_set_b=np.array((res_2, res_3)),
                             data_set_a=np.array((data, data)),
                             data_set_b=np.array((data, data)))
    with pytest.raises(RuntimeError):   # within
        comp.compare_within(comp_settings, res_0, res_1, dat2, dat2)

    # add target to network that is not in the data object
    dat2 = Data(np.random.rand(2, 1000, 50), dim_order='psr')
    comp_settings['alpha_comp'] = 0.05
    comp_settings['n_perm_comp'] = 21
    comp = NetworkComparison()
    with pytest.raises(IndexError):
        comp.compare_within(comp_settings, res_0, res_2, dat2, dat2)
# Ejemplo n.º 13
def test_network_comparison_use_cases():
    """Run all intended use cases, within/between, dependent/independent."""
    data = Data()
    data.generate_mute_data(100, 5)

    path = os.path.join(os.path.dirname(__file__), 'data/')

    def _load(fname):
        # Close the pickle file deterministically (open() without a context
        # manager leaked the handle).
        with open(path + fname, 'rb') as f:
            return pickle.load(f)

    res_0 = _load('mute_results_0.p')
    res_1 = _load('mute_results_1.p')
    res_2 = _load('mute_results_2.p')
    res_3 = _load('mute_results_3.p')
    res_4 = _load('mute_results_4.p')

    # comparison settings
    comp_settings = {
            'cmi_estimator': 'JidtKraskovCMI',
            'n_perm_max_stat': 50,
            'n_perm_min_stat': 50,
            'n_perm_omnibus': 200,
            'n_perm_max_seq': 50,
            'alpha_comp': 0.26,
            'n_perm_comp': 4,
            'tail': 'two'
            }

    comp = NetworkComparison()

    print('\n\nTEST 0 - independent within')
    comp_settings['stats_type'] = 'independent'
    comp.compare_within(comp_settings, res_0, res_1, data, data)

    print('\n\nTEST 1 - dependent within')
    comp_settings['stats_type'] = 'dependent'
    comp.compare_within(comp_settings, res_0, res_1, data, data)

    print('\n\nTEST 2 - independent between')
    comp_settings['stats_type'] = 'independent'
    comp.compare_between(comp_settings,
                         network_set_a=np.array((res_0, res_1)),
                         network_set_b=np.array((res_2, res_3)),
                         data_set_a=np.array((data, data)),
                         data_set_b=np.array((data, data)))

    print('\n\nTEST 3 - dependent between')
    comp_settings['stats_type'] = 'dependent'
    comp.compare_between(comp_settings,
                         network_set_a=np.array((res_0, res_1)),
                         network_set_b=np.array((res_2, res_3)),
                         data_set_a=np.array((data, data)),
                         data_set_b=np.array((data, data)))

    print('\n\nTEST 4 - independent within unbalanced')
    comp_settings['stats_type'] = 'independent'
    comp = NetworkComparison()
    comp.compare_within(comp_settings, res_0, res_1, data, data)

    print('\n\nTEST 5 - independent between unbalanced')
    comp_settings['stats_type'] = 'independent'
    comp = NetworkComparison()
    comp.compare_between(comp_settings,
                         network_set_a=np.array((res_0, res_1)),
                         network_set_b=np.array((res_2, res_3, res_4)),
                         data_set_a=np.array((data, data)),
                         data_set_b=np.array((data, data, data)))
# Ejemplo n.º 14
def test_network_comparison():
    """Run within/between, dependent/independent network comparisons.

    Exercises NetworkComparison for every inference type (bmi, mmi, bte,
    mte) on both discrete and continuous data, loading precomputed
    network inference results from pickle files on disk.
    """
    # Generate continuous/discrete data and shuffled dummy counterparts.
    data_cont = generate_continuous_data(n_replications=10)
    data_disc = generate_discrete_data(n_replications=10)
    data_dummy_cont = _generate_dummy_data(data_cont)
    data_dummy_disc = _generate_dummy_data(data_disc)

    # Set path to load results from network inference on continuous data.
    path = os.path.join(os.path.dirname(__file__), 'data/')

    # Load discrete results once to get alphabet sizes for the settings.
    # Use a context manager so the file handle is closed deterministically
    # (the original left closing to the garbage collector).
    with open('{0}discrete_results_mte_JidtDiscreteCMI.p'.format(path),
              'rb') as f:
        res = pickle.load(f)
    comp_settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'n_perm_max_stat': 50,
        'n_perm_min_stat': 50,
        'n_perm_omnibus': 200,
        'n_perm_max_seq': 50,
        'alpha_comp': 0.26,
        'n_perm_comp': 4,
        'tail': 'two',
        'n_discrete_bins': res.settings['alph1']
    }
    comp = NetworkComparison()

    # Perform comparison. BUGFIX: the original list was
    # ['bmi', 'mmi', 'bmi', 'bte', 'mte'] -- 'bmi' appeared twice, running
    # the identical comparison a second time. Each type now runs once.
    for inference in ['bmi', 'mmi', 'bte', 'mte']:

        # Discrete data.
        estimator = 'JidtDiscreteCMI'
        with open('{0}discrete_results_{1}_{2}.p'.format(
                path, inference, estimator), 'rb') as f:
            res = pickle.load(f)
        comp_settings['cmi_estimator'] = estimator
        for stats_type in ['dependent', 'independent']:
            print(
                ('\n\n\n######### Running network comparison on {0} '
                 'results ({1} estimator) on discrete data, {2} test.'.format(
                     inference, estimator, stats_type)))
            comp_settings['stats_type'] = stats_type

            c_within = comp.compare_within(comp_settings, res, res, data_disc,
                                           data_dummy_disc)
            c_between = comp.compare_between(comp_settings,
                                             network_set_a=np.array(
                                                 (res, res)),
                                             network_set_b=np.array(
                                                 (res, res)),
                                             data_set_a=np.array(
                                                 (data_disc, data_disc)),
                                             data_set_b=np.array(
                                                 (data_dummy_disc,
                                                  data_dummy_disc)))
            _verify_test(c_within, c_between, res)

        # Continuous data.
        for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
            with open('{0}continuous_results_{1}_{2}.p'.format(
                    path, inference, estimator), 'rb') as f:
                res = pickle.load(f)
            comp_settings['cmi_estimator'] = estimator
            for stats_type in ['dependent', 'independent']:
                print(('\n\n\n######### Running network comparison on {0} '
                       'results ({1} estimator) on continuous data, {2} test.'.
                       format(inference, estimator, stats_type)))
                comp_settings['stats_type'] = stats_type
                c_within = comp.compare_within(comp_settings, res, res,
                                               data_cont, data_dummy_cont)
                c_between = comp.compare_between(comp_settings,
                                                 network_set_a=np.array(
                                                     (res, res)),
                                                 network_set_b=np.array(
                                                     (res, res)),
                                                 data_set_a=np.array(
                                                     (data_cont, data_cont)),
                                                 data_set_b=np.array(
                                                     (data_dummy_cont,
                                                      data_dummy_cont)))
                _verify_test(c_within, c_between, res)
# Example 15
def test_results_network_comparison():
    """Test results class for network comparison."""
    # One MuTe data set plus one random data set to compare against.
    data_0 = Data()
    data_0.generate_mute_data(500, 5)
    data_1 = Data(np.random.rand(5, 500, 5), 'psr')

    # Load precomputed network inference results from disk.
    load_path = os.path.join(os.path.dirname(__file__), 'data/')
    with open(load_path + 'mute_results_0.p', 'rb') as pkl:
        res_0 = pickle.load(pkl)
    with open(load_path + 'mute_results_1.p', 'rb') as pkl:
        res_1 = pickle.load(pkl)

    # comparison settings
    comp_settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'n_perm_max_stat': 50,
        'n_perm_min_stat': 50,
        'n_perm_omnibus': 200,
        'n_perm_max_seq': 50,
        'alpha_comp': 0.26,
        'n_perm_comp': 200,
        'tail': 'two',
        'permute_in_time': True,
        'perm_type': 'random'
    }
    comp = NetworkComparison()

    # Run both comparison flavors with independent statistics.
    comp_settings['stats_type'] = 'independent'
    res_within = comp.compare_within(
        comp_settings, res_0, res_1, data_0, data_1)
    comp_settings['stats_type'] = 'independent'
    res_between = comp.compare_between(
        comp_settings,
        network_set_a=np.array([res_0] * 10),
        network_set_b=np.array([res_1] * 10),
        data_set_a=np.array([data_0] * 10),
        data_set_b=np.array([data_1] * 10))

    src = 0
    tgts = [1, 2]
    for label, res in zip(['Within', 'Between'], [res_within, res_between]):
        # Get adjacency matrices.
        adj_union = res.get_adjacency_matrix('union')
        adj_comp = res.get_adjacency_matrix('comparison')
        adj_diff = res.get_adjacency_matrix('diff_abs')
        adj_pval = res.get_adjacency_matrix('pvalue')

        # All adjacency matrices must be non-zero at compared edges and
        # zero/False everywhere outside the union network.
        outside_union = np.invert(adj_union._edge_matrix)

        # Union network
        # TODO do we need the max_lag entry?
        assert adj_union._edge_matrix[src, tgts].all(), (
            '{0}-test did not return correct union network links.'.format(
                label))
        # Comparison
        assert adj_comp._edge_matrix[src, tgts].all(), (
            '{0}-test did not return correct comparison results.'.format(
                label))
        assert not adj_comp._edge_matrix[outside_union].any(), (
            '{0}-test did not return 0 comparison for non-sign. links.'.format(
                label))
        # Abs. difference
        assert adj_diff._edge_matrix[src, tgts].all(), (
            '{0}-test did not return correct absolute differences.'.format(
                label))
        assert not adj_diff._edge_matrix[outside_union].any(), (
            '{0}-test did not return 0 difference for non-sign. links.'.format(
                label))
        # p-value
        p_max = 1 / comp_settings['n_perm_comp']
        assert (adj_pval._weight_matrix[src, tgts] == p_max).all(), (
            '{0}-test did not return correct p-value for sign. links.'.format(
                label))
        assert not adj_pval._edge_matrix[outside_union].any(), (
            '{0}-test did not return p-vals of 1 for non-sign. links.'.format(
                label))
# Example 16
# Generate network comparison results.
# NOTE(review): this runs at import time and relies on `res_0`, `res_1`,
# `data_0` and `data_1` being defined at module level earlier in the file --
# confirm those names exist, otherwise importing this module fails.
comp_settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'stats_type': 'independent',
        'n_perm_max_stat': 50,
        'n_perm_min_stat': 50,
        'n_perm_omnibus': 200,
        'n_perm_max_seq': 50,
        'alpha_comp': 0.26,
        'n_perm_comp': 200,
        'tail': 'two',
        'permute_in_time': True,
        'perm_type': 'random'
        }
comp = NetworkComparison()
# Within-subject comparison of the two inference results; used as a
# module-level fixture by the tests below.
res_within = comp.compare_within(
    comp_settings, res_0, res_1, data_0, data_1)


def test_export_networkx():
    """Test export to networkx DiGraph() object."""
    # raise AssertionError('Test not yet implemented.')
    # Export the inferred network as a networkx graph (binary weights).
    weight_type = 'binary'
    matrix = res_0.get_adjacency_matrix(weights=weight_type, fdr=False)
    io.export_networkx_graph(adjacency_matrix=matrix, weights=weight_type)

    # Export the source graph, with and without restricting the export to
    # significant sources only.
    for significant_only in (True, False):
        io.export_networkx_source_graph(
            results=res_0, target=1, sign_sources=significant_only, fdr=False)