Example no. 1
import os
import pickle

import numpy as np

from idtxl.data import Data
from idtxl.network_comparison import NetworkComparison


def test_p_value_union():
    """Test if the p-value is calculated correctly."""
    data = Data()
    data.generate_mute_data(100, 5)
    path = os.path.join(os.path.dirname(__file__), 'data/')
    with open(path + 'mute_results_0.p', 'rb') as f:
        res_0 = pickle.load(f)
    with open(path + 'mute_results_1.p', 'rb') as f:
        res_1 = pickle.load(f)
    comp_settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'n_perm_max_stat': 50,
        'n_perm_min_stat': 50,
        'n_perm_omnibus': 200,
        'n_perm_max_seq': 50,
        'n_perm_comp': 6,
        'alpha_comp': 0.2,
        'tail_comp': 'one_bigger',
        'stats_type': 'independent'
    }
    comp = NetworkComparison()
    comp.compare_within(comp_settings, res_0, res_1, data, data)

    # Replace the surrogate CMI by all zeros and set the CMI difference to
    # 0.5: with the 'one_bigger' tail no surrogate reaches the difference, so
    # the result should be significant at the smallest resolvable p-value of
    # 1 / n_perm_comp. Then replace the surrogate CMI by all ones: every
    # surrogate exceeds the difference, so the result should be
    # non-significant with a p-value of 1.0. (The arithmetic is sketched
    # after this test.)
    comp._initialise(comp_settings)
    comp._create_union(res_0, res_1)
    comp._calculate_cmi_diff_within(data, data)
    comp._create_surrogate_distribution_within(data, data)
    target = 1
    source = 0

    comp.cmi_surr[target] = np.zeros((1, comp_settings['n_perm_comp']))
    comp.cmi_diff[target] = np.array([0.5])
    comp._p_value_union()
    p = comp.pvalue
    s = comp.significance
    assert s[target][source], (
        'The significance was not determined correctly: {0}'.format(s[target]))
    assert p[target][source] == 1 / comp_settings['n_perm_comp'], (
        'The p-value was not calculated correctly: {0}'.format(p[target]))

    comp.cmi_surr[target] = np.ones((1, comp_settings['n_perm_comp']))
    comp.cmi_diff[target] = np.array([0.5])
    comp._p_value_union()
    p = comp.pvalue
    s = comp.significance
    assert not s[target][source], (
        'The significance was not determined correctly: {0}'.format(s[target]))
    assert p[target][source] == 1.0, (
        'The p-value was not calculated correctly: {0}'.format(p[target]))
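
For reference, the asserted values follow from a one-tailed permutation p-value that is floored at 1 / n_perm_comp, the smallest value that n_perm_comp permutations can resolve. The sketch below is not IDTxl's implementation of _p_value_union(); it is a minimal illustration, with the helper name one_tailed_p_value chosen for this example only, of why all-zero surrogates give p = 1/6 (significant at alpha_comp = 0.2) and all-one surrogates give p = 1.0.

import numpy as np

def one_tailed_p_value(diff, surrogates, n_perm):
    """Illustrative 'one_bigger' permutation p-value: the fraction of
    surrogate values reaching the observed difference, floored at 1/n_perm
    because n_perm permutations cannot resolve smaller p-values."""
    count = np.sum(surrogates >= diff)
    return max(count, 1) / n_perm

n_perm = 6   # n_perm_comp in the settings above
alpha = 0.2  # alpha_comp in the settings above

# All-zero surrogates: none reaches the observed difference of 0.5, so the
# p-value is the floor 1/6 ~ 0.167 < 0.2 and the result is significant.
print(one_tailed_p_value(0.5, np.zeros(n_perm), n_perm))  # 0.1666...

# All-one surrogates: every surrogate exceeds 0.5, so p = 6/6 = 1.0 and the
# result is not significant.
print(one_tailed_p_value(0.5, np.ones(n_perm), n_perm))   # 1.0
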

# A second variant of the same test, written against an older version of the
# comparison API: the results are loaded with np.load, cmi_surr is indexed by
# permutation first, and _p_value_union() returns the p-values and
# significance instead of storing them as attributes.
def test_p_value_union():
    """Test if the p-value is calculated correctly."""
    dat = Data()
    dat.generate_mute_data(100, 5)
    # np.load falls back to pickle for these .pkl files; NumPy >= 1.16.3
    # requires allow_pickle=True for that (see the note after this test).
    res_0 = np.load(os.path.join(os.path.dirname(__file__),
                                 'data/mute_res_0.pkl'), allow_pickle=True)
    res_1 = np.load(os.path.join(os.path.dirname(__file__),
                                 'data/mute_res_1.pkl'), allow_pickle=True)
    comp_settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'n_perm_max_stat': 50,
        'n_perm_min_stat': 50,
        'n_perm_omnibus': 200,
        'n_perm_max_seq': 50,
        'n_perm_comp': 6,
        'alpha_comp': 0.2,
        'tail_comp': 'one_bigger',
        'stats_type': 'independent'
        }
    comp = NetworkComparison()
    res_comp = comp.compare_within(comp_settings, res_0, res_1, dat, dat)

    # Replace the surrogate CMI by all zeros for source 0 and all ones for
    # source 1. Set the CMI difference to 0.5 for both sources. Check if this
    # results in one significant and one non-significant result with the
    # correct p-values.
    comp._initialise(comp_settings)
    comp._create_union(res_0, res_1)
    comp._calculate_cmi_diff_within(dat, dat)
    comp._create_surrogate_distribution_within(dat, dat)
    target = 1
    for p in range(comp_settings['n_perm_comp']):
        comp.cmi_surr[p][target] = np.array([0, 1])
    comp.cmi_diff[target] = np.array([0.5, 0.5])
    [p, s] = comp._p_value_union()
    assert (s[target] == np.array([True, False])).all(), (
                                    'The significance was not determined '
                                    'correctly: {0}'.format(s[target]))
    p_1 = 1 / comp_settings['n_perm_comp']
    p_2 = 1.0
    print(p[target])
    assert (p[target] == np.array([p_1, p_2])).all(), (
                                'The p-value was not calculated correctly: {0}'
                                .format(p[target]))
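
A side note on the two loading styles used above (a minimal sketch; the file names are the ones referenced in the tests and are assumed to sit in a data/ directory next to the test module): plain pickle.load and np.load can both read these pickled result files, but since NumPy 1.16.3 np.load refuses arbitrary pickles unless allow_pickle=True is passed.

import os
import pickle

import numpy as np

path = os.path.join(os.path.dirname(__file__), 'data/')

# Plain pickle with an explicit with-block to close the file (first test).
with open(os.path.join(path, 'mute_results_0.p'), 'rb') as f:
    res_pickle = pickle.load(f)

# np.load falls back to pickle for non-.npy files, but needs
# allow_pickle=True on NumPy >= 1.16.3 (second test).
res_numpy = np.load(os.path.join(path, 'mute_res_0.pkl'), allow_pickle=True)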