Code example #1
File: test_bivariate_mi.py  Project: monperrus/IDTxl
def test_zero_lag():
    """Test analysis for 0 lag."""
    expected_mi, source, source_uncorr, target = _get_gauss_data()
    data = Data(np.hstack((source, target)),
                dim_order='sp',
                normalise=False,
                seed=SEED)
    settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'n_perm_max_stat': 21,
        'n_perm_min_stat': 21,
        'n_perm_max_seq': 21,
        'n_perm_omnibus': 21,
        'tau_sources': 0,  # this is not required, but shouldn't throw an error if provided
        'max_lag_sources': 0,
        'min_lag_sources': 0
    }
    nw = BivariateMI()
    results = nw.analyse_single_target(settings, data, target=1, sources='all')
    mi_estimator = JidtKraskovMI(settings={'normalise': False})
    jidt_mi = mi_estimator.estimate(source, target)
    omnibus_mi = results.get_single_target(1, fdr=False).omnibus_mi
    print('Estimated omnibus MI: {0:0.6f}, estimated MI using JIDT core '
          'estimator: {1:0.6f} (expected: {2:0.6f}).'.format(
              omnibus_mi, jidt_mi, expected_mi))
    assert np.isclose(omnibus_mi, jidt_mi, atol=0.005), (
        'Zero-lag omnibus MI ({0:0.6f}) differs from JIDT estimate '
        '({1:0.6f}).'.format(omnibus_mi, jidt_mi))
    assert np.isclose(
        omnibus_mi, expected_mi,
        atol=0.05), ('Zero-lag omnibus MI ({0:0.6f}) differs from expected MI '
                     '({1:0.6f}).'.format(omnibus_mi, expected_mi))
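Note: several tests on this page rely on a `_get_gauss_data` helper from IDTxl's test utilities. The sketch below is a hypothetical reconstruction of what such a helper computes, assuming the covariance/noise mixing and the closed-form Gaussian MI used elsewhere on this page; the real helper's signature and defaults may differ.

import numpy as np

def _get_gauss_data_sketch(n=10000, covariance=0.4, seed=None):
    # Hypothetical stand-in for IDTxl's _get_gauss_data test helper:
    # generate a correlated source, an uncorrelated control source, and a
    # target, plus the analytically expected MI for the induced correlation.
    rng = np.random.default_rng(seed)
    source = rng.normal(0, 1, size=(n, 1))
    source_uncorr = rng.normal(0, 1, size=(n, 1))
    target = (covariance * source +
              (1 - covariance) * rng.normal(0, 1, size=(n, 1)))
    corr = covariance / np.sqrt(covariance**2 + (1 - covariance)**2)
    expected_mi = -0.5 * np.log(1 - corr**2)  # Gaussian MI in nats
    return expected_mi, source, source_uncorr, target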
Code example #2
def test_zero_lag():
    """Test analysis for 0 lag."""
    covariance = 0.4
    n = 10000
    source = np.random.normal(0, 1, size=n)
    target = (covariance * source +
              (1 - covariance) * np.random.normal(0, 1, size=n))
    # expected_corr = covariance / (np.sqrt(covariance**2 + (1-covariance)**2))
    corr = np.corrcoef(source, target)[0, 1]
    expected_mi = -0.5 * np.log(1 - corr**2)

    data = Data(np.vstack((source, target)), dim_order='ps', normalise=False)
    settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'n_perm_max_stat': 21,
        'n_perm_min_stat': 21,
        'n_perm_max_seq': 21,
        'n_perm_omnibus': 21,
        'max_lag_sources': 0,
        'min_lag_sources': 0
    }
    nw = BivariateMI()
    results = nw.analyse_single_target(settings, data, target=1, sources='all')
    mi_estimator = JidtKraskovMI(settings={})
    jidt_mi = mi_estimator.estimate(source, target)
    omnibus_mi = results.get_single_target(1, fdr=False).omnibus_mi
    print('Estimated omnibus MI: {0:0.6f}, estimated MI using JIDT core '
          'estimator: {1:0.6f} (expected: {2:0.6f}).'.format(
              omnibus_mi, jidt_mi, expected_mi))
    assert np.isclose(omnibus_mi, jidt_mi, rtol=0.05), (
        'Zero-lag omnibus MI ({0:0.6f}) differs from JIDT estimate ({1:0.6f}).'
        .format(omnibus_mi, jidt_mi))
    assert np.isclose(omnibus_mi, expected_mi, rtol=0.05), (
        'Zero-lag omnibus MI ({0:0.6f}) differs from expected MI '
        '({1:0.6f}).'.format(omnibus_mi, expected_mi))
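The expected value in this test is the closed-form MI of two jointly Gaussian variables, I(X;Y) = -0.5 * ln(1 - rho^2), in nats. A minimal standalone check of that formula, independent of IDTxl:

import numpy as np

rng = np.random.default_rng(0)
covariance = 0.4
n = 10000
x = rng.normal(0, 1, size=n)
y = covariance * x + (1 - covariance) * rng.normal(0, 1, size=n)
rho = np.corrcoef(x, y)[0, 1]            # empirical correlation, ~0.55 here
analytic_mi = -0.5 * np.log(1 - rho**2)  # ~0.18 nats for rho ~ 0.55
print('rho = {0:.3f}, analytic MI = {1:.4f} nats'.format(rho, analytic_mi))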
Code example #3
File: test_bivariate_mi.py  Project: monperrus/IDTxl
def test_bivariate_mi_one_realisation_per_replication():
    """Test boundary case of one realisation per replication."""
    # Create a data set where one pattern fits into the time series exactly
    # once. This way, we get one realisation per replication for each
    # variable, which is easier to assert/verify later. We also test
    # data.get_realisations() this way.
    settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'n_perm_max_stat': 21,
        'max_lag_sources': 5,
        'min_lag_sources': 4
    }
    target = 0
    data = Data(normalise=False, seed=SEED)
    n_repl = 10
    n_procs = 2
    n_points = n_procs * (settings['max_lag_sources'] + 1) * n_repl
    data.set_data(
        np.arange(n_points).reshape(n_procs, settings['max_lag_sources'] + 1,
                                    n_repl), 'psr')
    nw = BivariateMI()
    nw._initialise(settings, data, 'all', target)
    assert (not nw.selected_vars_full)
    assert (not nw.selected_vars_sources)
    assert (not nw.selected_vars_target)
    assert ((nw._replication_index == np.arange(n_repl)).all())
    assert (nw._current_value == (target, settings['max_lag_sources']))
    assert (nw._current_value_realisations[:, 0] == data.data[target,
                                                              -1, :]).all()
Code example #4
def test_bivariate_mi_one_realisation_per_replication():
    """Test boundary case of one realisation per replication."""
    # Create a data set where one pattern fits into the time series exactly
    # once. This way, we get one realisation per replication for each
    # variable, which is easier to assert/verify later. We also test
    # data.get_realisations() this way.
    settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'n_perm_max_stat': 21,
        'max_lag_sources': 5,
        'min_lag_sources': 4}
    target = 0
    data = Data(normalise=False)
    n_repl = 10
    n_procs = 2
    n_points = n_procs * (settings['max_lag_sources'] + 1) * n_repl
    data.set_data(np.arange(n_points).reshape(
        n_procs, settings['max_lag_sources'] + 1, n_repl), 'psr')
    nw = BivariateMI()
    nw._initialise(settings, data, 'all', target)
    assert (not nw.selected_vars_full)
    assert (not nw.selected_vars_sources)
    assert (not nw.selected_vars_target)
    assert ((nw._replication_index == np.arange(n_repl)).all())
    assert (nw._current_value == (target, settings['max_lag_sources']))
    assert (nw._current_value_realisations[:, 0] ==
            data.data[target, -1, :]).all()
Code example #5
def test_gauss_data():
    """Test bivariate MI estimation from correlated Gaussians."""
    # Generate data and add a delay of one sample.
    expected_mi, source, source_uncorr, target = _get_gauss_data()
    source = source[1:]
    source_uncorr = source_uncorr[1:]
    target = target[:-1]
    data = Data(np.hstack((source, source_uncorr, target)), dim_order='sp')
    settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'n_perm_max_stat': 21,
        'n_perm_min_stat': 21,
        'n_perm_max_seq': 21,
        'n_perm_omnibus': 21,
        'max_lag_sources': 2,
        'min_lag_sources': 1}
    nw = BivariateMI()
    results = nw.analyse_single_target(
        settings, data, target=2, sources=[0, 1])
    mi = results.get_single_target(2, fdr=False)['mi'][0]
    sources = results.get_target_sources(2, fdr=False)

    # Assert that only the correlated source was detected.
    assert len(sources) == 1, 'Wrong no. inferred sources: {0}.'.format(
        len(sources))
    assert sources[0] == 0, 'Wrong inferred source: {0}.'.format(sources[0])
    # Compare BivariateMI() estimate to JIDT estimate.
    est = JidtKraskovMI({'lag_mi': 1})
    jidt_mi = est.estimate(var1=source, var2=target)
    print('Estimated MI: {0:0.6f}, estimated MI using JIDT core estimator: '
          '{1:0.6f} (expected: {2:0.6f}).'.format(mi, jidt_mi, expected_mi))
    assert np.isclose(mi, jidt_mi, atol=0.005), (
        'Estimated MI {0:0.6f} differs from JIDT estimate {1:0.6f} (expected: '
        'MI {2:0.6f}).'.format(mi, jidt_mi, expected_mi))
Code example #6
File: test_bivariate_mi.py  Project: monperrus/IDTxl
def test_discrete_input():
    """Test bivariate MI estimation from discrete data."""
    # Generate Gaussian test data
    covariance = 0.4
    data = _get_discrete_gauss_data(covariance=covariance,
                                    n=10000,
                                    delay=1,
                                    normalise=False,
                                    seed=SEED)
    corr_expected = covariance / (1 * np.sqrt(covariance**2 +
                                              (1 - covariance)**2))
    expected_mi = calculate_mi(corr_expected)
    settings = {
        'cmi_estimator': 'JidtDiscreteCMI',
        'discretise_method': 'none',
        'n_discrete_bins': 5,  # alphabet size of the variables analysed
        'n_perm_max_stat': 21,
        'n_perm_omnibus': 30,
        'n_perm_max_seq': 30,
        'min_lag_sources': 1,
        'max_lag_sources': 2
    }
    nw = BivariateMI()
    res = nw.analyse_single_target(settings=settings, data=data, target=1)
    assert np.isclose(
        res._single_target[1].omnibus_mi, expected_mi, atol=0.05), (
            'Estimated MI for discrete variables is not correct. Expected: '
            '{0}, Actual results: {1}.'.format(
                expected_mi, res._single_target[1].omnibus_mi))
Code example #7
def test_zero_lag():
    """Test analysis for 0 lag."""
    covariance = 0.4
    n = 10000
    source = np.random.normal(0, 1, size=n)
    target = (covariance * source + (1 - covariance) *
              np.random.normal(0, 1, size=n))
    # expected_corr = covariance / (np.sqrt(covariance**2 + (1-covariance)**2))
    corr = np.corrcoef(source, target)[0, 1]
    expected_mi = -0.5 * np.log(1 - corr**2)

    data = Data(np.vstack((source, target)), dim_order='ps', normalise=False)
    settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'n_perm_max_stat': 21,
        'n_perm_min_stat': 21,
        'n_perm_max_seq': 21,
        'n_perm_omnibus': 21,
        'max_lag_sources': 0,
        'min_lag_sources': 0}
    nw = BivariateMI()
    results = nw.analyse_single_target(
        settings, data, target=1, sources='all')
    mi_estimator = JidtKraskovMI(settings={})
    jidt_mi = mi_estimator.estimate(source, target)
    omnibus_mi = results.get_single_target(1, fdr=False).omnibus_mi
    print('Estimated omnibus MI: {0:0.6f}, estimated MI using JIDT core '
          'estimator: {1:0.6f} (expected: {2:0.6f}).'.format(
              omnibus_mi, jidt_mi, expected_mi))
    assert np.isclose(omnibus_mi, jidt_mi, rtol=0.05), (
        'Zero-lag omnibus MI ({0:0.6f}) differs from JIDT estimate '
        '({1:0.6f}).'.format(omnibus_mi, jidt_mi))
    assert np.isclose(omnibus_mi, expected_mi, rtol=0.05), (
        'Zero-lag omnibus MI ({0:0.6f}) differs from expected MI '
        '({1:0.6f}).'.format(omnibus_mi, expected_mi))
Code example #8
File: test_bivariate_mi.py  Project: monperrus/IDTxl
def test_analyse_network():
    """Test method for full network analysis."""
    n_processes = 5  # the MuTE network has 5 nodes
    data = Data(seed=SEED)
    data.generate_mute_data(10, 5)
    settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'n_perm_max_stat': 21,
        'n_perm_min_stat': 21,
        'n_perm_max_seq': 21,
        'n_perm_omnibus': 30,
        'max_lag_sources': 5,
        'min_lag_sources': 4
    }
    nw = BivariateMI()

    # Test all to all analysis
    results = nw.analyse_network(settings, data, targets='all', sources='all')
    targets_analysed = results.targets_analysed
    sources = np.arange(n_processes)
    assert all(np.array(targets_analysed) == np.arange(n_processes)), (
        'Network analysis did not run on all targets.')
    for t in targets_analysed:
        s = np.array(list(set(sources) - set([t])))
        assert all(np.array(results._single_target[t].sources_tested) == s), (
            'Network analysis did not run on all sources for target '
            '{0}'.format(t))
    # Test analysis for subset of targets
    target_list = [1, 2, 3]
    results = nw.analyse_network(settings,
                                 data,
                                 targets=target_list,
                                 sources='all')
    targets_analysed = results.targets_analysed
    assert all(np.array(targets_analysed) == np.array(target_list)), (
        'Network analysis did not run on correct subset of targets.')
    for t in targets_analysed:
        s = np.array(list(set(sources) - set([t])))
        assert all(np.array(results._single_target[t].sources_tested) == s), (
            'Network analysis did not run on all sources for target '
            '{0}'.format(t))

    # Test analysis for subset of sources
    source_list = [1, 2, 3]
    target_list = [0, 4]
    results = nw.analyse_network(settings,
                                 data,
                                 targets=target_list,
                                 sources=source_list)
    targets_analysed = results.targets_analysed
    assert all(np.array(targets_analysed) == np.array(target_list)), (
        'Network analysis did not run for all targets.')
    for t in targets_analysed:
        assert all(
            results._single_target[t].sources_tested == np.array(source_list)
        ), ('Network analysis did not run on the correct subset '
            'of sources for target {0}'.format(t))
Code example #9
def test_analyse_network():
    """Test method for full network analysis."""
    n_processes = 5  # the MuTE network has 5 nodes
    data = Data()
    data.generate_mute_data(10, 5)
    settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'n_perm_max_stat': 21,
        'n_perm_max_seq': 21,
        'n_perm_omnibus': 30,
        'max_lag_sources': 5,
        'min_lag_sources': 4}
    nw = BivariateMI()

    # Test all to all analysis
    results = nw.analyse_network(settings, data, targets='all', sources='all')
    targets_analysed = results.targets_analysed
    sources = np.arange(n_processes)
    assert all(np.array(targets_analysed) == np.arange(n_processes)), (
        'Network analysis did not run on all targets.')
    for t in targets_analysed:
        s = np.array(list(set(sources) - set([t])))
        assert all(np.array(results._single_target[t].sources_tested) == s), (
            'Network analysis did not run on all sources for target '
            '{0}'.format(t))
    # Test analysis for subset of targets
    target_list = [1, 2, 3]
    results = nw.analyse_network(settings, data, targets=target_list,
                                 sources='all')
    targets_analysed = results.targets_analysed
    assert all(np.array(targets_analysed) == np.array(target_list)), (
        'Network analysis did not run on correct subset of targets.')
    for t in targets_analysed:
        s = np.array(list(set(sources) - set([t])))
        assert all(np.array(results._single_target[t].sources_tested) == s), (
            'Network analysis did not run on all sources for target '
            '{0}'.format(t))

    # Test analysis for subset of sources
    source_list = [1, 2, 3]
    target_list = [0, 4]
    results = nw.analyse_network(settings, data, targets=target_list,
                                 sources=source_list)
    targets_analysed = results.targets_analysed
    assert all(np.array(targets_analysed) == np.array(target_list)), (
        'Network analysis did not run for all targets.')
    for t in targets_analysed:
        assert all(results._single_target[t].sources_tested ==
                   np.array(source_list)), (
            'Network analysis did not run on the correct subset '
            'of sources for target {0}'.format(t))
Code example #10
def test_define_candidates():
    """Test candidate definition from a list of procs and a list of samples."""
    target = 1
    tau_sources = 3
    max_lag_sources = 10
    current_val = (target, 10)
    procs = [target]
    samples = np.arange(current_val[1] - 1, current_val[1] - max_lag_sources,
                        -tau_sources)
    nw = BivariateMI()
    candidates = nw._define_candidates(procs, samples)
    assert (1, 9) in candidates, 'Sample missing from candidates: (1, 9).'
    assert (1, 6) in candidates, 'Sample missing from candidates: (1, 6).'
    assert (1, 3) in candidates, 'Sample missing from candidates: (1, 3).'
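`_define_candidates` simply pairs every candidate process with every candidate sample index. Below is a minimal standalone sketch of that combination step using plain itertools (the real method additionally honours `add_conditionals`, as later examples on this page show):

import itertools as it
import numpy as np

def define_candidates_sketch(processes, samples):
    # Build (process, sample) tuples for all candidate variables.
    return [candidate for candidate in it.product(processes, samples)]

target = 1
tau_sources = 3
max_lag_sources = 10
current_val = (target, 10)
samples = [int(s) for s in np.arange(
    current_val[1] - 1, current_val[1] - max_lag_sources, -tau_sources)]
print(define_candidates_sketch([target], samples))
# -> [(1, 9), (1, 6), (1, 3)]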
Code example #11
def test_faes_method():
    """Check if the Faes method is working."""
    settings = {'cmi_estimator': 'JidtKraskovCMI',
                'add_conditionals': 'faes',
                'max_lag_sources': 5,
                'min_lag_sources': 3}
    nw_1 = BivariateMI()
    data = Data()
    data.generate_mute_data()
    sources = [1, 2, 3]
    target = 0
    nw_1._initialise(settings, data, sources, target)
    assert (nw_1._selected_vars_sources ==
            [i for i in it.product(sources, [nw_1.current_value[1]])]), (
                'Did not add correct additional conditioning vars.')
Code example #12
def getAnalysisClass(methodname):
    # Initialise analysis object
    if methodname == "BivariateMI": return BivariateMI()
    elif methodname == "MultivariateMI": return MultivariateMI()
    elif methodname == "BivariateTE": return BivariateTE()
    elif methodname == "MultivariateTE": return MultivariateTE()
    else:
        raise ValueError("Unexpected method", methodname)
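The if/elif chain above works, but a dictionary lookup is a common, more compact alternative. A sketch assuming the four analysis classes are importable from their usual IDTxl modules:

from idtxl.bivariate_mi import BivariateMI
from idtxl.bivariate_te import BivariateTE
from idtxl.multivariate_mi import MultivariateMI
from idtxl.multivariate_te import MultivariateTE

_ANALYSIS_CLASSES = {
    'BivariateMI': BivariateMI,
    'MultivariateMI': MultivariateMI,
    'BivariateTE': BivariateTE,
    'MultivariateTE': MultivariateTE,
}

def get_analysis_class(methodname):
    # Look up the constructor and instantiate; fail loudly on unknown names.
    try:
        return _ANALYSIS_CLASSES[methodname]()
    except KeyError:
        raise ValueError('Unexpected method', methodname)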
Code example #13
def test_define_candidates():
    """Test candidate definition from a list of procs and a list of samples."""
    target = 1
    tau_sources = 3
    max_lag_sources = 10
    current_val = (target, 10)
    procs = [target]
    samples = np.arange(current_val[1] - 1, current_val[1] - max_lag_sources,
                        -tau_sources)
    # Test if candidates that are added manually to the conditioning set are
    # removed from the candidate set.
    nw = BivariateMI()
    nw.current_value = current_val
    settings = [
        {'add_conditionals': None},
        {'add_conditionals': (2, 3)},
        {'add_conditionals': [(2, 3), (4, 1)]},
        {'add_conditionals': [(1, 9)]},
        {'add_conditionals': [(1, 9), (2, 3), (4, 1)]}]
    for s in settings:
        nw.settings = s
        candidates = nw._define_candidates(procs, samples)
        assert (1, 9) in candidates, 'Sample missing from candidates: (1, 9).'
        assert (1, 6) in candidates, 'Sample missing from candidates: (1, 6).'
        assert (1, 3) in candidates, 'Sample missing from candidates: (1, 3).'
        if s['add_conditionals'] is not None:
            if type(s['add_conditionals']) is tuple:
                cond_ind = nw._lag_to_idx([s['add_conditionals']])
            else:
                cond_ind = nw._lag_to_idx(s['add_conditionals'])
            for c in cond_ind:
                assert c not in candidates, (
                    'Sample added erroneously to candidates: {}.'.format(c))
Code example #14
File: test_bivariate_mi.py  Project: monperrus/IDTxl
def test_add_conditional_manually():
    """Enforce the conditioning on additional variables."""
    settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'max_lag_sources': 5,
        'min_lag_sources': 3
    }
    nw = BivariateMI()
    data = Data(seed=SEED)
    data.generate_mute_data()

    # Add a conditional with a lag bigger than the max_lag requested above
    settings['add_conditionals'] = (8, 0)
    with pytest.raises(IndexError):
        nw.analyse_single_target(settings=settings, data=data, target=0)

    # Add valid conditionals and test if they were added
    settings['add_conditionals'] = [(0, 1), (1, 3)]
    nw._initialise(settings=settings, data=data, target=0, sources=[1, 2])
    # Get the list of conditionals after initialisation and convert absolute
    # samples back to lags for comparison.
    cond_list = nw._idx_to_lag(nw.selected_vars_full)
    assert settings['add_conditionals'][0] in cond_list, (
        'First enforced conditional is missing from results.')
    assert settings['add_conditionals'][1] in cond_list, (
        'Second enforced conditional is missing from results.')
Code example #15
File: test_bivariate_mi.py  Project: monperrus/IDTxl
def test_gauss_data():
    """Test bivariate MI estimation from correlated Gaussians."""
    # Generate data and add a delay of one sample.
    expected_mi, source, source_uncorr, target = _get_gauss_data()
    source = source[1:]
    source_uncorr = source_uncorr[1:]
    target = target[:-1]
    data = Data(np.hstack((source, source_uncorr, target)),
                dim_order='sp',
                normalise=False,
                seed=SEED)
    settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'n_perm_max_stat': 21,
        'n_perm_min_stat': 21,
        'n_perm_max_seq': 21,
        'n_perm_omnibus': 21,
        'max_lag_sources': 1,
        'min_lag_sources': 1
    }
    nw = BivariateMI()
    results = nw.analyse_single_target(settings,
                                       data,
                                       target=2,
                                       sources=[0, 1])
    mi = results.get_single_target(2, fdr=False)['mi'][0]
    sources = results.get_target_sources(2, fdr=False)

    # Assert that only the correlated source was detected.
    assert len(sources) == 1, 'Wrong no. inferred sources: {0}.'.format(
        len(sources))
    assert sources[0] == 0, 'Wrong inferred source: {0}.'.format(sources[0])
    # Compare BivariateMI() estimate to JIDT estimate.
    est = JidtKraskovMI({'lag_mi': 1, 'normalise': False})
    jidt_mi = est.estimate(var1=source, var2=target)
    print('Estimated MI: {0:0.6f}, estimated MI using JIDT core estimator: '
          '{1:0.6f} (expected: ~ {2:0.6f}).'.format(mi, jidt_mi, expected_mi))
    assert np.isclose(mi, jidt_mi, atol=0.005), (
        'Estimated MI {0:0.6f} differs from JIDT estimate {1:0.6f} (expected: '
        'MI {2:0.6f}).'.format(mi, jidt_mi, expected_mi))
    assert np.isclose(mi, expected_mi, atol=0.05), (
        'Estimated MI {0:0.6f} differs from expected MI {1:0.6f}.'.format(
            mi, expected_mi))
Code example #16
def test_discrete_input():
    """Test bivariate MI estimation from discrete data."""
    # Generate Gaussian test data
    covariance = 0.4
    n = 10000
    delay = 1
    source = np.random.normal(0, 1, size=n)
    target = (covariance * source +
              (1 - covariance) * np.random.normal(0, 1, size=n))
    corr_expected = covariance / (1 * np.sqrt(covariance**2 +
                                              (1 - covariance)**2))
    expected_mi = calculate_mi(corr_expected)
    source = source[delay:]
    target = target[:-delay]

    # Discretise data
    settings = {'discretise_method': 'equal', 'n_discrete_bins': 5}
    est = JidtDiscreteCMI(settings)
    source_dis, target_dis = est._discretise_vars(var1=source, var2=target)
    data = Data(np.vstack((source_dis, target_dis)),
                dim_order='ps',
                normalise=False)
    settings = {
        'cmi_estimator': 'JidtDiscreteCMI',
        'discretise_method': 'none',
        'n_discrete_bins': 5,  # alphabet size of the variables analysed
        'n_perm_max_stat': 21,
        'n_perm_omnibus': 30,
        'n_perm_max_seq': 30,
        'min_lag_sources': 1,
        'max_lag_sources': 2
    }
    nw = BivariateMI()
    res = nw.analyse_single_target(settings=settings, data=data, target=1)
    assert np.isclose(
        res._single_target[1].omnibus_mi, expected_mi, atol=0.05), (
            'Estimated MI for discrete variables is not correct. Expected: '
            '{0}, Actual results: {1}.'.format(
                expected_mi, res._single_target[1].omnibus_mi))
Code example #17
def test_discrete_input():
    """Test bivariate MI estimation from discrete data."""
    # Generate Gaussian test data
    covariance = 0.4
    n = 10000
    delay = 1
    source = np.random.normal(0, 1, size=n)
    target = (covariance * source + (1 - covariance) *
              np.random.normal(0, 1, size=n))
    corr_expected = covariance / (
        1 * np.sqrt(covariance**2 + (1-covariance)**2))
    expected_mi = calculate_mi(corr_expected)
    source = source[delay:]
    target = target[:-delay]

    # Discretise data
    settings = {'discretise_method': 'equal',
                'n_discrete_bins': 5}
    est = JidtDiscreteCMI(settings)
    source_dis, target_dis = est._discretise_vars(var1=source, var2=target)
    data = Data(np.vstack((source_dis, target_dis)),
                dim_order='ps', normalise=False)
    settings = {
        'cmi_estimator': 'JidtDiscreteCMI',
        'discretise_method': 'none',
        'n_discrete_bins': 5,  # alphabet size of the variables analysed
        'n_perm_max_stat': 21,
        'n_perm_omnibus': 30,
        'n_perm_max_seq': 30,
        'min_lag_sources': 1,
        'max_lag_sources': 2}
    nw = BivariateMI()
    res = nw.analyse_single_target(settings=settings, data=data, target=1)
    assert np.isclose(
        res._single_target[1].omnibus_mi, expected_mi, atol=0.05), (
            'Estimated MI for discrete variables is not correct. Expected: '
            '{0}, Actual results: {1}.'.format(
                expected_mi, res._single_target[1].omnibus_mi))
Code example #18
def analyse_discrete_data():
    """Run network inference on discrete data."""
    data = generate_discrete_data()
    settings = {
        'cmi_estimator': 'JidtDiscreteCMI',
        'discretise_method': 'none',
        'n_discrete_bins': 5,  # alphabet size of the variables analysed
        'min_lag_sources': 1,
        'max_lag_sources': 3,
        'max_lag_target': 1
    }

    nw = MultivariateTE()
    res = nw.analyse_network(settings=settings, data=data)
    pickle.dump(
        res,
        open(
            '{0}discrete_results_mte_{1}.p'.format(path,
                                                   settings['cmi_estimator']),
            'wb'))

    nw = BivariateTE()
    res = nw.analyse_network(settings=settings, data=data)
    pickle.dump(
        res,
        open(
            '{0}discrete_results_bte_{1}.p'.format(path,
                                                   settings['cmi_estimator']),
            'wb'))

    nw = MultivariateMI()
    res = nw.analyse_network(settings=settings, data=data)
    pickle.dump(
        res,
        open(
            '{0}discrete_results_mmi_{1}.p'.format(path,
                                                   settings['cmi_estimator']),
            'wb'))

    nw = BivariateMI()
    res = nw.analyse_network(settings=settings, data=data)
    pickle.dump(
        res,
        open(
            '{0}discrete_results_bmi_{1}.p'.format(path,
                                                   settings['cmi_estimator']),
            'wb'))
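The file handles opened by `open(...)` above are never explicitly closed. A context manager closes them deterministically; below is a sketch of the same save step, keeping the file-naming scheme used above (the `path` prefix is assumed to be defined by the surrounding script):

import pickle

def save_results(res, path, analysis_tag, estimator):
    # Same naming scheme as above, e.g. '<path>discrete_results_bmi_<est>.p',
    # but the file handle is closed as soon as the dump completes.
    fname = '{0}discrete_results_{1}_{2}.p'.format(
        path, analysis_tag, estimator)
    with open(fname, 'wb') as f:
        pickle.dump(res, f)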
Code example #19
def analyse_continuous_data():
    """Run network inference on continuous data."""
    data = generate_continuous_data()
    settings = {
        'min_lag_sources': 1,
        'max_lag_sources': 3,
        'max_lag_target': 1
    }

    nw = MultivariateTE()
    for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
        settings['cmi_estimator'] = estimator
        res = nw.analyse_network(settings=settings, data=data)
        pickle.dump(
            res,
            open('{0}continuous_results_mte_{1}.p'.format(path, estimator),
                 'wb'))

    nw = BivariateTE()
    for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
        settings['cmi_estimator'] = estimator
        res = nw.analyse_network(settings=settings, data=data)
        pickle.dump(
            res,
            open('{0}continuous_results_bte_{1}.p'.format(path, estimator),
                 'wb'))

    nw = MultivariateMI()
    for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
        settings['cmi_estimator'] = estimator
        res = nw.analyse_network(settings=settings, data=data)
        pickle.dump(
            res,
            open('{0}continuous_results_mmi_{1}.p'.format(path, estimator),
                 'wb'))

    nw = BivariateMI()
    for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
        settings['cmi_estimator'] = estimator
        res = nw.analyse_network(settings=settings, data=data)
        pickle.dump(
            res,
            open('{0}continuous_results_bmi_{1}.p'.format(path, estimator),
                 'wb'))
Code example #20
def test_define_candidates():
    """Test candidate definition from a list of procs and a list of samples."""
    target = 1
    tau_sources = 3
    max_lag_sources = 10
    current_val = (target, 10)
    procs = [target]
    samples = np.arange(current_val[1] - 1, current_val[1] - max_lag_sources,
                        -tau_sources)
    # Test if candidates that are added manually to the conditioning set are
    # removed from the candidate set.
    nw = BivariateMI()
    settings = [
        {'add_conditionals': None},
        {'add_conditionals': (2, 3)},
        {'add_conditionals': [(2, 3), (4, 1)]}]
    for s in settings:
        nw.settings = s
        candidates = nw._define_candidates(procs, samples)
        assert (1, 9) in candidates, 'Sample missing from candidates: (1, 9).'
        assert (1, 6) in candidates, 'Sample missing from candidates: (1, 6).'
        assert (1, 3) in candidates, 'Sample missing from candidates: (1, 3).'

    settings = [
        {'add_conditionals': [(1, 9)]},
        {'add_conditionals': [(1, 9), (2, 3), (4, 1)]}]
    for s in settings:
        nw.settings = s
        candidates = nw._define_candidates(procs, samples)
    assert (1, 9) not in candidates, (
        'Sample (1, 9) was not removed from candidates.')
    assert (1, 6) in candidates, 'Sample missing from candidates: (1, 6).'
    assert (1, 3) in candidates, 'Sample missing from candidates: (1, 3).'
Code example #21
def test_add_conditional_manually():
    """Enforce the conditioning on additional variables."""
    settings = {'cmi_estimator': 'JidtKraskovCMI',
                'max_lag_sources': 5,
                'min_lag_sources': 3}
    nw = BivariateMI()
    data = Data()
    data.generate_mute_data()

    # Add a conditional with a lag bigger than the max_lag requested above
    settings['add_conditionals'] = (8, 0)
    with pytest.raises(IndexError):
        nw.analyse_single_target(settings=settings, data=data, target=0)

    # Add valid conditionals and test if they were added
    settings['add_conditionals'] = [(0, 1), (1, 3)]
    nw._initialise(settings=settings, data=data, target=0, sources=[1, 2])
    # Get the list of conditionals after initialisation and convert absolute
    # samples back to lags for comparison.
    cond_list = nw._idx_to_lag(nw.selected_vars_full)
    assert settings['add_conditionals'][0] in cond_list, (
        'First enforced conditional is missing from results.')
    assert settings['add_conditionals'][1] in cond_list, (
        'Second enforced conditional is missing from results.')
Code example #22
def test_return_local_values():
    """Test estimation of local values."""
    max_lag = 5
    data = Data()
    data.generate_mute_data(200, 5)
    settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'local_values': True,  # request calculation of local values
        'n_perm_max_stat': 21,
        'n_perm_min_stat': 21,
        'n_perm_max_seq': 21,
        'n_perm_omnibus': 21,
        'max_lag_sources': max_lag,
        'min_lag_sources': 4,
        'max_lag_target': max_lag}
    target = 1
    mi = BivariateMI()
    results_local = mi.analyse_network(settings, data, targets=[target])

    lmi = results_local.get_single_target(target, fdr=False)['mi']
    n_sources = len(results_local.get_target_sources(target, fdr=False))
    assert type(lmi) is np.ndarray, (
        'LMI estimation did not return an array of values: {0}'.format(lmi))
    assert lmi.shape[0] == n_sources, (
        'Wrong dim (no. sources) in LMI estimate: {0}'.format(lmi.shape))
    assert lmi.shape[1] == data.n_realisations_samples((0, max_lag)), (
        'Wrong dim (no. samples) in LMI estimate: {0}'.format(lmi.shape))
    assert lmi.shape[2] == data.n_replications, (
        'Wrong dim (no. replications) in LMI estimate: {0}'.format(lmi.shape))

    # Test for correctness of single-link MI estimation by comparing it to
    # the MI between single variables and the target. For this test case,
    # where we find only one significant past variable per source, the two
    # should be the same. Also compare the single-link average MI to the mean
    # local MI for each link.
    settings['local_values'] = False
    results_avg = mi.analyse_network(settings, data, targets=[target])
    mi_single_link = results_avg.get_single_target(target, fdr=False)['mi']
    mi_selected_sources = results_avg.get_single_target(
        target, fdr=False)['selected_sources_mi']
    sources_local = results_local.get_target_sources(target, fdr=False)
    sources_avg = results_avg.get_target_sources(target, fdr=False)
    assert np.isclose(mi_single_link, mi_selected_sources, atol=0.005).all(), (
        'Single link average MI {0} and single source MI {1} deviate.'.format(
                mi_single_link, mi_selected_sources))
    # Check if average and local values are the same. Test each source
    # separately. Inferred sources may differ between the two calls to
    # analyse_network() due to low number of surrogates used in unit testing.
    print('Compare average and local values.')
    for s in list(set(sources_avg).intersection(sources_local)):
        i1 = np.where(sources_avg == s)[0][0]
        i2 = np.where(sources_local == s)[0][0]
        assert np.isclose(
            mi_single_link[i1], np.mean(lmi[i2, :, :]), atol=0.005), (
                'Single link average MI {0:0.6f} and mean LMI {1:0.6f} '
                'deviate.'.format(mi_single_link[i1], np.mean(lmi[i2, :, :])))
        assert np.isclose(
            mi_single_link[i1], mi_selected_sources[i1], atol=0.005), (
                'Single link average MI {0:0.6f} and single source MI '
                '{1:0.6f} deviate.'.format(
                    mi_single_link[i1], mi_selected_sources[i1]))
Code example #23
File: test_bivariate_mi.py  Project: monperrus/IDTxl
def test_check_source_set():
    """Test the method _check_source_set.

    This method sets the list of source processes from which candidates are
    taken for multivariate MI estimation.
    """
    data = Data(seed=SEED)
    data.generate_mute_data(100, 5)
    nw = BivariateMI()
    nw.settings = {'verbose': True}
    # Add list of sources.
    sources = [1, 2, 3]
    nw._check_source_set(sources, data.n_processes)
    assert nw.source_set == sources, 'Sources were not added correctly.'

    # Assert that initialisation fails if the target is also in the source list
    sources = [0, 1, 2, 3]
    nw.target = 0
    with pytest.raises(RuntimeError):
        nw._check_source_set(sources=[0, 1, 2, 3],
                             n_processes=data.n_processes)

    # Test if a single source (not a list) is added correctly.
    sources = 1
    nw._check_source_set(sources, data.n_processes)
    assert (type(nw.source_set) is list)

    # Test if 'all' is handled correctly
    nw.target = 0
    nw._check_source_set('all', data.n_processes)
    assert nw.source_set == [1, 2, 3, 4], 'Sources were not added correctly.'

    # Test invalid inputs.
    with pytest.raises(RuntimeError):  # sources greater than no. procs
        nw._check_source_set(8, data.n_processes)
    with pytest.raises(RuntimeError):  # negative value as source
        nw._check_source_set(-3, data.n_processes)
Code example #24

# %load ./IDTxl/demos/demo_bivariate_mi.py
# Import classes
from idtxl.bivariate_mi import BivariateMI
from idtxl.data import Data
from idtxl.visualise_graph import plot_network
import matplotlib.pyplot as plt

# a) Generate test data
data = Data()
data.generate_mute_data(n_samples=1000, n_replications=5)

# b) Initialise analysis object and define settings
network_analysis = BivariateMI()
settings = {
    'cmi_estimator': 'JidtGaussianCMI',
    'max_lag_sources': 5,
    'min_lag_sources': 1
}

# c) Run analysis
results = network_analysis.analyse_network(settings=settings, data=data)

# d) Plot inferred network to console and via matplotlib
results.print_edge_list(weights='max_te_lag', fdr=False)
plot_network(results=results, weights='max_te_lag', fdr=False)
plt.show()

# ## Network inference implementation details
Code example #25
# Import classes
from idtxl.bivariate_mi import BivariateMI
from idtxl.data import Data
from idtxl.visualise_graph import plot_network
import matplotlib.pyplot as plt

# a) Generate test data
data = Data()
data.generate_mute_data(n_samples=1000, n_replications=5)

# b) Initialise analysis object and define settings
network_analysis = BivariateMI()
settings = {'cmi_estimator': 'JidtGaussianCMI',
            'max_lag_sources': 5,
            'min_lag_sources': 1}

# c) Run analysis
results = network_analysis.analyse_network(settings=settings, data=data)

# d) Plot inferred network to console and via matplotlib
results.print_edge_list(weights='max_te_lag', fdr=False)
plot_network(results=results, weights='max_te_lag', fdr=False)
plt.show()
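A possible follow-up to the demo above: IDTxl results objects also expose the inferred links as an adjacency matrix, which is convenient for downstream processing. A short sketch, assuming the `results` object from the demo run:

# Binary adjacency matrix of significant links (no FDR correction, as above).
adj_matrix = results.get_adjacency_matrix(weights='binary', fdr=False)
print(adj_matrix)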
Code example #26
def test_bivariate_mi_init():
    """Test instance creation for BivariateMI class."""
    # Test error on missing estimator
    settings = {
        'n_perm_max_stat': 21,
        'n_perm_omnibus': 30,
        'n_perm_max_seq': 30,
        'max_lag_sources': 7,
        'min_lag_sources': 2}
    nw = BivariateMI()
    with pytest.raises(RuntimeError):
        nw.analyse_single_target(settings=settings, data=Data(), target=1)

    # Test setting of min and max lags
    settings['cmi_estimator'] = 'JidtKraskovCMI'
    data = Data()
    data.generate_mute_data(n_samples=10, n_replications=5)

    # Invalid: min lag sources bigger than max lag
    settings['min_lag_sources'] = 8
    settings['max_lag_sources'] = 7
    with pytest.raises(RuntimeError):
        nw.analyse_single_target(settings=settings, data=data, target=1)

    # Invalid: taus bigger than lags
    settings['min_lag_sources'] = 2
    settings['max_lag_sources'] = 4
    settings['tau_sources'] = 10
    with pytest.raises(RuntimeError):
        nw.analyse_single_target(settings=settings, data=data, target=1)

    # Invalid: negative lags or taus
    settings['tau_sources'] = 1
    settings['min_lag_sources'] = 1
    settings['max_lag_sources'] = -7
    with pytest.raises(RuntimeError):
        nw.analyse_single_target(settings=settings, data=data, target=1)
    settings['max_lag_sources'] = 7
    settings['min_lag_sources'] = -4
    with pytest.raises(RuntimeError):
        nw.analyse_single_target(settings=settings, data=data, target=1)
    settings['min_lag_sources'] = 4
    settings['tau_sources'] = -1
    with pytest.raises(RuntimeError):
        nw.analyse_single_target(settings=settings, data=data, target=1)

    # Invalid: lags or taus are not integers
    settings['tau_sources'] = 1
    settings['min_lag_sources'] = 1.5
    with pytest.raises(RuntimeError):
        nw.analyse_single_target(settings=settings, data=data, target=1)
    settings['min_lag_sources'] = 1
    settings['max_lag_sources'] = 1.5
    with pytest.raises(RuntimeError):
        nw.analyse_single_target(settings=settings, data=data, target=1)
    settings['max_lag_sources'] = 7
    settings['tau_sources'] = 1.5
    with pytest.raises(RuntimeError):
        nw.analyse_single_target(settings=settings, data=data, target=1)
    settings['tau_sources'] = 1

    # Invalid: sources or target is not an int
    with pytest.raises(RuntimeError):  # not an int
        nw.analyse_single_target(settings=settings, data=data, target=1.5)
    with pytest.raises(RuntimeError):  # negative
        nw.analyse_single_target(settings=settings, data=data, target=-1)
    with pytest.raises(RuntimeError):  # not in data
        nw.analyse_single_target(settings=settings, data=data, target=10)
    with pytest.raises(RuntimeError):  # wrong type
        nw.analyse_single_target(settings=settings, data=data, target={})
    with pytest.raises(RuntimeError):  # negative
        nw.analyse_single_target(settings=settings, data=data, target=0,
                                 sources=-1)
    with pytest.raises(RuntimeError):   # negative
        nw.analyse_single_target(settings=settings, data=data, target=0,
                                 sources=[-1])
    with pytest.raises(RuntimeError):  # not in data
        nw.analyse_single_target(settings=settings, data=data, target=0,
                                 sources=20)
    with pytest.raises(RuntimeError):  # not in data
        nw.analyse_single_target(settings=settings, data=data, target=0,
                                 sources=[20])
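The long series of pytest.raises checks above could also be written as a parametrized test, one case per invalid setting. A sketch of that pattern, reusing the setting values from the test above:

import pytest
from idtxl.bivariate_mi import BivariateMI
from idtxl.data import Data

@pytest.mark.parametrize('overrides', [
    {'min_lag_sources': 8, 'max_lag_sources': 7},  # min lag > max lag
    {'tau_sources': 10},                           # tau bigger than lags
    {'max_lag_sources': -7},                       # negative lag
    {'min_lag_sources': 1.5},                      # non-integer lag
])
def test_invalid_lag_settings(overrides):
    settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'n_perm_max_stat': 21,
        'n_perm_omnibus': 30,
        'n_perm_max_seq': 30,
        'max_lag_sources': 7,
        'min_lag_sources': 2,
        'tau_sources': 1,
    }
    settings.update(overrides)
    data = Data()
    data.generate_mute_data(n_samples=10, n_replications=5)
    with pytest.raises(RuntimeError):
        BivariateMI().analyse_single_target(
            settings=settings, data=data, target=1)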
Code example #27
File: test_bivariate_mi.py  Project: monperrus/IDTxl
def test_bivariate_mi_init():
    """Test instance creation for BivariateMI class."""
    # Test error on missing estimator
    settings = {
        'n_perm_max_stat': 21,
        'n_perm_omnibus': 30,
        'n_perm_max_seq': 30,
        'max_lag_sources': 7,
        'min_lag_sources': 2
    }
    nw = BivariateMI()
    with pytest.raises(RuntimeError):
        nw.analyse_single_target(settings=settings, data=Data(), target=1)

    # Test setting of min and max lags
    settings['cmi_estimator'] = 'JidtKraskovCMI'
    data = Data(seed=SEED)
    data.generate_mute_data(n_samples=10, n_replications=5)

    # Invalid: min lag sources bigger than max lag
    settings['min_lag_sources'] = 8
    settings['max_lag_sources'] = 7
    with pytest.raises(RuntimeError):
        nw.analyse_single_target(settings=settings, data=data, target=1)

    # Invalid: taus bigger than lags
    settings['min_lag_sources'] = 2
    settings['max_lag_sources'] = 4
    settings['tau_sources'] = 10
    with pytest.raises(RuntimeError):
        nw.analyse_single_target(settings=settings, data=data, target=1)

    # Invalid: negative lags or taus
    settings['tau_sources'] = 1
    settings['min_lag_sources'] = 1
    settings['max_lag_sources'] = -7
    with pytest.raises(RuntimeError):
        nw.analyse_single_target(settings=settings, data=data, target=1)
    settings['max_lag_sources'] = 7
    settings['min_lag_sources'] = -4
    with pytest.raises(RuntimeError):
        nw.analyse_single_target(settings=settings, data=data, target=1)
    settings['min_lag_sources'] = 4
    settings['tau_sources'] = -1
    with pytest.raises(RuntimeError):
        nw.analyse_single_target(settings=settings, data=data, target=1)

    # Invalid: lags or taus are not integers
    settings['tau_sources'] = 1
    settings['min_lag_sources'] = 1.5
    with pytest.raises(RuntimeError):
        nw.analyse_single_target(settings=settings, data=data, target=1)
    settings['min_lag_sources'] = 1
    settings['max_lag_sources'] = 1.5
    with pytest.raises(RuntimeError):
        nw.analyse_single_target(settings=settings, data=data, target=1)
    settings['max_lag_sources'] = 7
    settings['tau_sources'] = 1.5
    with pytest.raises(RuntimeError):
        nw.analyse_single_target(settings=settings, data=data, target=1)
    settings['tau_sources'] = 1

    # Invalid: sources or target is not an int
    with pytest.raises(RuntimeError):  # not an int
        nw.analyse_single_target(settings=settings, data=data, target=1.5)
    with pytest.raises(RuntimeError):  # negative
        nw.analyse_single_target(settings=settings, data=data, target=-1)
    with pytest.raises(RuntimeError):  # not in data
        nw.analyse_single_target(settings=settings, data=data, target=10)
    with pytest.raises(RuntimeError):  # wrong type
        nw.analyse_single_target(settings=settings, data=data, target={})
    with pytest.raises(RuntimeError):  # negative
        nw.analyse_single_target(settings=settings,
                                 data=data,
                                 target=0,
                                 sources=-1)
    with pytest.raises(RuntimeError):  # negative
        nw.analyse_single_target(settings=settings,
                                 data=data,
                                 target=0,
                                 sources=[-1])
    with pytest.raises(RuntimeError):  # not in data
        nw.analyse_single_target(settings=settings,
                                 data=data,
                                 target=0,
                                 sources=20)
    with pytest.raises(RuntimeError):  # not in data
        nw.analyse_single_target(settings=settings,
                                 data=data,
                                 target=0,
                                 sources=[20])
Code example #28
File: information_dynamics.py  Project: LNov/infonet
def infer_network(network_inference,
                  time_series,
                  parallel_target_analysis=False):
    # Define parameter options dictionaries
    network_inference_algorithms = pd.DataFrame()
    network_inference_algorithms['Description'] = pd.Series({
        'bMI_greedy': 'Bivariate Mutual Information via greedy algorithm',
        'bTE_greedy': 'Bivariate Transfer Entropy via greedy algorithm',
        'mMI_greedy': 'Multivariate Mutual Information via greedy algorithm',
        'mTE_greedy': 'Multivariate Transfer Entropy via greedy algorithm',
        'cross_corr': 'Cross-correlation thresholding algorithm'
    })
    network_inference_algorithms['Required parameters'] = pd.Series({
        'bMI_greedy': [
            'min_lag_sources', 'max_lag_sources', 'tau_sources', 'tau_target',
            'cmi_estimator', 'z_standardise', 'permute_in_time',
            'n_perm_max_stat', 'n_perm_min_stat', 'n_perm_omnibus',
            'n_perm_max_seq', 'fdr_correction', 'p_value'
            # 'alpha_max_stats',
            # 'alpha_min_stats',
            # 'alpha_omnibus',
            # 'alpha_max_seq',
            # 'alpha_fdr'
        ],
        'bTE_greedy': [
            'min_lag_sources', 'max_lag_sources', 'tau_sources',
            'max_lag_target', 'tau_target', 'cmi_estimator', 'z_standardise',
            'permute_in_time', 'n_perm_max_stat', 'n_perm_min_stat',
            'n_perm_omnibus', 'n_perm_max_seq', 'fdr_correction', 'p_value'
            # 'alpha_max_stats',
            # 'alpha_min_stats',
            # 'alpha_omnibus',
            # 'alpha_max_seq',
            # 'alpha_fdr'
        ],
        'mMI_greedy': [
            'min_lag_sources', 'max_lag_sources', 'tau_sources', 'tau_target',
            'cmi_estimator', 'z_standardise', 'permute_in_time',
            'n_perm_max_stat', 'n_perm_min_stat', 'n_perm_omnibus',
            'n_perm_max_seq', 'fdr_correction', 'p_value'
            # 'alpha_max_stats',
            # 'alpha_min_stats',
            # 'alpha_omnibus',
            # 'alpha_max_seq',
            # 'alpha_fdr'
        ],
        'mTE_greedy': [
            'min_lag_sources', 'max_lag_sources', 'tau_sources',
            'max_lag_target', 'tau_target', 'cmi_estimator', 'z_standardise',
            'permute_in_time', 'n_perm_max_stat', 'n_perm_min_stat',
            'n_perm_omnibus', 'n_perm_max_seq', 'fdr_correction', 'p_value'
            # 'alpha_max_stats',
            # 'alpha_min_stats',
            # 'alpha_omnibus',
            # 'alpha_max_seq',
            # 'alpha_fdr'
        ],
        'cross_corr': ['min_lag_sources', 'max_lag_sources']
    })
    try:
        # Ensure that a network inference algorithm has been specified
        if 'algorithm' not in network_inference:
            raise ParameterMissing('algorithm')
        # Ensure that the provided algorithm is implemented
        if network_inference.algorithm not in network_inference_algorithms.index:
            raise ParameterValue(network_inference.algorithm)
        # Ensure that all the parameters required by the algorithm have been provided
        par_required = network_inference_algorithms['Required parameters'][
            network_inference.algorithm]
        for par in par_required:
            if par not in network_inference:
                raise ParameterMissing(par)

    except ParameterMissing as e:
        print(e.msg, e.par_names)
        raise
    except ParameterValue as e:
        print(e.msg, e.par_value)
        raise

    else:
        nodes_n = np.shape(time_series)[0]

        can_be_z_standardised = True
        if network_inference.z_standardise:
            # Check if data can be normalised per process (assuming the
            # first dimension represents processes, as in the rest of the code)
            can_be_z_standardised = np.all(np.std(time_series, axis=1) > 0)
            if not can_be_z_standardised:
                print('Time series cannot be z-standardised')

        if len(time_series.shape) == 2:
            dim_order = 'ps'
        else:
            dim_order = 'psr'

        # Load the time series into an IDTxl Data object
        dat = Data(time_series,
                   dim_order=dim_order,
                   normalise=(network_inference.z_standardise
                              & can_be_z_standardised))

        algorithm = network_inference.algorithm
        if algorithm in [
                'bMI_greedy', 'mMI_greedy', 'bTE_greedy', 'mTE_greedy'
        ]:
            # Set analysis options
            if algorithm == 'bMI_greedy':
                network_analysis = BivariateMI()
            if algorithm == 'mMI_greedy':
                network_analysis = MultivariateMI()
            if algorithm == 'bTE_greedy':
                network_analysis = BivariateTE()
            if algorithm == 'mTE_greedy':
                network_analysis = MultivariateTE()

            settings = {
                'min_lag_sources': network_inference.min_lag_sources,
                'max_lag_sources': network_inference.max_lag_sources,
                'tau_sources': network_inference.tau_sources,
                'max_lag_target': network_inference.max_lag_target,
                'tau_target': network_inference.tau_target,
                'cmi_estimator': network_inference.cmi_estimator,
                'kraskov_k': network_inference.kraskov_k,
                'num_threads': network_inference.jidt_threads_n,
                'permute_in_time': network_inference.permute_in_time,
                'n_perm_max_stat': network_inference.n_perm_max_stat,
                'n_perm_min_stat': network_inference.n_perm_min_stat,
                'n_perm_omnibus': network_inference.n_perm_omnibus,
                'n_perm_max_seq': network_inference.n_perm_max_seq,
                'fdr_correction': network_inference.fdr_correction,
                'alpha_max_stat': network_inference.p_value,
                'alpha_min_stat': network_inference.p_value,
                'alpha_omnibus': network_inference.p_value,
                'alpha_max_seq': network_inference.p_value,
                'alpha_fdr': network_inference.p_value
            }

            # # Add optional settings
            # optional_settings_keys = {
            #     'config.debug',
            #     'config.max_mem_frac'
            # }

            # for key in optional_settings_keys:
            #     if traj.f_contains(key, shortcuts=True):
            #         key_last = key.rpartition('.')[-1]
            #         settings[key_last] = traj[key]
            #         print('Using optional setting \'{0}\'={1}'.format(
            #             key_last,
            #             traj[key])
            #         )

            if parallel_target_analysis:
                # Use SCOOP to create a generator of map results, each
                # corresponding to one map iteration
                res_iterator = futures.map_as_completed(
                    network_analysis.analyse_single_target,
                    itertools.repeat(settings, nodes_n),
                    itertools.repeat(dat, nodes_n), list(range(nodes_n)))
                # Run analysis
                res_list = list(res_iterator)
                if settings['fdr_correction']:
                    res = network_fdr({'alpha_fdr': settings['alpha_fdr']},
                                      *res_list)
                else:
                    res = res_list[0]
                    res.combine_results(*res_list[1:])
            else:
                # Run analysis
                res = network_analysis.analyse_network(settings=settings,
                                                       data=dat)
            return res

        else:
            raise ParameterValue(
                algorithm,
                msg='Network inference algorithm not yet implemented')
Code example #29
data += np.random.normal(0, 0.1, N_NODE * N_DATA).reshape((N_NODE, N_DATA))

#######################
# Run IDTxl
#######################
# a) Convert data to ITDxl format
dataIDTxl = Data(data, dim_order='ps')

# b) Initialise analysis object and define settings
N_METHOD = 3
title_lst = [
    "BivariateMI for shift-based data", "BivariateTE for shift-based data",
    "MultivariateTE for shift-based data"
]
method_lst = ['BivariateMI', 'BivariateTE', 'MultivariateTE']
network_analysis_lst = [BivariateMI(), BivariateTE(), MultivariateTE()]

settings = {
    'cmi_estimator': 'JidtGaussianCMI',
    'max_lag_sources': LAG_MAX,
    'min_lag_sources': LAG_MIN
}

# c) Run analysis
results_lst = [
    net_analysis.analyse_network(settings=settings, data=dataIDTxl)
    for net_analysis in network_analysis_lst
]

#######################
# Convert results to matrices and plot them
Code example #30
def test_check_source_set():
    """Test the method _check_source_set.

    This method sets the list of source processes from which candidates are
    taken for multivariate MI estimation.
    """
    data = Data()
    data.generate_mute_data(100, 5)
    nw = BivariateMI()
    nw.settings = {'verbose': True}
    # Add list of sources.
    sources = [1, 2, 3]
    nw._check_source_set(sources, data.n_processes)
    assert nw.source_set == sources, 'Sources were not added correctly.'

    # Assert that initialisation fails if the target is also in the source list
    sources = [0, 1, 2, 3]
    nw.target = 0
    with pytest.raises(RuntimeError):
        nw._check_source_set(sources=[0, 1, 2, 3],
                             n_processes=data.n_processes)

    # Test if a single source (not a list) is added correctly.
    sources = 1
    nw._check_source_set(sources, data.n_processes)
    assert (type(nw.source_set) is list)

    # Test if 'all' is handled correctly
    nw.target = 0
    nw._check_source_set('all', data.n_processes)
    assert nw.source_set == [1, 2, 3, 4], 'Sources were not added correctly.'

    # Test invalid inputs.
    with pytest.raises(RuntimeError):   # sources greater than no. procs
        nw._check_source_set(8, data.n_processes)
    with pytest.raises(RuntimeError):  # negative value as source
        nw._check_source_set(-3, data.n_processes)
Code example #31
# Load time series
dat = Data(time_series,
           dim_order=dim_order,
           normalise=(network_inference.z_standardise & can_be_z_standardised))

network_inference = traj.parameters.network_inference

print('network_inference.p_value = {0}\n'.format(network_inference.p_value))

algorithm = network_inference.algorithm
if algorithm in ['bMI', 'mMI_greedy', 'bTE_greedy', 'mTE_greedy']:
    # Set analysis options
    if algorithm == 'bMI':
        network_analysis = BivariateMI()
    if algorithm == 'mMI_greedy':
        network_analysis = MultivariateMI()
    if algorithm == 'bTE_greedy':
        network_analysis = BivariateTE()
    if algorithm == 'mTE_greedy':
        network_analysis = MultivariateTE()

    #settings['add_conditionals'] = [(35, 3), (26, 1), (17, 5), (25, 4)]

    # Add optional settings
    optional_settings_keys = {'config.debug', 'config.max_mem_frac'}

    for key in optional_settings_keys:
        if traj.f_contains(key, shortcuts=True):
            key_last = key.rpartition('.')[-1]
Code example #32
File: test_results.py  Project: monperrus/IDTxl
def test_results_network_inference():
    """Test results class for multivariate TE network inference."""
    covariance = 0.4
    n = 10000
    delay = 1
    normalisation = False
    source = np.random.normal(0, 1, size=n)
    target_1 = (covariance * source +
                (1 - covariance) * np.random.normal(0, 1, size=n))
    target_2 = (covariance * source +
                (1 - covariance) * np.random.normal(0, 1, size=n))
    corr_expected = covariance / np.sqrt(covariance**2 + (1 - covariance)**2)
    # calculate_mi is a helper from the test module; for a correlation r it
    # returns the Gaussian MI, -0.5 * log(1 - r**2).
    expected_mi = calculate_mi(corr_expected)
    source = source[delay:]
    target_1 = target_1[:-delay]
    target_2 = target_2[:-delay]

    # Discretise data for speed
    settings_dis = {'discretise_method': 'equal', 'n_discrete_bins': 5}
    est = JidtDiscreteCMI(settings_dis)
    source_dis, target_1_dis = est._discretise_vars(var1=source, var2=target_1)
    source_dis, target_2_dis = est._discretise_vars(var1=source, var2=target_2)
    data = Data(np.vstack((source_dis, target_1_dis, target_2_dis)),
                dim_order='ps',
                normalise=normalisation)
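
    # NOTE: 'settings' is used below but defined earlier in the original test
    # module. A plausible configuration (an assumption, mirroring the other
    # examples in this listing) would be:
    settings = {
        'cmi_estimator': 'JidtDiscreteCMI',
        'discretise_method': 'none',
        'n_discrete_bins': 5,
        'n_perm_max_stat': 21,
        'n_perm_min_stat': 21,
        'n_perm_max_seq': 21,
        'n_perm_omnibus': 21,
        'min_lag_sources': 1,
        'max_lag_sources': 2,
        'max_lag_target': 1
    }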

    nw = MultivariateTE()
    # TE - single target
    res_single_multi_te = nw.analyse_single_target(settings=settings,
                                                   data=data,
                                                   target=1)
    # TE whole network
    res_network_multi_te = nw.analyse_network(settings=settings, data=data)

    nw = BivariateTE()
    # TE - single target
    res_single_biv_te = nw.analyse_single_target(settings=settings,
                                                 data=data,
                                                 target=1)
    # TE whole network
    res_network_biv_te = nw.analyse_network(settings=settings, data=data)

    nw = MultivariateMI()
    # MI - single target
    res_single_multi_mi = nw.analyse_single_target(settings=settings,
                                                   data=data,
                                                   target=1)
    # MI whole network
    res_network_multi_mi = nw.analyse_network(settings=settings, data=data)

    nw = BivariateMI()
    # MI - single target
    res_single_biv_mi = nw.analyse_single_target(settings=settings,
                                                 data=data,
                                                 target=1)
    # MI whole network
    res_network_biv_mi = nw.analyse_network(settings=settings, data=data)

    res_te = [
        res_single_multi_te, res_network_multi_te, res_single_biv_te,
        res_network_biv_te
    ]
    res_mi = [
        res_single_multi_mi, res_network_multi_mi, res_single_biv_mi,
        res_network_biv_mi
    ]
    res_all = res_te + res_mi

    # Check estimated values
    for res in res_te:
        est_te = res._single_target[1].omnibus_te
        assert np.isclose(est_te, expected_mi, atol=0.05), (
            'Estimated TE for discrete variables is not correct. Expected: '
            '{0}, Actual results: {1}.'.format(expected_mi, est_te))
    for res in res_mi:
        est_mi = res._single_target[1].omnibus_mi
        assert np.isclose(est_mi, expected_mi, atol=0.05), (
            'Estimated MI for discrete variables is not correct. Expected: '
            '{0}, Actual results: {1}.'.format(expected_mi, est_mi))

    est_te = res_network_multi_te._single_target[2].omnibus_te
    assert np.isclose(est_te, expected_mi, atol=0.05), (
        'Estimated TE for discrete variables is not correct. Expected: {0}, '
        'Actual results: {1}.'.format(expected_mi, est_te))
    est_mi = res_network_multi_mi._single_target[2].omnibus_mi
    assert np.isclose(est_mi, expected_mi, atol=0.05), (
        'Estimated MI for discrete variables is not correct. Expected: {0}, '
        'Actual results: {1}.'.format(expected_mi, est_mi))

    # Check data parameters in results objects
    n_nodes = 3
    n_realisations = n - delay - max(settings['max_lag_sources'],
                                     settings['max_lag_target'])
    for res in res_all:
        assert res.data_properties.n_nodes == n_nodes, 'Incorrect no. nodes.'
        assert res.data_properties.n_realisations == n_realisations, (
            'Incorrect no. realisations.')
        assert res.data_properties.normalised == normalisation, (
            'Incorrect value for data normalisation.')
        adj_matrix = res.get_adjacency_matrix('binary', fdr=False)
        assert adj_matrix._edge_matrix.shape[0] == n_nodes, (
            'Incorrect number of rows in adjacency matrix.')
        assert adj_matrix._edge_matrix.shape[1] == n_nodes, (
            'Incorrect number of columns in adjacency matrix.')
Code example #35
# Import classes
from idtxl.bivariate_mi import BivariateMI
from idtxl.data import Data
from idtxl.visualise_graph import plot_network
import matplotlib.pyplot as plt

# a) Generate test data
data = Data()
data.generate_mute_data(n_samples=1000, n_replications=5)

# b) Initialise analysis object and define settings
network_analysis = BivariateMI()
settings = {
    'cmi_estimator': 'JidtGaussianCMI',
    'max_lag_sources': 5,
    'min_lag_sources': 1
}

# c) Run analysis
results = network_analysis.analyse_network(settings=settings, data=data)

# d) Plot inferred network to console and via matplotlib
results.print_edge_list(weights='max_te_lag', fdr=False)
plot_network(results=results, weights='max_te_lag', fdr=False)
plt.show()
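
# e) Optional: inspect the inferred links programmatically. A minimal sketch,
# reusing the adjacency-matrix API that appears in the test examples below:
adj_matrix = results.get_adjacency_matrix(weights='binary', fdr=False)
print(adj_matrix.get_edge_list())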
Code example #36
File: test_bivariate_mi.py Project: monperrus/IDTxl
def test_return_local_values():
    """Test estimation of local values."""
    max_lag = 5
    data = Data(seed=SEED)
    data.generate_mute_data(200, 5)
    settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'local_values': True,  # request calculation of local values
        'n_perm_max_stat': 21,
        'n_perm_min_stat': 21,
        'n_perm_max_seq': 21,
        'n_perm_omnibus': 21,
        'max_lag_sources': max_lag,
        'min_lag_sources': max_lag,
        'max_lag_target': max_lag
    }
    target = 1
    mi = BivariateMI()
    results_local = mi.analyse_network(settings, data, targets=[target])

    lmi = results_local.get_single_target(target, fdr=False)['mi']
    if lmi is None:
        return
    n_sources = len(results_local.get_target_sources(target, fdr=False))
    assert type(lmi) is np.ndarray, (
        'LMI estimation did not return an array of values: {0}'.format(lmi))
    assert lmi.shape[0] == n_sources, (
        'Wrong dim (no. sources) in LMI estimate: {0}'.format(lmi.shape))
    assert lmi.shape[1] == data.n_realisations_samples((0, max_lag)), (
        'Wrong dim (no. samples) in LMI estimate {0}'.format(lmi.shape))
    assert lmi.shape[2] == data.n_replications, (
        'Wrong dim (no. replications) in LMI estimate {0}'.format(lmi.shape))

    # Test the correctness of the single-link MI estimation by comparing it
    # to the MI between single variables and the target. For this test case,
    # where we find only one significant past variable per source, the two
    # should be the same. Also compare the single-link average MI to the mean
    # local MI for each link.
    settings['local_values'] = False
    results_avg = mi.analyse_network(settings, data, targets=[target])
    mi_single_link = results_avg.get_single_target(target, fdr=False)['mi']
    mi_selected_sources = results_avg.get_single_target(
        target, fdr=False)['selected_sources_mi']
    sources_local = results_local.get_target_sources(target, fdr=False)
    sources_avg = results_avg.get_target_sources(target, fdr=False)
    print('Single link average MI: {0}, single source MI: {1}.'.format(
        mi_single_link, mi_selected_sources))
    if mi_single_link is None:
        return
    assert np.isclose(mi_single_link, mi_selected_sources, atol=0.005).all(), (
        'Single link average MI {0} and single source MI {1} deviate.'.format(
            mi_single_link, mi_selected_sources))
    # Check if average and local values are the same. Test each source
    # separately. Inferred sources may differ between the two calls to
    # analyse_network() due to low number of surrogates used in unit testing.
    print('Compare average and local values.')
    for s in list(set(sources_avg).intersection(sources_local)):
        i1 = np.where(sources_avg == s)[0][0]
        i2 = np.where(sources_local == s)[0][0]
        assert np.isclose(
            mi_single_link[i1], np.mean(lmi[i2, :, :]), atol=0.005
        ), ('Single link average MI {0:0.6f} and mean LMI {1:0.6f} deviate.'.
            format(mi_single_link[i1], np.mean(lmi[i2, :, :])))
        assert np.isclose(
            mi_single_link[i1], mi_selected_sources[i1], atol=0.005
        ), ('Single link average MI {0:0.6f} and single source MI {1:0.6f} deviate.'
            .format(mi_single_link[i1], mi_selected_sources[i1]))
Code example #37
def test_JidtKraskovCMI_BMI_checkpoint():
    """Run the analysis without checkpointing, with checkpointing but no
    resume, and with resume from a checkpoint, and compare the results."""
    filename_ckp1 = os.path.join(
        os.path.dirname(__file__), 'data', 'run_checkpoint')
    filename_ckp2 = os.path.join(
        os.path.dirname(__file__), 'data', 'resume_checkpoint')

    """Test running analysis without any checkpoint setting."""
    # Generate test data
    data = Data(seed=SEED)
    data.generate_mute_data(n_samples=N_SAMPLES, n_replications=1)

    # Initialise analysis object and define settings
    network_analysis1 = BivariateMI()
    network_analysis2 = BivariateMI()
    network_analysis3 = BivariateMI()
    network_analysis4 = BivariateMI()

    # Settings without checkpointing.
    settings1 = {'cmi_estimator': 'JidtKraskovCMI',
                 'n_perm_max_stat': N_PERM,
                 'n_perm_min_stat': N_PERM,
                 'n_perm_max_seq': N_PERM,
                 'n_perm_omnibus': N_PERM,
                 'max_lag_sources': 3,
                 'min_lag_sources': 2,
                 'noise_level': 0
                 }

    # Settings with checkpointing.
    settings2 = {'cmi_estimator': 'JidtKraskovCMI',
                 'n_perm_max_stat': N_PERM,
                 'n_perm_min_stat': N_PERM,
                 'n_perm_max_seq': N_PERM,
                 'n_perm_omnibus': N_PERM,
                 'max_lag_sources': 3,
                 'min_lag_sources': 2,
                 'noise_level': 0,
                 'write_ckp': True,
                 'filename_ckp': filename_ckp1}

    # Settings resuming from checkpoint.
    settings3 = {'cmi_estimator': 'JidtKraskovCMI',
                 'n_perm_max_stat': N_PERM,
                 'n_perm_min_stat': N_PERM,
                 'n_perm_max_seq': N_PERM,
                 'n_perm_omnibus': N_PERM,
                 'max_lag_sources': 3,
                 'min_lag_sources': 2,
                 'noise_level': 0,
                 'write_ckp': True,
                 'filename_ckp': filename_ckp2}

    # Setting sources and targets for the analysis
    sources = [0]
    targets = [2, 3]
    targets2 = [3]

    # Starting analysis of the network
    # results of a network analysis without checkpointing
    results1 = network_analysis1.analyse_network(
        settings=settings1, data=data, targets=targets, sources=sources)

    # results of a network analysis with checkpointing
    results2 = network_analysis2.analyse_network(
        settings=settings2, data=data, targets=targets, sources=sources)

    # Create a checkpoint similar to the above settings, where the targets of
    # the first source have already been analysed: grab the line of the first
    # checkpoint file that records the analysed target/source combination.
    with open(filename_ckp1 + ".ckp") as f:
        tarsource = f.readlines()
    tarsource = tarsource[8]    # line recording the analysed variables
    tarsource = tarsource[:-1]  # strip the trailing newline

    network_analysis3._set_checkpointing_defaults(
        settings3, data, sources, targets)

    with open(filename_ckp2 + ".ckp") as f:
        lines = f.read().splitlines()
    lines[8] = tarsource
    with open(filename_ckp2 + ".ckp", 'w') as f:
        f.write('\n'.join(lines))
    print(lines)

    # Resume analysis.
    network_analysis_res = BivariateMI()
    data, settings, targets, sources = network_analysis_res.resume_checkpoint(
        filename_ckp2)

    # results of a network analysis resuming from checkpoint
    results3 = network_analysis_res.analyse_network(
        settings=settings3, data=data, targets=targets, sources=sources)

    # results of a network analysis without checkpointing but with different
    # targets/sources
    results4 = network_analysis4.analyse_network(
        settings=settings1, data=data, targets=targets2, sources=sources)

    adj_matrix1 = results1.get_adjacency_matrix(weights='binary', fdr=False)
    adj_matrix2 = results2.get_adjacency_matrix(weights='binary', fdr=False)
    adj_matrix3 = results3.get_adjacency_matrix(weights='binary', fdr=False)
    adj_matrix4 = results4.get_adjacency_matrix(weights='binary', fdr=False)

    result1 = adj_matrix1.get_edge_list()
    result2 = adj_matrix2.get_edge_list()
    result3 = adj_matrix3.get_edge_list()
    result4 = adj_matrix4.get_edge_list()

    print("Printing results:")
    print("Result 1: without checkpoint")
    print(result1)
    print("Result 2: with checkpoint")
    print(result2)
    print("Result 3: resuming from checkpoint")
    print(result3)
    print("Result 4: without checkpoint and different targets and sources")
    print(result4)

    print("Comparing the results:")
    assert np.array_equal(result1, result2), 'Result 1 and 2 not equal!'
    assert np.array_equal(result1, result3), 'Result 1 and 3 not equal!'
    assert not np.array_equal(result1, result4), 'Result 1 and 4 equal, expected to be different!'

    cmp1 = list(set(result1).intersection(result2))
    cmp2 = list(set(result1).intersection(result3))
    cmp3 = list(set(result1).intersection(result4))
    len1 = len(result1)
    assert len(cmp1) == len1 and len(cmp2) == len1 and len(cmp3) != len1, (
        "Kraskov BivariateMI running with checkpoints does not give the "
        "expected results.")

    print("Final")
    print("Elements of comparison between result 1 and 2")
    print(cmp1)
    print("Elements of comparison between result 1 and 3")
    print(cmp2)
    print("Elements of comparison between result 1 and 4")
    print(cmp3)

    _clear_ckp(filename_ckp1)
    _clear_ckp(filename_ckp2)