Example #1
# Imports assumed for this snippet (module paths are a best guess against the
# MNE-Python version these tests come from, ~0.19; the underscore-prefixed
# helpers are private and have moved between releases):
from mne import compute_proj_raw
from mne.io import read_raw_fif
from mne.io.pick import _picks_by_type
from mne.rank import _estimate_rank_raw, _get_rank_sss


# ``fname``, ``ref_meg`` and ``scalings`` are supplied by
# ``@pytest.mark.parametrize`` decorators stripped during extraction.
def test_raw_rank_estimation(fname, ref_meg, scalings):
    """Test raw rank estimation."""
    if ref_meg and scalings != 'norm':
        # Adjust for CTF data (scale factors are quite different)
        scalings = dict(mag=1e31, grad=1e11)
    raw = read_raw_fif(fname)
    raw.crop(0, min(4., raw.times[-1])).load_data()
    out = _picks_by_type(raw.info, ref_meg=ref_meg, meg_combined=True)
    has_eeg = 'eeg' in raw
    if has_eeg:
        (_, picks_meg), (_, picks_eeg) = out
    else:
        (_, picks_meg), = out
        picks_eeg = []
    n_meg = len(picks_meg)
    n_eeg = len(picks_eeg)

    if len(raw.info['proc_history']) == 0:
        expected_rank = n_meg + n_eeg
    else:
        expected_rank = _get_rank_sss(raw.info) + n_eeg
    got_rank = _estimate_rank_raw(raw, scalings=scalings, with_ref_meg=ref_meg)
    assert got_rank == expected_rank
    if 'sss' in fname:
        raw.add_proj(compute_proj_raw(raw))
    raw.apply_proj()
    n_proj = len(raw.info['projs'])
    want_rank = expected_rank - (0 if 'sss' in fname else n_proj)
    got_rank = _estimate_rank_raw(raw, scalings=scalings, with_ref_meg=ref_meg)
    assert got_rank == want_rank
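
Under the hood, `_estimate_rank_raw` essentially counts the significant singular values of the (scaled) channels-by-times data matrix. A minimal plain-numpy sketch of that idea (illustrative only, not MNE's actual implementation; the 1e-4 relative tolerance mirrors the default these tests rely on):

import numpy as np

def rank_from_svd(data, tol=1e-4):
    """Count singular values above tol * largest singular value."""
    s = np.linalg.svd(data, compute_uv=False)  # returned in descending order
    return int((s > s[0] * tol).sum())

rng = np.random.RandomState(0)
full = rng.randn(10, 1000)           # 10 independent channels -> rank 10
avg_ref = full - full.mean(axis=0)   # average reference removes 1 dimension
assert rank_from_svd(full) == 10
assert rank_from_svd(avg_ref) == 9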
Example #2
# Imports assumed for this snippet (module paths are a best guess against the
# MNE-Python version these tests come from, ~0.19; the underscore-prefixed
# helpers are private and have moved between releases):
import pytest
from mne import compute_proj_raw
from mne.io import read_raw_fif
from mne.io.pick import _picks_by_type
from mne.rank import _estimate_rank_raw, _get_rank_sss


# ``fname``, ``ref_meg`` and ``scalings`` are supplied by
# ``@pytest.mark.parametrize`` decorators stripped during extraction.
def test_raw_rank_estimation(fname, ref_meg, scalings):
    """Test raw rank estimation."""
    if ref_meg and scalings != 'norm':
        # Adjust for CTF data (scale factors are quite different)
        scalings = dict(mag=1e31, grad=1e11)
    raw = read_raw_fif(fname)
    raw.crop(0, min(4., raw.times[-1])).load_data()
    out = _picks_by_type(raw.info, ref_meg=ref_meg, meg_combined=True)
    has_eeg = 'eeg' in raw
    if has_eeg:
        (_, picks_meg), (_, picks_eeg) = out
    else:
        (_, picks_meg), = out
        picks_eeg = []
    n_meg = len(picks_meg)
    n_eeg = len(picks_eeg)

    if len(raw.info['proc_history']) == 0:
        expected_rank = n_meg + n_eeg
    else:
        expected_rank = _get_rank_sss(raw.info) + n_eeg
    got_rank = _estimate_rank_raw(raw, scalings=scalings, with_ref_meg=ref_meg)
    assert got_rank == expected_rank
    if has_eeg:
        with pytest.deprecated_call():
            assert raw.estimate_rank(picks=picks_eeg,
                                     scalings=scalings) == n_eeg
    if 'sss' in fname:
        raw.add_proj(compute_proj_raw(raw))
    raw.apply_proj()
    n_proj = len(raw.info['projs'])
    want_rank = expected_rank - (0 if 'sss' in fname else n_proj)
    got_rank = _estimate_rank_raw(raw, scalings=scalings, with_ref_meg=ref_meg)
    assert got_rank == want_rank
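
Example #2 additionally guards the then-deprecated `raw.estimate_rank` method with `pytest.deprecated_call()`. The supported public entry point for this in MNE-Python is `mne.compute_rank`; a hedged usage sketch (the file path here is hypothetical):

import mne

# Hypothetical path; any preloaded Raw works.
raw = mne.io.read_raw_fif('sample_raw.fif', preload=True)
# rank=None estimates the rank from the data after per-channel-type scaling;
# the result is a dict keyed by channel type, e.g. {'meg': 302, 'eeg': 59}.
ranks = mne.compute_rank(raw, rank=None)
print(ranks)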
Example #3
# Imports assumed for this snippet (module paths are a best guess against the
# MNE-Python version these tests come from, ~0.19; the underscore-prefixed
# helpers are private and have moved between releases):
import itertools as itt

import pytest
from mne import compute_proj_raw
from mne.io import read_raw_fif
from mne.io.pick import _picks_by_type
from mne.rank import _estimate_rank_raw, _get_rank_sss


# ``fif_fname`` and ``hp_fif_fname`` are module-level paths to sample FIF
# recordings defined in the original test file (an unprocessed and an
# SSS-processed file, respectively).
def test_rank_estimation():
    """Test raw rank estimation."""
    iter_tests = itt.product(
        [fif_fname, hp_fif_fname],  # sss
        ['norm', dict(mag=1e11, grad=1e9, eeg=1e5)])
    for fname, scalings in iter_tests:
        raw = read_raw_fif(fname).crop(0, 4.).load_data()
        (_, picks_meg), (_, picks_eeg) = _picks_by_type(raw.info,
                                                        meg_combined=True)
        n_meg = len(picks_meg)
        n_eeg = len(picks_eeg)

        if len(raw.info['proc_history']) == 0:
            expected_rank = n_meg + n_eeg
        else:
            expected_rank = _get_rank_sss(raw.info) + n_eeg
        assert _estimate_rank_raw(raw, scalings=scalings) == expected_rank
        with pytest.deprecated_call():
            assert raw.estimate_rank(picks=picks_eeg,
                                     scalings=scalings) == n_eeg
        if 'sss' in fname:
            raw.add_proj(compute_proj_raw(raw))
        raw.apply_proj()
        n_proj = len(raw.info['projs'])
        want_rank = expected_rank - (0 if 'sss' in fname else n_proj)
        assert _estimate_rank_raw(raw, scalings=scalings) == want_rank
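
The `want_rank` arithmetic at the end of each example encodes the core invariant these tests check: for unprocessed data, each applied SSP projector removes one dimension from the signal subspace (the SSS branch is handled separately via the Maxwell-filter metadata, so no reduction is expected there). A toy numpy sketch of projection reducing rank:

import numpy as np

rng = np.random.RandomState(42)
data = rng.randn(8, 500)  # 8 independent channels -> rank 8
# Orthonormal basis for 2 directions to remove, standing in for 2 SSP vectors.
u, _, _ = np.linalg.svd(rng.randn(8, 2), full_matrices=False)
proj = np.eye(8) - u @ u.T  # projector onto the orthogonal complement
assert np.linalg.matrix_rank(data) == 8
assert np.linalg.matrix_rank(proj @ data) == 6  # n_proj = 2 dimensions gone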
Example #4
# Imports assumed for this snippet (module paths are a best guess against the
# MNE-Python version these tests come from, ~0.19; the underscore-prefixed
# helpers are private and have moved between releases):
import pytest
from numpy.testing import assert_array_equal
from mne import compute_proj_raw
from mne.io import read_raw_fif
from mne.rank import _compute_rank_int, _estimate_rank_raw, _get_rank_sss


# ``n_proj``, ``fname``, ``rank_orig`` and ``meg`` are supplied by
# ``@pytest.mark.parametrize`` decorators stripped during extraction;
# ``mf_fif_fname`` and ``hp_fif_fname`` are module-level test-data paths.
def test_maxfilter_get_rank(n_proj, fname, rank_orig, meg):
    """Test maxfilter rank lookup."""
    raw = read_raw_fif(fname).crop(0, 5).load_data().pick_types()
    assert raw.info['projs'] == []
    mf = raw.info['proc_history'][0]['max_info']
    assert mf['sss_info']['nfree'] == rank_orig
    assert _get_rank_sss(raw) == rank_orig
    mult = 1 + (meg == 'separate')
    rank = rank_orig - mult * n_proj
    if n_proj > 0:
        # Let's do some projection
        raw.add_proj(
            compute_proj_raw(raw,
                             n_mag=n_proj,
                             n_grad=n_proj,
                             meg=meg,
                             verbose=True))
    raw.apply_proj()
    data_orig = raw[:][0]

    # degenerate cases
    with pytest.raises(ValueError, match='tol must be'):
        _estimate_rank_raw(raw, tol='foo')
    with pytest.raises(TypeError, match='must be a string or a number'):
        _estimate_rank_raw(raw, tol=None)

    allowed_rank = [rank_orig if meg == 'separate' else rank]
    if fname == mf_fif_fname:
        # Here we permit a -1 because for mf_fif_fname we miss by 1, which is
        # probably acceptable. If we use the entire duration instead of 5 sec
        # this problem goes away, but the test is much slower.
        allowed_rank.append(allowed_rank[0] - 1)

    # multiple ways of hopefully getting the same thing
    # default tol=1e-4, scalings='norm'
    rank_new = _estimate_rank_raw(raw)
    assert rank_new in allowed_rank

    tol = 'float32'  # temporary option until we can fix things
    rank_new = _estimate_rank_raw(raw, tol=tol)
    assert rank_new in allowed_rank
    rank_new = _estimate_rank_raw(raw, scalings=dict(), tol=tol)
    assert rank_new in allowed_rank
    scalings = dict(grad=1e13, mag=1e15)
    rank_new = _compute_rank_int(raw,
                                 None,
                                 scalings=scalings,
                                 tol=tol,
                                 verbose='debug')
    assert rank_new in allowed_rank
    # XXX default scalings mis-estimate sometimes :(
    if fname == hp_fif_fname:
        allowed_rank.append(allowed_rank[0] - 2)
    rank_new = _compute_rank_int(raw, None, tol=tol, verbose='debug')
    assert rank_new in allowed_rank
    del allowed_rank

    rank_new = _compute_rank_int(raw, 'info')
    assert rank_new == rank
    assert_array_equal(raw[:][0], data_orig)
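
Example #4 relies on `_get_rank_sss`, which does not estimate anything from the data: after Maxwell filtering, the rank equals the number of retained internal SSS components, recorded in the measurement info. A sketch of that lookup using the same `proc_history` fields the test asserts on (the filename is hypothetical):

import mne

raw = mne.io.read_raw_fif('sss_raw.fif')  # hypothetical Maxwell-filtered file
proc_history = raw.info['proc_history']   # non-empty only after SSS
if proc_history:
    nfree = proc_history[0]['max_info']['sss_info']['nfree']
    print('rank from SSS metadata:', nfree)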