def test_micromed_header():
    """Check header fields, markers, edge-of-record reads and video errors
    for the Micromed test recording."""
    dset = Dataset(micromed_file)
    hdr = dset.header

    # channel labels and subject id as stored in the recording
    assert hdr['chan_name'][-1] == 'EMG'
    assert hdr['chan_name'][:5] == ['FP1', 'FP2', 'AF7', 'AF3', 'AFz']
    assert hdr['subj_id'] == 'Tp_metrologie Tp_metrologie'

    # no video attached: the begin index holds the u4 sentinel value
    assert hdr['orig']['dvideo_begin'] == iinfo('u4').max

    mrk = dset.read_markers()
    assert len(mrk) == 444
    first = mrk[0]
    assert first['name'] == '1'
    assert first['start'] == first['end']

    # read fully inside the recording: spot-check one value
    data = dset.read_data(chan=('FP1', 'AFz'), begsam=10, endsam=20)
    assert data.data[0][1, 5] == -334.27734375

    # samples before the start of the recording are NaN-padded
    data = dset.read_data(chan=('FP1', 'AFz'), begsam=-10, endsam=20)
    assert all(isnan(data.data[0][1, :10]))

    # samples after the end of the recording are NaN-padded
    data = dset.read_data(chan=('FP1', 'AFz'), begsam=900, endsam=1010)
    assert all(isnan(data.data[0][1, -10:]))

    # a window entirely outside the recording is all NaN
    data = dset.read_data(chan=('FP1', 'AFz'), begsam=-10, endsam=-2)
    assert all(isnan(data.data[0][1, :]))

    # reading video from a recording without video raises OSError
    with raises(OSError):
        dset.read_videos(10, 20)
def test_brainvision_dataset_01():
    """Read a BrainVision recording by time and by (negative) sample."""
    dset = Dataset(brainvision_dir / 'test.vhdr')

    by_time = dset.read_data(begtime=1, endtime=2)
    assert by_time.data[0][0, 0] == -24.0
    assert len(dset.read_markers()) == 12

    # reads that start before sample 0 are NaN-padded
    by_sample = dset.read_data(begsam=-1, endsam=5)
    assert isnan(by_sample.data[0][0, 0])
def test_brainvision_dataset_02():
    """Old-layout latin1 BrainVision file with a software filter."""
    dset = Dataset(brainvision_dir / 'test_old_layout_latin1_software_filter.vhdr')

    # spot-check one in-range value
    data = dset.read_data(begsam=1, endsam=2)
    assert_almost_equal(data.data[0][0, 0], 5.1)

    # reading past the end of the recording pads with NaN
    data = dset.read_data(endsam=255)
    assert isnan(data.data[0][0, -1])

    # full read returns the complete (channels, samples) matrix
    data = dset.read_data()
    assert data.data[0].shape == (29, 251)
def test_ioeeg_eeglab():
    """Both EEGLAB file variants decode to the same data."""
    dset_a = Dataset(eeglab_1_file)
    dset_b = Dataset(eeglab_2_file)

    first_a = dset_a.read_data().data[0][0, 0]
    first_b = dset_b.read_data().data[0][0, 0]
    assert first_a == first_b

    # the first variant carries two markers
    assert len(dset_a.read_markers()) == 2
def test_ioeeg_eeglab_hdf5():
    """Both EEGLAB HDF5 variants decode the same sample window identically."""
    dset_a = Dataset(eeglab_hdf5_1_file)
    dset_b = Dataset(eeglab_hdf5_2_file)

    first_a = dset_a.read_data(begsam=100, endsam=200).data[0][0, 0]
    first_b = dset_b.read_data(begsam=100, endsam=200).data[0][0, 0]
    assert first_a == first_b

    # the first variant carries two markers
    assert len(dset_a.read_markers()) == 2
def test_openephys_dataset_01():
    """Read an Open Ephys recording by time and by sample, and its markers.

    The time-based read is kept as a smoke test; its return value is not
    inspected.
    """
    # the original created the Dataset twice; once is enough
    d = Dataset(openephys_dir)
    d.read_data(begtime=1, endtime=2)  # smoke test: time-based read succeeds

    # sample-based read on a single channel; spot-check the first value
    data = d.read_data(chan=[
        'CH1',
    ], begsam=10, endsam=1400)
    assert data.data[0][0, 0] == -132.6

    # this recording contains no markers
    mrk = d.read_markers()
    assert len(mrk) == 0
def test_baseline():
    """apply_baseline over a time interval and over a frequency interval."""
    dset = Dataset(micromed_file)
    second_event = [m['start'] for m in dset.read_markers()][1]
    two_chans = dset.header['chan_name'][:2]
    data = dset.read_data(events=second_event, pre=1, post=1, chan=two_chans)

    t_int = (-.5, -.1)

    def mean_over_time(x):
        # average the baseline time window for each channel
        return math(select(x, time=t_int), operator_name='mean', axis='time')

    # default baseline: mean of the baseline window is scaled to 1
    out = apply_baseline(data, time=t_int)
    assert_array_almost_equal(mean_over_time(out)(trial=0), array([1, 1]))

    # zscore baseline: mean of the baseline window becomes 0
    out = apply_baseline(data, time=t_int, baseline='zscore')
    assert_array_almost_equal(mean_over_time(out)(trial=0), array([0, 0]))

    # percent baseline: mean of the baseline window becomes 0
    out = apply_baseline(data, time=t_int, baseline='percent')
    assert_array_almost_equal(mean_over_time(out)(trial=0), array([0, 0]))

    f_int = (10, 15)
    freq = frequency(data)

    # dB baseline: mean over the frequency window becomes 0
    out = apply_baseline(freq, freq=f_int, baseline='dB')
    mout = math(select(out, freq=f_int), operator_name='mean', axis='freq')
    assert_array_almost_equal(mout(trial=0), array([0, 0]))

    # normchange keeps every value within [-1, 1]
    out = apply_baseline(freq, freq=f_int, baseline='normchange')
    assert out.data[0].max() <= 1
    assert out.data[0].min() >= -1
Beispiel #8
0
def test_blackrock_ns4_02():
    """Reading past the end of an ns4 recording pads with NaN."""
    # the original created the Dataset twice; once is enough
    d = Dataset(ns4_file)

    N_SMP = d.header['n_samples']
    data = d.read_data(begsam=N_SMP - 1, endsam=N_SMP + 10)
    # the 10 samples beyond the recording end are NaN
    assert isnan(data.data[0][0, -10:]).all()
Beispiel #9
0
def test_bci2000_data():
    """BCI2000 file exists, has no markers, and decodes correctly."""
    assert bci2000_file.exists()

    dset = Dataset(bci2000_file)
    markers = dset.read_markers()
    assert len(markers) == 0

    # spot-check the first sample of the first channel
    full = dset.read_data()
    assert full.data[0][0, 0] == 179.702
Beispiel #10
0
def test_dataset_events():
    """Event-based reads return one 512-sample segment per event."""
    dset = Dataset(micromed_file)
    onsets = [m['start'] for m in dset.read_markers()][::30]

    data = dset.read_data(events=onsets)

    n_times = data.time[0].shape[0]
    assert n_times == 512
    assert n_times == data.data[0].shape[1]
    assert (data.number_of('time') == 512).all()
def test_write_read_fieldtrip():
    """Data exported in FieldTrip format reads back unchanged."""
    original = create_data(n_trial=1, n_chan=2)
    original.export(fieldtrip_file, export_format='fieldtrip')

    dset = Dataset(fieldtrip_file)
    roundtrip = dset.read_data()
    assert_array_equal(original.data[0], roundtrip.data[0])

    # FieldTrip export carries no markers
    assert len(dset.read_markers()) == 0
def test_ioeeg_mobert_end():
    """Reading across the end of a Moberg recording pads with NaN."""
    dset = Dataset(moberg_file)
    n_smp = dset.header['n_samples']
    data = dset.read_data(begsam=n_smp - 1, endsam=n_smp + 1)

    fp1 = data(trial=0, chan='Fp1')
    # last real sample, then the NaN pad beyond the recording end
    assert fp1[0] == -12302.22384929657
    assert isnan(fp1[1])

    assert len(dset.read_markers()) == 0
Beispiel #13
0
def load_and_slice_data_for_feature_extraction(
        edf_filepath: str,
        epochstages: List[str],
        bad_segments: List[int] = None,
        epochoffset_secs: float = None,
        end_offset: float = None,
        chans_to_consider: List[str] = None,
        epoch_len=pysleep_defaults.epoch_len,
        stages_to_consider=pysleep_defaults.nrem_stages):
    """Load an EDF and slice out contiguous runs of the requested sleep stages.

    :param edf_filepath: path of the EDF recording to load
    :param epochstages: one stage label per epoch
    :param bad_segments: epoch indices to relabel as 'artifact' (these stages
        are then excluded unless 'artifact' is in stages_to_consider)
    :param epochoffset_secs: seconds from recording start to the first epoch
        (defaults to 0)
    :param end_offset: seconds from recording start after which epochs are
        dropped
    :param chans_to_consider: channel names to read (None reads all channels)
    :param epoch_len: epoch duration in seconds
    :param stages_to_consider: stage labels to keep (default: NREM stages)
    :return: the sliced data, with ``starts``/``ends`` lists (in seconds)
        attached as extra attributes
    :raises EEGError: if the signal amplitude suggests units other than mV
    """
    if epochoffset_secs is None:
        epochoffset_secs = 0
    if end_offset is not None:
        # keep only the epochs that finish before end_offset
        last_good_epoch = int((end_offset - epochoffset_secs) / epoch_len)
        epochstages = epochstages[0:last_good_epoch]

    d = Dataset(edf_filepath)

    # crude unit check: mean absolute amplitude should be plausible for mV
    eeg_data = d.read_data().data[0]
    if not (1 < np.sum(np.abs(eeg_data)) / eeg_data.size < 200):
        raise EEGError(
            "edf data should be in mV, please rescale units in edf file")

    # NOTE(review): this mutates the caller's epochstages list in place
    if bad_segments is not None:
        for bad_epoch in bad_segments:
            epochstages[bad_epoch] = 'artifact'
    # turn the stage-per-epoch list into an events table
    # (columns include onset, duration, description)
    epochstages = pysleep_utils.convert_epochstages_to_eegevents(
        epochstages, start_offset=epochoffset_secs)
    epochstages_to_consider = epochstages.loc[
        epochstages['description'].isin(stages_to_consider), :]
    starts = epochstages_to_consider['onset'].tolist()
    ends = (epochstages_to_consider['onset'] +
            epochstages_to_consider['duration']).tolist()

    # merge back-to-back epochs into single continuous segments
    # (iterate backwards so deletions don't shift pending indices)
    for i in range(len(starts) - 1, 0, -1):
        if starts[i] == ends[i - 1]:
            del starts[i]
            del ends[i - 1]

    data = d.read_data(begtime=starts, endtime=ends, chan=chans_to_consider)
    data.starts = starts
    data.ends = ends
    return data
Beispiel #14
0
def read_ieeg_block(filename, electrode_file, conditions, minimalduration,
                    output_dir):
    """Cut one iEEG recording into per-condition blocks and pickle them.

    :param filename: path of the BIDS recording to read
    :param electrode_file: electrodes TSV; only channels that also have an
        electrode location are kept
    :param conditions: mapping of condition name -> list of marker names
    :param minimalduration: minimal marker duration (s) for a block to count
    :param output_dir: directory where one .pkl file per condition is written
    :return: list of the written output file paths
    """
    d = Dataset(filename, bids=True)
    markers = d.read_markers()

    electrodes = Electrodes(electrode_file)
    elec_names = [x['name'] for x in electrodes.electrodes.tsv]
    elec_names = [x for x in elec_names if x in d.header['chan_name']
                  ]  # exclude elec location that have no corresponding channel

    # reject channels using the markers of all conditions pooled together
    all_conditions = [x for v in conditions.values() for x in v]
    clean_labels = _reject_channels(d, elec_names, all_conditions,
                                    minimalduration)

    outputs = []
    for active_baseline, data_conds in conditions.items():
        block_beg = []
        block_end = []

        # collect the start/end of every long-enough marker of this condition
        for mrk in markers:

            if mrk['name'] in data_conds:
                dur = (mrk['end'] - mrk['start'])
                if dur >= minimalduration:
                    block_beg.append(mrk['start'])
                    block_end.append(mrk['end'])

        data = d.read_data(begtime=block_beg,
                           endtime=block_end,
                           chan=clean_labels)

        # write one pickle per condition, tagged with the condition name
        output_task = Task(filename)
        output_task.extension = '.pkl'
        output_task.task += active_baseline
        output_file = output_dir / output_task.get_filename()
        with output_file.open('wb') as f:
            dump(data, f)
        outputs.append(output_file)

    return outputs
Beispiel #15
0
def test_wonambi_write_read():
    """Data written in Wonambi format reads back identically."""
    write_wonambi(gen_data, wonambi_file, subj_id='test_subj')
    roundtrip = Dataset(wonambi_file).read_data()
    assert_array_equal(roundtrip(trial=0), gen_data(trial=0))
def test_ioeeg_eeglab_begsam():
    """A negative begsam on an EEGLAB file yields NaN padding."""
    dset = Dataset(eeglab_1_file)
    padded = dset.read_data(begsam=-10, endsam=1)
    assert isnan(padded.data[0][0, 0])
def test_write_read_fieldtrip_hdf5():
    """Smoke test: an HDF5 FieldTrip file opens and reads without error."""
    d = Dataset(hdf5_file)
    # the return value is deliberately not inspected
    d.read_data()
from pytest import approx, raises

from wonambi import Dataset
from wonambi.detect.spindle import DetectSpindle

from .paths import psg_file

# Module-level fixture shared by the spindle-detection tests below:
# a fixed 30 s, two-channel window of the PSG recording.
d = Dataset(psg_file)
data = d.read_data(chan=('EEG Fpz-Cz', 'EEG Pz-Oz'), begtime=35790, endtime=35820)


def test_detect_spindle_Moelle2011():
    """The default detector is Moelle2011 and finds 4 spindles."""
    detector = DetectSpindle()
    assert repr(detector) == 'detsp_Moelle2011_12-15Hz_00.5-03.0s'

    events = detector(data).events
    assert len(events) == 4


def test_detect_spindle_Nir2011():
    """The Nir2011 method also finds 4 spindles in the fixture."""
    detector = DetectSpindle(method='Nir2011')
    events = detector(data).events
    assert len(events) == 4


def test_detect_spindle_Wamsley2012():
    """The Wamsley2012 method also finds 4 spindles in the fixture."""
    detector = DetectSpindle(method='Wamsley2012')
    events = detector(data).events
    assert len(events) == 4
def extract_band_power(
        edf_filepath: str,
        bands: dict = pysleep_defaults.default_freq_bands,
        chans_to_consider: List[str] = None,
        epochoffset_secs: float = None,
        end_time: float = None,
        epoch_len: int = pysleep_defaults.epoch_len) -> pd.DataFrame:
    """Extract per-channel band power averaged over fixed-length epochs.

    :param edf_filepath: The edf to extract bandpower for
    :param bands: bands to extract power in, if None, then defaults will be used i.e.
        bands = {
            'delta': (1, 4),
            'theta': (4, 7),
            'alpha': (8, 12),
            'sigma': (11, 16),
            'slow_sigma': (11, 13),
            'fast_sigma': (13, 16),
            'beta': (13, 30)
        }
    :param chans_to_consider: which channels to consider
    :param epochoffset_secs: start time of the recording to extract band power for (when do epochs start), onset is measured from this
    :param end_time: end time of the recording to extract band power for
    :param epoch_len: how long a time bin you want your power to be averaged over
    :return: long-format DataFrame with one row per (epoch, band, channel)
        and columns onset, duration, band, chan, power
    :raises error_handling.EEGError: if epochoffset_secs is negative or
        end_time lies past the end of the recording
    """

    d = Dataset(edf_filepath)
    if not (epochoffset_secs is None or epochoffset_secs >= 0):
        raise error_handling.EEGError('Epochoffset is negative!' +
                                      str(epochoffset_secs))
    if not ((end_time is None) or
            (end_time <= d.header['n_samples'] / d.header['s_freq'])):
        raise error_handling.EEGError("end time (" + str(end_time) +
                                      ") larger than record end!" +
                                      str(d.header['n_samples'] /
                                          d.header['s_freq']))
    data = d.read_data(begtime=epochoffset_secs,
                       endtime=end_time,
                       chan=chans_to_consider)
    # spectrogram magnitudes, indexed below as (chan, time, freq)
    power = timefrequency(data, method='spectrogram')
    abs_power = math(power, operator_name='abs')
    chan_time_freq = abs_power.data[0]
    all_chans = np.ones((chan_time_freq.shape[0], ), dtype=bool)
    epochoffset_secs = 0 if epochoffset_secs is None else epochoffset_secs
    # shift the time axis so onsets are relative to the first epoch
    time_axis = np.round(abs_power.axis['time'][0], 2) - epochoffset_secs
    freq_axis = np.round(abs_power.axis['freq'][0], 2)
    chan_axis = abs_power.axis['chan'][0]
    freq_binsize = freq_axis[1] - freq_axis[0]
    assert epoch_len > 0, "epoch len must be greater than zero"
    times = np.arange(0, time_axis[-1], epoch_len)
    cont = []
    for band, freqs in bands.items():
        freq_mask = (freqs[0] <= freq_axis) & (freqs[1] >= freq_axis)
        for win_start in times:
            time_mask = (win_start < time_axis) & (time_axis <
                                                   win_start + epoch_len)
            # NOTE(review): np.ix_ returns a non-empty tuple of arrays, so
            # `if idx:` is always true and the zero-fill else branch is dead;
            # this was probably meant to test time_mask.any() -- confirm
            idx = np.ix_(all_chans, time_mask, freq_mask)
            if idx:
                # mean over time then freq, normalized to power per Hz
                chan_epoch_per_band = chan_time_freq[idx].mean(axis=1).mean(
                    axis=1) / freq_binsize
            else:
                chan_epoch_per_band = np.zeros((len(chans_to_consider), ))
            # NOTE(review): loop variable `power` shadows the spectrogram
            # variable above; harmless here since it is not reused afterwards
            for chan, power in zip(chan_axis, chan_epoch_per_band):
                cont.append(
                    pd.Series({
                        'onset': win_start,
                        'duration': epoch_len,
                        'band': band.split('_')[0],
                        'chan': chan,
                        'power': power
                    }))
    # NOTE(review): errors='ignore' for to_numeric is deprecated in recent
    # pandas releases -- confirm the pinned pandas version still supports it
    band_power = pd.concat(
        cont, axis=1).T.apply(lambda x: pd.to_numeric(x, errors='ignore'))
    return band_power
Beispiel #20
0
def test_xltek_data():
    """Spot-check one sample of one channel of the XLTEK recording."""
    dset = Dataset(ktlx_file)
    data = dset.read_data(begsam=1000, endsam=1001, chan=('FZ', ))
    assert_array_almost_equal(data.data[0][0, 0], -90.119315)
Beispiel #21
0
from pytest import approx

from wonambi import Dataset
from wonambi.detect.slowwave import DetectSlowWave

from .paths import psg_file

# Module-level fixture shared by the slow-wave detection tests below:
# a fixed 30 s, two-channel window of the PSG recording.
d = Dataset(psg_file)
data = d.read_data(chan=('EEG Fpz-Cz', 'EEG Pz-Oz'),
                   begtime=27930,
                   endtime=27960)


def test_detect_slowwave_Massimini2004():
    """The default detector is Massimini2004; 1 slow wave in the fixture."""
    detector = DetectSlowWave()
    detector.invert = True
    assert repr(detector) == 'detsw_Massimini2004_0.10-4.00Hz'

    events = detector(data).events
    assert len(events) == 1


def test_detect_slowwave_AASM_Massimini2004():
    """The AASM variant of Massimini2004 finds 15 slow waves."""
    detector = DetectSlowWave(method='AASM/Massimini2004')
    detector.invert = True
    assert repr(detector) == 'detsw_AASM/Massimini2004_0.10-4.00Hz'

    events = detector(data).events
    assert len(events) == 15

Beispiel #22
0
def test_blackrock_ns4_00():
    """Spot-check a single in-range sample of the ns4 recording."""
    dset = Dataset(ns4_file)
    sample = dset.read_data(begsam=10, endsam=11)
    assert sample.data[0][0, 0] == 3463.0878627887814
Beispiel #23
0
def test_blackrock_nev():
    """NEV files hold events, not signals: read_data must raise TypeError."""
    dset = Dataset(nev_file)
    with raises(TypeError):
        dset.read_data()
Beispiel #24
0
def plot_raw_overview(filename):
    """Plot quality-control overviews (histogram, spectrum) for one recording.

    :param filename: path of the BIDS recording to inspect
    :return: (bad_chans, divs) where bad_chans is a set of channel names
        flagged as outliers (None when AUTOMATIC is off or when no reference
        channels are known for this file) and divs is a list of plotly divs
    """
    event_type = 'all'

    # pick the hand-chosen reference channels for each known subject
    if filename.name.startswith('sub-drouwen'):
        CHANS = [f'IH0{x + 1}' for x in range(8)]
    elif filename.name.startswith('sub-itens'):
        CHANS = [f'C0{x + 1}' for x in range(8)]
    elif filename.name.startswith('sub-lemmer'):
        CHANS = [f'IH{x + 1}' for x in range(8)]
    elif filename.name.startswith('sub-som705'):
        CHANS = [f'GA0{x + 1}' for x in range(8)]  # a bit random
    elif filename.name.startswith('sub-ommen'):
        CHANS = ['chan1',
                 'chan2']  # I dont 'understand why I cannot use 'chan64'
    # NOTE(review): the 'sub-ommen' test in the next branch is unreachable --
    # it is already matched by the branch above
    elif filename.name.startswith('sub-vledder') or filename.name.startswith(
            'sub-ommen'):
        CHANS = ['chan1', 'chan64']
    elif '_acq-blackrock_' in filename.name:
        CHANS = ['chan1', 'chan128']
    else:
        print('you need to specify reference channel for this test')
        return None, None

    d = Dataset(filename, bids=True)
    event_names, event_onsets = select_events(d, event_type)

    # keep only intracranial channels (ECoG or SEEG)
    is_ecog = d.dataset.task.channels.tsv['type'] == 'ECOG'
    is_seeg = d.dataset.task.channels.tsv['type'] == 'SEEG'
    chans = array(d.header['chan_name'])[is_ecog | is_seeg]
    # read the whole span from the first to the last event
    data = d.read_data(begtime=event_onsets[0],
                       endtime=event_onsets[-1],
                       chan=list(chans))
    data.data[0][isnan(data.data[0])] = 0  # ignore nan

    # re-reference to the hand-picked channels, then compute the spectrum
    data = montage(data, ref_chan=CHANS)
    freq = frequency(data, taper='hann', duration=2, overlap=0.5)

    # per-channel amplitude histogram plot
    hist = make_histogram(data, max=250, step=10)
    divs = []
    fig = plot_hist(hist)
    divs.append(to_div(fig))

    bad_chans = None

    if AUTOMATIC:
        from sklearn.covariance import EllipticEnvelope

        # flag channels whose amplitude histogram is an outlier
        algorithm = EllipticEnvelope(
            contamination=P['data_quality']['histogram']['contamination'])
        prediction = algorithm.fit(hist.data[0]).predict(hist.data[0])
        new_bad_chans = data.chan[0][prediction == -1]
        print('bad channels with histogram / elliptic envelope: ' +
              ', '.join(new_bad_chans))
        bad_chans = set(new_bad_chans)

        fig = plot_outliers(hist.chan[0],
                            algorithm.dist_,
                            prediction,
                            yaxis_title='distance',
                            yaxis_type='log')
        divs.append(to_div(fig))

    fig = plot_freq(freq)
    divs.append(to_div(fig))

    if AUTOMATIC:
        from sklearn.neighbors import LocalOutlierFactor

        # flag channels whose power spectrum is an outlier
        algorithm = LocalOutlierFactor(
            n_neighbors=P['data_quality']['spectrum']['n_neighbors'])
        prediction = algorithm.fit_predict(freq.data[0])

        new_bad_chans = data.chan[0][prediction == -1]
        print('bad channels with spectrum / local outlier factor: ' +
              ', '.join(new_bad_chans))
        bad_chans |= set(new_bad_chans)
        fig = plot_outliers(freq.chan[0],
                            algorithm.negative_outlier_factor_,
                            prediction,
                            yaxis_title='distance',
                            yaxis_type='linear')
        divs.append(to_div(fig))

        # we use again the reference channel. Ref channel was handpicked but it might have a weird spectrum
        bad_chans -= set(CHANS)

    return bad_chans, divs
Beispiel #25
0
def test_blackrock_ns4_01():
    """Reading before the start of an ns4 recording pads with NaN."""
    dset = Dataset(ns4_file)
    padded = dset.read_data(begsam=-10, endsam=1)
    assert isnan(padded.data[0][0, :10]).all()
def test_ioeeg_mobert_begin():
    """Reading before the start of a Moberg recording pads with NaN."""
    dset = Dataset(moberg_file)
    data = dset.read_data(begsam=-10, endsam=10)

    fp1 = data(trial=0, chan='Fp1')
    # NaN pad first, then real samples; spot-check the last one
    assert isnan(fp1[0])
    assert fp1[-1] == -1678.8678197860718
Beispiel #27
0
def test_xltek_data():
    """Spot-check a late sample of channel Fz in the XLTEK recording."""
    # NOTE(review): same name as an earlier test_xltek_data in this file;
    # if both end up in one module the later definition shadows the earlier
    d = Dataset(ktlx_file)
    data = d.read_data(begsam=223380, endsam=223381, chan=('Fz', ))
    assert_array_almost_equal(data.data[0][0, 0], -2021.171532)