Example #1
0
def test_collecting_feature():
    """Test computation of spectral markers"""
    eps = _get_data()[:2]

    welch_params = {'n_fft': 4096, 'n_overlap': 100,
                    'n_jobs': 'auto', 'nperseg': 128}
    psd_estimator = PowerSpectralDensityEstimator(
        tmin=None, tmax=None, fmin=1., fmax=45., psd_method='welch',
        psd_params=welch_params, comment='default')

    wpli_marker = WeightedPhaseLagIndex()
    requested = [
        PowerSpectralDensity(estimator=psd_estimator, fmin=1, fmax=4),
        ContingentNegativeVariation(),
        wpli_marker,
    ]

    collection = Markers(requested)

    # Before fitting: no trailing-underscore ("fitted") attributes yet,
    # and each key matches its marker's title.
    for key, inst in collection.items():
        assert_true(not any(attr.endswith('_') for attr in vars(inst)))
        assert_equal(key, inst._get_title())

    # Insertion order must be preserved by the collection.
    assert_equal(list(collection.values()), requested)

    # After fitting, every marker gains at least one fitted attribute.
    collection.fit(eps)
    for inst in requested:
        assert_true(any(attr.endswith('_') for attr in vars(inst)))

    def _params(inst):
        # Non-fitted attributes, excluding the (shared) estimator.
        return {attr: val for attr, val in vars(inst).items()
                if not attr.endswith('_') and attr != 'estimator'}

    save_dir = _TempDir()
    fname = save_dir + '/test-smarkers.hdf5'
    collection.save(fname)
    reloaded = read_markers(fname)
    # Round trip must preserve keys and constructor parameters.
    for (key_a, inst_a), (key_b, inst_b) in zip(collection.items(),
                                                reloaded.items()):
        assert_equal(key_a, key_b)
        assert_equal(_params(inst_a), _params(inst_b))

    entropy = PermutationEntropy().fit(eps)
    collection._add_marker(entropy)

    save_dir = _TempDir()
    fname = save_dir + '/test-markers.hdf5'
    collection.save(fname)
    reloaded2 = read_markers(fname)
    # The marker added after construction must survive the round trip too.
    assert_true(entropy._get_title() in reloaded2)

    assert_true(wpli_marker._get_title() in reloaded2)
Example #2
0
            comment='gamma_weighted'),

        WeightedPhaseLagIndex(tmin=None, tmax=1.0, fmin=0, fmax=8.0, 
                              comment='theta_weighted'),
        WeightedPhaseLagIndex(tmin=None, tmax=1.0, fmin=8.0, fmax=12.0, 
                              comment='alpha_weighted'),
        PhaseLockingValue(tmin=None, tmax=1.0, fmin=0, fmax=8.0, 
                          comment='theta'),
        PhaseLockingValue(tmin=None, tmax=1.0, fmin=8.0, fmax=12.0, 
                          comment='alpha'),

        KolmogorovComplexity(tmin=None, tmax=1.0, backend='openmp',
                             method_params={'nthreads': 'auto'}),
    ]

    fc = Markers(f_list)
    return fc


def get_src_markers():
        psds_params = dict(n_fft=4096, n_overlap=100, n_jobs='auto', nperseg=128)

    base_psd = PowerSpectralDensityEstimator(
        psd_method='welch', tmin=None, tmax=1.0, fmin=1., fmax=45.,
        psd_params=psds_params, comment='default')

    f_list = [
        PowerSpectralDensity(estimator=base_psd, fmin=1., fmax=4.,
                             normalize=False, comment='delta'),
        PowerSpectralDensity(estimator=base_psd, fmin=1., fmax=4.,
                             normalize=True, comment='deltan'),
Example #3
0
                       condition_a=['LDGS', 'LDGD'],
                       condition_b=['LSGS', 'LSGD'],
                       comment='p3a'),
    TimeLockedContrast(tmin=None,
                       tmax=None,
                       condition_a=['LSGD', 'LDGD'],
                       condition_b=['LSGS', 'LDGS'],
                       comment='GD-GS'),
    TimeLockedContrast(tmin=0.996,
                       tmax=1.196,
                       condition_a=['LSGD', 'LDGD'],
                       condition_b=['LSGS', 'LDGS'],
                       comment='p3b')
]

# Collect every marker into one container (m_list is built above).
mc = Markers(m_list)

# Fit all markers on the epoched data in a single pass.
mc.fit(epochs)
mc.save('data/JSXXX-markers.hdf5')

##############################################################################
# Let's explore a bit the PSDs used for the marker computation

# The PSD estimator is fitted once and shared by the spectral markers;
# data_ holds the spectra (presumably epochs x channels x freqs -- TODO
# confirm against PowerSpectralDensityEstimator) and freqs_ the freq axis.
psd = base_psd.data_
freqs = base_psd.freqs_

plt.figure()
# One faint line per channel of the epoch-averaged PSD, on a log scale.
plt.semilogy(freqs, np.mean(psd, axis=0).T, alpha=0.1, color='black')
plt.xlim(2, 40)
plt.ylabel('log(psd)')
plt.xlabel('Frequency [Hz]')
Example #4
0
def test_passthrough():
    """Test computation of Passthrough markers"""
    welch_params = {'n_fft': 4096, 'n_overlap': 100,
                    'n_jobs': 'auto', 'nperseg': 128}
    psd_estimator = PowerSpectralDensityEstimator(
        tmin=None, tmax=None, fmin=1., fmax=45., psd_method='welch',
        psd_params=welch_params, comment='default')
    delta_psd = PowerSpectralDensity(
        psd_estimator, fmin=1., fmax=4., comment='delta')

    # Lookup used by read_passthrough to resolve the parent marker.
    lookup = {delta_psd._get_title(): delta_psd}

    anterior = Passthrough(parent=delta_psd, comment='anterior_delta')
    posterior = Passthrough(parent=delta_psd, comment='posterior_delta')

    _base_io_test(anterior, epochs,
                  functools.partial(read_passthrough, markers=lookup,
                                    comment='anterior_delta'))

    # Both passthroughs expose the parent's data unchanged.
    assert_array_equal(anterior.data_, posterior.data_)

    reduction = [
        {'axis': 'frequency', 'function': np.sum},
        {'axis': 'epochs', 'function': np.mean},
        {'axis': 'channels', 'function': np.mean},
    ]

    # Identical reductions must yield identical topographies.
    assert_array_equal(anterior.reduce_to_topo(reduction_func=reduction),
                       posterior.reduce_to_topo(reduction_func=reduction))

    # Now apply a different channel pick to each passthrough.
    ant_scalar = anterior.reduce_to_scalar(reduction_func=reduction,
                                           picks={'channels': [0]})
    pos_scalar = posterior.reduce_to_scalar(reduction_func=reduction,
                                            picks={'channels': [1]})

    collection = Markers([anterior, posterior])
    collection.fit(epochs)

    reduction_params = {
        'Passthrough/anterior_delta': {'reduction_func': reduction,
                                       'picks': {'channels': [0]}},
        'Passthrough/posterior_delta': {'reduction_func': reduction,
                                        'picks': {'channels': [1]}},
    }

    scalars = collection.reduce_to_scalar(reduction_params)

    # Collection-level reduction must agree with the per-marker ones.
    assert ant_scalar == scalars[0]
    assert pos_scalar == scalars[1]
    for k, v in attrs.items():
        if k.endswith('_'):
            setattr(out, k, v)
    return out


# Register the marker class with NICE so read_markers can
# re-instantiate it when loading the collection from disk.
register_marker_class(MyCustomMarker)

# Now you can create a collection with nice markers and the custom marker

markers_list = [
    PermutationEntropy(),
    ContingentNegativeVariation(),
    MyCustomMarker()
]

markers = Markers(markers_list)

# Fit on test data
epochs = _get_data()[:2]
markers.fit(epochs)

# Save to a file
tmp = _TempDir()
tmp_fname = tmp + '/test-markers.hdf5'
markers.save(tmp_fname)

# Read from file (round-trips the custom marker thanks to the registration)
markers2 = read_markers(tmp_fname)
markers = Markers(
    # Band-limited PSD markers: for each (fmin, fmax) band, one raw and
    # one normalized variant, plus the broadband 'summary_se' entry.
    [PowerSpectralDensity(estimator=base_psd, fmin=lo, fmax=hi,
                          normalize=norm, comment=label)
     for lo, hi, norm, label in [
         (1., 4., False, 'delta'), (1., 4., True, 'deltan'),
         (4., 8., False, 'theta'), (4., 8., True, 'thetan'),
         (8., 12., False, 'alpha'), (8., 12., True, 'alphan'),
         (12., 30., False, 'beta'), (12., 30., True, 'betan'),
         (30., 45., False, 'gamma'), (30., 45., True, 'gamman'),
         (1., 45., False, 'summary_se'),
     ]] +
    # Spectral summary values: median frequency and spectral edges.
    [PowerSpectralDensitySummary(estimator=base_psd, fmin=1., fmax=45.,
                                 percentile=pct, comment=label)
     for pct, label in [(.5, 'summary_msf'),
                        (.9, 'summary_sef90'),
                        (.95, 'summary_sef95')]] +
    # Information-theoretic / complexity markers.
    [PermutationEntropy(tmin=None, tmax=0.6, backend=backend),
     # csd needs to be skipped
     SymbolicMutualInformation(
         tmin=None, tmax=0.6, method='weighted', backend=backend,
         method_params={'nthreads': 'auto', 'bypass_csd': True},
         comment='weighted'),
     KolmogorovComplexity(tmin=None, tmax=0.6, backend=backend,
                          method_params={'nthreads': 'auto'})])
Example #7
0
def computeWSMI(file_to_compute, word_to_compute, categoria):
    """Compute weighted symbolic mutual information (wSMI) for one recording.

    Loads the variable ``word_to_compute`` from the .mat file
    ``file_to_compute``, wraps it as MNE epochs on a 256-channel EGI
    montage, fits the nice ``SymbolicMutualInformation`` marker and
    exports the resulting matrix to ``../data/<categoria>/...-wsmi.mat``.

    Parameters
    ----------
    file_to_compute : str
        Path to the input .mat file.
    word_to_compute : str
        Name of the variable inside the .mat file that holds the data.
    categoria : str
        Sub-directory of ``../data`` used for all generated files.
    """
    # Expected layout of the input matrix (see reshape below).
    N_TRIALS, N_SAMPLES, N_CHANNELS = 30, 4000, 256
    SFREQ = 1000  # sampling frequency in Hz

    # All output files share the same stem derived from the .mat base name.
    mat_basename = op.basename(file_to_compute).split('.')[0]
    out_stem = '../data/' + categoria + '/' + mat_basename + '-' + word_to_compute
    fif_filename = out_stem + '-epo.fif'
    hdf5_filename = out_stem + '-markers.hdf5'
    mat_output = out_stem + '-wsmi.mat'

    start_time = time.time()

    # Load the .mat variable (samples x channels).
    # NOTE: scipy.io.loadmat cannot read MATLAB v7.3 files; for those use
    # the h5py variant below, which yields channels x samples and must be
    # transposed to stay consistent.
    print('Loading mat file: ' + mat_basename + " - " + word_to_compute)
    healthy = {}
    sio.loadmat(file_to_compute, healthy)
    healthy_data = np.array(healthy[word_to_compute])
    #with h5py.File(file_to_compute, 'r') as f:
    #    healthy_data = np.array(f[word_to_compute]).transpose()

    # Drop the last column, i.e. the reference channel Cz.
    healthy_data = np.delete(healthy_data, N_CHANNELS, 1)

    # Measurement info for a 256-channel EGI HydroCel net.
    montage = mne.channels.make_standard_montage('GSN-HydroCel-256')
    info = mne.create_info(montage.ch_names, SFREQ, ch_types='eeg')
    info['description'] = 'egi/256'

    # samples-major -> trials x samples x channels -> trials x channels x samples
    healthy_data = np.reshape(healthy_data, (N_TRIALS, N_SAMPLES, N_CHANNELS))
    healthy_data = np.transpose(healthy_data, (0, 2, 1))

    # EpochsArray expects trials x channels x samples.
    epochs = mne.EpochsArray(healthy_data, info)
    # mne.create_info() no longer accepts a ``montage`` keyword in recent
    # MNE releases (the file already uses the modern make_standard_montage
    # API); attach the montage explicitly instead.
    epochs.set_montage(montage)
    epochs.save(FIF_FILENAME := fif_filename, overwrite=True)

    # Re-read the epochs from disk with preload for the marker computation.
    epochs = mne.read_epochs(fif_filename, preload=True)

    # wSMI marker: weighted symbolic transform with tau=16, CSD applied.
    m_list = [
        SymbolicMutualInformation(tmin=None,
                                  tmax=0.6,
                                  method='weighted',
                                  backend='python',
                                  tau=16,
                                  method_params={
                                      'nthreads': 'auto',
                                      'bypass_csd': False
                                  },
                                  comment='weighted'),
    ]

    mc = Markers(m_list)
    mc.fit(epochs)

    # Persist the fitted markers.
    mc.save(hdf5_filename, overwrite=True)

    print('Converting hdf5 to mat...')
    with h5py.File(hdf5_filename, "r") as f:
        # Fitted wSMI matrix as stored by nice in the hdf5 hierarchy.
        values = list(f['nice']['marker']['SymbolicMutualInformation']
                      ['weighted']['key_data_'])

        sio.savemat(mat_output, {'data': values})

    # Remove the intermediate .fif so it does not waste disk space.
    os.remove(fif_filename)

    print('Execution time: ', str(time.time() - start_time), 'sec')