Example #1
 def test_sampleEntropy(self):
     ts = TS_SAMPLE_ENTROPY
     std_ts = np.std(ts)
     sample_entropy = ent.sample_entropy(ts, 4, 0.2 * std_ts)
     np.testing.assert_array_equal(
         np.around(sample_entropy, 8),
         [2.21187685, 2.10787948, 2.36712361, 1.79175947])
Example #2
 def test_sampleEntropy(self):
     ts = TS_SAMPLE_ENTROPY
     std_ts = np.std(ts)
     sample_entropy = ent.sample_entropy(ts, 4, 0.2 * std_ts)
     np.testing.assert_allclose(
         sample_entropy,
         np.array([2.26881823, 2.11119024, 2.33537492, 1.79175947]))
Example #3
 def test_sampleEntropy(self):
     ts = TS_SAMPLE_ENTROPY
     std_ts = np.std(ts)
     sample_entropy = ent.sample_entropy(ts, 4, 0.2 * std_ts)
     np.testing.assert_array_equal(
         np.around(sample_entropy, 8),
         np.array([2.21187685, 2.12087873, 2.3826278, 1.79175947]))
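The three tests above pin slightly different expected arrays, presumably reflecting different versions of the underlying implementation. For reference, a minimal self-contained call looks like the sketch below; the input series here is made up, since TS_SAMPLE_ENTROPY is not shown in these excerpts.

import numpy as np
from pyentrp import entropy as ent

# Hypothetical stand-in for TS_SAMPLE_ENTROPY.
ts = np.sin(np.linspace(0, 8 * np.pi, 200))
std_ts = np.std(ts)

# sample_entropy(ts, m, r) returns one value per embedding dimension 1..m,
# which is why the tests above compare against arrays of length 4.
sample_entropy = ent.sample_entropy(ts, 4, 0.2 * std_ts)
print(sample_entropy.shape)  # (4,)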
Example #4
def test_complexity():

    signal = np.cos(np.linspace(start=0, stop=30, num=100))

    # Shannon
    assert np.allclose(nk.entropy_shannon(signal) - pyentrp.shannon_entropy(signal), 0)

    # Approximate
    assert np.allclose(nk.entropy_approximate(signal), 0.17364897858477146)
    assert np.allclose(nk.entropy_approximate(signal, 2, 0.2*np.std(signal, ddof=1)) - entropy_app_entropy(signal, 2), 0)

    assert nk.entropy_approximate(signal, 2, 0.2*np.std(signal, ddof=1)) != pyeeg_ap_entropy(signal, 2, 0.2*np.std(signal, ddof=1))

    # Sample
    assert np.allclose(nk.entropy_sample(signal, 2, 0.2*np.std(signal, ddof=1)) - entropy_sample_entropy(signal, 2), 0)
    assert np.allclose(nk.entropy_sample(signal, 2, 0.2) - nolds.sampen(signal, 2, 0.2), 0)
    assert np.allclose(nk.entropy_sample(signal, 2, 0.2) - entro_py_sampen(signal, 2, 0.2, scale=False), 0)
    assert np.allclose(nk.entropy_sample(signal, 2, 0.2) - pyeeg_samp_entropy(signal, 2, 0.2), 0)

    assert nk.entropy_sample(signal, 2, 0.2) != pyentrp.sample_entropy(signal, 2, 0.2)[1]
    assert nk.entropy_sample(signal, 2, 0.2*np.sqrt(np.var(signal))) != MultiscaleEntropy_sample_entropy(signal, 2, 0.2)[0.2][2]

    # MSE
#    assert nk.entropy_multiscale(signal, 2, 0.2*np.sqrt(np.var(signal))) != np.trapz(MultiscaleEntropy_mse(signal, [i+1 for i in range(10)], 2, 0.2, return_type="list"))
#    assert nk.entropy_multiscale(signal, 2, 0.2*np.std(signal, ddof=1)) != np.trapz(pyentrp.multiscale_entropy(signal, 2, 0.2, 10))

    # Fuzzy
    assert np.allclose(nk.entropy_fuzzy(signal, 2, 0.2, 1) - entro_py_fuzzyen(signal, 2, 0.2, 1, scale=False), 0)
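A recurring source of disagreement in cross-library comparisons like the ones above is the tolerance convention: np.std defaults to the population standard deviation (ddof=0), while several of the asserts pass ddof=1 for the sample standard deviation, so r = 0.2 * std differs slightly between libraries. A quick illustration:

import numpy as np

signal = np.cos(np.linspace(start=0, stop=30, num=100))
print(np.std(signal))          # population std (ddof=0)
print(np.std(signal, ddof=1))  # sample std (ddof=1), slightly larger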
Example #5
    def calculate_rri_nonlinear_features(self, rri, diff_rri, diff2_rri):

        # Empty dictionary
        rri_nonlinear_features = dict()

        # Non-linear RR statistics
        if len(rri) > 1:
            rri_nonlinear_features['rri_entropy'] = self.safe_check(
                sample_entropy(rri,
                               sample_length=2,
                               tolerance=0.1 * np.std(rri))[0])
            rri_nonlinear_features[
                'rri_higuchi_fractal_dimension'] = self.safe_check(
                    hfd(rri, k_max=10))
        else:
            rri_nonlinear_features['rri_entropy'] = np.nan
            rri_nonlinear_features['rri_higuchi_fractal_dimension'] = np.nan

        # Non-linear RR difference statistics
        if len(diff_rri) > 1:
            rri_nonlinear_features['diff_rri_entropy'] = self.safe_check(
                sample_entropy(diff_rri,
                               sample_length=2,
                               tolerance=0.1 * np.std(diff_rri))[0])
            rri_nonlinear_features[
                'diff_rri_higuchi_fractal_dimension'] = self.safe_check(
                    hfd(diff_rri, k_max=10))
        else:
            rri_nonlinear_features['diff_rri_entropy'] = np.nan
            rri_nonlinear_features[
                'diff_rri_higuchi_fractal_dimension'] = np.nan

        # Non-linear RR difference difference statistics
        if len(diff2_rri) > 1:
            rri_nonlinear_features['diff2_rri_entropy'] = self.safe_check(
                sample_entropy(diff2_rri,
                               sample_length=2,
                               tolerance=0.1 * np.std(diff2_rri))[0])
            rri_nonlinear_features[
                'diff2_rri_higuchi_fractal_dimension'] = self.safe_check(
                    hfd(diff2_rri, k_max=10))
        else:
            rri_nonlinear_features['diff2_rri_entropy'] = np.nan
            rri_nonlinear_features[
                'diff2_rri_higuchi_fractal_dimension'] = np.nan

        return rri_nonlinear_features
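safe_check is not defined in this excerpt; a minimal hypothetical stand-in that maps failed or non-finite computations to NaN might look like this (an assumption, not the original implementation):

import numpy as np

def safe_check(value):
    # Hypothetical guard: coerce errors and non-finite results to NaN.
    try:
        value = float(value)
    except (TypeError, ValueError):
        return np.nan
    return value if np.isfinite(value) else np.nan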
Example #6
def test_complexity_vs_Python():

    signal = np.cos(np.linspace(start=0, stop=30, num=100))

    # Shannon
    shannon = nk.entropy_shannon(signal)
    #    assert scipy.stats.entropy(shannon, pd.Series(signal).value_counts())
    assert np.allclose(shannon - pyentrp.shannon_entropy(signal), 0)

    # Approximate
    assert np.allclose(nk.entropy_approximate(signal), 0.17364897858477146)
    assert np.allclose(
        nk.entropy_approximate(
            signal, dimension=2, r=0.2 * np.std(signal, ddof=1)) -
        entropy_app_entropy(signal, 2), 0)

    assert nk.entropy_approximate(
        signal, dimension=2,
        r=0.2 * np.std(signal, ddof=1)) != pyeeg_ap_entropy(
            signal, 2, 0.2 * np.std(signal, ddof=1))

    # Sample
    assert np.allclose(
        nk.entropy_sample(signal, dimension=2, r=0.2 * np.std(signal, ddof=1))
        - entropy_sample_entropy(signal, 2), 0)
    assert np.allclose(
        nk.entropy_sample(signal, dimension=2, r=0.2) -
        nolds.sampen(signal, 2, 0.2), 0)
    assert np.allclose(
        nk.entropy_sample(signal, dimension=2, r=0.2) -
        entro_py_sampen(signal, 2, 0.2, scale=False), 0)
    assert np.allclose(
        nk.entropy_sample(signal, dimension=2, r=0.2) -
        pyeeg_samp_entropy(signal, 2, 0.2), 0)

    #    import sampen
    #    sampen.sampen2(signal[0:300], mm=2, r=r)

    assert nk.entropy_sample(signal,
                             dimension=2, r=0.2) != pyentrp.sample_entropy(
                                 signal, 2, 0.2)[1]
    assert nk.entropy_sample(
        signal, dimension=2,
        r=0.2 * np.sqrt(np.var(signal))) != MultiscaleEntropy_sample_entropy(
            signal, 2, 0.2)[0.2][2]

    # MSE
    #    assert nk.entropy_multiscale(signal, 2, 0.2*np.sqrt(np.var(signal))) != np.trapz(MultiscaleEntropy_mse(signal, [i+1 for i in range(10)], 2, 0.2, return_type="list"))
    #    assert nk.entropy_multiscale(signal, 2, 0.2*np.std(signal, ddof=1)) != np.trapz(pyentrp.multiscale_entropy(signal, 2, 0.2, 10))

    # Fuzzy
    assert np.allclose(
        nk.entropy_fuzzy(signal, dimension=2, r=0.2, delay=1) -
        entro_py_fuzzyen(signal, 2, 0.2, 1, scale=False), 0)

    # DFA
    assert nk.fractal_dfa(signal, windows=np.array([
        4, 8, 12, 20
    ])) != nolds.dfa(signal, nvals=[4, 8, 12, 20], fit_exp="poly")
Example #7
def get_SampEn(idx):  # compute the sample entropy of the RR intervals
    sampEn = ent.sample_entropy(idx, 2, 0.2 * np.std(idx))
    for i in range(len(sampEn)):
        if np.isnan(sampEn[i]):
            sampEn[i] = -2
        if np.isinf(sampEn[i]):
            sampEn[i] = -1
    return sampEn
Example #8
 def __get_SampEn(self):  # compute the sample entropy of the RR intervals
     sampEn = ent.sample_entropy(self.RR_intervals, 2,
                                 0.2 * np.std(self.RR_intervals))
     for i in range(len(sampEn)):
         if np.isnan(sampEn[i]):
             sampEn[i] = -2
         if np.isinf(sampEn[i]):
             sampEn[i] = -1
     return sampEn
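The element-wise loop in Examples 7 and 8 is equivalent to the boolean-mask form used in Examples 19 and 22; for an RR-interval array rr (name assumed):

sampEn = ent.sample_entropy(rr, 2, 0.2 * np.std(rr))
sampEn[np.isnan(sampEn)] = -2  # recode the NaN sentinel, as in the loop above
sampEn[np.isinf(sampEn)] = -1  # recode the inf sentinel, as in the loop above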
Example #9
def sampl(d1):
    sa1 = []
    print("Sample started")
    for i in range(d1.shape[0]):
        X = d1[i]
        std_X = np.std(X)
        ee = entropy.sample_entropy(X, 2, 0.2 * std_X)
        sa1.append(ee[0])
    print("Sample Finished")
    return sa1
Example #10
def sampl(d1, d2, d3, d4, d5):
    # The original repeated an identical loop five times, once per input;
    # a single loop over the inputs is equivalent.
    results = []
    for d in (d1, d2, d3, d4, d5):
        sa = []
        for i in range(500):
            X = d[i]
            std_X = np.std(X)
            ee = entropy.sample_entropy(X, 2, 0.2 * std_X)
            sa.append(ee[0])  # keep the m=1 entropy
        results.append(sa)
    sa1, sa2, sa3, sa4, sa5 = results
    return (sa1, sa2, sa3, sa4, sa5)
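A hypothetical call on synthetic data (shapes assumed; np and the entropy module imported as in the surrounding examples):

rng = np.random.default_rng(0)
d1, d2, d3, d4, d5 = (rng.standard_normal((500, 128)) for _ in range(5))
sa1, sa2, sa3, sa4, sa5 = sampl(d1, d2, d3, d4, d5)
print(len(sa1))  # 500 entropies, one per row of d1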
Example #11
def extract_feature(X):
    X = X.astype(float)
    stft = np.abs(librosa.stft(X))
    mfccs = np.mean(librosa.feature.mfcc(y=X, sr=SAMPLE_RATE, n_mfcc=40).T,
                    axis=0)
    chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=SAMPLE_RATE).T,
                     axis=0)
    mel = np.mean(librosa.feature.melspectrogram(y=X, sr=SAMPLE_RATE).T, axis=0)
    shannon = [ent.shannon_entropy(X)]
    sample = ent.sample_entropy(X, 1)
    per = [ent.permutation_entropy(X)]
    fft = np.fft.fft(X) / len(X)
    fft = np.abs(fft[:len(X) // 7])
    fft = [fft[0]]  # keep only the first (DC) magnitude
    return mfccs, chroma, mel, shannon, sample, per, fft
Example #12
    def calculate_p_wave_features(self):

        # Empty dictionary
        p_wave_features = dict()

        # Get P-Wave energy bounds
        p_eng_start = self.p_time_sp - 10
        if p_eng_start < 0:
            p_eng_start = 0
        p_eng_end = self.p_time_sp + 10

        # Get end points
        start_sp = self.template_rpeak_sp - self.qrs_start_sp_manual

        # Calculate p-wave statistics
        p_wave_features['p_wave_time'] = self.p_time_sp * 1 / self.fs
        # P-wave time std in seconds (the original divided by fs twice)
        p_wave_features['p_wave_time_std'] = np.std(self.p_times_sp / self.fs, ddof=1)
        p_wave_features['p_wave_amp'] = self.p_amp
        p_wave_features['p_wave_amp_std'] = np.std(self.p_amps, ddof=1)
        p_wave_features['p_wave_eng'] = np.sum(np.power(self.median_template[p_eng_start:p_eng_end], 2))

        """
        Calculate non-linear statistics
        """
        entropy = [
            self.safe_check(
                sample_entropy(
                    self.templates[0:start_sp, col],
                    sample_length=2,
                    tolerance=0.1 * np.std(self.templates[0:start_sp, col]),
                )[0]
            )
            for col in range(self.templates.shape[1])
        ]
        p_wave_features['p_wave_entropy_mean'] = np.mean(entropy)
        p_wave_features['p_wave_entropy_std'] = np.std(entropy, ddof=1)

        higuchi_fractal = [
            hfd(self.templates[0:start_sp, col], k_max=10) for col in range(self.templates.shape[1])
        ]
        p_wave_features['p_wave_higuchi_fractal_mean'] = np.mean(higuchi_fractal)
        p_wave_features['p_wave_higuchi_fractal_std'] = np.std(higuchi_fractal, ddof=1)

        return p_wave_features
Example #13
    def calculate_t_wave_features(self):

        # Empty dictionary
        t_wave_features = dict()

        # Get T-Wave energy bounds
        t_eng_start = self.t_time_sp - 10
        t_eng_end = self.t_time_sp + 10
        if t_eng_end > self.templates.shape[0] - 1:
            t_eng_end = self.templates.shape[0] - 1

        # Get end points
        end_sp = self.template_rpeak_sp + self.qrs_end_sp_manual

        # Calculate t-wave statistics
        t_wave_features['t_wave_time'] = self.t_time_sp * 1 / self.fs
        # T-wave time std in seconds (the original divided by fs twice)
        t_wave_features['t_wave_time_std'] = np.std(self.t_times_sp / self.fs, ddof=1)
        t_wave_features['t_wave_amp'] = self.t_amp
        t_wave_features['t_wave_amp_std'] = np.std(self.t_amps, ddof=1)
        t_wave_features['t_wave_eng'] = np.sum(np.power(self.median_template[t_eng_start:t_eng_end], 2))

        """
        Calculate non-linear statistics
        """
        entropy = [
            self.safe_check(
                sample_entropy(
                    self.templates[end_sp:, col],
                    sample_length=2,
                    tolerance=0.1 * np.std(self.templates[end_sp:, col]),
                )[0]
            )
            for col in range(self.templates.shape[1])
        ]
        t_wave_features['t_wave_entropy_mean'] = np.mean(entropy)
        t_wave_features['t_wave_entropy_std'] = np.std(entropy, ddof=1)

        higuchi_fractal = [
            hfd(self.templates[end_sp:, col], k_max=10) for col in range(self.templates.shape[1])
        ]
        t_wave_features['t_wave_higuchi_fractal_mean'] = np.mean(higuchi_fractal)
        t_wave_features['t_wave_higuchi_fractal_std'] = np.std(higuchi_fractal, ddof=1)

        return t_wave_features
Example #14
    def calculate_r_peak_amplitude_features(self):

        r_peak_amplitude_features = dict()

        rpeak_indices = self.rpeaks
        rpeak_amplitudes = self.signal_filtered[rpeak_indices]

        # Basic statistics
        r_peak_amplitude_features['rpeak_min'] = np.min(rpeak_amplitudes)
        r_peak_amplitude_features['rpeak_max'] = np.max(rpeak_amplitudes)
        r_peak_amplitude_features['rpeak_mean'] = np.mean(rpeak_amplitudes)
        r_peak_amplitude_features['rpeak_std'] = np.std(rpeak_amplitudes, ddof=1)
        r_peak_amplitude_features['rpeak_skew'] = sp.stats.skew(rpeak_amplitudes)
        r_peak_amplitude_features['rpeak_kurtosis'] = sp.stats.kurtosis(rpeak_amplitudes)

        # Non-linear statistics
        r_peak_amplitude_features['rpeak_entropy'] = self.safe_check(
            sample_entropy(rpeak_amplitudes, sample_length=2, tolerance=0.1 * np.std(rpeak_amplitudes))[0]
        )
        r_peak_amplitude_features['rpeak_higuchi_fractal_dimension'] = hfd(rpeak_amplitudes, k_max=10)

        return r_peak_amplitude_features
Example #15
def test_complexity():

    signal = np.cos(np.linspace(start=0, stop=30, num=100))

    # Shannon
    assert np.allclose(
        nk.entropy_shannon(signal) - pyentrp.shannon_entropy(signal), 0)

    # Approximate
    assert np.allclose(nk.entropy_approximate(signal), 0.17364897858477146)
    assert np.allclose(
        nk.entropy_approximate(signal, 2, 0.2 * np.std(signal, ddof=1)) -
        entropy_app_entropy(signal, 2), 0)

    assert nk.entropy_approximate(
        signal, 2, 0.2 * np.std(signal, ddof=1)) != pyeeg_ap_entropy(
            signal, 2, 0.2 * np.std(signal, ddof=1))

    # Sample
    assert np.allclose(
        nk.entropy_sample(signal, 2, 0.2 * np.std(signal, ddof=1)) -
        entropy_sample_entropy(signal, 2), 0)
    assert np.allclose(
        nk.entropy_sample(signal, 2, 0.2) - nolds.sampen(signal, 2, 0.2), 0)
    assert np.allclose(
        nk.entropy_sample(signal, 2, 0.2) -
        entro_py_sampen(signal, 2, 0.2, scale=False), 0)
    assert np.allclose(
        nk.entropy_sample(signal, 2, 0.2) - pyeeg_samp_entropy(signal, 2, 0.2),
        0)

    assert nk.entropy_sample(signal, 2, 0.2) != pyentrp.sample_entropy(
        signal, 2, 0.2)[1]

    # Fuzzy
    assert np.allclose(
        nk.entropy_fuzzy(signal, 2, 0.2, 1) -
        entro_py_fuzzyen(signal, 2, 0.2, 1, scale=False), 0)
Example #16
def extract_feature(X):
    X = X.astype(float)
    stft = np.abs(librosa.stft(X))
    mfccs = np.mean(librosa.feature.mfcc(y=X, sr=SAMPLE_RATE, n_mfcc=40).T,
                    axis=0)
    chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=SAMPLE_RATE).T,
                     axis=0)
    mel = np.mean(librosa.feature.melspectrogram(y=X, sr=SAMPLE_RATE).T, axis=0)
    shannon = [ent.shannon_entropy(X)]
    sample = ent.sample_entropy(X, 1)
    spectral = [np.round(spectral_entropy(X, SAMPLE_RATE), 2)]
    per = [ent.permutation_entropy(X)]
    energy_ent = [energy_entropy(X)]
    energy_sig = [energy(X)]
    zero_cross = [zero_crossing_rate(X)]
    f, psd = welch(X, nfft=1024, fs=SAMPLE_RATE, noverlap=896, nperseg=1024)
    fft = np.fft.fft(X) / len(X)
    fft = np.abs(fft[:len(X) // 7])
    fft = [fft[0]]

    # Note: chroma is included twice in this concatenation.
    return np.concatenate(
        (mfccs, chroma, mel, shannon, sample, spectral, per, energy_ent,
         energy_sig, zero_cross, psd, chroma, fft))
Example #17
def sample_entropy(signal_df, channels):
    '''
    Calculate sample entropy of sensor signals.

    :param signal_df: dataframe housing desired sensor signals
    :param channels: channels of signal to measure sample entropy
    :return: dataframe of calculated sample entropy for each signal channel
    '''
    from pyentrp import entropy as ent
    sample_entropy_df = pd.DataFrame()

    for channel in channels:
        current_sample_ent = ent.sample_entropy(signal_df[channel], 4, 1)
        # One column per embedding dimension m = 1..4
        for m in range(4):
            sample_entropy_df[f'{channel}_sample_entropy_m{m + 1}'] = \
                [current_sample_ent[m]]

    return sample_entropy_df
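A hypothetical call, assuming signal_df holds one column per sensor channel:

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
signal_df = pd.DataFrame({'acc_x': rng.standard_normal(512),
                          'acc_y': rng.standard_normal(512)})
features = sample_entropy(signal_df, channels=['acc_x', 'acc_y'])
# One row with columns acc_x_sample_entropy_m1 ... acc_y_sample_entropy_m4.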
Example #18
def extract(epoch,
            fs,
            time_before_stimul,
            amplitude=False,
            amplitude_P300=False,
            kurtosis=False,
            skewness=False,
            std=False,
            sampen=False,
            rms=False,
            hurst=False,
            gradient=False,
            alfa=False,
            beta=False,
            theta=False,
            delta=False,
            broad_band=False,
            **kwargs):
    """
    Extract the features of ONE channel given an epoch structure

    Parameters
    ----------
    epoch : ndarray (float)
        Data for the feature calculation, shape (num_samples, num_epochs)
    fs: float
        Sampling frequency in Hz
    time_before_stimul: float
        Time before the stimulus, in seconds
    amplitude: bool
        If True calculate the peak-to-peak amplitude
    amplitude_P300: bool
        If True calculate the P300 amplitude
    kurtosis: bool
        If True calculate the kurtosis
    skewness: bool
        If True calculate the skewness
    std: bool
        If True calculate the standard deviation
    sampen: bool
        If True calculate the Sample Entropy
    rms: bool
        If True calculate the RMS
    hurst: bool
        If True calculate the Hurst exponent
    gradient: bool
        If True calculate the gradient
    alfa: bool
        If True calculate the alfa band power
    beta: bool
        If True calculate the beta band power
    theta: bool
        If True calculate the theta band power
    delta: bool
        If True calculate the delta band power
    broad_band: bool
        If True calculate the broad band power

    Returns
    -------
    feat: dict
        Extracted features.

    """

    num_samples, num_epochs = epoch.shape

    # Features initialization
    feat = {}
    feat_amplitude = []
    feat_amplitude_P300 = []
    feat_kurt = []
    feat_skew = []
    feat_std = []
    feat_rms = []
    feat_gradient = []
    feat_hurst = []
    feat_sampen = []
    feat_alfa = []
    feat_beta = []
    feat_theta = []
    feat_delta = []
    feat_broad_band = []

    # Retrieving parameters
    win = kwargs.get('window_sec', None)
    method = kwargs.get('method', 'welch')
    relative = kwargs.get('relative', True)
    amplitude_norm = kwargs.get('amplitude_norm', 'median')
    order = kwargs.get('order', 2)

    # Calculate features for each channel
    for ep in range(num_epochs):

        # Pick the current channel and good epochs
        current_epoch = epoch[:, ep]

        # UNIVARIATE FEATURES
        if amplitude:

            # Calculating the normalization factor
            if amplitude_norm == 'median':
                norm_factor = np.median(current_epoch)
            elif amplitude_norm == 'mean':
                norm_factor = np.mean(current_epoch)
            elif amplitude_norm is None:
                norm_factor = 1
            else:
                raise Exception("Unknown normalization factor")

            max_amp = np.max(current_epoch)
            min_amp = np.min(current_epoch)
            feat_amplitude.append((max_amp - min_amp) / norm_factor)

        # Mean amplitude from 300 ms onward, minus the pre-stimulus baseline
        if amplitude_P300:

            # Calculating the normalization factor
            if amplitude_norm == 'median':
                norm_factor = np.median(current_epoch)
            elif amplitude_norm == 'mean':
                norm_factor = np.mean(current_epoch)
            elif amplitude_norm is None:
                norm_factor = 1
            else:
                raise Exception("Unknown normalization factor")

            mean_300_500 = np.mean(current_epoch[round(0.300 * fs):])
            baseline = np.mean(current_epoch[:round(fs * time_before_stimul)])
            feat_amplitude_P300.append((mean_300_500 - baseline) / norm_factor)

        if kurtosis:
            feat_kurt.append(stats.kurtosis(current_epoch))

        if skewness:
            feat_skew.append(stats.skew(current_epoch))

        if std:
            feat_std.append(np.std(current_epoch))

        if gradient:
            feat_gradient.append(np.mean(np.gradient(current_epoch)))

        if rms:
            feat_rms.append(np.sqrt(np.mean(np.power(current_epoch, 2))))

        if sampen:
            feat_sampen.append(
                ent.sample_entropy(current_epoch, order,
                                   0.2 * np.std(current_epoch))[0])

        if hurst:
            feat_hurst.append(compute_Hc(current_epoch)[0])

        # FREQUENCY FEATURES
        if alfa:
            feat_alfa.append(
                band_power(current_epoch,
                           fs,
                           'alfa',
                           window_sec=win,
                           method=method,
                           relative=relative))

        if beta:
            feat_beta.append(
                band_power(current_epoch,
                           fs,
                           'beta',
                           window_sec=win,
                           method=method,
                           relative=relative))

        if theta:
            feat_theta.append(
                band_power(current_epoch,
                           fs,
                           'theta',
                           window_sec=win,
                           method=method,
                           relative=relative))

        if delta:
            feat_delta.append(
                band_power(current_epoch,
                           fs,
                           'delta',
                           window_sec=win,
                           method=method,
                           relative=relative))

        if broad_band:
            feat_broad_band.append(
                frequency_baseline(current_epoch, time_before_stimul, fs,
                                   'broad_band'))

    # SAVING THE RESULTS
    if amplitude:
        feat['amplitude'] = feat_amplitude.copy()
    if amplitude_P300:
        feat['amplitude_P300'] = feat_amplitude_P300.copy()
    if kurtosis:
        feat['kurtosis'] = feat_kurt.copy()
    if skewness:
        feat['skewness'] = feat_skew.copy()
    if std:
        feat['std'] = feat_std.copy()
    if gradient:
        feat['gradient'] = feat_gradient.copy()
    if rms:
        feat['rms'] = feat_rms.copy()
    if sampen:
        feat['sampen'] = feat_sampen.copy()
    if hurst:
        feat['hurst'] = feat_hurst.copy()
    if alfa:
        feat['alfa'] = feat_alfa.copy()
    if beta:
        feat['beta'] = feat_beta.copy()
    if theta:
        feat['theta'] = feat_theta.copy()
    if delta:
        feat['delta'] = feat_delta.copy()
    if broad_band:
        feat['broad_band'] = feat_broad_band.copy()

    return feat
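A hypothetical call with random epochs, assuming numpy (np) and pyentrp's entropy module (ent) are imported as elsewhere on this page: 100 one-second epochs sampled at 250 Hz with the stimulus 0.2 s into each epoch.

rng = np.random.default_rng(0)
epoch = rng.standard_normal((250, 100))  # (num_samples, num_epochs)
feat = extract(epoch, fs=250, time_before_stimul=0.2, std=True, sampen=True)
# feat['std'] and feat['sampen'] each hold one value per epoch.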
Example #19
def gen_data(file_list, fs, resample_num):
    tmp_array = []
    key = ['I', 'II', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6']
    for t in tqdm(range(len(file_list))):
        tmp_list = []
        s = file_list[t]
        tmp = pd.read_csv(s, sep=' ', engine='python')
        for i, z in enumerate(key):
            #             tmp[z] = scale(tmp[z])
            #             tmp_group = minmax_scale(tmp[z])
            #             tmp_group_bin = pd.cut(tmp_group, bins=10, labels=range(10))
            #             tmp_group_df = pd.DataFrame()
            #             tmp_group_df['bins'] = tmp_group_bin
            #             tmp_group_df['values'] = tmp_group
            #             tmp_group_df  = tmp_group_df.groupby('bins')['values'].agg(['count', 'min', 'max', 'mean', 'std']).values.flatten()
            #             tmp_list.extend(list(tmp_group_df))
            try:
                tmp_lead = biosppy.ecg.ecg(tmp[z],
                                           show=False,
                                           sampling_rate=fs)
            except Exception:
                # The original bare `except: pass` left tmp_lead undefined (or
                # stale from the previous lead); pad with NaNs and skip instead.
                tmp_list.extend([np.nan] * (17 + 3 * resample_num))
                continue
            rpeaks = tmp_lead['rpeaks']
            if rpeaks.shape[0] != 0:
                rr_intervals = np.diff(rpeaks)
                min_dis = rr_intervals.min()
                drr = np.diff(rr_intervals)
                r_density = (rr_intervals.shape[0] + 1) / tmp[z].shape[0] * fs
                pnn50 = drr[drr >= fs * 0.05].shape[0] / rr_intervals.shape[0]
                rmssd = np.sqrt(np.mean(drr * drr))
                samp_entrp = ent.sample_entropy(rr_intervals, 2,
                                                0.2 * np.std(rr_intervals))
                samp_entrp[np.isnan(samp_entrp)] = -2
                samp_entrp[np.isinf(samp_entrp)] = -1
                tmp_list.extend([
                    rr_intervals.min(),
                    rr_intervals.max(),
                    rr_intervals.mean(),
                    rr_intervals.std(),
                    skew(rr_intervals),
                    kurtosis(rr_intervals), r_density, pnn50, rmssd,
                    samp_entrp[0], samp_entrp[1]
                ])

            #                 rr_dis_values = np.array([tmp[z].values[rpeaks[i]:rpeaks[i+1]][:min_dis] for i in range(rpeaks.shape[0]-1)])
            #                 rr_dis_values_min = rr_dis_values.min(axis=0)
            #                 rr_dis_values_max = rr_dis_values.max(axis=0)
            #                 rr_dis_values_diff = rr_dis_values_max - rr_dis_values_min
            #                 rr_dis_values_mean = rr_dis_values.mean(axis=0)
            #                 rr_dis_values_std = rr_dis_values.std(axis=0)
            #                 for c in [rr_dis_values_diff, rr_dis_values_mean, rr_dis_values_std]:
            #                     tmp_rr_rmp = resample(c, num=resample_num)
            #                     tmp_list.extend(list(tmp_rr_rmp))
            else:
                tmp_list.extend([np.nan] * 11)
            heart_rate = tmp_lead['heart_rate']
            if heart_rate.shape[0] != 0:
                tmp_list.extend([
                    heart_rate.min(),
                    heart_rate.max(),
                    heart_rate.mean(),
                    heart_rate.std(),
                    skew(heart_rate),
                    kurtosis(heart_rate)
                ])
            else:
                tmp_list.extend([np.nan] * 6)
            templates = tmp_lead['templates']
            templates_min = templates.min(axis=0)
            templates_max = templates.max(axis=0)
            templates_diff = templates_max - templates_min
            templates_mean = templates.mean(axis=0)
            templates_std = templates.std(axis=0)
            for j in [templates_diff, templates_mean, templates_std]:
                tmp_rmp = resample(j, num=resample_num)
                tmp_list.extend(list(tmp_rmp))
        tmp_array.append(tmp_list)
    tmp_df = pd.DataFrame(tmp_array)
    tmp_df['filename'] = file_list
    tmp_df['filename'] = tmp_df['filename'].apply(lambda x: x.split('/')[-1])
    return tmp_df
Example #20
    def calculate_rri_nonlinear_statistics(self,
                                           rri,
                                           diff_rri,
                                           diff2_rri,
                                           suffix=''):

        # Empty dictionary
        rri_nonlinear_statistics = dict()

        # Non-linear RR statistics
        if len(rri) > 1:
            rri_nonlinear_statistics['rri_approximate_entropy' + suffix] = \
                self.safe_check(pyeeg.ap_entropy(rri, M=2, R=0.1*np.std(rri)))
            rri_nonlinear_statistics['rri_sample_entropy' + suffix] = \
                self.safe_check(ent.sample_entropy(rri, sample_length=2, tolerance=0.1*np.std(rri))[0])
            rri_nonlinear_statistics['rri_multiscale_entropy' + suffix] = \
                self.safe_check(ent.multiscale_entropy(rri, sample_length=2, tolerance=0.1*np.std(rri))[0])
            rri_nonlinear_statistics['rri_permutation_entropy' + suffix] = \
                self.safe_check(ent.permutation_entropy(rri, m=2, delay=1))
            rri_nonlinear_statistics['rri_multiscale_permutation_entropy' + suffix] = \
                self.safe_check(ent.multiscale_permutation_entropy(rri, m=2, delay=1, scale=1)[0])
            rri_nonlinear_statistics['rri_fisher_info' + suffix] = fisher_info(
                rri, tau=1, de=2)
            hjorth_parameters = hjorth(rri)
            rri_nonlinear_statistics['rri_activity' +
                                     suffix] = hjorth_parameters[0]
            rri_nonlinear_statistics['rri_complexity' +
                                     suffix] = hjorth_parameters[1]
            rri_nonlinear_statistics['rri_morbidity' +
                                     suffix] = hjorth_parameters[2]
            # Note: pfd() is a Petrosian fractal dimension, not a Hurst
            # exponent; the key name is misleading (same for diff/diff2 below).
            rri_nonlinear_statistics['rri_hurst_exponent' + suffix] = pfd(rri)
            rri_nonlinear_statistics['rri_svd_entropy' + suffix] = svd_entropy(
                rri, tau=2, de=2)
            rri_nonlinear_statistics['rri_petrosian_fractal_dimension' +
                                     suffix] = pyeeg.pfd(rri)
        else:
            rri_nonlinear_statistics['rri_approximate_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['rri_sample_entropy' + suffix] = np.nan
            rri_nonlinear_statistics['rri_multiscale_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['rri_permutation_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['rri_multiscale_permutation_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['rri_fisher_info' + suffix] = np.nan
            rri_nonlinear_statistics['rri_activity' + suffix] = np.nan
            rri_nonlinear_statistics['rri_complexity' + suffix] = np.nan
            rri_nonlinear_statistics['rri_morbidity' + suffix] = np.nan
            rri_nonlinear_statistics['rri_hurst_exponent' + suffix] = np.nan
            rri_nonlinear_statistics['rri_svd_entropy' + suffix] = np.nan
            rri_nonlinear_statistics['rri_petrosian_fractal_dimension' +
                                     suffix] = np.nan

        # Non-linear RR difference statistics
        if len(diff_rri) > 1:
            rri_nonlinear_statistics['diff_rri_approximate_entropy' + suffix] = \
                self.safe_check(pyeeg.ap_entropy(diff_rri, M=2, R=0.1*np.std(rri)))
            rri_nonlinear_statistics['diff_rri_sample_entropy' + suffix] = \
                self.safe_check(ent.sample_entropy(diff_rri, sample_length=2, tolerance=0.1*np.std(rri))[0])
            rri_nonlinear_statistics['diff_rri_multiscale_entropy' + suffix] = \
                self.safe_check(ent.multiscale_entropy(diff_rri, sample_length=2, tolerance=0.1*np.std(rri))[0])
            rri_nonlinear_statistics['diff_rri_permutation_entropy' + suffix] = \
                self.safe_check(ent.permutation_entropy(diff_rri, m=2, delay=1))
            rri_nonlinear_statistics['diff_rri_multiscale_permutation_entropy' + suffix] = \
                self.safe_check(ent.multiscale_permutation_entropy(diff_rri, m=2, delay=1, scale=1)[0])
            rri_nonlinear_statistics['diff_rri_fisher_info' +
                                     suffix] = fisher_info(diff_rri,
                                                           tau=1,
                                                           de=2)
            hjorth_parameters = hjorth(diff_rri)
            rri_nonlinear_statistics['diff_rri_activity' +
                                     suffix] = hjorth_parameters[0]
            rri_nonlinear_statistics['diff_rri_complexity' +
                                     suffix] = hjorth_parameters[1]
            rri_nonlinear_statistics['diff_rri_morbidity' +
                                     suffix] = hjorth_parameters[2]
            rri_nonlinear_statistics['diff_rri_hurst_exponent' +
                                     suffix] = pfd(diff_rri)
            rri_nonlinear_statistics['diff_rri_svd_entropy' +
                                     suffix] = svd_entropy(diff_rri,
                                                           tau=2,
                                                           de=2)
            rri_nonlinear_statistics['diff_rri_petrosian_fractal_dimension' +
                                     suffix] = pyeeg.pfd(diff_rri)
        else:
            rri_nonlinear_statistics['diff_rri_approximate_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['diff_rri_sample_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['diff_rri_multiscale_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['diff_rri_permutation_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['diff_rri_multiscale_permutation_entropy'
                                     + suffix] = np.nan
            rri_nonlinear_statistics['diff_rri_fisher_info' + suffix] = np.nan
            rri_nonlinear_statistics['diff_rri_activity' + suffix] = np.nan
            rri_nonlinear_statistics['diff_rri_complexity' + suffix] = np.nan
            rri_nonlinear_statistics['diff_rri_morbidity' + suffix] = np.nan
            rri_nonlinear_statistics['diff_rri_hurst_exponent' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['diff_rri_svd_entropy' + suffix] = np.nan
            rri_nonlinear_statistics['diff_rri_petrosian_fractal_dimension' +
                                     suffix] = np.nan

        # Non-linear RR difference difference statistics
        if len(diff2_rri) > 1:
            rri_nonlinear_statistics['diff2_rri_shannon_entropy' + suffix] = \
                self.safe_check(ent.shannon_entropy(diff2_rri))
            rri_nonlinear_statistics['diff2_rri_approximate_entropy' + suffix] = \
                self.safe_check(pyeeg.ap_entropy(diff2_rri, M=2, R=0.1*np.std(rri)))
            rri_nonlinear_statistics['diff2_rri_sample_entropy' + suffix] = \
                self.safe_check(ent.sample_entropy(diff2_rri, sample_length=2, tolerance=0.1*np.std(rri))[0])
            rri_nonlinear_statistics['diff2_rri_multiscale_entropy' + suffix] = \
                self.safe_check(ent.multiscale_entropy(diff2_rri, sample_length=2, tolerance=0.1*np.std(rri))[0])
            rri_nonlinear_statistics['diff2_rri_permutation_entropy' + suffix] = \
                self.safe_check(ent.permutation_entropy(diff2_rri, m=2, delay=1))
            rri_nonlinear_statistics['diff2_rri_multiscale_permutation_entropy' + suffix] = \
                self.safe_check(ent.multiscale_permutation_entropy(diff2_rri, m=2, delay=1, scale=1)[0])
            rri_nonlinear_statistics['diff2_rri_fisher_info' +
                                     suffix] = fisher_info(diff2_rri,
                                                           tau=1,
                                                           de=2)
            hjorth_parameters = hjorth(diff2_rri)
            rri_nonlinear_statistics['diff2_rri_activity' +
                                     suffix] = hjorth_parameters[0]
            rri_nonlinear_statistics['diff2_rri_complexity' +
                                     suffix] = hjorth_parameters[1]
            rri_nonlinear_statistics['diff2_rri_morbidity' +
                                     suffix] = hjorth_parameters[2]
            rri_nonlinear_statistics['diff2_rri_hurst_exponent' +
                                     suffix] = pfd(diff2_rri)
            rri_nonlinear_statistics['diff2_rri_svd_entropy' +
                                     suffix] = svd_entropy(diff2_rri,
                                                           tau=2,
                                                           de=2)
            rri_nonlinear_statistics['diff2_rri_petrosian_fractal_dimension' +
                                     suffix] = pyeeg.pfd(diff2_rri)
        else:
            rri_nonlinear_statistics['diff2_rri_shannon_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_approximate_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_sample_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_multiscale_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_permutation_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_multiscale_permutation_entropy'
                                     + suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_fisher_info' + suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_activity' + suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_complexity' + suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_morbidity' + suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_hurst_exponent' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_svd_entropy' + suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_petrosian_fractal_dimension' +
                                     suffix] = np.nan

        return rri_nonlinear_statistics
Example #21
rolling = rolling_window(df_std.logR_ask, window_size, window_size)
rolling = rolling_window(df_QN_laplace_std.values.transpose()[0], window_size, window_size)
rolling_ns = rolling_window(df.ask, window_size, 10)
rolling_ts = rolling_window(df.index, window_size, 10)
df_ = pd.DataFrame(rolling)

sw_1 = rolling[1]
sw_1_ns = rolling_ns[1]  # non-scaled window (the original reused rolling[1])
nolds.lyap_r(sw_1, emb_dim=emb_dim)
nolds.lyap_e(sw_1, emb_dim=emb_dim)
nolds.sampen(sw_1, emb_dim=emb_dim)
nolds.hurst_rs(sw_1)
nolds.corr_dim(sw_1, emb_dim=emb_dim)
nolds.dfa(sw_1)
ent.shannon_entropy(sw_1)  # questionable here: no discrete p_i states, and it ignores temporal order
ent.sample_entropy(sw_1, sample_length=10)  # sample_length = highest embedding dimension evaluated
#ent.multiscale_entropy(sw_1, sample_length=10, tolerance=0.1*np.std(sw_1))  # tolerance = similarity threshold r

# Parameter choice: "Practical considerations of permutation entropy: A tutorial review"
ent.permutation_entropy(sw_1, m=8, delay=emb_dim)  # see the reference above
#ent.composite_multiscale_entropy()
lempel_ziv_complexity(sw_1)
gzip_compress_ratio(sw_1_ns, 9)


# https://www.researchgate.net/post/How_can_we_find_out_which_value_of_embedding_dimensions_is_more_accurate
# When choosing emb_dim for a Takens embedding, each dimension should have at
# least 10 data points: 10^1 points for 1D, 10^2 for 2D, ..., 10^6 for 6D.

# FALSE NEAREST NEIGHBOR FOR DETERMINING MINIMAL EMBEDDING DIMENSION

# MEASURES OF COMPLEXITY
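The rule of thumb in the note above (at least 10 points per embedding dimension, i.e. 10^d points for d dimensions) can be written as a small helper:

import numpy as np

def max_embedding_dim(n_points):
    # At most log10(N) embedding dimensions by the 10-points-per-dimension rule.
    return max(1, int(np.floor(np.log10(n_points))))

print(max_embedding_dim(10_000))  # 4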
Example #22
def gen_data(file_list, fs, resample_num):
    tmp_array = []
    key = ['I', 'II', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6']
    for t in tqdm(range(len(file_list))):
        tmp_list = []
        s = file_list[t]
        tmp = pd.read_csv(s, sep=' ', engine='python')
        for i, z in enumerate(key):
            try:
                tmp_lead = biosppy.ecg.ecg(tmp[z],
                                           show=False,
                                           sampling_rate=fs)
            except Exception:
                # Pad with NaNs rather than reusing a stale tmp_lead.
                tmp_list.extend([np.nan] * (17 + 3 * resample_num))
                continue
            rpeaks = tmp_lead['rpeaks']
            if rpeaks.shape[0] != 0:
                rr_intervals = np.diff(rpeaks)
                min_dis = rr_intervals.min()
                drr = np.diff(rr_intervals)
                r_density = (rr_intervals.shape[0] + 1) / tmp[z].shape[0] * fs
                pnn50 = drr[drr >= fs * 0.05].shape[0] / rr_intervals.shape[0]
                rmssd = np.sqrt(np.mean(drr * drr))
                samp_entrp = ent.sample_entropy(rr_intervals, 2,
                                                0.2 * np.std(rr_intervals))
                samp_entrp[np.isnan(samp_entrp)] = -2
                samp_entrp[np.isinf(samp_entrp)] = -1
                tmp_list.extend([
                    rr_intervals.min(),
                    rr_intervals.max(),
                    rr_intervals.mean(),
                    rr_intervals.std(),
                    skew(rr_intervals),
                    kurtosis(rr_intervals), r_density, pnn50, rmssd,
                    samp_entrp[0], samp_entrp[1]
                ])
            else:
                tmp_list.extend([np.nan] * 11)
            heart_rate = tmp_lead['heart_rate']
            if heart_rate.shape[0] != 0:
                tmp_list.extend([
                    heart_rate.min(),
                    heart_rate.max(),
                    heart_rate.mean(),
                    heart_rate.std(),
                    skew(heart_rate),
                    kurtosis(heart_rate)
                ])
            else:
                tmp_list.extend([np.nan] * 6)
            templates = tmp_lead['templates']
            templates_min = templates.min(axis=0)
            templates_max = templates.max(axis=0)
            templates_diff = templates_max - templates_min
            templates_mean = templates.mean(axis=0)
            templates_std = templates.std(axis=0)
            for j in [templates_diff, templates_mean, templates_std]:
                tmp_rmp = resample(j, num=resample_num)
                tmp_list.extend(list(tmp_rmp))
        tmp_array.append(tmp_list)
    tmp_df = pd.DataFrame(tmp_array)
    return tmp_df
Example #23
def entropy(sig):
    # Sample entropy for embedding dimensions 1..4 with tolerance r = 0.2 * std.
    std_ts = np.std(sig)
    return ent.sample_entropy(sig, 4, 0.2 * std_ts)