Example #1
def get_sampen(arr, i):
    sampen = []

    for data in arr:
        sampen.append(data[i])


    return sampen2(normalize_data(sampen))
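
A minimal usage sketch for the column helper above, assuming normalize_data and sampen2 are the sampen package's helpers (as the surrounding examples suggest); the data here are made up:

# hypothetical usage of get_sampen on toy row-wise data
from sampen import normalize_data, sampen2

rows = [[float(i % 7), 5.0] for i in range(60)]
print(get_sampen(rows, 0))  # list of (m, entropy, standard error) tuples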
Example #2
    def initialize_parameters(self, vis=False):

        rate, sig = wav.read(filename=self.whole_name)
        self.total_length = sig.shape[0] / rate
        self.sampling_rate = rate
        t = np.arange(sig.shape[0]) / rate
        analytic_signal = hilbert(sig)  # analytic signal via the Hilbert transform
        amplitude_envelope = np.abs(analytic_signal)
        #instantaneous_phase = np.unwrap(np.angle(analytic_signal))
        #instantaneous_frequency = (np.diff(instantaneous_phase) /(2.0 * np.pi) * rate)
        normalized_amplitude_envelope = (
            amplitude_envelope -
            np.mean(amplitude_envelope)) / np.std(amplitude_envelope)
        normalized_amplitude_envelope_variance = np.var(
            normalized_amplitude_envelope)
        self.normalized_amplitude_envelope_kurtosis = scipy.stats.kurtosis(
            normalized_amplitude_envelope)
        self.normalized_amplitude_envelope_variance = normalized_amplitude_envelope_variance
        self.signal_kurtosis = scipy.stats.kurtosis(sig)
        smen_duration = 3  # seconds
        self.sample_entropy = sampen2(
            normalized_amplitude_envelope[t < smen_duration], 2, 0.0008)
        max_time_to_show = 2
        if vis:
            fig = plt.figure()
            ax0 = fig.add_subplot(111)
            max_time_to_show = 2
            ax0.plot(t[t < max_time_to_show],
                     sig[t < max_time_to_show],
                     label='signal')
            ax0.plot(t[t < max_time_to_show],
                     amplitude_envelope[t < max_time_to_show],
                     label='envelope')
            ax0.set_xlabel("time in seconds")
            ax0.legend()

        # Phase (instantaneous-frequency plot, kept for reference)
        #ax1 = fig.add_subplot(212)
        #ax1.plot(t[1:], instantaneous_frequency)
        #ax1.set_xlabel("time in seconds")
        #ax1.set_ylim(0.0, 120.0)
        max_time_to_show = 10
        time_to_correlate = 1.5
        corr_idx = int(time_to_correlate * rate)
        result = np.correlate(normalized_amplitude_envelope[0:corr_idx],
                              normalized_amplitude_envelope,
                              mode='full')
        result = result[corr_idx:]**2
        normalized_result = (result - np.mean(result)) / np.std(result)
        self.max_correlation_peak = np.max(normalized_result[500:2330])
        if vis:
            plt.figure()
            # normalized_result is one sample shorter than t (the full
            # correlation was trimmed by corr_idx), so align the time axis
            mask = t[:-1] < max_time_to_show
            plt.plot(t[:-1][mask], normalized_result[mask])
            plt.show()
Example #3
    def test_sampen2_with_defaults(self):
        data = []

        with open(self.file_path, 'r') as file:
            for row in file:
                data.append(float(row.strip(' \t\n\r')))

        self.assertEqual(sampen2(data, mm=2, r=0.2, normalize=False),
                         [(0, 2.140629540027156, 0.0028357991885715863),
                          (1, 2.162868347337613, 0.004903248034526253),
                          (2, 2.123328492035711, 0.007596323621379352)])
Example #4
def getSampEn(vector, m=2, r_multiply_by_sigma=.2):
    vector_np = np.asarray(vector)
    r = r_multiply_by_sigma * np.std(vector_np)
    results = sampen.sampen2(data=vector, mm=m, r=r)
    results_SampEN = []
    for x in np.array(results)[:, 1]:
        if x is not None:
            results_SampEN.append(x)
        else:
            results_SampEN.append(-100.)
    return list(results_SampEN)
Example #5
def toast():
    # test get max
    arr = [[1, 2, 3], [1, 2, 3], [1, 5, 6]]
    assert get_max(arr, 1) == (5, 2)

    # test get min
    arr = [[1, 1, 3], [1, 2, 3], [1, 5, 6]]
    assert get_min(arr, 1) == (1, 0)

    argo = [1, 3, 5, 7, 53, 4, 586, 54, 5645, 756, 76, 346, 45758, 56, 42,
            223, 6, 3, 6, 3, 6, 3, 0, -2, -68756, -6456, 0, 0, 0, 345, -12,
            -34, -34343, 342, 654, -34, 0]
    print(sampen2(argo, 2))
Example #6
def mse_parc(imgfile, maskfile, atlasfile, outname, m, r, scales, verbose):
    """
    Main function to run all steps for analysis on parcellated data
    """

    # load data
    datadict = load_data(imgfile, maskfile, atlasfile, verbose)
    imgdata = datadict["imgdata"]
    mask = datadict["mask"]
    atlasdata = datadict["atlasdata"]
    regions = datadict["regions"]
    nregions = datadict["nregions"]
    numtps = datadict["numtps"]

    # parcellate data
    parc_data = parcellate_data(imgdata, atlasdata, numtps, regions,
                                nregions, verbose)
    # standardize data
    parc_zdata = standardize_data(parc_data, nregions, verbose)

    # pre-allocate results array
    nscales = len(scales)
    result = np.zeros((nregions, nscales))

    # loop over regions
    for region in range(0, nregions):
        if verbose:
            print("Working on region %d of %d" % (region + 1, nregions))

        for scale_idx, scale in enumerate(scales):
            # do coarsegraining
            s = coarsegraining(parc_zdata[region, :], scale)

            # # compute sample entropy
            # [sampe, A, B] = sample_entropy(s,m+1,r)
            #
            # # grab sample entropy at specific scale
            # result[region, scale_idx] = sampe[m]

            # compute sample entropy
            sampe = se.sampen2(s, mm=m + 1, r=r, normalize=False)

            # grab sample entropy at specific scale
            result[region, scale_idx] = sampe[m][1]

    # save parcellated degree centrality image to disk
    if outname is not None:
        fname2save = "%s_mse_parc.nii.gz" % outname
        save_mse_parc_img(result, atlasfile, regions, fname2save, verbose)
        fname2save = "%s_mse_parc.csv" % outname
        save_mse_parc_csv(result, fname2save, regions)

    return result
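
The coarsegraining helper is not shown in this example. A minimal sketch of the standard multiscale-entropy coarse-graining step (averaging non-overlapping windows of length scale), offered as an assumption about the helper rather than the example's actual source:

import numpy as np

def coarsegraining(ts, scale):
    # average non-overlapping windows of length `scale` (standard MSE coarse-graining)
    ts = np.asarray(ts).ravel()
    n = ts.shape[0] // scale
    return ts[:n * scale].reshape(n, scale).mean(axis=1)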
Example #7
    def mean_sample_entropy_for_segment(self, segment):
        segments = self.get_segment(segment)
        segments = filter(lambda x: x.shape[0] != 0, segments)
        try:
            segments = np.concatenate(list(segments))
        except ValueError:
            return np.nan
        # segments = map(lambda x: entr.sample_entropy(x.ravel(), 1, .2 * np.nanstd(x))[0], segments)
        # segments = list(segments)
        try:
            return smp.sampen2(list(segments.ravel())[:300], 1, normalize=True)[1][1]
        except (ValueError, ZeroDivisionError):
            return np.nan
Example #8
    def test_sampen2_with_defaults(self):
        data = []

        with open(self.file_path, 'r') as file:
            for row in file:
                data.append(float(row.strip(' \t\n\r')))

        self.assertEqual(
            sampen2(data, mm=2, r=0.2, normalize=False),
            [
                (0, 2.140629540027156, 0.0028357991885715863),
                (1, 2.162868347337613, 0.004903248034526253),
                (2, 2.123328492035711, 0.007596323621379352)
            ]
        )
Example #9
    def test_sampen2_matching_makefile(self):
        data = []

        with open(self.file_path, 'r') as file:
            for row in file:
                data.append(float(row.strip(' \t\n\r')))

        self.assertEqual(sampen2(data, mm=5, normalize=True), [
            (0, 2.196817997610929, 0.002684778756853663),
            (1, 2.2248168592127824, 0.004639787747652105),
            (2, 2.1972245773362196, 0.007540128072706757),
            (3, 2.1552015875613715, 0.017693023262169073),
            (4, 2.315007612992603, 0.0331496460180921),
            (5, None, None),
        ])
Example #10
def samp_ent(x, m=3, r=0.2):
    """
    https://sampen.readthedocs.io/en/stable/#documentation
    :param r:
    :param m:
    :param x: time series data
    :return: sample entropy
    """
    start_time = time.time()
    se = sampen2(x, mm=m, r=r)
    elapsed_time = time.time() - start_time
    rdo = se[-1][1]  # entropy at the largest template length
    logger.info("Sample Entropy %s", rdo)
    logger.debug("Elapsed time to calculate sample entropy is %s",
                 elapsed_time)
    return rdo
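
A usage sketch, assuming the module-level logger and the imports (time, sampen2) that this snippet relies on; the series is synthetic:

import logging
import math
import time
from sampen import sampen2

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

series = [math.sin(i / 5.0) for i in range(400)]  # toy time series
print(samp_ent(series, m=2, r=0.2))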
Example #11
    def test_sampen2_matching_makefile(self):
        data = []

        with open(self.file_path, 'r') as file:
            for row in file:
                data.append(float(row.strip(' \t\n\r')))

        self.assertEqual(
            sampen2(data, mm=5, normalize=True),
            [
                (0, 2.196817997610929, 0.002684778756853663),
                (1, 2.2248168592127824, 0.004639787747652105),
                (2, 2.1972245773362196, 0.007540128072706757),
                (3, 2.1552015875613715, 0.017693023262169073),
                (4, 2.315007612992603, 0.0331496460180921),
                (5, None, None),
            ]
        )
Example #12
def seSQI(sig, rate, total_length):

    hilbert_envelope = hilbert(sig)  # analytic signal via the Hilbert transform
    amplitude_envelope = np.abs(hilbert_envelope)
    # instantaneous_phase = np.unwrap(np.angle(hilbert_envelope))
    # instantaneous_frequency = (np.diff(instantaneous_phase) /(2.0 * np.pi) * rate)
    normalized_amplitude_envelope = (amplitude_envelope - np.mean(amplitude_envelope)) / np.std(amplitude_envelope)

    # Calculate the autocorrelation of the envelope, normalize it, and remove the peak
    # at lag 0 (from David Springer, "Automated signal quality assessment...")
    auto_corr_sig = np.correlate(normalized_amplitude_envelope, normalized_amplitude_envelope, mode='full')
    trunc_sig = int(np.floor(auto_corr_sig.size / 2))
    auto_corr_sig = auto_corr_sig[trunc_sig:]
    auto_corr_sig_norm = (auto_corr_sig - np.mean(auto_corr_sig)) / np.std(auto_corr_sig)
    first_zero_cross_idx = (np.where(np.diff(auto_corr_sig_norm) > 0))[0][0] + 1
    end_idx = (first_zero_cross_idx + 5 * rate
               if first_zero_cross_idx + 5 * rate < total_length * rate
               else np.size(auto_corr_sig_norm))
    auto_corr_sig_norm1 = auto_corr_sig_norm[first_zero_cross_idx:end_idx]

    # Calculate the sample entropy of the windowed autocorrelation
    return (sampen2(auto_corr_sig_norm1, 2, 0.0008)[2][1],
            normalized_amplitude_envelope, auto_corr_sig_norm)
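
A hedged usage sketch for seSQI; the WAV path is hypothetical, and the imports mirror those implied by Example #2:

import numpy as np
from scipy.io import wavfile as wav
from scipy.signal import hilbert
from sampen import sampen2

rate, sig = wav.read('heart_sound.wav')  # hypothetical mono recording
quality, envelope, autocorr = seSQI(sig.astype(float), rate, sig.shape[0] / rate)
print(quality)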
Example #13
def getSampEn(vector, m=2, r_multiply_by_sigma=.2):
    """
    sample entropy
    NOTE: returns 3 values
    """
    vector = np.asarray(vector)
    r = r_multiply_by_sigma * np.std(vector)
    try:
        results = sampen.sampen2(data=vector.tolist(), mm=m, r=r)
    except Exception:  # fall back if sampen2 fails (e.g., degenerate input)
        return [0.0, 0.0, 0.0]

    results_SampEN = []
    for x in np.array(results)[:, 1]:
        if x is not None:
            results_SampEN.append(x)
        else:
            results_SampEN.append(-100.)

    return list(results_SampEN)
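
Usage sketch, assuming import sampen and numpy as np; undefined template lengths come back as the -100.0 sentinel, and any sampen2 exception yields [0.0, 0.0, 0.0]:

import numpy as np
import sampen

rng = np.random.default_rng(0)
vec = rng.standard_normal(300)  # toy input vector
print(getSampEn(vec))  # [SampEn at m=0, m=1, m=2]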
Example #14
def sampEntropy(rawEMGSignal):
    """
    Parameters
    ----------
    rawEMGSignal : ndarray
        an epoch of raw emg-signal

    Returns
    -------
    feature_value : float
        sample entropy calculated from the rawEMGSignal for the parameters m = 2 and r = 0.2 * std

    """
    copy_rawEMGSignal = copy.copy(rawEMGSignal)  # shallow copy
    copy_feature = sampen2(normalize_data(list(copy_rawEMGSignal)))[2][1]
    if copy_feature is None:
        feature_value = 0
    else:
        feature_value = copy_feature

    return feature_value
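
A quick check of sampEntropy; the [2][1] index selects the entropy estimate at template length m = 2. The epoch below is synthetic, standing in for raw EMG:

import copy
import math
from sampen import normalize_data, sampen2

emg_epoch = [math.sin(i / 3.0) for i in range(500)]  # stand-in for an EMG epoch
print(sampEntropy(emg_epoch))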
Example #15
plt.subplot(121)
plt.plot(rick_s[:sample])
plt.xlabel(f'time samples (first {sample})',fontsize=15)
plt.ylabel('', fontsize=15)

plt.subplot(122)
plt.plot(rick_s)
plt.xlabel('time samples',fontsize=15)
plt.ylabel('', fontsize=15)

plt.show()
#%%
m = []
se = []
print('RICK SERIE WAV')
samp_ent = sampen.sampen2(rick_s.tolist(), mm=3, normalize=True)
for i in range(3):
    print(f'\nm={i+1}')
    m.append(i+1)
    ent_mel, mel_A, mel_B, mel_i, mel_j, mel_match, mel_xm = sampen_own(
        rick_s, i + 1, 0.2 * np.std(rick_s), 'max')
    se.append(ent_mel)

    print(f'Entropy (own): {ent_mel}')
    print(f'Entropy (sampen2): {samp_ent[i+1][1]}')

rick_entropy = pd.DataFrame({'m':m,
                             'entropy':se})


Example #16
def emg_sampen(signal):
    # entropy values at m = 0, 1, 2 from sampen2's (m, entropy, stderr) tuples
    results = sampen2(signal)
    return np.array([results[0][1], results[1][1], results[2][1]])
Example #17
plt.subplot(122)
plt.plot(rick)
plt.xlabel('time samples',fontsize=15)
plt.ylabel('', fontsize=15)

plt.show()

#%%
r_rick = 0.2*np.std(rick)
print(r_rick)
#%% RICK ENTROPY
rick_ent,_,_,_,_,_,_ = sampen_own(rick[:5000], 2, r_rick,'max')
print(rick_ent)

rick_ent = sampen.sampen2(rick[:5000].tolist(), mm=2, normalize=True)
print(rick_ent[2])
#%%
print('RICK SERIE WAV')
for i in range(3):
    ent_mel, mel_A, mel_B, mel_i, mel_j, mel_match, mel_xm = sampen_own(
        rick[:5000], i + 1, 0.2 * np.std(rick[:5000]), 'max')
    samp_ent = sampen.sampen2(rick[:5000].tolist(), mm=3, normalize=True)
    print(f'\nEntropy (own): {ent_mel}')
    print(f'Entropy (sampen2): {samp_ent[i+1][1]}')

Example #18
def mse_img(imgfile, maskfile, outname, m, r, scales, verbose):
    """
    Main function to run all steps for analysis on voxel-wise data
    """

    if verbose:
        print("Computing voxel-wise mse map")

    # load data
    atlasfile = None
    datadict = load_data(imgfile, maskfile, atlasfile, verbose)
    imgdata = datadict["imgdata"]
    mask = datadict["mask"]
    numvoxels = datadict["nregions"]
    numtps = datadict["numtps"]
    voxsize = datadict["voxsize"]

    # grab indices of brain voxels within mask
    indices = np.transpose(np.nonzero(mask))
    imgts = imgdata[indices[:, 0], indices[:, 1], indices[:, 2]]

    # standardize data
    imgts = standardize_data(imgts, numvoxels, verbose)

    # pre-allocate 4d result array
    nscales = len(scales)
    mse_dim = list(mask.shape)
    mse_dim.append(nscales)
    mse_dim = tuple(mse_dim)
    result = np.zeros(mse_dim)

    # loop over scales
    for scale_idx, scale in enumerate(scales):
        if verbose:
            print("Working on scale %d" % scale)

        tmp_img = np.zeros(mask.shape)

        # loop over voxels
        for basevoxel in range(0, numvoxels):
            if verbose:
                print("Working on %d voxel of %d voxels" %
                      (basevoxel + 1, numvoxels))

            # get x, y, z coords for the voxel
            x, y, z = indices[basevoxel, :]

            # grab specific voxel's time-series
            ts = np.array(imgts[basevoxel, :]).reshape((numtps, 1))

            # do coarsegraining
            s = coarsegraining(ts, scale)

            # # compute sample entropy
            # [sampe, A, B] = sample_entropy(s,m+1,r)
            #
            # # grab sample entropy at specific scale
            # tmp_img[x,y,z] = sampe[m]

            # compute sample entropy
            try:
                sampe = se.sampen2(s, mm=m + 1, r=r, normalize=False)
                # grab sample entropy at specific scale
                tmp_img[x, y, z] = sampe[m][1]
            except ZeroDivisionError:
                # grab sample entropy at specific scale
                tmp_img[x, y, z] = float("Inf")

        result[:, :, :, scale_idx] = tmp_img

    # save result to disk
    if outname is not None:
        fname2save = "%s_mse.nii.gz" % outname
        save_mse_vox_img(result, imgfile, fname2save, verbose)

    return result
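
As the tests in Examples #3 and #9 show, sampen2 returns a list of (m, entropy, standard error) tuples indexed by template length, which is why the code above reads sampe[m][1]. A minimal illustration:

from sampen import sampen2

s = [0.1, 0.5, 0.3, 0.9, 0.2, 0.8, 0.4, 0.7] * 40  # toy series
sampe = sampen2(s, mm=3, r=0.2, normalize=True)
print(sampe[2])     # (2, entropy, standard error)
print(sampe[2][1])  # the value stored in the result array above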
Example #19
def get_data(key):
    sig, fields = wfdb.srdsamp(recordname=key, channels=[1, 2, 5])
    sig_processed = [preprocess(record * 2000) for record in sig.T]
    sig_processed = np.array(sig_processed)
    n = int(3.072 * fs_resampled)
    n_segment = sig_processed.shape[1] // n
    segments_ch2 = np.reshape(sig_processed[0, :n_segment * n],
                              [n_segment, -1])
    segments_ch3 = np.reshape(sig_processed[1, :n_segment * n],
                              [n_segment, -1])
    segments_ch_avf = np.reshape(sig_processed[2, :n_segment * n],
                                 [n_segment, -1])
    features = []
    for i in range(n_segment):
        coeff_ch2 = pywt.swt(data=segments_ch2[i], wavelet='db5', level=6)
        coeff_ch3 = pywt.swt(data=segments_ch3[i], wavelet='db5', level=6)
        coeff_ch_avf = pywt.swt(data=segments_ch_avf[i],
                                wavelet='db5',
                                level=6)
        coeff_2_ch2 = pywt.swt(data=segments_ch2[i]**2, wavelet='db5', level=6)
        coeff_2_ch3 = pywt.swt(data=segments_ch3[i]**2, wavelet='db5', level=6)
        coeff_2_ch_avf = pywt.swt(data=segments_ch_avf[i]**2,
                                  wavelet='db5',
                                  level=6)

        sen_d_1_3_3 = sampen.sampen2(data=list(coeff_ch3[-3][1]),
                                     mm=2,
                                     r=0.2,
                                     normalize=True)[2][1]
        nse_a_2_3_3 = compute_en(coeff_2_ch3[-3][0]) / sum(
            [compute_en(level[0]) for level in coeff_2_ch3])
        #sen_d_1_avf_2=sampen.sampen2(data=list(coeff_ch_avf[-2][1]),mm=2,r=0.2,normalize=True)[2][1]
        lee_d_2_2_2 = sum(
            [np.log2((x)**2) if x != 0 else 0 for x in coeff_2_ch2[-2][1]])
        nse_a_2_3_1 = compute_en(coeff_2_ch3[-1][0]) / sum(
            [compute_en(level[0]) for level in coeff_2_ch3])
        sen_d_1_2_2 = sampen.sampen2(data=list(coeff_ch2[-2][1]),
                                     mm=2,
                                     r=0.2,
                                     normalize=True)[2][1]
        mds_d_1_2_2 = np.median(
            np.abs(coeff_ch2[-2][1][1:] -
                   coeff_ch2[-2][1][:-1])) * fs_resampled
        sen_d_1_avf_2 = sampen.sampen2(data=list(coeff_ch_avf[-2][1]),
                                       mm=2,
                                       r=0.2,
                                       normalize=True)[2][1]
        lee_d_1_2_1 = sum(
            [np.log2((x)**2) if x != 0 else 0 for x in coeff_ch2[-1][1]])
        mds_d_1_avf_1 = np.median(
            np.abs(coeff_ch_avf[-1][1][1:] -
                   coeff_ch_avf[-1][1][:-1])) * fs_resampled

        feature = [
            sen_d_1_3_3, nse_a_2_3_3, sen_d_1_avf_2, lee_d_2_2_2, nse_a_2_3_1,
            sen_d_1_2_2, mds_d_1_2_2, lee_d_1_2_1, mds_d_1_avf_1
        ]
        features.append(np.array(feature))

    if 'Myocardial infarction' in fields['comments'][4]:
        label = 1
    elif 'Healthy control' in fields['comments'][4]:
        label = 0
    else:
        label = None  # avoid a NameError when neither diagnosis matches
    return (key, np.array(features), label)
Example #20
    for i in range(15):
        data_var[i] = np.var(data_sec[i])               # compute the variance of the data

    # discrete wavelet transform
    data_dwt_appro = np.zeros(15)                       # approximation signal from the DWT, i.e. the low-frequency information
    data_dwt_detail = np.zeros(15)                      # detail signal from the DWT, i.e. the high-frequency information
    for i in range(15):
        Appro, Detail = pywt.dwt(data_sec[i], 'db4')    # per the literature, 4th-order Daubechies wavelets handle EEG signals best, hence db4
        data_dwt_appro[i] = np.mean(Appro)              # mean of the wavelet-transform output
        data_dwt_detail[i] = np.mean(Detail)
    data_dwt = [data_dwt_appro, data_dwt_detail]

    # sample entropy
    data_sampen = np.zeros(15)
    for i in range(15):
        data_temp = sampen2(data_sec[i])                # sample entropy via sampen2 from the sampen module
        data_temp2 = data_temp[2]                       # the function returns a 2-D structure; we only need the second value of the third row
        data_sampen[i] = data_temp2[1]

    # Hurst exponent
    data_ApEn = np.zeros(15)
    for i in range(15):
        data_ApEn[i] = hurst(data_sec[i])               # Hurst exponent via the hurst function from the pyeeg module

    # Petrosian fractal dimension
    data_pfd = np.zeros(15)
    for i in range(15):
        data_pfd[i] = pfd(data_sec[i])                  # Petrosian fractal dimension via pyeeg's pfd
###################################################################################################################

# save all feature values to an xlsx file
Example #21
def sampen_wavelet_coefs(y):
    try:
        return smp.sampen2(list(y)[:300], 1, normalize=True)[1][1]
    except (ValueError, ZeroDivisionError):
        return np.nan
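
Usage sketch, assuming import sampen as smp and import numpy as np as in the surrounding snippets:

import numpy as np
import sampen as smp

coefs = np.sin(np.linspace(0, 40, 400))  # stand-in for wavelet coefficients
print(sampen_wavelet_coefs(coefs))  # SampEn at m = 1, or NaN on failure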
Example #22
def get_features_from_window(window):

    features = []
    cols = [
        'ax', 'ay', 'az', 'am', 'gx', 'gy', 'gz', 'gm', 'mx', 'my', 'mz', 'mm'
    ]

    for c in cols:
        col = window.loc[:, [c]]

        # 1 - Max
        mx = col.max()
        mxindex = col.idxmax()
        features.append(mx)

        # 2 - min of each n
        mn = col.min()
        mnindex = col.idxmin()
        features.append(mn)

        # 3 - mean of each n
        mean = col.mean()
        features.append(mean)

        # 4 - variance of each n
        variance = col.var()
        features.append(variance)

        # 5 - kurtosis of each n
        kurtosis = col.kurt()
        features.append(kurtosis)

        # 6 - skewness of each n
        skew = col.skew()
        features.append(skew)

        # 7 - peak to peak signal
        spp = mx - mn
        features.append(spp)

        # 8 - peak to peak time
        tpp = mxindex + mnindex
        features.append(tpp)

        # 9 - peak to peak slope

        if int(tpp) == 0:
            features.append(spp)
        else:
            spps = spp / tpp
            features.append(spps)

        # 10 - ALAR
        if int(mx) == 0:
            features.append(0)
        else:
            features.append(mxindex / mx)

        # 11 - Energy

        energy = np.einsum('ij,ij->j', col, col)
        features.append(energy[0])

        # 12 - Entropy
        normalized = normalize(col)
        features.append(sampen2(normalized)[1][1])

    return features
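
A hedged usage sketch. The window is assumed to be a pandas DataFrame with the twelve sensor columns listed above; the normalize helper is not shown in the example, so a hypothetical z-scoring stand-in is supplied here purely so the sketch runs:

import numpy as np
import pandas as pd
from sampen import sampen2

def normalize(col):
    # hypothetical stand-in for the example's unshown helper:
    # z-score the column and return a flat list for sampen2
    vals = np.asarray(col, dtype=float).ravel()
    return list((vals - vals.mean()) / vals.std())

cols = ['ax', 'ay', 'az', 'am', 'gx', 'gy', 'gz', 'gm', 'mx', 'my', 'mz', 'mm']
window = pd.DataFrame(np.random.randn(128, 12), columns=cols)
features = get_features_from_window(window)
print(len(features))  # 12 features per sensor column = 144 entries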