def test_sampen_against_predictable_sequence(self):
    data = numpy.asarray([10, 20] * 2000)
    self.assertAlmostEqual(
        samp_entropy(data, 2, 0.2),
        0.0,
        places=2
    )
Example #2
def mse(m, r, signal, nbscales=None):
    """Multiscale sample entropy: samp_entropy of the coarse-grained signal at each scale."""
    # nb_scales and length_sample are presumably module-level constants in the original file
    if nbscales is None:
        nbscales = int((len(signal) * nb_scales) / length_sample)
    # Output initialisation; y[0] is unused so that scale i maps to y[i]
    y = np.zeros(nbscales + 1)
    y[0] = float('nan')
    for i in range(1, nbscales + 1):
        y[i] = pyeeg.samp_entropy(coarse_graining(i, signal), m, r)
    return y
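The coarse_graining helper is not defined in this excerpt; below is a minimal sketch of the usual non-overlapping window averaging used in multiscale entropy, assuming that is what the original helper does.

import numpy as np

def coarse_graining(scale, signal):
    # average consecutive, non-overlapping windows of length `scale`
    signal = np.asarray(signal, dtype=float)
    n = len(signal) // scale          # number of complete windows
    return signal[:n * scale].reshape(n, scale).mean(axis=1)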
Example #3

    def test_sampen_against_original_c_test_data(self):
        """Use test data from
        http://www.physionet.org/physiotools/sampen/c/sampentest.txt
        """
        dir = os.path.dirname(__file__)
        file_path = os.path.join(dir, './demo_data/sampentest.txt')
        data = []
        with open(file_path, 'r') as file:
            for row in file:
                data.append(float(row.strip()))

        self.assertEqual(
            samp_entropy(numpy.asarray(data), 2, 0.2),
            2.1233284920357112
        )
Example #4
MIN_EPOCH_N = 256 * 5
MAX_EPOCH_N = 256 * 30
EPOCH_STEP = 256 * 5
N_REPLICATES = 5

SPECT_ENT_BANDS = 2 ** np.arange(0, 8) / 2

fun_to_test = [
    {"times": 100, "name": "hfd", "is_original": True, "fun": lambda x: pyeeg.hfd(x, 2 ** 3)},
    {"times": 100, "name": "hfd", "is_original": False, "fun": lambda x: univ.hfd(x, 2 ** 3)},
    {"times": 100, "name": "hjorth", "is_original": True, "fun": lambda x: pyeeg.hjorth(x)},
    {"times": 100, "name": "hjorth", "is_original": False, "fun": lambda x: univ.hjorth(x)},
    {"times": 100, "name": "pfd", "is_original": True, "fun": lambda x: pyeeg.pfd(x)},
    {"times": 100, "name": "pfd", "is_original": False, "fun": lambda x: pyeeg.pfd(x)},
    {"times": 2, "name": "samp_ent", "is_original": True, "fun": lambda x: pyeeg.samp_entropy(x, 2, 1.5)},
    {"times": 10, "name": "samp_ent", "is_original": False, "fun": lambda x: univ.samp_entropy(x, 2, 1.5, relative_r=False)},
    {"times": 2, "name": "ap_ent", "is_original": True, "fun": lambda x: pyeeg.ap_entropy(x, 2, 1.5)},
    {"times": 10, "name": "ap_ent", "is_original": False, "fun": lambda x: univ.ap_entropy(x, 2, 1.5)},
    {"times": 10, "name": "svd_ent", "is_original": True, "fun": lambda x: pyeeg.svd_entropy(x, 2, 3)},
    {"times": 100, "name": "svd_ent", "is_original": False, "fun": lambda x: univ.svd_entropy(x, 2, 3)},
    {"times": 10, "name": "fisher_info", "is_original": True, "fun": lambda x: pyeeg.fisher_info(x, 2, 3)},
    {"times": 100, "name": "fisher_info", "is_original": False, "fun": lambda x: univ.fisher_info(x, 2, 3)},
    {"times": 100, "name": "spectral_entropy", "is_original": True, "fun": lambda x: pyeeg.spectral_entropy(x, SPECT_ENT_BANDS, 256)},
    {"times": 100, "name": "spectral_entropy", "is_original": False, "fun": lambda x: univ.spectral_entropy(x, 256, SPECT_ENT_BANDS)},
]


def make_one_rep():
    ldfs = []
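make_one_rep is cut off in this excerpt; the sketch below shows one way the entries in fun_to_test could be timed on Gaussian-noise epochs. The driver loop, the name time_one_entry and the use of np.random.randn are illustrative assumptions, not the original implementation.

import time
import numpy as np

def time_one_entry(entry, epoch):
    # run entry["fun"] entry["times"] times on one epoch and return mean wall-clock seconds
    start = time.time()
    for _ in range(entry["times"]):
        entry["fun"](epoch)
    return (time.time() - start) / entry["times"]

# hypothetical driver: one noise epoch per length between MIN_EPOCH_N and MAX_EPOCH_N
for n in range(MIN_EPOCH_N, MAX_EPOCH_N + 1, EPOCH_STEP):
    epoch = np.random.randn(n)
    for entry in fun_to_test:
        print(entry["name"], entry["is_original"], n, time_one_entry(entry, epoch))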
Example #5

         'tol': table[idx, 2],
         'score': table[idx, 0]
     })
     print('    Channel %i: optimal pair is' % (i), table[idx, 1],
           'subwindow', table[idx, 2], 'tol --', 'auc roc is', table[idx, 0])
 print('Tuning sample entropy ...')
 opt_sw_tol_sampent = []
 for i in range(train_banded.shape[2]):
     table = []
     for subwindow in subwindow_sizes:
         for tol in tolerance_values:
             entropy_list = []
             for j in range(train_banded.shape[0]):
                 entropy_list.append(
                     pyeeg.samp_entropy(train_banded[j, :, i], subwindow,
                                        tol))
             entropy_list = np.array(entropy_list)
             score = np.max([
                 roc_auc_score(y_true=train_y, y_score=entropy_list),
                 roc_auc_score(y_true=1 - train_y, y_score=entropy_list)
             ])
             table.append([score, subwindow, tol])
     table = np.array(table)
     idx = np.argmax(table[:, 0])
     opt_sw_tol_sampent.append({
         'channel': i,
         'subwindow': table[idx, 1],
         'tol': table[idx, 2],
         'score': table[idx, 0]
     })
     print('    Channel %i: optimal pair is' % (i), table[idx, 1],
           'subwindow', table[idx, 2], 'tol --', 'auc roc is', table[idx, 0])
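The grids subwindow_sizes and tolerance_values swept above are not shown in this excerpt; the values below are purely illustrative placeholders.

# hypothetical grids; the original values are not shown in this excerpt
subwindow_sizes = [1, 2, 3]                 # candidate embedding dimensions M for samp_entropy
tolerance_values = [0.1, 0.15, 0.2, 0.25]   # candidate tolerances r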
Example #6
def calculate_features(samples):
    data = samples
    if not samples:
        print("no samples")
        return []

    band = [0.5, 4, 7, 12, 30]
    a = randn(4097)  # unused below; randn is presumably numpy.random.randn
    # approx = pyeeg.ap_entropy(data, 5, 1)
    approx = 0
    DFA = pyeeg.dfa(data)
    first_order_diff = [data[i] - data[i - 1] for i in range(1, len(data))]
    fisher_info = pyeeg.fisher_info(data, 1, 1, W=None)
    embed_seq = pyeeg.embed_seq(data, 1, 1)
    hfd = pyeeg.hfd(data, 6)
    hjorth = pyeeg.hjorth(data, D=None)
    hurst = pyeeg.hurst(data)
    PFD = pyeeg.pfd(data)
    sam_ent = pyeeg.samp_entropy(data, 1, 2)
    spectral_entropy = pyeeg.spectral_entropy(data,
                                              band,
                                              256,
                                              Power_Ratio=None)
    svd = pyeeg.svd_entropy(data, 6, 4, W=None)
    PSI = pyeeg.bin_power(data, band, 256)

    # # Power Spectral Intensity (PSI) and Relative Intensity Ratio (RIR): two 1-D vectors
    # # print("bin_power = ", PSI)
    # # Petrosian Fractal Dimension (PFD): a scalar
    # print("PFD = ", PFD)
    # # Higuchi Fractal Dimension (HFD): a scalar
    # print("hfd = ", hfd)
    # # Hjorth mobility and complexity: two scalars
    # print("hjorth = ", hjorth)
    # # Spectral Entropy (Shannon's entropy of RIRs): a scalar
    # print("spectral_entropy = ", spectral_entropy)
    # # SVD Entropy: a scalar
    # print("svd = ", svd)
    # # Fisher Information: a scalar
    # print("fisher_info = ", fisher_info)
    # # Approximate Entropy (ApEn): a scalar
    # print("approx entropy = ", approx)
    # # Detrended Fluctuation Analysis (DFA): a scalar
    # print("DFA = ", DFA)
    # # Hurst Exponent (Hurst): a scalar
    # print("Hurst_Exponent = ", hurst)
    # # Build a set of embedding sequences from the given time series X with lag Tau and embedding dimension
    # print("embed_seq = ", embed_seq)
    # # Compute the first-order difference of a time series.
    # print("first_order_diff = ", first_order_diff)

    return {
        'approximate': approx,
        'DFA': DFA,
        'fisher_info': fisher_info,
        'embed_seq': embed_seq,
        'hfd': hfd,
        'hjorth': hjorth,
        'hurst': hurst,
        'PFD': PFD,
        'sam_ent': sam_ent,
        'spectral_entropy': spectral_entropy,
        'svd': svd,
        'PSI': PSI,
        'first_order_diff': first_order_diff
    }
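A minimal usage sketch, assuming pyeeg is importable and that the signal is passed in as a plain 1-D Python list; the epoch length and noise input below are made up.

import numpy as np

# hypothetical call on one 4097-sample epoch of Gaussian noise
epoch = list(np.random.randn(4097))
features = calculate_features(epoch)
print(sorted(features.keys()))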
Example #7
     "times": 100,
     "name": "pfd",
     "is_original": True,
     "fun": lambda x: pyeeg.pfd(x)
 },
 {
     "times": 100,
     "name": "pfd",
     "is_original": False,
     "fun": lambda x: pyeeg.pfd(x)
 },
 {
     "times": 2,
     "name": "samp_ent",
     "is_original": True,
     "fun": lambda x: pyeeg.samp_entropy(x, 2, 1.5)
 },
 {
     "times": 10,
     "name": "samp_ent",
     "is_original": False,
     "fun": lambda x: univ.samp_entropy(x, 2, 1.5, relative_r=False)
 },
 {
     "times": 2,
     "name": "ap_ent",
     "is_original": True,
     "fun": lambda x: pyeeg.ap_entropy(x, 2, 1.5)
 },
 {
     "times": 10,
Example #8

def myFeaturesExtractor(
        X, myM, myV):  # X has to be a matrix where each row is a channel
    N = len(X)  # number of channels
    L = len(X[0])
    maxtLyap = min(500, L // 2 + L // 4)
    lyapLags = np.arange(maxtLyap) / Fs

    # get number of features
    nFeatures = nMono * N + N * (N - 1) / 2
    # initialise the feature vector: nMono monovariate features per channel, followed by the pairwise connectivity features
    featList = np.zeros((int(nFeatures)))
    # deal with monovariate features first
    for kChan in range(N):
        kFeat = 0
        mySig = X[kChan, :]
        #========== Stats ========================
        myMean = myM[kChan]
        featList[nMono * kChan + kFeat] = myMean
        kFeat += 1
        myMax = max(mySig)
        featList[nMono * kChan + kFeat] = myMax
        kFeat += 1
        myMin = min(mySig)
        featList[nMono * kChan + kFeat] = myMin
        kFeat += 1
        peak = max(abs(np.array([myMin, myMax])))
        featList[nMono * kChan + kFeat] = peak
        kFeat += 1
        myVar = myV[kChan]
        featList[nMono * kChan + kFeat] = myVar
        kFeat += 1
        featList[nMono * kChan + kFeat] = sp.skew(mySig)
        kFeat += 1
        featList[nMono * kChan + kFeat] = sp.kurtosis(mySig)
        kFeat += 1
        myRMS = rms(mySig)
        featList[nMono * kChan + kFeat] = myRMS
        kFeat += 1
        featList[nMono * kChan + kFeat] = peak / myRMS
        kFeat += 1

        featList[nMono * kChan + kFeat] = totVar(mySig)
        kFeat += 1
        featList[nMono * kChan + kFeat] = pyeeg.dfa(mySig)
        kFeat += 1
        featList[nMono * kChan + kFeat] = pyeeg.hurst(mySig)
        kFeat += 1
        hMob, hComp = pyeeg.hjorth(mySig)
        featList[nMono * kChan + kFeat] = hMob
        kFeat += 1
        featList[nMono * kChan + kFeat] = hComp
        kFeat += 1
        ## ======== fractal ========================
        # Now we need to get the embedding time lag Tau and the embedding dimension
        ac = delay.acorr(mySig, maxtau=maxTauLag, norm=True, detrend=True)
        Tau = firstTrue(ac < corrThresh)  # embedding delay

        f1, f2, f3 = dimension.fnn(mySig, dim=dim, tau=Tau, R=10.0, A=2.0, metric='euclidean',
                                   window=10, maxnum=None, parallel=True)
        myEmDim = firstTrue(f3 < fracThresh)
        # Here we construct the embedding matrix Em
        Em = pyeeg.embed_seq(mySig, Tau, myEmDim)
        U, s, Vh = linalg.svd(Em)
        W = s / np.sum(s)  # normalized singular values, in decreasing order
        FInfo = pyeeg.fisher_info(X, Tau, myEmDim, W=W)
        featList[nMono * kChan + kFeat] = FInfo
        kFeat += 1
        featList[nMono * kChan + kFeat] = Tau
        kFeat += 1
        featList[nMono * kChan + kFeat] = myEmDim
        kFeat += 1
        #========================================
        PFD = pyeeg.pfd(mySig, D=None)
        hfd6 = pyeeg.hfd(mySig, 6)
        hfd10 = pyeeg.hfd(mySig, 10)
        # Now we fit a line and take its slope as the Lyapunov exponent estimate
        divAvg = lyapunov.mle(Em,
                              maxt=maxtLyap,
                              window=3 * Tau,
                              metric='euclidean',
                              maxnum=None)
        poly = np.polyfit(lyapLags,
                          divAvg,
                          1,
                          rcond=None,
                          full=False,
                          w=None,
                          cov=False)
        LyapExp = poly[0]

        featList[nMono * kChan + kFeat] = PFD
        kFeat += 1
        featList[nMono * kChan + kFeat] = hfd6
        kFeat += 1
        featList[nMono * kChan + kFeat] = hfd10
        kFeat += 1
        featList[nMono * kChan + kFeat] = LyapExp
        kFeat += 1

        ## ======== Entropy ========================
        tolerance = 1 / 4
        entropyDim = max([myEmDim, PFD])

        featList[nMono * kChan + kFeat] = pyeeg.samp_entropy(
            mySig, entropyDim, tolerance)
        kFeat += 1
        featList[nMono * kChan + kFeat] = pyeeg.svd_entropy(mySig,
                                                            Tau,
                                                            myEmDim,
                                                            W=W)
        kFeat += 1

        # here we compute bin power
        power, power_Ratio = pyeeg.bin_power(mySig, freqBins, Fs)
        featList[nMono * kChan + kFeat] = pyeeg.spectral_entropy(
            mySig, freqBins, Fs, Power_Ratio=power_Ratio)
        kFeat += 1
        ## ======== Spectral ========================
        for kBin in range(len(freqBins) - 1):
            featList[nMono * kChan + kFeat] = power[kBin]
            kFeat += 1
            featList[nMono * kChan + kFeat] = power_Ratio[kBin]
            kFeat += 1

    # now deal with the multivariate (connectivity) features
    #============ connectivity ==================
    corrList = connectome(X)
    nConnect = len(corrList)
    if N * (N - 1) / 2 != nConnect:
        raise ValueError('incorrect number of correlation coeffs')

    for kC in range(nConnect):
        featList[-nConnect + kC] = corrList[kC]

    return featList
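rms, totVar, firstTrue and connectome are helpers not shown in this excerpt; below are minimal sketches of what they plausibly do, as assumptions rather than the original definitions (connectome is guessed from the N*(N-1)/2 check above).

import numpy as np

def rms(x):
    # root-mean-square amplitude of a 1-D signal
    return np.sqrt(np.mean(np.asarray(x, dtype=float) ** 2))

def totVar(x):
    # total variation: sum of absolute first-order differences
    return np.sum(np.abs(np.diff(np.asarray(x, dtype=float))))

def firstTrue(mask):
    # index of the first True element of a boolean array (returns 0 if none are True);
    # used above to pick the embedding delay Tau and the embedding dimension
    return int(np.argmax(mask))

def connectome(X):
    # flattened upper triangle of the channel-by-channel correlation matrix
    C = np.corrcoef(X)
    iu = np.triu_indices(C.shape[0], k=1)
    return C[iu]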