import numpy as np
import pyentrp.entropy as ent  # assumed source of shannon_entropy


def getShannonEntropyWithWindow(signal,
                                type="freq",  # unused; kept for API compatibility
                                window_size=40,
                                step_size=20):
    """Sliding-window Shannon entropy, computed per column (channel) of signal.

    Shannon entropy: H = -sum(p_i * log2(p_i)) over the relative frequencies
    p_i of the values in each window.
    """
    entropy = []
    for i in range(signal.shape[1]):  # one feature list per channel
        she_feat = []
        for j in range(0, signal.shape[0], step_size):
            # The slice takes window_size + 1 samples; windows near the end of
            # the signal are truncated rather than skipped.
            sh_ent = ent.shannon_entropy(signal[j:j + window_size + 1, i])
            she_feat.append(sh_ent)
        entropy.append(she_feat)
    return entropy
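# A minimal usage sketch with illustrative data (assumes a samples-by-channels
# NumPy array of discrete-valued measurements):
rng = np.random.default_rng(0)
example_signal = rng.integers(0, 10, size=(200, 3)).astype(float)
example_features = getShannonEntropyWithWindow(example_signal,
                                               window_size=40,
                                               step_size=20)
print(len(example_features), len(example_features[0]))  # 3 channels, 10 windows each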
def shan(d1, d2, d3, d4, d5):
    """Row-wise Shannon entropy over the first 500 rows of five datasets."""
    results = []
    for d in (d1, d2, d3, d4, d5):
        d = np.rint(d)  # round to integers so the values form discrete states
        results.append([entropy.shannon_entropy(d[i]) for i in range(500)])
    sh1, sh2, sh3, sh4, sh5 = results
    return (sh1, sh2, sh3, sh4, sh5)
Example 3
def test_complexity():

    signal = np.cos(np.linspace(start=0, stop=30, num=100))


    # Shannon
    assert np.allclose(nk.entropy_shannon(signal) - pyentrp.shannon_entropy(signal), 0)


    # Approximate
    assert np.allclose(nk.entropy_approximate(signal), 0.17364897858477146)
    assert np.allclose(nk.entropy_approximate(signal, 2, 0.2*np.std(signal, ddof=1)) - entropy_app_entropy(signal, 2), 0)

    assert nk.entropy_approximate(signal, 2, 0.2*np.std(signal, ddof=1)) != pyeeg_ap_entropy(signal, 2, 0.2*np.std(signal, ddof=1))


    # Sample
    assert np.allclose(nk.entropy_sample(signal, 2, 0.2*np.std(signal, ddof=1)) - entropy_sample_entropy(signal, 2), 0)
    assert np.allclose(nk.entropy_sample(signal, 2, 0.2) - nolds.sampen(signal, 2, 0.2), 0)
    assert np.allclose(nk.entropy_sample(signal, 2, 0.2) - entro_py_sampen(signal, 2, 0.2, scale=False), 0)
    assert np.allclose(nk.entropy_sample(signal, 2, 0.2) - pyeeg_samp_entropy(signal, 2, 0.2), 0)

    assert nk.entropy_sample(signal, 2, 0.2) != pyentrp.sample_entropy(signal, 2, 0.2)[1]
    assert nk.entropy_sample(signal, 2, 0.2*np.sqrt(np.var(signal))) != MultiscaleEntropy_sample_entropy(signal, 2, 0.2)[0.2][2]

    # MSE
#    assert nk.entropy_multiscale(signal, 2, 0.2*np.sqrt(np.var(signal))) != np.trapz(MultiscaleEntropy_mse(signal, [i+1 for i in range(10)], 2, 0.2, return_type="list"))
#    assert nk.entropy_multiscale(signal, 2, 0.2*np.std(signal, ddof=1)) != np.trapz(pyentrp.multiscale_entropy(signal, 2, 0.2, 10))

    # Fuzzy
    assert np.allclose(nk.entropy_fuzzy(signal, 2, 0.2, 1) - entro_py_fuzzyen(signal, 2, 0.2, 1, scale=False), 0)
Example 4
def test_complexity():

    signal = np.cos(np.linspace(start=0, stop=30, num=100))

    # Shannon
    assert np.allclose(nk.entropy_shannon(signal),
                       6.6438561897747395,
                       atol=0.0000001)
    assert nk.entropy_shannon(signal) == pyentrp.shannon_entropy(signal)

    # Approximate
    assert np.allclose(nk.entropy_approximate(signal),
                       0.17364897858477146,
                       atol=0.000001)
    assert np.allclose(nk.entropy_approximate(np.array([85, 80, 89] * 17)),
                       1.0996541105257052e-05,
                       atol=0.000001)
    #    assert nk.entropy_approximate(signal, 2, 0.2) == pyeeg.ap_entropy(signal, 2, 0.2)

    # Sample
    assert np.allclose(nk.entropy_sample(signal,
                                         order=2,
                                         r=0.2 * np.std(signal)),
                       nolds.sampen(signal,
                                    emb_dim=2,
                                    tolerance=0.2 * np.std(signal)),
                       atol=0.000001)
    #    assert nk.entropy_sample(signal, 2, 0.2) == pyeeg.samp_entropy(signal, 2, 0.2)
    #    pyentrp.sample_entropy(signal, 2, 0.2)  # Gives something different

    # Fuzzy
    assert np.allclose(nk.entropy_fuzzy(signal),
                       0.5216395432372958,
                       atol=0.000001)
Example 5
def test_complexity_vs_Python():

    signal = np.cos(np.linspace(start=0, stop=30, num=100))

    # Shannon
    shannon = nk.entropy_shannon(signal)
    #    assert scipy.stats.entropy(shannon, pd.Series(signal).value_counts())
    assert np.allclose(shannon - pyentrp.shannon_entropy(signal), 0)

    # Approximate
    assert np.allclose(nk.entropy_approximate(signal), 0.17364897858477146)
    assert np.allclose(
        nk.entropy_approximate(
            signal, dimension=2, r=0.2 * np.std(signal, ddof=1)) -
        entropy_app_entropy(signal, 2), 0)

    assert nk.entropy_approximate(
        signal, dimension=2,
        r=0.2 * np.std(signal, ddof=1)) != pyeeg_ap_entropy(
            signal, 2, 0.2 * np.std(signal, ddof=1))

    # Sample
    assert np.allclose(
        nk.entropy_sample(signal, dimension=2, r=0.2 * np.std(signal, ddof=1))
        - entropy_sample_entropy(signal, 2), 0)
    assert np.allclose(
        nk.entropy_sample(signal, dimension=2, r=0.2) -
        nolds.sampen(signal, 2, 0.2), 0)
    assert np.allclose(
        nk.entropy_sample(signal, dimension=2, r=0.2) -
        entro_py_sampen(signal, 2, 0.2, scale=False), 0)
    assert np.allclose(
        nk.entropy_sample(signal, dimension=2, r=0.2) -
        pyeeg_samp_entropy(signal, 2, 0.2), 0)

    #    import sampen
    #    sampen.sampen2(signal[0:300], mm=2, r=r)

    assert nk.entropy_sample(signal,
                             dimension=2, r=0.2) != pyentrp.sample_entropy(
                                 signal, 2, 0.2)[1]
    assert nk.entropy_sample(
        signal, dimension=2,
        r=0.2 * np.sqrt(np.var(signal))) != MultiscaleEntropy_sample_entropy(
            signal, 2, 0.2)[0.2][2]

    # MSE
    #    assert nk.entropy_multiscale(signal, 2, 0.2*np.sqrt(np.var(signal))) != np.trapz(MultiscaleEntropy_mse(signal, [i+1 for i in range(10)], 2, 0.2, return_type="list"))
    #    assert nk.entropy_multiscale(signal, 2, 0.2*np.std(signal, ddof=1)) != np.trapz(pyentrp.multiscale_entropy(signal, 2, 0.2, 10))

    # Fuzzy
    assert np.allclose(
        nk.entropy_fuzzy(signal, dimension=2, r=0.2, delay=1) -
        entro_py_fuzzyen(signal, 2, 0.2, 1, scale=False), 0)

    # DFA
    assert nk.fractal_dfa(signal, windows=np.array([
        4, 8, 12, 20
    ])) != nolds.dfa(signal, nvals=[4, 8, 12, 20], fit_exp="poly")
def shan(d1):
    sh1 = []
    print("Shannon started")
    d1 = np.rint(d1)
    for i in range(d1.shape[0]):
        X = d1[i]
        sh1.append(entropy.shannon_entropy(X))
    print("Shannon Finished")
    return (sh1)
Example 7
def hrv_geometric_features(dataframe: pd.DataFrame,
                           column: str = 'NN') -> HRVGeometricFeatures:
    verify_monotonic(dataframe, column)

    if 'bad' in dataframe:
        dataframe = dataframe.loc[~dataframe.bad]

    features = HRVGeometricFeatures()
    if dataframe.empty:
        logger.warning(
            'Not enough NN segments to calculate HRV geometric features, '
            'returning nan for all features')
        return features

    nn = dataframe[column]
    period_mins = (nn.index[-1] - nn.index[0]) / np.timedelta64(60, 's')
    logger.debug('Calculating geometric features on %.1f minutes of NN data',
                 period_mins)

    # HRV Triangular index: Integral of the density of the NN interval histogram
    # divided by its height
    # [Shaffer and Ginsberg, page 4]
    # According to [Task force, page 356],
    # ... with bins approximately 8 ms long (precisely 7.8125 ms = 1/128 s)
    if period_mins < 5:
        logger.warning(
            'The recommended minimum amount of data for HTI is 5 min, '
            'calculating on %.1f min', period_mins)
    # Bin width is = 1 / 128 = 0.0078125 seconds = 7.8125 milliseconds
    # note that since NN is in milliseconds, we need to put our bins in ms too
    bin_width = 1000 / 128
    bins = np.arange(nn.min(), nn.max(), step=bin_width)
    # np.digitize maps each NN interval to the index of its histogram bin
    bin_indices = np.digitize(nn, bins)
    # Number of NN intervals in the modal (most populated) bin; note that
    # taking .max() of the index array itself would give the highest bin
    # index, not the modal count
    max_bin = np.bincount(bin_indices).max()
    # According to Task force:
    # ... [the HRV triangular index] is approximated by the value:
    # (total number of NN intervals) / (number of NN intervals in the modal bin)
    #
    # Note that in neurokit, this calculation is wrong, since it uses the
    # histogram density, not the regular histogram
    features.HTI = nn.shape[0] / max_bin
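    # Worked example with illustrative numbers (not from real data): 3600 NN
    # intervals with 300 of them in the modal bin gives HTI = 3600 / 300 = 12.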

    # Shannon entropy
    # In [Voss], it is not clearly explained what this is supposed to do:
    # "...calculated on the basis of the class probabilities p_i ... of the NN
    # interval density distribution ... resulting in a smoothed histogram
    # suitable for HRV analysis [1]."
    # The scaling below mirrors how np.histogram computes a density; if
    # shannon_entropy estimates probabilities from the frequencies of unique
    # values (as pyentrp's does), the constant scaling leaves the class
    # probabilities p_i unchanged.
    density = bin_indices / bin_width / bin_indices.sum()
    features.Shannon_h = shannon_entropy(density)

    logger.debug('HRV geometric features: %s', features)
    return features
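# A minimal usage sketch with hypothetical data (assumes pandas/numpy imported
# as pd/np and this module's HRVGeometricFeatures and verify_monotonic in scope):
example_idx = pd.date_range('2021-01-01', periods=600, freq='1s')
example_nn = pd.DataFrame({'NN': np.random.normal(800, 50, size=600)},
                          index=example_idx)
example_feats = hrv_geometric_features(example_nn)
print(example_feats.HTI, example_feats.Shannon_h)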
Example 8
def shannon_entrp(l):
    """
    Shannon entropy
    :param l: input sequence (time series or string of symbols)
    :return: the Shannon entropy of l
    """
    start = time.time()
    se = e.shannon_entropy(l)
    elapsed_time = time.time() - start
    logger.debug("Elapsed time to calculate Shannon entropy value is %s",
                 elapsed_time)
    return se
def fuzzyent(d1):
    sa1 = []
    #d1=np.rint(d1)
    print("Fuzzy started")
    for i in range(d1.shape[0]):
        print(i, end=" ", flush=True)
        X = d1[i]
        # Map the samples through a Gaussian membership function (mean 0,
        # sigma 1), then round the memberships to binary states before taking
        # their Shannon entropy
        X = gaussmf(X, 0, 1)
        X = np.array(X)
        X = np.rint(X)
        ee = entropy.shannon_entropy(list(X))
        sa1.append(ee)
    print("Fuzzy Finished")
    return (sa1)
Example 10
def extract_feature(X):
    X = X.astype(float)
    stft = np.abs(librosa.stft(X))
    mfccs = np.mean(librosa.feature.mfcc(y=X, sr=SAMPLE_RATE, n_mfcc=40).T,
                    axis=0)
    chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=SAMPLE_RATE).T,
                     axis=0)
    mel = np.mean(librosa.feature.melspectrogram(y=X, sr=SAMPLE_RATE).T, axis=0)
    shannon = [ent.shannon_entropy(X)]
    sample = ent.sample_entropy(X, 1)
    per = [ent.permutation_entropy(X)]
    fft = np.fft.fft(X) / len(X)
    fft = np.abs(fft[:len(X) // 7])
    fft = [fft[0]]  # keep only the DC (zero-frequency) magnitude
    return mfccs, chroma, mel, shannon, sample, per, fft
Example 11
def test_complexity():

    signal = np.cos(np.linspace(start=0, stop=30, num=100))

    # Shannon
    assert np.allclose(
        nk.entropy_shannon(signal) - pyentrp.shannon_entropy(signal), 0)

    # Approximate
    assert np.allclose(nk.entropy_approximate(signal), 0.17364897858477146)
    assert np.allclose(
        nk.entropy_approximate(signal, 2, 0.2 * np.std(signal, ddof=1)) -
        entropy_app_entropy(signal, 2), 0)

    assert nk.entropy_approximate(
        signal, 2, 0.2 * np.std(signal, ddof=1)) != pyeeg_ap_entropy(
            signal, 2, 0.2 * np.std(signal, ddof=1))

    # Sample
    assert np.allclose(
        nk.entropy_sample(signal, 2, 0.2 * np.std(signal, ddof=1)) -
        entropy_sample_entropy(signal, 2), 0)
    assert np.allclose(
        nk.entropy_sample(signal, 2, 0.2) - nolds.sampen(signal, 2, 0.2), 0)
    assert np.allclose(
        nk.entropy_sample(signal, 2, 0.2) -
        entro_py_sampen(signal, 2, 0.2, scale=False), 0)
    assert np.allclose(
        nk.entropy_sample(signal, 2, 0.2) - pyeeg_samp_entropy(signal, 2, 0.2),
        0)

    assert nk.entropy_sample(signal, 2, 0.2) != pyentrp.sample_entropy(
        signal, 2, 0.2)[1]

    # Fuzzy
    assert np.allclose(
        nk.entropy_fuzzy(signal, 2, 0.2, 1) -
        entro_py_fuzzyen(signal, 2, 0.2, 1, scale=False), 0)
def extract_feature(X):
    X = X.astype(float)
    stft = np.abs(librosa.stft(X))
    mfccs = np.mean(librosa.feature.mfcc(y=X, sr=SAMPLE_RATE, n_mfcc=40).T,
                    axis=0)
    chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=SAMPLE_RATE).T,
                     axis=0)
    mel = np.mean(librosa.feature.melspectrogram(y=X, sr=SAMPLE_RATE).T, axis=0)
    shannon = [ent.shannon_entropy(X)]
    sample = ent.sample_entropy(X, 1)
    spectral = [np.round(spectral_entropy(X, SAMPLE_RATE), 2)]
    per = [ent.permutation_entropy(X)]
    energy_ent = [energy_entropy(X)]
    energy_sig = [energy(X)]
    zero_cross = [zero_crossing_rate(X)]
    f, psd = welch(X, nfft=1024, fs=SAMPLE_RATE, noverlap=896, nperseg=1024)
    fft = np.fft.fft(X) / len(X)
    fft = np.abs(fft[:len(X) // 7])
    fft = [fft[0]]  # keep only the DC (zero-frequency) magnitude

    return np.concatenate(
        (mfccs, chroma, mel, shannon, sample, spectral, per, energy_ent,
         energy_sig, zero_cross, psd, chroma, fft))
Example 13
rolling = rolling_window(df.logR_ask, window_size, 10)
rolling = rolling_window(df_std.logR_ask, window_size, window_size)
rolling = rolling_window(df_QN_laplace_std.values.transpose()[0], window_size, window_size)
rolling_ns = rolling_window(df.ask, window_size, 10)
rolling_ts = rolling_window(df.index, window_size, 10)
df_ = pd.DataFrame(rolling)

sw_1 = rolling[1]
sw_1_ns = rolling_ns[1]  # the corresponding non-standardised window
nolds.lyap_r(sw_1, emb_dim=emb_dim)
nolds.lyap_e(sw_1, emb_dim=emb_dim)
nolds.sampen(sw_1, emb_dim=emb_dim)
nolds.hurst_rs(sw_1)
nolds.corr_dim(sw_1, emb_dim=emb_dim)
nolds.dfa(sw_1)
ent.shannon_entropy(sw_1)  # is this even valid? There are no discrete p_i states, and it ignores temporal order; see "Practical considerations of permutation entropy"
ent.sample_entropy(sw_1, sample_length=10)  # what is sample length?
#ent.multiscale_entropy(sw_1, sample_length = 10, tolerance = 0.1*np.std(sw_1)) # what is tolerance?

                      "Practical considerations of permutation entropy: A Tutorial review - how to choose parameters in permutation entropy"
ent.permutation_entropy(sw_1, m=8, delay = emd_dim )  #Reference paper above 
#ent.composite_multiscale_entropy()
lempel_ziv_complexity(sw_1)
gzip_compress_ratio(sw_1_ns, 9)


#https://www.researchgate.net/post/How_can_we_find_out_which_value_of_embedding_dimensions_is_more_accurate
#When choosing emb_dim for Takens' embedding, each dimension should have at least 10 data points: 10^1 -> 1D, 10^2 -> 2D, ..., 10^6 -> 6D

#FALSE NEAREST NEIGHBOR FOR DETERMINING MINIMAL EMBEDDING DIMENSION
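# A small sketch of the 10^d rule of thumb above (a hypothetical helper, not
# part of the original script): the largest embedding dimension a series of
# length n supports is roughly floor(log10(n)).
import math

def max_embedding_dim(n):
    return max(1, int(math.log10(n)))  # e.g. max_embedding_dim(1000) == 3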
Example 14
def extract(self, source, paraphrase, position):
    s = set(tokenize(source))
    p = set(tokenize(paraphrase))
    return ent.shannon_entropy(" ".join(p.difference(s)))
Example 15
def extract(self, source, paraphrase, position):
    return ent.shannon_entropy(paraphrase)
#dict3 = open('sortedlncH', 'w')
'''

for key in dict:
    print key
'''
entrps = []
dict2 = anydbm.open('lncEntrpM', 'n')  # anydbm is the Python 2 module name (dbm in Python 3)
count = 0
#keys = dict1.keys()
time1 = time.time()
for key in dict1:
    #time1 = time.time()
    #for num in range(0, 22619):
    if int(dict1[key]) >= 4:
        entrp = ent.shannon_entropy(key.lower())
        if entrp >= 1.7:
            entrps.append(entrp)
            try:
                dict2[str(entrp)] += ' ' + str(key)
            except KeyError:
                dict2[str(entrp)] = key
    count += 1
    print(str(count) + ' ' + str(time.time() - time1))

print('done with first part')
#print entrps
#print time.time()-time1
entrps.sort(reverse=True)

entrps = np.array(entrps)
Example 17
def test_shannonEntropyInt(self):
    self.assertEqual(round(ent.shannon_entropy(TIME_SERIES), 5), SHANNON_ENTROPY)
Example 18
def calculate_entropy(melody):
    return ent.shannon_entropy(melody)
Example 19
def test_shannonEntropyString(self):
    self.assertEqual(round(ent.shannon_entropy(TIME_SERIES_STRING), 5), SHANNON_ENTROPY)
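# For reference, the estimator these tests exercise: a minimal sketch of
# frequency-based Shannon entropy over a discrete-valued sequence, which is
# essentially what pyentrp's shannon_entropy computes.
import collections
import math

def shannon_entropy_reference(series):
    n = len(series)
    counts = collections.Counter(series)
    # H = -sum(p_i * log2(p_i)) over the relative frequencies p_i
    return -sum((c / n) * math.log2(c / n) for c in counts.values())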
Example 20

for ind in range(0, 367):  #len(genes)):
    seqEnt = dict()
    ents = list()
    # print('worked')
    line = fread.readline()
    #print genes[ind + 1] + " " + str(ind)
    if line[:len(line) - 1] != '':
        print("new loop: " + line[:len(line) - 1])
    if line[:len(line) - 1] != '':
        while (ind != 366 and not genes[ind + 1] in line) or (ind == 366
                                                              and line != ''):
            #print(str(ind)+'line: ' + line + ' gene: ' + genes[ind+1])

            entropy = ent.shannon_entropy(line)
            seqEnt[entropy] = line
            ents.append(entropy)
            # print(line[:len(line)-9])
            # print(line[:len(line)-9].upper() == 'RFWD2')
            fread.readline()
            fread.readline()
            line = fread.readline()
            print "during loop: " + line + "the next gene: " + genes[ind + 1]
            print ind
        ents.sort()
        seqs = list()
        for entr in ents:
            seqs.append(seqEnt[entr])

        fwrite.write("gene: " + genes[ind] + '\n')
Example 21
    apq11Shimmer = call([sound, pointProcess], "Get shimmer (apq11)", 0, 0,
                        0.0001, 0.02, 1.3, 1.6)
    ddaShimmer = call([sound, pointProcess], "Get shimmer (dda)", 0, 0, 0.0001,
                      0.02, 1.3, 1.6)
    voice_report = call([sound, pitch, pointProcess], "Voice report", 0.0, 0.0,
                        f0min, f0max, 1.3, 1.6, 0.03, 0.45)

    return meanF0, stdevF0, localJitter, localabsoluteJitter, rapJitter, ppq5Jitter, ddpJitter, localShimmer, localdbShimmer, apq3Shimmer, aqpq5Shimmer, apq11Shimmer, ddaShimmer, voice_report


AudioFile_path = sys.argv[1]
sample_rate, samples = wavfile.read(AudioFile_path)
frequencies, times, spectrogram = signal.spectrogram(samples, sample_rate)
sound = parselmouth.Sound(AudioFile_path)
DFA = nolds.dfa(times)
PPE = entropy.shannon_entropy(times)  # note: computed on the spectrogram segment times
(meanF0, stdevF0, localJitter, localabsoluteJitter, rapJitter, ppq5Jitter,
 ddpJitter, localShimmer, localdbShimmer, apq3Shimmer, aqpq5Shimmer,
 apq11Shimmer, ddaShimmer,
 voice_report) = measurePitch(sound, 75, 500, "Hertz")

voice_report = voice_report.strip()

# Fragile: HNR and NHR are extracted from the voice report by fixed character offsets
hnr = voice_report[984:989]
nhr = voice_report[941:953]

# from sklearn.preprocessing import MinMaxScaler
# sc = MinMaxScaler()
# DFA = sc.fit_transform(DFA)
# PPE = sc.fit_transform(PPE)
Example 22
def calc_entropy(serie):

    shannon_entropy = ent.shannon_entropy(serie)

    return shannon_entropy
Example 23
def Entropy(source, paraphrase, position):
    s = set(tokenize(source))
    p = set(tokenize(paraphrase))
    word_diff_entropy = ent.shannon_entropy(" ".join(p.difference(s)))
    entropy = ent.shannon_entropy(paraphrase)
    return [entropy, word_diff_entropy]
dict3 = open('sortedlncH', 'w')
'''

for key in dict:
    print key
'''
entrps = []
dict2 = dict()
count = 0
#keys = dict1.keys()
time1 = time.time()
for key in dict1:
    #time1 = time.time()
    #for num in range(0, 22619):
    entrp = ent.shannon_entropy(key)
    entrps.append(entrp)
    try:
        dict2[entrp].append(key)
    except KeyError:
        dict2[entrp] = [key]
    count += 1
    print(str(count) + ' ' + str(time.time() - time1))

print('done with first part')
#print entrps
#print time.time()-time1
entrps.sort(reverse=True)
print('done with Entropy Sort')

for entrp in entrps:
Example 25
    def calculate_rri_nonlinear_statistics(self,
                                           rri,
                                           diff_rri,
                                           diff2_rri,
                                           suffix=''):

        # Empty dictionary
        rri_nonlinear_statistics = dict()

        # Non-linear RR statistics
        if len(rri) > 1:
            rri_nonlinear_statistics['rri_approximate_entropy' + suffix] = \
                self.safe_check(pyeeg.ap_entropy(rri, M=2, R=0.1*np.std(rri)))
            rri_nonlinear_statistics['rri_sample_entropy' + suffix] = \
                self.safe_check(ent.sample_entropy(rri, sample_length=2, tolerance=0.1*np.std(rri))[0])
            rri_nonlinear_statistics['rri_multiscale_entropy' + suffix] = \
                self.safe_check(ent.multiscale_entropy(rri, sample_length=2, tolerance=0.1*np.std(rri))[0])
            rri_nonlinear_statistics['rri_permutation_entropy' + suffix] = \
                self.safe_check(ent.permutation_entropy(rri, m=2, delay=1))
            rri_nonlinear_statistics['rri_multiscale_permutation_entropy' + suffix] = \
                self.safe_check(ent.multiscale_permutation_entropy(rri, m=2, delay=1, scale=1)[0])
            rri_nonlinear_statistics['rri_fisher_info' + suffix] = fisher_info(
                rri, tau=1, de=2)
            hjorth_parameters = hjorth(rri)
            rri_nonlinear_statistics['rri_activity' +
                                     suffix] = hjorth_parameters[0]
            rri_nonlinear_statistics['rri_complexity' +
                                     suffix] = hjorth_parameters[1]
            rri_nonlinear_statistics['rri_morbidity' +
                                     suffix] = hjorth_parameters[2]
            # Note: despite the key name, pfd() computes the Petrosian fractal
            # dimension, not a Hurst exponent
            rri_nonlinear_statistics['rri_hurst_exponent' + suffix] = pfd(rri)
            rri_nonlinear_statistics['rri_svd_entropy' + suffix] = svd_entropy(
                rri, tau=2, de=2)
            rri_nonlinear_statistics['rri_petrosian_fractal_dimension' +
                                     suffix] = pyeeg.pfd(rri)
        else:
            rri_nonlinear_statistics['rri_approximate_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['rri_sample_entropy' + suffix] = np.nan
            rri_nonlinear_statistics['rri_multiscale_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['rri_permutation_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['rri_multiscale_permutation_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['rri_fisher_info' + suffix] = np.nan
            rri_nonlinear_statistics['rri_activity' + suffix] = np.nan
            rri_nonlinear_statistics['rri_complexity' + suffix] = np.nan
            rri_nonlinear_statistics['rri_morbidity' + suffix] = np.nan
            rri_nonlinear_statistics['rri_hurst_exponent' + suffix] = np.nan
            rri_nonlinear_statistics['rri_svd_entropy' + suffix] = np.nan
            rri_nonlinear_statistics['rri_petrosian_fractal_dimension' +
                                     suffix] = np.nan

        # Non-linear RR difference statistics (note: the tolerances below are
        # based on the standard deviation of the undifferenced rri)
        if len(diff_rri) > 1:
            rri_nonlinear_statistics['diff_rri_approximate_entropy' + suffix] = \
                self.safe_check(pyeeg.ap_entropy(diff_rri, M=2, R=0.1*np.std(rri)))
            rri_nonlinear_statistics['diff_rri_sample_entropy' + suffix] = \
                self.safe_check(ent.sample_entropy(diff_rri, sample_length=2, tolerance=0.1*np.std(rri))[0])
            rri_nonlinear_statistics['diff_rri_multiscale_entropy' + suffix] = \
                self.safe_check(ent.multiscale_entropy(diff_rri, sample_length=2, tolerance=0.1*np.std(rri))[0])
            rri_nonlinear_statistics['diff_rri_permutation_entropy' + suffix] = \
                self.safe_check(ent.permutation_entropy(diff_rri, m=2, delay=1))
            rri_nonlinear_statistics['diff_rri_multiscale_permutation_entropy' + suffix] = \
                self.safe_check(ent.multiscale_permutation_entropy(diff_rri, m=2, delay=1, scale=1)[0])
            rri_nonlinear_statistics['diff_rri_fisher_info' +
                                     suffix] = fisher_info(diff_rri,
                                                           tau=1,
                                                           de=2)
            hjorth_parameters = hjorth(diff_rri)
            rri_nonlinear_statistics['diff_rri_activity' +
                                     suffix] = hjorth_parameters[0]
            rri_nonlinear_statistics['diff_rri_complexity' +
                                     suffix] = hjorth_parameters[1]
            rri_nonlinear_statistics['diff_rri_morbidity' +
                                     suffix] = hjorth_parameters[2]
            # Note: pfd(), not a Hurst exponent (see note above)
            rri_nonlinear_statistics['diff_rri_hurst_exponent' +
                                     suffix] = pfd(diff_rri)
            rri_nonlinear_statistics['diff_rri_svd_entropy' +
                                     suffix] = svd_entropy(diff_rri,
                                                           tau=2,
                                                           de=2)
            rri_nonlinear_statistics['diff_rri_petrosian_fractal_dimension' +
                                     suffix] = pyeeg.pfd(diff_rri)
        else:
            rri_nonlinear_statistics['diff_rri_approximate_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['diff_rri_sample_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['diff_rri_multiscale_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['diff_rri_permutation_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['diff_rri_multiscale_permutation_entropy'
                                     + suffix] = np.nan
            rri_nonlinear_statistics['diff_rri_fisher_info' + suffix] = np.nan
            rri_nonlinear_statistics['diff_rri_activity' + suffix] = np.nan
            rri_nonlinear_statistics['diff_rri_complexity' + suffix] = np.nan
            rri_nonlinear_statistics['diff_rri_morbidity' + suffix] = np.nan
            rri_nonlinear_statistics['diff_rri_hurst_exponent' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['diff_rri_svd_entropy' + suffix] = np.nan
            rri_nonlinear_statistics['diff_rri_petrosian_fractal_dimension' +
                                     suffix] = np.nan

        # Non-linear RR second-difference statistics (again with tolerances
        # based on the undifferenced rri)
        if len(diff2_rri) > 1:
            rri_nonlinear_statistics['diff2_rri_shannon_entropy' + suffix] = \
                self.safe_check(ent.shannon_entropy(diff2_rri))
            rri_nonlinear_statistics['diff2_rri_approximate_entropy' + suffix] = \
                self.safe_check(pyeeg.ap_entropy(diff2_rri, M=2, R=0.1*np.std(rri)))
            rri_nonlinear_statistics['diff2_rri_sample_entropy' + suffix] = \
                self.safe_check(ent.sample_entropy(diff2_rri, sample_length=2, tolerance=0.1*np.std(rri))[0])
            rri_nonlinear_statistics['diff2_rri_multiscale_entropy' + suffix] = \
                self.safe_check(ent.multiscale_entropy(diff2_rri, sample_length=2, tolerance=0.1*np.std(rri))[0])
            rri_nonlinear_statistics['diff2_rri_permutation_entropy' + suffix] = \
                self.safe_check(ent.permutation_entropy(diff2_rri, m=2, delay=1))
            rri_nonlinear_statistics['diff2_rri_multiscale_permutation_entropy' + suffix] = \
                self.safe_check(ent.multiscale_permutation_entropy(diff2_rri, m=2, delay=1, scale=1)[0])
            rri_nonlinear_statistics['diff2_rri_fisher_info' +
                                     suffix] = fisher_info(diff2_rri,
                                                           tau=1,
                                                           de=2)
            hjorth_parameters = hjorth(diff2_rri)
            rri_nonlinear_statistics['diff2_rri_activity' +
                                     suffix] = hjorth_parameters[0]
            rri_nonlinear_statistics['diff2_rri_complexity' +
                                     suffix] = hjorth_parameters[1]
            rri_nonlinear_statistics['diff2_rri_morbidity' +
                                     suffix] = hjorth_parameters[2]
            # Note: pfd(), not a Hurst exponent (see note above)
            rri_nonlinear_statistics['diff2_rri_hurst_exponent' +
                                     suffix] = pfd(diff2_rri)
            rri_nonlinear_statistics['diff2_rri_svd_entropy' +
                                     suffix] = svd_entropy(diff2_rri,
                                                           tau=2,
                                                           de=2)
            rri_nonlinear_statistics['diff2_rri_petrosian_fractal_dimension' +
                                     suffix] = pyeeg.pfd(diff2_rri)
        else:
            rri_nonlinear_statistics['diff2_rri_shannon_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_approximate_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_sample_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_multiscale_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_permutation_entropy' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_multiscale_permutation_entropy'
                                     + suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_fisher_info' + suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_activity' + suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_complexity' + suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_morbidity' + suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_hurst_exponent' +
                                     suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_svd_entropy' + suffix] = np.nan
            rri_nonlinear_statistics['diff2_rri_petrosian_fractal_dimension' +
                                     suffix] = np.nan

        return rri_nonlinear_statistics