Code example #1
 def test_spectral_entropy(self):
     spectral_entropy(RANDOM_TS, SF_TS, method='fft')
     spectral_entropy(RANDOM_TS, SF_TS, method='welch')
     spectral_entropy(RANDOM_TS, SF_TS, method='welch', nperseg=400)
     self.assertEqual(np.round(spectral_entropy(RANDOM_TS, SF_TS,
                                                normalize=True), 1), 0.9)
     self.assertEqual(np.round(spectral_entropy(PURE_SINE, 100), 2), 0.0)
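The fixtures RANDOM_TS, SF_TS and PURE_SINE are not shown in the snippet. A minimal sketch of plausible definitions (modeled on the entropy package's test suite; the exact values are assumptions):

import numpy as np
from entropy import spectral_entropy

np.random.seed(1234)
RANDOM_TS = np.random.rand(3000)  # white noise: near-maximal spectral entropy
SF_TS = 100  # sampling frequency in Hz
PURE_SINE = np.sin(2 * np.pi * 1 * np.arange(3000) / 100)  # single tone

print(np.round(spectral_entropy(RANDOM_TS, SF_TS, normalize=True), 1))  # ~0.9
print(np.round(spectral_entropy(PURE_SINE, 100), 2))  # ~0.0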
Code example #2
 def test_spectral_entropy(self):
     spectral_entropy(RANDOM_TS, SF_TS, method='fft')
     spectral_entropy(RANDOM_TS, SF_TS, method='welch')
     spectral_entropy(RANDOM_TS, SF_TS, method='welch', nperseg=400)
     self.assertEqual(
         np.round(spectral_entropy(RANDOM_TS, SF_TS, normalize=True), 1),
         0.9)
     self.assertEqual(np.round(spectral_entropy(PURE_SINE, 100), 2), 0.0)
     # 2D data
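     # `aal` is assumed to alias numpy.apply_along_axis and `assert_equal`
     # to come from numpy.testing (imports not shown in this snippet)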
     params = dict(sf=SF_TS, normalize=True, method='welch', nperseg=100)
     assert_equal(aal(spectral_entropy, axis=1, arr=data, **params),
                  spectral_entropy(data, **params))
Code example #3
def entropy(x: np.ndarray, freq: int = 1, base: float = e) -> Dict[str, float]:
    """Calculates spectral entropy.

    Parameters
    ----------
    x: numpy array
        The time series.
    freq: int
        Frequency of the time series.
    base: float
        Logarithm base (accepted for API compatibility; unused here).

    Returns
    -------
    dict
        'entropy': Wrapper of the function spectral_entropy.

    References
    ----------
    [1] https://raphaelvallat.com/entropy/build/html/index.html
    """
    try:
        entropy = spectral_entropy(x, sf=freq, normalize=True)
    except Exception:
        entropy = np.nan

    return {'entropy': entropy}
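A hypothetical call of the wrapper above (the series is invented for illustration; the result varies with the input):

import numpy as np

x = np.random.randn(500)  # e.g. a de-trended hourly series
print(entropy(x, freq=24))  # {'entropy': ...} or {'entropy': nan} on failure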
Code example #4
def generate_csv():
    cases_list = unpickle_data()
    csv_name = 'complete_data.csv'
    rows = []  # DataFrame.append was removed in pandas 2.0: collect rows, build once
    for c in cases_list:
        print(f"    > Case {c._case_name}")
        for r in c:
            print(f"\t\t + RECORD {r.name}", end="")
            values = list()
            for k, v in r.N_LINEAR.items():
                s = stats.describe(v)
                values.extend([
                    s[2],  # Mean
                    s[3],  # Variance
                    s[4],  # Skewness
                    spectral_entropy(v, sf=r.fs,
                                     method='fft')  # Spectral entropy
                ])
            row_data = [
                c._case_name,  # Case
                r.name,  # Record
                c.pathology,  # Condition
                COND_ID[c.pathology],  # Condition ID
                len(r.rr),  # RR length
            ] + values
            rows.append(pd.Series(data=row_data, index=CSV_COLS))
            print("[v]")
    FULL_CSV = pd.DataFrame(rows, columns=CSV_COLS)
    FULL_CSV.to_csv(csv_name, index=False)
Code example #5
File: model.py Project: lapali20/web_voice_gender
def create_features(sound, sr):
    X = pd.DataFrame()
    segment_id = 0
    FRAME_SIZE = 256
    HOP_LENGTH = 128

    tf = create_time_frequency(sound,
                               frame_size=FRAME_SIZE,
                               hop_length=HOP_LENGTH,
                               rate=sr)
    centroid = librosa.feature.spectral_centroid(
        y=sound, sr=sr, n_fft=FRAME_SIZE, hop_length=HOP_LENGTH).transpose()

    X.loc[segment_id, 'centroid'] = np.min(centroid) / 1000
    X.loc[segment_id, 'meanfreq'] = med_freq(sound, rate=sr)
    X.loc[segment_id, 'sd'] = np.std(tf)
    X.loc[segment_id, 'kurt'] = kurtosis(tf)
    X.loc[segment_id, 'skew'] = skew(tf)
    X.loc[segment_id, 'mode'] = mode(tf).mode[0]
    X.loc[segment_id, 'peakfreq'] = peak_freq(sound, sr)
    X.loc[segment_id, 'Q25'] = q25 = np.quantile(tf, 0.25)
    X.loc[segment_id, 'Q75'] = q75 = np.quantile(tf, 0.75)
    X.loc[segment_id, 'IQR'] = q75 - q25
    X.loc[segment_id, 'sp.ent'] = ent.spectral_entropy(sound, sf=sr)
    X.loc[segment_id, 'sfm'] = np.std(
        librosa.feature.spectral_flatness(y=sound,
                                          n_fft=FRAME_SIZE,
                                          hop_length=HOP_LENGTH))
    X.loc[segment_id, 'mindom'] = np.min(tf)

    return X
Code example #6
def entropy(x):
    # Unpack the (series, second value) tuple; the second element is unused here
    (x, m) = x
    try:
        # Sampling frequency is hard-coded to 1
        entropy = spectral_entropy(x, 1)
    except Exception:
        entropy = np.nan

    return {'entropy': entropy}
Code example #7
def entropy4(x, normalize=False, base=None):
    # Shannon entropy of the value distribution, in the requested log base
    value, counts = np.unique(x, return_counts=True)
    norm_counts = counts / counts.sum()
    base = e if base is None else base
    entr = -(norm_counts * np.log(norm_counts) / np.log(base)).sum()
    if normalize:
        # Normalize by the maximum entropy log_base(K), in the same base
        entr /= np.log(len(norm_counts)) / np.log(base)
    # Average with the spectral entropy of the signal
    entr += spectral_entropy(x, sf=len(x), method='fft', normalize=normalize)
    return entr / 2
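A quick check of the combined measure (input invented for illustration). With normalize=True both terms lie in [0, 1], so their average does too:

import numpy as np
from math import e  # default log base used by entropy4

x = np.random.randint(0, 8, size=1024)
print(entropy4(x, normalize=True))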
Code example #8
 def process_row(row: pd.Series) -> pd.Series:
     data = dict(row[[m["tag"] for m in NL_METHODS]])
     for tag, vec in data.items():
         s = stats.describe(vec)
         values = [
             s[2], s[3], s[4],
             spectral_entropy(vec, sf=row['fs'], method='fft')
         ]
         for n, v in zip(punctual_names, values):
             row[tag + n] = v
     return row
Code example #9
def main(args):
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    df = pd.read_csv(args.result)
    states = [
        col.split('true_')[1] for col in df.columns if col.startswith('true')
    ]
    true_cols = ['true_' + state for state in states]
    pred_cols = ['smooth_' + state for state in states]
    nclasses = len(true_cols)
    y_true = np.argmax(df[true_cols].values, axis=1)
    pred_prob = df[pred_cols].values
    y_pred = np.argmax(pred_prob, axis=1)
    indices = np.arange(y_true.shape[0])

    feat_df = pd.read_csv(os.path.join(args.indir, 'features_30.0s.csv'))
    shape_df = pd.read_csv(os.path.join(args.indir, 'datashape_30.0s.csv'))
    num_samples = shape_df['num_samples'].values[0]
    num_timesteps = shape_df['num_timesteps'].values[0]
    num_channels = shape_df['num_channels'].values[0]
    rawdata = np.memmap(os.path.join(args.indir, 'rawdata_30.0s.npz'),
                        mode='r', dtype='float32',
                        shape=(num_samples, num_timesteps, num_channels))

    # Get entropy of error scenarios
    spec_entropy = []
    for idx in tqdm(indices):
        sidx = feat_df[(feat_df['filename'] == df.iloc[idx]['Filenames'])
                       & (feat_df['timestamp'] == df.iloc[idx]['Timestamp'])
                       ].index.values[0]
        enorm = np.sqrt(rawdata[sidx, :, 0]**2 + rawdata[sidx, :, 1]**2 +
                        rawdata[sidx, :, 2]**2)
        spec_entropy.append(spectral_entropy(enorm, 50, normalize=True))
    spec_entropy = np.array(spec_entropy)
    spec_entropy[np.isnan(spec_entropy)] = 0.0

    # Plot probability distributions of true and pred states
    bins = 200
    for i, true_state in enumerate(states):
        for j, pred_state in enumerate(states):
            chosen_indices = indices[(y_true == i) & (y_pred == j)]
            state_true_prob = pred_prob[chosen_indices, i]
            state_pred_prob = pred_prob[chosen_indices, j]
            true_prob_hist, true_prob_bins = get_hist(state_true_prob, bins)
            pred_prob_hist, pred_prob_bins = get_hist(state_pred_prob, bins)
            plot_hist(args.outdir, true_prob_bins, true_prob_hist, true_state,
                      pred_prob_bins, pred_prob_hist, pred_state, metric='prob')

            state_true_ent = spec_entropy[(y_true == i) & (y_pred == i)]
            state_pred_ent = spec_entropy[(y_true == i) & (y_pred == j)]
            true_ent_hist, true_ent_bins = get_hist(state_true_ent, bins)
            pred_ent_hist, pred_ent_bins = get_hist(state_pred_ent, bins)
            plot_hist(args.outdir, true_ent_bins, true_ent_hist, true_state,
                      pred_ent_bins, pred_ent_hist, pred_state, metric='ent')
Code example #10
def entropy(x, freq=1, normalize=False):
    """
    Spectral Entropy
    """
    try:
        start, stop = arg_longest_not_null(x)
        result = spectral_entropy(x[start:stop],
                                  sf=freq,
                                  method='welch',
                                  normalize=normalize)
    except Exception:
        result = np.nan
    return result
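The helper arg_longest_not_null is not shown anywhere in these examples. A minimal sketch of what it plausibly does (hypothetical implementation: bounds of the longest run of consecutive non-NaN values):

import numpy as np

def arg_longest_not_null(x):
    """Hypothetical helper: (start, stop) of the longest contiguous non-NaN run."""
    bad = np.append(np.isnan(np.asarray(x, dtype=float)), True)  # sentinel closes the final run
    best, start = (0, 0), None
    for i, b in enumerate(bad):
        if not b and start is None:
            start = i  # open a run of valid samples
        elif b and start is not None:
            if i - start > best[1] - best[0]:
                best = (start, i)  # keep the longest run seen so far
            start = None
    return best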
Code example #11
def save_test():
    TEST_DIRS = list(Path('.').glob('Test_*ws/'))
    for td in TEST_DIRS:
        t_cases = test_unpickle(td)

        pdir = "Test/"

        csv_name = pdir + td.stem + '.csv'
        pkl_name = pdir + td.stem + '.pkl'

        csv_rows = []    # DataFrame.append was removed in pandas 2.0:
        pkl_frames = []  # collect pieces and concatenate once at the end

        for c in t_cases:
            for r in c:
                # Process for CSV
                row_data = [
                    c._case_name,
                    r.name,
                    c.pathology,
                    COND_ID[c.pathology],
                    len(r.rr_int),
                ]
                for k, v in r.N_LINEAR.items():
                    s = stats.describe(v)
                    row_data.extend([
                        s[2], s[3], s[4],  # mean, variance, skewness
                        spectral_entropy(v, sf=r.fs, method='fft')
                    ])
                csv_rows.append(pd.Series(data=row_data, index=CSV_COLS))
                # Process for pickle
                pkl_row = {
                    'case': c._case_name,
                    'record': r.name,
                    'condition': c.pathology,
                    'cond_id': COND_ID[c.pathology],
                    'length': len(r.rr_int)
                }
                pkl_row.update(r.N_LINEAR)
                pkl_frames.append(pd.DataFrame(pkl_row))

        csv_data = pd.DataFrame(csv_rows, columns=CSV_COLS)
        pkl_data = pd.concat(pkl_frames, ignore_index=True)

        # Data is saved in both formats
        csv_data.to_csv(csv_name, index=False)
        with open(pkl_name, 'wb') as pf:
            pickle.dump(pkl_data, pf)
Code example #12
 def etrpy(sample, etype):
     if etype == "svd":
         et = entropy.svd_entropy(sample, order=3, delay=1)
     elif etype == "spectral":
         et = entropy.spectral_entropy(sample,
                                       100,
                                       method='welch',
                                       normalize=True)
     elif etype == "sample":
         et = entropy.sample_entropy(sample, order=3)
     elif etype == "perm":
         et = entropy.perm_entropy(sample, order=3, normalize=True)
     else:
         print("Error: unrecognised entropy type {}".format(etype))
         exit(-1)
     return et
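A possible driver for the dispatcher above (sample data invented; note that the spectral branch hard-codes sf=100, so it assumes data sampled at 100 Hz):

import numpy as np
import entropy  # the package the dispatcher relies on

sample = np.random.randn(1000)
for etype in ("svd", "spectral", "sample", "perm"):
    print(etype, etrpy(sample, etype))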
Code example #13
def entropy(x, freq=1, normalize=False):
    """
    Spectral Entropy
    """
    if ENTROPY_PACKAGE_AVAILABLE:
        try:
            start, stop = arg_longest_not_null(x)
            result = spectral_entropy(x[start:stop],
                                      sf=freq,
                                      method='welch',
                                      normalize=normalize)
        except Exception:
            result = np.nan
        return result
    else:
        raise ImportError('entropy package not found')
Code example #14
    def compute_measures(self, window=(1000,)):
        """
        Compute some measures from the wind series.

        :return: dict with the spectral entropy plus stability and lumpiness
            for each window size
        """
        if self.raw_data is None:
            raise NameError("Raw data is not loaded")

        dvals = {}
        dvals['specent'] = spectral_entropy(self.raw_data[:, 0], sf=1)

        data = self.raw_data[:, 0]
        for w in window:
            length = int(data.shape[0] / w)
            size = w * length
            datac = data[:size]
            datac = datac.reshape(-1, w)
            means = np.mean(datac, axis=1)
            stds = np.std(datac, axis=1)
            dvals[f'Stab({w})'] = np.std(means)  # stability: spread of the window means
            dvals[f'Lump({w})'] = np.std(stds)   # lumpiness: spread of the window stds

        return dvals
Code example #15
def fun(a):
    # Return the normalized spectral entropy of the input array (Welch PSD)
    return spectral_entropy(a, 100, normalize=True, method='welch')
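This helper suits numpy.apply_along_axis-style row-wise use; for example (array invented for illustration):

import numpy as np

arr = np.random.randn(8, 400)  # 8 signals, 400 samples each at an assumed 100 Hz
print(np.apply_along_axis(fun, 1, arr))  # one normalized spectral entropy per row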
Code example #16
def entropy(x):
    print(x)
    return spectral_entropy(x, sf=len(x), method='fft', normalize=True)
Code example #17
# ax1.set_ylabel('X', color=color)
# ax1.plot(s[si], color=color)
# ax1.tick_params(axis='y', labelcolor=color)
#
# ax2 = ax1.twinx()
# color = 'tab:blue'
# ax2.set_ylabel('S', color=color)
# ax2.plot(S[si], color=color)
# ax2.tick_params(axis='y', labelcolor=color)
#
# plt.show()

# Entropy:
print(entropy.perm_entropy(s[0], order=3,
                           normalize=True))  # Permutation entropy
print(entropy.spectral_entropy(s[0], 100, method='welch',
                               normalize=True))  # Spectral entropy
print(entropy.svd_entropy(
    s[0], order=3, delay=1,
    normalize=True))  # Singular value decomposition entropy
print(entropy.app_entropy(s[0], order=2,
                          metric='chebyshev'))  # Approximate entropy
print(entropy.sample_entropy(s[0], order=2,
                             metric='chebyshev'))  # Sample entropy

fpath_db = os.path.join(os.path.dirname(__file__), 'data',
                        '06-sir-gamma-beta.sqlite3')
te = TrajectoryEnsemble(fpath_db).stats()
s = te.traj[1].get_signal().series
print(entropy.app_entropy(s[0], order=2,
                          metric='chebyshev'))  # Approximate entropy
Code example #18
def Figure_Diff_Features():
    all_files_dataset1 = glob.glob(path + 'Segmentation/ID1/Figure_paper_B/' +
                                   'T-*.csv')
    sigdata = []
    features1_1 = np.array([])
    features2_1 = np.array([])
    features3_1 = np.array([])
    features4_1 = np.array([])
    features5_1 = np.array([])
    timestamp = np.array([])
    segment = np.array([])
    for filename in all_files_dataset1:
        sigdata = pd.read_csv(filename, sep=',')
        head, tail = os.path.split(filename)
        ######Filtering##########
        signalfilt = np.array(
            BandpassFiilter(sigdata, Frequency_rate)['ppg_filt'])
        ########Peak Detection#############
        rpeaks = _Peak_detection(sigdata, signalfilt, Frequency_rate)
        #########Feature extraction############
        df_features = pd.DataFrame(
            columns=['skewness', 'kurtosis', 'approxentro'])
        heartpeak = _segmentation_heartCycle(sigdata, signalfilt, rpeaks)
        for i in range(heartpeak.shape[0] - 1):
            heart_cycle = signalfilt[heartpeak[i]:heartpeak[i + 1]]
            f_skew = stats.skew(heart_cycle)
            f_kurt = stats.kurtosis(heart_cycle)
            f_appentropy = _aprox_Entropy(heart_cycle, 2, 7)
            df_features.loc[len(df_features)] = [f_skew, f_kurt, f_appentropy]

        timestamp = np.append(timestamp, tail)
        features1_1 = np.append(features1_1, _range(df_features['skewness']))
        features2_1 = np.append(features2_1, _range(df_features['kurtosis']))
        features3_1 = np.append(features3_1,
                                _range(df_features['approxentro']))
        features4_1 = np.append(features4_1, _Shannon_Entropy(signalfilt))
        features5_1 = np.append(
            features5_1,
            spectral_entropy(signalfilt, Frequency_rate, method='welch'))

    all_files_dataset1 = glob.glob(path + 'Segmentation/ID1/Figure_paper_G/' +
                                   'T-*.csv')
    sigdata = []
    features1_2 = np.array([])
    features2_2 = np.array([])
    features3_2 = np.array([])
    features4_2 = np.array([])
    features5_2 = np.array([])
    timestamp = np.array([])
    segment = np.array([])
    for filename in all_files_dataset1:
        sigdata = pd.read_csv(filename, sep=',')
        head, tail = os.path.split(filename)
        signalfilt = np.array(
            BandpassFiilter(sigdata, Frequency_rate)['ppg_filt'])
        rpeaks = _Peak_detection(sigdata, signalfilt, Frequency_rate)
        df_features = pd.DataFrame(
            columns=['skewness', 'kurtosis', 'approxentro'])
        heartpeak = _segmentation_heartCycle(sigdata, signalfilt, rpeaks)
        for i in range(heartpeak.shape[0] - 1):
            heart_cycle = signalfilt[heartpeak[i]:heartpeak[i + 1]]
            f_skew = stats.skew(heart_cycle)
            f_kurt = stats.kurtosis(heart_cycle)
            f_appentropy = _aprox_Entropy(heart_cycle, 2, 7)
            df_features.loc[len(df_features)] = [f_skew, f_kurt, f_appentropy]

        timestamp = np.append(timestamp, tail)
        features1_2 = np.append(features1_2, _range(df_features['skewness']))
        features2_2 = np.append(features2_2, _range(df_features['kurtosis']))
        features3_2 = np.append(features3_2,
                                _range(df_features['approxentro']))
        features4_2 = np.append(features4_2, _Shannon_Entropy(signalfilt))
        features5_2 = np.append(
            features5_2,
            spectral_entropy(signalfilt, Frequency_rate, method='welch'))

    segment = [1, 2, 3, 4, 5]
    segment2 = [6, 7, 8, 9, 10]
    plt.figure(figsize=[6, 2.8])
    plt.scatter(segment, features5_2, c='b', label='Reliable', s=100)
    plt.scatter(segment2, features5_1, c='r', label='Unreliable', s=100)
    plt.yticks(np.arange(0, 7, 1), fontsize=16)
    plt.xticks(np.arange(1, 11, 1), fontsize=16)
    #plt.tick_params(length=0.5, width=0.5)

    plt.xlabel('#Segment', fontsize=24)
    plt.ylabel('Amplitude', fontsize=24)
    plt.legend(fontsize=16)
    plt.grid(ls='-.')
    plt.show()
Code example #19
def spec_entropy(x):
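    # `fs` is assumed to be a module-level sampling frequency defined elsewhere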
    return entropy.spectral_entropy(x, fs, method="welch", normalize=True)
Code example #20
File: firstloop.py Project: mastefed/BG-DBS-Thesis
def getdata():
    ########################## Initial time to make my network settle down ######################
    run(300 * ms)
    ##############################################################################################

    print(f"Freq input CTX = {rate_CTX} Hz\nFreq input STR = {rate_STR} Hz\n")
    """ Functions to monitor neurons' state
    """
    spikemonitorSTN = SpikeMonitor(STNGroup, variables=['v'])
    statemonitorSTN = StateMonitor(STNGroup,
                                   ['v', 'I_lfp_stn', 'I_chem_GPe_STN'],
                                   record=True)

    statemonitorSTNRB = StateMonitor(STNRBGroup, ['v'], record=True)
    spikemonitorSTNRB = SpikeMonitor(STNRBGroup, variables=['v'])

    statemonitorSTNLLRS = StateMonitor(STNLLRSGroup, ['v'], record=True)
    spikemonitorSTNLLRS = SpikeMonitor(STNLLRSGroup, variables=['v'])

    statemonitorSTNNR = StateMonitor(STNNRGroup, ['v'], record=True)
    spikemonitorSTNNR = SpikeMonitor(STNNRGroup, variables=['v'])

    spikemonitorGPe = SpikeMonitor(GPeGroup, variables=['v'])
    statemonitorGPe = StateMonitor(GPeGroup,
                                   ['v', 'I_lfp_gpe', 'I_chem_STN_GPe'],
                                   record=True)

    statemonitorGPeA = StateMonitor(GPeAGroup, variables=['v'], record=True)
    spikemonitorGPeA = SpikeMonitor(GPeAGroup, variables=['v'])

    statemonitorGPeB = StateMonitor(GPeBGroup, variables=['v'], record=True)
    spikemonitorGPeB = SpikeMonitor(GPeBGroup, variables=['v'])

    statemonitorGPeC = StateMonitor(GPeCGroup, variables=['v'], record=True)
    spikemonitorGPeC = SpikeMonitor(GPeCGroup, variables=['v'])

    spikemonitorCTX = SpikeMonitor(CorticalGroup)

    ##############################################################################################
    run(duration)  # Run boy, run!
    ##############################################################################################
    """ Calculating the Firing Rates for the entire simulation
    """

    frGPe = firingrate(spikemonitorGPe, duration)
    frGPeA = firingrate(spikemonitorGPeA, duration)
    frGPeB = firingrate(spikemonitorGPeB, duration)
    frGPeC = firingrate(spikemonitorGPeC, duration)

    frSTN = firingrate(spikemonitorSTN, duration)
    frSTNRB = firingrate(spikemonitorSTNRB, duration)
    frSTNLLRS = firingrate(spikemonitorSTNLLRS, duration)
    frSTNNR = firingrate(spikemonitorSTNNR, duration)

    frCTX = firingrate(spikemonitorCTX, duration)

    frGPe = np.mean(frGPe)
    frGPeA = np.mean(frGPeA)
    frGPeB = np.mean(frGPeB)
    frGPeC = np.mean(frGPeC)

    frSTN = np.mean(frSTN)
    frSTNRB = np.mean(frSTNRB)
    frSTNLLRS = np.mean(frSTNLLRS)
    frSTNNR = np.mean(frSTNNR)
    """ Calculating ISI, mean ISI and standard deviation of ISI
        for each population.
    """
    isiSTN, mean_isiSTN, std_isiSTN = isi_mean_std(spikemonitorSTN)
    isiSTNRB, mean_isiSTNRB, std_isiSTNRB = isi_mean_std(spikemonitorSTNRB)
    isiSTNLLRS, mean_isiSTNLLRS, std_isiSTNLLRS = isi_mean_std(
        spikemonitorSTNLLRS)
    isiSTNNR, mean_isiSTNNR, std_isiSTNNR = isi_mean_std(spikemonitorSTNNR)

    isiGPe, mean_isiGPe, std_isiGPe = isi_mean_std(spikemonitorGPe)
    isiGPeA, mean_isiGPeA, std_isiGPeA = isi_mean_std(spikemonitorGPeA)
    isiGPeB, mean_isiGPeB, std_isiGPeB = isi_mean_std(spikemonitorGPeB)
    isiGPeC, mean_isiGPeC, std_isiGPeC = isi_mean_std(spikemonitorGPeC)
    """ Calculating Coefficient of Variation:
        How irregular is the firing of my network?
    """
    cv_gpe = coeffvar(std_isiGPe, mean_isiGPe)
    cv_gpea = coeffvar(std_isiGPeA, mean_isiGPeA)
    cv_gpeb = coeffvar(std_isiGPeB, mean_isiGPeB)
    cv_gpec = coeffvar(std_isiGPeC, mean_isiGPeC)

    cv_stn = coeffvar(std_isiSTN, mean_isiSTN)
    cv_stnrb = coeffvar(std_isiSTNRB, mean_isiSTNRB)
    cv_stnllrs = coeffvar(std_isiSTNLLRS, mean_isiSTNLLRS)
    cv_stnnr = coeffvar(std_isiSTNNR, mean_isiSTNNR)
    """ Calculating meaning currents: mean excitatory and inhibitory current and mean currents to STN and GPe
    """
    mean_I_lfp_STN = np.mean(statemonitorSTN.I_lfp_stn, 0)
    mean_I_lfp_GPe = np.mean(statemonitorGPe.I_lfp_gpe, 0)
    """ Calculating spectra of LFP currents I obtained before
        This is done via scipy.signal.welch and scipy.integrate.simps
    """
    filtered_lfp_STN = butter_bandpass_filter(mean_I_lfp_STN,
                                              1,
                                              100,
                                              1 / deft,
                                              order=3)
    filtered_lfp_GPe = butter_bandpass_filter(mean_I_lfp_GPe,
                                              1,
                                              100,
                                              1 / deft,
                                              order=3)

    fstn, specstn = welch(filtered_lfp_STN,
                          fs=1 / deft,
                          nperseg=2 / deft,
                          nfft=2**18)
    fgpe, specgpe = welch(filtered_lfp_GPe,
                          fs=1 / deft,
                          nperseg=2 / deft,
                          nfft=2**18)
    low = 12 * Hz
    high = 38 * Hz
    idx_beta_stn = np.logical_and(fstn >= low, fstn <= high)
    idx_beta_gpe = np.logical_and(fgpe >= low, fgpe <= high)

    freq_res_stn = fstn[1] - fstn[0]
    freq_res_gpe = fgpe[1] - fgpe[0]
    total_power_stn = simps(specstn, dx=freq_res_stn)
    total_power_gpe = simps(specgpe, dx=freq_res_gpe)
    beta_power_stn = simps(specstn[idx_beta_stn], dx=freq_res_stn)
    beta_power_gpe = simps(specgpe[idx_beta_gpe], dx=freq_res_gpe)
    """ Spectral Entropy of nuclei:
        How peaked and concentrated is my beta-band spectrum?
    """
    specentropy_stn = spectral_entropy(filtered_lfp_STN,
                                       sf=1 / deft,
                                       method='welch',
                                       nperseg=2 / deft,
                                       normalize=True)
    specentropy_gpe = spectral_entropy(filtered_lfp_GPe,
                                       sf=1 / deft,
                                       method='welch',
                                       nperseg=2 / deft,
                                       normalize=True)
    """ Piece of code to calculate the synchronization between neuron in a single population
        and among the three populations of GPe and STN.
    """
    var_time_v_GPe = variance_time_fluctuations_v(statemonitorGPe)
    norm_GPe = variance_time_flu_v_norm(N_GPe, statemonitorGPe)
    sync_par_GPe = sqrt(var_time_v_GPe / norm_GPe)

    var_time_v_STN = variance_time_fluctuations_v(statemonitorSTN)
    norm_STN = variance_time_flu_v_norm(N_STN, statemonitorSTN)
    sync_par_STN = sqrt(var_time_v_STN / norm_STN)

    var_time_v_GPeA = variance_time_fluctuations_v(statemonitorGPeA)
    norm_GPeA = variance_time_flu_v_norm(N_GPe_A, statemonitorGPeA)
    sync_par_GPeA = sqrt(var_time_v_GPeA / norm_GPeA)

    var_time_v_GPeB = variance_time_fluctuations_v(statemonitorGPeB)
    norm_GPeB = variance_time_flu_v_norm(N_GPe_B, statemonitorGPeB)
    sync_par_GPeB = sqrt(var_time_v_GPeB / norm_GPeB)

    var_time_v_GPeC = variance_time_fluctuations_v(statemonitorGPeC)
    norm_GPeC = variance_time_flu_v_norm(N_GPe_C, statemonitorGPeC)
    sync_par_GPeC = sqrt(var_time_v_GPeC / norm_GPeC)

    var_time_v_STNRB = variance_time_fluctuations_v(statemonitorSTNRB)
    norm_STNRB = variance_time_flu_v_norm(N_STN_RB, statemonitorSTNRB)
    sync_par_STNRB = sqrt(var_time_v_STNRB / norm_STNRB)

    var_time_v_STNLLRS = variance_time_fluctuations_v(statemonitorSTNLLRS)
    norm_STNLLRS = variance_time_flu_v_norm(N_STN_LLRS, statemonitorSTNLLRS)
    sync_par_STNLLRS = sqrt(var_time_v_STNLLRS / norm_STNLLRS)

    var_time_v_STNNR = variance_time_fluctuations_v(statemonitorSTNNR)
    norm_STNNR = variance_time_flu_v_norm(N_STN_NR, statemonitorSTNNR)
    sync_par_STNNR = sqrt(var_time_v_STNNR / norm_STNNR)
    """ Space reserved to plot useful stuff down here.
    """
    """ Retrieving data I need for analysis
    """
    data_provv = [
        rate_CTX, rate_STR, frGPe, frGPeA, frGPeB, frGPeC, frSTN, frSTNRB,
        frSTNLLRS, frSTNNR, cv_gpe, cv_gpea, cv_gpeb, cv_gpec, cv_stn,
        cv_stnrb, cv_stnllrs, cv_stnnr, beta_power_stn / total_power_stn,
        beta_power_gpe / total_power_gpe, specentropy_stn, specentropy_gpe,
        sync_par_STNRB, sync_par_STNLLRS, sync_par_STNNR, sync_par_STN,
        sync_par_GPeA, sync_par_GPeB, sync_par_GPeC, sync_par_GPe
    ]

    data_provv = np.asarray(data_provv)
    return data_provv
Code example #21
    def createEntropyFeatureArray(self, epochSeries: pd.Series, samplingFreq: int) -> Tuple[np.ndarray, List[str]]:  # Tuple, List from typing
        ''' Creates a 3d numpy array with entropy features - also returns the feature names
        
        Creates the following features:
            - Approximate Entropy (AE)
            - Sample Entropy (SamE)
            - Spectral Entropy (SpeE)
            - Permutation Entropy (PE)
            - Singular Value Decomposition Entropy (SvdE)

        For each channel there are 5 features then

        NaN Values will be set to Zero (not good but it works for now)

        '''
        # Create np array, where the data will be stored
        d1 = len(epochSeries) # First dimension
        d2 = 1 # only one sample in that epoch
        
        channels = len(epochSeries[0].columns)
        d3 = channels * 5 # third dimension - 5 because we calculate five different entropies for each channel
        
        entropyFeatureArrayX = createEmptyNumpyArray(d1, d2, d3)
        
        # Create a list where all feature names are stored
        entropyFeatureList = [None] * d3
        
        stepSize = 5 # step is 5 because we calculate 5 different entropies
        for i in range(len(epochSeries)): # loop through the epochs
            
            # We start at the step size and loop over the columns; the upper bound is
            # the number of columns times the step size, plus one extra step (because
            # we don't start at 0)
            for j in range(stepSize, (len(epochSeries[i].columns)*stepSize)+stepSize, stepSize): # loop through the columns

                # j_epoch is the normalized column index (as if the step size were 1);
                # integer division keeps it a valid integer index
                j_epoch = j // stepSize - 1

                # get the column name
                col = epochSeries[i].columns[j_epoch]
                
                # The values of the epoch of the current column
                colEpochList = epochSeries[i][col].tolist()
                
                ######################################
                # calculate Approximate Entropy
                # ------------------------------------
                val = entropy.app_entropy(colEpochList, order=2)
                # if the value is NaN, just set it to 0
                if np.isnan(val):
                    val = 0
                entropyFeatureArrayX[i][0][j-1] = val
                
                # add approximate entropy feature to the list
                entropyFeatureList = addFeatureToList(featureList = entropyFeatureList,
                                                    featureListIndex = j-1,
                                                    newFeatureName = "{col}_approximate_entropy".format(col=col))
                
                ######################################
                # calculate Sample Entropy
                # ------------------------------------
                val = entropy.sample_entropy(colEpochList, order=2)
                # if the value is NaN, just set it to 0
                if np.isnan(val):
                    val = 0
                entropyFeatureArrayX[i][0][j-2] = val
                
                entropyFeatureList = addFeatureToList(featureList = entropyFeatureList,
                                                    featureListIndex = j-2,
                                                    newFeatureName = "{col}_sample_entropy".format(col=col))
                
                ######################################
                # calculate Spectral Entropy
                # ------------------------------------
                val = entropy.spectral_entropy(colEpochList, sf=samplingFreq, method='fft', normalize=True)
                # if the value is NaN, just set it to 0
                if np.isnan(val):
                    val = 0
                entropyFeatureArrayX[i][0][j-3] = val
                
                entropyFeatureList = addFeatureToList(featureList = entropyFeatureList,
                                                    featureListIndex = j-3,
                                                    newFeatureName = "{col}_spectral_entropy".format(col=col))
                
                ######################################
                # calculate Permutation Entropy
                # ------------------------------------
                val = entropy.perm_entropy(colEpochList, order=3, normalize=True, delay=1)
                # if the value is NaN, just set it to 0
                if np.isnan(val):
                    val = 0
                entropyFeatureArrayX[i][0][j-4] = val
                
                entropyFeatureList = addFeatureToList(featureList = entropyFeatureList,
                                                    featureListIndex = j-4,
                                                    newFeatureName = "{col}_permutation_entropy".format(col=col))
                
                ######################################
                # calculate Singular Value Decomposition entropy.
                # ------------------------------------
                val = entropy.svd_entropy(colEpochList, order=3, normalize=True, delay=1)
                # if the value is NaN, just set it to 0
                if np.isnan(val):
                    val = 0
                entropyFeatureArrayX[i][0][j-5] = val
                
                entropyFeatureList = addFeatureToList(featureList = entropyFeatureList,
                                                    featureListIndex = j-5,
                                                    newFeatureName = "{col}_svd_entropy".format(col=col))
                
                #break
            #break
        

        # Normalize everything to 0-1
        print("Normalizing the entropy features...")

        # Norm=max -> then it will normalize between 0-1, axis=0 is important too!
        # We need to reshape it to a 2d Array
        X_entropy_norm = preprocessing.normalize(entropyFeatureArrayX.reshape(entropyFeatureArrayX.shape[0], entropyFeatureArrayX.shape[2]), norm='max', axis=0)

        # Now reshape it back to a simple 3D array
        X_entropy_norm = X_entropy_norm.reshape(X_entropy_norm.shape[0], 1, X_entropy_norm.shape[1])


        return X_entropy_norm, entropyFeatureList
Code example #22
def main():
    all_files_dataset1 = glob.glob(path + 'Segmentation/ID12/SixDay_30s/' +
                                   'T-*.csv')
    sigdata = []
    features1 = []
    features2 = []
    features3 = []
    features4 = []
    features5 = []
    features6 = []
    features7 = []
    features8 = []
    features9 = []
    features10 = []
    features11 = []
    features12 = []
    features13 = []
    features14 = []
    features15 = []
    features16 = []
    features17 = []
    features18 = []
    features19 = []
    features20 = []
    features21 = []
    features22 = []
    features23 = []
    features24 = []
    timestamp = []

    for filename in all_files_dataset1:
        sigdata = pd.read_csv(filename, sep=',')
        head, tail = os.path.split(filename)
        timestamp.append(tail)
        signalfilt = np.array(
            BandpassFiilter(sigdata, Frequency_rate)['ppg_filt'])

        rpeaks = _Peak_detection(sigdata, signalfilt, Frequency_rate)

        df_features = pd.DataFrame(
            columns=['skewness', 'kurtosis', 'power', 'approxentro'])
        heartpeak = _segmentation_heartCycle(sigdata, signalfilt, rpeaks)
        for i in range(heartpeak.shape[0] - 1):
            heart_cycle = signalfilt[heartpeak[i]:heartpeak[i + 1]]
            f_skew = stats.skew(heart_cycle)
            f_kurt = stats.kurtosis(heart_cycle)
            # 'power' is never computed in the original; assumed here to be the
            # mean squared amplitude of the heart cycle
            f_power = np.mean(np.square(heart_cycle))
            f_appentropy = _aprox_Entropy(heart_cycle, 2, 7)
            df_features.loc[len(df_features)] = [
                f_skew, f_kurt, f_power, f_appentropy
            ]
        features1.append(np.mean(signalfilt))
        features2.append(np.std(signalfilt))
        features3.append(np.median(signalfilt))
        features4.append(_range(df_features['skewness']))
        features5.append(_range(df_features['kurtosis']))
        features6.append(_range(df_features['power']))
        features7.append(_range(df_features['approxentro']))
        features8.append(_Shannon_Entropy(signalfilt))
        features9.append(_aprox_Entropy(signalfilt, 2, 7))

        frequency_psd, amplitude_psd = signal.periodogram(signalfilt,
                                                          fs=Frequency_rate)

        # Band power in 0.2 Hz steps from 0.6 Hz to 3 Hz (features 10-21)
        band_edges = np.round(np.arange(0.6, 3.01, 0.2), 1)  # 0.6, 0.8, ..., 3.0
        band_features = [
            features10, features11, features12, features13, features14,
            features15, features16, features17, features18, features19,
            features20, features21
        ]
        for f_lo, f_hi, feat in zip(band_edges[:-1], band_edges[1:],
                                    band_features):
            in_band = np.logical_and(frequency_psd >= f_lo,
                                     frequency_psd <= f_hi)
            feat.append(np.trapz(amplitude_psd[in_band],
                                 frequency_psd[in_band]))
        features22.append(np.std(amplitude_psd))
        features23.append(np.max(amplitude_psd))
        features24.append(
            spectral_entropy(signalfilt, Frequency_rate, method='welch'))

    df_data = list(
        zip(*[
            timestamp, features1, features2, features3, features4, features5,
            features6, features7, features8, features9, features10, features11,
            features12, features13, features14, features15, features16,
            features17, features18, features19, features20, features21,
            features22, features23, features24
        ]))
    df = pd.DataFrame(df_data,
                      columns=[
                          'Timestamp', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6',
                          'f7', 'f8', 'f9', 'f10', 'f11', 'f12', 'f13', 'f14',
                          'f15', 'f16', 'f17', 'f18', 'f19', 'f20', 'f21',
                          'f22', 'f23', 'f24'
                      ])
    df = df.dropna()
    #df.to_csv(path+'Features_ID1_30s_40.csv', mode='a', index=False)
    ######################Normalized#########################
    #print(df)
    # z-score normalization over all 24 feature columns
    feature_cols = ['f{}'.format(i) for i in range(1, 25)]
    df_z = pd.DataFrame({
        'ave': [np.mean(df[c]) for c in feature_cols],
        'std': [np.std(df[c]) for c in feature_cols],
    })
    for i, c in enumerate(feature_cols):
        df[c] = (df[c] - df_z['ave'][i]) / df_z['std'][i]

    df_data = df[['Timestamp'] + feature_cols]
    df_data.to_csv(path + 'norm_features.csv', mode='a', index=False)