Ejemplo n.º 1
0
 def test_sample_entropy(self):
     """Sample entropy: reference values (from MNE-features) and error handling."""
     se_default = sample_entropy(RANDOM_TS, order=2)
     sample_entropy(RANDOM_TS_LONG, order=2)  # smoke test on the longer series
     se_euclidean = sample_entropy(RANDOM_TS, order=3, metric='euclidean')
     # Compare with MNE-features
     self.assertEqual(np.round(se_default, 3), 2.192)
     self.assertEqual(np.round(se_euclidean, 3), 2.725)
     # Additional parameter combinations must run without error.
     sample_entropy(RANDOM_TS, order=3)
     sample_entropy(RANDOM_TS, order=2, metric='euclidean')
     # An unknown metric name must be rejected.
     with self.assertRaises(ValueError):
         sample_entropy(RANDOM_TS, order=2, metric='wrong')
Ejemplo n.º 2
0
 def test_sample_entropy(self):
     """Check sample_entropy against MNE-features reference values."""
     chebyshev_se = sample_entropy(RANDOM_TS, order=2)
     sample_entropy(RANDOM_TS_LONG, order=2)  # must also work on long input
     euclidean_se = sample_entropy(RANDOM_TS, order=3, metric='euclidean')
     # Compare with MNE-features
     # Note that MNE-features uses the sample standard deviation
     # np.std(ddof=1) and not the population standard deviation to define r
     self.assertEqual(np.round(chebyshev_se, 3), 2.192)
     self.assertEqual(np.round(euclidean_se, 3), 2.724)
     # Smoke tests for further parameter combinations.
     sample_entropy(RANDOM_TS, order=3)
     sample_entropy(RANDOM_TS, order=2, metric='euclidean')
     with self.assertRaises(ValueError):
         sample_entropy(RANDOM_TS, order=2, metric='wrong')
Ejemplo n.º 3
0
 def test_sampleEntropy(self):
     """Sample entropy of the reference series matches the known values."""
     series = TS_SAMPLE_ENTROPY
     tolerance = 0.2 * np.std(series)  # r = 0.2 * population std, the usual choice
     result = ent.sample_entropy(series, 4, tolerance)
     expected = [2.21187685, 2.10787948, 2.36712361, 1.79175947]
     np.testing.assert_array_equal(np.around(result, 8), expected)
Ejemplo n.º 4
0
def SampEn(RR_windows, **kwargs):
    """Order-2 Chebyshev sample entropy for each RR-interval window.

    Windows shorter than 8 samples yield NaN (too short for a stable
    estimate).  Extra keyword arguments are accepted but ignored, matching
    the signature of sibling feature extractors.

    Returns a 1-D numpy array with one value per window.
    """
    def _window_entropy(window):
        # Too few beats for the estimator — flag with NaN instead of failing.
        if len(window) < 8:
            return np.nan
        # value = nolds.sampen(wRR, emb_dim=1)
        return entropy.sample_entropy(window, order=2, metric='chebyshev')

    return np.array([_window_entropy(window) for window in RR_windows])
 def etrpy(sample, etype):
     if etype == "svd":
         et = entropy.svd_entropy(sample, order=3, delay=1)
     elif etype == "spectral":
         et = entropy.spectral_entropy(sample,
                                       100,
                                       method='welch',
                                       normalize=True)
     elif etype == "sample":
         et = entropy.sample_entropy(sample, order=3)
     elif etype == "perm":
         et = entropy.perm_entropy(sample, order=3, normalize=True)
     else:
         print("Error: unrecognised entropy type {}".format(etype))
         exit(-1)
     return et
    def createEntropyFeatureArray(self, epochSeries : pd.Series, samplingFreq : int) -> (np.ndarray, List[str]):
        ''' Creates 3d Numpy with a entropy features - also returns the feature names

        Creates the following features:
            - Approximate Entropy (AE)
            - Sample Entropy (SamE)
            - Spectral Entropy (SpeE)
            - Permutation Entropy (PE)
            - Singular Value Decomposition Entropy (SvdE)

        For each channel there are 5 features then

        NaN Values will be set to Zero (not good but it works for now)

        Bug fix vs. original: the channel index derived from the step
        counter was computed with true division (``j/stepSize - 1``), which
        yields a float under Python 3 and breaks ``columns[...]`` indexing.
        Integer division is used now.
        '''
        # Output dimensions: (epochs, 1 sample per epoch, channels * 5 entropies).
        d1 = len(epochSeries)
        d2 = 1
        channels = len(epochSeries[0].columns)
        stepSize = 5  # five different entropies per channel
        d3 = channels * stepSize

        entropyFeatureArrayX = createEmptyNumpyArray(d1, d2, d3)

        # Feature names, filled in parallel with the array.
        entropyFeatureList = [None] * d3

        # (array offset within a channel's 5-slot group, feature-name suffix,
        #  entropy function).  Offsets reproduce the original storage layout:
        # approximate entropy at j-1, sample at j-2, spectral at j-3,
        # permutation at j-4 and SVD entropy at j-5.
        featureSpecs = [
            (1, "approximate_entropy",
             lambda x: entropy.app_entropy(x, order=2)),
            (2, "sample_entropy",
             lambda x: entropy.sample_entropy(x, order=2)),
            (3, "spectral_entropy",
             lambda x: entropy.spectral_entropy(x, sf=samplingFreq, method='fft', normalize=True)),
            (4, "permutation_entropy",
             lambda x: entropy.perm_entropy(x, order=3, normalize=True, delay=1)),
            (5, "svd_entropy",
             lambda x: entropy.svd_entropy(x, order=3, normalize=True, delay=1)),
        ]

        for i in range(0, len(epochSeries)):  # loop through the epochs
            # j walks over the channels in steps of stepSize, starting at
            # stepSize (not 0), so each channel owns slots j-5 .. j-1.
            for j in range(stepSize, (len(epochSeries[i].columns)*stepSize)+stepSize, stepSize):

                # Normalized integer channel index (as if the step size were 1).
                j_epoch = j // stepSize - 1

                # Column name and its values for the current epoch.
                col = epochSeries[i].columns[j_epoch]
                colEpochList = epochSeries[i][col].tolist()

                for offset, suffix, entropyFunc in featureSpecs:
                    val = entropyFunc(colEpochList)
                    # NaN values are zeroed (see docstring).
                    if np.isnan(val):
                        val = 0
                    entropyFeatureArrayX[i][0][j - offset] = val

                    entropyFeatureList = addFeatureToList(
                        featureList=entropyFeatureList,
                        featureListIndex=j - offset,
                        newFeatureName="{col}_{suffix}".format(col=col, suffix=suffix))

        # Normalize everything to 0-1
        print("Normalizing the entropy features...")

        # norm='max' scales each feature to 0-1; axis=0 normalizes per
        # feature across epochs.  The 3D array is flattened to 2D first.
        X_entropy_norm = preprocessing.normalize(
            entropyFeatureArrayX.reshape(entropyFeatureArrayX.shape[0],
                                         entropyFeatureArrayX.shape[2]),
            norm='max', axis=0)

        # Reshape back to the (epochs, 1, features) layout.
        X_entropy_norm = X_entropy_norm.reshape(X_entropy_norm.shape[0], 1, X_entropy_norm.shape[1])

        return X_entropy_norm, entropyFeatureList
Ejemplo n.º 7
0
def sample_entropy(x):
    """Return the order-2 sample entropy of *x* under the Chebyshev metric."""
    return entropy.sample_entropy(x, metric='chebyshev', order=2)
def prepareLRMData(data):
    """Aggregate per-patient (uhid) features for the LRM model.

    For every unique ``uhid`` in *data*, computes per vital-sign signal:
    sample entropy (SE), detrended fluctuation analysis (DFA), the ADF test
    statistic, mean and variance — plus demographic/clinical summary
    columns.  Patients whose computation raises are skipped with the error
    printed.  The resulting frame is written to 'LRM_all_data.csv' and
    returned.

    Fixes vs. original:
    - ``sample_entropy`` (the one-argument wrapper defined above) was
      called with ``order=``/``metric=`` keywords it does not accept; the
      TypeError was silently swallowed by the broad ``except``, so no rows
      were ever appended.  The keywords are dropped — the wrapper already
      uses order=2 and the Chebyshev metric.
    - the 'map' column is read with bracket access because ``t.map``
      attribute access can resolve to the DataFrame ``map`` method instead
      of the column.
    """
    # Signals that each contribute the SE/DFA/ADF/Mean/Var feature group.
    signal_cols = ['ecg_resprate', 'spo2', 'heartrate', 'peep', 'pip', 'map',
                   'tidalvol', 'minvol', 'ti', 'fio2']

    def _signal_features(t, col):
        # Bracket access (not attribute access) — see docstring note on 'map'.
        s = t[col]
        return {
            col + '_SE': sample_entropy(s),
            col + '_DFA': nolds.dfa(s),
            col + '_ADF': adfuller(s)[0],
            col + '_Mean': np.mean(s),
            col + '_Var': np.var(s),
        }

    dg = pd.DataFrame(columns=[
        'uhid', 'dischargestatus', 'ecg_resprate_SE', 'ecg_resprate_DFA',
        'ecg_resprate_ADF', 'ecg_resprate_Mean', 'ecg_resprate_Var', 'spo2_SE',
        'spo2_DFA', 'spo2_ADF', 'spo2_Mean', 'spo2_Var', 'heartrate_SE',
        'heartrate_DFA', 'heartrate_ADF', 'heartrate_Mean', 'heartrate_Var',
        'peep_SE', 'peep_DFA', 'peep_ADF', 'peep_Mean', 'peep_Var', 'pip_SE',
        'pip_DFA', 'pip_ADF', 'pip_Mean', 'pip_Var', 'map_SE', 'map_DFA',
        'map_ADF', 'map_Mean', 'map_Var', 'tidalvol_SE', 'tidalvol_DFA',
        'tidalvol_ADF', 'tidalvol_Mean', 'tidalvol_Var', 'minvol_SE',
        'minvol_DFA', 'minvol_ADF', 'minvol_Mean', 'minvol_Var', 'ti_SE',
        'ti_DFA', 'ti_ADF', 'ti_Mean', 'ti_Var', 'fio2_SE', 'fio2_DFA',
        'fio2_ADF', 'fio2_Mean', 'fio2_Var', 'abdomen_girth', 'urine',
        'totalparenteralvolume', 'new_ph', 'gender', 'birthweight',
        'birthlength', 'birthheadcircumference', 'inout_patient_status',
        'gestation', 'baby_type', 'central_temp', 'apgar_onemin',
        'apgar_fivemin', 'apgar_tenmin', 'motherage', 'conception_type',
        'mode_of_delivery', 'steroidname', 'numberofdose', 'rbs', 'temp',
        'currentdateweight', 'currentdateheight', 'tpn-tfl', 'mean_bp',
        'sys_bp', 'dia_bp', 'abd_difference', 'stool_day_total',
        'total_intake', 'typevalue_Antibiotics', 'typevalue_Inotropes'
    ])

    for i in data.uhid.unique():
        try:
            print(i)
            t = data[data['uhid'] == i]
            # Impute missing values: column mean first, then 0 for columns
            # that were entirely NaN.
            t.fillna(t.mean(), inplace=True)
            t.fillna(0, inplace=True)
            #t = t.apply(pd.to_numeric, errors='coerce')

            row = {
                'uhid': i,
                'dischargestatus': t['dischargestatus'].iloc[0],
                # Aggregates over the stay.
                'abdomen_girth': np.mean(t.abdomen_girth),
                'urine': np.nansum(t.urine),
                'totalparenteralvolume': np.nansum(t.totalparenteralvolume),
                'total_intake': np.nansum(t.total_intake),
                'new_ph': np.mean(t.new_ph),
                'central_temp': np.nanmean(t.central_temp),
                'numberofdose': np.nansum(t.numberofdose),
                'rbs': np.nanmean(t.rbs),
                'temp': np.nanmean(t.temp),
                'currentdateweight': np.nanmean(t.currentdateweight),
                'currentdateheight': np.nanmean(t.currentdateheight),
                'mean_bp': np.nanmean(t.mean_bp),
                'dia_bp': np.nanmean(t.dia_bp),
                'sys_bp': np.nanmean(t.sys_bp),
                'stool_day_total': np.nanmean(t.stool_day_total),
                'tpn-tfl': np.nansum(t['tpn-tfl']),
                'typevalue_Inotropes': np.nansum(t.typevalue_Inotropes),
                'typevalue_Antibiotics': np.nansum(t.typevalue_Antibiotics),
                'steroidname': np.nansum(t.steroidname),
                # Static per-patient attributes: first value of the stay.
                'gender': t['gender'].iloc[0],
                'birthweight': t['birthweight'].iloc[0],
                'birthlength': t['birthlength'].iloc[0],
                'birthheadcircumference': t['birthheadcircumference'].iloc[0],
                'inout_patient_status': t['inout_patient_status'].iloc[0],
                'gestation': t['gestation'].iloc[0],
                'baby_type': t['baby_type'].iloc[0],
                'apgar_onemin': t['apgar_onemin'].iloc[0],
                'apgar_fivemin': t['apgar_fivemin'].iloc[0],
                'apgar_tenmin': t['apgar_tenmin'].iloc[0],
                'motherage': t['motherage'].iloc[0],
                'conception_type': t['conception_type'].iloc[0],
                'mode_of_delivery': t['mode_of_delivery'].iloc[0],
            }
            for col in signal_cols:
                row.update(_signal_features(t, col))

            dg = dg.append(row, ignore_index=True)

        except Exception as e:
            print(e, "error")

    dg.to_csv('LRM_all_data.csv')

    return dg
Ejemplo n.º 9
0
# ax1.plot(s[si], color=color)
# ax1.tick_params(axis='y', labelcolor=color)
#
# ax2 = ax1.twinx()
# color = 'tab:blue'
# ax2.set_ylabel('S', color=color)
# ax2.plot(S[si], color=color)
# ax2.tick_params(axis='y', labelcolor=color)
#
# plt.show()

# Entropy:
# Print several entropy measures of the first signal series `s[0]`
# (`s` is defined earlier in the script — presumably a sequence of
# 1-D signal arrays; confirm against the code above).
print(entropy.perm_entropy(s[0], order=3,
                           normalize=True))  # Permutation entropy
# Spectral entropy with sampling frequency 100 and Welch PSD estimation.
print(entropy.spectral_entropy(s[0], 100, method='welch',
                               normalize=True))  # Spectral entropy
print(entropy.svd_entropy(
    s[0], order=3, delay=1,
    normalize=True))  # Singular value decomposition entropy
print(entropy.app_entropy(s[0], order=2,
                          metric='chebyshev'))  # Approximate entropy
print(entropy.sample_entropy(s[0], order=2,
                             metric='chebyshev'))  # Sample entropy

# Reload a trajectory ensemble from the bundled SQLite database next to
# this script and recompute the approximate entropy on its signal series.
fpath_db = os.path.join(os.path.dirname(__file__), 'data',
                        '06-sir-gamma-beta.sqlite3')
te = TrajectoryEnsemble(fpath_db).stats()
# `s` is rebound here: the signal series of trajectory 1 of the ensemble.
s = te.traj[1].get_signal().series
print(entropy.app_entropy(s[0], order=2,
                          metric='chebyshev'))  # Approximate entropy
def get_entropy_information(input, synthesized):
    """Compare entropy statistics of real vs. synthesized sensor data.

    For each sensor column of the synthesized frame this shows an
    autocorrelation plot of both series, then computes:
      - sample entropy over the whole series,
      - mean sample entropy over day-sized windows (day = 24*4 samples,
        presumably 15-minute sampling — TODO confirm),
      - mean distribution entropy over the same windows,
      - distribution entropy of the whole series (base 2).
    Results are written to 'entropy.csv'.

    NOTE(review): `input` shadows the builtin name.  `entropy` here is
    called as a distribution-entropy function (``entropy(pk=...)``), i.e.
    it must be something like scipy.stats.entropy, NOT the `entropy`
    package used elsewhere in this file — confirm against the imports.
    """
    real, synthesized = load_data(input, synthesized)
    day = 24 * 4  # samples per 24 hours (assumes 4 samples/hour — confirm)
    rows = []
    for sensor in synthesized.columns:
        sample_entropy_order = 16
        real_entropy_dict = {'Sensor id': '%s (real)' % sensor}
        synthesized_entropy_dict = {'Sensor id': '%s (synthesized)' % sensor}

        # Flatten the single-column frames to 1-D arrays.
        sensor_data = synthesized[[sensor]].values.reshape(1, -1)[0]
        real_sensor_data = real[[sensor]].values.reshape(1, -1)[0]

        # Autocorrelation of real vs. synthesized, shown interactively.
        fig, ax = plt.subplots()

        pd.plotting.autocorrelation_plot(real_sensor_data, ax=ax, label='Real')
        pd.plotting.autocorrelation_plot(sensor_data,
                                         ax=ax,
                                         label='Synthesized')
        ax.set_xlim(0, len(sensor_data))
        ax.legend()
        ax.set_title("Sensor %s autocorrelation plot" % sensor)
        plt.show()

        # Whole-series sample entropy.
        real_entropy_dict['Sample entropy'] = sample_entropy(
            real_sensor_data, order=sample_entropy_order)
        synthesized_entropy_dict['Sample entropy'] = sample_entropy(
            sensor_data, order=sample_entropy_order)

        # Sample entropy per day-sized window (last window may be shorter).
        sample_entropy_real = [
            sample_entropy(real_sensor_data[i:(i + day)],
                           order=sample_entropy_order)
            for i in range(0, len(real_sensor_data), day)
        ]
        sample_entropy_synthesized = [
            sample_entropy(sensor_data[i:(i + day)],
                           order=sample_entropy_order)
            for i in range(0, len(sensor_data), day)
        ]
        sample_entropy_real = np.array(sample_entropy_real)
        sample_entropy_synthesized = np.array(sample_entropy_synthesized)
        # Drop windows where the estimate was undefined (NaN).
        sample_entropy_real = sample_entropy_real[~np.isnan(sample_entropy_real
                                                            )]
        sample_entropy_synthesized = sample_entropy_synthesized[
            ~np.isnan(sample_entropy_synthesized)]

        real_entropy_dict['Sample entropy 24 hours'] = np.mean(
            sample_entropy_real)
        synthesized_entropy_dict['Sample entropy 24 hours'] = np.mean(
            sample_entropy_synthesized)

        # Distribution entropy per day-sized window.
        entropy_real = [
            entropy(pk=calc_distribution(real_sensor_data[i:(i + day)]))
            for i in range(0, len(real_sensor_data), day)
        ]
        entropy_synthesized = [
            entropy(pk=calc_distribution(sensor_data[i:(i + day)]))
            for i in range(0, len(sensor_data), day)
        ]
        entropy_real = np.array(entropy_real)
        entropy_synthesized = np.array(entropy_synthesized)

        entropy_real = entropy_real[~np.isnan(entropy_real)]
        entropy_synthesized = entropy_synthesized[~np.isnan(entropy_synthesized
                                                            )]

        real_entropy_dict['Entropy 24 hours'] = np.mean(entropy_real)
        synthesized_entropy_dict['Entropy 24 hours'] = np.mean(
            entropy_synthesized)

        # Whole-series distribution entropy, base 2.
        # NOTE(review): np.mean around a scalar on the 'real' side only —
        # asymmetric with the synthesized side, though harmless for scalars.
        real_entropy_dict['Entropy'] = np.mean(
            entropy(pk=calc_distribution(real_sensor_data), base=2))
        synthesized_entropy_dict['Entropy'] = entropy(
            pk=calc_distribution(sensor_data), base=2)
        rows.append(real_entropy_dict)
        rows.append(synthesized_entropy_dict)

    df = pd.DataFrame(rows)
    df.to_csv('entropy.csv')
Ejemplo n.º 11
0
 def extract(self, signal):
     """Return {feature-name: order-2 Chebyshev sample entropy of *signal*}.

     The feature name is the concrete extractor class's name.
     """
     feature_name = self.__class__.__name__
     logging.info("extracting %s" % feature_name)
     value = entropy.sample_entropy(signal, order=2, metric="chebyshev")
     return {feature_name: value}