Example #1
def test_eda_plot():

    sampling_rate = 1000
    eda = nk.eda_simulate(duration=30, sampling_rate=sampling_rate,
                          scr_number=6, noise=0, drift=0.01, random_state=42)
    eda_summary, _ = nk.eda_process(eda, sampling_rate=sampling_rate)

    # Plot data over samples.
    nk.eda_plot(eda_summary)
    # Grab the most recently created figure.
    fig = plt.gcf()
    assert len(fig.axes) == 3
    titles = ["Raw and Cleaned Signal",
              "Skin Conductance Response (SCR)",
              "Skin Conductance Level (SCL)"]
    for (ax, title) in zip(fig.get_axes(), titles):
        assert ax.get_title() == title
    assert fig.get_axes()[2].get_xlabel() == "Samples"
    np.testing.assert_array_equal(fig.axes[0].get_xticks(),
                                  fig.axes[1].get_xticks())
    np.testing.assert_array_equal(fig.axes[1].get_xticks(),
                                  fig.axes[2].get_xticks())
    plt.close(fig)

    # Plot data over seconds.
    nk.eda_plot(eda_summary, sampling_rate=sampling_rate)
    # Grab the most recently created figure.
    fig = plt.gcf()
    assert fig.get_axes()[2].get_xlabel() == "Seconds"
Example #2
def process_eda(eda, show_fig=False):
    """
        Resample EDA signal from 4 Hz to 64 Hz.
        Compute EDA signal features (more info: https://neurokit2.readthedocs.io/en/latest/functions.html#module-neurokit2.eda).

        Parameters
        ----------
        eda : dict [timestamp : value]
            EDA signal.
        show_fig : bool
            set if plot specific features.

        Returns
        -------
        eda_signals : DataFrame
        eda_info : dict
    """

    eda_signal = nk.signal_resample(eda['value'],
                                    sampling_rate=4,
                                    desired_sampling_rate=64)
    eda_signals, eda_info = nk.eda_process(eda_signal, sampling_rate=64)

    if show_fig:
        plt.plot(eda_signals['EDA_Phasic'], label='phasic component')
        plt.plot(eda_signals['EDA_Tonic'], label='tonic component')
        plt.plot(eda_signals['EDA_Raw'], label='raw signal')
        plt.xlabel('Sample [n]')
        plt.ylabel('EDA [uS]')
        plt.legend()
        plt.show()

    return eda_signals, eda_info
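A minimal usage sketch for process_eda (not part of the original example), assuming the input is a mapping with a 'value' entry holding EDA samples recorded at 4 Hz; here the signal is simulated with nk.eda_simulate, and the duration and SCR count are arbitrary choices.

import neurokit2 as nk

# Simulate two minutes of EDA at 4 Hz and wrap it the way process_eda expects:
# a mapping with a 'value' entry.
simulated = {'value': nk.eda_simulate(duration=120, sampling_rate=4, scr_number=3, noise=0)}
eda_signals, eda_info = process_eda(simulated, show_fig=False)
print(eda_signals.columns.tolist())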
Example #3
def test_eda_process():

    eda = nk.eda_simulate(duration=30,
                          scr_number=5,
                          drift=0.1,
                          noise=0,
                          sampling_rate=250)
    signals, info = nk.eda_process(eda, sampling_rate=250)

    assert signals.shape == (7500, 11)
    assert all(col in signals.columns.values for col in [
        "EDA_Raw",
        "EDA_Clean",
        "EDA_Tonic",
        "EDA_Phasic",
        "SCR_Onsets",
        "SCR_Peaks",
        "SCR_Height",
        "SCR_Amplitude",
        "SCR_RiseTime",
        "SCR_Recovery",
        "SCR_RecoveryTime",
    ])

    # Check equal number of markers
    peaks = np.where(signals["SCR_Peaks"] == 1)[0]
    onsets = np.where(signals["SCR_Onsets"] == 1)[0]
    recovery = np.where(signals["SCR_Recovery"] == 1)[0]
    assert peaks.shape == onsets.shape == recovery.shape == (5, )
Example #4
def load_csv_file():
    data = []
    with open("EDA3.csv", "r") as file:
        reader = csv.reader(file)
        next(reader)  # skip the header row
        for line in reader:
            value = float(line[0])
            # Duplicate each sample to double the sampling rate.
            # TODO: the average (D[i-1] + D[i]) / 2.0 could be used instead.
            data.append(value)
            data.append(value)

    data_eda = np.array(data)

    # Preprocess the data (filter, find peaks, etc.);
    # f_muestreo is the acquisition sampling rate, defined elsewhere in the module.
    #signals, info = nk.bio_process(eda=data_eda, sampling_rate=f_muestreo)
    signals, info = nk.eda_process(data_eda, sampling_rate=f_muestreo)
    #signals, info = nk.bio_process(ecg=data["ECG"], rsp=data["RSP"], eda=data["EDA"], sampling_rate=100)

    # Compute relevant features
    #results = nk.bio_analyze(signals, sampling_rate=f_muestreo)
    #print(results)
    # Extract clean EDA and SCR features
    cleaned = signals["EDA_Clean"]
    features = [info["SCR_Onsets"], info["SCR_Peaks"], info["SCR_Recovery"]]
    #print(features)
    #print(cleaned[0])
    plt.plot(cleaned)
    plt.ylabel('EDA (cleaned)')
    plt.show()

    # Visualise the processing
    nk.eda_plot(signals, sampling_rate=f_muestreo)
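The TODO above suggests averaging neighbouring samples instead of duplicating them. A small alternative sketch (not from the original code): read the same single-column CSV and let nk.signal_resample interpolate to the target rate; the file name and both rates are assumptions.

import numpy as np
import neurokit2 as nk

def load_eda_resampled(path="EDA3.csv", original_rate=4, target_rate=8):
    # Read the single-column CSV, skipping the header row.
    raw = np.genfromtxt(path, delimiter=",", skip_header=1, usecols=0)
    # Interpolate to the target rate instead of repeating each sample.
    return nk.signal_resample(raw, sampling_rate=original_rate,
                              desired_sampling_rate=target_rate)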
Example #5
def generarDataSintetica():
    # Generate synthetic data:
    # 20 seconds of EDA signal (sampled at 250 Hz) with 3 SCR peaks
    eda = nk.eda_simulate(duration=20, sampling_rate=250, scr_number=3, drift=0.01)

    # Process
    signals, info = nk.eda_process(eda, sampling_rate=250)

    # Visualise the processing
    nk.eda_plot(signals, sampling_rate=250)
Example #6
def test_eda_eventrelated():

    eda = nk.eda_simulate(duration=15, scr_number=3)
    eda_signals, info = nk.eda_process(eda, sampling_rate=1000)
    epochs = nk.epochs_create(eda_signals, events=[5000, 10000, 15000],
                              sampling_rate=1000,
                              epochs_start=-0.1, epochs_end=1.9)
    eda_eventrelated = nk.eda_eventrelated(epochs)

    no_activation = np.where(eda_eventrelated["EDA_SCR"] == 0)[0][0]
    assert int(pd.DataFrame(eda_eventrelated.values
                            [no_activation]).isna().sum()) == 4

    assert len(eda_eventrelated["Label"]) == 3
Example #7
def test_eda_eventrelated():

    eda = nk.eda_simulate(duration=15, scr_number=3)
    eda_signals, info = nk.eda_process(eda, sampling_rate=1000)
    epochs = nk.epochs_create(
        eda_signals,
        events=[5000, 10000, 15000],
        sampling_rate=1000,
        epochs_start=-0.1,
        epochs_end=1.9,
    )
    eda_eventrelated = nk.eda_eventrelated(epochs)

    no_activation = np.where(eda_eventrelated["EDA_SCR"] == 0)[0][0]
    assert int(
        pd.DataFrame(eda_eventrelated.values[no_activation]).isna().sum()) == 4

    assert len(eda_eventrelated["Label"]) == 3

    # Test warning on missing columns
    with pytest.warns(nk.misc.NeuroKitWarning,
                      match=r".*does not have an `EDA_Phasic`.*"):
        first_epoch_key = list(epochs.keys())[0]
        first_epoch_copy = epochs[first_epoch_key].copy()
        del first_epoch_copy["EDA_Phasic"]
        nk.eda_eventrelated({**epochs, first_epoch_key: first_epoch_copy})

    with pytest.warns(nk.misc.NeuroKitWarning,
                      match=r".*does not have an `SCR_Amplitude`.*"):
        first_epoch_key = list(epochs.keys())[0]
        first_epoch_copy = epochs[first_epoch_key].copy()
        del first_epoch_copy["SCR_Amplitude"]
        nk.eda_eventrelated({**epochs, first_epoch_key: first_epoch_copy})

    with pytest.warns(nk.misc.NeuroKitWarning,
                      match=r".*does not have an `SCR_RecoveryTime`.*"):
        first_epoch_key = list(epochs.keys())[0]
        first_epoch_copy = epochs[first_epoch_key].copy()
        del first_epoch_copy["SCR_RecoveryTime"]
        nk.eda_eventrelated({**epochs, first_epoch_key: first_epoch_copy})

    with pytest.warns(nk.misc.NeuroKitWarning,
                      match=r".*does not have an `SCR_RiseTime`.*"):
        first_epoch_key = list(epochs.keys())[0]
        first_epoch_copy = epochs[first_epoch_key].copy()
        del first_epoch_copy["SCR_RiseTime"]
        nk.eda_eventrelated({**epochs, first_epoch_key: first_epoch_copy})
Example #8
def eda_processing(eda_signal):
    # Extract the SCR peaks and onsets from here
    processed_eda = nk.eda_process(eda_signal, sampling_rate=700)
    peaks = processed_eda[1]['SCR_Peaks']
    # r is the SCR (phasic) signal
    [r, p, t, l, d, e, obj] = cvx.cvxEDA(eda_signal, 1/700)
    scr = r

    mean_scr = np.mean(scr)
    max_scr = np.max(scr)
    min_scr = np.min(scr)
    # TODO: ask about the range feature
    skewness = stats.skew(scr)
    kurtosis = stats.kurtosis(scr)

    # First derivative of the SCR
    derivada1 = np.gradient(r, edge_order=1)
    mean_der1 = np.mean(derivada1)
    std_der1 = np.std(derivada1)

    # Second derivative of the SCR (gradient of the first derivative)
    derivada2 = np.gradient(derivada1, edge_order=2)
    mean_der2 = np.mean(derivada2)
    std_der2 = np.std(derivada2)

    # Peaks (sample indices of the detected SCRs, already extracted above)
    mean_peaks = np.mean(peaks)
    max_peaks = np.max(peaks)
    min_peaks = np.min(peaks)
    std_peaks = np.std(peaks)

    # TODO: investigate onsets (again)

    # ALSC, INSC, APSC, RMSC
    alsc_result = alsc(scr)
    insc_result = insc(scr)
    apsc_result = apsc(scr)
    rmsc_result = rmsc(scr)

    eda = np.hstack((mean_scr, max_scr, min_scr, skewness, kurtosis, mean_der1, std_der1, mean_der2,
                     std_der2, mean_peaks, max_peaks, min_peaks, alsc_result, insc_result, apsc_result, rmsc_result))

    names = ['mean_scr_eda', 'max_scr_eda', 'min_scr_eda', 'skewness_eda', 'kurtosis_eda', 'mean_der1_eda', 'std_der1_eda', 'mean_der2_eda',
             'std_der2_eda', 'mean_peaks_eda', 'max_peaks_eda', 'min_peaks_eda', 'alsc_result_eda', 'insc_result_eda', 'apsc_result_eda', 'rmsc_result_eda']
    return eda, names
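The helpers alsc, insc, apsc and rmsc are referenced above but not defined in this example. A hypothetical sketch, assuming the conventional definitions of these SCR summary features (arc length, integral of absolute values, average power, root mean square); the original implementations may differ.

import numpy as np

def alsc(scr):
    # Arc length of the SCR curve: sum of segment lengths between consecutive samples.
    diffs = np.diff(np.asarray(scr, dtype=float))
    return np.sum(np.sqrt(1.0 + diffs ** 2))

def insc(scr):
    # Integral (sum) of the absolute SCR values.
    return np.sum(np.abs(scr))

def apsc(scr):
    # Average power of the SCR signal.
    return np.mean(np.asarray(scr, dtype=float) ** 2)

def rmsc(scr):
    # Root mean square of the SCR signal.
    return np.sqrt(apsc(scr))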
Example #9
def test_eda_intervalrelated():

    data = nk.data("bio_resting_8min_100hz")
    df, info = nk.eda_process(data["EDA"], sampling_rate=100)
    columns = ['SCR_Peaks_N', 'SCR_Peaks_Amplitude_Mean']

    # Test with signal dataframe
    features_df = nk.eda_intervalrelated(df)

    assert all(elem in columns for elem
               in np.array(features_df.columns.values, dtype=str))
    assert features_df.shape[0] == 1  # Number of rows

    # Test with dict
    epochs = nk.epochs_create(df, events=[0, 25300],
                              sampling_rate=100, epochs_end=20)
    features_dict = nk.eda_intervalrelated(epochs)

    assert all(elem in columns for elem
               in np.array(features_dict.columns.values, dtype=str))
    assert features_dict.shape[0] == 2  # Number of rows
Example #10
def test_eda_eventrelated():

    eda = nk.eda_simulate(duration=15, scr_number=3)
    eda_signals, info = nk.eda_process(eda, sampling_rate=1000)
    epochs = nk.epochs_create(eda_signals,
                              events=[5000, 10000, 15000],
                              sampling_rate=1000,
                              epochs_start=-0.1,
                              epochs_end=1.9)
    eda_eventrelated = nk.eda_eventrelated(epochs)

    no_activation = np.where(eda_eventrelated["EDA_Activation"] == 0)[0][0]
    assert int(
        pd.DataFrame(eda_eventrelated.values[no_activation]).isna().sum()) == 4

    assert len(eda_eventrelated["Label"]) == 3
    assert len(eda_eventrelated.columns) == 6

    assert all(elem in [
        "EDA_Activation", "EDA_Peak_Amplitude", "EDA_Peak_Amplitude_Time",
        "EDA_RiseTime", "EDA_RecoveryTime", "Label"
    ] for elem in np.array(eda_eventrelated.columns.values, dtype=str))
Example #11
                 layout=(5, 1),
                 color=['#f44336', "#E91E63", "#2196F3", "#9C27B0", "#FF9800"])
fig = plt.gcf()
fig.set_size_inches(10, 6, forward=True)
[ax.legend(loc=1) for ax in plt.gcf().axes]
fig.savefig("README_simulation.png", dpi=300, h_pad=3)

# =============================================================================
# Electrodermal Activity (EDA) processing
# =============================================================================

# Generate 10 seconds of EDA signal (recorded at 250 samples / second) with 2 SCR peaks
eda = nk.eda_simulate(duration=10, sampling_rate=250, scr_number=2, drift=0.1)

# Process it
signals, info = nk.eda_process(eda, sampling_rate=250)

# Visualise the processing
nk.eda_plot(signals, sampling_rate=None)

# Save it
plot = nk.eda_plot(signals, sampling_rate=None)
plot.set_size_inches(10, 6, forward=True)
plot.savefig("README_eda.png", dpi=300, h_pad=3)

# =============================================================================
# Cardiac activity (ECG) processing
# =============================================================================

# Generate 15 seconds of ECG signal (recorded at 250 samples / second)
ecg = nk.ecg_simulate(duration=15,
Example #12
def process_thread_implementation(data=None,
                                  prediction_queue=None,
                                  model=None):
    """
    This thread takes data from the 'master_thread_implementation' and applies to it
    the same processing used for the machine learning training.

    :param data: data from the 'master_thread_implementation'
    :param prediction_queue: queue linked to PredictionPage
    :param model: Machine Learning classifier
    """
    timestamps = []
    hr = []
    eda = []

    # Remember that EDA and HR are at the same frequency, so here they have the same length
    for i in range(len(data['HR'])):
        hr.append(data['HR'][i][1])
        eda.append(data['EDA'][i][1])
        timestamps.append(data['EDA'][i][0])

    # Filtering EDA
    eda = butter_lowpass_filter(eda, 1, 64, order=4)
    # Applying NeuroKit2 on EDA
    signal, process = nk.eda_process(eda, sampling_rate=64)
    scr = signal['EDA_Phasic'].to_numpy()
    scr = scr.reshape((scr.shape[0], 1))
    scl = signal['EDA_Tonic'].to_numpy()
    scl = scl.reshape((scl.shape[0], 1))

    hr = np.array(hr).reshape((len(hr), 1))

    # Now we have to resample the signals to 4 Hz
    hr = downsampling_mean(hr, 64, 4)
    scr = downsampling_mean(scr, 64, 4)
    scl = downsampling_mean(scl, 64, 4)
    timestamps = downsampling_last(timestamps, 64, 4)

    # Let's standardize the data.
    means = []
    stds = []
    # If we have 30 seconds of data (30 seconds * 4 Hz)
    if len(scr) >= 4 * 30:
        try:
            # Check whether the means and stds have already been computed and, if so, reuse them
            f = open("statistics.pkl", 'rb')
            statistics = pickle.load(f)
            means = statistics['means']
            stds = statistics['stds']
            f.close()
        except FileNotFoundError:
            # If this is the first time a 'process_thread' has 30 seconds of data, compute
            # the mean and std here and reuse them for all future data
            statistics = {}
            means = np.array([np.mean(scr), np.mean(scl), np.mean(hr)])
            stds = np.array([np.std(scr), np.std(scl), np.std(hr)])
            statistics['means'] = means
            statistics['stds'] = stds
            f = open("statistics.pkl", 'wb')
            pickle.dump(statistics, f)
            f.close()

    # Concatenate along axis 1 to build X;
    # the feature order must match the one used during training
    X = np.concatenate((scr, scl, hr), axis=1)
    if X.shape[0] >= 4 * 30:
        X = (X - means) / stds
        print("Mean of X: ", np.mean(X))
        print("Std of X: ", np.std(X))

    X = X.reshape((1, X.shape[0], X.shape[1]))

    predictions = model.predict(X)
    predictions = predictions.reshape((-1, ))
    predictions = list(predictions)
    timestamps = list(timestamps)
    # We take only the last second of predictions
    if len(predictions) >= 4:
        predictions = predictions[-4:]
        timestamps = timestamps[-4:]

    prediction_queue.put([timestamps, predictions])
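butter_lowpass_filter, downsampling_mean and downsampling_last are used above but not shown. A hypothetical sketch of one plausible implementation, assuming a zero-phase Butterworth low-pass filter and block-wise downsampling; the originals may differ.

import numpy as np
from scipy.signal import butter, filtfilt

def butter_lowpass_filter(signal, cutoff, fs, order=4):
    # Zero-phase Butterworth low-pass filter (cutoff and fs in Hz).
    b, a = butter(order, cutoff / (0.5 * fs), btype="low")
    return filtfilt(b, a, signal)

def downsampling_mean(signal, original_rate, target_rate):
    # Downsample by averaging non-overlapping blocks of original_rate / target_rate samples.
    factor = original_rate // target_rate
    signal = np.asarray(signal, dtype=float)
    n_blocks = len(signal) // factor
    trimmed = signal[:n_blocks * factor]
    return trimmed.reshape(n_blocks, factor, *trimmed.shape[1:]).mean(axis=1)

def downsampling_last(values, original_rate, target_rate):
    # Keep the last element of each block (used here for the timestamps).
    factor = original_rate // target_rate
    return [values[i + factor - 1] for i in range(0, len(values) - factor + 1, factor)]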