def process_phase(all_signal, all_events, start_mrk, stop_mrk, n_fragments, fs):
    """Process one experimental phase of an EDA recording.

    Extracts the signal/events between the start and stop markers, decomposes
    the signal with cvxEDA, computes tonic skin-conductance levels (SCL) over
    ``n_fragments`` equal fragments, and builds one Trial per CS onset.

    Parameters:
        all_signal:  full EDA signal for the session.
        all_events:  full event container (project type with
                     ``samples_for_marker`` / ``events_between_samples``).
        start_mrk, stop_mrk:  markers delimiting this phase.
        n_fragments: number of fragments used to average the tonic component.
        fs:          sampling frequency in Hz.

    Returns:
        (levels, trials): list of per-fragment mean tonic levels, and the
        list of Trial objects.
    """
    # hardcoded: length of trial and baseline
    n_s = (9 + 10) * fs  # take 9 seconds of CS and 10 seconds of fix
    n_b_s = 2 * fs  # baseline window: 2 seconds
    # extract signal and events for the requested parts
    signal, events = extract_phase(all_signal, all_events, start_mrk, stop_mrk)
    # decompose; cvxEDA returns (r=phasic, p=SMNA driver, t=tonic, l, d, e, obj)
    [r, p, t, l, d, e, obj] = cvxEDA.cvxEDA(signal, 1 / fs)
    # divide the tonic signal into fragments and calculate SCL
    fragments = np.array_split(t, n_fragments)
    levels = [a.mean() for a in fragments]
    # extract trials
    trials = []
    # markers 1 and 2 — presumably the two CS conditions; TODO confirm
    onsets_cs = events.samples_for_marker(1) + events.samples_for_marker(2)
    onsets_cs.sort()  # process trials in chronological order
    for onset in onsets_cs:
        trial_events = events.events_between_samples(onset, onset + n_s)
        trials.append(
            Trial(
                events=trial_events,
                n_samples=n_s,
                n_bl_samples=n_b_s,
                fs=fs,
                signal=r,   # phasic component
                smna=p,     # sparse SMNA driver
            ))
    return levels, trials
def calculate_sc_f(scdata, sample_rate, sc_time, sc_chunks):
    """Compute skin-conductance features for each chunk of samples.

    Parameters:
        scdata, sc_time: unused here; kept for interface compatibility.
        sample_rate: sampling frequency of the SC signal in Hz.
        sc_chunks: iterable of chunks, each a sequence of dicts that carry
            the raw conductance value under the 'data' key.

    Returns:
        list of dicts (one per chunk) with keys 'sc_avg', 'scl_avg',
        'scl_slope', 'scr_avg', 'scr_max', 'scr_peak'.
    """
    features_chunks = []
    for chunk in sc_chunks:
        # pull the raw samples out of the dicts (was map(lambda ...))
        y = np.asarray([sample['data'] for sample in chunk])
        # z-normalise before decomposition, as recommended for cvxEDA
        yn = (y - y.mean()) / y.std()
        # cvxEDA returns (phasic, SMNA driver, tonic, l, d, e, obj)
        [scr, p, scl, l, d, e, obj] = cvxEDA.cvxEDA(yn, 1. / sample_rate)
        scr_peaks = find_peaks(scr)
        features = {
            'sc_avg': np.average(y),                    # mean raw conductance
            'scl_avg': np.average(scl),                 # mean tonic level
            'scl_slope': np.amax(scl) - np.amin(scl),   # tonic range
            'scr_avg': np.average(scr),                 # mean phasic activity
            'scr_max': np.amax(scr),                    # peak phasic amplitude
            'scr_peak': len(scr_peaks[0]),              # number of SCR peaks
        }
        # append the single dict (was extend([features]))
        features_chunks.append(features)
    return features_chunks
def eda_processing(eda_signal):
    """Extract a 16-element EDA feature vector from a raw signal at 700 Hz.

    Parameters:
        eda_signal: 1-D array-like, raw electrodermal activity sampled at 700 Hz.

    Returns:
        (eda, names): 1-D numpy array of 16 feature values and the matching
        list of feature names.
    """
    # neurokit gives us the SCR peaks (and onsets)
    processed_eda = nk.eda_process(eda_signal, sampling_rate=700)
    peaks = processed_eda[1]['SCR_Peaks']

    # r is the phasic (SCR) component of the cvxEDA decomposition
    [r, p, t, l, d, e, obj] = cvx.cvxEDA(eda_signal, 1/700)
    scr = r

    # Basic SCR statistics
    mean_scr = np.mean(scr)
    max_scr = np.max(scr)
    min_scr = np.min(scr)
    skewness = stats.skew(scr)
    kurtosis = stats.kurtosis(scr)

    # First derivative of SCR
    derivada1 = np.gradient(r, edge_order=1)
    mean_der1 = np.mean(derivada1)
    std_der1 = np.std(derivada1)

    # NOTE(review): np.gradient with edge_order=2 is still the FIRST
    # derivative, only with a higher-order boundary scheme — it is NOT a
    # second derivative. Kept as-is to preserve the feature definition.
    derivada2 = np.gradient(r, edge_order=2)
    mean_der2 = np.mean(derivada2)
    std_der2 = np.std(derivada2)

    # Peak statistics (duplicate 'peaks' lookup and unused std_peaks removed)
    mean_peaks = np.mean(peaks)
    max_peaks = np.max(peaks)
    min_peaks = np.min(peaks)

    # ALSC, INSC, APSC, RMSC — aggregate SCR descriptors (project helpers)
    alsc_result = alsc(scr)
    insc_result = insc(scr)
    apsc_result = apsc(scr)
    rmsc_result = rmsc(scr)

    eda = np.hstack((mean_scr, max_scr, min_scr, skewness, kurtosis,
                     mean_der1, std_der1, mean_der2, std_der2,
                     mean_peaks, max_peaks, min_peaks,
                     alsc_result, insc_result, apsc_result, rmsc_result))
    names = ['mean_scr_eda', 'max_scr_eda', 'min_scr_eda', 'skewness_eda',
             'kurtosis_eda', 'mean_der1_eda', 'std_der1_eda', 'std_der2_eda',
             'mean_der2_eda', 'mean_peaks_eda', 'max_peaks_eda',
             'min_peaks_eda', 'alsc_result_eda', 'insc_result_eda',
             'apsc_result_eda', 'rmsc_result_eda']
    names = ['mean_scr_eda', 'max_scr_eda', 'min_scr_eda', 'skewness_eda',
             'kurtosis_eda', 'mean_der1_eda', 'std_der1_eda', 'mean_der2_eda',
             'std_der2_eda', 'mean_peaks_eda', 'max_peaks_eda',
             'min_peaks_eda', 'alsc_result_eda', 'insc_result_eda',
             'apsc_result_eda', 'rmsc_result_eda']
    return eda, names
import cvxEDA
import pandas as pd
import numpy as np
import pylab as pl

# Load the raw EDA trace (first column of the CSV).
df = pd.read_csv('EDAnumpy.csv')
hw_eda = np.asarray(df.iloc[:, 0])

y = hw_eda
# z-normalise the signal before decomposition (recommended by cvxEDA)
yn = (y - y.mean()) / y.std()
Fs = 4.  # sampling frequency in Hz
# cvxEDA returns (r=phasic, p=SMNA driver, t=tonic, l, d, e, obj)
[r, p, t, l, d, e, obj] = cvxEDA.cvxEDA(yn, 1. / Fs)

# Time axis in seconds.
tm = pl.arange(1., len(y) + 1.) / Fs
# FIX: pl.hold() was deprecated in matplotlib 1.5 and removed in 2.0;
# successive plot() calls overlay on the same axes by default, so the
# call is both broken on current matplotlib and unnecessary.
pl.plot(tm, yn)  # normalised signal
pl.plot(tm, r)   # phasic component
pl.plot(tm, p)   # sparse SMNA driver
pl.plot(tm, t)   # tonic component
pl.savefig('foo.png')
pl.savefig('foo.pdf')
###Min-max normalization
# Normalise every dataset to [0, 1] before decomposition.
yntemp = yn
yn = [eda.minmaxNormalisation(yntemp[i]) for i in range(numberOfDataSet)]

############# USING CVXEDA LIBRARY ###################################################
# One result container per cvxEDA output, one empty slot per dataset.
# (Replaces seven copy-pasted list comprehensions.)
r, p, t, l, d, e, obj = (
    [[] for _ in range(numberOfDataSet)] for _ in range(7)
)
for i in range(numberOfDataSet):
    # cvxEDA returns (phasic, SMNA driver, tonic, l, d, e, obj)
    [r[i], p[i], t[i], l[i], d[i], e[i], obj[i]] = cvxEDA.cvxEDA(yn[i], delta)

# create x values for plotting
tm = [eda.create_XAxis(yafilt[i], delta) for i in range(numberOfDataSet)]

# Four subplots, the axes array is 1-d
f, axarr = pl.subplots(4, sharex=True)
for j in range(numberOfDataSet):
    axarr[0].plot(tm[j], yn[j])   # normalised signal
for j in range(numberOfDataSet):
    axarr[1].plot(tm[j], t[j])    # tonic component
for j in range(numberOfDataSet):
    axarr[2].plot(tm[j], r[j])    # phasic component
# NOTE(review): fragment — this excerpt begins and ends mid-statement; the
# opening of the first tuple assignment (the *_10s result containers) and the
# tail of the last one (*_20imgs) lie outside this view. Code left unchanged.
                                            for i in range(numberOfDataSet)
], [[[] for j in range(len(RawDataList_10s[i]))]
    for i in range(numberOfDataSet)
], [[[] for j in range(len(RawDataList_10s[i]))]
    for i in range(numberOfDataSet)
], [[[] for j in range(len(RawDataList_10s[i]))]
    for i in range(numberOfDataSet)
], [[[] for j in range(len(RawDataList_10s[i]))]
    for i in range(numberOfDataSet)]
# Run cvxEDA on every 10-second window of every dataset and rebuild the
# concatenated phasic / tonic traces window by window.
for i in range(0, numberOfDataSet):
    for j in range(0, len(RawDataList_10s[i])):
        [
            r_10s[i][j], p_10s[i][j], t_10s[i][j], l_10s[i][j],
            d_10s[i][j], e_10s[i][j], obj_10s[i][j]
        ] = cvxEDA.cvxEDA(RawDataList_10s[i][j], delta)
        phasicList_10s_reconstruct[i].extend(array(p_10s[i][j]))
        TonicList_10s_reconstruct[i].extend(array(t_10s[i][j]))
        phasicNoSparseList_10s_reconstruct[i].extend(array(r_10s[i][j]))
#Images number window
# Same per-window containers for the 20-images windowing scheme.
r_20imgs, p_20imgs, t_20imgs, l_20imgs, d_20imgs, e_20imgs, obj_20imgs = [
    [[] for j in range(len(RawDataList_20imgs[i]))]
    for i in range(numberOfDataSet)
], [[[] for j in range(len(RawDataList_20imgs[i]))]
    for i in range(numberOfDataSet)
], [[[] for j in range(len(RawDataList_20imgs[i]))]
    for i in range(numberOfDataSet)
], [[[] for j in range(len(RawDataList_20imgs[i]))]
    for i in range(numberOfDataSet)
], [[[] for j in range(len(RawDataList_20imgs[i]))]
#f = s.makefile() #for line in f.readlines(): line = s.recv(4) print(1.0 / float(line)) y.extend([1.0 / float(line)]) #y.extend([randint(0,600)])#) #np.append(y, s.recv(1024),0) if counter > 10: #yn = st.zscore(y) yn = y if (yn[0] == nan): yn = np.zeros(yn.size()) r, p, t, l, d, e, obj = ce.cvxEDA(yn, 1.0 / 2.0) #x = range(np.size(yn,0)) x.append(counter - 11) tonic[(counter - 1) % 10] = t phasic[(counter - 1) % 10] = r numLists = 10 if (counter - 10 < 10): numLists = counter - 10 #print "numlists: ",numLists tonicMax = runningMax(tonic, dataSize, numLists) phasicMax = runningMax(phasic, dataSize, numLists) tonicMaxList.append(tonicMax) phasicMaxList.append(phasicMax)
# NOTE(review): fragment — this `return` is the tail of a `bateman(...)`
# factory defined above this excerpt; tau0/tau1 come from its scope.
    return lambda t: np.exp(-t / tau0) - np.exp(-t / tau1)

# Build a synthetic SC signal: three impulses convolved with a Bateman kernel.
dt = 1 / 32                  # sampling period (32 Hz)
ts = np.arange(1000) * dt
driver = np.zeros(len(ts))
driver[50] = 1.0
driver[100] = 0.5
driver[400] = 1.0
kernel = bateman()(ts)
# zero-pad the front so the convolution below stays causally aligned
kernel = np.array([0.0] * len(kernel) + list(kernel))
halflen = int(len(kernel) / 2.0)
signal = np.convolve(driver, kernel, mode='full')[halflen:-halflen + 1]
ax = plt.subplot(3, 1, 2)
plt.plot(ts, driver)
plt.ylabel('Neural activity')
ax = plt.subplot(3, 1, 1)
plt.plot(ts, signal)
plt.ylabel('Skin conductance')
plt.subplot(3, 1, 3)
# cvxEDA returns (r, p, t, ...) — here r is labelled phasic, p is the
# recovered driver, t the tonic component.
phasic, driver, tonic, *_ = list(cvxEDA.cvxEDA(signal, dt))
plt.ylabel('Estimated neural activity')
plt.xlabel('Time (s)')
plt.plot(ts, driver / np.max(driver))  # normalised recovered driver
plt.show()
def eda_stats(y):
    """Z-normalise an EDA signal and run the cvxEDA decomposition.

    Returns the seven cvxEDA outputs [r, p, t, l, d, e, obj] as a list.
    """
    sampling_rate = fs_dict['EDA']
    normalised = (y - y.mean()) / y.std()
    r, p, t, l, d, e, obj = cvxEDA.cvxEDA(normalised, 1. / sampling_rate)
    return [r, p, t, l, d, e, obj]
#y.extend([randint(0,600)])#) #np.append(y, s.recv(1024),0) #print "yn.size: ", yn.size #print "yn before: ",yn if counter>100: yn = st.zscore(y) if (yn[0] == nan): yn = np.zeros(yn.size) print "yn: ",yn r, p, t, l, d, e, obj = ce.cvxEDA(yn, 0.05) x = range(r.size) plt.figure(1) plt.clf() plt.plot(x,yn) plt.figure(2) plt.clf() #plt.ylim([0,0.01]) #plt.plot(x,y,x,p) plt.plot(x,t) #plt.show(block=False) plt.figure(3)
def objective(taus):
    """Objective for tuning the cvxEDA time constants.

    *taus* holds log-space (tau0, tau1); the final element of the cvxEDA
    output (the optimiser's objective value) is printed and returned.
    """
    tau0, tau1 = np.exp(taus)
    result = list(cvxEDA.cvxEDA(scr, dt, tau0=tau0, tau1=tau1))
    residual = float(result[-1])
    print(tau0, tau1, residual)
    return residual
ts = np.arange(len(ts)) * dt #plt.plot(data[:,0], 1.0/data[:,1]) def objective(taus): tau0, tau1 = np.exp(taus) wtf = list(cvxEDA.cvxEDA(scr, dt, tau0=tau0, tau1=tau1)) print(tau0, tau1, float(wtf[-1])) return float(wtf[-1]) #print(objective([2.0, 0.7])) #fit = scipy.optimize.minimize(objective, np.log((10.0, 5.0))) #print(fit) #tau0, tau1 = np.exp(fit.x) #tau0, tau1 = np.exp([ 4.40451525, -1.79824158]) # WTF!! wtf = list(cvxEDA.cvxEDA(scr, dt)) driver, tonic, kernel = gsr.deconv_baseline(oscr, 1 / dt) ax = plt.subplot(2, 1, 1) plt.plot(ts, scr) recon = scr - wtf[5] plt.plot(ts, recon) #plt.plot(ts, wtf[2]) plt.subplot(2, 1, 2, sharex=ax) plt.plot(ts, wtf[1] / np.max(wtf[1])) plt.plot(ts, driver / np.max(driver)) plt.show()
def decomposition(eda, Fs):
    """Z-normalise *eda* and decompose it with cvxEDA.

    Parameters:
        eda: 1-D array-like EDA signal.
        Fs:  sampling frequency in Hz.

    Returns:
        A generator yielding the seven cvxEDA outputs
        (r, p, t, l, d, e, obj), each flattened to a 1-D numpy array.
    """
    signal = np.array(eda)
    normalised = (signal - signal.mean()) / signal.std()
    [r, p, t, l, d, e, obj] = cvxEDA.cvxEDA(normalised, 1. / Fs)
    # keep the lazy generator return: components are flattened on demand
    return (np.array(part).ravel() for part in (r, p, t, l, d, e, obj))
# NOTE(review): script fragment — left, right, dt come from outside this view.
ts = np.arange(len(left)) * dt
# keep only the 1000 s – 3500 s span of the recording
valid = (ts > 1000) & (ts < 3500)
ts = ts[valid]
left = left[valid]
right = right[valid]
plt.plot(ts, left[:, 0])
plt.plot(ts, right[:, 0])
plt.show()

def znorm(x):
    # z-score normalisation (defined here but not used in this excerpt)
    return (x - np.mean(x)) / np.std(x)

# cvxEDA returns (r, p, t, ...): r → "phasic", p → "driver", t → "tonic"
phasic, driver, tonic, *_ = list(cvxEDA.cvxEDA((left[:, 0]), dt))
ax = plt.subplot(2, 1, 1)
plt.plot(ts, left[:, 0])
plt.plot(ts, tonic)
plt.subplot(2, 1, 2, sharex=ax)
# NOTE(review): this ylabel is immediately overwritten by the next one on
# the same axes — probably meant for the upper subplot; verify.
plt.ylabel('Skin conductance (microsiemens)')
plt.plot(ts, driver)
plt.ylabel('Estimated neural activity')
plt.xlabel('Time')
#events = np.isfinite(left[:,1])
events = left[:, 1] == 20  # marker value 20 — meaning unknown from here
plt.subplot(2, 1, 2, sharex=ax)