def __init__(self, ensemble=False, **kwargs):
    """Select the decomposition backend.

    ensemble -- when True wrap an EEMD (ensemble) decomposer,
                otherwise a plain EMD.
    kwargs   -- forwarded verbatim to the chosen constructor.
    """
    self.ensemble = ensemble
    self.emd_obj = EEMD(**kwargs) if ensemble else EMD(**kwargs)
IMF orders higher than 5th will all be summed back to the 5th order, including residue. This is to prevent having different number of IMFs available for denoising using neural networks in the next stage. '''
# NOTE(review): the line above is the tail of a module docstring whose opening
# quotes lie outside this chunk.
import numpy as np
from chicken_selects import *

from PyEMD import EMD

#noiselevel = int(input("EMG noise level?: "))
noiselevel = 1

# Object Data('model type', 'motion', noiselevel, cuda = False)
data = Data('Convolutional Autoencoder', 'mixed', noiselevel=noiselevel)

# Object EMD from PyEMD package. Default Cauchy convergence.
# NOTE(review): this rebinds the imported class name to an instance, so the
# EMD class cannot be instantiated again later in this module.
EMD = EMD()

# Specify directory if you have changed folder name / dir
data.set_ecg_filepath()
data.set_emg_filepath(filepath='emgdata_final')
data.set_acc_filepath(filepath='accdata_final')

# Call data into numpy array format. Check soure code for additional input specifications
clean_ecg = data.pull_all_ecg(tf=240000)  # Total of 14 recordings
emg_noise = data.pull_all_emg(tf=10000)  # 10,000 data points * 3 motions * 2 trials * 4 subjects
acc_dat = data.pull_all_acc(tf=10000)  # equiv to emg

# Remove mean, normalize to range (-1,1), adjust for noiselevel setting.
clean_ecg[0, :] -= np.mean(clean_ecg[0, :])
clean_ecg[0, :] = clean_ecg[0, :] / max(abs(clean_ecg[0, :]))
def emd(self, series: ndarray) -> ndarray:
    """Detrend *series* by subtracting its final EMD mode.

    The last row returned by ``EMD().emd`` is the slowest component
    (the residual trend); removing it keeps only the faster modes.
    """
    modes = EMD().emd(series)
    return series - modes[-1]
# Speech-denoising experiment: add noise, rebuild the signal without its first
# two IMFs, then accumulate per-frame Teager energy.  Relies on names defined
# outside this chunk: Speech, Noisy, steager, EMD, np, filename, SNR, wlen,
# inc, IS.
speech = Speech()
xx, fs = speech.audioread(filename, 8000)
xx = xx - np.mean(xx)  # remove DC
x = xx / np.max(xx)  # normalize to [-1, 1]
N = len(x)
time = np.arange(N) / fs
noisy = Noisy()
signal, _ = noisy.Gnoisegen(x, SNR)  # add noise at the requested SNR
wnd = np.hamming(wlen)  # analysis window
overlap = wlen - inc
NIS = int((IS * fs - wlen) / inc + 1)  # number of leading unvoiced frames
y = speech.enframe(signal, list(wnd), inc).T
fn = y.shape[1]  # frame count
frameTime = speech.FrameTime(fn, wlen, inc, fs)  # frame index -> time
imf = EMD().emd(signal)  # EMD decomposition of the whole noisy signal
M = imf.shape[0]  # number of modes
u = np.zeros(N)
for k in range(2, M):  # reconstruct without the 1st and 2nd IMF
    u += imf[k, :]
z = speech.enframe(u, list(wnd), inc).T  # re-frame the reconstruction
Tg = np.zeros(z.shape).T
Tgf = np.zeros(fn)
for k in range(fn):
    v = z[:, k]  # one frame
    imf = EMD().emd(v)  # per-frame EMD
    L = imf.shape[0]  # per-frame mode count
    Etg = np.zeros(wlen)
    for i in range(L):  # accumulate Teager energy over this frame's modes
        Etg += steager(imf[i, :])
# NOTE(review): the chunk ends here; Tg/Tgf appear to be filled by code
# beyond this view.
def ver_dme2():
    """Open the "Descomposición Modal Empírica" (EMD) window.

    Reads the time series at the global ``path``, decomposes it with PyEMD,
    and builds a Toplevel window from which the user can plot, compare and
    save individual IMFs.  Uses many names from the enclosing scope:
    frametree, framevolver, framebotones, regresar, ver, ver_grafica, self,
    path, pd, np, EMD, a/f (matplotlib axes/figure), tk, ttk, messagebox, os.
    """
    # Clear the tree frame before building the new window.
    for widget in frametree.winfo_children():
        widget.destroy()
    # "Back" button (disabled while this window is open).
    # NOTE(review): 'enabled' is not a standard ttk state name ('disabled' /
    # '!disabled' / 'normal' are) — verify these calls do not raise TclError.
    boton_regresar = ttk.Button(framevolver, text="Volver", command=regresar, state='disabled')
    boton_regresar.grid(row=0, column=0, padx=5, pady=5, ipadx=5, ipady=5)
    # "Show table" button.
    boton_select = ttk.Button(framebotones, text="Desplegar Tabla", command=ver, state='disabled')
    boton_select.grid(row=0, column=1, padx=5, pady=5, ipadx=5, ipady=5)
    # "Apply EMD" button.
    ttk.Button(framebotones, text="Aplicar DME", command=ver_dme2, state='disabled').grid(row=0, column=3, padx=5, pady=5, ipadx=5, ipady=5)
    # "Show plot" button.
    boton_grafica = ttk.Button(framebotones, text="Ver gráfica", command=ver_grafica, state='disabled')
    boton_grafica.grid(row=0, column=2, padx=5, pady=5, ipadx=5, ipady=5)
    # Create a new window and bring it to the front.
    self.show_dme = tk.Toplevel()
    # Extra frames.
    selectframe = tk.Frame(self.show_dme)
    my_frame_grid = tk.Frame(self.show_dme)
    frametodo = tk.Frame(self.show_dme)

    # "Back" button handler: re-enable the main-window buttons and tear down.
    def borrar():
        boton_regresar = ttk.Button(framevolver, text="Volver", command=regresar, state='enabled')
        boton_regresar.grid(row=0, column=0, padx=5, pady=5, ipadx=5, ipady=5)
        boton_select = ttk.Button(framebotones, text="Desplegar Tabla", command=ver, state='enabled')
        boton_select.grid(row=0, column=1, padx=5, pady=5, ipadx=5, ipady=5)
        ttk.Button(framebotones, text="Aplicar DME", command=ver_dme2, state='enabled').grid(row=0, column=3, padx=5, pady=5, ipadx=5, ipady=5)
        boton_grafica = ttk.Button(framebotones, text="Ver gráfica", command=ver_grafica, state='enabled')
        boton_grafica.grid(row=0, column=2, padx=5, pady=5, ipadx=5, ipady=5)
        # Destroy everything the helper frames contain.
        for widget in frametodo.winfo_children():
            widget.destroy()
        for widget in selectframe.winfo_children():
            widget.destroy()
        for widget in my_frame_grid.winfo_children():
            widget.destroy()
        # Destroy the window itself.
        self.show_dme.destroy()

    # "Back" button inside the new window.
    ttk.Button(self.show_dme, text="Volver", command=borrar).pack(padx=5, pady=5, ipadx=5, ipady=5)
    # Title label of the new window.
    tk.Label(self.show_dme, text="Descomposición Modal Empírica", font=("Arial", 20)).pack(padx=5, pady=5, ipadx=5, ipady=5)

    # Handler for the window-manager close button (x): same cleanup as borrar,
    # minus the frame clearing.
    def on_exit():
        boton_regresar = ttk.Button(framevolver, text="Volver", command=regresar, state='enabled')
        boton_regresar.grid(row=0, column=0, padx=5, pady=5, ipadx=5, ipady=5)
        boton_select = ttk.Button(framebotones, text="Desplegar Tabla", command=ver, state='enabled')
        boton_select.grid(row=0, column=1, padx=5, pady=5, ipadx=5, ipady=5)
        ttk.Button(framebotones, text="Aplicar DME", command=ver_dme2, state='enabled').grid(row=0, column=3, padx=5, pady=5, ipadx=5, ipady=5)
        boton_grafica = ttk.Button(framebotones, text="Ver gráfica", command=ver_grafica, state='enabled')
        boton_grafica.grid(row=0, column=2, padx=5, pady=5, ipadx=5, ipady=5)
        # Destroy the window that was created.
        self.show_dme.destroy()

    # Attach the handler to the window close control.
    self.show_dme.protocol("WM_DELETE_WINDOW", on_exit)

    array1year = []
    array1month = []
    array2 = []
    # Data cleaning: chars 0-3 = year, 4-5 = month, 7.. = value.
    for line in open(path):
        array1year.append(line[0:4])
        array1month.append(line[4:6])
        # NOTE(review): both branches append the same slice — the sign test
        # on line[7] currently has no effect.
        if line[7] == '-':
            array2.append(line[7:])
        else:
            array2.append(line[7:])
    # Apply the EMD.
    lista = []
    lista2 = []
    listaGeneral = []
    for valor in array2:
        lista.append(valor.rstrip('\n'))
    for sal in range(len(lista)):
        lista2.append(array1year[sal] + '-' + array1month[sal])
    # Build the dataframe (row 0 of the file is treated as a header and skipped).
    listaGeneral = dict(zip(lista2[1:], lista[1:]))
    tupla = listaGeneral.items()
    df = pd.DataFrame(list(tupla))
    df.columns = ["fecha", "valor"]
    global my_graph
    my_graph = df["valor"].astype(float)
    global new_list
    new_list = lista[1:]
    dataoni = []
    for item in new_list:
        dataoni.append(float(item))
    signal = np.array(dataoni)
    emd = EMD()
    IMFS = emd.emd(signal)
    funcion = tk.StringVar(selectframe)
    funcion.set("Seleccione la Descomposición")
    # One option per IMF index.
    opciones = []
    for line in range(len(IMFS)):
        opciones.append(line)
    global dropselect
    # Selection box listing every decomposition.
    dropselect = ttk.OptionMenu(selectframe, funcion, *opciones)
    dropselect.pack(padx=5, pady=5, ipadx=5, ipady=5)

    # Plot the selected IMF.
    def ver_dme():
        global seleccion
        seleccion = funcion.get()
        # Make sure a decomposition was actually selected.
        if (seleccion == "Seleccione la Descomposición"):
            messagebox.showerror("Error", "Selecciona una DME para visualizar", parent=self.show_dme)
        else:
            # Clear the frame.
            for widget in frametodo.winfo_children():
                widget.destroy()
            global my_canvas
            # Clear the shared axes and plot the chosen IMF.
            a.clear()
            a.plot(IMFS[int(seleccion)])
            a.legend(["IMF " + str(seleccion)])
            a.set_title("DME " + str(seleccion))
            a.set_xlabel("Tiempo [s]")
            # Embed the figure in the frame, with a toolbar.
            canvas2 = FigureCanvasTkAgg(f, frametodo)
            canvas2.draw()
            canvas2.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
            toolbar = NavigationToolbar2Tk(canvas2, frametodo)
            toolbar.update()
            toolbar.pack()
            my_canvas = canvas2

    # Overlay the raw data and the selected IMF for comparison.
    def prueba():
        # Read the user's selection.
        seleccion = funcion.get()
        if (seleccion == "Seleccione la Descomposición"):
            messagebox.showerror("Error", "Selecciona una DME para visualizar", parent=self.show_dme)
        else:
            # Clear the frame.
            for widget in frametodo.winfo_children():
                widget.destroy()
            file_name = os.path.basename(path)
            index_of_dot = file_name.index('.')
            file_name_without_extension = file_name[:index_of_dot]
            #print (file_name_without_extension)
            global mycanvas3
            # Clear the axes, then plot data and IMF together.
            a.clear()
            a.plot(my_graph)
            a.plot(IMFS[int(seleccion)])
            a.legend(["Datos " + lista[0], "IMF " + str(seleccion)])
            a.set_title(file_name_without_extension + " y " + "DME " + str(seleccion))
            canvas3 = FigureCanvasTkAgg(f, frametodo)
            canvas3.draw()
            canvas3.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
            toolbar = NavigationToolbar2Tk(canvas3, frametodo)
            toolbar.update()
            toolbar.pack()
            #canvas3._tkcanvas.pack(side = tk.TOP, fill = tk.BOTH, expand = True)
            mycanvas3 = canvas3

    # Save the selected IMF to "DME <n>.dat" as "<pos> <value>" rows.
    def guardar():
        my_dme = funcion.get()
        if (my_dme == "Seleccione la Descomposición"):
            messagebox.showerror("Error", "Selecciona una DME para guardar", parent=self.show_dme)
        else:
            for widget in frametodo.winfo_children():
                widget.destroy()
            messagebox.showinfo("Correcto", "Archivo Guardado con exito", parent=self.show_dme)
            # Create the file.
            with open('DME ' + my_dme + '.dat', 'w') as f:
                f.write("Pos IMFS " + my_dme + "\n")
                posicion = 0
                for i in IMFS[int(my_dme)]:
                    posicion = posicion + 1
                    pos = str(posicion)
                    new_pos = pos.zfill(4)
                    # NOTE(review): the line below overwrites the zfill result.
                    new_pos = pos.rjust(4, '0')
                    f.writelines(new_pos + " " + str(i) + "\n")
                f.close()  # NOTE(review): redundant inside ``with``

    selectframe.pack()
    my_frame_grid.pack()
    # Plot / compare / save buttons.
    ttk.Button(my_frame_grid, text="Desplegar grafica", command=ver_dme).grid(row=0, column=0, padx=5, pady=5, ipadx=5, ipady=5)
    ttk.Button(my_frame_grid, text="Comparacion con los datos", command=prueba).grid(row=0, column=2, padx=5, pady=5, ipadx=5, ipady=5)
    ttk.Button(my_frame_grid, text="Guardar DME", command=guardar).grid(row=0, column=3, padx=5, pady=5, ipadx=5, ipady=5)
    frametodo.pack()
# Reconstruct a signal from saved approximation coefficients using sym5
# wavelets, then decompose the reconstruction with EMD and plot it.
import numpy as np
import pywt
import matplotlib.pyplot as plt
from PyEMD import EMD

sig = np.loadtxt('a0.txt')  # approximation coefficients, one value per line
print(type(sig))
print(sig)
print(sig.shape)
coeff_list = [sig, None]  # [cA, cD]; None = no detail coefficients
print(type(coeff_list))
print(len(coeff_list))
w = pywt.waverec(coeff_list, pywt.Wavelet('sym5'))  # inverse DWT
freq = 44100  # assumed sample rate — TODO confirm against the data source
t = np.arange(0, w.size / freq, 1 / freq)
IMF = EMD().emd(w, t)
N = IMF.shape[0] + 1  # mode count + residue (computed but unused below)
print(len(w))
plt.plot(w)
plt.show()
def __init__(self, parent, controller):
    """Build the "Descomposición Modal Empírica" (EMD) screen.

    Reads the series at the global ``path2`` (YYYYMM prefix + value per
    line), decomposes it with PyEMD, and offers a drop-down to plot any
    IMF.  Publishes ``my_graph`` and ``new_list`` as globals for other
    screens.  Requires module-level names: path2, pd, np, EMD, a/f
    (matplotlib axes/figure), FigureCanvasTkAgg, Show_graph.
    """
    tk.Frame.__init__(self, parent)
    # FIX: corrected the label typo ("Descompoición" -> "Descomposición").
    tk.Label(self, text="Descomposición Modal Empírica", font=("Arial", 20)).pack(padx=5, pady=5, ipadx=5, ipady=5)
    array1year = []
    array1month = []
    array2 = []
    # Data cleaning: chars 0-3 = year, 4-5 = month, 7.. = value.
    for line in open(path2):
        array1year.append(line[0:4])
        array1month.append(line[4:6])
        if line[7] == '-':
            array2.append(line[7:])
        else:
            array2.append(line[7:])
    lista = []
    lista2 = []
    listaGeneral = []
    for valor in array2:
        lista.append(valor.rstrip('\n'))
    for sal in range(len(lista)):
        lista2.append(array1year[sal] + '-' + array1month[sal])
    # Build the dataframe (row 0 of the file is a header and is skipped).
    listaGeneral = dict(zip(lista2[1:], lista[1:]))
    tupla = listaGeneral.items()
    df = pd.DataFrame(list(tupla))
    df.columns = ["fecha", "valor"]
    global my_graph
    my_graph = df["valor"].astype(float)
    global new_list
    new_list = lista[1:]
    dataoni = []
    for item in new_list:
        dataoni.append(float(item))
    signal = np.array(dataoni)
    emd = EMD()
    IMFS = emd.emd(signal)
    funcion = tk.StringVar()
    funcion.set("Seleccione la Descomposición")
    # One drop-down option per IMF index.
    opciones = []
    for line in range(len(IMFS)):
        opciones.append(line)
    dropselect = ttk.OptionMenu(self, funcion, *opciones)
    dropselect.pack(padx=5, pady=5, ipadx=5, ipady=5)

    # Plot the selected IMF on the shared axes.
    def ver_dme():
        seleccion = int(funcion.get())
        global my_canvas
        a.clear()
        a.plot(IMFS[seleccion])
        a.set_title("IMF " + str(seleccion))
        a.set_xlabel("Tiempo [s]")
        canvas2 = FigureCanvasTkAgg(f, self)
        # FIX: FigureCanvasTkAgg.show() was deprecated in matplotlib 2.2 and
        # later removed; draw() is the supported call (the sibling screen in
        # this file already uses it).
        canvas2.draw()
        canvas2.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
        my_canvas = canvas2

    # Remove the last plotted canvas from the screen.
    def clear2():
        my_canvas.get_tk_widget().destroy()

    my_frame_grid = tk.Frame(self)
    my_frame_grid.pack()
    ttk.Button(my_frame_grid, text="Desplegar grafica", command=ver_dme).grid(row=0, column=0, padx=5, pady=5, ipadx=5, ipady=5)
    ttk.Button(my_frame_grid, text="Limpiar Pantalla", command=clear2).grid(row=0, column=1, padx=5, pady=5, ipadx=5, ipady=5)
    parejo = tk.Frame(self)
    parejo.pack()
    ttk.Button(parejo, text="Volver", command=lambda: controller.show_frame(Show_graph)).grid(row=0, column=0, padx=5, pady=5, ipadx=5, ipady=5)
def test_instantiation(self):
    """A Visualisation built from a fresh (un-run) EMD must raise ValueError."""
    decomposer = EMD()
    with self.assertRaises(ValueError):
        Visualisation(decomposer)
def fun_loadExFeats(dSet, ri, idx, item, Fs, LPcutoff, Nmodes, FFTregLen=25, gaussSigma=5, FFTreg='gaussian'):
    """Load one EEG record and extract features from five decompositions.

    Parameters
    ----------
    dSet : str       "NSC_ND" or "BonnDataset"; selects the file loader.
    ri : int         record index (0-based; files are numbered from 1).
    idx : int        group index (0 triggers feature-name header building).
    item : str       record/group name, used in paths and the return value.
    Fs : float       sampling frequency in Hz.
    LPcutoff : float low-pass cutoff for the pre-filter.
    Nmodes : int     number of modes requested per decomposition.
    FFTregLen, gaussSigma, FFTreg : EWT spectrum-regularisation options.

    Returns ``(item, featsTuple)`` where featsTuple maps each method name
    ("EMD", "EEMD", "CEEMDAN", "EWT", "VMD", "Orig") to an array whose
    first entry is the decomposition time followed by the features.

    Relies on module-level names: np, loadmat, signal, time, EMD, EEMD,
    CEEMDAN, ewtpy, VMD, featExtract, Nfeats, alpha, tau, init, tol.
    """
    featNames = ["Group", "decTime"]
    featsTuple = {"EMD": 0, "EEMD": 0, "CEEMDAN": 0, "EWT": 0, "VMD": 0, "Orig": 0}
    if dSet == "NSC_ND":
        fLoad = loadmat("%s/data/%s/%s%d" % (dSet, item, item, ri + 1))
        f = fLoad[item][:, 0]
        # Mirror-pad the signal; ceil matches MATLAB's rounding behaviour.
        ltemp = int(np.ceil(f.size / 2))
        fMirr = np.append(np.flip(f[0:ltemp - 1], axis=0), f)
        fMirr = np.append(fMirr, np.flip(f[-ltemp - 1:-1], axis=0))
        f = np.copy(fMirr)
    if dSet == "BonnDataset":
        f = np.loadtxt("%s/data/%s/%s%.3d.txt" % (dSet, item, item, ri + 1))

    # Preprocessing: remove DC, 4th-order Butterworth low-pass.
    f = f - np.mean(f)
    b, a = signal.butter(4, LPcutoff / (0.5 * Fs), btype='low', analog=False)
    fp = signal.filtfilt(b, a, f)

    # --- EMD features ---
    tic = time.time()
    emd = EMD()
    emd.MAX_ITERATION = 2000
    IMFs = emd.emd(fp, max_imf=Nmodes)
    toc = time.time()
    featsTuple["EMD"] = toc - tic  # decomposition time is the first entry
    if Nmodes != IMFs.shape[0] - 1:
        print("\nCheck number of EMD modes: %s%.3d" % (item, ri + 1))
    for mi in range(IMFs.shape[0]):
        featOut, labelTemp = featExtract(IMFs[mi, :], Fs, welchWin=1024)
        featsTuple["EMD"] = np.append(featsTuple["EMD"], featOut)
        # Build the feature-name header once (first record of first group).
        if ri == 0 and idx == 0:
            for ii in labelTemp:
                featNames = np.append(featNames, "%s%d" % (ii, mi))
    if IMFs.shape[0] < Nmodes + 1:
        # Zero-pad so every record yields the same feature count.
        featsTuple["EMD"] = np.append(
            featsTuple["EMD"], np.zeros(Nfeats * (Nmodes + 1 - IMFs.shape[0])))

    # --- EEMD (Ensemble EMD) features ---
    # FIX: the original wrapped the EEMD and CEEMDAN computations in
    # ``if __name__ == "__main__":`` guards *inside* this function.  When the
    # module is imported, those guards are false, eIMFs/ceIMFs are never
    # assigned, and the code below crashes with NameError.  Guards removed.
    tic = time.time()
    eemd = EEMD(trials=200)
    eemd.MAX_ITERATION = 2000
    eIMFs = eemd(fp, max_imf=Nmodes)
    toc = time.time()
    featsTuple["EEMD"] = toc - tic
    if Nmodes != eIMFs.shape[0] - 1:
        print("\nCheck number of EEMD modes: %s%.3d" % (item, ri + 1))
    # For each mode, extract features.
    for mi in range(eIMFs.shape[0]):
        featOut, labelTemp = featExtract(eIMFs[mi, :], Fs, welchWin=1024)
        featsTuple["EEMD"] = np.append(featsTuple["EEMD"], featOut)
    if eIMFs.shape[0] < Nmodes + 1:
        featsTuple["EEMD"] = np.append(
            featsTuple["EEMD"], np.zeros(Nfeats * (Nmodes + 1 - eIMFs.shape[0])))

    # --- CEEMDAN (Complete Ensemble EMD with Adaptive Noise) features ---
    tic = time.time()
    ceemdan = CEEMDAN()
    ceIMFs = ceemdan(fp, max_imf=Nmodes)
    toc = time.time()
    featsTuple["CEEMDAN"] = toc - tic
    if Nmodes != ceIMFs.shape[0] - 1:
        print("\nCheck number of CEEMDAN modes: %s%.3d" % (item, ri + 1))
    for mi in range(ceIMFs.shape[0]):
        featOut, labelTemp = featExtract(ceIMFs[mi, :], Fs, welchWin=1024)
        featsTuple["CEEMDAN"] = np.append(featsTuple["CEEMDAN"], featOut)
    if ceIMFs.shape[0] < Nmodes + 1:
        featsTuple["CEEMDAN"] = np.append(
            featsTuple["CEEMDAN"], np.zeros(Nfeats * (Nmodes + 1 - ceIMFs.shape[0])))

    # --- EWT features ---
    tic = time.time()
    ewt, _, _ = ewtpy.EWT1D(fp, N=Nmodes, log=0, detect="locmax", completion=0,
                            reg=FFTreg, lengthFilter=FFTregLen, sigmaFilter=gaussSigma)
    toc = time.time()
    featsTuple["EWT"] = toc - tic
    if Nmodes != ewt.shape[1]:
        print("\nCheck number of EWT modes: %s%.3d" % (item, ri + 1))
    for mi in range(Nmodes):
        featOut, labelTemp = featExtract(ewt[:, mi], Fs, welchWin=1024)
        featsTuple["EWT"] = np.append(featsTuple["EWT"], featOut)

    # --- VMD features (alpha/tau/init/tol come from module-level settings) ---
    DC = np.mean(fp)  # no DC part imposed
    tic = time.time()
    vmd, _, _ = VMD(fp, alpha, tau, Nmodes, DC, init, tol)
    toc = time.time()
    featsTuple["VMD"] = toc - tic
    if Nmodes != vmd.shape[0]:
        print("\nCheck number of VMD modes: %s%.3d" % (item, ri + 1))
    for mi in range(Nmodes):
        featOut, labelTemp = featExtract(vmd[mi, :], Fs, welchWin=1024)
        featsTuple["VMD"] = np.append(featsTuple["VMD"], featOut)

    # --- Features of the original (filtered, non-decomposed) signal ---
    tic = time.time()
    featOut, labelTemp = featExtract(fp, Fs, welchWin=1024)
    toc = time.time()
    featsTuple["Orig"] = np.append(toc - tic, featOut)
    return item, featsTuple
def plot_4_all_cluster():
    """Aggregate Google-trace task events into fixed-size chunks, store the
    raw CPU/RAM series and their EMD IMFs in Postgres, and plot the results.

    Side effects: truncates and refills four tables in the ``load_cloud``
    database, opens several matplotlib windows, and reads trace CSV shards
    from a hard-coded local path.  Requires module-level names: psycopg2,
    np, pd, math, plt, EMD.
    """
    hostname = 'localhost'
    username = '******'  # NOTE(review): placeholder credentials
    password = '******'
    database = 'load_cloud'
    conn = psycopg2.connect(host=hostname, user=username, password=password, dbname=database)

    # Start from empty tables.
    cur0 = conn.cursor()
    cur0.execute('delete from google_cpu_req_200')
    cur0.execute('delete from google_cpu_req_emd_200')
    cur0.execute('delete from google_ram_req_200')
    cur0.execute('delete from google_ram_req_emd_200')
    conn.commit()

    # Load the first 11 trace shards (timestamp, CPU request, RAM request).
    timestamp = []
    cpuReq = []
    ramReq = []
    for k in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
        print(k, '...')
        if k < 10:
            data = np.array(pd.read_csv(
                r'E:\ThesisNew\Thesis\Data\task_events\task_events_part-0000' + str(k) +
                r'-of-00500.csv\task_events_part-0000' + str(k) + r'-of-00500.csv'))
        elif k < 100:
            data = np.array(pd.read_csv(
                r'E:\ThesisNew\Thesis\Data\task_events\task_events_part-000' + str(k) +
                r'-of-00500.csv\task_events_part-000' + str(k) + r'-of-00500.csv'))
        timestamp.append(data[:, 0])
        cpuReq.append(data[:, 9])
        ramReq.append(data[:, 10])
        del data
    timestamp = [j for i in timestamp for j in i]
    cpuReq = [j for i in cpuReq for j in i]
    ramReq = [j for i in ramReq for j in i]
    print(len(cpuReq), len(ramReq), len(timestamp))

    # Replace NaNs with 0.  TODO: replace this with a KNN imputer.
    cpuReq = np.array([0 if math.isnan(x) else x for x in cpuReq])
    ramReq = np.array([0 if math.isnan(x) else x for x in ramReq])

    unique_times = list(sorted(set(list(timestamp))))  # sorted unique times
    print(unique_times[:4], unique_times[-4:-1])
    print('total time series length is : ', len(cpuReq), len(ramReq), len(unique_times))

    # Prediction-window (chunk) size.
    chunk_size = 200
    print('total chunk timestamps are ', str(int(len(cpuReq) / chunk_size)))
    chunk_ts = [
        unique_times[x:x + chunk_size]
        for x in range(0, len(unique_times), chunk_size)
    ]

    timeCpuReq = []
    timeRamReq = []
    time_stamp_symbol = []
    unique_times = np.array(unique_times)
    k = 0
    # Loop variable renamed from ``time`` to avoid shadowing the time module.
    for chunk in chunk_ts:
        k += 1
        time_stamp_symbol.append(k)
        # Indices of all events whose timestamp falls inside this chunk.
        ids1 = list(np.where(chunk[0] <= unique_times)[0])
        ids2 = list(np.where(unique_times <= chunk[-1])[0])
        ids = list(set(ids1).intersection(ids2))
        print('Timestamp chunk of ', str(k), ' are collecting .... ')
        timeCpuReq.append(np.sum(cpuReq[ids]))
        timeRamReq.append(np.sum(ramReq[ids]))
    print(len(timeCpuReq), len(timeRamReq))

    # Plot the aggregated raw series.
    plt.subplot(2, 1, 1)
    plt.plot(time_stamp_symbol, timeCpuReq)
    plt.xlabel('Time')
    plt.ylabel('CPU Req')
    plt.subplot(2, 1, 2)
    plt.plot(time_stamp_symbol, timeRamReq, color='red')
    plt.xlabel('Time')
    plt.ylabel('RAM Req')
    plt.show()

    # Write the aggregated series to the database.
    print('writing raw data to DB ...')
    cur1 = conn.cursor()
    for kkk in range(len(time_stamp_symbol)):
        cur1.execute('insert into google_ram_req_200 values (%s,%s)',
                     (int(time_stamp_symbol[kkk]), timeRamReq[kkk]))
        conn.commit()
        cur1.execute('insert into google_cpu_req_200 values (%s,%s)',
                     (int(time_stamp_symbol[kkk]), timeCpuReq[kkk]))
        conn.commit()

    # EMD on the CPU series; every IMF row goes to google_cpu_req_emd_200.
    print('****** Start to 60min Analysis for CPU ...')
    emd = EMD()
    IMFs = emd(np.array(timeCpuReq, dtype=float))
    print('****** 60min Analysis Completed! ')
    num_of_IMF = len(IMFs)
    print('total number of IMFs are', num_of_IMF)
    imf_lens = []
    for imf_index in range(len(IMFs)):
        imf_lens.append(len(IMFs[imf_index]))
        for kkk in range(len(time_stamp_symbol)):
            cur2 = conn.cursor()
            cur2.execute(
                'insert into google_cpu_req_emd_200 values (%s,%s,%s,%s)',
                (int(time_stamp_symbol[kkk]), IMFs[imf_index][kkk],
                 int(imf_index + 1), int(imf_index + 1)))
            conn.commit()
        print('IMF ' + str(imf_index + 1) + ' written to DB !!! ')
    # FIX: keep the CPU IMFs — the reconstruction check below compares against
    # the CPU series, but ``IMFs``/``imf_lens`` are reassigned to the RAM
    # decomposition next (the original summed RAM IMFs against CPU data).
    cpu_IMFs = IMFs
    cpu_imf_lens = imf_lens

    # EMD on the RAM series; every IMF row goes to google_ram_req_emd_200.
    print('****** Start to 60min Analysis for RAM ...')
    emd = EMD()
    IMFs = emd(np.array(timeRamReq, dtype=float))
    print('****** 60min Analysis Completed! ')
    num_of_IMF = len(IMFs)
    print('total number of IMFs are', num_of_IMF)
    imf_lens = []
    for imf_index in range(len(IMFs)):
        imf_lens.append(len(IMFs[imf_index]))
        for kkk in range(len(time_stamp_symbol)):
            cur2 = conn.cursor()
            cur2.execute(
                'insert into google_ram_req_emd_200 values (%s,%s,%s,%s)',
                (int(time_stamp_symbol[kkk]), IMFs[imf_index][kkk],
                 int(imf_index + 1), int(imf_index + 1)))
            conn.commit()
        print('IMF ' + str(imf_index + 1) + ' written to DB !!! ')
    print('length of IMFs are', imf_lens)
    print('***********')

    # Reconstruct the CPU series from its IMFs; the sum of all IMFs should
    # reproduce the original signal up to numerical error.
    reconstructed_data = np.zeros([1, cpu_imf_lens[0]])
    for imf_index in range(len(cpu_IMFs)):
        reconstructed_data = reconstructed_data + np.array(cpu_IMFs[imf_index])
    # FIX: a true RMSE takes the mean before the square root; the original
    # printed a root-sum-of-squares under the RMSE label.
    print('RMSE between original and reconstructed signal is',
          np.sqrt(np.mean(np.square(reconstructed_data - timeCpuReq))))
    plt.subplot(2, 1, 1)
    plt.plot(time_stamp_symbol, timeCpuReq, color='blue', label='original data')
    plt.subplot(2, 1, 2)
    # FIX: actually plot the reconstruction (the original re-plotted timeCpuReq).
    plt.plot(time_stamp_symbol, reconstructed_data[0], color='red', label='reconstructed data')
    plt.legend()
    plt.grid()
    plt.show()
    plt.plot(time_stamp_symbol,
             list(np.transpose(np.square(reconstructed_data - timeCpuReq))),
             color='blue', label='squared error')
    plt.legend()
    plt.grid()
    plt.show()
'loss_function': loss_f,
    'learning_rate': lr
}, dir + '/model.pth')
# NOTE(review): fragment — the torch.save({...}) call that the dict above
# belongs to opens before this chunk; indentation is reconstructed.
np.save(dir + '/trainloss.npy', train_loss)
np.save(dir + '/valloss.npy', val_loss)
print("Step 3: Model Saved")

# Train the model
try:
    # Generates mini_batchs for training. Loads data for validation.
    train_loader = loader.DataLoader(dataset=train_set, batch_size=BATCH_SIZE, shuffle=True)
    print(np.shape(val_set[:, 0:1, :, :]))
    # NOTE(review): EMD is called here on a 4-D validation tensor and its
    # first three components become separate model inputs — confirm the EMD
    # wrapper in this project accepts that shape.
    v_x_IMFs = EMD(val_set[:, 0:1, :, :])
    v_x1 = Variable(torch.from_numpy(v_x_IMFs[0]).float())
    v_x2 = Variable(torch.from_numpy(v_x_IMFs[1]).float())
    v_x3 = Variable(torch.from_numpy(v_x_IMFs[2]).float())
    v_y = Variable(torch.from_numpy(val_set[:, 1:2, :, :]).float())
    # Moves data and model to gpu if available
    if cuda:
        v_x1, v_x2, v_x3, v_y = v_x1.cuda(), v_x2.cuda(), v_x3.cuda(), v_y.cuda()
    print("Step 2: Model Training Start")
    for epoch in range(EPOCH):
        for step, train_data in enumerate(train_loader):
def extract_features_grouped_for_channel(X, channels):
    """Append EMD-based feature vectors for every channel window to ``X``.

    Each channel is cut into 12 consecutive windows of 672 samples.  For
    every window the first IMF is computed and three features are taken
    from it; the features of all channels are grouped per window index,
    producing 12 feature vectors that are appended to ``X``.

    Parameters
    ----------
    X : list         accumulator of feature vectors (mutated and returned).
    channels : sequence of per-channel sample sequences (>= 12*672 each).

    Relies on module-level names: np, EMD, and the three feature helpers
    get_first_difference_of_time_series / get_first_difference_of_phase /
    get_normalized_energy.  (Large commented-out duplicate of this logic
    was removed.)
    """
    samples = []
    features = []
    for n in range(0, 12):
        samples.append([])
        features.append([])
    for n in range(0, len(channels)):
        start = 0
        end = 672
        for i in range(0, 12):
            signal_sample = np.array(channels[n][start:end])
            samples[i].append(signal_sample)
            # max_imf=1 stops the sifting early; only IMFs[0] is used below.
            IMFs = EMD().emd(signal_sample, None, 1)
            # Extract the 3 features for this channel/window.
            IMF = IMFs[0]
            D_t = get_first_difference_of_time_series(IMF)
            D_p = get_first_difference_of_phase(IMF)
            E_norm = get_normalized_energy(signal_sample, IMF)
            features[i].append(D_t)
            features[i].append(D_p)
            features[i].append(E_norm)
            start = end
            end += 672
    samples = np.array(samples)
    features = np.array(features)
    for f in features:
        X.append(np.array(f))
    return X
def main(args):
    """Per-trial instantaneous-phase histogram of the Cz channel.

    Loads epoched EEG from ``data/1ERBclean_n.mat``, band-passes each trial
    (4-100 Hz), extracts the instantaneous phase via the Hilbert transform,
    aligns trials on an early negative peak, and shows a smoothed phase
    histogram.  ``args`` must provide ``output_base_dir`` and
    ``logs_base_dir``; the corresponding directories are created.

    Relies on module-level names: os, datetime, loadmat, np, plt, EMD,
    butter_bandpass, hilbert, math, sp (scipy).
    """
    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    # FIX: the original tested ``if ~os.path.isabs(...)``.  Bitwise NOT of a
    # bool is always truthy (~False == -1, ~True == -2), so the branch ran
    # unconditionally; ``not`` expresses the intended test.
    if not os.path.isabs(args.output_base_dir):
        dirpath = os.path.dirname(__file__)
        args.output_base_dir = os.path.join(dirpath, args.output_base_dir)
    output_dir = os.path.join(os.path.expanduser(args.output_base_dir), subdir)
    if not os.path.isdir(output_dir):
        # Create the model directory if it doesn't exist.
        os.makedirs(output_dir)
    log_dir = args.logs_base_dir
    if not os.path.isabs(args.logs_base_dir):  # FIX: same ``~`` -> ``not`` repair
        log_dir = os.path.join(output_dir, args.logs_base_dir)
    if not os.path.isdir(log_dir):
        # Create the log directory if it doesn't exist.
        os.makedirs(log_dir)
    # Source path kept for (currently disabled) git revision logging.
    src_path, _ = os.path.split(os.path.realpath(__file__))
    print('Output directory: %s' % output_dir)
    print('Log directory: %s' % log_dir)
    args.output_dir = output_dir

    data = loadmat('data/1ERBclean_n.mat')
    args.channels = data['channels']
    args.labels = data['labels']
    args.labels = args.labels[0]
    cz = np.squeeze(args.channels[-1, :, :])  # last row assumed to be Cz — TODO confirm
    ind = (args.labels == 1000)
    cz_ind = cz[:, ind]  # keep only trials of condition 1000
    fs = 1000
    emd = EMD()
    n_tr = cz_ind.shape[1]
    nbin = 200
    hist_wind = np.zeros([nbin, cz_ind.shape[0] - 1000])
    wind_ = np.zeros([cz_ind.shape[0], cz_ind.shape[1]])
    allign_vals = np.zeros([cz_ind.shape[1]])
    for cnt_trial in range(0, n_tr):
        S = np.squeeze(cz_ind[:, cnt_trial])
        # Alignment point: minimum within samples 100..249.
        S1 = S[100:250]
        minpos1 = np.argmin(S1)
        allign_vals[cnt_trial] = minpos1 + 100
        order = 2
        lf_cutoff = 4.
        hf_cutoff = 100.
        imf3 = butter_bandpass(S, lf_cutoff, hf_cutoff, fs, order)
        # The EMD is run for inspection only; the band-passed signal is used.
        emd.emd(S)
        imfs, res = emd.get_imfs_and_residue()
        # NOTE(review): magnitude_spectrum draws into the current figure as a
        # side effect; kept to preserve behaviour.
        spectrum = plt.magnitude_spectrum(imf3, fs)
        # Instantaneous phase of the band-passed trial, normalised to [0, 1).
        y2 = imf3
        y = hilbert(y2)
        angles = np.angle(y)
        insta_phase = np.unwrap(angles)
        insta_phase_norm = (insta_phase + math.pi) / (2 * math.pi) % 1.
        wind_[:, cnt_trial] = insta_phase_norm
        print(cnt_trial)

    # Re-align every trial on its detected minimum and histogram the phase.
    allign_vals = allign_vals.astype(int)
    wind_alligned = np.zeros([cz_ind.shape[0] - 1000, cz_ind.shape[1]])
    for cnt_trial in range(0, n_tr):
        print(allign_vals[cnt_trial] - 100)
        print(allign_vals[cnt_trial] + 999)
        # NOTE(review): this 1100-sample slice matches the target length only
        # when cz_ind.shape[0] == 2100 — confirm with the recording layout.
        wind_alligned[:, cnt_trial] = wind_[allign_vals[cnt_trial] - 100:allign_vals[cnt_trial] + 1000, cnt_trial]
    for cnti in range(wind_alligned.shape[0]):
        test = np.histogram(wind_alligned[cnti, :], nbin, (0, 1))
        hist_wind[:, cnti] = test[0]

    # Smooth the 2-D phase histogram and display it.
    sigma_y = 2.0
    sigma_x = 2.0
    sigma = [sigma_y, sigma_x]
    # FIX: scipy.ndimage.filters is a long-deprecated alias; call the
    # function from scipy.ndimage directly.
    y = sp.ndimage.gaussian_filter(hist_wind, sigma, mode='constant')
    plt.imshow(y)
    plt.show()
    plt.waitforbuttonpress(0.1)
    plt.close()
def desplegar_emd():
    """Load the series at ``path``, run EMD, and build the IMF-selection UI.

    Creates the drop-down of available IMFs plus buttons to plot, compare
    against the raw data, and save the selected IMF.  Uses names from the
    enclosing scope: path, selectframe, parejo, my_frame_grid, frametodo,
    a/f (matplotlib axes/figure), pd, np, EMD, tk, ttk, messagebox, os;
    publishes my_graph, new_list, dropselect, seleccion, my_canvas and
    mycanvas3 as globals.
    """
    array1year = []
    array1month = []
    array2 = []
    # Data cleaning: chars 0-3 = year, 4-5 = month, 7.. = value.
    for line in open(path):
        array1year.append(line[0:4])
        array1month.append(line[4:6])
        if line[7] == '-':
            array2.append(line[7:])
        else:
            array2.append(line[7:])
    lista = []
    lista2 = []
    listaGeneral = []
    for valor in array2:
        lista.append(valor.rstrip('\n'))
    for sal in range(len(lista)):
        lista2.append(array1year[sal] + '-' + array1month[sal])
    # Build the dataframe (row 0 of the file is a header and is skipped).
    listaGeneral = dict(zip(lista2[1:], lista[1:]))
    tupla = listaGeneral.items()
    df = pd.DataFrame(list(tupla))
    df.columns = ["fecha", "valor"]
    global my_graph
    my_graph = df["valor"].astype(float)
    global new_list
    new_list = lista[1:]
    dataoni = []
    for item in new_list:
        dataoni.append(float(item))
    signal = np.array(dataoni)
    emd = EMD()
    IMFS = emd.emd(signal)
    funcion = tk.StringVar(selectframe)
    funcion.set("Seleccione la Descomposición")
    # One drop-down option per IMF index.
    opciones = []
    for line in range(len(IMFS)):
        opciones.append(line)
    global dropselect
    dropselect = ttk.OptionMenu(selectframe, funcion, *opciones)
    dropselect.pack(padx=5, pady=5, ipadx=5, ipady=5)
    # Disable this function's own trigger button once the UI is built.
    ttk.Button(parejo, text="Desplegar DME", command=desplegar_emd, state='disabled').grid(row=0, column=1, padx=5, pady=5, ipadx=5, ipady=5)

    # Plot the selected IMF on the shared axes.
    def ver_dme():
        global seleccion
        seleccion = funcion.get()
        # Ensure something was actually selected.
        if (seleccion == "Seleccione la Descomposición"):
            messagebox.showerror("Error", "Selecciona una DME para visualizar")
        else:
            for widget in frametodo.winfo_children():
                widget.destroy()
            global my_canvas
            a.clear()
            a.plot(IMFS[int(seleccion)])
            a.set_title("IMF " + str(seleccion))
            a.set_xlabel("Tiempo [s]")
            canvas2 = FigureCanvasTkAgg(f, frametodo)
            # FIX: FigureCanvasTkAgg.show() was deprecated and removed from
            # matplotlib; draw() is the supported call.
            canvas2.draw()
            canvas2.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
            my_canvas = canvas2

    # Overlay the selected IMF on the raw data for comparison.
    def prueba():
        seleccion = funcion.get()
        if (seleccion == "Seleccione la Descomposición"):
            messagebox.showerror("Error", "Selecciona una DME para visualizar")
        else:
            for widget in frametodo.winfo_children():
                widget.destroy()
            file_name = os.path.basename(path)
            index_of_dot = file_name.index('.')
            file_name_without_extension = file_name[:index_of_dot]
            global mycanvas3
            a.clear()
            a.plot(IMFS[int(seleccion)])
            a.plot(my_graph)
            a.set_title(file_name_without_extension + " y " + "IMF " + str(seleccion))
            canvas3 = FigureCanvasTkAgg(f, frametodo)
            canvas3.draw()  # FIX: .show() removed from matplotlib
            canvas3.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
            mycanvas3 = canvas3

    # Save the selected IMF as "DME <n>.dat" with zero-padded positions.
    def guardar():
        my_dme = funcion.get()
        if (my_dme == "Seleccione la Descomposición"):
            messagebox.showerror("Error", "Selecciona una DME para guardar")
        else:
            for widget in frametodo.winfo_children():
                widget.destroy()
            messagebox.showinfo("Correcto", "Archivo Guardado con exito")
            # The file handle no longer shadows the matplotlib figure ``f``;
            # the redundant close() inside ``with`` and the immediately
            # overwritten zfill() call were dropped.
            with open('DME ' + my_dme + '.dat', 'w') as out:
                out.write("Pos IMFS " + my_dme + "\n")
                posicion = 0
                for i in IMFS[int(my_dme)]:
                    posicion = posicion + 1
                    new_pos = str(posicion).rjust(4, '0')
                    out.writelines(new_pos + " " + str(i) + "\n")

    selectframe.pack()
    my_frame_grid.pack()
    ttk.Button(my_frame_grid, text="Desplegar grafica", command=ver_dme).grid(row=0, column=0, padx=5, pady=5, ipadx=5, ipady=5)
    ttk.Button(my_frame_grid, text="Comparacion con los datos", command=prueba).grid(row=0, column=2, padx=5, pady=5, ipadx=5, ipady=5)
    ttk.Button(my_frame_grid, text="Guardar DME", command=guardar).grid(row=0, column=3, padx=5, pady=5, ipadx=5, ipady=5)
    frametodo.pack()
# NOTE(review): fragment — the enclosing loop and if/else chain start before
# this chunk; the indentation below reconstructs only the visible structure.
# The raw_input call indicates Python 2 code.
        plt.show()
    else:
        continue
else:
    print(
        colored("You have to do at least one composition first.", 'red'))
    continue
if name == 'joint':
    # To make a joint signal: combine the first part of A with the second
    # part of B at a user-chosen split index, then EMD the result.
    joint = raw_input('Which two? From where?')
    joint = [n for n in joint.split(',')]
    joint[-1] = int(joint[-1])
    realwindset = np.append(windset[joint[0]][:joint[-1] + 1], windset[joint[1]][joint[-1]:-1])
    x = np.linspace(1, len(realwindset), len(realwindset))
    emd = EMD()
    realwindset.shape = (len(realwindset), )
    imfs = emd(realwindset)
    size = imfs.shape
    # Plot the joined series, then each IMF in its own subplot.
    plt.figure()
    plt.plot(x, realwindset)
    plt.title(joint)
    plt.show()
    plt.figure(figsize=(20, 18))
    for loop in range(1, size[0] + 1):
        plt.subplot(size[0], 1, loop)
        plt.plot(x, imfs[loop - 1])
        plt.title(loop)
    plt.show()
    continue
if name not in windset.keys():  # in case the user gives a wrong name
def get_imf(point):
    """Decompose a numeric sequence into at most two IMFs.

    Each element of *point* is coerced to ``float`` before the values are
    handed to PyEMD's :class:`EMD` with ``max_imf=2``.
    """
    values = [float(v) for v in point]
    return EMD().emd(np.array(values), max_imf=2)
def main():
    """End-to-end training and evaluation driver.

    Loads the series from ``data-569-1.csv``, trains SVR/ELM/BPNN/LSTM
    baselines, then EMD-LSTM and CEEMDAN-LSTM hybrid models (the EEMD-LSTM
    variant is commented out), accumulates the metrics into the module-level
    ``result`` string, prints it and plots every prediction curve.
    """
    full_data, scaler = load_data('data-569-1.csv')
    training_set_split = int(len(full_data) * 0.8)  # 80/20 train/test split point
    lookback_window = 2
    global result
    result = '\nEvaluation.'
    #
    # Split the array into the different data sets.
    data_regular = data_split(full_data, training_set_split, lookback_window)
    data_regular_DL = data_split_LSTM(data_regular)
    y_real = scaler.inverse_transform(data_regular[3].reshape(-1, 1)).reshape(
        -1, )
    predict_svr = Training_Prediction_ML(model_SVR(), y_real, scaler,
                                         data_regular, 'SVR')
    predict_elm = Training_Prediction_ML(model_ELM(), y_real, scaler,
                                         data_regular, 'ELM')
    predict_bp = Training_Prediction_ML(model_BPNN(), y_real, scaler,
                                        data_regular, 'BPNN')
    predict_LSTM = Training_Prediction_DL(model_LSTM(lookback_window), y_real,
                                          scaler, data_regular_DL, 'LSTM')
    ################################################################# EMD_LSTM
    emd = EMD()
    # Decompose the full series into at most 8 IMFs.
    emd_imfs = emd.emd(full_data.reshape(-1), None, 8)
    emd_imfs_prediction = []
    # i = 1
    # plt.rc('font', family='Times New Roman')
    # plt.subplot(len(emd_imfs) + 1, 1, i)
    # plt.plot(full_data, color='black')
    # plt.ylabel('Signal')
    # plt.title('EMD')
    # for emd_imf in emd_imfs:
    #     plt.subplot(len(emd_imfs) + 1, 1, i + 1)
    #     plt.plot(emd_imf, color='black')
    #     plt.ylabel('IMF ' + str(i))
    #     i += 1
    # plt.show()
    test = np.zeros([len(full_data) - training_set_split - lookback_window, 1])
    i = 1
    # Train one LSTM per IMF and collect the per-IMF test predictions.
    for emd_imf in emd_imfs:
        print('-' * 45)
        print('This is ' + str(i) + ' time(s)')
        print('*' * 45)
        data_imf = data_split_LSTM(
            data_split(imf_data(emd_imf, 1), training_set_split,
                       lookback_window))
        test += data_imf[3]
        model = EEMD_LSTM_Model(data_imf[0], data_imf[1], i)
        # model.save('EEMD-LSTM-imf' + str(i) + '.h5')
        emd_prediction_Y = model.predict(data_imf[2])
        emd_imfs_prediction.append(emd_prediction_Y)
        i += 1
    emd_imfs_prediction = np.array(emd_imfs_prediction)
    emd_prediction = [0.0 for i in range(len(test))]
    emd_prediction = np.array(emd_prediction)
    # The hybrid forecast is the sum of the per-IMF forecasts.
    for i in range(len(test)):
        emd_t = 0.0
        for emd_imf_prediction in emd_imfs_prediction:
            emd_t += emd_imf_prediction[i][0]
        emd_prediction[i] = emd_t
    emd_prediction = scaler.inverse_transform(emd_prediction.reshape(
        -1, 1)).reshape(-1, )
    result += '\n\nMAE_emd_lstm: {}'.format(MAE1(y_real, emd_prediction))
    result += '\nRMSE_emd_lstm: {}'.format(RMSE1(y_real, emd_prediction))
    result += '\nMAPE_emd_lstm: {}'.format(MAPE1(y_real, emd_prediction))
    result += '\nR2_emd_lstm: {}'.format(R2(y_real, emd_prediction))
    ################################################################ EEMD_LSTM
    # eemd = EEMD()
    # # eemd.noise_seed(12345)
    # eemd_imfs = eemd.eemd(full_data.reshape(-1), None, 8)
    # eemd_imfs_prediction = []
    #
    # # i = 1
    # # plt.rc('font', family='Times New Roman')
    # # plt.subplot(len(eemd_imfs) + 1, 1, i)
    # # plt.plot(full_data, color='black')
    # # plt.ylabel('Signal')
    # # plt.title('EEMD')
    # # for imf in eemd_imfs:
    # #     plt.subplot(len(eemd_imfs) + 1, 1, i + 1)
    # #     plt.plot(imf, color='black')
    # #     plt.ylabel('IMF ' + str(i))
    # #     i += 1
    # #
    # # plt.savefig('result_imf.png')
    # # plt.show()
    #
    # test = np.zeros([len(full_data) - training_set_split - lookback_window, 1])
    #
    # i = 1
    # for imf in eemd_imfs:
    #     print('-' * 45)
    #     print('This is ' + str(i) + ' time(s)')
    #     print('*' * 45)
    #
    #     data_imf = data_split_LSTM(data_split(imf_data(imf, 1), training_set_split, lookback_window))
    #
    #     test += data_imf[3]
    #
    #     model = EEMD_LSTM_Model(data_imf[0], data_imf[1], i)
    #
    #     prediction_Y = model.predict(data_imf[2])
    #     eemd_imfs_prediction.append(prediction_Y)
    #     i += 1
    #
    # eemd_imfs_prediction = np.array(eemd_imfs_prediction)
    #
    # eemd_prediction = [0.0 for i in range(len(test))]
    # eemd_prediction = np.array(eemd_prediction)
    # for i in range(len(test)):
    #     t = 0.0
    #     for imf_prediction in eemd_imfs_prediction:
    #         t += imf_prediction[i][0]
    #     eemd_prediction[i] = t
    #
    # eemd_prediction = scaler.inverse_transform(eemd_prediction.reshape(-1, 1)).reshape(-1, )
    #
    # result += '\n\nMAE_eemd_lstm: {}'.format(MAE1(y_real, eemd_prediction))
    # result += '\nRMSE_eemd_lstm: {}'.format(RMSE1(y_real, eemd_prediction))
    # result += '\nMAPE_eemd_lstm: {}'.format(MAPE1(y_real, eemd_prediction))
    # result += '\nR2_eemd_lstm: {}'.format(R2(y_real, eemd_prediction))
    ################################################################ CEEMDAN_LSTM
    ceemdan = CEEMDAN()
    ceemdan_imfs = ceemdan.ceemdan(full_data.reshape(-1), None, 8)
    ceemdan_imfs_prediction = []
    # i = 1
    # plt.rc('font', family='Times New Roman')
    # plt.subplot(len(ceemdan_imfs) + 1, 1, i)
    # plt.plot(full_data, color='black')
    # plt.ylabel('Signal')
    # plt.title('CEEMDAN')
    # for imf in ceemdan_imfs:
    #     plt.subplot(len(ceemdan_imfs) + 1, 1, i + 1)
    #     plt.plot(imf, color='black')
    #     plt.ylabel('IMF ' + str(i))
    #     i += 1
    #
    # # plt.savefig('result_imf.png')
    # plt.show()
    test = np.zeros([len(full_data) - training_set_split - lookback_window, 1])
    i = 1
    # Same per-IMF LSTM scheme as above, on CEEMDAN components.
    for imf in ceemdan_imfs:
        print('-' * 45)
        print('This is ' + str(i) + ' time(s)')
        print('*' * 45)
        data_imf = data_split_LSTM(
            data_split(imf_data(imf, 1), training_set_split, lookback_window))
        test += data_imf[3]
        model = EEMD_LSTM_Model(data_imf[0], data_imf[1],
                                i)  # [X_train, Y_train, X_test, y_test]
        prediction_Y = model.predict(data_imf[2])
        ceemdan_imfs_prediction.append(prediction_Y)
        i += 1
    ceemdan_imfs_prediction = np.array(ceemdan_imfs_prediction)
    ceemdan_prediction = [0.0 for i in range(len(test))]
    ceemdan_prediction = np.array(ceemdan_prediction)
    for i in range(len(test)):
        t = 0.0
        for imf_prediction in ceemdan_imfs_prediction:
            t += imf_prediction[i][0]
        ceemdan_prediction[i] = t
    ceemdan_prediction = scaler.inverse_transform(
        ceemdan_prediction.reshape(-1, 1)).reshape(-1, )
    result += '\n\nMAE_ceemdan_lstm: {}'.format(
        MAE1(y_real, ceemdan_prediction))
    result += '\nRMSE_ceemdan_lstm: {}'.format(
        RMSE1(y_real, ceemdan_prediction))
    result += '\nMAPE_ceemdan_lstm: {}'.format(
        MAPE1(y_real, ceemdan_prediction))
    result += '\nR2_ceemdan_lstm: {}'.format(R2(y_real, ceemdan_prediction))
    ################################################## evaluation
    print(result)
    ### =============== Plotting ===============
    # plt.rc('font', family='Times New Roman')
    # plt.figure(1, figsize=(15, 5))
    # plt.plot(y_real , 'black', label='true', linewidth=2.5, linestyle='--', marker='.')
    # plt.plot(predict_svr, 'tan', label='SVR', linewidth=1)
    # plt.plot(predict_bp, 'indianred', label='bp', linewidth=1)
    # plt.plot(predict_elm, 'khaki', label='elm', linewidth=1)
    # plt.plot(predict_LSTM, 'lightsteelblue', label='lstm', linewidth=1)
    # plt.plot(emd_prediction, 'seagreen', label='EMD-LSTM', linewidth=1)
    # # plt.plot(eemd_prediction, 'r', label='EEMD-LSTM', linewidth=2.5, linestyle='--', marker='^', markersize=6)
    # plt.plot(ceemdan_prediction, 'darkred', label='CEEMDAN-LSTM', linewidth=2.5, linestyle='--', marker='^',markersize=6)
    # plt.grid(True, linestyle=':', color='gray', linewidth='0.5', axis='both')
    # plt.xlabel('time(days)', fontsize=18)
    # plt.ylabel('height(mm)', fontsize=18)
    # plt.title('563')
    # plt.legend(loc='best')
    # # plt.show()
    plt.rc('font', family='Times New Roman')
    plt.figure(1, figsize=(15, 5))
    plt.plot(y_real, 'black', linewidth=2.5, linestyle='--', marker='.')
    plt.plot(predict_svr, 'tan', linewidth=1)
    plt.plot(predict_bp, 'indianred', linewidth=1)
    plt.plot(predict_elm, 'khaki', linewidth=1)
    plt.plot(predict_LSTM, 'lightsteelblue', linewidth=1)
    plt.plot(emd_prediction, 'seagreen', linewidth=1)
    # plt.plot(eemd_prediction, 'r', linewidth=2.5, linestyle='--', marker='^', markersize=2)
    plt.plot(ceemdan_prediction,
             'red',
             linewidth=2.5,
             linestyle='--',
             marker='^',
             markersize=6)
    # NOTE(review): no label= kwargs are passed to the active plt.plot calls,
    # so this legend will be empty — confirm whether labels were intended.
    plt.legend(loc='best')
    plt.show()
def processSignal(db, signal, Fs, plot, label, file, episode, channel=1,
                  Te=5):
    """ Extracts features from the signal

    Arguments:
        db {str} -- name of the database
        signal {array} -- signal
        Fs {int} -- sampling frequency
        plot {bool} -- plot or not ?
        label {[type]} -- annotation
        file {int} -- file number
        episode {int} -- episode number

    Keyword Arguments:
        channel {int} -- channel number (default: {1})
        Te {int} -- episode length (default: {5})

    Side effects: pickles a Features record via saveToPickle; returns None.
    Returns early (nothing saved) when the window is a flat line.
    """
    Le = 5  # need to ignore this for now
    La = Te  # episode length
    alpha = 0.05  # parameters used in paper
    beta = 0.02
    delL = Le - La + 1  # ignore this for now, should be 1
    nSamplesInWindow = Te * Fs  # samples in a window
    # NOTE(review): thetaIMF/theta12/theta23, plot, delL and nSamplesInWindow
    # are assigned but never used below — confirm they are leftovers.
    thetaIMF = 0.0
    # thetaR = 0.0
    theta12 = 0.0
    theta23 = 0.0
    # (1) Choose a segment xi(n) of ECG signal of duration Te-sec containing N samples.
    window = signal[:]
    window2 = filtering(signal=window, Fs=Fs,
                        lowPassFc=20)  # filtering and preprocessing
    # computing Empirical Mode Decomposition
    IMF = EMD().emd(window2)
    IMF1 = IMF[0]  # first IMF
    Vn = alpha * np.max(window2)  # please refer to paper for this section
    # Sum squared IMF1/signal energy only where |IMF1| is below threshold Vn.
    sumIMF1sq = 0
    sumXsq = 0
    for j in range(len(window2)):
        if (abs(IMF1[j]) <= Vn):
            sumIMF1sq += IMF1[j]**2
            sumXsq += window2[j]**2
    if (abs(sumXsq - 0.0) < 1e-7):
        return  # there is no valid ecg signal, just flat line signal
    NLCR = sumIMF1sq / sumXsq  # noise-level criterion ratio
    if (NLCR <= beta):  # still noisy
        IMF1 = IMF[0] + IMF[1]
        try:  # (this is unncessary now-)
            IMF2 = IMF[1]
        except:
            IMF2 = np.zeros(len(IMF1))
        try:  # (this is unncessary now-)
            IMF3 = IMF[2]
        except:
            IMF3 = np.zeros(len(IMF1))
        R = window2 - IMF1  # computing residue
    else:  # less noisy
        IMF1 = IMF[0]
        try:  # (this is unncessary now-)
            IMF2 = IMF[1]
        except:
            IMF2 = np.zeros(len(IMF1))
        try:  # (this is unncessary now-)
            IMF3 = IMF[2]
        except:
            IMF3 = np.zeros(len(IMF1))
        R = window2 - IMF1 - IMF2  # computing residue
    Signal_FFT = fftshift(fft(window2))  # computing FFT of signal
    IMF1_FFT_CMPLX = fftshift(fft(IMF1))  # computing FFT of IMF1
    R_FFT_CMPLX = fftshift(fft(R))  # computing FFT of Residue
    IMF1_Sim = cosineSimilarity(
        window2, IMF1)  # cosine similarity of signal and IMF1 (unnecessary)
    R_Sim = cosineSimilarity(
        window2, R)  # cosine similarity of signal and R (unnecessary)
    IMF12 = cosineSimilarity(
        IMF1, IMF2)  # cosine similarity between of IMF1 and IMF2 (unnecessary)
    IMF23 = cosineSimilarity(
        IMF2, IMF3)  # cosine similarity between of IMF2 and IMF3 (unnecessary)
    # extracted featrues
    newData = Features(Signal_FFT=Signal_FFT,
                       Fs=Fs,
                       IMF1_FFT=IMF1_FFT_CMPLX,
                       R_FFT=R_FFT_CMPLX,
                       imf1_Sim=IMF1_Sim,
                       R_Sim=R_Sim,
                       label=label,
                       file=file,
                       episode=episode,
                       channel=channel,
                       imf12=IMF12,
                       imf23=IMF23)
    # saving the features
    saveToPickle(db, data=newData, file=file, episode=episode, channel=channel)
def getEMDs(signal):
    """Run empirical mode decomposition on *signal* and return its IMFs."""
    return EMD()(signal)
def test_get_imfs_and_residue_without_running(self):
    """Accessing IMFs/residue before any decomposition must raise ValueError."""
    decomposer = EMD()
    with self.assertRaises(ValueError):
        _imfs, _residue = decomposer.get_imfs_and_residue()
def create_emd_from_lightcurve(self, label, figsize=(20, 28)):
    """Render per-frame EMD plots for one lightcurve region and combine them.

    Reads ``complete_lcurves.csv`` from ``self.global_save_path``, decomposes
    column *label* with EMD, draws every IMF plus the original signal with a
    moving time cursor, saves one image per observation (parallelised across
    ``self.n_cpus`` processes), then stitches each frame next to its source
    picture via ``imgrecomb_any``.

    Keyword Arguments:
        figsize -- figure size forwarded to imgrecomb_any (default (20, 28))
    """
    from matplotlib import gridspec as gs
    from PyEMD import EMD
    import pandas as pd
    import numpy as np
    import matplotlib.pyplot as plt
    from pandas.plotting import register_matplotlib_converters
    register_matplotlib_converters()
    import glob
    from datetime import timedelta
    from matplotlib import rc
    from multiprocessing import Process
    emd = EMD()
    font = {"family": "DejaVu", "weight": "normal", "size": 16}
    rc("font", **font)
    # Map the flat label number to the (row, col) picture-folder naming scheme.
    numpyrow, numpycol = self.translate_number(int(label), self.nrowcol)
    picture_folder_id = f"{numpycol:02d}_{numpyrow:02d}"
    path_to_lcurves = self.global_save_path
    path_to_picture = f"{self.global_save_path}{picture_folder_id}/"
    dataset = pd.read_csv(f"{path_to_lcurves}complete_lcurves.csv")
    time = pd.to_datetime(dataset["Time"])
    region = dataset[f"{label}"]
    imfs = emd(region.values)
    image_list = sorted(glob.glob(f"{path_to_picture}*.png"))
    # Split the frame indices into one chunk per worker process.
    vector_index_div = np.array_split(np.array(np.arange(len(image_list))),
                                      self.n_cpus)
    proc = []
    colors = ["black", "blue", "red"]
    fig = plt.figure(figsize=(10, 20))
    time_curr = time[0]
    lineset = {}  # vertical cursor lines, keyed per-axes, moved per frame
    min_height = 0
    max_height = 0
    # Common y-range over all IMFs except the last (the residual).
    for index, i in enumerate(imfs):
        if index < len(imfs) - 1:
            if i.min() < min_height:
                min_height = i.min()
            if i.max() > max_height:
                max_height = i.max()
    # Draw all IMFs
    for index, i in enumerate(imfs):
        if index != len(imfs) - 1:
            fig.add_subplot(len(imfs), 1, len(imfs) - 1 - index)
            ax = plt.gca()
            plt.plot(time, i, color=colors[0], linewidth=4,
                     label=f"IMF {index}")
            ybot, ytop = ax.get_ylim()
            delta_y = ytop - ybot
            move_by = (max_height - min_height) / 2
            ax.set_ylim(ybot - move_by, ytop + move_by)
            ax.yaxis.set_label_position("right")
            # NOTE(review): this second set_ylim overrides the one above.
            ax.set_ylim(min_height, max_height)
            ax.set(ylabel=f"IMF {index + 1}", xlabel="", xticks=[])
            lineset[f"{index}"] = ax.axvline(x=time_curr,
                                             color="black",
                                             alpha=0.4,
                                             linewidth=5)
    # Draw original signal + last IMF (residual)
    fig.add_subplot(len(imfs), 1, len(imfs))
    ax1 = plt.gca()
    plt.plot(time, region, color=colors[1], linewidth=5, label="Signal")
    plt.plot(
        time,
        imfs[len(imfs) - 1],
        color=colors[2],
        linestyle="dashed",
        label="Residual",
        linewidth=4,
    )
    lineset["signal"] = ax1.axvline(x=time_curr,
                                    color="black",
                                    alpha=0.4,
                                    linewidth=5)
    ax1.set(ylabel=f"Original Signal")
    import matplotlib.dates as mdates
    import matplotlib.units as munits
    import datetime
    converter = mdates.ConciseDateConverter()
    munits.registry[np.datetime64] = converter
    munits.registry[datetime.date] = converter
    munits.registry[datetime.datetime] = converter
    plt.gcf().autofmt_xdate()
    plt.legend()
    ax1.yaxis.set_label_position("right")
    save_emd = f"{path_to_picture}/emd_results/"
    os.makedirs(save_emd, exist_ok=True)

    def multi_proc(index_sublist, save_path=save_emd):
        # Worker body: move every cursor line to the current frame's time,
        # redraw the shared figure and save one PNG per frame index.
        for curr_obs in index_sublist:
            # Draw all of the lines#
            for k in list(lineset):
                lineset[k].set_xdata(time[curr_obs])
            fig.canvas.draw()
            plt.savefig(f"{save_path}{curr_obs:03d}")

    for list_indices_proc in vector_index_div:
        if list_indices_proc.size:
            # noinspection PyTypeChecker
            pr = Process(target=multi_proc, args=([list_indices_proc]))
            proc.append(pr)
            pr.start()
    for process in proc:
        process.join()
    # Then combine all of these
    from Scripts.Imports.Data_analysis.Tools import imgrecomb_any
    imgrecomb_any(
        path_1=f"{path_to_picture}",
        path_2=f"{save_emd}",
        save_path=f"{path_to_picture}with_EMD/",
        figsize=figsize,
    )
def test_imfs_and_residue_accessor2(self):
    """A fresh EMD instance has no IMFs/residue: the accessor must raise."""
    fresh = EMD()
    with self.assertRaises(ValueError):
        imfs, residue = fresh.get_imfs_and_residue()
#close_column = close_column.values
# FIX: Series.as_matrix() was deprecated in pandas 0.23 and removed in
# pandas 1.0; to_numpy() is the supported replacement and returns the
# same ndarray.
time_column = time_column.to_numpy()
close_column = close_column.to_numpy()
#time_column = time_column[:200]
#close_column = close_column[:200]
#print (time_column)
#print (close_column)

# Define signal
#t = np.linspace(0, 1, 200)
#s = np.cos(11*2*np.pi*t*t) + 6*t*t
t = time_column
s = close_column

# Execute EMD on signal
IMF = EMD().emd(s, t)
N = IMF.shape[0] + 1  # one panel for the input plus one per IMF

# Plot results: input signal on top, each IMF below it.
plt.subplot(N, 1, 1)
plt.plot(t, s, 'r')
plt.title("Input signal: AAPL close price")
plt.xlabel("Time [s]")
for n, imf in enumerate(IMF):
    plt.subplot(N, 1, n + 2)
    plt.plot(t, imf, 'g')
    plt.title("IMF " + str(n + 1))
    plt.xlabel("Time [s]")
plt.tight_layout()
def pointprediction(
        differsets,
        draw=0):  # forecast the next differ of members in differsets
    """Heuristically forecast the next value of each series in *differsets*.

    Each series is EMD-decomposed; for every IMF the trend is extrapolated
    from its cleaned extrema and the per-IMF forecasts are summed.

    Returns:
        draw == 1 -> (forecast_value_vector, forecast_result) after plotting
                     debug figures for the LAST processed series.
        otherwise -> (forecast_result, imfset, imfsumset).
    Note the two branches return different tuples — callers must match on
    the value of ``draw``.
    """
    emd = EMD()
    forecast_result = []
    imfset = []      # first IMF of every series, for inspection
    imfsumset = []   # per-series list of each IMF's sum
    for loop in np.arange(len(differsets)):
        imfs = emd(differsets[loop])  # do the EMD
        nimfs = len(imfs)
        imfset.append(imfs[0])
        extrema_upper_index_vector = []  # record, make no sense
        extrema_lower_index_vector = []
        forecast_value_vector = []
        imfsums = []
        for n in np.arange(
                nimfs):  # try to figure out the trend and give prediction
            #---------wash the extremas----------------------------------------------------
            extrema_upper_index = extrema(imfs[n],
                                          np.greater_equal)[0]  # max extrema
            neighbours = []
            imfsums.append(np.sum(imfs[n]))
            for i in np.arange(
                    len(extrema_upper_index) -
                    1):  # clean the indexes which close to each other
                if extrema_upper_index[i] - extrema_upper_index[i + 1] == -1:
                    neighbours.append(i)
            extrema_upper_index = np.delete(extrema_upper_index, neighbours)
            # Drop extrema sitting exactly on either boundary of the IMF.
            extrema_upper_index = np.delete(
                extrema_upper_index,
                np.where((extrema_upper_index == 0)
                         | (extrema_upper_index == len(imfs[n]) - 1)))
            neighbours = []
            extrema_lower_index = extrema(imfs[n],
                                          np.less_equal)[0]  # min exrema
            for i in np.arange(
                    len(extrema_lower_index) -
                    1):  # clean the indexes which close to each other
                if extrema_lower_index[i] - extrema_lower_index[i + 1] == -1:
                    neighbours.append(i)
            extrema_lower_index = np.delete(extrema_lower_index, neighbours)
            # NOTE(review): len(imfs[n] - 1) - 1 evaluates to len(imfs[n]) - 1
            # (elementwise subtraction keeps the length), so this matches the
            # upper-extrema filter, but the parenthesis placement looks like a
            # typo for len(imfs[n]) - 1 — confirm before "fixing".
            extrema_lower_index = np.delete(
                extrema_lower_index,
                np.where((extrema_lower_index == 0)
                         | (extrema_lower_index == len(imfs[n] - 1) - 1)))
            if draw == 1:
                extrema_upper_index_vector.append(extrema_upper_index)
                extrema_lower_index_vector.append(extrema_lower_index)
            #------------------------ the derivation starts from here---------------------
            #--some basic calculations --------#
            extrema_upper_value = imfs[n][extrema_upper_index]
            extrema_lower_value = imfs[n][extrema_lower_index]
            extremas = np.unique(
                np.hstack([extrema_upper_index, extrema_lower_index]))
            if extremas.any():
                last_extrema = extremas[-1]
            else:
                last_extrema = len(imfs[n]) - 1
            if len(extrema_upper_index) + len(
                    extrema_lower_index) <= 0:  # if there is no real extrema
                distance = last_extrema  # means that there is no enough extremas to do the calculation
                amplitude_upper_ema = max(imfs[n])
                amplitude_lower_ema = min(imfs[n])
                step = abs(amplitude_upper_ema -
                           amplitude_lower_ema) / distance
                forecast_value = imfs[n][-1] + step * (
                    imfs[n][-1] - imfs[n][-2]) / abs(
                        (imfs[n][-1] - imfs[n][-2]))  # just extend the tread
            elif len(extrema_upper_index) + len(
                    extrema_lower_index) == 1:  # if there is only one extrema
                distance = len(imfs[n]) - last_extrema
                amplitude_upper_ema = max(imfs[n][last_extrema], imfs[n][-1])
                amplitude_lower_ema = min(imfs[n][last_extrema], imfs[n][-1])
                step = abs(amplitude_upper_ema -
                           amplitude_lower_ema) / distance
                #reference_amplitude = abs(imfs[n][-1]) + 2 * step
                forecast_value = imfs[n][-1] + step * (
                    imfs[n][-1] - imfs[n][-2]) / abs(
                        (imfs[n][-1] -
                         imfs[n][-2]))  # also, extend is the best way
            else:  # if there are more than two extremas
                amplitude_upper_ema = ema(
                    extrema_upper_value,
                    alpha=0.6)  # whether use ema is a good thing here?
                amplitude_lower_ema = ema(
                    extrema_lower_value,
                    alpha=0.6)  # whether use ema is a good thing here?
                nextremas = min(len(extrema_lower_index),
                                len(extrema_upper_index))
                distance_set = abs(extrema_upper_index[-nextremas:] -
                                   extrema_lower_index[-nextremas:])
                distance = ema(
                    distance_set, alpha=0.6
                )  # here as well, not so sure whether ema is better though
                step = abs(amplitude_upper_ema -
                           amplitude_lower_ema) / distance
                # Blend the smoothed amplitudes with the last extremum value.
                reference_amplitude = abs(amplitude_lower_ema) * 0.25 + abs(
                    amplitude_upper_ema) * 0.25 + abs(
                        imfs[n][last_extrema]) * 0.5
                if imfs[n][-1] * imfs[n][
                        last_extrema] < 0:  # if the last point has already crossed the axis
                    if abs(imfs[n][-1]) >= 0.8 * reference_amplitude and abs(
                            imfs[n][-1]) + step > 1.3 * reference_amplitude:
                        forecast_value = imfs[n][-1] + step * (
                            -abs(imfs[n][-1]) / imfs[n][-1])
                    else:
                        forecast_value = reference_amplitude * (
                            abs(imfs[n][-1]) / imfs[n][-1])
                else:
                    forecast_value = imfs[n][-1] + step * (
                        imfs[n][-1] - imfs[n][-2]) / abs(
                            (imfs[n][-1] - imfs[n][-2]))
                    if abs(forecast_value) >= abs(imfs[n][last_extrema]) * 1.1:
                        # Clamp runaway extrapolation toward the axis.
                        forecast_value = abs(imfs[n][last_extrema]) * 1.1 * (
                            -abs(imfs[n][-1]) / imfs[n][-1])
            forecast_value_vector.append(forecast_value)
        imfsumset.append(imfsums)
        forecast_result.append((sum(forecast_value_vector)))
        #-------------------------the derivation is done------------------------------
    # a drawing to show some result for bugging
    # NOTE(review): this uses imfs/extrema_*_vector from the LAST loop
    # iteration only — presumably intentional for debugging; confirm.
    if draw == 1:
        size = imfs.shape
        x = np.arange(len(differsets[0]))
        plt.figure()
        plt.plot(x,
                 differsets[0],
                 marker='.',
                 markerfacecolor='blue',
                 markersize=6)
        plt.show()
        plt.figure(figsize=(20, 18))
        for loop in range(1, size[0] + 1):
            plt.subplot(size[0], 1, loop)
            plt.plot(x,
                     imfs[loop - 1],
                     marker='.',
                     markerfacecolor='blue',
                     markersize=6)
            plt.scatter(extrema_upper_index_vector[loop - 1],
                        imfs[loop - 1][extrema_upper_index_vector[loop - 1]],
                        c='red',
                        marker='+',
                        s=50)
            plt.scatter(extrema_lower_index_vector[loop - 1],
                        imfs[loop - 1][extrema_lower_index_vector[loop - 1]],
                        marker='+',
                        color='green',
                        s=50)
            plt.scatter(x[-1] + 1,
                        forecast_value_vector[loop - 1],
                        marker='o',
                        c='black',
                        s=50)
            plt.hlines(0, 0, len(differsets[0]), colors="black",
                       linestyles="--")
            plt.title(loop)
        plt.show()
        return forecast_value_vector, forecast_result
    return forecast_result, imfset, imfsumset
from scipy import fftpack

# Use a CJK-capable font and keep the minus sign renderable.
mpl.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False

# Prepare the data: load the k300508 table and take its close prices.
stk_df = get_total_table_data(conn_k, 'k300508')
s = np.array(stk_df.close)
x_axis = range(0, len(s))

# Define signal
# t = np.linspace(0, 1, 200)
# s = np.cos(11*2*np.pi*t*t) + 6*t*t

# Run EMD on the signal.
IMF = EMD().emd(s)
N = IMF.shape[0] + 1

# Plot the results.
plt.subplot(N + 1, 1, 1)
plt.plot(x_axis, s, 'r')
# plt.title("原始数据")

# Compute the Hilbert transform of the first IMF.
# plt.xlabel("日期")
ht = fftpack.hilbert(IMF[0])
plt.subplot(N + 1, 1, 2)
plt.plot(x_axis, ht)
# One panel per IMF below the signal and its Hilbert transform.
for n, imf in enumerate(IMF):
    plt.subplot(N + 1, 1, n + 3)
def aa(signal):
    """Decompose *signal* with EMD and return the array of IMFs."""
    decomposer = EMD()
    return decomposer(signal)
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 28 20:25:32 2020

@author: Dell

Loads one channel of the PerCom18 STL raw-data .mat file, decomposes the
first 500 samples with EMD and visualises the IMFs, residue and
instantaneous frequencies.
"""
# FIX: EMD was imported three times (twice alone, once with Visualisation)
# and consolidated imports keep CEEMDAN available as before.
import numpy as np
import scipy.io

from PyEMD import CEEMDAN, EMD, Visualisation

mat = scipy.io.loadmat(
    'F:/Masters/Thesis/Dataset/crossposition-activity-recognition/Dataset_PerCom18_STL/Dataset_PerCom18_STL/LOSO Feature/raw_data_user_1.mat'
)
data = mat['raw_data_user_1']
# First 500 samples of the first column/channel.
x = data[0:500, 0]

# Extract imfs and residue
# In case of EMD
t = np.arange(0, 5, 0.01)  # 500 time stamps at 10 ms, matching len(x)
emd = EMD()
emd.emd(x)
imfs, res = emd.get_imfs_and_residue()

vis = Visualisation()
vis.plot_imfs(imfs=imfs, residue=res, t=t, include_residue=True)
vis.plot_instant_freq(t, imfs=imfs)
vis.show()
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score data = pd.read_csv('dataset-2007.csv', usecols=['wind direction at 100m (deg)', 'wind speed at 100m (m/s)', 'air temperature at 2m (K)', 'surface air pressure (Pa)', 'density at hub height (kg/m^3)'], skiprows=3) data.columns = ['direction', 'speed', 'temp', 'pressure', 'density'] data.head(3) D=data['speed'].values T=D[0:105120] F=T[0:2000] from PyEMD import EMD IMF = EMD().emd(F) N = IMF.shape[0]+1 # Plot results plt.figure(figsize=(12,9)) plt.subplot(N,1,1) plt.plot(F, 'r') plt.title("Input signal") plt.xlabel("Time [s]") for n, imf in enumerate(IMF): plt.figure(figsize=(12,9)) plt.subplot(N,1,n+2) plt.plot(imf, 'g') plt.title("IMF "+str(n+1)) plt.xlabel("Time [s]")
def _emd(self):
    """Decompose ``self._data`` via EMD and return the resulting IMFs."""
    return EMD()(self._data)
def test_bound_extrapolation_simple(self):
    """Check boundary-extrema mirroring of prepare_points with the
    'simple' extrema-detection mode and nbsym=1 for all four edge cases."""
    emd = EMD()
    emd.extrema_detection = "simple"
    emd.nbsym = 1
    emd.DTYPE = np.int64
    S = [0,-3, 1, 4, 3, 2,-2, 0, 1, 2, 1, 0, 1, 2, 5, 4, 0,-2,-1]
    S = np.array(S)
    T = np.arange(len(S))
    pp = emd.prepare_points
    # There are 4 cases for both (L)eft and (R)ight ends. In case of left (L) bound:
    # L1) ,/   -- ext[0] is min, s[0] < ext[1] (1st max)
    # L2) /    -- ext[0] is min, s[0] > ext[1] (1st max)
    # L3) ^.   -- ext[0] is max, s[0] > ext[1] (1st min)
    # L4) \    -- ext[0] is max, s[0] < ext[1] (1st min)
    ## CASE 1
    # L1, R1 -- no edge MIN & no edge MIN
    s = S.copy()
    t = T.copy()
    maxPos, maxVal, minPos, minVal, nz = emd.find_extrema(t, s)
    # Should extrapolate left and right bounds
    maxExtrema, minExtrema = pp(t, s, \
            maxPos, maxVal, minPos, minVal)
    # Mirrored positions outside [0, len(S)) prove extrapolation happened.
    self.assertEqual([-1,3,9,14,20], maxExtrema[0].tolist())
    self.assertEqual([4,4,2,5,5], maxExtrema[1].tolist())
    self.assertEqual([-4,1,6,11,17,23], minExtrema[0].tolist())
    self.assertEqual([-2,-3,-2,0,-2,0], minExtrema[1].tolist())
    ## CASE 2
    # L2, R2 -- edge MIN, edge MIN
    s = S[1:-1].copy()
    t = T[1:-1].copy()
    maxPos, maxVal, minPos, minVal, nz = emd.find_extrema(t, s)
    # Should extrapolate left and right bounds
    maxExtrema, minExtrema = pp(t, s, \
            maxPos, maxVal, minPos, minVal)
    self.assertEqual([-1,3,9,14,20], maxExtrema[0].tolist())
    self.assertEqual([4,4,2,5,5], maxExtrema[1].tolist())
    self.assertEqual([1,6,11,17], minExtrema[0].tolist())
    self.assertEqual([-3,-2,0,-2], minExtrema[1].tolist())
    ## CASE 3
    # L3, R3 -- no edge MAX & no edge MAX
    s, t = S[2:-3], T[2:-3]
    maxPos, maxVal, minPos, minVal, nz = emd.find_extrema(t, s)
    # Should extrapolate left and right bounds
    maxExtrema, minExtrema = pp(t, s, \
            maxPos, maxVal, minPos, minVal)
    self.assertEqual([-3,3,9,14,19], maxExtrema[0].tolist())
    self.assertEqual([2,4,2,5,2], maxExtrema[1].tolist())
    self.assertEqual([0,6,11,17], minExtrema[0].tolist())
    self.assertEqual([-2,-2,0,0], minExtrema[1].tolist())
    ## CASE 4
    # L4, R4 -- edge MAX & edge MAX
    s, t = S[3:-4], T[3:-4]
    maxPos, maxVal, minPos, minVal, nz = emd.find_extrema(t, s)
    # Should extrapolate left and right bounds
    maxExtrema, minExtrema = pp(t, s, \
            maxPos, maxVal, minPos, minVal)
    self.assertEqual([3,9,14], maxExtrema[0].tolist())
    self.assertEqual([4,2,5], maxExtrema[1].tolist())
    self.assertEqual([0,6,11,17], minExtrema[0].tolist())
    self.assertEqual([-2,-2,0,0], minExtrema[1].tolist())