def convert_to_fft(window_length, window_step, fft_min_freq, fft_max_freq, sampling_frequency, file_path):
    """Slide a window over pickled time-series data and compute log-magnitude FFT features.

    Args:
        window_length: window size in seconds.
        window_step: hop between consecutive windows, in seconds.
        fft_min_freq, fft_max_freq: bounds for the retained frequency slice.
        sampling_frequency: samples per second of the recorded signal.
        file_path: path to a pickle holding an array-like of shape
            (channels, samples) — unlike the create_* siblings, the pickle
            here is the raw array itself, not a seizure_type_data wrapper.

    Returns:
        Tuple of (fft_data, basename) where fft_data is a numpy array
        stacking the per-window pipeline outputs.
    """
    warnings.filterwarnings("ignore")
    # Context manager so the file handle is closed promptly (original leaked it).
    # NOTE(review): pickle.load is unsafe on untrusted files — verify provenance.
    with open(file_path, 'rb') as f:
        type_data = pickle.load(f)
    pipeline = Pipeline([FFT(), Slice(fft_min_freq, fft_max_freq), Magnitude(), Log10()])
    time_series_data = type_data
    start, step = 0, int(np.floor(window_step * sampling_frequency))
    stop = start + int(np.floor(window_length * sampling_frequency))
    fft_data = []
    while stop < time_series_data.shape[1]:
        signal_window = time_series_data[:, start:stop]
        fft_window = pipeline.apply(signal_window)
        # BUG FIX: original appended to the garbled name 'fft_dfft_dataata',
        # which raised NameError on the very first window.
        fft_data.append(fft_window)
        start, stop = start + step, stop + step
    fft_data = np.array(fft_data)
    return fft_data, os.path.basename(file_path)
def create_s2(window_length, window_step, fft_min_freq, fft_max_freq, sampling_frequency, file_path):
    """Compute the center-surround-difference (s2) feature map over sliding windows.

    Loads a pickled seizure_type_data record, applies the Center_surround_diff
    pipeline to each (channels, window) slice, and returns a new record carrying
    the original data and s1 plus the freshly computed s2 stack.

    Args:
        window_length: window size in seconds.
        window_step: hop between consecutive windows, in seconds.
        fft_min_freq, fft_max_freq: unused here; kept so all create_*/convert_*
            workers share one signature.
        sampling_frequency: samples per second of the recorded signal.
        file_path: path to a pickled seizure_type_data with a .data array of
            shape (channels, samples).

    Returns:
        Tuple of (named_data, basename of file_path).
    """
    warnings.filterwarnings("ignore")
    # Context manager so the file handle is closed promptly (original leaked it).
    with open(file_path, 'rb') as f:
        type_data = pickle.load(f)
    pipeline = Pipeline([Center_surround_diff()])
    time_series_data = type_data.data
    start, step = 0, int(np.floor(window_step * sampling_frequency))
    stop = start + int(np.floor(window_length * sampling_frequency))
    s2_data = []
    while stop < time_series_data.shape[1]:
        signal_window = time_series_data[:, start:stop]
        window = pipeline.apply(signal_window)
        s2_data.append(window)
        start, stop = start + step, stop + step
    s2_data = np.array(s2_data)
    named_data = seizure_type_data(patient_id=type_data.patient_id, seizure_type=type_data.seizure_type,
                                   data=type_data.data, s1=type_data.s1, s2=s2_data)
    return named_data, os.path.basename(file_path)
def create_s1(window_length, window_step, fft_min_freq, fft_max_freq, sampling_frequency, file_path):
    """Compute the s1 feature map (subtract-average, IFFT, Gaussian smoothing) per window.

    Loads a pickled seizure_type_data record, runs each (channels, window) slice
    through the Substract_average_plus_P_2 -> IFFT -> Smooth_Gaussian pipeline,
    and returns a new record with the stacked s1 windows attached.

    Args:
        window_length: window size in seconds.
        window_step: hop between consecutive windows, in seconds.
        fft_min_freq, fft_max_freq: unused here; kept so all create_*/convert_*
            workers share one signature.
        sampling_frequency: samples per second of the recorded signal.
        file_path: path to a pickled seizure_type_data with a .data array of
            shape (channels, samples).

    Returns:
        Tuple of (named_data, basename of file_path).
    """
    warnings.filterwarnings("ignore")
    # Context manager so the file handle is closed promptly (original leaked it).
    with open(file_path, 'rb') as f:
        type_data = pickle.load(f)
    pipeline = Pipeline([Substract_average_plus_P_2(), IFFT(), Smooth_Gaussian()])
    time_series_data = type_data.data
    start, step = 0, int(np.floor(window_step * sampling_frequency))
    stop = start + int(np.floor(window_length * sampling_frequency))
    s1_data = []
    while stop < time_series_data.shape[1]:
        signal_window = time_series_data[:, start:stop]
        window = pipeline.apply(signal_window)
        s1_data.append(window)
        start, stop = start + step, stop + step
    s1_data = np.array(s1_data)
    named_data = seizure_type_data(patient_id=type_data.patient_id, seizure_type=type_data.seizure_type,
                                   data=type_data.data, s1=s1_data)
    return named_data, os.path.basename(file_path)
def create_d(window_length, window_step, fft_min_freq, fft_max_freq, sampling_frequency, file_path):
    """Fuse the three feature maps (data, s1, s2) into per-window RGB-style images.

    For each sliding window, normalises the corresponding slice of each of the
    three maps, concatenates the normalised windows, and rescales the result to
    the 0-255 range, producing one fused "image" per window.

    Args:
        window_length: window size in seconds.
        window_step: hop between consecutive windows, in seconds.
        fft_min_freq, fft_max_freq: unused here; kept so all create_*/convert_*
            workers share one signature.
        sampling_frequency: samples per second of the recorded signal.
        file_path: path to a pickled seizure_type_data whose .data, .s1 and .s2
            maps are assumed to share the same (channels, samples) shape —
            one window loop indexes all three.

    Returns:
        Tuple of (named_data with the fused stack in .data, basename of file_path).
    """
    warnings.filterwarnings("ignore")
    # Context manager so the file handle is closed promptly (original leaked it).
    with open(file_path, 'rb') as f:
        type_data = pickle.load(f)
    # Three separate pipelines: Concatenation takes a different kind of
    # parameter (three maps), so it cannot be chained with the others.
    pipeline1 = Pipeline([Normalise()])
    pipeline2 = Pipeline([Concatenation()])
    pipeline3 = Pipeline([RGB_0_255()])
    # The three feature maps.
    data_ft = type_data.data
    data_s1 = type_data.s1
    data_s2 = type_data.s2
    start, step = 0, int(np.floor(window_step * sampling_frequency))
    stop = start + int(np.floor(window_length * sampling_frequency))
    d_data = []
    while stop < data_ft.shape[1]:
        # Maps are of the same size & shape, so one loop serves all three.
        window_ft = data_ft[:, start:stop]
        window_s1 = data_s1[:, start:stop]
        window_s2 = data_s2[:, start:stop]
        # Normalise each window.
        window_ft_norm = pipeline1.apply(window_ft)
        window_s1_norm = pipeline1.apply(window_s1)
        window_s2_norm = pipeline1.apply(window_s2)
        # Concatenate the normalised windows, then rescale to 0-255.
        d_norm = pipeline2.apply(window_ft_norm, window_s1_norm, window_s2_norm)
        d_rgb = pipeline3.apply(d_norm)
        d_data.append(d_rgb)
        start, stop = start + step, stop + step
    d_data = np.array(d_data)
    named_data = seizure_type_data(patient_id=type_data.patient_id, seizure_type=type_data.seizure_type,
                                   data=d_data)
    return named_data, os.path.basename(file_path)