import time

from keras.wrappers.scikit_learn import KerasClassifier  # or tensorflow.keras.wrappers.scikit_learn, depending on the installed Keras version

import ann_settings
import preprocessor_fft_pca
# dp is the project's data-loading / validation helper module, imported under this alias


def run(n_modes=1, fault_prop=.5, pcs=5200, repetitions=1, filename='FFT-PCA-ANN', batchsize=512):
    normadf, faultdf = dp.load_df(n_modes, fault_prop)

    # Pre-processing (FFT followed by PCA), timed separately from training/validation
    pre_process_init = time.perf_counter()
    X, y = preprocessor_fft_pca.df_fft_pca(normadf, faultdf, pcs)
    pre_process_finish = time.perf_counter()
    pre_proc_time = pre_process_finish - pre_process_init

    # Set up the classifier: a binary ANN whose input layer matches the number of principal components
    ann_settings.inputsize = pcs
    estimator = KerasClassifier(build_fn=ann_settings.bin_baseline_model,
                                epochs=20, batch_size=batchsize, verbose=0)

    dp.validation(X, y, estimator, repetitions, n_modes, pre_proc_time, fault_prop,
                  filename, pcs=pcs, batchsize=batchsize)
import time

from keras.wrappers.scikit_learn import KerasClassifier

import ann_settings
import preprocessor_pca


def run(n_modes=1, fault_prop=.5, pcs=52, repetitions=1, filename='PCA-ANN', batchsize=512):
    # Load data
    normal_data, fault1_df = dp.load_df(n_modes, fault_prop)

    # Pre-processing: apply PCA, timed separately from training/validation
    pre_process_init = time.perf_counter()
    X, y = preprocessor_pca.df_pca(normal_data, fault1_df, pcs, dp.colNames)
    pre_process_finish = time.perf_counter()
    pre_proc_time = pre_process_finish - pre_process_init

    # Set up the classifier
    ann_settings.inputsize = pcs
    estimator = KerasClassifier(build_fn=ann_settings.bin_baseline_model,
                                epochs=20, batch_size=batchsize, verbose=0)

    dp.validation(X, y, estimator, repetitions, n_modes, pre_proc_time, fault_prop,
                  filename, pcs=pcs, batchsize=batchsize)
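# The ANN variants call ann_settings.bin_baseline_model as the Keras build_fn, with
# ann_settings.inputsize set to the number of input features beforehand. That builder is not
# shown in this section; the sketch below only illustrates what a binary baseline model of this
# kind could look like (layer sizes and optimizer are assumptions, not the project's settings).
from keras.models import Sequential
from keras.layers import Dense

inputsize = 52  # overwritten by the run() functions before the model is built


def bin_baseline_model():
    # Single hidden layer followed by a sigmoid output for the binary normal/failure decision
    model = Sequential()
    model.add(Dense(inputsize, input_dim=inputsize, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model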
import time

from sklearn.svm import LinearSVC


def run(n_modes=1, fault_prop=.5, repetitions=1, filename='svm_'):
    normadf, faultdf = dp.load_df(n_modes, fault_prop)

    # Add labels that mark whether a given observation is normal (0) or a failure (1)
    pre_process_init = time.perf_counter()
    faultdf['failure'] = 1
    normadf['failure'] = 0

    # Join both classes and shuffle
    # (DataFrame.append was removed in pandas 2.0; pd.concat([normadf, faultdf], ignore_index=True) is the modern equivalent)
    full_df = normadf.append(faultdf, ignore_index=True)
    full_df = full_df.sample(frac=1).reset_index(drop=True)

    # Specify the data: the first 52 feature columns, cast to float
    X = full_df.iloc[:, 0:52].astype(float)

    # Specify the target labels
    # y = np_utils.to_categorical(full_df.iloc[:, 13:14])
    y = full_df['failure']

    pre_process_finish = time.perf_counter()
    pre_proc_time = pre_process_finish - pre_process_init
    print(filename + ' pre-process finished')

    # Set up the classifier
    estimator = LinearSVC(dual=False, verbose=True)

    dp.validation(X, y, estimator, repetitions, n_modes, pre_proc_time, fault_prop, filename)
import time

from sklearn.neighbors import KNeighborsClassifier

import preprocessor_fft_pca


def run(n_modes=1, fault_prop=.5, pcs=5200, repetitions=1, filename='FFT-PCA-KNN', neighbors=5):
    normadf, faultdf = dp.load_df(n_modes, fault_prop)

    # Pre-processing (FFT followed by PCA), timed separately from training/validation
    pre_process_init = time.perf_counter()
    X, y = preprocessor_fft_pca.df_fft_pca(normadf, faultdf, pcs)
    pre_process_finish = time.perf_counter()
    pre_proc_time = pre_process_finish - pre_process_init

    # Set up the classifier
    estimator = KNeighborsClassifier(n_neighbors=neighbors)

    # the 'n_neghbors' keyword spelling follows dp.validation's interface
    dp.validation(X, y, estimator, repetitions, n_modes, pre_proc_time, fault_prop,
                  filename, pcs=pcs, n_neghbors=neighbors)
import time

from sklearn.neighbors import KNeighborsClassifier

import preprocessor_pca


def run(n_modes=1, fault_prop=.5, pcs=52, repetitions=1, filename='PCA-KNN', batchsize=32, neighbors=5):
    # Load data (batchsize is not used by the KNN estimator)
    normal_data, fault1_df = dp.load_df(n_modes, fault_prop)

    # Pre-processing: apply PCA, timed separately from training/validation
    pre_process_init = time.perf_counter()
    X, y = preprocessor_pca.df_pca(normal_data, fault1_df, pcs, dp.colNames)
    pre_process_finish = time.perf_counter()
    pre_proc_time = pre_process_finish - pre_process_init

    # Set up the classifier
    estimator = KNeighborsClassifier(n_neighbors=neighbors)

    # the 'n_neghbors' keyword spelling follows dp.validation's interface
    dp.validation(X, y, estimator, repetitions, n_modes, pre_proc_time, fault_prop,
                  filename, pcs=pcs, n_neghbors=neighbors)
import time

from sklearn.svm import LinearSVC

import preprocessor_pca


def run(n_modes=1, fault_prop=.5, pcs=52, repetitions=1, filename='fft_pca_svm_'):
    # Load data
    normal_data, fault1_df = dp.load_df(n_modes, fault_prop)

    # Pre-processing: apply PCA, timed separately from training/validation
    # (note: despite the default filename, this variant applies plain PCA, not FFT-PCA)
    pre_process_init = time.perf_counter()
    X, y = preprocessor_pca.df_pca(normal_data, fault1_df, pcs, dp.colNames)
    pre_process_finish = time.perf_counter()
    pre_proc_time = pre_process_finish - pre_process_init
    print(filename + ' pre-process finished')

    # Set up the classifier
    estimator = LinearSVC(dual=False, verbose=True)

    dp.validation(X, y, estimator, repetitions, n_modes, pre_proc_time, fault_prop, filename)
import time

from keras.wrappers.scikit_learn import KerasClassifier

import ann_settings


def run(n_modes=1, fault_prop=.5, pcs=52, repetitions=1, filename='ANN', batchsize=512):
    normadf, faultdf = dp.load_df(n_modes, fault_prop)

    # Add labels that mark whether a given observation is normal (0) or a failure (1)
    pre_process_init = time.perf_counter()
    faultdf['failure'] = 1
    normadf['failure'] = 0

    # Join both classes and shuffle
    # (DataFrame.append was removed in pandas 2.0; pd.concat([normadf, faultdf], ignore_index=True) is the modern equivalent)
    full_df = normadf.append(faultdf, ignore_index=True)
    full_df = full_df.sample(frac=1).reset_index(drop=True)

    # Specify the data: the first 52 feature columns, cast to float
    X = full_df.iloc[:, 0:52].astype(float)

    # Specify the target labels
    # y = np_utils.to_categorical(full_df.iloc[:, 13:14])
    y = full_df['failure']

    # Capture pre-processing time
    pre_process_finish = time.perf_counter()
    pre_proc_time = pre_process_finish - pre_process_init

    # Set the ANN input size (no PCA here, so pcs is simply the number of raw input features)
    ann_settings.inputsize = pcs
    estimator = KerasClassifier(build_fn=ann_settings.bin_baseline_model,
                                epochs=20, batch_size=batchsize, verbose=0)

    dp.validation(X, y, estimator, repetitions, n_modes, pre_proc_time, fault_prop,
                  filename, batchsize=batchsize)
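# Hypothetical driver showing how these run() entry points might be invoked to sweep an
# experiment grid. The module names (fft_pca_ann, pca_knn) and the n_modes / fault_prop /
# repetitions values are assumptions for illustration, not the study's actual configuration.
import fft_pca_ann
import pca_knn

for n_modes in (1, 3, 6):
    for fault_prop in (0.1, 0.5):
        fft_pca_ann.run(n_modes=n_modes, fault_prop=fault_prop, repetitions=10)
        pca_knn.run(n_modes=n_modes, fault_prop=fault_prop, repetitions=10, neighbors=5)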