"""Compare all classifiers on magnitude/theta features built from three recordings."""
import numpy as np
from sklearn import preprocessing
from sklearn.decomposition import PCA

from evaluation.compare import compare, compare_selected, classifiers_all
from preprocessing.PreProcessor import PreProcessor
from src.filereader.FileReader import FileReader

FILE_PATH_1 = "../../resource/PostureEntry_DMP_Phil_Monday.csv"
FILE_PATH_2 = "../../resource/PostureEntry_DMP_Sergio_Monday.csv"
FILE_PATH_3 = "../../resource/PostureEntry_DMP_Ozan.csv"
N_VALIDATIONS = 30

# Load and concatenate all three recordings.
samples_raw, labels_raw, label_names = FileReader.readAll(
    [FILE_PATH_1, FILE_PATH_2, FILE_PATH_3])

# NOTE: removed dead `scores = []` — it was never appended to or read.

# Reduce raw samples to magnitude/theta features (window parameter 5 —
# exact semantics defined in PreProcessor.magnitude_theta).
samples_filtered, labels_reduced = PreProcessor.magnitude_theta(
    samples_raw, labels_raw, 5)

# Zero-mean / unit-variance scaling before classification.
samples_features = preprocessing.scale(samples_filtered)
# samples_features = PCA().fit_transform(samples_features)

compare(samples_features, labels_reduced, classifiers_all, N_VALIDATIONS)
"""Binary comparison of posture classes 4 and 5 on scaled raw samples."""
import numpy as np
from sklearn import preprocessing

from evaluation.compare import compare
from preprocessing.PreProcessor import PreProcessor
from src.filereader.FileReader import FileReader

FILE_PATH = "../../resource/PostureEntry.csv"
N_VALIDATIONS = 4

samples_raw, labels, label_names = FileReader.read(FILE_PATH)

# Keep only classes 4 and 5. BUG FIX: the original filtered `labels` first and
# then recomputed the mask from the already-filtered labels to index
# `samples_raw`, which misaligns samples and labels (and has the wrong length).
# Compute the mask exactly once on the unfiltered labels and apply it to both.
# `.ravel()` keeps the mask 1-D in case labels is a column vector, matching the
# convention used by the sibling scripts.
mask = (labels.ravel() == 4) | (labels.ravel() == 5)
samples_raw = samples_raw[mask, :]
labels = labels[mask]

# determine g when calibrating
# determine upright angle
# put threshold on angles

samples_features = preprocessing.scale(samples_raw)

compare(samples_features, labels, N_VALIDATIONS)
"""Windowed classifier comparison on the first six columns of one recording."""
import numpy as np
from sklearn import preprocessing

from evaluation.compare import compare
from preprocessing.PreProcessor import PreProcessor
from src.filereader.FileReader import FileReader

FILE_PATH = "../../resource/PostureEntry.csv"
N_VALIDATIONS = 4
FILE_PATH_1 = "../../resource/PostureEntry_11_02_back_sit_stand_phil.csv"

# Read one recording and keep only the first six sensor columns.
samples_raw, labels, _ = FileReader.read(FILE_PATH_1)
samples_raw = samples_raw[:, 0:6]

window_size = 50

# Merge consecutive samples into fixed-size windows.
window = PreProcessor.merge_window(samples_raw, window_size)
# p2p = PreProcessor.peak2peak(samples_raw, window_size)

# One label per window, dropping the trailing label so the label count matches
# the window count — presumably the final partial window; confirm against
# merge_window's behavior.
labels_reduced = labels[0::window_size][:-1]

samples_features = preprocessing.scale(window)

compare(samples_features, labels_reduced, N_VALIDATIONS)
"""Per-subject KNN cross-validation, with scores averaged across subjects."""
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline

from evaluation.compare import compare, compare_selected, classifiers_all
from preprocessing.PreProcessor import PreProcessor
from src.filereader.FileReader import FileReader

FILE_PATH_1 = "../../resource/PostureEntry_DMP_Phil_Monday.csv"
FILE_PATH_2 = "../../resource/PostureEntry_DMP_Sergio_Monday.csv"
FILE_PATH_3 = "../../resource/PostureEntry_DMP_Ozan.csv"
paths = [FILE_PATH_1, FILE_PATH_2, FILE_PATH_3]
N_VALIDATIONS = 30

# samples_filtered, labels_reduced = PreProcessor.average(samples_raw, labels_raw, window_size)

# Accumulate per-subject mean/std CV scores, one subject (file) at a time.
scores_mean = 0.0
scores_std = 0.0
for path in paths:
    samples_raw, labels_raw, label_names = FileReader.readAll([path])
    samples_features = preprocessing.scale(samples_raw)
    scores_clf = cross_val_score(
        KNeighborsClassifier(5, metric='manhattan', algorithm='kd_tree'),
        samples_features, labels_raw.ravel(), cv=N_VALIDATIONS)
    scores_mean += scores_clf.mean()
    scores_std += scores_clf.std()

# FIX: divide by the number of subjects rather than the hard-coded 3, so
# adding or removing a path cannot silently skew the reported averages.
n_subjects = len(paths)
print("Mean:" + str(scores_mean / n_subjects))
print("Std:" + str(scores_std / n_subjects))
"""Merge two recordings (classes 0/1 only from the first) and window them."""
import numpy as np
from sklearn import preprocessing

from evaluation.compare import compare
from preprocessing.PreProcessor import PreProcessor
from src.filereader.FileReader import FileReader

FILE_PATH = "../../resource/PostureEntry.csv"
N_VALIDATIONS = 4

# FILE_PATH = "../../../../App/Generated-Data/PostureEntry_11_02_chest_sitting.csv"
FILE_PATH_1 = "../../resource/PostureEntry_11_02_back_sit_move_sergio.csv"
# FILE_PATH_2 = "../../resource/PostureEntry_11_02_back_phil_walk_outside.csv"
FILE_PATH_3 = "../../resource/PostureEntry_11_02_chest_sitting_phil.csv"

samples_raw_1, labels_1, _ = FileReader.read(FILE_PATH_1)

# Restrict the first recording to classes 0 and 1. The mask is built once from
# the unfiltered labels and applied to samples and labels alike.
keep = (labels_1.ravel() == 0) | (labels_1.ravel() == 1)
samples_raw_1 = samples_raw_1[keep, :]
labels_1 = labels_1[keep]

# samples_raw_2, labels_2, _ = FileReader.read(FILE_PATH_2)
samples_raw_3, labels_3, _ = FileReader.read(FILE_PATH_3)

# Stack the two recordings row-wise.
samples_raw = np.vstack([samples_raw_1, samples_raw_3])
labels = np.vstack([labels_1, labels_3])

window_size = 20

window = PreProcessor.merge_window(samples_raw, window_size)
# p2p = PreProcessor.peak2peak(samples_raw, window_size)

# One label per window; drop the trailing label so counts line up.
labels_reduced = labels[0::window_size][:-1]
"""Median-windowed features (all 9 columns) from one DMP recording."""
import numpy as np
from sklearn import preprocessing
from sklearn.decomposition import PCA

from evaluation.compare import compare, compare_selected
from preprocessing.PreProcessor import PreProcessor
from src.filereader.FileReader import FileReader

# FILE_PATH = "../../../../App/Generated-Data/PostureEntry_11_02_chest_sitting.csv"
FILE_PATH_1 = "../../resource/PostureEntry_DMP_Phil_Monday.csv"
FILE_PATH_2 = "../../resource/PostureEntry_DMP_Sergio_Monday.csv"
N_VALIDATIONS = 20

samples_raw, labels_raw, _ = FileReader.readAll([FILE_PATH_1])

window_size = 20

# Median-reduce each window of raw samples; one label per window.
average, labels_reduced = PreProcessor.median(samples_raw, labels_raw, window_size)
p2p = PreProcessor.peak2peak(samples_raw, window_size)

# Feature matrix: one row per complete window, the 9 median columns.
n_windows = int(np.floor(samples_raw.shape[0] / window_size))
samples_features = np.zeros(shape=(n_windows, 9))
samples_features[:, 0:9] = average[:, 0:9]
# samples_features[:, 6] = p2p.ravel()

samples_features = preprocessing.scale(samples_features)
# samples_features = PCA().fit_transform(samples_features)

compare_selected(samples_features, labels_reduced, N_VALIDATIONS)