def plot_umafall():
    # Load the pre-selected relevant features and labels for person p, balance the
    # classes, and plot the class distribution.
    umafall = UMAFALL_Model()
    p = 1
    relevant_features = s.load_var(
        "umafall_relevant_features_best_window{}relevant_features_{}.pkl".
        format(slash, p))
    y = s.load_var("umafall_relevant_features_best_window{}y_{}.pkl".format(
        slash, p))
    y = pd.DataFrame(y, columns=[umafall.label_tag])
    balanced_data = balance_data.balance_data(relevant_features, y,
                                              threshold_balance_data)
    #plot_hist(relevant_features, y, 'Unbalanced UMAFALL dataset.')
    plot_hist(balanced_data[0], balanced_data[1], 'Balanced UMAFALL dataset.')
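# Illustrative sketch only: plot_hist() above is a project helper not shown in this
# file, so its internals are an assumption here. A minimal stand-in with the same
# intent, a bar plot of how many windows each activity label has, could look like
# this (the function name below is hypothetical):
import pandas as pd
import matplotlib.pyplot as plt


def plot_hist_sketch(x, y, title):
    # x (the feature matrix) is kept only to mirror plot_hist's call signature.
    counts = pd.DataFrame(y).iloc[:, 0].value_counts()  # windows per activity label
    counts.plot(kind="bar")
    plt.title(title)
    plt.xlabel("Activity label")
    plt.ylabel("Number of windows")
    plt.tight_layout()
    plt.show()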
def umafall():
    umafall = UMAFALL_Model()
    p = 1
    umafall_threshold_classification = 0.45
    relevant_features = s.load_var(
        "umafall_relevant_features_best_window{}relevant_features_{}.pkl".
        format(slash, p))
    y = s.load_var("umafall_relevant_features_best_window{}y_{}.pkl".format(
        slash, p))
    y = pd.DataFrame(y, columns=[umafall.label_tag])
    balanced_data = balance_data.balance_data(relevant_features, y,
                                              threshold_balance_data)
    plot_confusion_matrix(umafall, balanced_data[0], balanced_data[1],
                          umafall_threshold_classification)
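# Illustrative sketch only: plot_confusion_matrix() above is a project helper whose
# internals are not shown here. It presumably predicts, keeps only predictions whose
# highest class probability reaches the threshold, and plots the confusion matrix.
# The classifier choice and names below are assumptions for illustration:
import numpy as np
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split


def confusion_matrix_sketch(x, y, threshold):
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2,
                                                        random_state=42)
    clf = ExtraTreesClassifier(n_estimators=100, random_state=1)
    clf.fit(x_train, y_train.values.ravel())
    proba = clf.predict_proba(x_test)
    pred = clf.classes_[np.argmax(proba, axis=1)]
    keep = np.max(proba, axis=1) >= threshold  # drop low-confidence predictions
    return confusion_matrix(y_test.values.ravel()[keep], pred[keep])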
from sklearn.ensemble import RandomForestClassifier  # Random Forest
from sklearn.ensemble import ExtraTreesClassifier  # Extra Trees
from sklearn.naive_bayes import GaussianNB  # Naive Bayes
from sklearn import svm  # SVM
from sklearn.neural_network import MLPClassifier  # multi-layer perceptron
import pandas as pd
from sklearn.model_selection import train_test_split
from pre_processing.get_accuracy import Get_Accuracy
from scripts.save_workspace import save
import numpy as np
from pre_processing.balance_data import BalanceData
import statistics as st

#===INITIALIZATION===#
Debug.DEBUG = 0
umafall = UMAFALL_Model()
processing = Processing_DB_Files()
project = Project()

# tuple for the MLP classifier
t_aux = []
for i in range(0, 500):
    t_aux.append(500)
t = tuple(t_aux)
####
classifiers = {
    "MPL": MLPClassifier(random_state=1, solver="adam", activation="relu",
                         max_iter=100000, alpha=1e-5,
#===INIT BASES===#
hmp_persons = ["f1", "m1", "m2", "f2", "m3", "f3", "m4",
               "f4"]  # at least 5 activities
umafall_persons = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
arcma_persons = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
models = []
#Round 01
models.append({
    "model_name": "hmp",
    "model": HMP_Model(),
    "persons": hmp_persons,
    "window": 16
})
models.append({
    "model_name": "umafall",
    "model": UMAFALL_Model(),
    "persons": umafall_persons,
    "window": 10
})
models.append({
    "model_name": "arcma",
    "model": ARCMA_Model(),
    "persons": arcma_persons,
    "window": 26
})
#Round 02
#models.append({"model_name":"hmp", "model":HMP_Model(), "persons":hmp_persons, "window":90})
#models.append({"model_name":"umafall", "model":UMAFALL_Model(), "persons":umafall_persons, "window":10})
#models.append({"model_name":"arcma", "model":ARCMA_Model(), "persons":arcma_persons, "window":40})

#tuple from MPL
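# Hypothetical sketch of how the models/persons configuration above might be walked:
# for every base and every subject, load that person's saved feature matrix and
# labels. The file-name pattern mirrors the load_var() calls used elsewhere in these
# scripts; the generator and its name are assumptions for illustration, and `s`,
# `slash` and label_tag are the globals/attributes this project already uses.
def iter_person_data_sketch(models):
    for m in models:
        for p in m["persons"]:
            x = s.load_var(
                "{}_relevant_features_best_window{}relevant_features_{}.pkl".format(
                    m["model_name"], slash, p))
            y = s.load_var("{}_relevant_features_best_window{}y_{}.pkl".format(
                m["model_name"], slash, p))
            y = pd.DataFrame(y, columns=[m["model"].label_tag])
            yield m["model_name"], p, x, y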
from models.arcma_model import ARCMA_Model

#===INITIALIZATION===#
Debug.DEBUG = 0
processing = Processing_DB_Files()
project = Project()
s = save()
get_accuracy = Get_Accuracy()

#===INIT BASES===#
hmp_persons = ["f1", "m1", "m2", "f2", "m3", "f3", "m4",
               "f4"]  # at least 5 activities
umafall_persons = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
arcma_persons = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
models = []
#models.append({"model_name":"hmp", "model":HMP_Model(), "persons":hmp_persons, "window":90})
models.append({"model_name":"umafall", "model":UMAFALL_Model(), "persons":umafall_persons, "window":10})
models.append({"model_name":"arcma", "model":ARCMA_Model(), "persons":arcma_persons, "window":40})

# tuple for the MLP classifier
t_aux = []
for i in range(0, 500):
    t_aux.append(500)
t = tuple(t_aux)
####
classifiers = {"Extratrees": ExtraTreesClassifier(n_estimators=1000, random_state=1)}

#============OLD=============#
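# Hypothetical sketch of how the classifiers dict above could be scored: repeated
# train/test splits with mean accuracy computed via the statistics module. The
# project's Get_Accuracy class likely does something richer; this stand-alone
# version and its names are assumptions for illustration only.
import statistics as st
from sklearn.model_selection import train_test_split


def mean_accuracy_sketch(x, y, classifiers, n_runs=5):
    results = {}
    for name, clf in classifiers.items():
        scores = []
        for seed in range(n_runs):
            x_tr, x_te, y_tr, y_te = train_test_split(x, y, test_size=0.2,
                                                      random_state=seed)
            clf.fit(x_tr, y_tr.values.ravel())
            scores.append(clf.score(x_te, y_te.values.ravel()))
        results[name] = st.mean(scores)
    return results
# Usage idea (with balanced features X and labels y): mean_accuracy_sketch(X, y, classifiers)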