def random_forest(X,y,X_train,y_train,X_test,y_test,params):
    """Train a random-forest classifier and report its test metrics.

    Fits a RandomForestClassifier configured from the 'n_estimators' and
    'max_depth' entries of `params` on the training split, predicts on the
    test split, draws the ROC curve, and returns whatever show_metrics
    produces for the test predictions.
    """
    n_trees = params['n_estimators']
    depth = params['max_depth']
    model = RandomForestClassifier(n_estimators=n_trees, max_depth=depth)
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    results = show_metrics('Random Forest', y_test, predictions)
    draw_roc(X, y, X_train, y_train, X_test, y_test, model)
    return results
def svm(X,y,X_train,y_train,X_test,y_test,params):
    """Train an SVM classifier and report its test metrics.

    Parameters
    ----------
    X, y : full dataset, forwarded unchanged to draw_roc.
    X_train, y_train : training split.
    X_test, y_test : test split.
    params : regularization strength C for the SVC.

    Returns
    -------
    Whatever show_metrics returns for the test-set predictions.
    """
    # Fix: pass `params` explicitly as the C keyword instead of relying on it
    # happening to be SVC's first positional parameter. This matches the
    # sibling logistic_regression(..., C=params) and makes the intent clear.
    reg = SVC(C=params)
    reg.fit(X_train, y_train)
    y_pre = reg.predict(X_test)
    metrics = show_metrics('SVM', y_test, y_pre)
    # NOTE(review): a default SVC (probability=False) has no predict_proba;
    # assumes draw_roc uses decision_function or labels only — confirm.
    draw_roc(X, y, X_train, y_train, X_test, y_test, reg)
    return metrics
def logistic_regression(X,y,X_train,y_train,X_test,y_test,params):
    """Train a logistic-regression classifier and report its test metrics.

    Fits a LogisticRegression with inverse-regularization strength
    `params` on the training split, predicts on the test split, draws the
    ROC curve, and returns the metrics computed by show_metrics.
    """
    classifier = LogisticRegression(C=params)
    classifier.fit(X_train, y_train)
    predicted_labels = classifier.predict(X_test)
    results = show_metrics('Logistic Regression', y_test, predicted_labels)
    draw_roc(X, y, X_train, y_train, X_test, y_test, classifier)
    return results
# --- Tail of double_q_learning: its `def`, the episode loop, and the Q1/Q2
# update logic are above this chunk and not visible here. Indentation below
# is reconstructed — TODO confirm against the full file.
        # Per-step bookkeeping inside the episode's step loop.
        length_episode[episode] += 1
        total_reward_episode[episode] += reward
        if is_done:
            break
        state = next_state
    # After training: derive the greedy policy from the combined tables
    # (Q1 and Q2 are presumably the two double-Q value tensors — verify).
    policy = {}
    Q = Q1 + Q2
    for state in range(n_state):
        policy[state] = torch.argmax(Q[state]).item()
    return Q, policy

# --- Driver script: run double Q-learning on `env` (defined elsewhere in
# the file) and report per-episode lengths/rewards.
gamma = 1       # undiscounted returns
n_episode = 3000
alpha = 0.4     # learning rate
epsilon = 0.1   # exploration rate for the epsilon-greedy behavior policy
epsilon_greedy_policy = gen_epsilon_greedy_policy(env.action_space.n, epsilon)
length_episode = [0] * n_episode
total_reward_episode = [0] * n_episode
optimal_Q, optimal_policy = double_q_learning(env, gamma, n_episode, alpha)
plot_length_reward(length_episode, total_reward_episode)
show_metrics(length_episode, total_reward_episode)
print(optimal_policy)
from utils import compute_metrics, show_metrics

# NOTE(review): `os`, `pd` (pandas) and `testing_function` are used below but
# not imported in this chunk — presumably imported earlier in the file; verify.
ROOT_PATH = '.'

#%% Train Dataframe
# Evaluate the model on the full (training) dataset and print its metrics.
print("---------------------------")
print("# Train Dataset Dataframe #")
print("---------------------------")
path_to_dataset = os.path.join(ROOT_PATH, 'data', 'Full_Dataset.csv')
df = pd.read_csv(path_to_dataset)
recordid, y_C_prob, y_C_pred = testing_function(df)
y_test = df['In-hospital_death']  # ground-truth labels column
recall, precision, prc_auc, roc_auc = compute_metrics(y_test, y_C_prob, y_C_pred)
show_metrics(precision, recall, prc_auc, roc_auc)
# Recorded output of the run above:
# --------------------------#
# Train Dataset Dataframe #
# --------------------------#
# Precision : 0.667
# Recall : 0.663
# Min(P,R) : 0.663
# AUPRC : 0.745
# AUROC : 0.950

#%% Test Dataframe
# Same evaluation on the held-out test dataset (continues past this chunk).
print("---------------------------")
print("# Test Dataset Dataframe #")
print("---------------------------")
path_to_dataset = os.path.join(ROOT_PATH, 'data', 'Testing_Dataset.csv')