-
Notifications
You must be signed in to change notification settings - Fork 0
/
try_all.py
110 lines (90 loc) · 3.4 KB
/
try_all.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
from __future__ import division
import common
import smote
import numpy as np
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA
from sklearn import metrics
# ref : http://scikit-learn.org/stable/auto_examples/plot_classifier_comparison.html
# Display labels for each model; kept in lockstep with `classifiers` below,
# since the two lists are zipped together when iterating. The two SVM entries
# are disabled in both lists.
names = ["SGD", "Nearest Neighbors",  # "Linear SVM", "RBF SVM",
         "Decision Tree", "Random Forest", "AdaBoost", "Naive Bayes", "LDA", "QDA"]
# Candidate models to compare, in the same order as `names`.
classifiers = [
    SGDClassifier(loss='hinge', penalty='l2', alpha=0.005, n_iter=10,
                  random_state=42, n_jobs=-1, average=True),
    KNeighborsClassifier(n_neighbors=3),
    # SVC(kernel="linear", C=0.025),
    # SVC(gamma=2, C=1),
    DecisionTreeClassifier(max_depth=15),
    RandomForestClassifier(max_depth=15, n_estimators=10, max_features=1),
    AdaBoostClassifier(),
    GaussianNB(),
    LDA(),
    QDA(),
]
# Load the preprocessed dataset, rebalance the training split with SMOTE, then
# fit every classifier in `classifiers` and report its test-set error and a
# per-class precision/recall/f1 report.
X_train, X_test, y_train, y_test = common.load_train_data_and_split(file='data/processed_missing_filled_in.csv')
X_train = np.asarray(X_train)
y_train = np.array(y_train)
# SMOTE / the classifiers expect integer class labels.
y_train = y_train.astype(np.int32)
# Oversample the minority class so the models are not dominated by the majority.
X_train, y_train = smote.smote_data(X_train, y_train)
# iterate over classifiers
for name, clf in zip(names, classifiers):
    print("Fitting " + name + "...")
    predicted_test = clf.fit(X_train, y_train).predict(X_test)
    # Misclassification rate as a percentage of the test split.
    test_p = ((y_test != predicted_test).sum())/(len(X_test))*100
    # BUG FIX: "%d" truncated the fractional part of the error rate
    # (e.g. 12.7% printed as 12); use a float format instead.
    print("Error on test set: %.2f" % test_p)
    print(metrics.classification_report(y_test, predicted_test))
'''
results (f1-score)
-> rows with missing medical speciality removed
sgd 0.46
knn 0.49
lin-svm 0.49 (warning: precision/f-score are being set to 0.0 in labels with no predicted samples)
rbf-svm 0.46
dt 0.52
rand-forest 0.46
adaboost 0.54 ***
n.bayes 0.03
lda 0.51 (warning: variables are colinear)
qda 0.02 (warning: variables are colinear)
results (f1-score)
-> rows with missing medical speciality removed, diagnoses grouped together by icd9 code category
sgd 0.49 (warning: precision/f-score are being set to 0.0 in labels with no predicted samples)
knn 0.51
lin-svm 0.49
rbf-svm 0.46
dt 0.52
rand-forest 0.46
adaboost 0.54 ***
n.bayes 0.03
lda 0.51 (warning: variables are colinear)
qda 0.02 (warning: variables are colinear)
results (f1-score)
-> rows with missing medical speciality using imputed data, diagnoses grouped together by icd9 code category
sgd 0.48 (warning: precision/f-score are being set to 0.0 in labels with no predicted samples)
knn 0.49
lin-svm 0.48
rbf-svm 0.45
dt 0.51
rand-forest 0.45
adaboost 0.53 ***
n.bayes 0.03
lda 0.50 (warning: variables are colinear)
qda 0.02 (warning: variables are colinear)
results (f1-score)
-> rows with missing medical speciality using value of 0, diagnoses grouped together by icd9 code category
sgd 0.48 (warning: precision/f-score are being set to 0.0 in labels with no predicted samples)
knn 0.49
lin-svm 0.48
rbf-svm 0.45
dt 0.51
rand-forest 0.45
adaboost 0.53 ***
n.bayes 0.03
lda 0.50 (warning: variables are colinear)
qda 0.02 (warning: variables are colinear)
'''