/
ds_ex_125.py
44 lines (36 loc) · 1.59 KB
/
ds_ex_125.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
from matplotlib import pyplot as plt
import sklearn
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
import pandas as pd
import sklearn.datasets
def get_iris_df():
    """Load the iris dataset as a DataFrame with a human-readable 'species' column."""
    bunch = sklearn.datasets.load_iris()
    frame = pd.DataFrame(bunch['data'], columns=bunch['feature_names'])
    # Map each integer target code (0..2) to its species name.
    names = bunch['target_names']
    frame['species'] = [names[code] for code in bunch['target']]
    return frame
df = get_iris_df()

# Classifiers to compare on the same ROC plot, each with a distinct line style.
CLASS_MAP = {
    'Logistic Regression': ('-', LogisticRegression()),
    'Naive Bayes': ('--', GaussianNB()),
    # Fixed label typo: was 'Desicion Tree'.
    'Decision Tree': ('.-', DecisionTreeClassifier(max_depth=5)),
    'Random Forest': (':', RandomForestClassifier(max_depth=5,
                                                  n_estimators=10,
                                                  max_features=1)),
}

# Binary task: distinguish 'virginica' from the other two species.
# NOTE(review): df.columns[:3] uses only the first 3 of the 4 iris features
# (petal width is dropped) — confirm this is intentional.
X, Y = df[df.columns[:3]], (df['species'] == 'virginica')
# test_size=.8 trains on only 20% of the data (as in the original).
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.8)

for name, (line_fmt, model) in CLASS_MAP.items():
    model.fit(X_train, Y_train)
    # predict_proba returns shape (n, 2); column 1 is P(positive class).
    preds = model.predict_proba(X_test)
    pred = pd.Series(preds[:, 1])
    fpr, tpr, thresholds = roc_curve(Y_test, pred)
    auc_score = auc(fpr, tpr)
    label = '%s: auc=%f' % (name, auc_score)
    plt.plot(fpr, tpr, line_fmt, linewidth=5, label=label)

plt.legend(loc="lower right")
plt.title("Comparisons among classifiers")
plt.plot([0, 1], [0, 1], 'k--')  # chance-level diagonal for reference
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Ratio')
plt.ylabel('True Positive Ratio')
plt.show()