# -*- coding: utf-8 -*-
"""Code1_Nagarathna_Sali.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/13Rasfyh55NGjgHrNsPveygPK_-nVvrk0
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import tree
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
from imblearn.over_sampling import SMOTE # imblearn library can be installed using pip install imblearn
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
import plotly.graph_objs as go
import plotly.offline as offline
import plotly.figure_factory as ff
# Importing dataset and examining it
dataset = pd.read_csv("/content/EireJet.csv")
print(dataset.head())
print(dataset.shape)
print(dataset.info())
print(dataset.describe())
# Plotting Correlation Heatmap
corrs = dataset.corr(numeric_only=True)  # restrict to numeric columns; categorical features are encoded further below
figure = ff.create_annotated_heatmap(
    z=corrs.values,
    x=list(corrs.columns),
    y=list(corrs.index),
    annotation_text=corrs.round(2).values,
    showscale=True)
offline.plot(figure,filename='corrheatmap.html')
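# The next step drops 'Cleanliness' and 'Arrival Delay in Minutes'. As a minimal sketch
# (the 0.8 threshold is an assumption, not part of the original analysis), strongly
# correlated pairs can also be listed programmatically; each pair appears twice because
# the matrix is symmetric:
high_corr_pairs = (corrs.abs()
                   .where(~np.eye(len(corrs), dtype=bool))  # mask the diagonal
                   .stack()
                   .loc[lambda s: s > 0.8])
print(high_corr_pairs)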
# Dropping two features that the heatmap shows as strongly correlated with other columns
dataset1 = dataset.drop(['Cleanliness','Arrival Delay in Minutes'], axis = 1)
print(dataset1.head())
print(dataset1.shape)
print(dataset1.info())
print(dataset1.describe())
# Plotting Correlation Heatmap
corrs = dataset1.corr(numeric_only=True)
figure = ff.create_annotated_heatmap(
    z=corrs.values,
    x=list(corrs.columns),
    y=list(corrs.index),
    annotation_text=corrs.round(2).values,
    showscale=True)
offline.plot(figure,filename='corrheatmap1.html')
# Converting Categorical features into Numerical features
dataset1['Gender'] = dataset1['Gender'].map({'Male':0, 'Female':1})
dataset1['Frequent Flyer'] = dataset1['Frequent Flyer'].map({'No':0, 'Yes':1})
dataset1['Type of Travel'] = dataset1['Type of Travel'].map({'Personal Travel':0, 'Business travel':1})
dataset1['Class'] = dataset1['Class'].map({'Eco':0,'Eco Plus':0,'Business':1})
dataset1['satisfaction'] = dataset1['satisfaction'].map({'neutral or dissatisfied':0, 'satisfied': 1})
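# Note: .map() returns NaN for any category not listed in the dictionaries above, so the
# missing-value check below also catches unexpected category spellings.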
print(dataset1.head(5))
print(dataset1.isnull().sum())
# Dividing dataset into label and feature sets
X = dataset1.drop(['satisfaction'], axis = 1) # Features
Y = dataset1['satisfaction'] # Labels
print(type(X))
print(type(Y))
print(X.shape)
print(Y.shape)
# Normalizing numerical features so that each feature has mean 0 and variance 1
feature_scaler = StandardScaler()
X_scaled = feature_scaler.fit_transform(X)
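# Note: fitting the scaler on the full dataset before splitting leaks test-set statistics
# into training. A stricter variant (an alternative, not the original flow) would be:
#   X_train = feature_scaler.fit_transform(X_train)
#   X_test = feature_scaler.transform(X_test)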
# Dividing dataset into training and test sets
X_train, X_test, Y_train, Y_test = train_test_split(X_scaled, Y, test_size=0.3, random_state=100)
print(X_train.shape)
print(X_test.shape)
# Implementing Oversampling to balance the dataset; SMOTE stands for Synthetic Minority Oversampling Technique
print("Number of observations in each class before oversampling (training data): \n", pd.Series(Y_train).value_counts())
smote = SMOTE(random_state = 101)
X_train, Y_train = smote.fit_resample(X_train, Y_train)  # fit_sample() was removed in newer imblearn releases
print("Number of observations in each class after oversampling (training data): \n", pd.Series(Y_train).value_counts())
# Tuning the Random Forest parameter 'n_estimators' and implementing cross-validation using Grid Search
rfc = RandomForestClassifier(criterion='entropy', max_features='sqrt', random_state=1)  # 'auto' was removed in newer scikit-learn; 'sqrt' is its equivalent for classifiers
grid_param = {'n_estimators': [50, 100, 150, 200, 250, 300]}
gd_sr = GridSearchCV(estimator=rfc, param_grid=grid_param, scoring='precision', cv=5)
gd_sr.fit(X_train, Y_train)
best_parameters = gd_sr.best_params_
print(best_parameters)
best_result = gd_sr.best_score_ # Mean cross-validated score of the best_estimator
print(best_result)
print(type(best_result))
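# For reference, the mean cross-validated precision of every candidate (not just the best
# one) is available via cv_results_:
print(pd.DataFrame(gd_sr.cv_results_)[['param_n_estimators', 'mean_test_score']])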
# Building Random Forest using the tuned parameter
rfc = RandomForestClassifier(n_estimators=150, criterion='entropy', max_features='sqrt', random_state=1)
rfc.fit(X_train,Y_train)
featimp = pd.Series(rfc.feature_importances_, index=list(X)).sort_values(ascending=False)
print(featimp)
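# A quick visual of the importance ranking (an addition, consistent with the plotting
# style used elsewhere in this script):
featimp.plot(kind='barh', figsize=(8, 6), title='Feature Importances')
plt.show()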
Y_pred = rfc.predict(X_test)
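# For symmetry with the boosting models later in the script (an addition, not in the original run):
print('Classification report: \n', metrics.classification_report(Y_test, Y_pred))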
conf_mat = metrics.confusion_matrix(Y_test, Y_pred)
plt.figure(figsize=(8,6))
sns.heatmap(conf_mat, annot=True, fmt='d')  # fmt='d' prints raw counts instead of scientific notation
plt.title("Confusion Matrix")
plt.xlabel("Predicted Class")
plt.ylabel("Actual Class")
plt.show()
print('Confusion matrix: \n', conf_mat)
print('TP: ', conf_mat[1,1])
print('TN: ', conf_mat[0,0])
print('FP: ', conf_mat[0,1])
print('FN: ', conf_mat[1,0])
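# Worked example (an addition for clarity): the headline metrics follow directly from the four counts above
TP, TN, FP, FN = conf_mat[1, 1], conf_mat[0, 0], conf_mat[0, 1], conf_mat[1, 0]
print('Accuracy: ', (TP + TN) / (TP + TN + FP + FN))
print('Precision: ', TP / (TP + FP))
print('Recall: ', TP / (TP + FN))
print('Specificity: ', TN / (TN + FP))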
# Re-training on a reduced feature set: the features ranked highest by the importance scores above
X1 = dataset1[['Frequent Flyer','Checkin service','Age','Type of Travel','Class','Flight Distance','Inflight wifi service','Ease of Online booking','Online boarding','Seat comfort','On-board service','Departure/Arrival time convenient','Inflight service','Leg room service','Baggage handling','Inflight entertainment']]
feature_scaler = StandardScaler()
X_scaled1 = feature_scaler.fit_transform(X1)
# Dividing dataset into training and test sets
X_train1, X_test1, Y_train1, Y_test1 = train_test_split(X_scaled1, Y, test_size=0.3, random_state=100)
smote = SMOTE(random_state=101)
X_train1, Y_train1 = smote.fit_resample(X_train1, Y_train1)  # fit_sample() was removed in newer imblearn releases
rfc = RandomForestClassifier(n_estimators=150, criterion='entropy', max_features='sqrt', random_state=1)
rfc.fit(X_train1,Y_train1)
Y_pred1 = rfc.predict(X_test1)
conf_mat = metrics.confusion_matrix(Y_test1, Y_pred1)
plt.figure(figsize=(8,6))
sns.heatmap(conf_mat, annot=True, fmt='d')  # fmt='d' prints raw counts instead of scientific notation
plt.title("Confusion Matrix")
plt.xlabel("Predicted Class")
plt.ylabel("Actual Class")
plt.show()
print('Confusion matrix: \n', conf_mat)
print('TP: ', conf_mat[1,1])
print('TN: ', conf_mat[0,0])
print('FP: ', conf_mat[0,1])
print('FN: ', conf_mat[1,0])
# Tuning the AdaBoost parameter 'n_estimators' and implementing cross-validation using Grid Search
abc = AdaBoostClassifier(random_state=1)
grid_param = {'n_estimators': [5,10,20,30,40,50]}
gd_sr = GridSearchCV(estimator=abc, param_grid=grid_param, scoring='precision', cv=5)
gd_sr.fit(X_train, Y_train)
best_parameters = gd_sr.best_params_
print(best_parameters)
best_result = gd_sr.best_score_ # Mean cross-validated score of the best_estimator
print(best_result)
# Building AdaBoost using the tuned parameter
abc = AdaBoostClassifier(n_estimators=50, random_state=1)
abc.fit(X_train,Y_train)
featimp = pd.Series(abc.feature_importances_, index=list(X)).sort_values(ascending=False)
print(featimp)
Y_pred = abc.predict(X_test)
print('Classification report: \n', metrics.classification_report(Y_test, Y_pred))
conf_mat = metrics.confusion_matrix(Y_test, Y_pred)
plt.figure(figsize=(8,6))
sns.heatmap(conf_mat, annot=True, fmt='d')  # fmt='d' prints raw counts instead of scientific notation
plt.title("Confusion Matrix")
plt.xlabel("Predicted Class")
plt.ylabel("Actual Class")
plt.show()
print('Confusion matrix: \n', conf_mat)
print('TP: ', conf_mat[1,1])
print('TN: ', conf_mat[0,0])
print('FP: ', conf_mat[0,1])
print('FN: ', conf_mat[1,0])
# Tuning the Gradient Boosting parameters 'n_estimators', 'max_depth' and 'max_leaf_nodes' and implementing cross-validation using Grid Search
gbc = GradientBoostingClassifier(random_state=1)
grid_param = {'n_estimators': [60,70,80], 'max_depth' : [12,13,14,15,16], 'max_leaf_nodes': [8,12,16,20,24,28,32]}
gd_sr = GridSearchCV(estimator=gbc, param_grid=grid_param, scoring='precision', cv=5)
# In the above GridSearchCV(), scoring parameter should be set as follows:
# scoring = 'accuracy' when you want to maximize prediction accuracy
# scoring = 'recall' when you want to minimize false negatives
# scoring = 'precision' when you want to minimize false positives
# scoring = 'f1' when you want to balance false positives and false negatives (place equal emphasis on minimizing both)
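# Hypothetical example: to place equal weight on both error types instead, the same search
# could be rerun with scoring='f1':
#   gd_sr = GridSearchCV(estimator=gbc, param_grid=grid_param, scoring='f1', cv=5)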
gd_sr.fit(X_train, Y_train)
best_parameters = gd_sr.best_params_
print(best_parameters)
best_result = gd_sr.best_score_ # Mean cross-validated score of the best_estimator
print(best_result)
# Building Gradient Boosting using the chosen parameters (note: n_estimators=110 and max_depth=18
# fall outside the grid searched above, presumably carried over from a broader tuning run)
gbc = GradientBoostingClassifier(n_estimators=110, max_depth=18, max_leaf_nodes=32, random_state=1)
gbc.fit(X_train,Y_train)
featimp = pd.Series(gbc.feature_importances_, index=list(X)).sort_values(ascending=False)
print(featimp)
Y_pred = gbc.predict(X_test)
print('Classification report: \n', metrics.classification_report(Y_test, Y_pred))
conf_mat = metrics.confusion_matrix(Y_test, Y_pred)
plt.figure(figsize=(8,6))
sns.heatmap(conf_mat, annot=True, fmt='d')  # fmt='d' prints raw counts instead of scientific notation
plt.title("Confusion Matrix")
plt.xlabel("Predicted Class")
plt.ylabel("Actual Class")
plt.show()
print('Confusion matrix: \n', conf_mat)
print('TP: ', conf_mat[1,1])
print('TN: ', conf_mat[0,0])
print('FP: ', conf_mat[0,1])
print('FN: ', conf_mat[1,0])
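# A final hedged extension (not in the original script): a threshold-independent view of
# the Gradient Boosting model via ROC-AUC
print('ROC-AUC: ', metrics.roc_auc_score(Y_test, gbc.predict_proba(X_test)[:, 1]))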