#!/usr/bin/env python
"""
Reads in features, trains a regressor, and prints train/test error metrics.
"""
import sys
import pandas as pd
import matplotlib.pyplot as plt
import pydot
from sklearn.externals.six import StringIO
from sklearn import metrics
from sklearn import tree
from sklearn import svm
from sklearn import ensemble
# grid_search and cross_validation are the pre-0.18 scikit-learn module names.
from sklearn import grid_search
from sklearn import linear_model, cross_validation
from sklearn.preprocessing import PolynomialFeatures

DEFAULT_DATA_FILE = "output/features.txt"

def cap_results(results):
    """Clamps each value in results to the [0.0, 1.0] range."""
    return [min(1.0, max(0.0, value)) for value in results]
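
# Illustrative example with made-up values: anything outside [0, 1] is
# clamped to the nearest bound.
#   cap_results([-0.2, 0.5, 1.7])  ->  [0.0, 0.5, 1.0]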

def boost(train_features, train_labels, test_features, test_labels):
    """
    Trains a gradient boosting regressor on the given training data,
    prints train/test error metrics, and returns the predictions.
    """
    regressor = ensemble.GradientBoostingRegressor()
    regressor.fit(train_features, train_labels)
    test_results = regressor.predict(test_features)
    train_results = regressor.predict(train_features)
    print "test mse", metrics.mean_squared_error(test_labels, test_results)
    print "test r2", metrics.r2_score(test_labels, test_results)
    print "train mse", metrics.mean_squared_error(train_labels, train_results)
    print "train r2", metrics.r2_score(train_labels, train_results)
    return (test_results, train_results)

def decision_tree(train_features, train_labels, test_features, test_labels, feature_names):
    """
    Trains a decision tree regressor, prints train/test error metrics,
    writes a visualization of the tree to tree.pdf, and returns the
    predictions.
    """
    regressor = tree.DecisionTreeRegressor()
    regressor.fit(train_features, train_labels)
    test_results = cap_results(regressor.predict(test_features))
    train_results = cap_results(regressor.predict(train_features))
    print "test mse", metrics.mean_squared_error(test_labels, test_results)
    print "test r2", metrics.r2_score(test_labels, test_results)
    print "train mse", metrics.mean_squared_error(train_labels, train_results)
    print "train r2", metrics.r2_score(train_labels, train_results)
    # print "importances"
    # temp = []
    # for index, val in enumerate(regressor.feature_importances_):
    #     if val > 0.001:
    #         temp.append((index, val))
    # print sorted(temp, key=lambda x: x[1])
    # Render the fitted tree as a PDF. class_names is omitted because a
    # regressor has no classes.
    dot_data = StringIO()
    tree.export_graphviz(regressor, out_file=dot_data,
                         special_characters=True,
                         impurity=False,
                         feature_names=feature_names)
    graph = pydot.graph_from_dot_data(dot_data.getvalue())
    graph.write_pdf("tree.pdf")
    return (test_results, train_results)

def forest(train_features, train_labels, test_features, test_labels):
    """
    Trains a random forest regressor, prints train/test error metrics and
    the non-negligible feature importances, and returns the predictions.
    """
    regressor = ensemble.RandomForestRegressor(max_depth=5)
    regressor.fit(train_features, train_labels)
    test_results = cap_results(regressor.predict(test_features))
    train_results = cap_results(regressor.predict(train_features))
    print "test mse", metrics.mean_squared_error(test_labels, test_results)
    print "test r2", metrics.r2_score(test_labels, test_results)
    print "train mse", metrics.mean_squared_error(train_labels, train_results)
    print "train r2", metrics.r2_score(train_labels, train_results)
    print "importances"
    temp = []
    for index, val in enumerate(regressor.feature_importances_):
        if val > 0.001:
            temp.append((index, val))
    print sorted(temp, key=lambda x: x[1])
    return (test_results, train_results)

def svr(train_features, train_labels, test_features, test_labels):
    """
    Trains a support vector regressor on the given training data, tuning
    C and gamma by grid search over a polynomial kernel, and then runs it
    against the test data. Returns the result.
    """
    base_svr = svm.SVR()
    search_params = {'kernel': ['poly'],
                     'C': [1, 10, 100, 1000],
                     'gamma': [0.000001, 0.0001, 0.01, 1, 10, 100]}
    svm_cv = grid_search.GridSearchCV(base_svr, param_grid=search_params,
                                      cv=5, n_jobs=1, verbose=5,
                                      scoring='mean_absolute_error')
    svm_cv.fit(train_features, train_labels)
    print svm_cv.best_params_
    test_results = svm_cv.predict(test_features)
    train_results = svm_cv.predict(train_features)
    return (test_results, train_results)

def linear_regressor(train_features, train_labels, test_features, test_labels):
    """
    Trains a linear regressor on the given training data and then
    runs it against the test data. Returns the result.
    """
    # RidgeCV picks its regularization strength by cross-validation.
    # Alternatives that were tried are left commented out.
    # lr = linear_model.RANSACRegressor(min_samples=2)
    lr = linear_model.RidgeCV()
    # lr = linear_model.LassoCV(verbose=True, n_jobs=-1)
    lr.fit(train_features, train_labels)
    test_results = lr.predict(test_features)
    train_results = lr.predict(train_features)
    return test_results, train_results
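
# Minimal usage sketch with toy data (hypothetical values, not part of
# the pipeline): predictions come back in the same order as the inputs.
#   test_preds, train_preds = linear_regressor(
#       [[0.0], [1.0], [2.0]], [0.0, 0.5, 1.0],  # training features/labels
#       [[1.5]], [0.75])                          # test features/labels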

def load_data(datafile=DEFAULT_DATA_FILE):
    """
    Loads feature and label data from the given CSV file.
    Returns a tuple (features, ratio, feature_names) where features is a
    NumPy array, ratio is a list of favorites/views values capped to
    [0, 1], and feature_names holds the feature column names.
    """
    # Read data. Every row is assumed to have views > 0, so the
    # favorites/views ratio is always defined.
    dataframe = pd.read_csv(datafile)
    views = [float(x) for x in dataframe["views"].tolist()]
    favs = [float(x) for x in dataframe["favorites"].tolist()]
    ratio = cap_results([fav / view for fav, view in zip(favs, views)])
    # Uncomment this and fix the return line if you want to do
    # favorites prediction instead
    # favs = dataframe["favorites"].tolist()
    # Drop the non-numeric artist column by name, then keep only the
    # feature columns: column 5 is the first column after 'favs'.
    dataframe = dataframe.drop("artist", axis=1)
    dataframe = dataframe.iloc[:, 5:]
    features = dataframe.values
    # Add pairwise interaction terms. Note that the returned column names
    # describe the raw features, before the interaction terms are added.
    poly = PolynomialFeatures(interaction_only=True)
    features = poly.fit_transform(features)
    print "added interaction terms"
    return features, ratio, dataframe.columns.values
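
# Note on the expected input: load_data only assumes a CSV with "views",
# "favorites", and "artist" columns plus numeric feature columns starting
# at index 5; the default file (output/features.txt) is presumably
# generated by an earlier step of the pipeline.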

def main(args):
    if len(args) == 2 and args[1] == "help":
        print """
        Usage: ./regressor.py [features file]
        """
        return
    elif len(args) > 2:
        # sys.argv always contains the script name, so only extra
        # arguments are an error.
        print "Incorrect arguments. Try 'help' for more information"
        return
    # Load data.
    if len(args) > 1:
        features, labels, feature_names = load_data(args[1])
    else:
        features, labels, feature_names = load_data()  # default features file
    # Partition into training and test datasets.
    train_features, test_features, train_labels, test_labels = cross_validation.train_test_split(features, labels, test_size=0.33)
    # Pick a regressor; the alternatives that were tried are left commented out.
    # test_results, train_results = boost(train_features, train_labels, test_features, test_labels)
    # test_results, train_results = decision_tree(train_features, train_labels, test_features, test_labels, feature_names)
    # test_results, train_results = forest(train_features, train_labels, test_features, test_labels)
    # test_results, train_results = svr(train_features, train_labels, test_features, test_labels)
    test_results, train_results = linear_regressor(train_features, train_labels, test_features, test_labels)
    test_results = cap_results(test_results)
    train_results = cap_results(train_results)
    print "test mse", metrics.mean_squared_error(test_labels, test_results)
    print "test r2", metrics.r2_score(test_labels, test_results)
    print "train mse", metrics.mean_squared_error(train_labels, train_results)
    print "train r2", metrics.r2_score(train_labels, train_results)
    # Plot predicted vs. measured values: training points in red,
    # test points in blue.
    fig, ax = plt.subplots()
    ax.scatter(train_labels, train_results, color="r")
    ax.scatter(test_labels, test_results, color="b")
    # Plot the y = x reference line.
    ax.plot([min(test_labels), max(test_labels)],
            [min(test_labels), max(test_labels)], 'k--', lw=4)
    # Label axes.
    ax.set_xlabel('Measured')
    ax.set_ylabel('Predicted')
    plt.title("RidgeCV")
    plt.show()


if __name__ == "__main__":
    main(sys.argv)
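
# Example invocations (assuming the default features file exists):
#   ./regressor.py                        # uses output/features.txt
#   ./regressor.py path/to/features.csv   # uses a custom features file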