Example #1
import pickle
from math import sqrt

import numpy as np
import pandas as pd
from Confuse import main
# Assumed aliases, matching how SDS and MSE are used below:
from pybrain.datasets import SupervisedDataSet as SDS
from sklearn.metrics import mean_squared_error as MSE

model_file = 'model.pkl'
output_predictions_file = 'predictions.txt'

X2 = pd.read_csv('Data/Test/Test_Combine.csv',
                 usecols=['T', 'TM', 'Tm', 'SLP', 'H', 'VV', 'V', 'VM'])
Y2 = pd.read_csv('Data/Test/Test_Combine.csv', usecols=['PM 2.5'])

X2 = X2.values
Y2 = Y2.values
with open(model_file, 'rb') as f:
    net = pickle.load(f)

y_test_dummy = np.zeros(Y2.shape)

input_size = X2.shape[1]
target_size = Y2.shape[1]

ds = SDS(input_size, target_size)
ds.setField('input', X2)
ds.setField('target', y_test_dummy)

p = net.activateOnDataset(ds)

mse = MSE(Y2, p)
rmse = sqrt(mse)

print("testing RMSE:", rmse)
print("testing MSE: ", mse)

main(Y2, p)
np.savetxt(output_predictions_file, p, fmt='%.6f')
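Example #1 only consumes a pickled PyBrain network; the training step that produces model.pkl is not shown. A minimal training sketch under the same assumptions (same CSV layout, one hidden layer of 10 units, 100 epochs; the layer size and epoch count are illustrative, not taken from the source):

import pickle

import pandas as pd
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork

# Training data, mirroring the Data/ layout used in Example #1.
X = pd.read_csv('Data/Train/Train_Combine.csv',
                usecols=['T', 'TM', 'Tm', 'SLP', 'H', 'VV', 'V', 'VM']).values
Y = pd.read_csv('Data/Train/Train_Combine.csv', usecols=['PM 2.5']).values

ds = SupervisedDataSet(X.shape[1], Y.shape[1])
ds.setField('input', X)
ds.setField('target', Y)

# Feed-forward net: 8 inputs -> 10 hidden units -> 1 output (sizes are illustrative).
net = buildNetwork(X.shape[1], 10, Y.shape[1], bias=True)
trainer = BackpropTrainer(net, ds)
for epoch in range(100):
    trainer.train()  # one backprop pass over the whole dataset

with open('model.pkl', 'wb') as f:
    pickle.dump(net, f)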
Example #2
import numpy as np
import pandas as pd
from sklearn import linear_model
from sklearn import metrics
from Confuse import main

X = pd.read_csv('Train/Train_Combine.csv',
                usecols=['T', 'TM', 'Tm', 'SLP', 'H', 'VV', 'V', 'VM'])
y = pd.read_csv('Train/Train_Combine.csv', usecols=['PM 2.5'])

X = np.array(X)
y = np.array(y)

lin = linear_model.LinearRegression()

lin.fit(X, y)

X2 = pd.read_csv('Test/Test_Combine.csv',
                 usecols=['T', 'TM', 'Tm', 'SLP', 'H', 'VV', 'V', 'VM'])
Y2 = pd.read_csv('Test/Test_Combine.csv', usecols=['PM 2.5'])

X2 = X2.values
Y2 = Y2.values

preds = lin.predict(X2)

err = metrics.mean_absolute_error(Y2, preds) * 100  # note: MAE scaled by 100
print("Mean Absolute Error: %f" % err)
main(Y2, preds)
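The MAE here is scaled by 100; for a fuller picture of fit, RMSE and R² can be reported from the same predictions. A small optional addition reusing Y2 and preds from the script above (not part of the original):

import numpy as np
from sklearn import metrics

rmse = np.sqrt(metrics.mean_squared_error(Y2, preds))
r2 = metrics.r2_score(Y2, preds)
print("RMSE: %f" % rmse)
print("R^2: %f" % r2)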
Example #3
from sklearn.ensemble import RandomForestRegressor
import pandas as pd
import numpy as np
from sklearn import metrics
from Confuse import main

X = pd.read_csv('Train/Train_Combine.csv',
                usecols=['T', 'TM', 'Tm', 'SLP', 'H', 'VV', 'V', 'VM'])
Y = pd.read_csv('Train/Train_Combine.csv', usecols=['PM 2.5'])

X = X.values
Y = Y.values

X2 = pd.read_csv('Test/Test_Combine.csv',
                 usecols=['T', 'TM', 'Tm', 'SLP', 'H', 'VV', 'V', 'VM'])
Y2 = pd.read_csv('Test/Test_Combine.csv', usecols=['PM 2.5'])

X2 = X2.values
Y2 = Y2.values

abc = RandomForestRegressor(n_estimators=10)
abc.fit(X, Y.ravel())  # ravel() gives the 1-D target array scikit-learn expects

preds = abc.predict(X2)
err = metrics.mean_absolute_error(Y2, preds) * 100  # note: MAE scaled by 100
print("Mean Absolute Error: %f" % err)
main(Y2, preds)
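A fitted random forest also exposes per-feature importances, which can hint at which meteorological inputs drive the PM 2.5 predictions. An optional follow-up using the abc model fitted above (not in the original example):

feature_names = ['T', 'TM', 'Tm', 'SLP', 'H', 'VV', 'V', 'VM']
ranked = sorted(zip(feature_names, abc.feature_importances_),
                key=lambda pair: pair[1], reverse=True)
for name, importance in ranked:
    print("%s: %.3f" % (name, importance))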
Example #4
import pickle
from math import sqrt

import numpy as np
import pandas as pd
from Confuse import main
# Assumed aliases, matching how SDS and MSE are used below:
from pybrain.datasets import SupervisedDataSet as SDS
from sklearn.metrics import mean_squared_error as MSE

model_file = 'model.pkl'
output_predictions_file = 'predictions.txt'

X2 = pd.read_csv('Test/Test_Combine.csv', usecols=[
                 'T', 'TM', 'Tm', 'SLP', 'H', 'VV', 'V', 'VM'])
Y2 = pd.read_csv('Test/Test_Combine.csv', usecols=['PM 2.5'])

X2 = X2.values
Y2 = Y2.values
with open(model_file, 'rb') as f:
    net = pickle.load(f)

y_test_dummy = np.zeros(Y2.shape)

input_size = X2.shape[1]
target_size = Y2.shape[1]

ds = SDS(input_size, target_size)
ds.setField('input', X2)
ds.setField('target', y_test_dummy)

p = net.activateOnDataset(ds)

mse = MSE(Y2, p)
rmse = sqrt(mse)

print "testing RMSE:", rmse
print "testing MSE: ", mse

main(Y2, p)
np.savetxt(output_predictions_file, p, fmt='%.6f')
Example #5
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error
from Confuse import main

# Training data, following the Data/ layout used for the test files below.
X = pd.read_csv('Data/Train/Train_Combine.csv',
                usecols=['T', 'TM', 'Tm', 'SLP', 'H', 'VV', 'V', 'VM'])
Y = pd.read_csv('Data/Train/Train_Combine.csv', usecols=['PM 2.5'])

X = X.values
Y = Y.values

X2 = pd.read_csv('Data/Test/Test_Combine.csv', usecols=[
                 'T', 'TM', 'Tm', 'SLP', 'H', 'VV', 'V', 'VM'])
Y2 = pd.read_csv('Data/Test/Test_Combine.csv', usecols=['PM 2.5'])

X2 = X2.values
Y2 = Y2.values

regr_1 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, Y)


y_1 = regr_1.predict(X2)


print ("Mean Absolute Error: ", mean_absolute_error(Y2, y_1))

main(Y2, y_1)

#plt.figure()
#plt.scatter(Y, Y, c="k", label="data")
#plt.plot(Y2, y_1, c="g", label="max_depth=2", linewidth=2)
#plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
#plt.xlabel("data")
#plt.ylabel("target")
#plt.title("Decision Tree Regression")
#plt.legend()
#plt.show()
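The commented-out plot refers to both max_depth=2 and max_depth=5 models; a quick numerical comparison of depths, reusing X, Y, X2, Y2 from above (an illustrative addition, not part of the original):

from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error

for depth in (2, 5):
    tree = DecisionTreeRegressor(max_depth=depth)
    tree.fit(X, Y)
    print("max_depth=%d MAE: %.3f" % (depth, mean_absolute_error(Y2, tree.predict(X2))))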
Example #6
from sklearn.svm import SVR
import pandas as pd
from sklearn import metrics
from Confuse import main

X = pd.read_csv('Train/Train_Combine.csv', usecols=[
                'T', 'TM', 'Tm', 'SLP', 'H', 'VV', 'V', 'VM'])
Y = pd.read_csv('Train/Train_Combine.csv', usecols=['PM 2.5'])

X = X.values
Y = Y.values

X2 = pd.read_csv('Test/Test_Combine.csv', usecols=[
                 'T', 'TM', 'Tm', 'SLP', 'H', 'VV', 'V', 'VM'])
Y2 = pd.read_csv('Test/Test_Combine.csv', usecols=['PM 2.5'])

X2 = X2.values
Y2 = Y2.values

abc = SVR(kernel='rbf')
abc.fit(X, Y.ravel())  # ravel() gives the 1-D target array scikit-learn expects

preds = abc.predict(X2)
err = metrics.mean_absolute_error(Y2, preds) * 100  # note: MAE scaled by 100
print("Mean Absolute Error: %f" % err)  # evaluate performance
main(Y2, preds)
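RBF-kernel SVR is sensitive to feature scale, and the meteorological columns span very different ranges, so standardizing the inputs often helps. A sketch of the same fit with scaling, reusing X, Y, X2, Y2 from above (the scaling step is an addition, not in the original):

from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from sklearn import metrics

scaled_svr = make_pipeline(StandardScaler(), SVR(kernel='rbf'))
scaled_svr.fit(X, Y.ravel())
scaled_err = metrics.mean_absolute_error(Y2, scaled_svr.predict(X2)) * 100
print("Mean Absolute Error (scaled inputs): %f" % scaled_err)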
Example #7
# Legacy (pre-2.x) Keras API: init=, nb_epoch= and show_accuracy= are the old
# argument names. X, Y, X2, Y2 are the train/test arrays prepared as in the
# earlier examples (not shown in this snippet).
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD
from Confuse import main

model = Sequential()
model.add(Dense(10, input_dim=8, init='uniform'))
model.add(Activation('tanh'))
model.add(Dense(10, input_dim=10, init='uniform'))
model.add(Activation('tanh'))
model.add(Dense(1, input_dim=10, init='uniform'))
model.add(Activation('tanh'))

sgd = SGD(lr=0.1, decay=1e-3, momentum=0.5, nesterov=True)
model.compile(loss='mse', optimizer=sgd)

model.fit(X, Y, nb_epoch=100, batch_size=1, show_accuracy=False)
score = model.evaluate(X2, Y2, batch_size=1)
preds = model.predict(X2, batch_size=1, verbose=0)


main(Y2, preds)

# plt.plot(xrange(0, 441), preds, label='Observed')
# plt.plot(xrange(0, 441), Y2, label='Expected')
# plt.xlabel('Data Points')
# plt.ylabel('PM 2.5')
# plt.legend(loc='upper right')
# plt.show()

# Per-column reads of the training data (each call re-opens the same CSV file).
A = pd.read_csv('Data/Train/Train_Combine.csv', usecols=['PM 2.5'])
B = pd.read_csv('Data/Train/Train_Combine.csv', usecols=['T'])
C = pd.read_csv('Data/Train/Train_Combine.csv', usecols=['TM'])
D = pd.read_csv('Data/Train/Train_Combine.csv', usecols=['Tm'])
E = pd.read_csv('Data/Train/Train_Combine.csv', usecols=['SLP'])
F = pd.read_csv('Data/Train/Train_Combine.csv', usecols=['H'])
G = pd.read_csv('Data/Train/Train_Combine.csv', usecols=['VV'])
H = pd.read_csv('Data/Train/Train_Combine.csv', usecols=['VM'])
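The eight single-column reads above open the same CSV eight times (and do not read the 'V' column). An equivalent single read that yields the same one-column frames, assuming the same file layout:

import pandas as pd

cols = ['PM 2.5', 'T', 'TM', 'Tm', 'SLP', 'H', 'VV', 'VM']
train = pd.read_csv('Data/Train/Train_Combine.csv', usecols=cols)
A, B, C, D, E, F, G, H = (train[[c]] for c in cols)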
Example #8
from sklearn.neighbors import KNeighborsRegressor, NearestNeighbors
import pandas as pd
from sklearn import metrics
from Confuse import main

X = pd.read_csv('Train/Train_Combine.csv', usecols=[
                'T', 'TM', 'Tm', 'SLP', 'H', 'VV', 'V', 'VM'])
Y = pd.read_csv('Train/Train_Combine.csv', usecols=['PM 2.5'])

X = X.values
Y = Y.values

X2 = pd.read_csv('Test/Test_Combine.csv', usecols=[
                 'T', 'TM', 'Tm', 'SLP', 'H', 'VV', 'V', 'VM'])
Y2 = pd.read_csv('Test/Test_Combine.csv', usecols=['PM 2.5'])

X2 = X2.values
Y2 = Y2.values


knn = KNeighborsRegressor(
    n_neighbors=10, algorithm='auto', leaf_size=30, weights='uniform')
knn.fit(X, Y)
# NearestNeighbors is unsupervised, so it is fitted on X only (for neighbour lookups).
nn = NearestNeighbors(n_neighbors=10, algorithm='auto', leaf_size=30)
nn.fit(X)

preds = knn.predict(X2)
err = metrics.mean_absolute_error(Y2, preds) * 100  # note: MAE scaled by 100
print("Mean Absolute Error: %f" % err)
main(Y2, preds)
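The NearestNeighbors model above is fitted but never queried; one optional use is to inspect which training rows sit closest to a test sample. An illustrative addition reusing nn, X2, and Y from the example above (not in the original):

distances, indices = nn.kneighbors(X2[:1])  # 10 nearest training rows for the first test sample
print("Nearest training row indices:", indices[0])
print("Distances:", distances[0])
print("Their PM 2.5 values:", Y[indices[0]].ravel())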
Example #9
from sklearn.neighbors import KNeighborsRegressor, NearestNeighbors
import pandas as pd
from sklearn import metrics
from Confuse import main

X = pd.read_csv('Data/Train/Train_Combine.csv',
                usecols=['T', 'TM', 'Tm', 'SLP', 'H', 'VV', 'V', 'VM'])
Y = pd.read_csv('Data/Train/Train_Combine.csv', usecols=['PM 2.5'])

X = X.values
Y = Y.values

X2 = pd.read_csv('Data/Test/Test_Combine.csv',
                 usecols=['T', 'TM', 'Tm', 'SLP', 'H', 'VV', 'V', 'VM'])
Y2 = pd.read_csv('Data/Test/Test_Combine.csv', usecols=['PM 2.5'])

X2 = X2.values
Y2 = Y2.values

knn = KNeighborsRegressor(n_neighbors=10,
                          algorithm='auto',
                          leaf_size=30,
                          weights='uniform')
knn.fit(X, Y)
# NearestNeighbors is unsupervised, so it is fitted on X only (for neighbour lookups).
nn = NearestNeighbors(n_neighbors=10, algorithm='auto', leaf_size=30)
nn.fit(X)

preds = knn.predict(X2)
err = metrics.mean_absolute_error(Y2, preds) * 100  # note: MAE scaled by 100
print("Mean Absolute Error: %f" % err)
main(Y2, preds)
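Every example calls main from a local Confuse module whose source is not shown here. A hypothetical stand-in, purely to make the snippets self-contained: it bins the continuous PM 2.5 values into coarse categories and reports a confusion matrix and categorical accuracy (the bin edges and behaviour are assumptions, not the actual Confuse implementation):

import numpy as np
from sklearn.metrics import confusion_matrix, accuracy_score


def main(y_true, y_pred, bins=(30, 60, 90, 120, 250)):
    """Bin PM 2.5 values and compare predicted vs. true categories (illustrative only)."""
    true_cat = np.digitize(np.asarray(y_true).ravel(), bins)
    pred_cat = np.digitize(np.asarray(y_pred).ravel(), bins)
    print(confusion_matrix(true_cat, pred_cat))
    print("Categorical accuracy: %.3f" % accuracy_score(true_cat, pred_cat))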