look_back)

# Train the model
regressor.fit(X_train, y_train.ravel())

# Predict values
predicted_y_value = regressor.predict(X_test)
if label_target == label:
    y_test_predictions.append(predicted_y_value)
X_test_new_row = np.append(X_test_new_row, predicted_y_value)
X_test_new_row = X_test_new_row.reshape(1, no_of_features * look_back)
X_test = np.concatenate((X_test, X_test_new_row), axis=0)
X_test = np.delete(X_test, 0, axis=0)

# Calculate the RMSE between ground truth and predictions and
# add it to the CSV file
rmse_val = rmse(y_test, np.array(y_test_predictions))
csv_file.add_row([
    company_name, regressor_name, str(len(y_test)), label, str(rmse_val)
])

# Plot the graph comparing ground truth with predictions
plot_graph(
    y_test, y_test_predictions, [i for i in range(len(y_test))],
    directory + company_name + "_" + regressor_name + "_" + label + ".png")
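# The snippet above calls a two-argument rmse(actual, predicted) helper that is
# not shown. A minimal numpy sketch of such a helper (an assumption, not the
# project's own code):
import numpy as np

def rmse(actual, predicted):
    # Root-mean-square error between two equal-length arrays.
    actual = np.asarray(actual, dtype=float).ravel()
    predicted = np.asarray(predicted, dtype=float).ravel()
    return np.sqrt(np.mean((actual - predicted) ** 2))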
def error(model, ratings_test):
    return rmse([r - model.predict(uid, iid) for uid, iid, r in ratings_test])
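# Note that error() passes a list of residuals, so it assumes an rmse variant
# that takes a single sequence of errors rather than two arrays. A minimal
# sketch of that variant (assumed, not the project's own helper):
import numpy as np

def rmse(errors):
    # Root-mean-square of a sequence of residuals.
    errors = np.asarray(errors, dtype=float)
    return np.sqrt(np.mean(errors ** 2))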
for m in range(nens):
    filename = workdir+'/'+casename+'/'+'{:04d}'.format(m+1)+'/'+name+'.bin'
    psik = util.read_field(filename, nkx, nky, nz)
    # varens[m, :, :, :] = util.spec2grid(util.spec_bandpass(convertor(psik), krange, s))
    varens[m, :, :, :] = util.spec2grid(convertor(psik))

varens = np.roll(np.roll(varens, -40, axis=1), 60, axis=2)  # shift domain position for better plotting

## some error statistics
# for m in range(nens):
#     varmean = np.mean(varens[m:m+1, :, :, lv], axis=0)
#     rmse = util.rmse(varmean, var[:, :, lv])
#     pcorr = util.pattern_correlation(varmean, var[:, :, lv])
#     print('error = {:7.2f}'.format(rmse))
#     print('pattern correlation = {:7.2f}'.format(pcorr))
varmean = np.mean(varens[:, :, :, lv], axis=0)
rmse = util.rmse(varmean, var[:, :, lv])
pcorr = util.pattern_correlation(varmean, var[:, :, lv])
print('error = {:7.2f}'.format(rmse))
print('pattern correlation = {:7.2f}'.format(pcorr))
sprd = util.sprd(varens[:, :, :, lv])
print('ensemble spread = {:7.2f}'.format(sprd))

cmap = [plt.cm.jet(m) for m in np.linspace(0, 1, nens)]
for m in range(nens):
    out = util.smooth(varens[m, :, :, lv], smth)
    ax[1].contour(ii, jj, out, clevel_highlight, colors=[cmap[m][0:3]],
                  linestyles='solid', linewidths=1)
ax[1].contour(ii, jj, out1, clevel_highlight, colors='black',
              linestyles='solid', linewidths=2)
cax = colorbar_ax(ax[1])
cax.set_visible(False)
set_axis(ax[1], varname+' ensemble')
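# util.pattern_correlation and util.sprd are project helpers; the sketches
# below show the standard definitions they presumably implement (assumed, not
# the project's actual code):
import numpy as np

def pattern_correlation(a, b):
    # Pearson correlation between two 2-D fields, computed on anomalies.
    a = a - a.mean()
    b = b - b.mean()
    return np.sum(a * b) / np.sqrt(np.sum(a ** 2) * np.sum(b ** 2))

def sprd(ens):
    # Ensemble spread: root of the mean ensemble variance, members on axis 0.
    return np.sqrt(np.mean(np.var(ens, axis=0)))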
def evaluate_burgers(X_eval, U_eval, model: Scalar_PDE):
    U_hat = model.predict(X_eval)
    return util.rmse(U_eval, U_hat)
import xgboost as xgb
import csv
import sys
import pandas as pd  # needed for pd.read_csv below
# pp (preprocessing), train, and util (metrics) are local project modules,
# assumed to be imported elsewhere in the original file.

# preprocessing
print("start preprocessing ...")
(dfTrain, dfRawTrain, dfAdr_train, dfIsCanceled_train, dfAdrReal_train,
 dfValid, dfRawValid, dfAdr_valid, dfIsCanceled_valid, dfAdrReal_valid,
 dfTest, dfRawTest) = pp.preprocessing('../data/train.csv', '../data/test.csv')
print("preprocessing done ...")

print("start training ...")
# adr, is_canceled combined
modelList_adr_real, dtrain_adr_real, dvalid_adr_real = train.train_adr(
    dfTrain, dfAdrReal_train, dfValid, dfAdrReal_valid, 5, 10000)
print("training done ...")

# compute error
Ein_adr_real = util.rmse(dtrain_adr_real.get_label(),
                         train.predict_adr_ensemble(dtrain_adr_real, modelList_adr_real))
Eval_adr_real = util.rmse(dvalid_adr_real.get_label(),
                          train.predict_adr_ensemble(dvalid_adr_real, modelList_adr_real))
print("Ein_adr_real: ", Ein_adr_real)
print("Eval_adr_real: ", Eval_adr_real)

print("computing revenue ...")
# adr, is_canceled combined
revenue_train = train.predict_revenue_ensemble_adr_isCanceled_combined(
    dfTrain, dfRawTrain, modelList_adr_real, 0)
revenue_valid = train.predict_revenue_ensemble_adr_isCanceled_combined(
    dfValid, dfRawValid, modelList_adr_real, 1)
revenue_test = train.predict_revenue_ensemble_adr_isCanceled_combined(
    dfTest, dfRawTest, modelList_adr_real, 2)
# print("train_predict: ", revenue_train)
print("computing revenue done ...")

dfTrain_label = pd.read_csv('../data/train_label.csv')
train_label = dfTrain_label['label'].tolist()
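# train.predict_adr_ensemble is a project helper whose code is not shown. A
# plausible minimal sketch is a plain average over the trained boosters in the
# model list (an assumption about its behavior, not the project's actual code):
import numpy as np

def predict_adr_ensemble(dmatrix, model_list):
    # Average the predictions of each trained xgboost Booster on a DMatrix.
    preds = np.column_stack([m.predict(dmatrix) for m in model_list])
    return preds.mean(axis=1)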
# make sure we have a file
if exp_grey_img is None or exp_bw_img is None or gt_grey_img is None or gt_bw_img is None:
    continue

# reshape if necessary
if exp_grey_img.shape != gt_grey_img.shape:
    exp_grey_img = cv2.resize(exp_grey_img, (gt_grey_img.shape[1], gt_grey_img.shape[0]))
if exp_bw_img.shape != gt_bw_img.shape:
    exp_bw_img = cv2.resize(exp_bw_img, (gt_bw_img.shape[1], gt_bw_img.shape[0]))

# rmse on greyscale masks
r = rmse(exp_grey_img, gt_grey_img)
cumulative_rmse += r

'''
# convert into bw
# thresholding less than
exp_l_channel[exp_l_channel < 50] = 0
# everything else to white
exp_l_channel[exp_l_channel > 0] = 255
'''

# read in and convert
exp_mask = cv2.cvtColor(exp_bw_img, cv2.COLOR_BGR2LAB)
exp_l_channel, _, _ = cv2.split(exp_mask)
gt_mask = cv2.cvtColor(gt_bw_img, cv2.COLOR_BGR2LAB)
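# One gotcha for the rmse call above: OpenCV loads masks as uint8, and
# subtracting uint8 arrays wraps around instead of going negative. A minimal
# image-safe rmse sketch (an assumed helper, not the project's own code) casts
# to float before differencing:
import numpy as np

def rmse(img_a, img_b):
    # RMSE between two same-shaped images; float cast avoids uint8 wraparound.
    diff = img_a.astype(np.float64) - img_b.astype(np.float64)
    return np.sqrt(np.mean(diff ** 2))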
            # Predict the average for this user.
            query['rating'] = ((float(users[cur_user]['total']) /
                                users[cur_user]['count']) + prediction) / 2
        else:
            # print("Book not rated... f**k")
            if users[cur_user]['count'] == 0:
                # Perhaps we did not have any ratings in the training set.
                # In this case, make a global mean prediction.
                query['rating'] = mean_rating
            else:
                # Predict the average for this user.
                query['rating'] = float(users[cur_user]['total']) / users[cur_user]['count']
            book_except_count += 1
    else:
        # print("User not rated... f**k")
        query['rating'] = mean_rating
        user_except_count += 1
    count += 1

print("Book Excepted " + str(book_except_count))
print("User Excepted " + str(user_except_count))

# Write the prediction file.
# util.write_predictions(test_queries, pred_filename)
print(util.rmse(test_queries))
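# The branching above implements a fallback hierarchy: blend the item-level
# prediction with the user mean when both exist, fall back to the user mean,
# then to the global mean. A compact sketch of the same logic (names are
# illustrative, not from the original script):
def predict_rating(user, item_prediction, mean_rating, book_was_rated):
    # user is a dict with 'total' and 'count', or None if the user is unseen.
    if user is None:
        return mean_rating                      # unseen user: global mean
    if not book_was_rated:
        if user['count'] == 0:
            return mean_rating                  # no training ratings at all
        return float(user['total']) / user['count']   # user mean only
    user_mean = float(user['total']) / user['count']
    return (user_mean + item_prediction) / 2.0  # blend item prediction with user mean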