def evaluate_crossval_prediction(model, X, y, outfile="", cv=None, robust=False):
    """Evaluate `model` on (X, y) via cross-validated prediction.

    Runs cross_val_predict, prints a summary of fit statistics and error
    metrics, plots observed vs. predicted values, and returns the
    cross-validated predictions.

    Parameters
    ----------
    model : estimator with fit/predict (sklearn-compatible).
    X, y : features and target; y must support .mean() and len().
    outfile : output path forwarded to plot.plot_prediction ("" by default).
    cv : cross-validation splitter; None (the default) means leave-one-out,
        matching the previous `cv=LeaveOneOut()` default.
    robust : forwarded to pred_stat and plotting (robust regression line).

    Returns
    -------
    Array of cross-validated predictions, one per sample in y.
    """
    if cv is None:
        # Instantiate per call rather than sharing one object as a default
        # argument evaluated at definition time.
        cv = LeaveOneOut()
    predicted = cross_val_predict(model, X, y, cv=cv)
    p_value, r_2, residual, regline = pred_stat(y, predicted, robust=robust)

    # Compute the error metrics once instead of recomputing per print field.
    mse = mean_squared_error(y_pred=predicted, y_true=y)
    # Baseline: predicting the mean of y for every sample.
    mse_baseline = mean_squared_error(np.repeat(y.mean(), len(y)), y)
    # Explained variance in percent; the original's -mse / -mse_baseline
    # double negation cancels, so it is written plainly here.
    expl_var = (1 - mse / mse_baseline) * 100

    # print(...) with a single argument behaves identically on Python 2 and 3,
    # unlike the former Python-2-only `print` statement.
    print("R2=" + "{:.3f}".format(r_2) + " R=" + "{:.3f}".format(np.sqrt(r_2))
          + " p=" + "{:.6f}".format(p_value)
          + " Expl. Var.: " + "{:.1f}".format(expl_var) + "%"
          + " Expl. Var.2: " + "{:.1f}".format(
              explained_variance_score(y_pred=predicted, y_true=y) * 100) + "%"
          + " MSE=" + "{:.3f}".format(mse)
          + " RMSE=" + "{:.3f}".format(np.sqrt(mse))
          + " MAE=" + "{:.3f}".format(mean_absolute_error(y_pred=predicted, y_true=y))
          + " MedAE=" + "{:.3f}".format(median_absolute_error(y_pred=predicted, y_true=y))
          + " R^2=" + "{:.3f}".format(r2_score(y_pred=predicted, y_true=y)))

    plot.plot_prediction(y, predicted, outfile, robust=robust, sd=True,
                         text="$R2$=" + "{:.3f}".format(r_2)
                              + " p=" + "{:.3f}".format(p_value)
                              + " Expl. Var.: " + "{:.1f}".format(expl_var) + "%")
    return predicted
def evaluate_prediction(model, X, y, orig_mean=None, outfile="", robust=False, covar=None):
    """Evaluate a fitted `model` on (X, y) without cross-validation.

    Predicts y from X, prints fit statistics and error metrics, plots
    observed vs. predicted values, and returns the predictions.

    Parameters
    ----------
    model : fitted estimator with a predict method.
    X, y : features and target; y must support .mean() and len().
    orig_mean : baseline mean used for the explained-variance denominator;
        when None, y.mean() is used. (Tested with `is not None`, so an
        explicit baseline of 0.0 is honored rather than silently ignored.)
    outfile : output path forwarded to plot.plot_prediction.
    robust : forwarded to pred_stat and plotting (robust regression line).
    covar : covariate list forwarded to plotting; None (the default) means
        an empty list. Avoids the shared-mutable-default-argument pitfall.

    Returns
    -------
    Array of model predictions, one per sample in y.
    """
    if covar is None:
        covar = []
    predicted = model.predict(X)
    p_value, r_2, residual, regline = pred_stat(y, predicted, robust=robust)

    # Bug fix: the original `if orig_mean:` treated a baseline mean of 0 as
    # "not given" and fell back to y.mean(); test identity against None.
    if orig_mean is not None:
        y_base = orig_mean
    else:
        y_base = y.mean()

    # Compute the error metrics once instead of recomputing per print field.
    mse = mean_squared_error(y_pred=predicted, y_true=y)
    mse_baseline = mean_squared_error(np.repeat(y_base, len(y)), y)
    # Explained variance in percent; the original's -mse / -mse_baseline
    # double negation cancels, so it is written plainly here.
    expl_var = (1 - mse / mse_baseline) * 100

    # print(...) with a single argument behaves identically on Python 2 and 3,
    # unlike the former Python-2-only `print` statement.
    print("R2=" + "{:.3f}".format(r_2) + " R=" + "{:.3f}".format(np.sqrt(r_2))
          + " p=" + "{:.6f}".format(p_value)
          + " Expl. Var.: " + "{:.1f}".format(expl_var) + "%"
          + " Expl. Var.2: " + "{:.1f}".format(
              explained_variance_score(y_pred=predicted, y_true=y) * 100) + "%"
          + " MSE=" + "{:.3f}".format(mse)
          + " RMSE=" + "{:.3f}".format(np.sqrt(mse))
          + " MAE=" + "{:.3f}".format(mean_absolute_error(y_pred=predicted, y_true=y))
          + " MedAE=" + "{:.3f}".format(median_absolute_error(y_pred=predicted, y_true=y))
          + " R^2=" + "{:.3f}".format(r2_score(y_pred=predicted, y_true=y)))

    plot.plot_prediction(y, predicted, outfile, robust=robust, sd=True, covar=covar,
                         text="$R^2$ = " + "{:.3f}".format(r_2)
                              + " p = " + "{:.3f}".format(p_value)
                              + " Expl. Var.: " + "{:.1f}".format(expl_var))
    return predicted
# NOTE(review): this fragment references names defined outside this view
# (i, predicted, y, df, nested_scores_train, nested_scores_test) — it is
# presumably the tail of a nested-cross-validation script; logic left intact,
# only log-string/comment typos fixed ("crossvaludation" -> "crossvalidation").
i = i + 1
predicted /= 2  # to create average of the two predictions we had
print("*** Score on mean as model:\t"
      + str(-mean_squared_error(np.repeat(y.mean(), len(y)), y)))
print("** Mean score in the inner crossvalidation (inner_cv):\t"
      + str(nested_scores_train.mean()))
print("** Mean Nested Crossvalidation Score (outer_cv):\t"
      + str(nested_scores_test.mean()))
print("Explained Variance: "
      + str(1 - nested_scores_test.mean()
            / -mean_squared_error(np.repeat(y.mean(), len(y)), y)))
print("Correlation: " + str(np.corrcoef(y, predicted)[0, 1]))
plot.plot_prediction(y, predicted, sd=True, outfile='L1SO_all.pdf')
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Per-study breakdown for the 'bochum' subsample.
study = 'bochum'
plot.plot_prediction(y[df.study == study], predicted[df.study == study],
                     sd=True, outfile='L1SO_bochum.pdf')
print("*** Score on mean as model:\t" + str(-mean_squared_error(
    np.repeat(y[df.study == study].mean(), len(y[df.study == study])),
    y[df.study == study])))
print("** Mean score in the inner crossvalidation (inner_cv):\t"
      + str(nested_scores_train[0].mean()))
print("** Mean Nested Crossvalidation Score (outer_cv):\t"
      + str(nested_scores_test[0].mean()))