from sklearn import metrics

def measure_performance(est, X, y):
    """Print standard regression metrics for a fitted estimator."""
    y_pred = est.predict(X)
    print("Explained variance: {0:.5f}".format(metrics.explained_variance_score(y, y_pred)))
    print("Mean abs error:     {0:.5f}".format(metrics.mean_absolute_error(y, y_pred)))
    print("Mean squared error: {0:.5f}".format(metrics.mean_squared_error(y, y_pred)))
    print("R2 score:           {0:.5f}".format(metrics.r2_score(y, y_pred)))
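A minimal usage sketch (the dataset and estimator below are illustrative assumptions, not from the original code; any fitted scikit-learn regressor works the same way):

from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

# Synthetic regression problem, just to exercise measure_performance.
X, y = make_regression(n_samples=200, n_features=5, noise=10.0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

reg = LinearRegression().fit(X_train, y_train)
measure_performance(reg, X_test, y_test)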
def reportPerformance(self, X, y):
    """Print the same regression metrics for the wrapped regressor self.reg."""
    y_pred = self.reg.predict(X)
    print("Explained variance: {0:.5f}".format(metrics.explained_variance_score(y, y_pred)))
    print("Mean abs error:     {0:.5f}".format(metrics.mean_absolute_error(y, y_pred)))
    print("Mean squared error: {0:.5f}".format(metrics.mean_squared_error(y, y_pred)))
    print("R2 score:           {0:.5f}".format(metrics.r2_score(y, y_pred)))
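reportPerformance expects an enclosing class that provides self.reg; that class is not shown in the original. A minimal hypothetical wrapper (reusing reg, X_test, and y_test from the sketch above) could be:

class ModelWrapper:
    """Hypothetical container; the original enclosing class is not shown."""
    def __init__(self, reg):
        self.reg = reg  # any fitted scikit-learn regressor

# Attach the function above as a method of the hypothetical class.
ModelWrapper.reportPerformance = reportPerformance

ModelWrapper(reg).reportPerformance(X_test, y_test)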
prediction_.extend(prediction)
verbose('----------\n')
verbose("Evaluation")
if opts.mode in ['age', 'gender']:
    # The old sklearn.metrics.metrics module path was removed; import from sklearn.metrics.
    from sklearn.metrics import (precision_score, recall_score, confusion_matrix,
                                 classification_report, accuracy_score, f1_score)
    # Computing performance; for multiclass targets (e.g. age ranges), pass an
    # explicit average= (such as average='macro') to precision/recall/F-score.
    print('Accuracy  :', accuracy_score(y_, prediction_))
    print('Precision :', precision_score(y_, prediction_))
    print('Recall    :', recall_score(y_, prediction_))
    print('F-score   :', f1_score(y_, prediction_))
    print('\nClassification report:\n', classification_report(y_, prediction_))
    print('\nConfusion matrix:\n', confusion_matrix(y_, prediction_))
else:
    from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
    print('Mean Abs Error :', mean_absolute_error(y_, prediction_))
    print('Mean Sqr Error :', mean_squared_error(y_, prediction_))
    print('R2 score       :', r2_score(y_, prediction_))
# Plots: a working confusion-matrix plot sketch follows below.
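The commented-out plotting stub in the original was broken (a duplicated, empty xlabel call and no image ever drawn). A working sketch of the intended confusion-matrix plot, assuming matplotlib is installed and y_ / prediction_ hold the class labels:

import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

cm = confusion_matrix(y_, prediction_)  # rows = true labels, columns = predicted
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.title('Confusion matrix')
plt.colorbar()
plt.xlabel('Predicted category')
plt.ylabel('True category')
plt.show()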
from sklearn.metrics import mean_absolute_error, mean_squared_error

def get_errors(forecast_actual_data, forecast_data):
    """Return (MAE, MSE) for a forecast, each rounded to two decimal places."""
    return (round(mean_absolute_error(forecast_actual_data, forecast_data), 2),
            round(mean_squared_error(forecast_actual_data, forecast_data), 2))
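A quick usage sketch (the numbers are illustrative):

actual   = [3.0, 5.0, 2.5, 7.0]
forecast = [2.5, 5.0, 3.0, 8.0]

mae, mse = get_errors(actual, forecast)
print('MAE:', mae)  # 0.5
print('MSE:', mse)  # 0.38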