# --- Random Forest: evaluate predicted truthMean scores ---
# normalized_mean_squared_error, compute_diffs and plot_confusion_matrix are
# project helpers (hedged sketches are given after the SVR cell below).
from sklearn.metrics import (mean_squared_error, explained_variance_score, r2_score,
                             mean_absolute_error, median_absolute_error,
                             confusion_matrix, accuracy_score, roc_auc_score,
                             precision_score, recall_score, f1_score)

nmse = normalized_mean_squared_error(truthMean_test, truthMean_pred)
mse = mean_squared_error(truthMean_test, truthMean_pred)
evs = explained_variance_score(truthMean_test, truthMean_pred)
r2 = r2_score(truthMean_test, truthMean_pred)
mae = mean_absolute_error(truthMean_test, truthMean_pred)
med_a_e = median_absolute_error(truthMean_test, truthMean_pred)

# Binarize: ground-truth labels to 0/1, predicted truthMean thresholded at 0.5.
# Note that this rebinds truthClass_test, so each evaluation cell expects the
# original string labels.
truthClass_test = [0 if t == 'no-clickbait' else 1 for t in truthClass_test]
truthClass_pred = [0 if t < 0.5 else 1 for t in truthMean_pred]

tn, fp, fn, tp = confusion_matrix(truthClass_test, truthClass_pred).ravel()
print("(TN, FP, FN, TP) = {}".format((tn, fp, fn, tp)))

compute_diffs(truthClass_test, truthClass_pred, truthMean_test, truthMean_pred)
plot_confusion_matrix(truthClass_test, truthClass_pred, title='Random Forest confusion matrix')

# Classification metrics on the binarized labels
accuracy = accuracy_score(truthClass_test, truthClass_pred)
auc = roc_auc_score(truthClass_test, truthClass_pred)
precision = precision_score(truthClass_test, truthClass_pred)
recall = recall_score(truthClass_test, truthClass_pred)
f1 = f1_score(truthClass_test, truthClass_pred)

print('----------------Regression metrics-------------------------\n')
print("Explained Variance Score is ", evs)
print("Mean Squared Error is ", mse)
print("Normalized Mean Squared Error is ", nmse)
print("Mean Absolute Error is ", mae)
print("Median Absolute Error is ", med_a_e)
print("R2 score is ", r2)
# --- Linear regression: same evaluation pipeline ---
nmse = normalized_mean_squared_error(truthMean_test, truthMean_pred)
mse = mean_squared_error(truthMean_test, truthMean_pred)
evs = explained_variance_score(truthMean_test, truthMean_pred)
r2 = r2_score(truthMean_test, truthMean_pred)
mae = mean_absolute_error(truthMean_test, truthMean_pred)
med_a_e = median_absolute_error(truthMean_test, truthMean_pred)

truthClass_test = [0 if t == 'no-clickbait' else 1 for t in truthClass_test]
truthClass_pred = [0 if t < 0.5 else 1 for t in truthMean_pred]

compute_diffs(truthClass_test, truthClass_pred, truthMean_test, truthMean_pred)

tn, fp, fn, tp = confusion_matrix(truthClass_test, truthClass_pred).ravel()
print("(TN, FP, FN, TP) = {}".format((tn, fp, fn, tp)))

plot_confusion_matrix(truthClass_test, truthClass_pred, title='Linear regression confusion matrix')

accuracy = accuracy_score(truthClass_test, truthClass_pred)
auc = roc_auc_score(truthClass_test, truthClass_pred)
precision = precision_score(truthClass_test, truthClass_pred)
recall = recall_score(truthClass_test, truthClass_pred)
f1 = f1_score(truthClass_test, truthClass_pred)

print('----------------Regression metrics-------------------------\n')
print("Explained Variance Score is ", evs)
print("Mean Squared Error is ", mse)
print("Normalized Mean Squared Error is ", nmse)
print("Mean Absolute Error is ", mae)
print("Median Absolute Error is ", med_a_e)
print("R2 score is ", r2)
# --- AdaBoost: fit on the truthMean scores, then evaluate ---
clf.fit(X_train, truthMean_train)
truthMean_pred = clf.predict(X_test)

nmse = normalized_mean_squared_error(truthMean_test, truthMean_pred)
mse = mean_squared_error(truthMean_test, truthMean_pred)
evs = explained_variance_score(truthMean_test, truthMean_pred)
r2 = r2_score(truthMean_test, truthMean_pred)
mae = mean_absolute_error(truthMean_test, truthMean_pred)
med_a_e = median_absolute_error(truthMean_test, truthMean_pred)

truthClass_test = [0 if t == 'no-clickbait' else 1 for t in truthClass_test]
truthClass_pred = [0 if t < 0.5 else 1 for t in truthMean_pred]

compute_diffs(truthClass_test, truthClass_pred, truthMean_test, truthMean_pred)
plot_confusion_matrix(truthClass_test, truthClass_pred, title='Adaboost confusion matrix')

tn, fp, fn, tp = confusion_matrix(truthClass_test, truthClass_pred).ravel()
print("(TN, FP, FN, TP) = {}".format((tn, fp, fn, tp)))

accuracy = accuracy_score(truthClass_test, truthClass_pred)
auc = roc_auc_score(truthClass_test, truthClass_pred)
precision = precision_score(truthClass_test, truthClass_pred)
recall = recall_score(truthClass_test, truthClass_pred)
f1 = f1_score(truthClass_test, truthClass_pred)

print('----------------Regression metrics-------------------------\n')
print("Explained Variance Score is ", evs)
print("Mean Squared Error is ", mse)
print("Normalized Mean Squared Error is ", nmse)
print("Mean Absolute Error is ", mae)
print("Median Absolute Error is ", med_a_e)
print("R2 score is ", r2)
# --- SVR: same evaluation pipeline ---
nmse = normalized_mean_squared_error(truthMean_test, truthMean_pred)
mse = mean_squared_error(truthMean_test, truthMean_pred)
evs = explained_variance_score(truthMean_test, truthMean_pred)
r2 = r2_score(truthMean_test, truthMean_pred)
mae = mean_absolute_error(truthMean_test, truthMean_pred)
med_a_e = median_absolute_error(truthMean_test, truthMean_pred)

truthClass_test = [0 if t == 'no-clickbait' else 1 for t in truthClass_test]
truthClass_pred = [0 if t < 0.5 else 1 for t in truthMean_pred]

tn, fp, fn, tp = confusion_matrix(truthClass_test, truthClass_pred).ravel()
print("(TN, FP, FN, TP) = {}".format((tn, fp, fn, tp)))

compute_diffs(truthClass_test, truthClass_pred, truthMean_test, truthMean_pred)
plot_confusion_matrix(truthClass_test, truthClass_pred, title='SVR confusion matrix')

accuracy = accuracy_score(truthClass_test, truthClass_pred)
auc = roc_auc_score(truthClass_test, truthClass_pred)
precision = precision_score(truthClass_test, truthClass_pred)
recall = recall_score(truthClass_test, truthClass_pred)
f1 = f1_score(truthClass_test, truthClass_pred)

print('----------------Regression metrics-------------------------\n')
print("Explained Variance Score is ", evs)
print("Mean Squared Error is ", mse)
print("Normalized Mean Squared Error is ", nmse)
print("Mean Absolute Error is ", mae)
print("Median Absolute Error is ", med_a_e)
print("R2 score is ", r2)
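# The evaluation cells above call three project helpers whose definitions are
# not part of this section: normalized_mean_squared_error, compute_diffs and
# plot_confusion_matrix. The sketches below are assumptions, not the project's
# actual implementations; they show one plausible way to fill in each helper so
# the cells run end to end.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay

def normalized_mean_squared_error(y_true, y_pred):
    # Assumed definition: MSE divided by the variance of the true scores, so
    # 0 is a perfect fit and 1 corresponds to always predicting the mean.
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return np.mean((y_true - y_pred) ** 2) / np.var(y_true)

def compute_diffs(class_test, class_pred, mean_test, mean_pred):
    # Assumed behaviour: report how far the predicted truthMean scores lie from
    # the annotated ones, split by whether the binarized label was correct.
    mean_test = np.asarray(mean_test, dtype=float)
    mean_pred = np.asarray(mean_pred, dtype=float)
    correct = np.asarray(class_test) == np.asarray(class_pred)
    diffs = np.abs(mean_test - mean_pred)
    print("Mean |truthMean error| on correctly classified posts:", diffs[correct].mean())
    print("Mean |truthMean error| on misclassified posts:", diffs[~correct].mean())

def plot_confusion_matrix(class_test, class_pred, title=''):
    # Assumed behaviour: draw the 2x2 confusion matrix with matplotlib.
    cm = confusion_matrix(class_test, class_pred)
    disp = ConfusionMatrixDisplay(cm, display_labels=['no-clickbait', 'clickbait'])
    disp.plot(cmap='Blues')
    plt.title(title)
    plt.show()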