Example #1
0
        try:
            logger.info(
                f"Cross validation done, best score was {est.best_score_}")
            logger.info(f"Best params were {est.best_params_}")
            logger.info(f"Best estimator were {est.best_estimator_}")
            logger.info(f"Checking using the validation set.")
        except Exception as e:
            print(f"Logging exception: {e}")
        validation_auc_score = metrics.roc_auc_score(y_test, yhat)
        logger.info(
            f"AUC score for validation set of size {len(y_test)} is {validation_auc_score:.5f}"
        )
        logger.info(
            f"AUC score for validation set of size {len(y_test)} is {validation_auc_score:.5f}"
        )
        fig, ax, aucscore = plotting.plotROC(y_test, yhat)
        fig.savefig(
            f'figs/joachim_exercise_resampling_binarized_sparse_{name}_resampled.pdf'
        )
        est.y_test = y_test  # save for plotting ROC curve later
        est.yhat = yhat  # save for plotting ROC curve later
        est.validation_auc_score = validation_auc_score
        est.X_test = X_test
        est.y_test = y_test
        with open(
                f"joachim_exercise_resampling_binarized_sparse_{name}_resampled.pkl",
                'bw') as fid:
            pickle.dump(est, fid)
except Exception as err:
    jn.send(err)
    sleep(8)
Example #2
0
                                       pre_dispatch=processes,
                                       return_train_score=True)
    est.fit(x_re, y_re)  # I think this is redundant
    _, yhat = est.predict_proba(x_va).T
    try:
        logger.info(f"Cross validation done, best score was {est.best_score_}")
        logger.info(f"Best params were {est.best_params_}")
        logger.info(f"Best estimator were {est.best_estimator_}")
        logger.info(f"Checking using the validation set.")
    except Exception as e:
        print(f"Logging exception: {e}")
    validation_auc_score = metrics.roc_auc_score(y_va, yhat)
    logger.info(
        f"AUC score for validation set of size {len(y_va)} is {validation_auc_score:.5f}"
    )
    fig, ax, aucscore = plotting.plotROC(y_va, yhat)
    fig.savefig('figs/userMovement_cv_adaboost_roc_curve_coarse.pdf')
    est.y_va = y_va  # save for plotting ROC curve later
    est.yhat = yhat  # save for plotting ROC curve later
    est.validation_auc_score = validation_auc_score
    est.x_va = x_va
    est.y_va = y_va
    with open("userMovement_adaboost_coarse.pkl", 'bw') as fid:
        pickle.dump(est, fid)
except Exception as err:
    jn.send(err)
    sleep(8)
    raise err

jn.send(message=f"Cross validation is done.")
Example #3
0
# Extract P(class == 1) on the validation set for each fitted model.
# The grid-search objects carry their own stashed x_va attribute;
# the plain statechange model uses the shared x_va.
print('Predicted statechange')
statechange_prob = statechange.predict_proba(x_va)[:, 1]
print('Predicted statechange_prob')

rf_scores = rf_coarse.best_estimator_.predict_proba(rf_coarse.x_va)
rf_coarse_prob = rf_scores[:, 1]
print('Predicted rf_coarse_prob')

subgrid_scores = cv_subgrid_search.best_estimator_.predict_proba(
    cv_subgrid_search.x_va)
cv_subgrid_search_prob = subgrid_scores[:, 1]
print('Predicted cv_subgrid_search_prob')

sgd_scores = sgd_std_final_coarse.best_estimator_.predict_proba(
    sgd_std_final_coarse.x_va)
sgd_std_final_coarse_prob = sgd_scores[:, 1]
print('Predicted sgd_std_final_coarse_prob')

# Overlay one ROC curve per model on a shared axis.
fig, ax = plt.subplots()
roc_inputs = (
    (y_va, statechange_prob, 'Log, statechange'),
    (rf_coarse.y_va, rf_coarse_prob, 'RF, coarse'),
    (sgd_std_final_coarse.y_va, sgd_std_final_coarse_prob, 'SGD Log, coarse'),
)
for true_labels, scores, curve_label in roc_inputs:
    plotting.plotROC(true_labels, scores, ax=ax, label=curve_label,
                     alpha=0.55)
plotting.plotROC(cv_subgrid_search.y_va,
                 cv_subgrid_search_prob,
                 ax=ax,