Example No. 1
from collections import namedtuple

from sklearn.metrics import confusion_matrix, matthews_corrcoef
from sklearn.metrics import classification_report as class_re


def print_score(no_nca_list, Y_train, Y_test):
    """ The function computes and returns the scores of the models and the prediction performance """

    target_names = ["class 0", "class 1"]
    no_nca = namedtuple("no_nca", [
        "grid_score", "grid_params", "grid_confusion", "tr_report",
        "te_report", "train_mat", "grid_matthews", "grid_train_confusion"
    ])
    # Model comparison
    grid_score = no_nca_list.fitted_grid.best_score_
    grid_params = no_nca_list.fitted_grid.best_params_

    # Training scores
    grid_train_confusion = confusion_matrix(Y_train, no_nca_list.grid_train_Y)
    grid_tr_report = class_re(Y_train,
                              no_nca_list.grid_train_Y,
                              target_names=target_names,
                              output_dict=True)
    train_mat = matthews_corrcoef(Y_train, no_nca_list.grid_train_Y)

    # Test metrics grid
    grid_confusion = confusion_matrix(Y_test, no_nca_list.y_grid)
    grid_matthews = matthews_corrcoef(Y_test, no_nca_list.y_grid)
    grid_te_report = class_re(Y_test,
                              no_nca_list.y_grid,
                              target_names=target_names,
                              output_dict=True)

    everything = no_nca(grid_score, grid_params, grid_confusion,
                        grid_tr_report, grid_te_report, train_mat,
                        grid_matthews, grid_train_confusion)

    return everything
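
This variant expects no_nca_list to bundle a fitted GridSearchCV together with its train- and test-set predictions (attributes fitted_grid, grid_train_Y and y_grid). A minimal sketch of how such an object could be built and passed in; the NoNcaInput container, the toy data and the SVC search space are assumptions for illustration, not part of the original code.

from collections import namedtuple

from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.svm import SVC

# Hypothetical container mirroring the attributes print_score reads
NoNcaInput = namedtuple("NoNcaInput", ["fitted_grid", "grid_train_Y", "y_grid"])

X, Y = make_classification(n_samples=200, n_features=10, random_state=0)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=0)

grid = GridSearchCV(SVC(), {"C": [0.1, 1, 10]}, cv=5)
grid.fit(X_train, Y_train)

no_nca_list = NoNcaInput(fitted_grid=grid,
                         grid_train_Y=grid.predict(X_train),
                         y_grid=grid.predict(X_test))
scores = print_score(no_nca_list, Y_train, Y_test)
print(scores.grid_params, scores.grid_matthews)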
Example No. 2
from collections import namedtuple

from sklearn.metrics import confusion_matrix, matthews_corrcoef
from sklearn.metrics import classification_report as class_re


def print_score(Y_test,
                y_grid,
                train_predicted,
                Y_train,
                test_index=None,
                train_index=None,
                mode=None):
    """ The function prints the scores of the models and the prediction performance """
    score_tuple = namedtuple("scores", [
        "test_confusion", "tr_report", "te_report", "train_mat", "test_mat",
        "train_confusion"
    ])

    target_names = ["class 0", "class 1"]

    # Look only at the scores of the samples predicted by all 3 models:
    # drop the positions listed in test_index and train_index
    if mode:
        Y_test = Y_test.iloc[[
            x for x in range(len(Y_test)) if x not in test_index
        ]]
        Y_train = Y_train.iloc[[
            x for x in range(len(Y_train)) if x not in train_index
        ]]
        y_grid = [y_grid[x] for x in range(len(y_grid)) if x not in test_index]
        train_predicted = [
            train_predicted[x] for x in range(len(train_predicted))
            if x not in train_index
        ]

    # Training scores
    train_confusion = confusion_matrix(Y_train, train_predicted)
    train_matthews = matthews_corrcoef(Y_train, train_predicted)
    # print(f"Y_train : {Y_train}, predicted: {train_predicted}")
    tr_report = class_re(Y_train,
                         train_predicted,
                         target_names=target_names,
                         output_dict=True)

    # Test metrics
    test_confusion = confusion_matrix(Y_test, y_grid)
    test_matthews = matthews_corrcoef(Y_test, y_grid)
    te_report = class_re(Y_test,
                         y_grid,
                         target_names=target_names,
                         output_dict=True)

    all_scores = score_tuple(test_confusion, tr_report, te_report,
                             train_matthews, test_matthews, train_confusion)

    return all_scores
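
Because the filtering is positional (.iloc), Y_train and Y_test are expected to be pandas Series, while test_index and train_index are plain positional indices to exclude when mode is truthy. A small usage sketch on toy data; the concrete values and the mode flag are assumptions for illustration.

import pandas as pd

Y_test = pd.Series([0, 1, 1, 0, 1])
y_grid = [0, 1, 0, 0, 1]            # test-set predictions
Y_train = pd.Series([1, 0, 1, 1, 0, 0])
train_predicted = [1, 0, 1, 0, 0, 0]

# Exclude position 2 of the test set and position 3 of the training set
scores = print_score(Y_test, y_grid, train_predicted, Y_train,
                     test_index=[2], train_index=[3], mode="consensus")
print(scores.test_mat, scores.train_mat)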
Example No. 3
from sklearn.metrics import confusion_matrix, matthews_corrcoef
from sklearn.metrics import classification_report as class_re


def print_score(fitted_grid, fitted_random, Y_test, y_random, y_grid,
                random_train_predicted, grid_train_predicted, Y_train):
    """ The function computes and returns the scores of the models and the prediction performance """
    target_names = ["class 0", "class 1"]
    # Model comparison
    grid_score = fitted_grid.best_score_
    grid_params = fitted_grid.best_params_
    random_score = fitted_random.best_score_
    random_params = fitted_random.best_params_

    # Training scores
    random_train_confusion = confusion_matrix(Y_train, random_train_predicted)
    grid_train_confusion = confusion_matrix(Y_train, grid_train_predicted)
    g_train_matthews = matthews_corrcoef(Y_train, grid_train_predicted)
    r_train_matthews = matthews_corrcoef(Y_train, random_train_predicted)
    random_tr_report = class_re(Y_train,
                                random_train_predicted,
                                target_names=target_names,
                                output_dict=True)
    grid_tr_report = class_re(Y_train,
                              grid_train_predicted,
                              target_names=target_names,
                              output_dict=True)
    # Test metrics
    random_confusion = confusion_matrix(Y_test, y_random)
    random_matthews = matthews_corrcoef(Y_test, y_random)
    random_te_report = class_re(Y_test,
                                y_random,
                                target_names=target_names,
                                output_dict=True)

    grid_matthews = matthews_corrcoef(Y_test, y_grid)
    grid_confusion = confusion_matrix(Y_test, y_grid)
    grid_te_report = class_re(Y_test,
                              y_grid,
                              target_names=target_names,
                              output_dict=True)

    return (grid_score, grid_params, grid_confusion, random_tr_report,
            random_te_report, grid_te_report, grid_tr_report, grid_matthews,
            random_score, random_params, random_confusion, random_matthews,
            random_train_confusion, grid_train_confusion, r_train_matthews,
            g_train_matthews)
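
This variant compares a fitted GridSearchCV against a fitted RandomizedSearchCV on the same split and returns a flat 16-element tuple. A sketch of how the two searches and their predictions might be produced before calling it; the toy data, the RandomForestClassifier and the search spaces are assumptions for illustration.

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import (GridSearchCV, RandomizedSearchCV,
                                     train_test_split)

X, Y = make_classification(n_samples=200, n_features=10, random_state=0)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=0)

fitted_grid = GridSearchCV(RandomForestClassifier(random_state=0),
                           {"n_estimators": [50, 100]}, cv=5).fit(X_train, Y_train)
fitted_random = RandomizedSearchCV(RandomForestClassifier(random_state=0),
                                   {"n_estimators": [50, 100, 200]},
                                   n_iter=2, cv=5, random_state=0).fit(X_train, Y_train)

results = print_score(fitted_grid, fitted_random, Y_test,
                      fitted_random.predict(X_test), fitted_grid.predict(X_test),
                      fitted_random.predict(X_train), fitted_grid.predict(X_train),
                      Y_train)
grid_score, grid_params = results[0], results[1]   # first two of 16 values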
Example No. 4
from collections import namedtuple

from sklearn.metrics import confusion_matrix, matthews_corrcoef
from sklearn.metrics import classification_report as class_re


def print_score(fitted_grid, Y_test, y_grid, grid_train_predicted, Y_train,
                X_test):
    """ The function computes and returns the scores of the models and the prediction performance """
    score_tuple = namedtuple("scores", [
        "grid_score", "grid_params", "grid_confusion", "tr_report",
        "te_report", "train_mat", "grid_matthews", "grid_train_confusion",
        "grid_r2"
    ])
    target_names = ["class 0", "class 1"]
    # Model comparison
    grid_score = fitted_grid.best_score_
    grid_params = fitted_grid.best_params_
    # Note: for a classifier, best_estimator_.score returns mean accuracy rather than R^2
    grid_r2 = fitted_grid.best_estimator_.score(X_test, Y_test)

    # Training scores
    grid_train_confusion = confusion_matrix(Y_train, grid_train_predicted)
    g_train_matthews = matthews_corrcoef(Y_train, grid_train_predicted)
    grid_tr_report = class_re(Y_train,
                              grid_train_predicted,
                              target_names=target_names,
                              output_dict=True)
    # Test metrics
    grid_confusion = confusion_matrix(Y_test, y_grid)
    grid_matthews = matthews_corrcoef(Y_test, y_grid)
    grid_te_report = class_re(Y_test,
                              y_grid,
                              target_names=target_names,
                              output_dict=True)

    all_scores = score_tuple(grid_score, grid_params, grid_confusion,
                             grid_tr_report, grid_te_report, g_train_matthews,
                             grid_matthews, grid_train_confusion, grid_r2)

    return all_scores
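
Because classification_report is called with output_dict=True, the tr_report and te_report fields of the returned namedtuple are nested dictionaries keyed by the target names. A short sketch of reading the results back out, assuming fitted_grid, the predictions and the train/test split already exist under the names used above.

all_scores = print_score(fitted_grid, Y_test, y_grid,
                         grid_train_predicted, Y_train, X_test)

print(all_scores.grid_params)                         # best hyperparameters
print(all_scores.grid_r2)                             # test-set accuracy
print(all_scores.te_report["class 1"]["f1-score"])    # per-class metric
print(all_scores.te_report["weighted avg"]["recall"])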