Example #1
def evaluate(joint_model, test_data_generator_1, test_data_generator_2, pos_rate):

    # Testing: predictions from the final output head (last layer)
    pred_model_2 = models.Model(inputs=joint_model.input, outputs=joint_model.layers[-1].output)
    y_prob = pred_model_2.predict_generator(test_data_generator_2)[:, 1]
    y_test = test_data_generator_2[0][1][:, 1]

    # Evaluation
    fpr, tpr, _ = metrics.roc_curve(y_test, y_prob)
    prec, rec, _ = metrics.precision_recall_curve(y_test, y_prob)

    print('--------------------------------------------')
    print('Evaluation of test set:')
    print("AU-ROC:", "%0.4f" % metrics.auc(fpr, tpr),
          "AU-PRC:", "%0.4f" % metrics.auc(rec, prec))

    (sensitivity, specificity, PPV, NPV, f1, acc), _ = line_search_best_metric(y_test, y_prob, spec_thresh=0.95)
    alarm_rate = pos_rate * sensitivity / PPV
    print("sensitivity:", "%0.4f" % sensitivity,
          "specificity:", "%0.4f" % specificity,
          "PPV:", "%0.4f" % PPV,
          "NPV:", "%0.4f" % NPV,
          "F1 score:", "%0.4f" % f1,
          "accuracy:", "%0.4f" % acc)
    print("Alarm rate:", alarm_rate)
    print('--------------------------------------------')

    # Testing: predictions from the second-to-last layer's output head
    pred_model_1 = models.Model(inputs=joint_model.input, outputs=joint_model.layers[-2].output)
    y_prob = pred_model_1.predict_generator(test_data_generator_1)[:, 1]
    y_test = test_data_generator_1[0][1][:, 1]

    # Evaluation
    fpr, tpr, _ = metrics.roc_curve(y_test, y_prob)
    prec, rec, _ = metrics.precision_recall_curve(y_test, y_prob)

    print('--------------------------------------------')
    print('Evaluation of test set:')
    print("AU-ROC:", "%0.4f" % metrics.auc(fpr, tpr),
          "AU-PRC:", "%0.4f" % metrics.auc(rec, prec))

    (sensitivity, specificity, PPV, NPV, f1, acc), _ = line_search_best_metric(y_test, y_prob, spec_thresh=0.95)
    alarm_rate = pos_rate * sensitivity / PPV
    print("sensitivity:", "%0.4f" % sensitivity,
          "specificity:", "%0.4f" % specificity,
          "PPV:", "%0.4f" % PPV,
          "NPV:", "%0.4f" % NPV,
          "F1 score:", "%0.4f" % f1,
          "accuracy:", "%0.4f" % acc)
    print("Alarm rate:", alarm_rate)
    print('--------------------------------------------')
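
Every example in this listing calls a project-specific helper, line_search_best_metric, whose definition is not included here. Judging from the call sites, it returns a tuple of operating-point metrics plus a second value, under a specificity constraint. A minimal sketch of the assumed behaviour (not the project's actual implementation):

import numpy as np
from sklearn import metrics

def line_search_best_metric(y_test, y_prob, spec_thresh=0.95):
    """Illustrative stand-in (assumed): choose the lowest probability threshold
    whose specificity reaches spec_thresh and report the metrics there."""
    for thresh in np.unique(y_prob):  # thresholds in ascending order
        y_pred = (y_prob >= thresh).astype(int)
        tn, fp, fn, tp = metrics.confusion_matrix(y_test, y_pred, labels=[0, 1]).ravel()
        specificity = tn / (tn + fp) if (tn + fp) else 0.0
        if specificity < spec_thresh:
            continue
        sensitivity = tp / (tp + fn) if (tp + fn) else 0.0
        ppv = tp / (tp + fp) if (tp + fp) else 0.0
        npv = tn / (tn + fn) if (tn + fn) else 0.0
        f1 = metrics.f1_score(y_test, y_pred, zero_division=0)
        acc = metrics.accuracy_score(y_test, y_pred)
        # the first qualifying threshold keeps sensitivity as high as possible
        return (sensitivity, specificity, ppv, npv, f1, acc), thresh
    return (0.0, 0.0, 0.0, 0.0, 0.0, 0.0), None  # no threshold reached spec_thresh
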
Example #2

def evaluate(joint_model, test_data_generator, pos_rate, labelsTe):
    # Testing
    pred_model = models.Model(inputs=joint_model.input,
                              outputs=joint_model.layers[-1].output)
    y_prob = pred_model.predict_generator(test_data_generator)[:, 1]
    y_test = test_data_generator[0][1][:, 1]

    # Evaluation
    fpr, tpr, _ = metrics.roc_curve(y_test, y_prob)
    prec, rec, _ = metrics.precision_recall_curve(y_test, y_prob)

    print('--------------------------------------------')
    print('Evaluation of test set:')
    print("AU-ROC:", "%0.4f" % metrics.auc(fpr, tpr), "AU-PRC:",
          "%0.4f" % metrics.auc(rec, prec))

    (sensitivity, specificity, PPV, NPV, f1,
     acc), _ = line_search_best_metric(y_test, y_prob, spec_thresh=0.95)
    alarm_rate = pos_rate * sensitivity / PPV
    print("sensitivity:", "%0.4f" % sensitivity, "specificity:",
          "%0.4f" % specificity, "PPV:", "%0.4f" % PPV, "NPV:", "%0.4f" % NPV,
          "F1 score:", "%0.4f" % f1, "accuracy:", "%0.4f" % acc)
    print("Alarm rate:", alarm_rate)
    print('--------------------------------------------')

    # Rule-based baseline computed from the SpO2 output head (second-to-last layer)
    spo2_model = models.Model(inputs=joint_model.input,
                              outputs=joint_model.layers[-2].output)
    out = spo2_model.predict_generator(test_data_generator)
    y_pred = np.zeros(len(out), )

    for ind in range(len(out)):
        window = out[ind, 5:, 0]
        if_low = window <= 0.9
        if np.sum(if_low) == 5:
            y_pred[ind] = 1

    C = metrics.confusion_matrix(y_test, y_pred)
    tn = float(C[0][0])  # np.float was removed in NumPy 1.24; plain float is equivalent here
    fn = float(C[1][0])
    tp = float(C[1][1])
    fp = float(C[0][1])

    sensitivity = tp / (tp + fn) if (tp + fn) != 0 else 0
    specificity = tn / (tn + fp) if (tn + fp) != 0 else 0
    PPV = tp / (tp + fp) if (tp + fp) != 0 else 0
    NPV = tn / (tn + fn) if (tn + fn) != 0 else 0
    f1 = metrics.f1_score(y_test, y_pred)
    acc = metrics.accuracy_score(y_test, y_pred)

    print("sensitivity:", "%0.4f" % sensitivity, "specificity:",
          "%0.4f" % specificity, "PPV:", "%0.4f" % PPV, "NPV:", "%0.4f" % NPV,
          "F1 score:", "%0.4f" % f1, "accuracy:", "%0.4f" % acc)
    print("Alarm rate:", alarm_rate)
    print('--------------------------------------------')
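
The per-sample loop in this example raises an alarm only when every value of out[ind, 5:, 0] is at or below 0.90 (the "== 5" check implies a five-step window). Under that assumption, an equivalent vectorized form would be:

# Equivalent to the loop above, assuming out[:, 5:, 0] has exactly five columns
y_pred = (out[:, 5:, 0] <= 0.9).all(axis=1).astype(float)
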
Example #3

def evaluate(model, X_test, y_test, pos_rate, args):
    # Testing
    y_prob = model.predict_proba(X_test)[:, 1]
    np.savetxt('data/result/y_prob', y_prob)
    np.savetxt('data/result/y_test', y_test)

    # Evaluation
    fpr, tpr, _ = metrics.roc_curve(y_test, y_prob)
    prec, rec, _ = metrics.precision_recall_curve(y_test, y_prob)
    (sensitivity, specificity, PPV, NPV, f1,
     acc), _ = line_search_best_metric(y_test, y_prob, spec_thresh=0.95)
    alarm_rate = pos_rate * sensitivity / PPV

    print('--------------------------------------------')
    print('Evaluation of test set:')
    print("AU-ROC:", "%0.4f" % metrics.auc(fpr, tpr), "AU-PRC:",
          "%0.4f" % metrics.auc(rec, prec))
    print("sensitivity:", "%0.4f" % sensitivity, "specificity:",
          "%0.4f" % specificity, "PPV:", "%0.4f" % PPV, "NPV:", "%0.4f" % NPV,
          "F1 score:", "%0.4f" % f1, "accuracy:", "%0.4f" % acc)
    print("Alarm rate:", alarm_rate)
    print('--------------------------------------------')

    result_table = pd.DataFrame(columns=[
        'args', 'fpr', 'tpr', 'roc', 'prec', 'rec', 'prc', 'y_test', 'y_prob',
        'pos_rate'
    ])
    # DataFrame.append was removed in pandas 2.0; use pd.concat on newer pandas
    result_table = result_table.append(
        {
            'args': args.__dict__,
            'fpr': fpr,
            'tpr': tpr,
            'roc': metrics.auc(fpr, tpr),
            'prec': prec,
            'rec': rec,
            'prc': metrics.auc(rec, prec),
            'y_test': y_test,
            'y_prob': y_prob,
            'pos_rate': pos_rate
        },
        ignore_index=True)
    # save results
    result_table.to_pickle('data/result/realtime_gbtree.pkl')

    return metrics.auc(rec, prec)
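
Because this variant returns the AU-PRC, it is convenient for model selection. A hypothetical call is shown below; the names train_gbtree, X_train, y_train, X_test, y_test and args mirror the surrounding examples but are not defined in this snippet, and the function writes into data/result/, so that directory must already exist.

model = train_gbtree(X_train, y_train)   # any fitted classifier exposing predict_proba
pos_rate = float(np.mean(y_train))       # assumed: prevalence of the positive class
auprc = evaluate(model, X_test, y_test, pos_rate, args)
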
Example #4
def evaluate(model, test_data_generator, pos_rate):
    # Testing
    y_prob = model.predict_generator(test_data_generator)[:, 1]
    y_test = test_data_generator[0][1][:, 1]

    # Evaluation
    fpr, tpr, _ = metrics.roc_curve(y_test, y_prob)
    prec, rec, _ = metrics.precision_recall_curve(y_test, y_prob)
    (sensitivity, specificity, PPV, NPV, f1,
     acc), _ = line_search_best_metric(y_test, y_prob, spec_thresh=0.95)
    alarm_rate = pos_rate * sensitivity / PPV

    print('--------------------------------------------')
    print('Evaluation of test set:')
    print("AU-ROC:", "%0.4f" % metrics.auc(fpr, tpr), "AU-PRC:",
          "%0.4f" % metrics.auc(rec, prec))
    print("sensitivity:", "%0.4f" % sensitivity, "specificity:",
          "%0.4f" % specificity, "PPV:", "%0.4f" % PPV, "NPV:", "%0.4f" % NPV,
          "F1 score:", "%0.4f" % f1, "accuracy:", "%0.4f" % acc)
    print("Alarm rate:", alarm_rate)
    print('--------------------------------------------')

    result_table = pd.DataFrame(columns=[
        'model', 'fpr', 'tpr', 'roc', 'prec', 'rec', 'prc', 'y_test', 'y_prob',
        'pos_rate'
    ])
    # DataFrame.append was removed in pandas 2.0; use pd.concat on newer pandas
    result_table = result_table.append(
        {
            'model': 'LSTM',
            'fpr': fpr,
            'tpr': tpr,
            'roc': metrics.auc(fpr, tpr),
            'prec': prec,
            'rec': rec,
            'prc': metrics.auc(rec, prec),
            'y_test': y_test,
            'y_prob': y_prob,
            'pos_rate': pos_rate
        },
        ignore_index=True)

    # save results
    result_table.to_pickle('data/result/realtime_pretrain.pkl')
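
For the generator-based variants (Examples #1, #2 and #4), the labels are read as test_data_generator[0][1][:, 1], i.e. from the first batch only and in one-hot form, so the generator is presumably a Keras Sequence that yields the whole test set as a single batch. A hypothetical call with placeholder names (lstm_model and SingleBatchSequence are not from the original code):

test_gen = SingleBatchSequence(X_test, y_test_onehot)  # hypothetical Sequence yielding one batch
pos_rate = float(np.mean(y_test_onehot[:, 1]))         # assumed positive-class prevalence
evaluate(lstm_model, test_gen, pos_rate)
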
Example #5

    X_train, X_test, y_train, y_test = prepare_data(df_static,
                                                    df_dynamic,
                                                    dynamic_feature,
                                                    args=args)
    model = train_gbtree(X_train, y_train)

    # Testing
    y_prob = model.predict_proba(X_test)[:, 1]
    y_prob2 = best_ntree_score(model, X_test)

    # Evaluation
    fpr, tpr, _ = metrics.roc_curve(y_test, y_prob)
    prec, rec, _ = metrics.precision_recall_curve(y_test, y_prob)
    (sensitivity, specificity, PPV, NPV, f1,
     acc), _ = line_search_best_metric(y_test, y_prob, spec_thresh=0.95)

    FPR.append(fpr)
    TPR.append(tpr)
    PREC.append(prec)
    REC.append(rec)
    METRIC.append([sensitivity, specificity, PPV, NPV, f1, acc])

    print('--------------------------------------------')
    print('Evaluation of test set:')
    print("AU-ROC:", "%0.4f" % metrics.auc(fpr, tpr), "AU-PRC:",
          "%0.4f" % metrics.auc(rec, prec))
    print("sensitivity:", "%0.4f" % sensitivity, "specificity:",
          "%0.4f" % specificity, "PPV:", "%0.4f" % PPV, "NPV:", "%0.4f" % NPV,
          "F1 score:", "%0.4f" % f1, "accuracy:", "%0.4f" % acc)
    print('--------------------------------------------')
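
This fragment, whose enclosing function definition is not part of the excerpt, appends its per-run results to FPR, TPR, PREC, REC and METRIC, which are presumably created outside a repeated-evaluation loop. A minimal assumed driver around it might look like:

FPR, TPR, PREC, REC, METRIC = [], [], [], [], []
# ... run the block above once per data split or random seed ...
mean_sens, mean_spec, mean_ppv, mean_npv, mean_f1, mean_acc = np.mean(METRIC, axis=0)
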
Example #6
def train_gbtree(X_train, labelsTr, pos_rate, args, labelsTe):

    y_train = labelsTr['label']

    # after shuffling, labelsTr holds the shuffled label Series aligned with X_train
    X_train, labelsTr = shuffle(X_train, y_train, random_state=0)

    result_table = pd.DataFrame(columns=[
        'random_state', 'model', 'fpr', 'tpr', 'roc', 'prec', 'rec', 'prc',
        'pos_rate'
    ])

    for rs in range(1):
        classifiers = [
            CatBoostClassifier(
                verbose=0,
                # scale_pos_weight=(1 - pos_rate) / pos_rate,
                learning_rate=args.lr,
                depth=args.depth,
                l2_leaf_reg=args.l2,
                random_state=rs)
        ]
        for cls in classifiers:

            print('Round', rs)
            print('Training:', cls.__class__.__name__)
            model = cls.fit(X_train, labelsTr)
            y_prob = model.predict_proba(X_test)[:, 1]  # X_test comes from the enclosing scope, not a parameter
            y_test = labelsTe['label']
            labelsTe = labelsTe.assign(y_prob=y_prob)
            y_prob_w_hypo = labelsTe[labelsTe.if_to_drop == 0]['y_prob'].values
            y_test_w_hypo = labelsTe[labelsTe.if_to_drop == 0]['label'].values

            # Evaluation 1
            fpr, tpr, _ = metrics.roc_curve(y_test, y_prob)
            prec, rec, _ = metrics.precision_recall_curve(y_test, y_prob)

            print('--------------------------------------------')
            print('Evaluation of full test set:')
            print("AU-ROC:", "%0.4f" % metrics.auc(fpr, tpr), "AU-PRC:",
                  "%0.4f" % metrics.auc(rec, prec))

            # Evaluation 2
            fpr, tpr, _ = metrics.roc_curve(y_test_w_hypo, y_prob_w_hypo)
            prec, rec, _ = metrics.precision_recall_curve(
                y_test_w_hypo, y_prob_w_hypo)

            print('Evaluation of test set without hypoxemia samples:')
            print("AU-ROC:", "%0.4f" % metrics.auc(fpr, tpr), "AU-PRC:",
                  "%0.4f" % metrics.auc(rec, prec))
            print('--------------------------------------------')

            (sensitivity, specificity, PPV, NPV, f1,
             acc), _ = line_search_best_metric(y_test_w_hypo,
                                               y_prob_w_hypo,
                                               spec_thresh=0.95)
            alarm_rate = pos_rate * sensitivity / PPV

            print("sensitivity:", "%0.4f" % sensitivity, "specificity:",
                  "%0.4f" % specificity, "PPV:", "%0.4f" % PPV, "NPV:",
                  "%0.4f" % NPV, "F1 score:", "%0.4f" % f1, "accuracy:",
                  "%0.4f" % acc)
            print("Alarm rate:", alarm_rate)
            print('--------------------------------------------')

            # result_table = result_table.append({
            #     'random_state': rs,
            #     'model': cls.__class__.__name__,
            #     'fpr': fpr,
            #     'tpr': tpr,
            #     'roc': metrics.auc(fpr, tpr),
            #     'prec': prec,
            #     'rec': rec,
            #     'prc': metrics.auc(rec, prec),
            #     'y_test': labelsTe,
            #     'y_prob': y_prob,
            #     'pos_rate': pos_rate
            # }, ignore_index=True)

    save_name = 'data/result/model_comparison/realtime_gbtree_random.pkl'
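
save_name is assigned but never used in the shown code; given the commented-out result_table.append block and the pattern in Examples #3 and #4, the collected results were presumably persisted along these lines (an assumption, not part of the original function):

result_table.to_pickle(save_name)  # assumed final step, mirroring Examples #3 and #4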