Example #1
import math

from sklearn.metrics import (average_precision_score, brier_score_loss,
                             log_loss, r2_score, roc_auc_score)

def get_scores(y_true, y_pred):
    brier_score = brier_score_loss(y_true, y_pred)      # squared error of probabilities
    log_score = log_loss(y_true, y_pred)                # cross-entropy
    roc_score = roc_auc_score(y_true, y_pred)           # ranking quality
    pr_score = average_precision_score(y_true, y_pred)  # PR AUC
    r2score = r2_score(y_true, y_pred)
    # Report the square root of the Brier score (an RMSE-style value).
    return math.sqrt(brier_score), log_score, roc_score, pr_score, r2score
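For illustration, get_scores can be called with a handful of made-up labels and probabilities (the values below are not from the original source):

y_true = [0, 1, 1, 0, 1]
y_pred = [0.2, 0.8, 0.6, 0.3, 0.9]
rmse_brier, log_s, roc, pr, r2 = get_scores(y_true, y_pred)
print(rmse_brier, log_s, roc, pr, r2)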
Example #2
from sklearn.metrics import average_precision_score, roc_auc_score

def auc_metrics(self, y, ypreds):
    # Log which features were used, then compute and log both AUC metrics.
    self.log("Features Used: {}".format(self.featurizer.get_feature_names()))
    auc = roc_auc_score(y, ypreds)
    self.log("\tROC AUC: {}".format(auc))
    prc = average_precision_score(y, ypreds, average="weighted")
    self.log("\tPrecision-Recall AUC: {}".format(prc))
    return auc, prc
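auc_metrics is an instance method: it assumes the host object provides a featurizer with a get_feature_names() method and a log() helper. A minimal hypothetical host class, just to make those dependencies explicit (every name below is an assumption, not part of the original code):

class _Featurizer:
    def get_feature_names(self):
        return ["tokens", "length"]  # placeholder feature names

class Evaluator:
    auc_metrics = auc_metrics  # reuse the function above as a method

    def __init__(self):
        self.featurizer = _Featurizer()

    def log(self, message):
        print(message)

Evaluator().auc_metrics([0, 1, 1, 0], [0.3, 0.8, 0.6, 0.4])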
Example #3
import math

from sklearn.metrics import (average_precision_score, brier_score_loss,
                             log_loss, r2_score, roc_auc_score)

def get_scores(shots):
    y_true = [shot.result for shot in shots]  # observed outcomes (0/1)
    y_pred = [shot.pred for shot in shots]    # predicted probabilities
    brier_score = brier_score_loss(y_true, y_pred)
    log_score = log_loss(y_true, y_pred)
    roc_score = roc_auc_score(y_true, y_pred)
    pr_score = average_precision_score(y_true, y_pred)
    r2score = r2_score(y_true, y_pred)
    return math.sqrt(brier_score), log_score, roc_score, pr_score, r2score
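A quick way to exercise this variant is with a minimal stand-in for the shot objects (the Shot namedtuple and its values are hypothetical, chosen only for illustration):

from collections import namedtuple

Shot = namedtuple("Shot", ["result", "pred"])
shots = [Shot(1, 0.8), Shot(0, 0.2), Shot(1, 0.6), Shot(0, 0.4)]
print(get_scores(shots))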
Example #4
from sys import platform

from sklearn.metrics import average_precision_score, precision_recall_curve

def plot_precision_recall(y_true, y_pred):
    from matplotlib import pyplot as plt

    # Use a non-interactive backend on headless Linux machines.
    if platform == 'linux':
        plt.switch_backend('agg')

    precision, recall, _ = precision_recall_curve(y_true, y_pred)
    score = average_precision_score(y_true, y_pred)
    # Recall on the x-axis, precision on the y-axis, matching the labels below.
    plt.plot(recall,
             precision,
             label='PR curve. Area: {:.3f}'.format(score),
             lw=1)

    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.legend()
    plt.savefig('prec_rec.png')
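A short smoke test with synthetic scores (generated here purely for illustration) writes prec_rec.png to the current directory:

import numpy as np

rng = np.random.default_rng(0)
y_true = rng.integers(0, 2, size=200)
y_scores = np.clip(0.5 * y_true + rng.random(200) * 0.5, 0.0, 1.0)
plot_precision_recall(y_true, y_scores)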
Example #5
from sklearn.metrics import average_precision_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVC

def run_svm_decision_distance(test, train, agreement=1):
    """
    :param test: evaluation sentences (objects with a ``label`` attribute)
    :param train: training sentences
    :param agreement: minimum label value counted as the positive class
    :return: ``test``, with ``pred`` and ``pred_label`` set on each sentence
    """
    svc = Pipeline([("svm",
                     SVC(class_weight='balanced',
                         kernel='rbf',
                         C=0.7,
                         gamma=0.001,
                         random_state=0))])

    features = get_experimential_pipeline(train)
    X_train = features.fit_transform(train)

    y = [1 if sent.label >= agreement else 0 for sent in train]
    X_train, y = balance(X_train, y)

    print("Start training SVM.")
    svc.fit(X_train, y)
    print("Finished training SVM.")
    # Reuse the featurizer fitted on the training data; refitting on the
    # test set would produce an inconsistent feature space.
    X = features.transform(test)

    # decision_function returns a 1-D array, but MinMaxScaler expects a
    # 2-D matrix, so reshape before scaling the scores into [0, 1].
    y_pred_proba = svc.decision_function(X)
    y_pred_proba = MinMaxScaler().fit_transform(
        y_pred_proba.reshape(-1, 1)).ravel().tolist()

    y_pred = svc.predict(X)

    for sent, prob, pred_label in zip(test, y_pred_proba, y_pred):
        sent.pred = prob
        sent.pred_label = pred_label

    y_true = [1 if s.label >= agreement else 0 for s in test]

    print(average_precision_score(y_true, y_pred_proba))
    return test
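The reshape above is needed because SVC.decision_function returns a 1-D array while MinMaxScaler only accepts 2-D input; the scaling step in isolation (with made-up decision values):

import numpy as np
from sklearn.preprocessing import MinMaxScaler

scores = np.array([-1.3, 0.2, 2.5])  # hypothetical decision_function output
scaled = MinMaxScaler().fit_transform(scores.reshape(-1, 1)).ravel()
print(scaled)  # [0.         0.39473684 1.        ]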
Example #6
from sklearn.metrics import average_precision_score
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVC

def run_svm_prob(test, train, agreement=1, C=1, gamma=0.0001):
    """
    :param test: evaluation sentences (objects with a ``label`` attribute)
    :param train: training sentences
    :param agreement: minimum label value counted as the positive class
    :param C: SVM regularization strength
    :param gamma: RBF kernel coefficient
    :return: ``test``, with ``pred`` and ``pred_label`` set on each sentence
    """
    svc = SVC(class_weight='balanced',
              kernel='rbf',
              C=C,
              gamma=gamma,
              probability=True,
              random_state=0)
    features = get_experimential_pipeline(train)
    X_train = features.fit_transform(train)

    y = [1 if sent.label >= agreement else 0 for sent in train]

    print("Start training SVM.")
    svc.fit(X_train, y)
    print("Finished training SVM.")
    # Reuse the featurizer fitted on the training data; refitting on the
    # test set would produce an inconsistent feature space.
    X = features.transform(test)
    # Keep the positive-class column and reshape to 2-D for MinMaxScaler.
    y_pred_proba = svc.predict_proba(X)
    y_pred_proba = MinMaxScaler().fit_transform(
        y_pred_proba[:, 1].reshape(-1, 1)).ravel().tolist()

    y_pred = svc.predict(X)

    for sent, prob, pred_label in zip(test, y_pred_proba, y_pred):
        sent.pred = prob
        sent.pred_label = pred_label

    y_true = [1 if s.label >= agreement else 0 for s in test]

    print(average_precision_score(y_true, y_pred_proba))
    return test
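Similarly, predict_proba returns one column per class, so the snippet keeps only the positive-class column before rescaling. That step in isolation (with made-up probabilities):

import numpy as np
from sklearn.preprocessing import MinMaxScaler

proba = np.array([[0.9, 0.1],   # hypothetical predict_proba output
                  [0.3, 0.7],
                  [0.5, 0.5]])
pos = proba[:, 1].reshape(-1, 1)  # positive-class column, 2-D
print(MinMaxScaler().fit_transform(pos).ravel())  # [0.         1.         0.66666667]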