Esempio n. 1
0
def eval(emb,
         label_mat,
         emb_IDmap,
         label_IDmap,
         n_splits=5,
         random_state=None,
         shuffle=False):
    """Evaluate predictions using auROC.

    NOTE(review): this function shadows the builtin ``eval``; consider
    renaming (kept as-is here to preserve the public interface).

    Args:
        emb(:obj:`np.ndarray`): embedding matrix
        label_mat(:obj:`np.ndarray`): label matrix
        emb_IDmap(dict of `str`:`int`): IDmap for embedding matrix
        label_IDmap(dict of `str`:`int`): IDmap for label matrix
        n_splits(int): number of folds in stratified k-fold cross validation
        random_state(int): random state used to generate split
        shuffle(bool): whether to shuffle samples before splitting
            (forwarded to ``test``)

    Returns:
        list: one auROC score per ``(y_true, y_pred)`` pair returned
        by ``test``

    """
    # Run the k-fold prediction; ``test`` yields aligned lists of
    # true-label and predicted-score arrays.
    y_true_all, y_pred_all = test(emb,
                                  label_mat,
                                  emb_IDmap,
                                  label_IDmap,
                                  n_splits=n_splits,
                                  random_state=random_state,
                                  shuffle=shuffle)

    # Score each fold/task pair independently.
    auroc_all = [
        auroc(y_true, y_pred)
        for y_true, y_pred in zip(y_true_all, y_pred_all)
    ]

    return auroc_all
Esempio n. 2
0
def _get_misclass_auroc(preds,
                        targets,
                        criterion,
                        topk=1,
                        expected_data_uncertainty_array=None):
    """
    Get AUROC for misclassification detection.

    :param preds: Prediction probabilities as numpy array (n_samples, n_classes)
    :param targets: Targets as numpy array
    :param criterion: Criterion to use for scoring on misclassification
        detection: 'entropy', 'confidence', or 'model_uncertainty'.
    :param topk: Top-k class probabilities to consider while making predictions.
    :param expected_data_uncertainty_array: Expected data uncertainty as numpy
        array; required when ``criterion == 'model_uncertainty'``.
    :return: AUROC on misclassification detection
    :raises ValueError: if 'model_uncertainty' is requested without
        ``expected_data_uncertainty_array``.
    :raises NotImplementedError: if ``criterion`` is not one of the
        supported values.
    """
    # A sample is a "positive" for the detector when its top-k prediction
    # is wrong (1 - correctness indicator).
    misclassification_targets = (1 - _misclass_tgt(preds, targets,
                                                   (topk, ))).astype(bool)

    if criterion == 'entropy':
        # Predictive entropy: -sum_c p_c * log(p_c) per sample.
        criterion_values = np.sum(-preds * np.log(preds), axis=1)
    elif criterion == 'confidence':
        # Negated max probability so that higher => more likely misclassified.
        criterion_values = -preds.max(axis=1)
    elif criterion == 'model_uncertainty':
        if expected_data_uncertainty_array is None:
            # Fail with a clear message instead of an opaque TypeError
            # from subtracting None.
            raise ValueError(
                "expected_data_uncertainty_array is required when "
                "criterion='model_uncertainty'")
        # Mutual information: predictive entropy minus expected data
        # uncertainty.
        criterion_values = np.sum(-preds * np.log(preds),
                                  axis=1) - expected_data_uncertainty_array
    else:
        raise NotImplementedError(
            "Unsupported criterion: {!r}".format(criterion))

    return auroc(misclassification_targets, criterion_values)
def get_misclass_auroc(preds, targets, criterion, topk=1, **args):
    """
    Get AUROC for misclassification detection.

    :param preds: Prediction probabilities as numpy array (n_samples, n_classes)
    :param targets: Targets as numpy array
    :param criterion: Scoring criterion: 'entropy', 'confidence', or 'MI'.
    :param topk: Top-k class probabilities to consider while making predictions.
    :param args: For criterion 'MI', must contain 'mean_ens_entropy'
        (expected data uncertainty, numpy array).
    :return: AUROC on misclassification detection
    :raises NotImplementedError: if ``criterion`` is not one of the
        supported values.
    """
    # Positive class for the detector: samples whose top-k prediction
    # is wrong.
    misclassification_targets = (
        1 - misclass_tgt(preds, targets, topk)).astype(bool)

    if criterion == 'entropy':
        # Predictive entropy: -sum_c p_c * log(p_c) per sample.
        criterion_values = np.sum(-preds * np.log(preds), axis=1)
    elif criterion == 'confidence':
        # Negated max probability so that higher => more likely misclassified.
        criterion_values = -preds.max(axis=1)
    elif criterion == 'MI':
        # Mutual information: predictive entropy minus expected data
        # uncertainty supplied by the caller.
        criterion_values = np.sum(-preds * np.log(preds),
                                  axis=1) - args['mean_ens_entropy']
    else:
        raise NotImplementedError(
            "Unsupported criterion: {!r}".format(criterion))

    return auroc(misclassification_targets, criterion_values)
Esempio n. 4
0
        self.w = np.linalg.inv(X.T.dot(X)+lambdaa).dot((X.T).dot(y))
    def decision_function(self, X):
        """Return the raw linear score X @ w for each row of X (no intercept)."""
        weights = self.w
        return X.dot(weights)

if __name__=='__main__':
    # Demo: compare an OLS-based linear classifier against a linear SVM,
    # then show OLS's sensitivity to an extreme outlier.
    plt.close("all")

    # Ridge penalty of 0.0 reduces the fit to plain least squares.
    lambdaa = 0.0
    X,y = getExamples()

    # Fit OLS and plot its decision boundary with +-1 margin contours.
    ols = OLS(lambdaa = lambdaa)
    ols.fit(X,y)

    plt.figure()
    # ``e`` captures the plot extent so later plots share the same axes.
    e = plotit(X = X, Y = y, clf = ols.decision_function, conts =[-1,0,1])
    plt.title("OLS"+" AUC:"+"{0:.2f}".format(auroc(y,ols.decision_function(X))))
    plt.show()

    # Linear SVM (C=10) for comparison, drawn over the same extent.
    clf = LinearSVC(C=1e1)
    clf.fit(X,y)
    plt.figure()
    plotit(X = X, Y = y, clf = clf.decision_function, conts =[-1,0,1],extent = e)
    plt.title("SVM"+" AUC:"+"{0:.2f}".format(auroc(y,clf.decision_function(X))))
    plt.show()


    # Turn the first sample into an extreme outlier to demonstrate how
    # squared-error fitting is dragged by it (SVM's hinge loss is not).
    X[0]=-1000*X[0] #Let's add some fun!

    # Refit OLS on the corrupted data (plotting continues past this chunk).
    ols2 = OLS(lambdaa = lambdaa)
    ols2.fit(X,y)
    plt.figure()