def test_sensitivity_specificity_f_binary_single_class():
    # Such a case may occur with non-stratified cross-validation
    assert sensitivity_score([1, 1], [1, 1]) == 1.
    assert specificity_score([1, 1], [1, 1]) == 0.

    assert sensitivity_score([-1, -1], [-1, -1]) == 0.
    assert specificity_score([-1, -1], [-1, -1]) == 0.
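Why specificity comes out as 0 here: with only the positive class present there are no negative samples, so TN = FP = 0 and TN / (TN + FP) is 0 / 0; imblearn falls back to 0 (typically with an ill-defined-metric warning). A minimal standalone sketch making the counts explicit, assuming only that imblearn is installed:

import numpy as np
from imblearn.metrics import specificity_score

y_true = np.array([1, 1])
y_pred = np.array([1, 1])

tn = np.sum((y_true != 1) & (y_pred != 1))   # 0 -- no true negatives exist
fp = np.sum((y_true != 1) & (y_pred == 1))   # 0 -- no false positives exist
print(tn, fp)                                # TN / (TN + FP) would be 0 / 0
print(specificity_score(y_true, y_pred))     # reported as 0.0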
Example #2
def test_sensitivity_specificity_extra_labels():
    y_true = [1, 3, 3, 2]
    y_pred = [1, 1, 3, 2]

    # No average: per-label values returned as an array
    actual = specificity_score(y_true,
                               y_pred,
                               labels=[0, 1, 2, 3, 4],
                               average=None)
    assert_allclose([1., 0.67, 1., 1., 1.], actual, rtol=R_TOL)

    # Macro average is changed
    actual = specificity_score(y_true,
                               y_pred,
                               labels=[0, 1, 2, 3, 4],
                               average='macro')
    assert_allclose(np.mean([1., 0.67, 1., 1., 1.]), actual, rtol=R_TOL)

    # Check for micro
    actual = specificity_score(y_true,
                               y_pred,
                               labels=[0, 1, 2, 3, 4],
                               average='micro')
    assert_allclose(15. / 16., actual, rtol=R_TOL)

    # Check for weighted
    actual = specificity_score(y_true,
                               y_pred,
                               labels=[0, 1, 2, 3, 4],
                               average='weighted')
    assert_allclose(np.average([1., 0.67, 1., 1., 1.],
                               weights=[0, 1, 1, 2, 0]),  # label support in y_true
                    actual,
                    rtol=R_TOL)
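For reference, the expected numbers in this test can be reproduced by hand. A standalone numpy sketch: per-label specificity is TN / (TN + FP), counted over the samples that are truly negative for each label, and the micro average pools the counts across labels.

import numpy as np

y_true = np.array([1, 3, 3, 2])
y_pred = np.array([1, 1, 3, 2])

per_label, tn_sum, fp_sum = [], 0, 0
for lab in [0, 1, 2, 3, 4]:
    neg = y_true != lab                   # samples truly negative for `lab`
    fp = np.sum(neg & (y_pred == lab))    # negatives wrongly predicted as `lab`
    tn = np.sum(neg & (y_pred != lab))
    per_label.append(tn / (tn + fp))
    tn_sum, fp_sum = tn_sum + tn, fp_sum + fp

print(per_label)                   # [1.0, 0.666..., 1.0, 1.0, 1.0]
print(tn_sum / (tn_sum + fp_sum))  # 0.9375, i.e. 15/16 (the micro average)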
Example #3
def test_sensitivity_specificity_f_binary_single_class():
    # Such a case may occur with non-stratified cross-validation
    assert_equal(1., sensitivity_score([1, 1], [1, 1]))
    assert_equal(0., specificity_score([1, 1], [1, 1]))

    assert_equal(0., sensitivity_score([-1, -1], [-1, -1]))
    assert_equal(0., specificity_score([-1, -1], [-1, -1]))
Example #4
def test_sensitivity_specificity_extra_labels():
    """Test handling of explicit additional (not in input) labels to SS"""
    y_true = [1, 3, 3, 2]
    y_pred = [1, 1, 3, 2]

    # No average: per-label values returned as an array
    actual = specificity_score(y_true,
                               y_pred,
                               labels=[0, 1, 2, 3, 4],
                               average=None)
    assert_array_almost_equal([1., 0.67, 1., 1., 1.], actual, 2)

    # Macro average is changed
    actual = specificity_score(y_true,
                               y_pred,
                               labels=[0, 1, 2, 3, 4],
                               average='macro')
    assert_array_almost_equal(np.mean([1., 0.67, 1., 1., 1.]), actual, 2)

    # Check for micro
    actual = specificity_score(y_true,
                               y_pred,
                               labels=[0, 1, 2, 3, 4],
                               average='micro')
    assert_array_almost_equal(15. / 16., actual)

    # Check for weighted
    actual = specificity_score(y_true,
                               y_pred,
                               labels=[0, 1, 2, 3, 4],
                               average='weighted')
    assert_array_almost_equal(
        np.average([1., 0.67, 1., 1., 1.],
                   weights=[0, 1, 1, 2, 0]),  # label support in y_true
        actual, 2)
Example #5
 def specificity(self):
     """ Specificity is also known as True Negative Rate """
     self.overall_specificity = specificity_score(
         self.y_true, self.y_pred,
         average=self.average_type).round(self.digits_count_fp)
     self.classes_specificity = specificity_score(self.y_true,
                                                  self.y_pred,
                                                  average=None).round(
                                                      self.digits_count_fp)
Example #6
def test_sensitivity_specificity_f_binary_single_class():
    """Test sensitivity and specificity behave with a single positive or
    negative class"""
    # Such a case may occur with non-stratified cross-validation
    assert_equal(1., sensitivity_score([1, 1], [1, 1]))
    assert_equal(0., specificity_score([1, 1], [1, 1]))

    assert_equal(0., sensitivity_score([-1, -1], [-1, -1]))
    assert_equal(0., specificity_score([-1, -1], [-1, -1]))
Example #7
def examine_model(prob_path, pred_path, data_path):
    prob_data = pd.read_csv(prob_path)
    pred_data = pd.read_csv(pred_path)

    label = prob_data['label'].values
    model_list = prob_data.columns[2:]

    dic = {
        'NCP': 'non_contrast',
        'CMP': 'arterial',
        'NP': 'venous',
        'NCP+CMP': ['non_contrast', 'arterial'],
        'NCP+NP': ['non_contrast', 'venous'],
        'CMP+NP': ['arterial', 'venous'],
        'NCP+CMP+NP': ['non_contrast', 'arterial', 'venous']
    }
    for model_name in model_list:
        period_list = dic[model_name]
        if isinstance(period_list, list):
            compute_ic(united_model_path, data_path, period_list)
        else:
            compute_period_ic(united_model_path, data_path, period_list)

        prob = prob_data[model_name]
        pred = pred_data[model_name]
        auc = roc_auc_score(label, prob)
        acc = accuracy_score(label, pred)
        sen = recall_score(label, pred)
        spe = specificity_score(label, pred)

        print('{}: auc:{:.3f},\tacc: {:.3f},\tsen: {:.3f},\tspe: {:.3f}\n'.
              format(model_name, auc, acc, sen, spe))
Example #8
def test_sensitivity_specificity_extra_labels(average, expected_specificity):
    y_true = [1, 3, 3, 2]
    y_pred = [1, 1, 3, 2]

    actual = specificity_score(
        y_true, y_pred, labels=[0, 1, 2, 3, 4], average=average)
    assert_allclose(expected_specificity, actual, rtol=R_TOL)
Example #9
def identification(method, rs_cnt, batch_size):
    out_file = open(
        os.path.join(os.path.abspath(""), "results",
                     "{}_{}_vggface2.txt".format(method, rs_cnt)), "w")
    data_file = os.path.join(os.path.abspath(""), "data",
                             "vggface2_{}_dataset.hdf5".format(method))
    train, val, test = [
        h5py.File(data_file, "r")[part] for part in ["train", "val", "test"]
    ]
    rs = np.load(
        os.path.join(os.path.abspath(""), "data",
                     "rs_{}_feat.npz".format(method)))["arr_0"]

    mlp = get_mlp(512, np.unique(train[:, -1].astype(int) - 1).shape[0])

    mlp = train_mlp(mlp, train, val, rs, rs_cnt, method, batch_size)

    y_prob = predict_mlp(mlp, test, rs, rs_cnt)
    y_pred = np.argmax(y_prob, axis=-1)
    y_true = test[:, -1].astype(int) - 1

    out_file.write("ACC -- {:.6f}\n".format(accuracy_score(y_true, y_pred)))
    out_file.write("FPR -- {:.6f}\n".format(
        1 - sensitivity_score(y_true, y_pred, average="micro")))
    out_file.write("FRR -- {:.6f}\n".format(
        1 - specificity_score(y_true, y_pred, average="micro")))
    out_file.write("PRE -- {:.6f}\n".format(
        precision_score(y_true, y_pred, average="micro")))
    out_file.write("REC -- {:.6f}\n".format(
        recall_score(y_true, y_pred, average="micro")))
    out_file.write("F1 -- {:.6f}\n".format(
        f1_score(y_true, y_pred, average="micro")))
    out_file.close()
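For reference, a toy binary check of the identities used above, FPR = FP / (FP + TN) = 1 - specificity and FRR = FN / (FN + TP) = 1 - sensitivity (a standalone sketch with made-up labels):

import numpy as np
from imblearn.metrics import sensitivity_score, specificity_score

y_true = np.array([0, 0, 0, 1, 1])
y_pred = np.array([0, 1, 0, 1, 0])

fp = np.sum((y_true == 0) & (y_pred == 1))
tn = np.sum((y_true == 0) & (y_pred == 0))
fn = np.sum((y_true == 1) & (y_pred == 0))
tp = np.sum((y_true == 1) & (y_pred == 1))

assert np.isclose(fp / (fp + tn), 1 - specificity_score(y_true, y_pred))
assert np.isclose(fn / (fn + tp), 1 - sensitivity_score(y_true, y_pred))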
Example #10
def evaluate_prediction(y, pred):
    # Convert to numpy array
    pred = np.array(pred, dtype=np.float64)

    # Count null predictions
    n_null = np.isnan(pred).sum()
    #pred.isnull().sum()

    # Remove null predictions and convert to bool (np.bool is removed in recent numpy)
    y = y[~np.isnan(pred)].astype(bool)
    pred = pred[~np.isnan(pred)].astype(bool)

    if y.all() or (~y).all():
        return pd.Series((
            1.0 - n_null / (n_null + len(y)),
            metrics.accuracy_score(y, pred),
            np.nan,
            np.nan,
            np.nan,
            np.nan,
            np.nan,
        ),
                         index=('coverage', 'accuracy', 'precision',
                                'specificity', 'sensitivity', 'auc', 'mcc'))
    return pd.Series((
        1.0 - n_null / (n_null + len(y)),
        metrics.accuracy_score(y, pred),
        metrics.precision_score(y, pred),
        specificity_score(y, pred),
        metrics.recall_score(y, pred),
        metrics.roc_auc_score(y, pred),
        metrics.matthews_corrcoef(y, pred),
    ),
                     index=('coverage', 'accuracy', 'precision', 'specificity',
                            'sensitivity', 'auc', 'mcc'))
Example #11
def print_metrics(model, test, cols):
    test_x = test.drop(['Label'], axis=1)
    test_y = test['Label']
    y_probs = model.predict_proba(test_x[cols])[:, 1]
    y_test_predictions = np.where(
        model.predict_proba(test_x[cols])[:, 1] > 0.119, 2, 1)
    n_levels = test_y.value_counts().count()
    if (n_levels == 1):
        #print(test_x.shape)
        return y_test_predictions
    else:
        mcc = metrics.matthews_corrcoef(test_y, y_test_predictions)
        tn, fp, fn, tp = confusion_matrix(test_y, y_test_predictions).ravel()
        ppv = tp / (tp + fp)
        npv = tn / (tn + fn)
        sen = tp / (tp + fn)
        spe = tn / (tn + fp)
        score = ppv + npv + sen + spe
        #y_test_predictions=model.predict(test_x[cols])
        sensi = sensitivity_score(test_y, y_test_predictions, pos_label=2)
        speci = specificity_score(test_y, y_test_predictions, pos_label=2)
        accu = accuracy_score(test_y, y_test_predictions)
        auro = roc_auc_score(test_y, y_test_predictions)
        #acc=accuracy_score(test_y,y_test_predictions)
        print("Composite Score for Martelotto et al.: ", score)
        return y_test_predictions
Example #12
def calc_metrics(y_test, pred, auc, i):
    sen = metrics.sensitivity_score(y_test, pred, pos_label=1)
    spe = metrics.specificity_score(y_test, pred, pos_label=1)
    geo = metrics.geometric_mean_score(y_test, pred, pos_label=1)
    index = ['sm', 'b1', 'b2', 'enn', 'tom', 'ada', 'mnd']
    metrics_list = [index[i], sen, spe, geo, auc]
    return metrics_list
Example #13
def compute_metrics(y_test,
                    y_pred,
                    y_proba=None,
                    average='weighted',
                    return_index=False):
    """
    Function computing metrics of interest for a sets of prediction

    :input y_test: pd.DataFrame or np.array of original label
    :input y_pred: pd.DataFrame or np.array of predicted label

    :output red: list of value for metrics, in order - Accuracy - Precision - Recall - F1 Score - Sensitivity - Specifity
    """
    if return_index:
        return [
            'accuracy', 'precision', 'recall', 'f1_score', 'sensitivity_score',
            'specificity_score', 'geometric_mean_score',
            'average_precision_score'
        ]
    else:
        res = []
        res.append(accuracy_score(y_test, y_pred))
        res.append(precision_score(y_test, y_pred, average=average))
        res.append(recall_score(y_test, y_pred, average=average))
        res.append(f1_score(y_test, y_pred, average=average))
        res.append(sensitivity_score(y_test, y_pred, average=average))
        res.append(specificity_score(y_test, y_pred, average=average))
        res.append(geometric_mean_score(y_test, y_pred, average=average))
        if y_proba is not None:
            res.append(
                average_precision_score(y_test, y_proba, average=average))
        return res
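A possible call pattern for the helper above (a sketch: it assumes the imports the function relies on, i.e. accuracy/precision/recall/f1/average_precision from sklearn.metrics and sensitivity/specificity/geometric mean from imblearn.metrics; the toy labels and scores are made up):

y_test = [0, 0, 1, 1, 1]
y_pred = [0, 1, 1, 1, 0]
y_proba = [0.2, 0.6, 0.7, 0.9, 0.4]   # hypothetical positive-class scores

names = compute_metrics(None, None, return_index=True)    # metric names only
values = compute_metrics(y_test, y_pred, y_proba=y_proba)
print(dict(zip(names, values)))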
Example #14
def performance_summary(
    clf: OptimalSamplingClassifier,
    X: np.ndarray,
    y: np.ndarray,
    info: Optional[Dict[str, any]] = None,
) -> Dict[str, float]:
    predicted_proba = clf.predict_proba(X)
    predicted = clf.predict(X)
    nominal_proba = (y == clf.positive_class).mean()
    return dict(model=str(clf.estimator).replace("\n", "").replace(" ", ""),
                class_ratio=1 / nominal_proba,
                weight_ratio=clf.positive_weight / clf.negative_weight,
                sampling_probability=clf._sampling_proba,
                previous_probabilities=clf._prev_sampling_probas,
                cross_val_probabilities=clf._cross_val_sampling_probas,
                sampling_ratio=clf._sampling_proba / nominal_proba,
                iter_to_converge=clf._iter_count,
                accuracy=accuracy_score(y, predicted),
                sensitivity=sensitivity_score(y, predicted),
                specificity=specificity_score(y, predicted),
                precision=precision_score(y, predicted) if
                (predicted == clf.positive_class).sum() > 0 else None,
                recall=recall_score(y, predicted) if
                (predicted == clf.positive_class).sum() > 0 else None,
                f1_score=f1_score(y, predicted),
                geometric_mean_score=geometric_mean_score(y, predicted) if
                (predicted == clf.positive_class).sum() > 0 else None,
                roc_auc_score=roc_auc_score(y, predicted_proba),
                average_precision_score=average_precision_score(
                    y, predicted_proba),
                weighted_loss=clf.weighted_loss(X, y).mean(),
                cost=clf.cost(X, y).mean(),
                **(info if info else {}))
Example #15
def classification_report_imbalanced_values(
    y_true, y_pred, labels, target_names=None, sample_weight=None, digits=2, alpha=0.1
):
    """Copy of imblearn.metrics.classification_report_imbalanced to have
    access to the raw values. The code is mostly the same except the
    formatting code and generation of the report which haven removed. Copied
    from version 0.4.3. The original code is living here:
    https://github.com/scikit-learn-contrib/imbalanced-learn/blob/b861b3a8e3414c52f40a953f2e0feca5b32e7460/imblearn/metrics/_classification.py#L790
    """
    labels = np.asarray(labels)

    if target_names is None:
        target_names = [str(label) for label in labels]

    # Compute the different metrics
    # Precision/recall/f1
    precision, recall, f1, support = precision_recall_fscore_support(
        y_true, y_pred, labels=labels, average=None, sample_weight=sample_weight
    )
    # Specificity
    specificity = specificity_score(
        y_true, y_pred, labels=labels, average=None, sample_weight=sample_weight
    )
    # Geometric mean
    geo_mean = geometric_mean_score(
        y_true, y_pred, labels=labels, average=None, sample_weight=sample_weight
    )
    # Index balanced accuracy
    iba_gmean = make_index_balanced_accuracy(alpha=alpha, squared=True)(
        geometric_mean_score
    )
    iba = iba_gmean(
        y_true, y_pred, labels=labels, average=None, sample_weight=sample_weight
    )

    result = {"targets": {}}

    for i, label in enumerate(labels):
        result["targets"][target_names[i]] = {
            "precision": precision[i],
            "recall": recall[i],
            "specificity": specificity[i],
            "f1": f1[i],
            "geo_mean": geo_mean[i],
            "iba": iba[i],
            "support": support[i],
        }

    result["average"] = {
        "precision": np.average(precision, weights=support),
        "recall": np.average(recall, weights=support),
        "specificity": np.average(specificity, weights=support),
        "f1": np.average(f1, weights=support),
        "geo_mean": np.average(geo_mean, weights=support),
        "iba": np.average(iba, weights=support),
        "support": np.sum(support),
    }

    return result
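A possible usage sketch for the helper above (assumes numpy and the sklearn/imblearn functions it calls are imported; the toy labels are made up):

y_true = [0, 0, 0, 1, 1, 2, 2, 2]
y_pred = [0, 1, 0, 1, 1, 2, 0, 2]

report = classification_report_imbalanced_values(y_true, y_pred, labels=[0, 1, 2])
for name, row in report["targets"].items():
    print(name, row["specificity"], row["support"])
print("weighted average specificity:", report["average"]["specificity"])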
Example #16
 def GetMetrics(self, y_test):
     tn, fp, fn, tp = confusion_matrix(y_test, self.y_pred).flatten()
     self.acc = '%.3f' % accuracy_score(y_test, self.y_pred)
     self.AUC = '%.3f' % roc_auc_score(y_test, self.y_pred_proba_pos)
     #        self.sens = tp/(tp+fn)
     self.sens = '%.3f' % sensitivity_score(y_test, self.y_pred)
     #        self.spec = '%.3f'%(tn/(tn+fp))
     self.spec = '%.3f' % specificity_score(y_test, self.y_pred)
Example #17
def classification_results(train,test):
    #Derivation of NBDriver using training data 
    """
    Arguments:
        train = feature matrix derived from Brown et al.
        test= feature matrix derived from Martelotto et al.
    Returns:
        best_model = Best ensemble model derived using the training data
        X_red= Dataframe derived after sampling that was used to train the model
        scores= probability based classification scores
    """
    sen=[];spe=[];acc=[];auc=[];c=[];m=[];s=[]
    train_x=train.drop('Label',axis=1);train_y=train['Label'];    
    test_x=test.drop('Label',axis=1);test_y=test['Label'];
    #Undersampling with repeated edited nearest neighbours to reduce the majority class size
    samp=RepeatedEditedNearestNeighbours(random_state=42)
    X_samp,y_samp=samp.fit_resample(train_x,train_y)
    X_samp = pd.DataFrame(X_samp, columns = train_x.columns)
    #Experimenting with different numbers of top features derived from the tree-based feature extraction method 
    top_n_feats=[30,40,50,60,70]
    X_r=feature_reduction_using_trees(X_samp,y_samp) 
    cols=X_r.columns
    for n in top_n_feats:
        print("For top: ",n," features")
        X_red=X_r[cols[0:n]]
        sv=SVC(kernel="linear",probability=True,C=0.01,random_state=42) #chosen from 5foldCV based grid search
        kde=KDEClassifier(bandwidth=1.27) #chosen from 5foldCV based grid search
        best_model = VotingClassifier(estimators=[('sv', sv), ('kde', kde)],
                        voting='soft',weights=[4, 7]) #best combination of weights selected by a brute force search (possible weights 1-10) using a cross-validation approach on the training data  
        
        best_model.fit(X_red,y_samp)
        y_probs = best_model.predict_proba(test_x[X_red.columns])[:,1]
        thresholds = arange(0, 1, 0.001)
        scores = [roc_auc_score(test_y, to_labels(y_probs, t)) for t in thresholds]
        ix= argmax(scores)
        y_test_predictions = np.where(best_model.predict_proba(test_x[X_red.columns])[:,1] > thresholds[ix], 2, 1)
        print("Thresh: ",thresholds[ix])
        sensi= sensitivity_score(test_y, y_test_predictions, pos_label=2)
        speci=specificity_score(test_y,y_test_predictions,pos_label=2)
        accu=accuracy_score(test_y,y_test_predictions)
        auro=roc_auc_score(test_y,y_test_predictions)
        mcc=metrics.matthews_corrcoef(test_y,y_test_predictions)
        tn, fp, fn, tp = confusion_matrix(test_y, y_test_predictions).ravel()
        ppv=tp/(tp+fp)
        npv=tn/(tn+fn)
        sen=tp/(tp+fn)
        spe=tn/(tn+fp)
        score=ppv+npv+sen+spe
        print("For kmer size: ",len(train.columns[0]))
        print("for top ",n," features")
        print(list(X_red.columns.values),"\n")
        score_dict={"Sen":sen,"Spe":spe,"PPV":ppv,"NPV":npv,"AUC":auro,"MCC":mcc,"ACC":accu}
        print(score)
        print(score_dict)
        df=pd.DataFrame(y_test_predictions)
        y_samp = pd.DataFrame(y_samp, columns = ['x'])
    return best_model,X_red,scores
Example #18
def test_sensitivity_specificity_extra_labels(average, expected_specificity):
    y_true = [1, 3, 3, 2]
    y_pred = [1, 1, 3, 2]

    actual = specificity_score(y_true,
                               y_pred,
                               labels=[0, 1, 2, 3, 4],
                               average=average)
    assert_allclose(expected_specificity, actual, rtol=R_TOL)
Example #19
def compute_confusion_matrix(pred_list, label_list, model_name='Unnamed'):
    acc = metrics.accuracy_score(label_list, pred_list)
    sen = metrics.recall_score(label_list, pred_list)
    spe = specificity_score(label_list, pred_list)
    ppv = metrics.precision_score(label_list, pred_list)
    youden_index = sen + spe - 1
    print(
        '{}: \tacc: {:.3f},\tsen: {:.3f},\tspe: {:.3f},\tppv: {:.3f},\tyouden index: {:.3f}\n'
        .format(model_name, acc, sen, spe, ppv, youden_index))
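A quick toy check of the Youden index arithmetic above (a sketch; it assumes sklearn.metrics is imported as `metrics` and specificity_score comes from imblearn, as in the snippet):

label_list = [0, 0, 0, 1, 1, 1]
pred_list = [0, 0, 1, 1, 1, 0]
compute_confusion_matrix(pred_list, label_list, model_name='toy')
# sen = spe = 2/3 here, so the Youden index is 2/3 + 2/3 - 1 = 1/3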
Example #20
def compute_metrics(gt, pred, competition=True):
    """
    Computes AUROC, accuracy, sensitivity and specificity from prediction scores.
    Args:
        gt: Pytorch tensor on GPU, shape = [n_samples, n_classes]
          true binary labels.
        pred: Pytorch tensor on GPU, shape = [n_samples, n_classes]
          can either be probability estimates of the positive class,
          confidence values, or binary decisions.
        competition: whether to use competition tasks. If False, 
          use all tasks
    Returns:
        Per-class lists of AUROC, accuracy, sensitivity and specificity.
    """

    AUROCs, Accus, Senss, Specs = [], [], [], []
    gt_np = gt.cpu().detach().numpy()
    # if cfg.uncertainty == 'U-Zeros':
    #     gt_np[np.where(gt_np==-1)] = 0
    # if cfg.uncertainty == 'U-Ones':
    #     gt_np[np.where(gt_np==-1)] = 1
    pred_np = pred.cpu().detach().numpy()
    THRESH = 0.18
    #     indexes = TARGET_INDEXES if competition else range(N_CLASSES)
    #indexes = range(n_classes)
    
#     pdb.set_trace()
    indexes = range(len(CLASS_NAMES))
    
    for i, cls in enumerate(indexes):
        try:
            AUROCs.append(roc_auc_score(gt_np[:, i], pred_np[:, i]))
        except ValueError as error:
            print('Error in computing AUROC for {}.\n Error msg:{}'.format(i, error))
            AUROCs.append(0)
        
        try:
            Accus.append(accuracy_score(gt_np[:, i], (pred_np[:, i]>=THRESH)))
        except ValueError as error:
            print('Error in computing accuracy for {}.\n Error msg:{}'.format(i, error))
            Accus.append(0)
        
        try:
            Senss.append(sensitivity_score(gt_np[:, i], (pred_np[:, i]>=THRESH)))
        except ValueError:
            print('Error in computing sensitivity for {}.'.format(i))
            Senss.append(0)
        

        try:
            Specs.append(specificity_score(gt_np[:, i], (pred_np[:, i]>=THRESH)))
        except ValueError:
            print('Error in computing specificity for {}.'.format(i))
            Specs.append(0)
    
    return AUROCs, Accus, Senss, Specs
Example #21
def calc_metrics(y_test, pred, i):
    sen = metrics.sensitivity_score(y_test, pred, average='micro')
    spe = metrics.specificity_score(y_test, pred, average='micro')
    geo = metrics.geometric_mean_score(y_test, pred, average='micro')
    f1 = f1_score(y_test, pred, average='micro')
    mcc = matthews_corrcoef(y_test, pred)

    index = ['sm', 'b1', 'b2', 'enn', 'tom', 'ada', 'mnd']
    metrics_list = [index[i], sen, spe, geo, f1, mcc]
    return metrics_list
Example #22
def calc_metrics(y_test, pred, auc, i):
    sen = metrics.sensitivity_score(y_test, pred, pos_label=1)
    spe = metrics.specificity_score(y_test, pred, pos_label=1)
    geo = metrics.geometric_mean_score(y_test, pred, pos_label=1)
    f1 = f1_score(y_test, pred, pos_label=1)
    mcc = matthews_corrcoef(y_test, pred)

    index = ['original', 'sm', 'b1', 'b2', 'enn', 'tom', 'ada', 'mnd']
    metrics_list = [index[i], sen, spe, geo, f1, mcc, auc]
    return metrics_list
Example #23
 def measure(self, j_gamma, k_c, i_folder, Label_test, Label_predict):
     j = j_gamma
     k = k_c
     i = i_folder
     self.Accuracy[j, k,
                   i] = metrics.accuracy_score(Label_test, Label_predict)
     self.Precision[j, k,
                    i] = metrics.precision_score(Label_test, Label_predict)
     self.Recall[j, k, i] = metrics.recall_score(Label_test, Label_predict)
     self.Specificity[j, k, i] = specificity_score(Label_test,
                                                   Label_predict)
     self.G_Mean[j, k, i] = geometric_mean_score(Label_test, Label_predict)
     self.F_Mean[j, k, i] = metrics.f1_score(Label_test, Label_predict)
Example #24
def test_sensitivity_specificity_extra_labels():
    y_true = [1, 3, 3, 2]
    y_pred = [1, 1, 3, 2]

    # No average: per-label values returned as an array
    actual = specificity_score(
        y_true, y_pred, labels=[0, 1, 2, 3, 4], average=None)
    assert_allclose([1., 0.67, 1., 1., 1.], actual, rtol=R_TOL)

    # Macro average is changed
    actual = specificity_score(
        y_true, y_pred, labels=[0, 1, 2, 3, 4], average='macro')
    assert_allclose(np.mean([1., 0.67, 1., 1., 1.]), actual, rtol=R_TOL)

    # Check for micro
    actual = specificity_score(
        y_true, y_pred, labels=[0, 1, 2, 3, 4], average='micro')
    assert_allclose(15. / 16., actual, rtol=R_TOL)

    # Check for weighted
    actual = specificity_score(
        y_true, y_pred, labels=[0, 1, 2, 3, 4], average='weighted')
    assert_allclose(np.average([1., 0.67, 1., 1., 1.],
                               weights=[0, 1, 1, 2, 0]),  # label support in y_true
                    actual, rtol=R_TOL)
Example #25
def metrique_classe(y_pred, y_true, xclass):
    from imblearn.metrics import specificity_score
    from imblearn.metrics import sensitivity_score

    from imblearn.metrics import geometric_mean_score

    # Sensitivity is the ratio tp / (tp + fn), where tp is the number of true positives
    # and fn the number of false negatives.
    # Sensitivity quantifies the ability to avoid false negatives.

    # estimator produced by a few earlier fit() calls

    log.traceLogInfo("Class ", xclass)
    if xclass == 0:
        log.traceLogInfo("Class 0")
    if xclass == 1:
        log.traceLogInfo("Class 1")

    log.traceLogInfo("Sensitivity of the data re-balancing on the TEST set")
    #log.traceLogInfo("Binary ",sensitivity_score(y_true, y_pred, average='binary', pos_label=xclass))

    log.traceLogInfo(
        "Specificity is intuitively the ability of the classifier to find all the negative samples"
    )

    log.traceLogInfo("Binary ")
    log.traceLogInfo(
        specificity_score(y_true,
                          y_pred,
                          labels=None,
                          pos_label=xclass,
                          average='binary',
                          sample_weight=None))

    print("\nCalculer la moyenne géométrique")
    print(geometric_mean_score(y_true, y_pred, labels=None, pos_label=xclass))

    print("\n Calculer  sensitivity score")
    print(
        "La sensibilité est le rapport où est le nombre de vrais positifs et le nombre de faux négatifs."
    )
    print("La sensibilité quantifie la capacité à éviter les faux négatifs.")

    print(
        sensitivity_score(y_true,
                          y_pred,
                          labels=None,
                          pos_label=xclass,
                          average='binary'))
Example #26
    def fit(self, x, y, x_test, y_test):
        #
        xPos = []
        xNeg = []
        xTrain = []
        yTrain = []
        xlastTrain = []
        ylastTrain = []

        for i in range(0, len(y)):
            if y[i] == 1:
                xPos.append(x[i])
                xlastTrain.append(x[i])
                ylastTrain.append(y[i])
            else:
                xNeg.append(x[i])
            xTrain.append(x[i])
            yTrain.append(y[i])
        xNLSV = []
        iterRecord = 0
        for i in range(0, self.T):
            svc = SVC(C=self.C,
                      class_weight=self.class_weight,
                      degree=self.degree,
                      gamma=self.gamma,
                      kernel='linear')
            print(iterRecord)
            iterRecord += 1
            svc.fit(xTrain, yTrain)
            sv = svc.support_  # indices of the support vectors
            xTrain, yTrain, xNLSV, lastMar = self.rebuild(
                xTrain, yTrain, sv, xNLSV)  # rebuild sample
            #print (lastMar)
            if lastMar < 0.1 * len(xPos):
                break

        for i in xNLSV:
            xlastTrain.append(i)
            ylastTrain.append(-1)

        self.allSVC.fit(xlastTrain, ylastTrain)

        y_pre = self.allSVC.predict(x_test)
        ACC = accuracy_score(y_test, y_pre)
        SN = sensitivity_score(y_test, y_pre)
        SP = specificity_score(y_test, y_pre)
        MCC = matthews_corrcoef(y_test, y_pre)
        return SN, SP, ACC, MCC
Example #27
def specificity(output, target):
    """
    Specificity = TN / (TN + FP)
    :param output: Batch x Channel x ....
    :param target: Batch x ....
    :return:
    """
    with torch.no_grad():
        if len(output.shape) == (len(target.shape) + 1):
            # reverse one-hot encode output
            output = torch.argmax(output, 1)

        assert output.shape == target.shape, "The output size should be the same or one dimension more than the shape of target."

        output = output.cpu().detach().numpy()
        target = target.cpu().detach().numpy()
        score = specificity_score(target, output, average='micro')

    return score
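A small usage sketch for the wrapper above (assumes PyTorch and imblearn are installed; the logits and targets are made up):

import torch

logits = torch.tensor([[2.0, 0.1],
                       [0.3, 1.5],
                       [0.2, 2.2],
                       [1.8, 0.4]])      # Batch x Channel (raw class scores)
target = torch.tensor([0, 1, 0, 0])      # Batch (class indices)

print(specificity(logits, target))       # micro-averaged specificity across classes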
Example #28
 def eval_(self, data, label):
     self.ROI = self.ROI.eval()
     gt = []
     pre = []
     total = int(len(label) / 240)
     with torch.no_grad():
         for i in range(total):
             a = i * 240
             b = a + 240
             sin_x = torch.Tensor(data[a:b]).cuda()
             sin_x = sin_x.view(240, 1024, 15)
             sin_y = label[a:b]
             predict = self.ROI(sin_x)
             predict, index = torch.max(predict, 1)
             pre.extend(index.cpu().tolist())
             gt.extend(sin_y)
     print("ppv:{}".format(metrics.precision_score(gt, pre, average='micro')))
     print("spe:{}".format(specificity_score(gt, pre, average='micro')))
     print("sen:{}".format(metrics.recall_score(gt, pre, average='micro')))
Example #29
 def measure(self, i_folder, Label_test, Label_predict, average):
     i = i_folder
     self.Accuracy[i] = metrics.accuracy_score(Label_test, Label_predict)
     #        print(self.Accuracy)
     self.Precision[i] = metrics.precision_score(Label_test,
                                                 Label_predict,
                                                 average=average)
     #        print(self.Precision)
     self.Recall[i] = metrics.recall_score(Label_test,
                                           Label_predict,
                                           average=average)
     #        print(self.Recall)
     self.Specificity[i] = specificity_score(Label_test,
                                             Label_predict,
                                             average=average)
     #        print(self.Specificity)
     self.G_Mean[i] = geometric_mean_score(Label_test,
                                           Label_predict,
                                           average=average)
     #        print(self.G_Mean)
     self.F_Mean[i] = metrics.f1_score(Label_test,
                                       Label_predict,
                                       average=average)
Example #30
print(mean_SP)
print(mean_ACC)
print(mean_MCC)
print(mean_AUC)


clf.fit(gram_train, y_train)

y_score = clf.predict_proba(gram_test)
y_score = get_y_score(y_score)
precision, recall, thresholds = metrics.precision_recall_curve(y_test, y_score)

y_pred = clf.predict(gram_test)
ACC = metrics.accuracy_score(y_test, y_pred)
precision = metrics.precision_score(y_test, y_pred)
sensitivity = metrics.recall_score(y_test, y_pred)
specificity = specificity_score(y_test, y_pred)
AUC = metrics.roc_auc_score(y_test, y_score)
MCC = metrics.matthews_corrcoef(y_test, y_pred)
AUPR = get_AUPR(y_test, y_score)

#print("===========================")
#print('testing:')
print(sensitivity)
print(specificity)
print(ACC)
print(MCC)
print(AUC)
#print('AUPR =', AUPR)

Example #31
def eval_specificity(gt, pred, average='macro'):
    if type(gt) is pd.core.frame.DataFrame or type(gt) is pd.core.frame.Series:
        return specificity_score(gt.fillna(0.0), pred.fillna(0.0), average=average)
    else:
        return specificity_score(gt, pred, average=average)
Example #32
best = fmin(
    fn=svm_weight_ACC,
    space=space,
    algo=tpe.suggest,
    max_evals=100,
    trials=trials,
)
C = best['C']
g = best['gamma']
prob = svm_problem(membership, y_svm, X_svm)
param = svm_parameter('-t 2 -c ' + str(C) + ' -g ' + str(g) + ' -b 1')
m = svm_train(prob, param)
p_label, p_acc, p_val = svm_predict(y_test_svm, X_test_svm, m, '-b 1')
y_prob = np.reshape([p_val[i][0] for i in range(np.shape(p_val)[0])],
                    (np.shape(p_val)[0], 1))
ACC = metrics.accuracy_score(y_test, p_label)
precision = metrics.precision_score(y_test, p_label)
sensitivity = metrics.recall_score(y_test, p_label)
specificity = specificity_score(y_test, p_label)
AUC = metrics.roc_auc_score(y_test, y_prob)
MCC = metrics.matthews_corrcoef(y_test, p_label)

print('C =', C)
print('g =', g)

print('SN =', sensitivity)
print('SP =', specificity)
print('ACC =', p_acc[0])
print('MCC =', MCC)
print('AUC =', AUC)
def test_sensitivity_specificity_f_binary_single_class(y_pred,
                                                       expected_sensitivity,
                                                       expected_specificity):
    # Such a case may occur with non-stratified cross-validation
    assert sensitivity_score(*y_pred) == expected_sensitivity
    assert specificity_score(*y_pred) == expected_specificity
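The parametrized test above presumably sits under a pytest.mark.parametrize decorator; a sketch consistent with the fixed-value versions of this test earlier on the page (the decorator itself is an assumption):

import pytest
from imblearn.metrics import sensitivity_score, specificity_score


@pytest.mark.parametrize(
    "y_pred, expected_sensitivity, expected_specificity",
    [(([1, 1], [1, 1]), 1.0, 0.0),
     (([-1, -1], [-1, -1]), 0.0, 0.0)],
)
def test_sensitivity_specificity_f_binary_single_class(
        y_pred, expected_sensitivity, expected_specificity):
    # Such a case may occur with non-stratified cross-validation
    assert sensitivity_score(*y_pred) == expected_sensitivity
    assert specificity_score(*y_pred) == expected_specificity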