Example #1
0
    def stats_overall(self):
        """
        Return an OrderedDict of overall classification statistics.

        Reads the confusion matrix from ``self._df_confusion`` and uses the
        module-level helpers ``class_agreement``, ``binom_interval`` and
        ``prop_test`` to compute accuracy, a 95% binomial confidence
        interval, Cohen's kappa and the proportion-test p-value.
        Statistics that cannot be computed fall back to ``np.nan``.

        Returns
        -------
        collections.OrderedDict
            Keys: 'Accuracy', '95% CI', 'No Information Rate',
            'P-Value [Acc > NIR]', 'Kappa', "Mcnemar's Test P-Value".
        """
        df = self._df_confusion
        d_stats = collections.OrderedDict()

        d_class_agreement = class_agreement(df)

        key = 'Accuracy'
        try:
            d_stats[key] = d_class_agreement['diag']  # 0.35
        except KeyError:
            d_stats[key] = np.nan

        key = '95% CI'
        try:
            # Diagonal sum = number of correct predictions; grand total =
            # number of observations.
            d_stats[key] = binom_interval(np.sum(np.diag(df)),
                                          df.sum().sum())  # (0.1539, 0.5922)
        except Exception:  # binom_interval may fail on degenerate input
            d_stats[key] = np.nan

        d_prop_test = prop_test(df)
        d_stats['No Information Rate'] = 'ToDo'  # 0.8
        d_stats['P-Value [Acc > NIR]'] = d_prop_test['p.value']  # 1
        d_stats['Kappa'] = d_class_agreement['kappa']  # 0.078
        d_stats['Mcnemar\'s Test P-Value'] = 'ToDo'  # np.nan

        return d_stats
Example #2
0
    def stats_overall(self):
        """
        Return an OrderedDict of overall classification statistics.

        Reads the confusion matrix from ``self._df_confusion`` and uses the
        module-level helpers ``class_agreement``, ``binom_interval`` and
        ``prop_test`` to compute accuracy, a 95% binomial confidence
        interval, Cohen's kappa and the proportion-test p-value.
        Statistics that cannot be computed fall back to ``np.nan``.

        Returns
        -------
        collections.OrderedDict
            Keys: 'Accuracy', '95% CI', 'No Information Rate',
            'P-Value [Acc > NIR]', 'Kappa', "Mcnemar's Test P-Value".
        """
        df = self._df_confusion
        d_stats = collections.OrderedDict()

        d_class_agreement = class_agreement(df)

        key = 'Accuracy'
        try:
            d_stats[key] = d_class_agreement['diag']  # 0.35
        except KeyError:  # only the dict lookup can raise here
            d_stats[key] = np.nan

        key = '95% CI'
        try:
            # Diagonal sum = number of correct predictions; grand total =
            # number of observations.
            d_stats[key] = binom_interval(np.sum(np.diag(df)), df.sum().sum())  # (0.1539, 0.5922)
        except Exception:  # binom_interval may fail on degenerate input
            d_stats[key] = np.nan

        d_prop_test = prop_test(df)
        d_stats['No Information Rate'] = 'ToDo'  # 0.8
        d_stats['P-Value [Acc > NIR]'] = d_prop_test['p.value']  # 1
        d_stats['Kappa'] = d_class_agreement['kappa']  # 0.078
        d_stats['Mcnemar\'s Test P-Value'] = 'ToDo'  # np.nan

        return d_stats