Example #1
    def create_sensitivity_report(cls,
                                  model,
                                  train: DMD,
                                  test: DMD,
                                  metric: str,
                                  sensitivity: SensitivityAnalysis = None,
                                  **kwargs) -> SensitivityFullReport:
        """
        Create sensitivity report
        Args:
            model - model to be analyzed based on test data
            train - train data (DMD). Train data is used if test is None
            test - test data (DMD). If None, use train data instead.
            sensitivity - SensitivityAnalysis instance. If None, instance is initiated internally.

        Returns:
            sensitivity report
        """
        sensitivity = sensitivity or SensitivityAnalysis()
        sensitivity.calculate_sensitivity(model=model,
                                          dmd_test=test,
                                          dmd_train=train,
                                          metric=metric,
                                          **kwargs)

        return sensitivity.sensitivity_report(**kwargs)
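The helper above simply chains SensitivityAnalysis.calculate_sensitivity with SensitivityAnalysis.sensitivity_report. A minimal usage sketch of that same flow (the enclosing class is not shown here, so the two SensitivityAnalysis calls are made directly; the DMD/Metrics import paths, the scikit-learn regressor, and the synthetic data are illustrative assumptions):

    # Sketch only: import paths for DMD and Metrics are assumed; the
    # SensitivityAnalysis path is the one used elsewhere on this page.
    import numpy as np
    from sklearn.ensemble import RandomForestRegressor

    from pytolemaic.utils.dmd import DMD          # assumed path
    from pytolemaic.utils.metrics import Metrics  # assumed path
    from pytolemaic.analysis_logic.model_analysis.sensitivity.sensitivity import SensitivityAnalysis

    rng = np.random.RandomState(0)
    x_train, x_test = rng.rand(200, 5), rng.rand(100, 5)
    y_train, y_test = 2 * x_train[:, 0], 2 * x_test[:, 0]

    model = RandomForestRegressor(n_estimators=10, random_state=0).fit(x_train, y_train)

    # wrap the raw arrays in DMD containers, as the examples below do
    train = DMD(x=x_train, y=y_train)
    test = DMD(x=x_test, y=y_test)

    sensitivity = SensitivityAnalysis()
    sensitivity.calculate_sensitivity(model=model, dmd_train=train, dmd_test=test,
                                      metric=Metrics.mae.name)
    report = sensitivity.sensitivity_report()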
Example #2
    def test_sensitivity_impact_regression(self, is_classification=False):
        sensitivity = SensitivityAnalysis()
        model = self.get_model(is_classification)

        train = self.get_data(is_classification)
        model.fit(train.values, train.target.ravel())

        test = self.get_data(is_classification, seed=1)

        scores = sensitivity.sensitivity_analysis(
            model=model,
            metric=Metrics.mae.name,
            dmd_test=test,
            method=SensitivityTypes.missing,
            raw_scores=False)

        scores = scores.sensitivities
        print(scores)
        self.assertTrue(isinstance(scores, dict))
        self.assertEqual(len(scores), test.n_features)
        self.assertGreaterEqual(
            numpy.round(sum([v for v in scores.values()]), 6), 1 - 1e-5)
        self.assertEqual(scores['f_1'], 0)
        self.assertGreaterEqual(scores['f_0'], 2 / len(scores))

        o_scores = [v for k, v in scores.items() if k not in ['f_0', 'f_1']]
        self.assertLessEqual(numpy.std(o_scores), 0.05)
        self.assertGreaterEqual(scores['f_0'], numpy.mean(o_scores))
Example #3
    def test_sensitivity_raw_shuffled_classification(self,
                                                     is_classification=True):
        sensitivity = SensitivityAnalysis()
        model = self.get_model(is_classification)

        train = self.get_data(is_classification)
        model.fit(train.values, train.target.ravel())

        test = self.get_data(is_classification, seed=1)

        raw_scores = sensitivity.sensitivity_analysis(
            model=model,
            metric=Metrics.recall.name,
            dmd_test=test,
            method=SensitivityTypes.shuffled,
            raw_scores=True)

        raw_scores = raw_scores.sensitivities
        self.assertTrue(isinstance(raw_scores, dict))
        self.assertEqual(len(raw_scores), test.n_features)
        self.assertLessEqual(raw_scores['f_0'], 0.5)
        self.assertEqual(raw_scores['f_1'], 1.0)
        self.assertLessEqual(max([v for v in raw_scores.values()]), 1.0)

        scores = [v for k, v in raw_scores.items() if k not in ['f_0', 'f_1']]
        self.assertLessEqual(numpy.std(scores), 0.05)
Example #4
    def calc_sensitivity_report(self):
        try:
            from pytolemaic.analysis_logic.model_analysis.sensitivity.sensitivity import SensitivityAnalysis
        except ImportError:
            logging.exception("Failed to import SensitivityAnalysis")
            return None

        self._sensitivity = SensitivityAnalysis()
        sensitivity_report = self._sensitivity.sensitivity_analysis(
            model=self.classifier, dmd_train=self.train, dmd_test=self.test,
            metric=Metrics.auc.name)
        return sensitivity_report
Example #5
    def test_leakage(self):
        sensitivity = SensitivityAnalysis()
        self.assertEqual(sensitivity._leakage(n_features=10, n_very_low=0), 0)
        self.assertEqual(sensitivity._leakage(n_features=10, n_very_low=9), 1)
        self.assertGreaterEqual(
            sensitivity._leakage(n_features=10, n_very_low=8), 0.8)

        print([
            sensitivity._leakage(n_features=10, n_very_low=k)
            for k in range(10)
        ])
Example #6
    def test_imputation(self):
        sensitivity = SensitivityAnalysis()

        shuffled = SensitivityOfFeaturesReport(
            method=SensitivityTypes.shuffled,
            sensitivities={
                'a': 0.3,
                'b': 0.5,
                'c': 0.2
            },
            stats_report=sensitivity._sensitivity_stats_report(sensitivities={
                'a': 0.3,
                'b': 0.5,
                'c': 0.2
            }))
        missing = SensitivityOfFeaturesReport(
            method=SensitivityTypes.missing,
            sensitivities={
                'a': 0.3,
                'b': 0.5,
                'c': 0.2
            },
            stats_report=sensitivity._sensitivity_stats_report(sensitivities={
                'a': 0.3,
                'b': 0.5,
                'c': 0.2
            }))

        self.assertEqual(
            sensitivity._imputation_score(shuffled=shuffled, missing=missing),
            0)

        shuffled = SensitivityOfFeaturesReport(
            method=SensitivityTypes.shuffled,
            sensitivities={
                'a': 1,
                'b': 0,
                'c': 0
            },
            stats_report=sensitivity._sensitivity_stats_report(sensitivities={
                'a': 1,
                'b': 0,
                'c': 0
            }))
        missing = SensitivityOfFeaturesReport(
            method=SensitivityTypes.missing,
            sensitivities={
                'a': 0,
                'b': 1,
                'c': 0
            },
            stats_report=sensitivity._sensitivity_stats_report(sensitivities={
                'a': 0,
                'b': 1,
                'c': 0
            }))

        self.assertEqual(
            sensitivity._imputation_score(shuffled=shuffled, missing=missing),
            1)
Example #7
    def test_sensitivity_functions(self, is_classification=False):
        sensitivity = SensitivityAnalysis()
        model = self.get_model(is_classification)

        train = self.get_data(is_classification)
        model.fit(train.values, train.target.ravel())

        test = self.get_data(is_classification, seed=1)

        shuffled = sensitivity.sensitivity_analysis(
            model=model,
            metric=Metrics.mae.name,
            dmd_test=test,
            method=SensitivityTypes.shuffled,
            raw_scores=False)

        missing = sensitivity.sensitivity_analysis(
            model=model,
            metric=Metrics.mae.name,
            dmd_test=test,
            method=SensitivityTypes.missing,
            raw_scores=False)

        stats = sensitivity._sensitivity_stats_report(shuffled.sensitivities)
        n_features = stats.n_features
        n_zero = stats.n_zero
        n_very_low = stats.n_very_low
        n_low = stats.n_low

        leakage_score = sensitivity._leakage(n_features=n_features,
                                             n_very_low=n_very_low,
                                             n_zero=n_zero)

        self.assertGreater(leakage_score, 0)
        self.assertLessEqual(leakage_score, 1)

        overfit_score = sensitivity._too_many_features(n_features=n_features,
                                                       n_very_low=n_very_low,
                                                       n_low=n_low,
                                                       n_zero=n_zero)

        self.assertGreaterEqual(overfit_score, 0)
        self.assertLessEqual(overfit_score, 1)

        imputation_score = sensitivity._imputation_score(shuffled=shuffled,
                                                         missing=missing)
        self.assertGreaterEqual(imputation_score, 0)
        self.assertLessEqual(imputation_score, 1)

        report = sensitivity._vulnerability_report(
            shuffled_sensitivity=shuffled, missing_sensitivity=missing)
        self.assertTrue(0 <= report.imputation <= 1)
        self.assertTrue(0 <= report.leakage <= 1)
        self.assertTrue(0 <= report.too_many_features <= 1)
Example #8
    def create_sensitivity_report(cls,
                                  model,
                                  train: DMD,
                                  test: DMD,
                                  metric: str,
                                  sensitivity: SensitivityAnalysis = None,
                                  **kwargs) -> SensitivityFullReport:
        sensitivity = sensitivity or SensitivityAnalysis()
        sensitivity.calculate_sensitivity(model=model,
                                          dmd_test=test,
                                          dmd_train=train,
                                          metric=metric,
                                          **kwargs)

        return sensitivity.sensitivity_report(**kwargs)
Example #9
    def test_sensitivity_meta(self):

        sensitivity = SensitivityAnalysis()
        sensitivities = {'a' + str(k): k for k in range(10)}

        mock = SensitivityOfFeaturesReport(
            method='mock',
            sensitivities=sensitivities,
            stats_report=sensitivity._sensitivity_stats_report(sensitivities))
        stats = mock.stats_report
        pprint(stats.to_dict())

        self.assertEqual(stats.n_features, 10)
        self.assertEqual(stats.n_zero, 1)
        self.assertEqual(stats.n_low, 1)
Example #10
    def test_overfit(self):
        sensitivity = SensitivityAnalysis()

        print([
            sensitivity._too_many_features(n_features=15,
                                           n_low=k + 5,
                                           n_very_low=5 + k // 2,
                                           n_zero=5) for k in range(10)
        ])

        self.assertEqual(
            sensitivity._too_many_features(n_features=10,
                                           n_low=0,
                                           n_very_low=0,
                                           n_zero=0), 0)
        self.assertEqual(
            sensitivity._too_many_features(n_features=10,
                                           n_low=5,
                                           n_very_low=0,
                                           n_zero=0), 0.5)

        self.assertGreater(
            sensitivity._too_many_features(n_features=10,
                                           n_low=9,
                                           n_very_low=9,
                                           n_zero=0), 0.9)

        self.assertGreaterEqual(
            sensitivity._too_many_features(n_features=10,
                                           n_low=9,
                                           n_very_low=9,
                                           n_zero=9), 0.0)
Example #11
    def __init__(self,
                 model,
                 xtrain=None,
                 ytrain=None,
                 sample_meta_train: dict = None,
                 xtest=None,
                 ytest=None,
                 sample_meta_test: dict = None,
                 columns_meta: dict = None,
                 feature_names: list = None,
                 feature_types: list = None,
                 categorical_encoding: dict = None,
                 metric: [str, Metric] = None,
                 splitter: str = 'shuffled',
                 target_labels: dict = None):
        """

        :param model: Model trained on the training data provided.

        :param xtrain: X training data. If a DMD is provided, ytrain and any additional metadata are ignored.
        :param ytrain: Y training data.
        :param sample_meta_train: generic way to provide meta information on each sample in the train data (e.g. sample weight) {key: [list of values]}.

        :param xtest: X test data. If a DMD is provided, ytest and any additional metadata are ignored.
        :param ytest: Y test data (ignored if xtest is a DMD).
        :param sample_meta_test: generic way to provide meta information on each sample in the test data (e.g. sample weight) {key: [list of values]}.

        :param columns_meta: generic way to provide meta information on each feature (e.g. feature name) {key : [list of values]}.
        :param feature_names: feature name for each feature
        :param feature_types: feature type for each feature: NUMERICAL or CATEGORICAL
        :param categorical_encoding: For each feature of categorical type, provide a dictionary of the structure
        {feature_names: {index: class name}}. This information allows producing more readable reports.

        :param metric: Target metric
        :param splitter: Splitter
        :param target_labels: categorical encoding for target variable in the format of {index: class name}.
        """
        self.model = model

        if isinstance(splitter, str):
            if splitter == 'shuffled':
                splitter = ShuffleSplitter
            elif splitter == 'stratified':
                splitter = StratifiedSplitter
            else:
                raise NotImplementedError(
                    "splitter='{}' is not supported".format(splitter))
        else:
            if not hasattr(splitter, 'split'):
                raise ValueError(
                    "splitter='{}' does not support the split() operation".
                    format(splitter))
            else:
                raise NotImplementedError(
                    "splitter='{}' is not supported".format(splitter))

        if ytrain is not None:
            shape = getattr(ytrain, 'shape', (1, 1))
            if len(shape) == 2 and shape[1] > 1:
                raise NotImplementedError(
                    "Pytrust does not support multilabel (ytrain.shape[1]>1) analysis. "
                    "In order to use Pytolemaic package, please wrap you model so model.predict(X) will return a single vector. "
                )
        if ytest is not None:
            shape = getattr(ytest, 'shape', (1, 1))
            if len(shape) == 2 and shape[1] > 1:
                raise NotImplementedError(
                    "Pytrust does not support multilabel (ytest.shape[1]>1) analysis. "
                    "In order to use Pytolemaic package, please wrap you model so model.predict(X) will return a single vector. "
                )

        self.train = xtrain
        if self.train is not None and not isinstance(self.train, DMD):
            self.train = DMD(x=xtrain,
                             y=ytrain,
                             samples_meta=sample_meta_train,
                             columns_meta=columns_meta,
                             feature_names=feature_names,
                             feature_types=feature_types,
                             categorical_encoding=categorical_encoding,
                             splitter=splitter,
                             target_labels=target_labels)

        self.test = xtest
        if self.test is not None and not isinstance(self.test, DMD):
            self.test = DMD(x=xtest,
                            y=ytest,
                            samples_meta=sample_meta_test,
                            columns_meta=columns_meta,
                            feature_names=feature_names,
                            feature_types=feature_types,
                            categorical_encoding=categorical_encoding,
                            splitter=splitter,
                            target_labels=target_labels)

        if metric is None:
            if GeneralUtils.is_classification(model):
                metric = Metrics.recall
            else:
                metric = Metrics.mae

        self.metric = metric.name if isinstance(metric, Metric) else metric

        # todo
        self._validate_input()

        self.sensitivity = SensitivityAnalysis()

        self._uncertainty_models = {}
        self.covariance_shift = None
        self._cache = {}
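The docstring above documents the full constructor surface. A hedged construction sketch follows (the class name is not shown in this snippet; judging by the "Pytrust does not support multilabel" messages it is pytolemaic's PyTrust, and the top-level import, the scikit-learn classifier, and the toy data are assumptions):

    # Sketch only: the class name and import path are inferred from the error
    # messages above; the data and estimator are placeholders.
    import numpy as np
    from sklearn.ensemble import RandomForestClassifier
    from pytolemaic import PyTrust  # assumed top-level export

    rng = np.random.RandomState(0)
    x_train, x_test = rng.rand(300, 4), rng.rand(100, 4)
    y_train = (x_train[:, 0] > 0.5).astype(int)
    y_test = (x_test[:, 0] > 0.5).astype(int)

    model = RandomForestClassifier(n_estimators=10, random_state=0).fit(x_train, y_train)

    pytrust = PyTrust(model=model,
                      xtrain=x_train, ytrain=y_train,
                      xtest=x_test, ytest=y_test,
                      metric='recall',      # str or Metric; defaults to recall/mae by task
                      splitter='shuffled')  # 'shuffled' or 'stratified'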
Example #12
class CovarianceShift:
    def __init__(self):
        self._separation_quality = None
        self._covariance_shift = None
        self._cov_train = None
        self._cov_test = None
        self._classifier = None
        self._sensitivity = None

        self._dmd_train = None
        self._dmd_test = None

    def calc_covariance_shift(self, dmd_train: DMD, dmd_test: DMD):
        # save data for later report
        self._dmd_train = dmd_train
        self._dmd_test = dmd_test

        # split data into new train / test sets
        self._cov_train, self._cov_test = CovarianceShiftCalculator.prepare_dataset_for_score_quality(
            dmd_train=dmd_train, dmd_test=dmd_test)
        self._classifier = CovarianceShiftCalculator.prepare_estimator(train=self._cov_train)
        self._covariance_shift = CovarianceShiftCalculator.calc_convriance_shift_auc(
            classifier=self._classifier, test=self._cov_test)

    def covariance_shift_report(self):
        medium_lvl = 0.7
        high_lvl = 0.95
        if self.covariance_shift > medium_lvl:
            sensitivity_report = self.calc_sensitivity_report()
        else:
            sensitivity_report = None

        return CovarianceShiftReport(covariance_shift=self.covariance_shift,
                                     sensitivity_report=sensitivity_report,
                                     medium_lvl=medium_lvl, high_lvl=high_lvl,
                                     train=self._dmd_train,
                                     test=self._dmd_test)

    def calc_sensitivity_report(self):
        try:
            from pytolemaic.analysis_logic.model_analysis.sensitivity.sensitivity import SensitivityAnalysis
        except ImportError:
            logging.exception("Failed to import SensitivityAnalysis")
            return None

        self._sensitivity = SensitivityAnalysis()
        sensitivity_report = self._sensitivity.sensitivity_analysis(
            model=self.classifier, dmd_train=self.train, dmd_test=self.test,
            metric=Metrics.auc.name)
        return sensitivity_report

    @property
    def sensitivity(self):
        return self._sensitivity

    @property
    def separation_quality(self):
        return 1 - self._covariance_shift

    @property
    def covariance_shift(self):
        return self._covariance_shift

    @property
    def train(self):
        return self._cov_train

    @property
    def test(self):
        return self._cov_test

    @property
    def classifier(self):
        return self._classifier
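
CovarianceShift quantifies train/test drift by building a classifier over the combined data and scoring how well it separates train samples from test samples (AUC); separation_quality is simply 1 - covariance_shift, and a sensitivity report is attached only when the shift exceeds the 0.7 threshold. A short hedged sketch of driving the class, assuming dmd_train and dmd_test are DMD objects built as in the earlier examples:

    # Sketch only: dmd_train / dmd_test are assumed to be DMD objects.
    cov = CovarianceShift()
    cov.calc_covariance_shift(dmd_train=dmd_train, dmd_test=dmd_test)

    print(cov.covariance_shift)     # AUC of the train-vs-test classifier
    print(cov.separation_quality)   # 1 - covariance_shift

    report = cov.covariance_shift_report()  # includes a sensitivity report only if shift > 0.7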