Example #1
    def explain_row(self, *row_args, max_evals, main_effects, error_bounds, batch_size, outputs, silent):
        """ Explains a single row.
        """

        # build a masked version of the model for the current input sample
        fm = MaskedModel(self.model, self.masker, self.link, self.linearize_link, *row_args)

        # compute any custom clustering for this row
        row_clustering = None
        if getattr(self.masker, "clustering", None) is not None:
            if isinstance(self.masker.clustering, np.ndarray):
                row_clustering = self.masker.clustering
            elif callable(self.masker.clustering):
                row_clustering = self.masker.clustering(*row_args)
            else:
                raise NotImplementedError("The masker passed has a .clustering attribute that is not yet supported by the Permutation explainer!")

        # compute the correct expected value
        masks = np.zeros(1, dtype=int)  # np.int was removed in NumPy 1.24; the builtin int works here
        outputs = fm(masks, zero_index=0, batch_size=1)
        expected_value = outputs[0]

        # generate random feature attributions
        # we produce small values so our explanation errors are similar to a constant function
        row_values = np.random.randn(*((len(fm),) + outputs.shape[1:])) * 0.001

        return {
            "values": row_values,
            "expected_values": expected_value,
            "mask_shapes": fm.mask_shapes,
            "main_effects": None,
            "clustering": row_clustering,
            "error_std": None,
            "output_names": self.model.output_names if hasattr(self.model, "output_names") else None
        }
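
This explain_row produces small random attributions, i.e. a random-baseline explainer (its body matches shap's Random baseline). A minimal call sketch, with a toy model, masker, and data that are illustrative assumptions rather than part of the listing:

    import numpy as np
    import shap

    X = np.random.randn(20, 5)              # toy tabular data
    model = lambda x: x.sum(axis=1)         # toy model, one scalar output per row
    masker = shap.maskers.Independent(X)    # background-distribution masker

    # assumes the method above belongs to shap's Random baseline explainer;
    # substitute your own Explainer subclass if the import path differs
    explainer = shap.explainers.other.Random(model, masker)
    result = explainer.explain_row(
        X[0],                               # *row_args: a single input row
        max_evals=100, main_effects=False, error_bounds=False,
        batch_size=1, outputs=None, silent=True,
    )
    print(result["expected_values"], result["values"].shape)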
Example #2
File: _sequential.py Project: lrjball/shap
    def __call__(self,
                 name,
                 explanation,
                 *model_args,
                 percent=0.01,
                 indices=[],
                 y=None,
                 label=None,
                 silent=False,
                 debug_mode=False,
                 batch_size=500):
        # if the explanation is already the attributions
        if safe_isinstance(explanation, "numpy.ndarray"):
            attributions = explanation
        elif isinstance(explanation, Explanation):
            attributions = explanation.values
        else:
            raise ValueError(
                "The passed explanation must be either of type numpy.ndarray or shap.Explanation!"
            )

        assert len(attributions) == len(
            model_args[0]
        ), "The explanation passed must have the same number of rows as the model_args that were passed!"

        if label is None:
            label = "Score %d" % len(self.score_values)

        # convert dataframes
        # if safe_isinstance(X, "pandas.core.series.Series") or safe_isinstance(X, "pandas.core.frame.DataFrame"):
        #     X = X.values

        # convert all single-sample vectors to matrices
        # if not hasattr(attributions[0], "__len__"):
        #     attributions = np.array([attributions])
        # if not hasattr(X[0], "__len__") and self.data_type == "tabular":
        #     X = np.array([X])

        pbar = None
        start_time = time.time()
        svals = []
        mask_vals = []

        for i, args in enumerate(zip(*model_args)):
            if self.data_type == "image":
                x_shape, y_shape = attributions[i].shape[0], attributions[i].shape[1]
                feature_size = np.prod([x_shape, y_shape])
                sample_attributions = attributions[i].mean(2).reshape(feature_size, -1)
                # X is undefined in this scope (its conversion above is commented
                # out); args[0] is the corresponding input row from model_args
                data = args[0].flatten()
                mask_shape = args[0].shape
            else:
                feature_size = attributions[i].shape[0]
                sample_attributions = attributions[i]
                # data = X[i]
                mask_shape = feature_size

            self.masked_model = MaskedModel(self.model, self.masker,
                                            links.identity,
                                            self.linearize_link, *args)

            if len(attributions[i].shape) == 1 or self.data_type == "tabular":
                output_size = 1
            else:
                output_size = attributions[i].shape[-1]

            masks = []
            for k in range(output_size):
                # np.bool was removed in NumPy 1.24; the builtin bool works here
                mask = np.ones(mask_shape, dtype=bool) * (self.perturbation == "remove")
                masks.append(mask.copy().flatten())

                if output_size != 1:
                    test_attributions = sample_attributions[:, k]
                else:
                    test_attributions = sample_attributions

                ordered_inds = self.sort_order_map(test_attributions)
                increment = max(1, int(feature_size * percent))
                for j in range(0, feature_size, increment):
                    oind_list = [
                        ordered_inds[l]
                        for l in range(j, min(feature_size, j + increment))
                    ]

                    for oind in oind_list:
                        if not ((self.sort_order == "positive" and test_attributions[oind] <= 0) or \
                                (self.sort_order == "negative" and test_attributions[oind] >= 0)):
                            if self.data_type == "image":
                                xoind = oind // attributions[i].shape[1]
                                yoind = oind % attributions[i].shape[1]
                                mask[xoind][yoind] = self.perturbation == "keep"
                            else:
                                mask[oind] = self.perturbation == "keep"

                    masks.append(mask.copy().flatten())

            mask_vals.append(masks)

            mask_size = len(range(0, feature_size, increment)) + 1
            values = []
            masks_arr = np.array(masks)
            for j in range(0, len(masks_arr), batch_size):
                values.append(self.masked_model(masks_arr[j:j + batch_size]))
            values = np.concatenate(values)
            if len(indices) == 0:
                outputs = range(output_size)
            else:
                outputs = indices

            index = 0
            for k in outputs:
                if output_size == 1:
                    svals.append(values[index:index + mask_size])
                else:
                    svals.append(values[index:index + mask_size, k])
                index += mask_size

            if pbar is None and time.time() - start_time > 5:
                pbar = tqdm(total=len(model_args[0]),
                            disable=silent,
                            leave=False,
                            desc="SequentialMasker")
                pbar.update(i + 1)
            if pbar is not None:
                pbar.update(1)

        if pbar is not None:
            pbar.close()

        self.score_values.append(np.array(svals))

        # if self.sort_order == "negative":
        #     curve_sign = -1
        # else:
        curve_sign = 1

        self.labels.append(label)

        xs = np.linspace(0, 1, 100)
        curves = np.zeros((len(self.score_values[-1]), len(xs)))
        for j in range(len(self.score_values[-1])):
            xp = np.linspace(0, 1, len(self.score_values[-1][j]))
            yp = self.score_values[-1][j]
            curves[j, :] = np.interp(xs, xp, yp)
        ys = curves.mean(0)
        auc = sklearn.metrics.auc(np.linspace(0, 1, len(ys)),
                                  curve_sign * (ys - ys[0]))

        if not debug_mode:
            return BenchmarkResult(self.perturbation + " " + self.sort_order,
                                   name,
                                   curve_x=xs,
                                   curve_y=ys)
        else:
            aucs = []
            for j in range(len(self.score_values[-1])):
                curve = curves[j, :]
                auc = sklearn.metrics.auc(np.linspace(0, 1, len(curve)),
                                          curve_sign * (curve - curve[0]))
                aucs.append(auc)
            return mask_vals, curves, aucs
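
A minimal call sketch for this benchmark. The class name SequentialMasker is taken from the tqdm description in the code, but the constructor below is a guess, so treat the setup as illustrative:

    import numpy as np
    import shap

    X = np.random.randn(50, 8)                      # toy tabular data
    model = lambda x: x @ np.arange(8.0)            # toy linear model
    masker = shap.maskers.Independent(X)
    explanation = shap.Explainer(model, masker)(X)  # any shap Explanation works

    # hypothetical constructor: __call__ reads self.model, self.masker,
    # self.perturbation ("keep"/"remove"), self.sort_order, self.data_type, ...
    bench = SequentialMasker("keep", "positive", masker, model)
    result = bench("my explainer", explanation, X, percent=0.01)
    print(result.curve_x.shape, result.curve_y.shape)   # BenchmarkResult curves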
Example #3
    def __call__(self,
                 explanation,
                 name,
                 step_fraction=0.01,
                 indices=[],
                 silent=False):
        """ Run this benchmark on the given explanation.
        """

        if safe_isinstance(explanation, "numpy.ndarray"):
            attributions = explanation
        elif isinstance(explanation, Explanation):
            attributions = explanation.values
        else:
            raise ValueError(
                "The passed explanation must be either of type numpy.ndarray or shap.Explanation!"
            )

        assert len(attributions) == len(self.model_args[0]), "The explanation passed must have the same number of rows as " + \
                                                             "the self.model_args that were passed!"

        # it is important that we choose the same permutations for the different explanations
        # we are comparing, so as to avoid needless noise
        # (np.random.seed() returns None, so save the full RNG state instead)
        old_state = np.random.get_state()
        np.random.seed(self.seed)

        pbar = None
        start_time = time.time()
        svals = []
        mask_vals = []

        for i, args in enumerate(zip(*self.model_args)):
            if self.data_type == "image":
                x_shape, y_shape = attributions[i].shape[0], attributions[i].shape[1]
                feature_size = np.prod([x_shape, y_shape])
                sample_attributions = attributions[i].mean(2).reshape(feature_size, -1)
                # X is undefined in this scope; args[0] is the corresponding
                # input row from self.model_args
                data = args[0].flatten()
                mask_shape = args[0].shape
            else:
                feature_size = attributions[i].shape[0]
                sample_attributions = attributions[i]
                # data = X[i]
                mask_shape = feature_size

            # compute any custom clustering for this row
            row_clustering = None
            if getattr(self.masker, "clustering", None) is not None:
                if isinstance(self.masker.clustering, np.ndarray):
                    row_clustering = self.masker.clustering
                elif callable(self.masker.clustering):
                    row_clustering = self.masker.clustering(*args)
                else:
                    raise NotImplementedError(
                        "The masker passed has a .clustering attribute that is not yet supported by the ExplanationError benchmark!"
                    )

            masked_model = MaskedModel(self.model, self.masker, self.link,
                                       self.linearize_link, *args)

            if len(attributions[i].shape) == 1 or self.data_type == "tabular":
                output_size = 1
            else:
                output_size = attributions[i].shape[-1]

            total_values = None
            for _ in range(self.num_permutations):
                masks = []
                for k in range(output_size):
                    mask = np.zeros(mask_shape, dtype=bool)  # builtin bool (np.bool was removed in NumPy 1.24)
                    masks.append(mask.copy().flatten())

                    if output_size != 1:
                        test_attributions = sample_attributions[:, k]
                    else:
                        test_attributions = sample_attributions

                    ordered_inds = np.arange(len(test_attributions))

                    # shuffle the indexes so we get a random permutation ordering
                    if row_clustering is not None:
                        inds_mask = np.ones(len(test_attributions), dtype=bool)
                        partition_tree_shuffle(ordered_inds, inds_mask, row_clustering)
                    else:
                        np.random.shuffle(ordered_inds)

                    #ordered_inds = np.random.permutation(len(test_attributions))
                    increment = max(1, int(feature_size * step_fraction))
                    for j in range(0, feature_size, increment):
                        oind_list = [
                            ordered_inds[l]
                            for l in range(j, min(feature_size, j + increment))
                        ]
                        for oind in oind_list:
                            if self.data_type == "image":
                                xoind = oind // attributions[i].shape[1]
                                yoind = oind % attributions[i].shape[1]
                                mask[xoind][yoind] = True
                            else:
                                mask[oind] = True

                        masks.append(mask.copy().flatten())

                mask_vals.append(masks)

                mask_size = len(range(0, feature_size, increment)) + 1
                values = []
                masks_arr = np.array(masks)
                for j in range(0, len(masks_arr), self.batch_size):
                    values.append(masked_model(masks_arr[j:j + self.batch_size]))
                values = np.concatenate(values)
                base_value = values[0]
                for l, v in enumerate(values):
                    values[l] = (v - (base_value + np.sum(test_attributions[masks_arr[l]]))) ** 2

                if total_values is None:
                    total_values = values
                else:
                    total_values += values
            total_values /= self.num_permutations
            if len(indices) == 0:
                outputs = range(output_size)
            else:
                outputs = indices

            index = 0
            for k in outputs:
                if output_size == 1:
                    svals.append(total_values[index:index + mask_size])
                else:
                    svals.append(total_values[index:index + mask_size, k])
                index += mask_size

            if pbar is None and time.time() - start_time > 5:
                pbar = tqdm(total=len(self.model_args[0]),
                            disable=silent,
                            leave=False,
                            desc=f"ExplanationError for {name}")
                pbar.update(i + 1)
            if pbar is not None:
                pbar.update(1)

        if pbar is not None:
            pbar.close()

        svals = np.array(svals)

        # restore the random state so we don't mess up the caller
        np.random.set_state(old_state)

        return BenchmarkResult("explanation error",
                               name,
                               value=np.sqrt(
                                   np.sum(total_values) / len(total_values)))
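
A minimal call sketch. The class name ExplanationError comes from the tqdm description in the code, but the constructor below is a guess, so the setup is illustrative. The returned value is the root mean squared gap, along random masking paths, between the model's output changes and the summed attributions:

    import numpy as np
    import shap

    X = np.random.randn(50, 8)
    model = lambda x: x @ np.arange(8.0)
    masker = shap.maskers.Independent(X)
    explanation = shap.Explainer(model, masker)(X)

    # hypothetical constructor: __call__ reads self.model, self.masker,
    # self.model_args, self.num_permutations, self.batch_size, self.seed, ...
    bench = ExplanationError(masker, model, X)
    result = bench(explanation, "my explainer", step_fraction=0.01)
    print(result.value)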
Example #4
    def __call__(self,
                 explanation,
                 name,
                 step_fraction=0.01,
                 indices=[],
                 silent=False):
        """ Run this benchmark on the given explanation.
        """

        if safe_isinstance(explanation, "numpy.ndarray"):
            attributions = explanation
        elif isinstance(explanation, Explanation):
            attributions = explanation.values
        else:
            raise ValueError(
                "The passed explanation must be either of type numpy.ndarray or shap.Explanation!"
            )

        assert len(attributions) == len(self.model_args[0]), "The explanation passed must have the same number of rows as " + \
                                                             "the self.model_args that were passed!"

        # it is important that we choose the same permutations for the different explanations we are comparing
        # so as to avoid needless noise
        # np.random.seed() returns None, so save the full RNG state instead
        old_state = np.random.get_state()
        np.random.seed(self.seed)

        pbar = None
        start_time = time.time()
        svals = []
        mask_vals = []

        for i, args in enumerate(zip(*self.model_args)):

            if len(args[0].shape) != len(attributions[i].shape):
                raise ValueError(
                    "The passed explanation must have the same dim as the model_args and must not have a vector output!"
                )

            feature_size = np.prod(attributions[i].shape)
            sample_attributions = attributions[i].flatten()

            # compute any custom clustering for this row
            row_clustering = None
            if getattr(self.masker, "clustering", None) is not None:
                if isinstance(self.masker.clustering, np.ndarray):
                    row_clustering = self.masker.clustering
                elif callable(self.masker.clustering):
                    row_clustering = self.masker.clustering(*args)
                else:
                    raise NotImplementedError(
                        "The masker passed has a .clustering attribute that is not yet supported by the ExplanationError benchmark!"
                    )

            masked_model = MaskedModel(self.model, self.masker, self.link,
                                       self.linearize_link, *args)

            total_values = None
            for _ in range(self.num_permutations):
                masks = []
                mask = np.zeros(feature_size, dtype=bool)  # builtin bool (np.bool was removed in NumPy 1.24)
                masks.append(mask.copy())
                ordered_inds = np.arange(feature_size)

                # shuffle the indexes so we get a random permutation ordering
                if row_clustering is not None:
                    inds_mask = np.ones(feature_size, dtype=bool)
                    partition_tree_shuffle(ordered_inds, inds_mask, row_clustering)
                else:
                    np.random.shuffle(ordered_inds)

                increment = max(1, int(feature_size * step_fraction))
                for j in range(0, feature_size, increment):
                    mask[ordered_inds[np.arange(j, min(feature_size, j + increment))]] = True
                    masks.append(mask.copy())
                mask_vals.append(masks)

                values = []
                masks_arr = np.array(masks)
                for j in range(0, len(masks_arr), self.batch_size):
                    values.append(masked_model(masks_arr[j:j + self.batch_size]))
                values = np.concatenate(values)
                base_value = values[0]
                for l, v in enumerate(values):
                    values[l] = (v - (base_value + np.sum(sample_attributions[masks_arr[l]]))) ** 2

                if total_values is None:
                    total_values = values
                else:
                    total_values += values
            total_values /= self.num_permutations

            svals.append(total_values)

            if pbar is None and time.time() - start_time > 5:
                pbar = tqdm(total=len(self.model_args[0]),
                            disable=silent,
                            leave=False,
                            desc=f"ExplanationError for {name}")
                pbar.update(i + 1)
            if pbar is not None:
                pbar.update(1)

        if pbar is not None:
            pbar.close()

        svals = np.array(svals)

        # restore the random state so we don't mess up the caller
        np.random.set_state(old_state)

        return BenchmarkResult("explanation error",
                               name,
                               value=np.sqrt(
                                   np.sum(total_values) / len(total_values)))
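
The core of this variant is its incremental masking schedule: reveal features in a random order, `increment` indices at a time, recording one mask per step. A self-contained sketch of just that schedule (the function name is illustrative):

    import numpy as np

    def permutation_mask_schedule(feature_size, step_fraction=0.01, seed=0):
        # start from the all-False mask, then flip features to True in a
        # random order, `increment` indices per step, keeping every mask
        rng = np.random.default_rng(seed)
        ordered_inds = rng.permutation(feature_size)
        increment = max(1, int(feature_size * step_fraction))
        mask = np.zeros(feature_size, dtype=bool)
        masks = [mask.copy()]
        for j in range(0, feature_size, increment):
            mask[ordered_inds[j:j + increment]] = True  # slice clamps at the end
            masks.append(mask.copy())
        return np.array(masks)

    masks = permutation_mask_schedule(100)
    print(masks.shape)        # (101, 100): the all-False mask plus one per step
    print(masks.sum(axis=1))  # 0, 1, 2, ..., 100 features revealed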
Example #5
    def model_score(self,
                    explanation,
                    X,
                    percent=0.01,
                    indices=[],
                    y=None,
                    label=None,
                    silent=False,
                    debug_mode=False):
        # if the explanation is already the attributions
        if safe_isinstance(explanation, "numpy.ndarray"):
            attributions = explanation
        elif isinstance(explanation, Explanation):
            attributions = explanation.values
        else:
            # without this branch, `attributions` would be unbound below
            raise ValueError(
                "The passed explanation must be either of type numpy.ndarray or shap.Explanation!"
            )

        if label is None:
            label = "Score %d" % len(self.score_values)

        # convert dataframes
        if safe_isinstance(X, "pandas.core.series.Series") or safe_isinstance(
                X, "pandas.core.frame.DataFrame"):
            X = X.values

        # convert all single-sample vectors to matrices
        if not hasattr(attributions[0], "__len__"):
            attributions = np.array([attributions])
        if not hasattr(X[0], "__len__") and self.data_type == "tabular":
            X = np.array([X])

        pbar = None
        start_time = time.time()
        svals = []
        mask_vals = []

        for i in range(len(X)):
            if self.data_type == "image":
                x_shape, y_shape = attributions[i].shape[0], attributions[i].shape[1]
                feature_size = np.prod([x_shape, y_shape])
                sample_attributions = attributions[i].mean(2).reshape(feature_size, -1)
                data = X[i].flatten()
                mask_shape = X[i].shape
            else:
                feature_size = attributions[i].shape[0]
                sample_attributions = attributions[i]
                data = X[i]
                mask_shape = feature_size

            self.masked_model = MaskedModel(self.model, self.masker,
                                            links.identity, data)

            if len(attributions[i].shape) == 1 or self.data_type == "tabular":
                output_size = 1
            else:
                output_size = attributions[i].shape[-1]

            masks = []
            for k in range(output_size):
                # np.bool was removed in NumPy 1.24; the builtin bool works here
                mask = np.ones(mask_shape, dtype=bool) * (self.perturbation == "remove")
                masks.append(mask.copy().flatten())

                if output_size != 1:
                    test_attributions = sample_attributions[:, k]
                else:
                    test_attributions = sample_attributions

                ordered_inds = self.sort_order_map(test_attributions)
                increment = max(1, int(feature_size * percent))
                for j in range(0, feature_size, increment):
                    oind_list = [
                        ordered_inds[l]
                        for l in range(j, min(feature_size, j + increment))
                    ]

                    for oind in oind_list:
                        if not ((self.sort_order == "positive" and test_attributions[oind] <= 0) or \
                                (self.sort_order == "negative" and test_attributions[oind] >= 0)):
                            if self.data_type == "image":
                                xoind = oind // attributions[i].shape[1]
                                yoind = oind % attributions[i].shape[1]
                                mask[xoind][yoind] = self.perturbation == "keep"
                            else:
                                mask[oind] = self.perturbation == "keep"

                    masks.append(mask.copy().flatten())

            mask_vals.append(masks)
            mask_size = len(range(0, feature_size, increment)) + 1
            values = self.masked_model(np.array(masks))
            if len(indices) == 0:
                outputs = range(output_size)
            else:
                outputs = indices

            index = 0
            for k in outputs:
                if output_size == 1:
                    svals.append(values[index:index + mask_size])
                else:
                    svals.append(values[index:index + mask_size, k])
                index += mask_size

            if pbar is None and time.time() - start_time > 5:
                pbar = tqdm(total=len(X), disable=silent, leave=False)
                pbar.update(i + 1)
            if pbar is not None:
                pbar.update(1)

        if pbar is not None:
            pbar.close()

        self.score_values.append(np.array(svals))

        if self.sort_order == "negative":
            curve_sign = -1
        else:
            curve_sign = 1

        self.labels.append(label)

        xs = np.linspace(0, 1, 100)
        curves = np.zeros((len(self.score_values[-1]), len(xs)))
        for j in range(len(self.score_values[-1])):
            xp = np.linspace(0, 1, len(self.score_values[-1][j]))
            yp = self.score_values[-1][j]
            curves[j, :] = np.interp(xs, xp, yp)
        ys = curves.mean(0)
        auc = sklearn.metrics.auc(np.linspace(0, 1, len(ys)),
                                  curve_sign * (ys - ys[0]))

        if not debug_mode:
            return xs, ys, auc
        else:
            aucs = []
            for j in range(len(self.score_values[-1])):
                curve = curves[j, :]
                auc = sklearn.metrics.auc(np.linspace(0, 1, len(curve)),
                                          curve_sign * (curve - curve[0]))
                aucs.append(auc)
            return mask_vals, curves, aucs
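
A minimal call sketch, assuming `bench` is an instance of the benchmark class this method belongs to (the constructor is not shown in the listing, so the setup is an illustrative assumption):

    import numpy as np
    import shap

    X = np.random.randn(50, 8)
    model = lambda x: x @ np.arange(8.0)
    explanation = shap.Explainer(model, shap.maskers.Independent(X))(X)

    # model_score reads self.model, self.masker, self.perturbation,
    # self.sort_order, self.sort_order_map, self.data_type, self.score_values, ...
    xs, ys, auc = bench.model_score(explanation, X, percent=0.01)
    # xs/ys trace the mean model output as the top-ranked features are kept/removed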