def __init__(self, forward_func: Callable) -> None:
    r"""Construct a KernelShap-style attributor on top of ``Lime``.

    Args:
        forward_func (callable): The forward function of the model or
                any modification of it
    """
    # Delegate to Lime with the linear-regression trainer and the
    # Shapley-kernel similarity function supplied positionally.
    Lime.__init__(
        self,
        forward_func,
        linear_regression_interpretable_model_trainer,
        kernel_shap_similarity_kernel,
    )
def __init__(self, forward_func: Callable) -> None:
    r"""Initialize by configuring ``Lime`` with a linear surrogate model.

    Args:
        forward_func (callable): The forward function of the model or
                any modification of it
    """
    # Same wiring as the trainer-function variant, but the surrogate is
    # an SkLearnLinearRegression instance rather than a trainer callable.
    Lime.__init__(
        self,
        forward_func,
        SkLearnLinearRegression(),
        kernel_shap_similarity_kernel,
    )
def __init__(self, forward_func: Callable) -> None:
    r"""Set up ``Lime`` with the KernelSHAP kernel and perturbation sampler.

    Args:
        forward_func (callable): The forward function of the model or
                any modification of it
    """
    Lime.__init__(
        self,
        forward_func,
        interpretable_model=SkLearnLinearRegression(),
        similarity_func=self.kernel_shap_similarity_kernel,
        perturb_func=self.kernel_shap_perturb_generator,
    )
    # Large finite weight standing in for infinity in the kernel
    # (1e6 == 1000000.0 exactly).
    self.inf_weight = 1e6
def _lime_test_assert(
    self,
    model: Callable,
    test_input: TensorOrTupleOfTensorsGeneric,
    expected_attr,
    expected_coefs_only=None,
    feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
    additional_input: Any = None,
    perturbations_per_eval: Tuple[int, ...] = (1, ),
    baselines: BaselineType = None,
    target: Union[None, int] = 0,
    n_perturb_samples: int = 100,
    alpha: float = 1.0,
    delta: float = 1.0,
    batch_attr: bool = False,
) -> None:
    """Shared assertion helper for Lime attribution tests.

    For each batch size in ``perturbations_per_eval`` this:
      1. Runs ``Lime.attribute`` on ``test_input`` and compares the result
         against ``expected_attr`` (within ``delta``, max-norm mode).
      2. If ``expected_coefs_only`` is given, re-runs with
         ``return_input_shape=False`` and compares the raw coefficient
         vector, then repeats the attribution through a manually
         configured ``LimeBase`` that samples in the original input space
         (either on the whole batch when ``batch_attr`` is set, or
         example-by-example via ``_batch_example_iterator``).

    Args:
        model: Forward function under test.
        test_input: Single tensor or tuple of input tensors.
        expected_attr: Expected input-shaped attributions.
        expected_coefs_only: Expected flat coefficient vector; enables the
            return_input_shape=False and LimeBase comparison paths.
        feature_mask: Optional grouping mask; when None a default mask is
            constructed from the inputs.
        additional_input: Extra forward args passed through unchanged.
        perturbations_per_eval: Batch sizes to exercise, one full pass each.
        baselines: Reference values for masked-out features.
        target: Output index to attribute.
        n_perturb_samples: Number of perturbation samples per attribution.
        alpha: Regularization strength forwarded to the surrogate trainer.
        delta: Allowed max absolute deviation in assertions.
        batch_attr: If True, run the LimeBase path on the whole batch at
            once and return early instead of iterating per example.
    """
    for batch_size in perturbations_per_eval:
        # Cosine similarity kernel with width 10 for the standard Lime path.
        lime = Lime(
            model,
            similarity_func=get_exp_kernel_similarity_function(
                "cosine", 10.0),
        )
        attributions = lime.attribute(
            test_input,
            target=target,
            feature_mask=feature_mask,
            additional_forward_args=additional_input,
            baselines=baselines,
            perturbations_per_eval=batch_size,
            n_perturb_samples=n_perturb_samples,
            alpha=alpha,
        )
        assertTensorTuplesAlmostEqual(self, attributions, expected_attr,
                                      delta=delta, mode="max")
        if expected_coefs_only is not None:
            # Test with return_input_shape = False
            attributions = lime.attribute(
                test_input,
                target=target,
                feature_mask=feature_mask,
                additional_forward_args=additional_input,
                baselines=baselines,
                perturbations_per_eval=batch_size,
                n_perturb_samples=n_perturb_samples,
                alpha=alpha,
                return_input_shape=False,
            )
            assertTensorAlmostEqual(self, attributions, expected_coefs_only,
                                    delta=delta, mode="max")
            # LimeBase configured with a lasso trainer, euclidean kernel,
            # and custom perturb / to-interp-rep functions; sampling is
            # done in the original input space (hence the False flag —
            # presumably perturbations_per_sample-mode; confirm against
            # LimeBase's signature).
            lime_alt = LimeBase(
                model,
                lasso_interpretable_model_trainer,
                get_exp_kernel_similarity_function("euclidean", 1000.0),
                alt_perturb_func,
                False,
                None,
                alt_to_interp_rep,
            )
            # Test with equivalent sampling in original input space
            # NOTE: `baselines` is rebound here to the formatted tuple.
            formatted_inputs, baselines = _format_input_baseline(
                test_input, baselines)
            if feature_mask is None:
                # No mask supplied: build the default per-feature mask and
                # get the interpretable feature count along with it.
                (
                    formatted_feature_mask,
                    num_interp_features,
                ) = _construct_default_feature_mask(formatted_inputs)
            else:
                formatted_feature_mask = _format_input(feature_mask)
                # Mask values are group ids, so max id + 1 = group count.
                num_interp_features = int(
                    max(
                        torch.max(single_inp).item()
                        for single_inp in feature_mask) + 1)
            if batch_attr:
                # Whole-batch LimeBase attribution, then return early:
                # the per-example loop below is skipped in this mode.
                attributions = lime_alt.attribute(
                    test_input,
                    target=target,
                    feature_mask=formatted_feature_mask if isinstance(
                        test_input, tuple) else formatted_feature_mask[0],
                    additional_forward_args=additional_input,
                    baselines=baselines,
                    perturbations_per_eval=batch_size,
                    n_perturb_samples=n_perturb_samples,
                    alpha=alpha,
                    num_interp_features=num_interp_features,
                )
                assertTensorAlmostEqual(self, attributions,
                                        expected_coefs_only,
                                        delta=delta, mode="max")
                return
            # Per-example path: split the batch and attribute one example
            # at a time, checking each against its slice of the expected
            # coefficients.
            bsz = formatted_inputs[0].shape[0]
            for (
                curr_inps,
                curr_target,
                curr_additional_args,
                curr_baselines,
                curr_feature_mask,
                expected_coef_single,
            ) in _batch_example_iterator(
                bsz,
                test_input,
                target,
                additional_input,
                baselines if isinstance(test_input, tuple) else baselines[0],
                formatted_feature_mask if isinstance(
                    test_input, tuple) else formatted_feature_mask[0],
                expected_coefs_only,
            ):
                attributions = lime_alt.attribute(
                    curr_inps,
                    target=curr_target,
                    feature_mask=curr_feature_mask,
                    additional_forward_args=curr_additional_args,
                    baselines=curr_baselines,
                    perturbations_per_eval=batch_size,
                    n_perturb_samples=n_perturb_samples,
                    alpha=alpha,
                    num_interp_features=num_interp_features,
                )
                assertTensorAlmostEqual(
                    self,
                    attributions,
                    expected_coef_single,
                    delta=delta,
                    mode="max",
                )