def visualize(self, attribution, data, contribution_frac) -> FeatureOutput:
    """Build a FeatureOutput for a text feature from its attribution scores.

    Args:
        attribution: attribution tensor; squeezed on dim 0 and then summed
            over dim 1 (presumably the embedding axis -- TODO confirm), so
            each token is left with a single scalar score.
        data: raw input; passed through ``self.visualization_transform``
            (when configured) to recover the display text.
        contribution_frac: this feature's share of the overall attribution.

    Returns:
        FeatureOutput whose ``modified`` scores are scaled into [-100, 100].
    """
    # Recover the display text from the raw input when a transform is set.
    if self.visualization_transform:
        text = self.visualization_transform(data)
    else:
        text = data
    # NOTE(review): the original also rebound `data = data.squeeze(0)` here,
    # but the squeezed value was never read again -- dead assignment removed.
    attribution = attribution.squeeze(0)
    # Collapse the per-token vectors to one scalar score per token.
    attribution = attribution.sum(dim=1)
    # L-infinity norm: divide by the largest absolute score so values land in
    # [-1, 1]; safe_div falls back to the raw scores when the max is zero.
    attr_max = abs(attribution).max()
    normalized_attribution = safe_div(attribution, attr_max, default_value=attribution)
    modified = [x * 100 for x in normalized_attribution.tolist()]
    return FeatureOutput(
        name=self.name,
        base=text,
        modified=modified,
        type=self.visualization_type(),
        contribution=contribution_frac,
    )
def _calculate_net_contrib(self, attrs_per_input_feature: List[Tensor]):
    """Return each input feature's net contribution, L1-normalized.

    Every feature's attribution tensor is reduced to a single scalar, and
    the resulting vector is scaled so that sum(abs(x_i)) == 1. When the L1
    norm is zero, the unscaled totals are returned as-is.
    """
    # One scalar per input feature: the total attribution mass of that feature.
    per_feature_totals = [
        feature_attrib.flatten().sum() for feature_attrib in attrs_per_input_feature
    ]
    net_contrib = torch.stack(per_feature_totals)
    # L1-normalize; safe_div leaves the vector untouched when the norm is 0.
    l1_norm = torch.norm(net_contrib, p=1)
    net_contrib = safe_div(net_contrib, l1_norm, default_value=net_contrib)
    return net_contrib.tolist()
def visualize(self, attribution, data, contribution_frac) -> FeatureOutput:
    """Build a FeatureOutput for a categorical feature.

    Args:
        attribution: attribution tensor; squeezed on dim 0, then scaled by
            its L2 norm so scores are comparable across features.
        data: per-category input values; squeezed on dim 0 and rendered
            alongside ``self.categories`` as the display base.
        contribution_frac: this feature's share of the overall attribution.

    Returns:
        FeatureOutput whose ``modified`` scores are scaled into [-100, 100].
    """
    attribution = attribution.squeeze(0)
    data = data.squeeze(0)
    # L2-normalize; safe_div returns the raw attribution when the norm is 0.
    l2_norm = attribution.norm()
    scaled = safe_div(attribution, l2_norm, default_value=attribution)
    modified = [score * 100 for score in scaled.tolist()]
    # Pair each category label with its input value (2 decimals) for display.
    base = [
        f"{category}: {value:.2f}"
        for category, value in zip(self.categories, data.tolist())
    ]
    return FeatureOutput(
        name=self.name,
        base=base,
        modified=modified,
        type=self.visualization_type(),
        contribution=contribution_frac,
    )