Example #1
0
def insights(x: CaptumInterpretation, inp_data, debug=True):
    """Render a Captum Insights widget for `inp_data` using `x`'s model.

    Parameters
    ----------
    x : CaptumInterpretation
        Wrapper exposing `model`, `dls` and `_formatted_data_iter`.
    inp_data : collection
        Raw items to batch through `x.dls.test_dl`.
    debug : bool, default True
        Forwarded to `AttributionVisualizer.render`.
    """
    def _baseline_func(o):
        # All-zero baseline with the same shape as the input batch.
        return o * 0

    def _get_vocab(vocab):
        # Captum wants string class labels; fastai vocabs may hold bools.
        return list(map(str, vocab)) if isinstance(vocab[0], bool) else vocab

    dl = x.dls.test_dl(L(inp_data), with_labels=True, bs=4)
    # Locate the post-batch Normalize transform; fall back to `noop` so the
    # pipeline below stays uniform when no normalization is configured.
    normalize_func = next(
        (func for func in dl.after_batch if isinstance(func, Normalize)), noop)

    # captum v0.3 expects normalization stats without the batch dimension;
    # squeeze in place once (idempotent: ndim drops to 3 afterwards).
    if hasattr(normalize_func, 'mean') and normalize_func.mean.ndim == 4:
        normalize_func.mean.squeeze_(0)
    if hasattr(normalize_func, 'std') and normalize_func.std.ndim == 4:
        normalize_func.std.squeeze_(0)

    visualizer = AttributionVisualizer(
        models=[x.model],
        score_func=lambda o: torch.nn.functional.softmax(o, 1),
        classes=_get_vocab(dl.vocab),
        features=[
            ImageFeature(
                "Image",
                baseline_transforms=[_baseline_func],
                input_transforms=[normalize_func],
            )
        ],
        dataset=x._formatted_data_iter(dl, normalize_func))
    visualizer.render(debug=debug)
Example #2
0
    def test_one_feature(self):
        """Single ImageFeature: absolute contributions sum to ~1 per output."""
        batch_size = 2
        classes = _get_classes()
        dataset = list(
            _labelled_img_data(num_labels=len(classes),
                               num_samples=batch_size))

        # AttributionVisualizer expects batched `B x ...` inputs, so feed the
        # samples through a deterministic, single-process DataLoader.
        data_loader = torch.utils.data.DataLoader(
            list(dataset),
            batch_size=batch_size,
            shuffle=False,
            num_workers=0,
        )

        photo_feature = ImageFeature(
            "Photo",
            input_transforms=[lambda x: x],
            baseline_transforms=[lambda x: x * 0],
        )
        visualizer = AttributionVisualizer(
            models=[_get_cnn()],
            classes=classes,
            features=[photo_feature],
            dataset=to_iter(data_loader),
            score_func=None,
        )
        # Keep the attribution computation cheap for the test.
        visualizer._config = FilterConfig(attribution_arguments={"n_steps": 2})

        for output in visualizer.visualize():
            total_contrib = sum(
                abs(f.contribution) for f in output.feature_outputs)
            self.assertAlmostEqual(total_contrib, 1.0, places=6)
Example #3
0
def main():
    """Serve a Captum Insights widget for the pretrained model."""
    # Per-channel normalization to roughly [-1, 1] for 3-channel images.
    normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    model = get_pretrained_model()
    photo_feature = ImageFeature(
        "Photo",
        baseline_transforms=[baseline_func],
        input_transforms=[normalize],
    )
    visualizer = AttributionVisualizer(
        models=[model],
        score_func=lambda o: torch.nn.functional.softmax(o, 1),
        classes=get_classes(),
        features=[photo_feature],
        dataset=formatted_data_iter(),
    )
    visualizer.serve(debug=True)
Example #4
0
    def test_multi_features(self):
        """Image + real-valued feature: contributions still sum to ~1."""
        batch_size = 2
        classes = _get_classes()
        img_dataset = list(
            _labelled_img_data(num_labels=len(classes),
                               num_samples=batch_size))

        misc_feature_size = 2
        dataset = _multi_modal_data(img_dataset=img_dataset,
                                    feature_size=misc_feature_size)
        # AttributionVisualizer requires batched `N x ...` inputs, so run the
        # multi-modal samples through a deterministic DataLoader.
        data_loader = torch.utils.data.DataLoader(
            list(dataset),
            batch_size=batch_size,
            shuffle=False,
            num_workers=0,
        )

        feature_list = [
            ImageFeature(
                "Photo",
                input_transforms=[lambda x: x],
                baseline_transforms=[lambda x: x * 0],
            ),
            RealFeature(
                "Random",
                input_transforms=[lambda x: x],
                baseline_transforms=[lambda x: x * 0],
            ),
        ]
        visualizer = AttributionVisualizer(
            models=[_get_multimodal(input_size=misc_feature_size)],
            classes=classes,
            features=feature_list,
            dataset=to_iter(data_loader),
            score_func=None,
        )
        # Keep the attribution computation cheap for the test.
        visualizer._config = FilterConfig(steps=2)

        for output in visualizer.visualize():
            total_contrib = sum(
                abs(f.contribution) for f in output.feature_outputs)
            self.assertAlmostEqual(total_contrib, 1.0, places=6)
Example #5
0
File: captum.py  Project: hal-314/fastai2
    def visualize(self, inp_data, debug=True):
        """Render a Captum Insights widget for `inp_data` in the notebook."""
        baseline = lambda o: o * 0
        # Captum wants string class labels; fastai vocabs may hold bools.
        as_labels = lambda vocab: (list(map(str, vocab))
                                   if isinstance(vocab[0], bool) else vocab)
        dl = self.dls.test_dl(L(inp_data), with_labels=True, bs=4)
        # Pick the post-batch Normalize transform, or a no-op when absent.
        normalize_func = next(
            (f for f in dl.after_batch if type(f) == Normalize), noop)

        image_feature = ImageFeature(
            "Image",
            baseline_transforms=[baseline],
            input_transforms=[normalize_func],
        )
        visualizer = AttributionVisualizer(
            models=[self.model],
            score_func=lambda o: torch.nn.functional.softmax(o, 1),
            classes=as_labels(dl.vocab),
            features=[image_feature],
            dataset=self._formatted_data_iter(dl, normalize_func),
        )
        visualizer.render(debug=debug)
Example #6
0
def captum_insights_report(model, dataset, folder_name):
    """Build a Captum Insights visualizer over the training split of `dataset`.

    Parameters
    ----------
    model : torch.nn.Module
        Model whose attributions will be visualized.
    dataset : mapping
        Expects `dataset['train'].X` to be a DataLoader-compatible
        collection of (image, label) samples — TODO confirm against callers.
    folder_name : str
        Unused here; kept for interface compatibility.

    Returns
    -------
    AttributionVisualizer
        The configured visualizer (callers that ignored the previous
        implicit `None` return are unaffected).
    """
    # Bind the split to a fresh name: the original reassigned `dataset`
    # inside the closure, which made it a local and raised
    # UnboundLocalError before the first read.
    train_data = dataset['train'].X

    def formatted_data_iter():
        # Yield Captum `Batch` objects; iterating the loader directly stops
        # cleanly at the end instead of leaking StopIteration out of the
        # generator (PEP 479).
        loader = torch.utils.data.DataLoader(
            train_data, batch_size=4, shuffle=False, num_workers=2)
        for images, labels in loader:
            yield Batch(inputs=images, labels=labels)

    visualizer = AttributionVisualizer(
        models=[model],
        score_func=lambda o: torch.nn.functional.softmax(o, 1),
        classes=["1", "2", "3", "4", "5"],
        features=[
            ImageFeature(
                "TS"
            )
        ],
        # Feed the batched iterator, not the raw split, so Captum receives
        # `Batch` objects as at the other call sites in this file.
        dataset=formatted_data_iter(),
    )
    return visualizer
Example #7
0
                                           download=True,
                                           transform=transforms.ToTensor())
    dataloader = iter(
        torch.utils.data.DataLoader(dataset,
                                    batch_size=4,
                                    shuffle=False,
                                    num_workers=2))
    while True:
        images, labels = next(dataloader)
        yield Batch(inputs=images, labels=labels)


if __name__ == "__main__":
    # Per-channel normalization to roughly [-1, 1] for 3-channel images.
    normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    model = get_pretrained_model()
    features = [
        ImageFeature(
            "Photo",
            baseline_transforms=[baseline_func],
            input_transforms=[normalize],
        ),
    ]
    visualizer = AttributionVisualizer(
        models=[model],
        score_func=lambda o: torch.nn.functional.softmax(o, 1),
        classes=get_classes(),
        features=features,
        dataset=formatted_data_iter(),
    )

    visualizer.render()
def full_img_transform(input):
    """Load one image file and return it as a normalized `1 x C x H x W` tensor."""
    img = Image.open(input)
    img = transform_normalize(transform(img))
    # Add the leading batch dimension expected by torch.cat downstream.
    return img.unsqueeze(0)


# Stack the per-file transformed images into one `N x C x H x W` batch.
# (Comprehension replaces the redundant `map(lambda i: f(i), ...)` wrapper.)
input_imgs = torch.cat([full_img_transform(img) for img in imgs], 0)

visualizer = AttributionVisualizer(
    models=[model],
    score_func=lambda o: torch.nn.functional.softmax(o, 1),
    # Human-readable label is the second element of each idx_to_labels entry.
    classes=[idx_to_labels[k][1] for k in idx_to_labels],
    features=[
        ImageFeature(
            "Photo",
            baseline_transforms=[baseline_func],
            # Inputs are already normalized by full_img_transform above.
            input_transforms=[],
        )
    ],
    dataset=[Batch(input_imgs, labels=[282, 849, 69])])

#########################################################################
# Note that running the cell above didn’t take much time at all, unlike
# our attributions above. That’s because Captum Insights lets you
# configure different attribution algorithms in a visual widget, after
# which it will compute and display the attributions. *That* process will
# take a few minutes.
#
# Running the cell below will render the Captum Insights widget. You can
# then choose attributions methods and their arguments, filter model
Example #9
0
# Per-stage sample counts and the class names shared by all stages.
dataset_sizes = {stage: len(image_datasets[stage]) for stage in stages}
class_names = image_datasets[stages[0]].classes

# Prefer the first GPU when available.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Device::', device)

# Load the best model from file and switch to inference mode.
# NOTE(review): torch.load unpickles arbitrary code — only load trusted files.
model_ = torch.load(model_file)
_ = model_.to(device).eval()

# Standard ImageNet preprocessing applied to each incoming photo.
preprocess = [
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406],
                         [0.229, 0.224, 0.225]),
]

visualizer = AttributionVisualizer(
    models=[model_],
    score_func=lambda o: torch.nn.functional.softmax(o, 1),
    classes=class_names,
    features=[
        ImageFeature("Photo",
                     baseline_transforms=[baseline_func],
                     input_transforms=preprocess)
    ],
    dataset=formatted_data_iter(dataloaders['test']),
)

visualizer.serve(port=8600)