# Example 1
from torchray.attribution.excitation_backprop import contrastive_excitation_backprop
from torchray.benchmark import get_example_data, plot_example

# Load the demo model, input image, and target category.
model, x, category_id, _ = get_example_data()

# Compute the contrastive excitation backprop saliency map, attributing
# the target class score to the chosen intermediate layer.
saliency_map = contrastive_excitation_backprop(
    model,
    x,
    category_id,
    saliency_layer='features.9',
    contrast_layer='features.30',
    classifier_layer='classifier.6',
)

# Visualize the result.
plot_example(x, saliency_map, 'contrastive excitation backprop', category_id)
# NOTE: the original snippet was missing all of its imports (every name it
# used was undefined); they are restored here so the example actually runs.
from torchray.attribution.extremal_perturbation import extremal_perturbation, contrastive_reward
from torchray.benchmark import get_example_data, plot_example
from torchray.utils import get_device

# Obtain example data: model, input image, and two target categories.
model, x, category_id_1, category_id_2 = get_example_data()

# Run on GPU if available.
device = get_device()
model.to(device)
x = x.to(device)

# Extremal perturbation for the first category (larger mask area).
masks_1, _ = extremal_perturbation(
    model,
    x,
    category_id_1,
    reward_func=contrastive_reward,
    debug=True,
    areas=[0.12],
)

# Extremal perturbation for the second category (smaller mask area).
masks_2, _ = extremal_perturbation(
    model,
    x,
    category_id_2,
    reward_func=contrastive_reward,
    debug=True,
    areas=[0.05],
)

# Plot one mask per category.
plot_example(x, masks_1, 'extremal perturbation', category_id_1)
plot_example(x, masks_2, 'extremal perturbation', category_id_2)
from torchray.attribution.common import Probe, get_module
from torchray.attribution.linear_approx import gradient_to_linear_approx_saliency
from torchray.benchmark import get_example_data, plot_example

# Obtain example data: model, input image, and target category.
model, x, category_id, _ = get_example_data()

# Linear approximation: probe the output of an intermediate layer so its
# activation and gradient are captured during the forward/backward pass.
saliency_layer = get_module(model, 'features.29')

probe = Probe(saliency_layer, target='output')

# Backprop the target class score.
y = model(x)
z = y[0, category_id]
z.backward()

saliency = gradient_to_linear_approx_saliency(probe.data[0])

# Detach the probe's hooks so the model is left unmodified.
# (The original example leaked the hooks; the excitation backprop example
# in this file performs this same cleanup.)
probe.remove()

# Plots.
plot_example(x, saliency, 'linear approx', category_id)
# Example 4
from torchray.attribution.grad_cam import grad_cam
from torchray.benchmark import get_example_data, plot_example

# Load the demo model, input image, and target category.
model, x, category_id, _ = get_example_data()

# Compute the Grad-CAM saliency map at the chosen convolutional layer.
heatmap = grad_cam(model, x, category_id, saliency_layer='features.29')

# Visualize the result.
plot_example(x, heatmap, 'grad-cam backprop', category_id)
from torchray.attribution.common import gradient_to_saliency
from torchray.benchmark import get_example_data, plot_example

# Load the demo model, input image, and target category.
model, x, category_id, _ = get_example_data()

# Plain gradient method: enable gradients on the input and backprop
# the target class score down to it.
x.requires_grad_(True)
score = model(x)[0, category_id]
score.backward()

# Convert the input gradient into a saliency map.
grad_saliency = gradient_to_saliency(x)

# Visualize the result.
plot_example(x, grad_saliency, 'gradient', category_id)
from torchray.attribution.common import Probe, get_module
from torchray.attribution.excitation_backprop import ExcitationBackpropContext
from torchray.attribution.excitation_backprop import gradient_to_excitation_backprop_saliency
from torchray.benchmark import get_example_data, plot_example

# Load the demo model, input image, and target category.
model, x, category_id, _ = get_example_data()

# Excitation backprop: attach a probe to the layer whose gradient
# defines the saliency map.
probe = Probe(get_module(model, 'features.9'), target='output')

# Backprop the target score under the excitation-backprop gradient rules.
with ExcitationBackpropContext():
    score = model(x)[0, category_id]
    score.backward()

saliency = gradient_to_excitation_backprop_saliency(probe.data[0])

# Detach the probe's hooks now that the gradient has been captured.
probe.remove()

# Visualize the result.
plot_example(x, saliency, 'excitation backprop', category_id)
from torchray.attribution.common import gradient_to_saliency
from torchray.attribution.deconvnet import DeConvNetContext
from torchray.benchmark import get_example_data, plot_example

# Load the demo model, input image, and target category.
model, x, category_id, _ = get_example_data()

# DeConvNet method: backprop the target score to the input while the
# context replaces the gradient rules with DeConvNet's.
x.requires_grad_(True)

with DeConvNetContext():
    score = model(x)[0, category_id]
    score.backward()

deconv_saliency = gradient_to_saliency(x)

# Visualize the result.
plot_example(x, deconv_saliency, 'deconvnet', category_id)
from torchray.attribution.guided_backprop import guided_backprop
from torchray.benchmark import get_example_data, plot_example

# Load the demo model, input image, and target category.
model, x, category_id, _ = get_example_data()

# Compute the guided backprop saliency map for the target class.
gb_saliency = guided_backprop(model, x, category_id)

# Visualize the result.
plot_example(x, gb_saliency, 'guided backprop', category_id)
# Example 9
from torchray.attribution.rise import rise
from torchray.benchmark import get_example_data, plot_example
from torchray.utils import get_device

# Load the demo model, input image, and target category.
model, x, category_id, _ = get_example_data()

# Move the model and input to the GPU when one is available.
device = get_device()
model.to(device)
x = x.to(device)

# RISE produces one saliency map per class; keep only the target class,
# restoring a leading batch dimension for plotting.
class_maps = rise(model, x)
saliency = class_maps[:, category_id].unsqueeze(0)

# Visualize the result.
plot_example(x, saliency, 'RISE', category_id)