Code Example #1
import numpy as np
import torch

import utils  # helper module providing newlayer(); assumed to follow the LRP tutorial code


def Lrp(X, _layers, _L):
    A = [X] + [None] * _L  # initialize the list of activations, one slot per layer
    for l in range(_L):
        A[l + 1] = _layers[l].forward(A[l])  # forward pass: fill in the activations
    chosen_label = A[_L].argmax().item()  # predicted class (unused here, since T = 1)
    T = 1  # mask/indicator marking which outputs to propagate; 1 keeps the full output
    R = [None] * _L + [(A[-1] * T).data]  # initialize relevance with the last layer's output

    for l in reversed(range(_L)):
        # The same rule is applied at every layer; for ReLU layers it simply
        # passes the relevance through.
        A[l] = (A[l].data).requires_grad_(True)
        # incr and rho are helper functions: an epsilon stabilizer and the identity rho.
        incr = lambda z: z + 1e-9
        rho = lambda p: p
        # Four steps compute the relevance scores for each layer. The algorithm
        # is iterative: the current layer's relevance scores yield the relevance
        # scores of the previous layer.
        z = incr(utils.newlayer(_layers[l], rho).forward(A[l]))  # step 1
        s = (R[l + 1] / z).data  # step 2
        (z * s).sum().backward()
        c = A[l].grad  # step 3
        R[l] = (A[l] * c).data  # step 4
    return R[0]
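# Note: utils.newlayer (used above and in the examples below) clones a layer and
# transforms its parameters with rho. A minimal sketch, assuming the helper
# matches the standard LRP tutorial version (it is not shown in this source):
import copy

def newlayer(layer, g):
    """Clone `layer` and apply g to its weight and bias, if present."""
    layer = copy.deepcopy(layer)
    try:
        layer.weight = torch.nn.Parameter(g(layer.weight))
    except AttributeError:
        pass  # parameter-free layers (e.g. ReLU, pooling) are returned as plain copies
    try:
        layer.bias = torch.nn.Parameter(g(layer.bias))
    except AttributeError:
        pass
    return layer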
def Sa(X, _layers, _L):
    A = [X] + [None] * _L  # initialize the list of activations, one slot per layer
    for l in range(_L):
        if l == 2:
            A[l] = A[l].view((A[l].shape[0], -1))  # flatten to match the next layer's expected shape
        A[l + 1] = _layers[l].forward(A[l])  # forward pass: fill in the activations
    chosen_label = A[_L].argmax().item()
    T = torch.FloatTensor(
        1.0 * (np.arange(20) == chosen_label)
    )  # one-hot mask/indicator marking the predicted label (20 classes)
    R = [None] * _L + [(A[-1] * T).data]  # initialize relevance with the last layer's output
    # Sensitivity analysis (SA): backpropagate the masked output as a gradient signal.
    d = R[_L]
    for l in reversed(range(1, _L)):
        A[l] = (A[l].data).requires_grad_(True)

        if l == 1:
            d = d.view((d.shape[0], 30, -1))  # undo the flattening for the layer below

        rho = lambda p: p
        z = utils.newlayer(_layers[l], rho).forward(A[l])
        z.backward(d)  # accumulate the gradient signal into A[l].grad
        d = A[l].grad

    return torch.sum(d.view(400, 300), dim=1)  # sum over the 300 feature dims: one score per each of the 400 positions
def Lrp2(X, _layers, _L):
    # Variant of Lrp for the same network as Sa (given a distinct name so that
    # it does not shadow the Lrp definition above).
    A = [X] + [None] * _L  # initialize the list of activations, one slot per layer
    for l in range(_L):
        if l == 2:
            A[l] = A[l].view((A[l].shape[0], -1))  # flatten to match the next layer's expected shape
        A[l + 1] = _layers[l].forward(A[l])  # forward pass: fill in the activations
    chosen_label = A[_L].argmax().item()
    T = torch.FloatTensor(
        1.0 * (np.arange(20) == chosen_label)
    )  # one-hot mask/indicator marking the predicted label (20 classes)
    R = [None] * _L + [(A[-1] * T).data]  # initialize relevance with the last layer's output

    for l in reversed(range(1, _L)):
        # The same rule is applied at every layer; for ReLU layers it simply
        # passes the relevance through.
        A[l] = (A[l].data).requires_grad_(True)
        # incr and rho are helper functions: an epsilon stabilizer and the identity rho.
        incr = lambda z: z + 1e-9
        rho = lambda p: p
        # Four steps compute the relevance scores for each layer; the current
        # layer's relevance scores yield those of the previous layer.
        z = incr(utils.newlayer(_layers[l], rho).forward(A[l]))  # step 1
        if l == 1:
            R[l + 1] = R[l + 1].view((1, 30, -1))  # undo the flattening for the layer below
        s = (R[l + 1] / z).data  # step 2
        (z * s).sum().backward()
        c = A[l].grad  # step 3
        R[l] = (A[l] * c).data  # step 4

    return torch.sum(
        R[1].view((400, 300)), dim=1
    )  # R[1] is the first layer's relevance (R[0] stays None because the loop
       # stops at l = 1); summing over the 300 feature dimensions gives one
       # score per each of the 400 input positions
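A hypothetical usage sketch for Lrp on a small toy network (the layer sizes and tensors below are illustrative, not from the source):

layers = [torch.nn.Linear(4, 8), torch.nn.ReLU(), torch.nn.Linear(8, 3)]
X = torch.randn(1, 4)
relevance = Lrp(X, layers, len(layers))  # relevance of each input feature
print(relevance.shape)  # torch.Size([1, 4])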
Code Example #4
    def lrp(self, x, r, gamma_func):
        # Compute LRP relevances using the gamma-LRP rule. This is a method of
        # a model wrapper class; torch, nn, Flatten, and newlayer are assumed
        # to be imported/defined by the enclosing module.
        mean = torch.Tensor([0.485, 0.456, 0.406]).reshape(1, -1, 1,
                                                           1).to(self.device)
        std = torch.Tensor([0.229, 0.224, 0.225]).reshape(1, -1, 1,
                                                          1).to(self.device)

        lbound = (0 - mean) / std
        hbound = (1 - mean) / std

        # Assumes feature_layer is in self.ls; otherwise `layers` and
        # `gamma_layers` below would be undefined.
        if int(self.feature_layer) in self.ls:
            feature_layers, project_layers = list(self.encoder), list(
                self.project)
            # gamma is applied to the feature layers only, not to projection layers
            gamma_layers = [True] * len(feature_layers) + [False] * len(
                project_layers)
            layers = feature_layers + project_layers

        # Forward pass
        X = [x.data] + [None for l in layers]
        for i, layer in enumerate(layers):
            X[i + 1] = layer.forward(X[i]).data

        # Backward pass
        for i, layer in list(enumerate(layers))[::-1]:

            x = X[i].clone().detach().requires_grad_(True)

            # Set gamma=0. for projection layers
            gamma = gamma_func(i) if gamma_layers[i] else 0.

            # Handle the Flatten layer: reshape the flat relevance back to the
            # feature-map shape and skip gradient-based propagation through it.
            if isinstance(layer, Flatten):
                if self.proj_case == 'random':
                    if int(self.feature_layer) in self.ls:
                        r = r.view((1, self.filter_dict[self.feature_layer],
                                    self.h_proj, self.w_proj))
                        continue

            if isinstance(layer, nn.Conv2d) or isinstance(
                    layer, nn.AvgPool2d) or isinstance(layer, nn.MaxPool2d):
                if i > 0:
                    # Handling intermediate Conv2D or AvgPool layers
                    z = newlayer(
                        layer, lambda p: p + gamma * p.clamp(min=0)).forward(x)
                    z = z + 1e-9
                    (z * (r / z).data).sum().backward()
                    r = (x * x.grad).data

                else:
                    # Input Conv2D layer: z^B rule over the valid pixel range
                    # [lbound, hbound] of the normalized input.
                    l = (x.data * 0 +
                         lbound).clone().detach().requires_grad_(True)
                    h = (x.data * 0 +
                         hbound).clone().detach().requires_grad_(True)
                    z = layer.forward(x) - newlayer(
                        layer, lambda p: p.clamp(min=0)).forward(l) - newlayer(
                            layer, lambda p: p.clamp(max=0)).forward(h)
                    (z * (r / (z + 1e-6)).data).sum().backward()
                    r = (x * x.grad + l * l.grad + h * h.grad).data
        return r
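For reference, a minimal standalone sketch of the z^B input rule from the else branch above, applied to a single linear layer (the layer sizes and the [0, 1] input domain are illustrative assumptions):

import torch
import torch.nn as nn


def clamped_copy(layer, clamp_kwargs):
    # Copy of `layer` with weight and bias clamped (e.g. min=0 keeps positive parts).
    new = nn.Linear(layer.in_features, layer.out_features)
    new.weight = nn.Parameter(layer.weight.clamp(**clamp_kwargs))
    new.bias = nn.Parameter(layer.bias.clamp(**clamp_kwargs))
    return new


layer = nn.Linear(3, 2)
x = torch.rand(1, 3).requires_grad_(True)
lo = torch.zeros(1, 3).requires_grad_(True)   # lower bound of the input domain
hi = torch.ones(1, 3).requires_grad_(True)    # upper bound of the input domain

z = layer(x) - clamped_copy(layer, dict(min=0))(lo) - clamped_copy(layer, dict(max=0))(hi)
r_out = torch.ones(1, 2)                      # relevance arriving from the layer above
(z * (r_out / (z + 1e-6)).data).sum().backward()
r_in = (x * x.grad + lo * lo.grad + hi * hi.grad).data  # relevance on the inputs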
Code Example #5
import numpy as np
import torch

import utils  # helper module providing newlayer(), as in Code Example #1


def LRP(module, input_str, true_class, num_classes, encoded=False):
    if not encoded:
        test_data = encode(input_str)  # encode() is assumed to be defined elsewhere
    else:
        test_data = input_str
        test_data.resize_((1, 70, 1014))  # character-level input: 70 channels x 1014 positions

    print(module(test_data), input_str)  # show the model's prediction for reference

    # Collect the individual layers from the module's Sequential containers.
    layers = list(module.modules())[1:]
    layers2 = []

    for i1 in range(len(layers)):
        if isinstance(layers[i1], torch.nn.modules.container.Sequential):
            for i2 in layers[i1]:
                layers2.append(i2)

    L = len(layers2)

    # Forward pass: propagate through the network.
    A = [test_data] + [None] * L
    first_linear = 0
    for l in range(L):
        if isinstance(layers2[l], torch.nn.Linear) and first_linear == 0:
            # Flatten before the first linear layer.
            A[l + 1] = layers2[l].forward(A[l].view(A[l].size(0), -1))
            first_linear = 1
        else:
            A[l + 1] = layers2[l].forward(A[l])

    linear = 0  # counts Linear layers seen while iterating backward
    T = torch.FloatTensor(
        1.0 * (np.arange(num_classes) == true_class).reshape([num_classes]))
    R = [None] * L + [(A[-1] * T).data]

    for l in reversed(range(L)):
        A[l] = (A[l].data).requires_grad_(True)

        # Treat max-pooling as average pooling for the relevance pass.
        if isinstance(layers2[l], torch.nn.MaxPool1d):
            layers2[l] = torch.nn.AvgPool1d(kernel_size=3, stride=3)

        rho = lambda p: p
        incr = lambda z: z + 1e-9

        if isinstance(layers2[l], torch.nn.Linear): linear += 1
        if linear == 3:
            # The third Linear layer from the back is the first in forward
            # order: its input was flattened, so flatten the activation here too.
            z = incr(
                utils.newlayer(layers2[l],
                               rho).forward(A[l].view(A[l].size(0), -1)))
            linear += 1  # ensure this branch runs only once
        else:
            z = incr(utils.newlayer(layers2[l], rho).forward(A[l]))  # step 1

        s = (R[l + 1] / z).data  # step 2
        (z * s).sum().backward()
        c = A[l].grad  # step 3
        R[l] = (A[l] * c).data  # step 4

    return R
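A hypothetical call, assuming `model` is a character-level CNN whose children are nn.Sequential blocks and that encode() comes from the surrounding project:

R = LRP(model, "example input text", true_class=2, num_classes=4)
per_position = R[0].sum(dim=1)  # aggregate relevance over the 70 character channels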
Code Example #6
# LRP backward pass for a VGG-style network; layers, A, T, mean, std (and the
# torch / numpy / utils imports) are assumed to be defined by the preceding code.
R = [None]*L + [(A[-1]*T).data]

for l in reversed(range(1,L)):

    A[l] = (A[l].data).requires_grad_(True)

    # Treat max-pooling as average pooling for the relevance pass.
    if isinstance(layers[l],torch.nn.MaxPool2d): layers[l] = torch.nn.AvgPool2d(2)

    if isinstance(layers[l],torch.nn.Conv2d) or isinstance(layers[l],torch.nn.AvgPool2d):

        # Lower layers use the gamma rule, middle layers the epsilon rule,
        # and upper layers the basic (LRP-0) rule.
        if l <= 16:       rho = lambda p: p + 0.25*p.clamp(min=0); incr = lambda z: z+1e-9
        if 17 <= l <= 30: rho = lambda p: p;                       incr = lambda z: z+1e-9+0.25*((z**2).mean()**.5).data
        if l >= 31:       rho = lambda p: p;                       incr = lambda z: z+1e-9

        z = incr(utils.newlayer(layers[l],rho).forward(A[l]))  # step 1
        s = (R[l+1]/z).data                                    # step 2
        (z*s).sum().backward(); c = A[l].grad                  # step 3
        R[l] = (A[l]*c).data                                   # step 4

    else:

        R[l] = R[l+1]  # ReLU / dropout layers: pass relevance through unchanged

# Visualize the relevance maps at several depths of the network.
for i,l in enumerate([31,21,11,1]):
    utils.heatmap(i,np.array(R[l][0]).sum(axis=0),0.5*i+1.5,0.5*i+1.5)


# Input layer: z^B rule over the bounds of the normalized pixel domain.
A[0] = (A[0].data).requires_grad_(True)

lb = (A[0].data*0+(0-mean)/std).requires_grad_(True)
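# The listing breaks off here; a sketch of how the z^B step would continue,
# following the same four-step pattern as the loop above (hb mirrors lb with
# the upper bound of the normalized pixel domain):
hb = (A[0].data*0+(1-mean)/std).requires_grad_(True)

z = layers[0].forward(A[0]) + 1e-9                                      # step 1 (a)
z -= utils.newlayer(layers[0],lambda p: p.clamp(min=0)).forward(lb)     # step 1 (b)
z -= utils.newlayer(layers[0],lambda p: p.clamp(max=0)).forward(hb)     # step 1 (c)
s = (R[1]/z).data                                                       # step 2
(z*s).sum().backward(); c,cp,cm = A[0].grad,lb.grad,hb.grad             # step 3
R[0] = (A[0]*c+lb*cp+hb*cm).data                                        # step 4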