# Example #1
def demo1(image_paths, target_layer, arch, topk, output_dir, cuda, model):
    """
    Generate Grad-CAM visualisations for a batch of images.

    Returns the list of file paths the Grad-CAM overlays were saved to.
    """
    device = get_device(cuda)
    saved_files = []

    # Synset words
    classes = get_classtable()

    # Fall back to a pretrained torchvision model when none is supplied.
    if model is None:
        model = models.__dict__[arch](pretrained=True)
    model.to(device)
    model.eval()

    # Load and batch the input images on the target device.
    images, raw_images = load_images(image_paths)
    images = torch.stack(images).to(device)

    # Usage pattern of the grad_cam.py wrappers:
    #   1. wrap the model, 2. forward() with images,
    #   3. backward() with class ids, 4. generate() to export results.
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")
    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for rank in range(topk):
        # Guided Backpropagation for the rank-th most confident class.
        gbp.backward(ids=ids[:, [rank]])
        gradients = gbp.generate()

        # Grad-CAM for the same class ids.
        gcam.backward(ids=ids[:, [rank]])
        regions = gcam.generate(target_layer=target_layer)

        for pos in range(len(images)):
            # Persist the Grad-CAM heatmap for image `pos`.
            filename = osp.join(
                output_dir,
                "{}-{}-gradcam-{}.png".format(pos, arch, target_layer))
            saved_files.append(filename)
            save_gradcam(filename, gcam=regions[pos, 0],
                         raw_image=raw_images[pos])
    return saved_files
# Example #2
def gradcam_classify(model):
    """Run Grad-CAM over every image under ``samples/`` and save heatmaps."""
    # Every file in samples/ is treated as an input image.
    image_paths = [os.path.join("samples/", entry)
                   for entry in os.listdir("samples/")]

    target_layer = 'layer4'
    topk = 1
    output_dir = 'results'

    classes = getclasses()

    # NOTE(review): `device` is read below but never defined in this
    # function — it must come from module scope; the model itself is
    # never moved to it here. Verify against the caller.
    model.eval()

    # Load and batch the images.
    images, raw_images = load_images(image_paths)
    images = torch.stack(images).to(device)

    # =========================================================================
    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)  # class ids sorted by confidence

    # =========================================================================
    print("Grad-CAM in action:")
    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    for rank in range(topk):
        # Grad-CAM for the rank-th most confident class of every image.
        gcam.backward(ids=ids[:, [rank]])
        regions = gcam.generate(target_layer=target_layer)

        print('len images = ', len(images))

        for pos in range(len(images)):
            # `filename` is deliberately published as a module-level
            # global — presumably read by a caller after this returns;
            # TODO confirm before removing.
            global filename
            filename = osp.join(
                output_dir,
                "{}-{}-gradcam-{}-{}.png".format(pos, 'resnet', target_layer,
                                                 classes[ids[pos, rank]]),
            )
            save_gradcam(
                filename=filename,
                gcam=regions[pos, 0],
                raw_image=raw_images[pos],
            )
# Example #3
def eye_status(image, name, net):
    """
    Classify the eye state of the image stored under ``image[name]``.

    Runs a forward pass through ``net`` (wrapped in BackPropagation) and
    returns the label of the top-1 predicted class.

    Parameters
    ----------
    image : mapping holding a preprocessed tensor under ``name``
    name : key of the image to classify
    net : the eye-state classification network

    Returns
    -------
    The entry of the module-level ``classes`` table for the predicted id.
    """
    img = torch.stack([image[name]])
    bp = BackPropagation(model=net)
    # forward() returns probabilities and class ids sorted by confidence.
    probs, ids = bp.forward(img)
    actual_status = ids[:, 0]
    # BUG FIX: the original also computed a `prob` value (the opposite
    # class's probability when the prediction was 0) but never used it —
    # that dead code has been removed.
    return classes[actual_status.data]
# Example #4
def process_a_batch(image_paths, target_layer, arch, topk, output_dir, cuda):
    """Compute and save Grad-CAM overlays for one batch of image paths."""
    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Pretrained torchvision model.
    model = models.__dict__[arch](pretrained=True)
    model.to(device)
    model.eval()

    # Batch the preprocessed images on the target device.
    images = load_images_only(image_paths)
    images = torch.stack(images).to(device)

    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)  # sorted

    # =========================================================================
    print("Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    for rank in range(topk):
        # Grad-CAM for the rank-th most confident class of every image.
        gcam.backward(ids=ids[:, [rank]])
        regions = gcam.generate(target_layer=target_layer)
        regions = regions.cpu().numpy()
        for pos in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(image_paths[pos],
                                              classes[ids[pos, rank]],
                                              probs[pos, rank]))
            # Grad-CAM heatmap named after the source file and prediction.
            target = os.path.join(
                output_dir,
                "{}-{}-{}-{:.3}.png".format(
                    os.path.basename(image_paths[pos]),
                    classes[ids[pos, rank]], ids[pos, rank],
                    probs[pos, rank]),
            )
            save_cam(filename=target, gcam=regions[pos, 0])
    # Tear everything down so repeated calls do not leak GPU memory.
    bp.remove_hook()
    gcam.remove_hook()
    del bp
    del images
    del gcam
    del model
    del regions
    del probs
    del ids
    del _
    torch.cuda.empty_cache()
# Example #5
def guided_backprop_eye(image, name, net):
    """
    Build a side-by-side visualisation strip for one eye image.

    Concatenates (left to right): the raw image, a probability label,
    the guided-backprop gradient, the Grad-CAM overlay, and the guided
    Grad-CAM gradient, and returns the combined image.

    Parameters
    ----------
    image : mapping holding the tensor under ``name`` and the raw
        (HxWx3 uint8) image under ``name + '_raw'``
    name : key of the image to visualise
    net : the eye-state classification network
    """
    img = torch.stack([image[name]])
    bp = BackPropagation(model=net)
    probs, ids = bp.forward(img)
    gcam = GradCAM(model=net)
    _ = gcam.forward(img)

    gbp = GuidedBackPropagation(model=net)
    _ = gbp.forward(img)

    # Guided Backpropagation w.r.t. the top-1 predicted class.
    actual_status = ids[:, 0]
    gbp.backward(ids=actual_status.reshape(1, 1))
    gradients = gbp.generate()

    # Grad-CAM w.r.t. the same class.
    gcam.backward(ids=actual_status.reshape(1, 1))
    regions = gcam.generate(target_layer='last_conv')

    # Probability of the predicted state (opposite column when class 0).
    prob = probs.data[:, 0]
    if actual_status == 0:
        prob = probs.data[:, 1]

    # BUG FIX: `shape` was referenced but never defined (NameError at
    # runtime). cv2.hconcat below requires all parts to share the raw
    # image's height, so derive the label strip's height from it.
    shape = image[name + '_raw'].shape
    prob_image = np.zeros((shape[0], 60, 3), np.uint8)
    cv2.putText(prob_image, '%.1f%%' % (prob * 100), (5, 15),
                cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1, cv2.LINE_AA)

    guided_bpg_image = get_gradient_image(gradients[0])
    guided_bpg_image = cv2.merge(
        (guided_bpg_image, guided_bpg_image, guided_bpg_image))

    grad_cam_image = get_gradcam_image(gcam=regions[0, 0],
                                       raw_image=image[name + '_raw'])
    guided_gradcam_image = get_gradient_image(torch.mul(regions, gradients)[0])
    guided_gradcam_image = cv2.merge(
        (guided_gradcam_image, guided_gradcam_image, guided_gradcam_image))
    print(classes[actual_status.data], probs.data[:, 0] * 100)

    return cv2.hconcat([
        image[name + '_raw'], prob_image, guided_bpg_image, grad_cam_image,
        guided_gradcam_image
    ])
def vanilla_backprop(model, device, raw_image, image, CONFIG, topk):
    """
    Run vanilla backpropagation for the top-k classes of a single image.

    Returns a tuple of (per-class gradient results, probabilities, ids).
    """
    # =========================================================================
    print('Vanilla Backpropagation')
    # =========================================================================
    bp = BackPropagation(model=model)
    probs, idx = bp.forward(image.to(device))
    results = []
    for rank in range(topk):
        bp.backward(idx=idx[rank])
        grad = bp.generate()
        results.append(find_gradient(grad))
        print('[{:.5f}] {}'.format(probs[rank], idx[rank].cpu().numpy()))
    return (results, probs, idx)
# Example #7
def demo1(image_paths, target_layer, arch, topk, output_dir, cuda, checkpoint,
          distribute):
    """
    Visualize model responses given multiple images.

    Restores ``arch`` weights from ``<checkpoint><arch>/model_best.pth.tar``
    (stripping ``module.`` / ``module.1.`` key prefixes when the checkpoint
    came from a distributed run), then saves visualisations for the top-k
    predicted classes of every image. With ``only_CAM`` enabled (the
    default here) only the Grad-CAM overlays are written.

    Parameters
    ----------
    image_paths : list of input image paths
    target_layer : layer name handed to GradCAM.generate()
    arch : torchvision architecture name (also part of the checkpoint path)
    topk : number of top-confidence classes to visualise per image
    output_dir : directory that receives the result PNGs
    cuda : forwarded to get_device() to pick CPU/GPU
    checkpoint : directory prefix containing ``<arch>/model_best.pth.tar``
    distribute : values > 0.5 mean the checkpoint was written by a
        DataParallel/distributed wrapper, so its state-dict keys carry a
        ``module.`` (or ``module.1.``) prefix that must be stripped
    """

    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Model from torchvision; weights are restored from the checkpoint.
    model = models.__dict__[arch]()
    model = model.cuda()
    checkpoint = checkpoint + arch + '/model_best.pth.tar'
    # Map every saved tensor onto GPU 0 regardless of where it was saved.
    check_point = torch.load(checkpoint,
                             map_location=lambda storage, loc: storage.cuda(0))

    distributed_model = (distribute > 0.5)
    # When True, skip the gradient images and save only the Grad-CAM maps.
    only_CAM = True

    # BUG FIX: was `if distributed_model == True:`; a plain truth test is
    # the Python idiom and behaves identically for this bool.
    if distributed_model:
        # Build a new OrderedDict whose keys drop the wrapper prefix.
        from collections import OrderedDict
        new_check_point = OrderedDict()
        for k, v in check_point['state_dict'].items():
            if k.startswith('module.1.'):
                name = k[9:]  # remove `module.1.`
            else:
                name = k[7:]  # remove `module.`
            new_check_point[name] = v
        # load params
        model.load_state_dict(new_check_point)
    else:
        model.load_state_dict(check_point['state_dict'])

    model.to(device)
    model.eval()

    # Images: preprocess each path, keep the raw image for overlaying.
    images = []
    raw_images = []
    print("Images:")
    for i, image_path in enumerate(image_paths):
        print("\t#{}: {}".format(i, image_path))
        image, raw_image = preprocess(image_path)
        images.append(image)
        raw_images.append(raw_image)
    images = torch.stack(images).to(device)

    # Usage pattern of the grad_cam.py wrappers:
    #   1. wrap the model, 2. forward() with images,
    #   3. backward() with class ids, 4. generate() to export results.

    # =========================================================================
    print("Vanilla Backpropagation:")

    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)

    for i in range(topk):
        # In this example, we specify the high confidence classes
        bp.backward(ids=ids[:, [i]])
        gradients = bp.generate()

        # Save results as image files
        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j,
                                                                           i]))

            if not only_CAM:
                save_gradient(
                    filename=osp.join(
                        output_dir,
                        "{}-{}-vanilla-{}.png".format(j, arch,
                                                      classes[ids[j, i]]),
                    ),
                    gradient=gradients[j],
                )

    # Remove all the hook function in the "model"
    bp.remove_hook()

    # =========================================================================
    print("Deconvolution:")

    deconv = Deconvnet(model=model)
    _ = deconv.forward(images)

    for i in range(topk):
        deconv.backward(ids=ids[:, [i]])
        gradients = deconv.generate()

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j,
                                                                           i]))

            if not only_CAM:
                save_gradient(
                    filename=osp.join(
                        output_dir,
                        "{}-{}-deconvnet-{}.png".format(
                            j, arch, classes[ids[j, i]]),
                    ),
                    gradient=gradients[j],
                )

    deconv.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for i in range(topk):
        # Guided Backpropagation
        gbp.backward(ids=ids[:, [i]])
        gradients = gbp.generate()

        # Grad-CAM
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j,
                                                                           i]))

            # Guided Backpropagation
            if not only_CAM:
                save_gradient(
                    filename=osp.join(
                        output_dir,
                        "{}-{}-guided-{}.png".format(j, arch, classes[ids[j,
                                                                          i]]),
                    ),
                    gradient=gradients[j],
                )

            # Grad-CAM — named after the source image rather than its index.
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-{}.png".format(
                        osp.splitext(image_paths[j])[0],
                        arch,
                        target_layer),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )

            # Guided Grad-CAM
            if not only_CAM:
                save_gradient(
                    filename=osp.join(
                        output_dir,
                        "{}-{}-guided_gradcam-{}-{}.png".format(
                            j, arch, target_layer, classes[ids[j, i]]),
                    ),
                    gradient=torch.mul(regions, gradients)[j],
                )
# Example #8
def main():
    """
    Load a trained CIFAR-100 model and write Grad-CAM visualisations.

    Builds the validation set, restores the model named by ``args.model``
    and ``args.attention``, then runs vanilla backprop, deconvnet and
    Grad-CAM / guided backprop over the images returned by
    get_image_links(), saving the Grad-CAM overlays under
    ``results/<model>_<attention>/``.
    """

    # Dataset
    print('Creating dataset...')
    transform_val = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(MEAN, STD)
            ])
    valset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_val)
    val_loader = DataLoader(valset, batch_size=100, shuffle=False, num_workers=4, pin_memory=True)

    # Model
    checkpoint = os.path.join(args.checkpoint, args.model + "_" + args.attention)
    model_path = os.path.join(checkpoint, args.attention + '_' + 'best_model.pt')
    print('Loading model...')
    model = get_model(args.model, 'bn', args.attention)
    if os.path.exists(model_path):
        model.load_state_dict(torch.load(model_path))
    else:
        raise Exception('Cannot find model', model_path)
    model.cuda()
    cudnn.benchmark = True

    print('\tModel loaded: ' + args.model)
    print('\tAttention type: ' + args.attention)
    # sum over a generator instead of building a throwaway list.
    print("\tNumber of parameters: ", sum(param.nelement() for param in model.parameters()))

    result_path = os.path.join('results', args.model + "_" + args.attention)
    if not os.path.exists(result_path):
        os.makedirs(result_path)

    # Load the sample images to visualise.
    # BUG FIX: this block was wrapped in a dead `if True:` guard.
    image_paths = get_image_links()
    images = []
    raw_images = []
    print("Images:")
    for i, image_path in enumerate(image_paths):
        print("\t#{}: {}".format(i, image_path))
        image, raw_image = preprocess(image_path)
        images.append(image)
        raw_images.append(raw_image)
    images = torch.stack(images).to("cuda")

    model.eval()

    # Optionally dump raw validation samples with predictions for manual
    # inspection (disabled by default).
    GET_SAMPLE = False
    if GET_SAMPLE:
        for i, (inputs, labels) in enumerate(val_loader):
            inputs, labels = (Variable(inputs.cuda()),
                              Variable(labels.cuda()))
            outputs = model(inputs)

            _, preds = outputs.topk(1, 1, True, True)
            for sample in SAMPLES:
                image = get_image(inputs[sample])
                save_image(result_path, image, sample, preds[sample], labels[sample])
            break

    print("Vanilla Backpropagation:")
    topk = 1
    target_layer = "layer4.2"
    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)

    for i in range(topk):
        # In this example, we specify the high confidence classes
        bp.backward(ids=ids[:, [i]])
        gradients = bp.generate()

        # Report the top prediction per image (gradients are not saved here).
        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, LABELS[ids[j, i]], probs[j, i]))

    # Remove all the hook function in the "model"
    bp.remove_hook()

    # =========================================================================
    print("Deconvolution:")

    deconv = Deconvnet(model=model)
    _ = deconv.forward(images)

    for i in range(topk):
        deconv.backward(ids=ids[:, [i]])
        gradients = deconv.generate()

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, LABELS[ids[j, i]], probs[j, i]))

    deconv.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for i in range(topk):
        # Guided Backpropagation
        gbp.backward(ids=ids[:, [i]])
        gradients = gbp.generate()

        # Grad-CAM
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, LABELS[ids[j, i]], probs[j, i]))

            # Grad-CAM
            save_gradcam(
                filename=osp.join(
                    result_path,
                    "{}-gradcam-{}-{}.png".format(
                        j, target_layer, LABELS[ids[j, i]]
                    ),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )

    print('Finish!!!')
# Example #9
def fulltest(image_paths, target_layer, arch, topk, output_dir, cuda):
    """
    Visualize model responses given multiple images.

    Restores the best checkpoint for ``arch`` and, for the top-k predicted
    classes of every image, saves vanilla-backprop gradients, deconvnet
    gradients, guided-backprop gradients, Grad-CAM overlays and guided
    Grad-CAM gradients into ``output_dir``.
    """

    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Model: load the checkpoint, rebuild the architecture, restore weights.
    # (Removed: unused `fold` variable, commented-out dead code, and a
    # duplicate debug print of the state-dict keys.)
    best_model = torch.load(config.weights + 'ct-cn/' + 'model_best.pth.tar')
    print(best_model["state_dict"].keys())
    model = make_model(arch, num_classes=config.num_classes, pretrained=True)
    model.load_state_dict(best_model["state_dict"])
    model.to(device)
    model.eval()

    # Images: preprocess and batch on the target device.
    images, raw_images = load_images(image_paths)
    images = torch.stack(images).to(device)

    # Usage pattern of the grad_cam.py wrappers:
    #   1. wrap the model, 2. forward() with images,
    #   3. backward() with class ids, 4. generate() to export results.

    # =========================================================================
    print("Vanilla Backpropagation:")

    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)  # sorted
    print(probs)
    print(ids)

    for i in range(topk):
        bp.backward(ids=ids[:, [i]])
        gradients = bp.generate()

        # Save results as image files
        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j,
                                                                           i]))

            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-vanilla-{}.png".format(j, arch, classes[ids[j, i]]),
                ),
                gradient=gradients[j],
            )

    # Remove all the hook function in the "model"
    bp.remove_hook()

    # =========================================================================
    print("Deconvolution:")

    deconv = Deconvnet(model=model)
    _ = deconv.forward(images)

    for i in range(topk):
        deconv.backward(ids=ids[:, [i]])
        gradients = deconv.generate()

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j,
                                                                           i]))

            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-deconvnet-{}.png".format(j, arch, classes[ids[j,
                                                                         i]]),
                ),
                gradient=gradients[j],
            )

    deconv.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for i in range(topk):
        # Guided Backpropagation
        gbp.backward(ids=ids[:, [i]])
        gradients = gbp.generate()

        # Grad-CAM
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j,
                                                                           i]))

            # Guided Backpropagation
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided-{}.png".format(j, arch, classes[ids[j, i]]),
                ),
                gradient=gradients[j],
            )

            # Grad-CAM
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(j, arch, target_layer,
                                                     classes[ids[j, i]]),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )

            # Guided Grad-CAM
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided_gradcam-{}-{}.png".format(
                        j, arch, target_layer, classes[ids[j, i]]),
                ),
                gradient=torch.mul(regions, gradients)[j],
            )
# Example #10
def main():
    """
    Run Grad-CAM, vanilla backprop, deconvolution and guided backprop
    for one image through a loaded binary ('no'/'yes') classifier,
    saving every visualisation under ``results/``.
    """
    parser = make_parser()  # build the CLI argument parser
    args = parser.parse_args()
    # Per-architecture Grad-CAM settings; None if args.arch is unknown.
    CONFIG = {
        'resnet152': {
            'target_layer': 'layer4.2',
            'input_size': 224
        },
        'vgg19': {
            'target_layer': 'features.36',
            'input_size': 224
        },
        'vgg19_bn': {
            'target_layer': 'features.52',
            'input_size': 224
        },
        'inception_v3': {
            'target_layer': 'Mixed_7c',
            'input_size': 299
        },
        'densenet201': {
            'target_layer': 'features.denseblock4',
            'input_size': 224
        },
        'resnet50': {
            'target_layer': 'layer4',
            'input_size': 224
        },
        'resnet18': {
            'target_layer': 'layer4',
            'input_size': 224
        },
        # Add your model
    }.get(args.arch)

    device = torch.device(
        'cuda' if args.cuda and torch.cuda.is_available() else 'cpu')

    if args.cuda:
        current_device = torch.cuda.current_device()
        print('Running on the GPU:',
              torch.cuda.get_device_name(current_device))
    else:
        print('Running on the CPU')

    # Synset words: class names parsed from the ImageNet synset file.
    classes = list()
    with open('samples/synset_words.txt') as lines:
        for line in lines:
            line = line.strip().split(' ', 1)[1]
            line = line.split(', ', 1)[0].replace(' ', '_')
            classes.append(line)

    # Model
    print("1")
    class_names = ['no',
                   'yes']  # the two prediction classes of this model
    model_ft = get_cnn(len(class_names),
                       args)  # build the CNN architecture to be used
    print("2")
    # NOTE(review): criterion, optimizer and scheduler are created here
    # but never used below — presumably copied from a training script;
    # verify before removing (get_optimizer's side effects are unknown).
    criterion = nn.CrossEntropyLoss(
    )  # loss criterion (used in training, unused here)
    optimizer_ft = get_optimizer(
        model_ft, args
    )  # optimizer over the model weights (unused here)
    exp_lr_scheduler = lr_scheduler.StepLR(
        optimizer_ft, step_size=7, gamma=0.1
    )  # learning-rate schedule (unused here)
    model = load_model(model_ft, args.weights)  # load the model with weights
    print("3")
    model = model.to(device)
    model.eval()

    # Image: BGR->RGB, resized to the architecture's input size, then
    # normalised with ImageNet statistics and batched (size 1).
    print("image path: " + str(args.image_path))
    raw_image = cv2.imread(args.image_path)[..., ::-1]

    raw_image = cv2.resize(raw_image, (CONFIG['input_size'], ) * 2)
    image = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        )
    ])(raw_image).unsqueeze(0)

    # =========================================================================
    print('Grad-CAM')
    # =========================================================================
    gcam = GradCAM(model=model)
    probs, idx = gcam.forward(image.to(device))

    for i in range(0, args.topk):
        print("idx is: " + str(idx))
        print("idx is: " + str(probs))
        print("i is: " + str(i))
        # NOTE: this wrapper uses the older idx=-style API.
        gcam.backward(idx=idx[i])
        print("idx AFTER is: " + str(idx))
        print("idx[i]: " + str(idx[i]))
        output = gcam.generate(target_layer=CONFIG['target_layer'])
        print("classes[idx[i]]: " + str(classes[idx[i]]))
        save_gradcam(
            'results/{}_gcam_{}.png'.format(classes[idx[i]], args.arch),
            output, raw_image)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))

    # =========================================================================
    print('Vanilla Backpropagation')
    # =========================================================================
    bp = BackPropagation(model=model)
    probs, idx = bp.forward(image.to(device))

    for i in range(0, args.topk):
        bp.backward(idx=idx[i])
        output = bp.generate()

        save_gradient(
            'results/{}_bp_{}.png'.format(classes[idx[i]], args.arch), output)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))

    # =========================================================================
    print('Deconvolution')
    # =========================================================================
    deconv = Deconvolution(
        model=copy.deepcopy(model))  # TODO: remove hook func in advance
    probs, idx = deconv.forward(image.to(device))

    for i in range(0, args.topk):
        deconv.backward(idx=idx[i])
        output = deconv.generate()

        save_gradient(
            'results/{}_deconv_{}.png'.format(classes[idx[i]], args.arch),
            output)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))

    # =========================================================================
    print('Guided Backpropagation/Guided Grad-CAM')
    # =========================================================================
    gbp = GuidedBackPropagation(model=model)
    probs, idx = gbp.forward(image.to(device))

    for i in range(0, args.topk):
        gcam.backward(idx=idx[i])
        region = gcam.generate(target_layer=CONFIG['target_layer'])

        gbp.backward(idx=idx[i])
        feature = gbp.generate()

        # Guided Grad-CAM = guided-backprop gradient masked by the
        # (resized) Grad-CAM region.
        h, w, _ = feature.shape
        region = cv2.resize(region, (w, h))[..., np.newaxis]
        output = feature * region

        save_gradient(
            'results/{}_gbp_{}.png'.format(classes[idx[i]], args.arch),
            feature)
        save_gradient(
            'results/{}_ggcam_{}.png'.format(classes[idx[i]], args.arch),
            output)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))
# Example #11
def main(image_path, arch, topk, cuda):
    """
    Run Grad-CAM, vanilla backprop, deconvolution and guided backprop /
    guided Grad-CAM for one image through a pretrained torchvision model,
    saving every visualisation under ``results/``.
    """

    # Per-architecture Grad-CAM settings; None when `arch` is unknown.
    CONFIG = {
        'resnet152': {
            'target_layer': 'layer4.2',
            'input_size': 224
        },
        'vgg19': {
            'target_layer': 'features.36',
            'input_size': 224
        },
        'vgg19_bn': {
            'target_layer': 'features.52',
            'input_size': 224
        },
        'inception_v3': {
            'target_layer': 'Mixed_7c',
            'input_size': 299
        },
        'densenet201': {
            'target_layer': 'features.denseblock4',
            'input_size': 224
        },
        # Add your model
    }.get(arch)

    cuda = cuda and torch.cuda.is_available()

    if cuda:
        current_device = torch.cuda.current_device()
        print('Running on the GPU:', torch.cuda.get_device_name(current_device))
    else:
        print('Running on the CPU')

    # Synset words: class names parsed from the ImageNet synset file.
    classes = []
    with open('samples/synset_words.txt') as lines:
        for line in lines:
            line = line.strip().split(' ', 1)[1]
            line = line.split(', ', 1)[0].replace(' ', '_')
            classes.append(line)

    # Pretrained torchvision model.
    model = models.__dict__[arch](pretrained=True)

    # Image: BGR->RGB, resized, then normalised with ImageNet statistics.
    raw_image = cv2.imread(image_path)[..., ::-1]
    raw_image = cv2.resize(raw_image, (CONFIG['input_size'], ) * 2)
    image = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])(raw_image)

    if cuda:
        model.cuda()
        image = image.cuda()

    # =========================================================================
    print('Grad-CAM')
    # =========================================================================
    gcam = GradCAM(model=model)
    probs, idx = gcam.forward(to_var(image))

    for rank in range(topk):
        gcam.backward(idx=idx[rank])
        heatmap = gcam.generate(target_layer=CONFIG['target_layer'])

        save_gradcam('results/{}_gcam_{}.png'.format(classes[idx[rank]], arch), heatmap, raw_image)  # NOQA
        print('[{:.5f}] {}'.format(probs[rank], classes[idx[rank]]))

    # =========================================================================
    print('Vanilla Backpropagation')
    # =========================================================================
    bp = BackPropagation(model=model)
    probs, idx = bp.forward(to_var(image))

    for rank in range(topk):
        bp.backward(idx=idx[rank])
        grad = bp.generate()

        save_gradient('results/{}_bp_{}.png'.format(classes[idx[rank]], arch), grad)  # NOQA
        print('[{:.5f}] {}'.format(probs[rank], classes[idx[rank]]))

    # =========================================================================
    print('Deconvolution')
    # =========================================================================
    # Deep-copy so the deconvnet hooks do not pollute the shared model.
    deconv = Deconvolution(model=copy.deepcopy(model))  # TODO: remove hook func in advance
    probs, idx = deconv.forward(to_var(image))

    for rank in range(topk):
        deconv.backward(idx=idx[rank])
        grad = deconv.generate()

        save_gradient('results/{}_deconv_{}.png'.format(classes[idx[rank]], arch), grad)  # NOQA
        print('[{:.5f}] {}'.format(probs[rank], classes[idx[rank]]))

    # =========================================================================
    print('Guided Backpropagation/Guided Grad-CAM')
    # =========================================================================
    gbp = GuidedBackPropagation(model=model)
    probs, idx = gbp.forward(to_var(image))

    for rank in range(topk):
        gcam.backward(idx=idx[rank])
        region = gcam.generate(target_layer=CONFIG['target_layer'])

        gbp.backward(idx=idx[rank])
        feature = gbp.generate()

        # Guided Grad-CAM = guided-backprop gradient masked by the
        # (resized) Grad-CAM region.
        h, w, _ = feature.shape
        region = cv2.resize(region, (w, h))[..., np.newaxis]
        masked = feature * region

        save_gradient('results/{}_gbp_{}.png'.format(classes[idx[rank]], arch), feature)  # NOQA
        save_gradient('results/{}_ggcam_{}.png'.format(classes[idx[rank]], arch), masked)  # NOQA
        print('[{:.5f}] {}'.format(probs[rank], classes[idx[rank]]))
# Example #12
def main(image_path, target_layer, arch, topk, cuda):
    """Visualize backprop/deconvolution/Grad-CAM responses of an MGN re-ID model
    over a hard-coded list of Market-1501 query images.

    NOTE(review): both `image_path` and `target_layer` are overwritten below
    (by the query list read from "new.txt" and by the `t` loop), so these two
    arguments are effectively ignored. `arch` is unused as well; the model is
    always MGN.
    """

    # Select GPU when requested and available, otherwise fall back to CPU.
    device = torch.device(
        "cuda" if cuda and torch.cuda.is_available() else "cpu")

    if cuda:
        # NOTE(review): assumes CUDA is present when `cuda` is truthy;
        # torch.cuda.current_device() raises on CPU-only machines.
        current_device = torch.cuda.current_device()
        print("Running on the GPU:",
              torch.cuda.get_device_name(current_device))
    else:
        print("Running on the CPU")

    # Synset words (filled from samples/new.txt below)
    classes = list()

    # Model from torchvision
    # NOTE(review): MGN is a project-local multi-branch re-ID network — not a
    # torchvision model despite the comment above.
    model = MGN()
    """
    pickle.load = partial(pickle.load, encoding="latin1")
    pickle.Unpickler = partial(pickle.Unpickler, encoding="latin1")
    checkpoint = torch.load("hacnn_market_xent.pth.tar", pickle_module=pickle)

        #checkpoint = torch.load(args.load_weights)
    #checkpoint = torch.load("hacnn_market_xent.pth.tar")
    pretrain_dict = checkpoint['state_dict']
    model_dict = model.state_dict()
    pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()}
    model_dict.update(pretrain_dict)
    model.load_state_dict(model_dict)
    """
    # Load checkpoint weights, keeping only tensors whose name AND shape match
    # the freshly constructed model (partial/defensive loading).
    pretrain_dict = torch.load("model_700.pt")
    model_dict = model.state_dict()
    pretrain_dict = {
        k: v
        for k, v in pretrain_dict.items()
        if k in model_dict and model_dict[k].size() == v.size()
    }
    model_dict.update(pretrain_dict)
    model.load_state_dict(model_dict)

    #model = models.__dict__[arch](pretrained=True)

    model.to(device)
    model.eval()
    # Absolute paths of the query images to visualize.
    place = []
    # Synset-style label file: "<id> <label phrase>" per line; keep the first
    # comma-separated phrase with spaces replaced by underscores.
    with open("samples/new.txt") as lines:
        for line in lines:
            line = line.strip().split(" ", 1)[1]
            line = line.split(", ", 1)[0].replace(" ", "_")
            classes.append(line)
    # Image preprocessing
    # NOTE(review): hard-coded dataset root — "new.txt" lists query file names.
    with open("new.txt") as lines:
        for line in lines:
            line = line.strip()
            line = "/mnt/SSD/jzwang/market1501/query/" + line
            place.append(line)
    for line in place:
        # Overwrites the `image_path` parameter on purpose — one pass per query.
        image_path = line
        # BGR -> RGB, then resize to the MGN input size (width 128, height 384).
        raw_image = cv2.imread(image_path)[..., ::-1]
        raw_image = cv2.resize(raw_image, (128, 384))
        image = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])(raw_image).unsqueeze(0)
        image = image.to(device)
        """
        Common usage:
        1. Wrap your model with visualization classes defined in grad_cam.py
        2. Run forward() with an image
        3. Run backward() with a specific class
        4. Run generate() to export result
        """

        # =========================================================================
        print("Vanilla Backpropagation")

        # predictions[i] is (probability, class index) for the i-th ranked class.
        bp = BackPropagation(model=model)
        predictions = bp.forward(image)

        for i in range(topk):
            print("[{:.5f}] {}".format(predictions[i][0],
                                       classes[predictions[i][1]]))

            # Gradients are computed but never saved in this section.
            bp.backward(idx=predictions[i][1])
            gradient = bp.generate()

        # Remove all the hook function in the "model"
        bp.remove_hook()

        # =========================================================================
        print("Deconvolution")

        deconv = Deconvnet(model=model)
        _ = deconv.forward(image)

        for i in range(topk):
            print("[{:.5f}] {}".format(predictions[i][0],
                                       classes[predictions[i][1]]))

            # As above: generated but not written to disk.
            deconv.backward(idx=predictions[i][1])
            gradient = deconv.generate()

        deconv.remove_hook()

        # =========================================================================
        print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM")

        gcam = GradCAM(model=model)
        _ = gcam.forward(image)

        gbp = GuidedBackPropagation(model=model)

        _ = gbp.forward(image)

        # Candidate target layers — presumably MGN's three part branches;
        # TODO(review): confirm against the MGN definition.
        t = ['p1', 'p2', 'p3']
        for i in range(topk):
            print("[{:.5f}] {}".format(predictions[i][0],
                                       classes[predictions[i][1]]))

            # Grad-CAM
            # Shadows the (unused) `target_layer` parameter.
            for target_layer in t:
                gcam.backward(idx=predictions[i][1])
                #print("1")
                region = gcam.generate(target_layer=target_layer)
                #print(2)
                # Reduce the path to the bare file name; idempotent on later
                # iterations since the first pass already stripped the dirs.
                line = line.strip().split("/")[-1]
                save_gradcam(
                    "results/{}-gradcam-{}.png".format(line, target_layer),
                    region,
                    raw_image,
                )
                #print(3)

                # Guided Backpropagation
                gbp.backward(idx=predictions[i][1])
                gradient = gbp.generate()

                # Guided Grad-CAM
                # NOTE(review): `output` is computed but never saved here.
                h, w, _ = gradient.shape
                region = cv2.resize(region, (w, h))[..., np.newaxis]
                output = gradient * region
Beispiel #13
0
def demo1(image_paths, target_layer, arch, topk, output_dir, cuda, checkpoint,
          distribute):
    """
    Visualize model responses given multiple images.

    Args:
        image_paths: iterable of input image paths.
        target_layer: layer name (kept for interface compatibility).
        arch: torchvision architecture name; also selects the checkpoint dir.
        topk: number of top classes to visualize.
        output_dir: directory for result images.
        cuda: forwarded to get_device() to pick CPU/GPU.
        checkpoint: base directory containing "<arch>/model_best.pth.tar".
        distribute: > 0.5 means the checkpoint was saved from a
            (Distributed)DataParallel wrapper and keys carry a "module." prefix.
    """

    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Architecture from torchvision; weights come from the checkpoint below,
    # so no pretrained ImageNet weights are requested here.
    model = models.__dict__[arch]()

    checkpoint = checkpoint + arch + '/model_best.pth.tar'
    # map_location=device (instead of forcing storage.cuda(0)) keeps the
    # checkpoint loadable on CPU-only machines as well.
    check_point = torch.load(checkpoint, map_location=device)

    if distribute > 0.5:
        # Strip the DataParallel prefix ("module." or "module.1.") from every
        # state-dict key so it matches the bare model.
        from collections import OrderedDict
        new_check_point = OrderedDict()
        for k, v in check_point['state_dict'].items():
            if k.startswith('module.1.'):
                name = k[9:]  # remove `module.1.`
            else:
                name = k[7:]  # remove `module.`
            new_check_point[name] = v
        model.load_state_dict(new_check_point)
    else:
        model.load_state_dict(check_point['state_dict'])

    model.to(device)
    model.eval()

    # Images: keep both the normalized tensors and the raw pixels (for overlays)
    images = []
    raw_images = []
    print("Images:")
    for i, image_path in enumerate(image_paths):
        print("\t#{}: {}".format(i, image_path))
        image, raw_image = preprocess(image_path)
        images.append(image)
        raw_images.append(raw_image)
    images = torch.stack(images).to(device)
    """
    Common usage:
    1. Wrap your model with visualization classes defined in grad_cam.py
    2. Run forward() with images
    3. Run backward() with a list of specific classes
    4. Run generate() to export results
    """

    # =========================================================================
    print("Vanilla Backpropagation:")

    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)
Beispiel #14
0
def main(image_path, arch, topk, cuda):
    """Visualize Grad-CAM, backprop, deconvolution and guided Grad-CAM for a
    single image with a fine-tuned torchvision classifier.

    NOTE(review): the classifier head is modified via add_module() below —
    whether those extra layers actually run depends on the model's forward(),
    which torchvision's stock forward would NOT call. Verify against the
    training code that produced 'weight_resnet18_3.pth'.
    """

    # Per-architecture target layer and expected input resolution.
    CONFIG = {
        'resnet18': {
            'target_layer': 'layer4.1',
            'input_size': 224
        },
        'resnet152': {
            'target_layer': 'layer4.2',
            'input_size': 224
        },
        'vgg19': {
            'target_layer': 'features.36',
            'input_size': 224
        },
        'vgg19_bn': {
            'target_layer': 'features.52',
            'input_size': 224
        },
        'inception_v3': {
            'target_layer': 'Mixed_7c',
            'input_size': 299
        },
        'densenet201': {
            'target_layer': 'features.denseblock4',
            'input_size': 224
        },
        # Add your model
    }.get(arch)

    device = torch.device(
        'cuda' if cuda and torch.cuda.is_available() else 'cpu')

    if cuda:
        # NOTE(review): assumes CUDA is present when `cuda` is truthy;
        # torch.cuda.current_device() raises on CPU-only machines.
        current_device = torch.cuda.current_device()
        print('Running on the GPU:',
              torch.cuda.get_device_name(current_device))
    else:
        print('Running on the CPU')

    # Synset words
    # NOTE(review): only 2 labels — indexing classes[idx[i]] assumes the model
    # effectively predicts classes 0/1 (and topk <= 2); otherwise IndexError.
    classes = ["other", "rori"]
    #    with open('samples/synset_words.txt') as lines:
    #        for line in lines:
    #            line = line.strip().split(' ', 1)[1]
    #            line = line.split(', ', 1)[0].replace(' ', '_')
    #            classes.append(line)

    # Model
    model = models.__dict__[arch](pretrained=True)
    num_features = model.fc.in_features
    model.fc = nn.Linear(num_features, 200)  # replace the head: num_features -> 200 (the 2-way output comes from fc2 below; original note claimed "512 -> 2")
    model.add_module('relu_fc', nn.ReLU())
    model.add_module('fc2', nn.Linear(200, 2))
    # Fine-tuned weights for the modified architecture.
    param = torch.load('weight_resnet18_3.pth')
    model.load_state_dict(param)

    model.to(device)
    model.eval()

    # Image: BGR -> RGB, resized to the network's expected input size.
    raw_image = cv2.imread(image_path)[..., ::-1]
    raw_image = cv2.resize(raw_image, (CONFIG['input_size'], ) * 2)
    image = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        )
    ])(raw_image).unsqueeze(0)

    # =========================================================================
    print('Grad-CAM')
    # =========================================================================
    gcam = GradCAM(model=model)
    probs, idx = gcam.forward(image.to(device))

    for i in range(0, topk):
        gcam.backward(idx=idx[i])
        output = gcam.generate(target_layer=CONFIG['target_layer'])

        save_gradcam('results/{}_gcam_{}.png'.format(classes[idx[i]], arch),
                     output, raw_image)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))

    # =========================================================================
    print('Vanilla Backpropagation')
    # =========================================================================
    bp = BackPropagation(model=model)
    probs, idx = bp.forward(image.to(device))

    for i in range(0, topk):
        bp.backward(idx=idx[i])
        output = bp.generate()

        save_gradient('results/{}_bp_{}.png'.format(classes[idx[i]], arch),
                      output)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))

    # =========================================================================
    print('Deconvolution')
    # =========================================================================
    # deepcopy so deconv's hooks don't interfere with gcam/gbp below.
    deconv = Deconvolution(
        model=copy.deepcopy(model))  # TODO: remove hook func in advance
    probs, idx = deconv.forward(image.to(device))

    for i in range(0, topk):
        deconv.backward(idx=idx[i])
        output = deconv.generate()

        save_gradient('results/{}_deconv_{}.png'.format(classes[idx[i]], arch),
                      output)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))

    # =========================================================================
    print('Guided Backpropagation/Guided Grad-CAM')
    # =========================================================================
    gbp = GuidedBackPropagation(model=model)
    probs, idx = gbp.forward(image.to(device))

    for i in range(0, topk):
        # Grad-CAM region for the i-th ranked class (reuses the gcam wrapper).
        gcam.backward(idx=idx[i])
        region = gcam.generate(target_layer=CONFIG['target_layer'])

        gbp.backward(idx=idx[i])
        feature = gbp.generate()

        # Guided Grad-CAM: gate the guided gradients with the upsampled CAM.
        h, w, _ = feature.shape
        region = cv2.resize(region, (w, h))[..., np.newaxis]
        output = feature * region

        save_gradient('results/{}_gbp_{}.png'.format(classes[idx[i]], arch),
                      feature)
        save_gradient('results/{}_ggcam_{}.png'.format(classes[idx[i]], arch),
                      output)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))
Beispiel #15
0
def main(image_path, arch, topk, cuda):
    """Visualize Grad-CAM, vanilla backprop and guided backprop/Grad-CAM for a
    single image with a pretrained torchvision classifier (old wrapper API).

    Args:
        image_path: path to the input image.
        arch: one of 'resnet152', 'vgg19', 'inception_v3'.
        topk: number of top predicted classes to visualize.
        cuda: if truthy and CUDA is available, run on the GPU.

    Raises:
        ValueError: if `arch` has no entry in CONFIG.
    """

    # Per-architecture target layer and expected input resolution.
    CONFIG = {
        'resnet152': {
            'target_layer': 'layer4.2',
            'input_size': 224
        },
        'vgg19': {
            'target_layer': 'features.35',
            'input_size': 224
        },
        'inception_v3': {
            'target_layer': 'Mixed_7c',
            'input_size': 299
        },
    }.get(arch)
    if CONFIG is None:
        # Fail fast with a clear message instead of a TypeError on None below.
        raise ValueError('unsupported architecture: {}'.format(arch))

    cuda = cuda and torch.cuda.is_available()

    # Synset words: class index -> human-readable ImageNet label
    classes = list()
    with open('samples/synset_words.txt') as lines:
        for line in lines:
            line = line.strip().split(' ', 1)[1]
            line = line.split(', ', 1)[0].replace(' ', '_')
            classes.append(line)

    # Model
    model = models.__dict__[arch](pretrained=True)
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    # Image: BGR -> RGB, resized to the network's expected input size.
    raw_image = cv2.imread(image_path)[:, :, ::-1]
    raw_image = cv2.resize(raw_image, (CONFIG['input_size'], ) * 2)
    image = transform(raw_image).unsqueeze(0)
    # torch.autograd.Variable(..., volatile=...) is deprecated; tensors carry
    # autograd state directly, so just mark the input as requiring gradients.
    image.requires_grad_(True)

    if cuda:
        model.cuda()
        image = image.cuda()

    print('1. Grad-CAM')
    gcam = GradCAM(model=model, cuda=cuda)
    probs, idx = gcam.forward(image)

    for i in range(0, topk):
        gcam.backward(idx=idx[i])
        output = gcam.generate(target_layer=CONFIG['target_layer'])

        gcam.save('results/{}_gcam_{}.png'.format(classes[idx[i]], arch), output, raw_image)  # NOQA
        print('\t{:.5f}\t{}'.format(probs[i], classes[idx[i]]))

    print('2. Vanilla Backpropagation')
    bp = BackPropagation(model=model, cuda=cuda)
    probs, idx = bp.forward(image)

    for i in range(0, topk):
        bp.backward(idx=idx[i])
        output = bp.generate()
        bp.save('results/{}_bp_{}.png'.format(classes[idx[i]], arch), output)
        print('\t{:.5f}\t{}'.format(probs[i], classes[idx[i]]))

    print('3. Guided Backpropagation/Grad-CAM')
    gbp = GuidedBackPropagation(model=model, cuda=cuda)
    probs, idx = gbp.forward(image)

    for i in range(0, topk):
        # Grad-CAM region for the i-th ranked class
        gcam.backward(idx=idx[i])
        region = gcam.generate(target_layer=CONFIG['target_layer'])

        # Guided Backpropagation gradients for the same class
        gbp.backward(idx=idx[i])
        feature = gbp.generate()

        # Guided Grad-CAM: element-wise product of the two
        output = feature * region[:, :, np.newaxis]
        gbp.save('results/{}_gbp_{}.png'.format(classes[idx[i]], arch), feature)  # NOQA
        gbp.save('results/{}_ggcam_{}.png'.format(classes[idx[i]], arch), output)  # NOQA
        print('\t{:.5f}\t{}'.format(probs[i], classes[idx[i]]))
Beispiel #16
0
def main():
    """Visualize Grad-CAM, backprop, deconvolution and guided Grad-CAM for one
    hard-coded tomato-disease validation image.

    All configuration (paths, architecture, checkpoint) is baked in; results
    are written under "results/".
    """
    root_path = '/media/palm/Unimportant/pdr2018/typesep_validate/Tomato/'
    image_name = 'c9ebc74c2177ce60a8230855333fb9e7.jpg'
    folder_name = '14_Tomato_Spider_Mite_Damage_Serious'
    image_path = os.path.join(root_path, folder_name, image_name)
    topk = 1
    cuda = 'cuda'  # truthy flag: any non-empty string enables GPU selection
    arch = 'densenet201'
    # Per-architecture target layer and expected input resolution.
    CONFIG = {
        'resnet152': {
            'target_layer': 'layer4.2',
            'input_size': 224
        },
        'vgg19': {
            'target_layer': 'features.36',
            'input_size': 224
        },
        'vgg19_bn': {
            'target_layer': 'features.52',
            'input_size': 224
        },
        'inception_v3': {
            'target_layer': 'Mixed_7c',
            'input_size': 299
        },
        'densenet201': {
            'target_layer': 'features.denseblock4',
            'input_size': 224
        },
        # Add your model
    }.get(arch)
    # Only the class lookup table is used here.
    _, _, c = getlookup()
    device = torch.device(
        'cuda' if cuda and torch.cuda.is_available() else 'cpu')

    # Report the device actually selected (checking device.type instead of the
    # raw flag avoids calling torch.cuda.current_device() on CPU-only hosts).
    if device.type == 'cuda':
        current_device = torch.cuda.current_device()
        print('Running on the GPU:',
              torch.cuda.get_device_name(current_device))
    else:
        print('Running on the CPU')

    # Synset words
    classes = c['Tomato']

    # Model
    model = getmodel(20)
    # map_location=device keeps the checkpoint loadable on CPU-only machines.
    checkpoint = torch.load('checkpoint/try_4_densesep-Tomatotemp.t7',
                            map_location=device)
    model.load_state_dict(checkpoint['net'])
    model.to(device)  # was hard-coded 'cuda'; honor the selected device
    model.eval()

    # Image: BGR -> RGB, resized to the network's expected input size.
    raw_image = cv2.imread(image_path)[..., ::-1]
    raw_image = cv2.resize(raw_image, (CONFIG['input_size'], ) * 2)
    image = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        )
    ])(raw_image).unsqueeze(0)

    # =========================================================================
    print('Grad-CAM')
    # =========================================================================
    gcam = GradCAM(model=model)
    probs, idx = gcam.forward(image.to(device))

    for i in range(0, topk):
        gcam.backward(idx=idx[i])
        output = gcam.generate(target_layer=CONFIG['target_layer'])

        save_gradcam(
            'results/{}_{}_gcam_{}.png'.format(image_name, classes[idx[i]],
                                               arch), output, raw_image)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))

    # =========================================================================
    print('Vanilla Backpropagation')
    # =========================================================================
    bp = BackPropagation(model=model)
    probs, idx = bp.forward(image.to(device))

    for i in range(0, topk):
        bp.backward(idx=idx[i])
        output = bp.generate()

        save_gradient(
            'results/{}_{}_bp_{}.png'.format(image_name, classes[idx[i]],
                                             arch), output)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))

    # =========================================================================
    print('Deconvolution')
    # =========================================================================
    # deepcopy so deconv's hooks don't interfere with gcam/gbp below.
    deconv = Deconvolution(
        model=copy.deepcopy(model))  # TODO: remove hook func in advance
    probs, idx = deconv.forward(image.to(device))

    for i in range(0, topk):
        deconv.backward(idx=idx[i])
        output = deconv.generate()

        save_gradient(
            'results/{}_{}_deconv_{}.png'.format(image_name, classes[idx[i]],
                                                 arch), output)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))

    # =========================================================================
    print('Guided Backpropagation/Guided Grad-CAM')
    # =========================================================================
    gbp = GuidedBackPropagation(model=model)
    probs, idx = gbp.forward(image.to(device))

    for i in range(0, topk):
        # Grad-CAM region for the i-th ranked class (reuses the gcam wrapper).
        gcam.backward(idx=idx[i])
        region = gcam.generate(target_layer=CONFIG['target_layer'])

        gbp.backward(idx=idx[i])
        feature = gbp.generate()

        # Guided Grad-CAM: gate the guided gradients with the upsampled CAM.
        h, w, _ = feature.shape
        region = cv2.resize(region, (w, h))[..., np.newaxis]
        output = feature * region

        save_gradient(
            'results/{}_{}_gbp_{}.png'.format(image_name, classes[idx[i]],
                                              arch), feature)
        save_gradient(
            'results/{}_{}_ggcam_{}.png'.format(image_name, classes[idx[i]],
                                                arch), output)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))
Beispiel #17
0
def demo1(image_paths, target_layer, arch, topk, output_dir, cuda):
    """
    Visualize model responses given multiple images
    """

    device = get_device(cuda)

    # Class-index -> synset-word mapping
    classes = get_classtable()

    # Pretrained torchvision model, frozen in inference mode
    model = models.__dict__[arch](pretrained=True)
    model.to(device)
    model.eval()

    # Read every image once, keeping both the network input and the raw pixels
    print("Images:")
    images = []
    raw_images = []
    for n, path in enumerate(image_paths):
        print("\t#{}: {}".format(n, path))
        tensor, raw = preprocess(path)
        images.append(tensor)
        raw_images.append(raw)
    images = torch.stack(images).to(device)

    """
    Common usage:
    1. Wrap your model with visualization classes defined in grad_cam.py
    2. Run forward() with images
    3. Run backward() with a list of specific classes
    4. Run generate() to export results
    """

    # =========================================================================
    print("Vanilla Backpropagation:")

    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)

    for rank in range(topk):
        # Backpropagate w.r.t. each image's rank-th most confident class
        bp.backward(ids=ids[:, [rank]])
        gradients = bp.generate()

        # Save results as image files
        for n, gradient in enumerate(gradients):
            print("\t#{}: {} ({:.5f})".format(n, classes[ids[n, rank]], probs[n, rank]))

            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-vanilla-{}.png".format(n, arch, classes[ids[n, rank]]),
                ),
                gradient=gradient,
            )

    # Remove all the hook function in the "model"
    bp.remove_hook()

    # =========================================================================
    print("Deconvolution:")

    deconv = Deconvnet(model=model)
    _ = deconv.forward(images)

    for rank in range(topk):
        deconv.backward(ids=ids[:, [rank]])
        gradients = deconv.generate()

        for n, gradient in enumerate(gradients):
            print("\t#{}: {} ({:.5f})".format(n, classes[ids[n, rank]], probs[n, rank]))

            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-deconvnet-{}.png".format(n, arch, classes[ids[n, rank]]),
                ),
                gradient=gradient,
            )

    deconv.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for rank in range(topk):
        # Guided Backpropagation
        gbp.backward(ids=ids[:, [rank]])
        gradients = gbp.generate()

        # Grad-CAM
        gcam.backward(ids=ids[:, [rank]])
        regions = gcam.generate(target_layer=target_layer)

        for n in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(n, classes[ids[n, rank]], probs[n, rank]))

            # Guided Backpropagation
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided-{}.png".format(n, arch, classes[ids[n, rank]]),
                ),
                gradient=gradients[n],
            )

            # Grad-CAM
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(
                        n, arch, target_layer, classes[ids[n, rank]]
                    ),
                ),
                gcam=regions[n, 0],
                raw_image=raw_images[n],
            )

            # Guided Grad-CAM
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided_gradcam-{}-{}.png".format(
                        n, arch, target_layer, classes[ids[n, rank]]
                    ),
                ),
                gradient=torch.mul(regions, gradients)[n],
            )
Beispiel #18
0
def main(image_path, target_layer, arch, topk, cuda):
    """Visualize vanilla backprop, deconvolution, Grad-CAM, guided backprop and
    guided Grad-CAM for one image with a pretrained torchvision model.

    NOTE(review): `predictions` from the first BackPropagation forward pass is
    reused by every later section, so all sections visualize the same top-k
    classes; statement order (forward before backward, hook removal between
    sections) is load-bearing.
    """

    device = torch.device(
        "cuda" if cuda and torch.cuda.is_available() else "cpu")

    if cuda:
        # NOTE(review): assumes CUDA is present when `cuda` is truthy;
        # torch.cuda.current_device() raises on CPU-only machines.
        current_device = torch.cuda.current_device()
        print("Running on the GPU:",
              torch.cuda.get_device_name(current_device))
    else:
        print("Running on the CPU")

    # Synset words: class index -> human-readable ImageNet label
    classes = list()
    with open("samples/synset_words.txt") as lines:
        for line in lines:
            line = line.strip().split(" ", 1)[1]
            line = line.split(", ", 1)[0].replace(" ", "_")
            classes.append(line)

    # Model from torchvision
    model = models.__dict__[arch](pretrained=True)
    model.to(device)
    model.eval()

    # Image preprocessing: BGR -> RGB, resize to 224x224, ImageNet-normalize
    raw_image = cv2.imread(image_path)[..., ::-1]
    raw_image = cv2.resize(raw_image, (224, ) * 2)
    image = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])(raw_image).unsqueeze(0)
    image = image.to(device)
    """
    Common usage:
    1. Wrap your model with visualization classes defined in grad_cam.py
    2. Run forward() with an image
    3. Run backward() with a specific class
    4. Run generate() to export result
    """

    # =========================================================================
    print("Vanilla Backpropagation")

    # predictions[i] is (probability, class index) for the i-th ranked class.
    bp = BackPropagation(model=model)
    predictions = bp.forward(image)

    for i in range(topk):
        print("[{:.5f}] {}".format(predictions[i][0],
                                   classes[predictions[i][1]]))

        bp.backward(idx=predictions[i][1])
        gradient = bp.generate()

        save_gradient(
            "results/{}-vanilla-{}.png".format(arch,
                                               classes[predictions[i][1]]),
            gradient,
        )

    # Remove all the hook function in the "model"
    bp.remove_hook()

    # =========================================================================
    print("Deconvolution")

    deconv = Deconvnet(model=model)
    _ = deconv.forward(image)

    for i in range(topk):
        print("[{:.5f}] {}".format(predictions[i][0],
                                   classes[predictions[i][1]]))

        deconv.backward(idx=predictions[i][1])
        gradient = deconv.generate()

        save_gradient(
            "results/{}-deconvnet-{}.png".format(arch,
                                                 classes[predictions[i][1]]),
            gradient,
        )

    deconv.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM")

    gcam = GradCAM(model=model)
    _ = gcam.forward(image)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(image)

    for i in range(topk):
        print("[{:.5f}] {}".format(predictions[i][0],
                                   classes[predictions[i][1]]))

        # Grad-CAM
        gcam.backward(idx=predictions[i][1])
        region = gcam.generate(target_layer=target_layer)

        save_gradcam(
            "results/{}-gradcam-{}-{}.png".format(arch, target_layer,
                                                  classes[predictions[i][1]]),
            region,
            raw_image,
        )

        # Guided Backpropagation
        gbp.backward(idx=predictions[i][1])
        gradient = gbp.generate()

        # Guided Grad-CAM: gate the guided gradients with the upsampled CAM
        h, w, _ = gradient.shape
        region = cv2.resize(region, (w, h))[..., np.newaxis]
        output = gradient * region

        save_gradient(
            "results/{}-guided-{}.png".format(arch,
                                              classes[predictions[i][1]]),
            gradient,
        )
        save_gradient(
            "results/{}-guided_gradcam-{}-{}.png".format(
                arch, target_layer, classes[predictions[i][1]]),
            output,
        )
def main():
    """Run a GradCAM module over the test split and save heatmaps to disk.

    Builds the test DataLoader, restores the classifier from args.model_path,
    then writes one Grad-CAM visualization per test image into ./save_fig.

    Raises:
        ValueError: if args.net_type names no supported architecture.
    """
    output_dir = "./save_fig"

    # Device configuration
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    ### data config
    test_dataset = load_data.Dog_dataloader(image_dir=image_dir,
                                            num_class=args.num_classes,
                                            mode="test")
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=2)

    ##### model config
    if args.net_type == "resnet50":
        model = models.resnet50(num_c=args.num_classes, pretrained=True)
    elif args.net_type == "resnet34":
        model = models.resnet34(num_c=args.num_classes,
                                num_cc=args.OOD_num_classes,
                                pretrained=True)
    elif args.net_type == "vgg19":
        model = models.vgg19(num_c=args.num_classes,
                             num_cc=args.OOD_num_classes,
                             pretrained=True)
    elif args.net_type == "vgg16":
        model = models.vgg16(num_c=args.num_classes,
                             num_cc=args.OOD_num_classes,
                             pretrained=True)
    elif args.net_type == "vgg19_bn":
        model = models.vgg19_bn(num_c=args.num_classes,
                                num_cc=args.OOD_num_classes,
                                pretrained=True)
    elif args.net_type == "vgg16_bn":
        model = models.vgg16_bn(num_c=args.num_classes,
                                num_cc=args.OOD_num_classes,
                                pretrained=True)
    else:
        # Fail fast instead of hitting an UnboundLocalError below.
        raise ValueError("unsupported net_type: {}".format(args.net_type))

    print("load checkpoint_last")
    checkpoint = torch.load(args.model_path)

    ##### load model weights
    model.load_state_dict(checkpoint["model"])

    #### create output folder
    Path(output_dir).mkdir(exist_ok=True, parents=True)

    model = model.to(device).eval()

    # Layer whose activations/gradients are visualized by GradCAMmodule.
    target_layer = "layer4"

    grad_cam = GradCAMmodule(target_layer, output_dir)
    grad_cam.model_config(model)
    for j, test_data in enumerate(test_loader):
        #### initialized
        org_image = test_data['input'].to(device)
        target_class = test_data['label'].to(device)

        # One-hot label -> class index; model prediction -> class index.
        target_class = int(target_class.argmax().cpu().detach())
        result = model(org_image).argmax()
        print("number: {} pred: {} target: {}".format(j, result, target_class))
        result = int(result.cpu().detach())
        grad_cam.saveGradCAM(org_image, result, j)
Beispiel #20
0
def main(args):
    """Visualize Grad-CAM, vanilla backprop and guided backprop/Grad-CAM for
    args.image using a pretrained ResNet-152 (oldest single-image API)."""

    # Load the synset words (class index -> human-readable label)
    idx2cls = list()
    with open('samples/synset_words.txt') as lines:
        for line in lines:
            line = line.strip().split(' ', 1)[1]
            line = line.split(', ', 1)[0].replace(' ', '_')
            idx2cls.append(line)

    print('Loading a model...')
    model = torchvision.models.resnet152(pretrained=True)
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    print('\nGrad-CAM')
    gcam = GradCAM(model=model, target_layer='layer4.2', cuda=args.cuda)
    gcam.load_image(args.image, transform)
    gcam.forward()

    # Visualize the top-3 predicted classes of each wrapper.
    for i in range(0, 3):
        gcam.backward(idx=gcam.idx[i])
        cls_name = idx2cls[gcam.idx[i]]
        output = gcam.generate()
        print('\t{:.5f}\t{}'.format(gcam.prob[i], cls_name))
        gcam.save('results/{}_gcam.png'.format(cls_name), output)

    print('\nVanilla Backpropagation')
    bp = BackPropagation(model=model, target_layer='conv1', cuda=args.cuda)
    bp.load_image(args.image, transform)
    bp.forward()

    for i in range(0, 3):
        bp.backward(idx=bp.idx[i])
        cls_name = idx2cls[bp.idx[i]]
        output = bp.generate()
        print('\t{:.5f}\t{}'.format(bp.prob[i], cls_name))
        bp.save('results/{}_bp.png'.format(cls_name), output)

    print('\nGuided Backpropagation')
    gbp = GuidedBackPropagation(model=model,
                                target_layer='conv1',
                                cuda=args.cuda)
    gbp.load_image(args.image, transform)
    gbp.forward()

    for i in range(0, 3):
        # NOTE(review): the class comes from gcam's ranking but the printed
        # probability below is gbp.prob[i]; these agree only if both wrappers
        # rank the same image identically — verify.
        cls_idx = gcam.idx[i]
        cls_name = idx2cls[cls_idx]

        gcam.backward(idx=cls_idx)
        output_gcam = gcam.generate()

        gbp.backward(idx=cls_idx)
        output_gbp = gbp.generate()

        # Min-max normalize the CAM to [0, 1].
        # NOTE(review): a constant CAM would make the max 0 after the shift,
        # producing a division by zero.
        output_gcam -= output_gcam.min()
        output_gcam /= output_gcam.max()
        output_gcam = cv2.resize(output_gcam, (224, 224))
        output_gcam = cv2.cvtColor(output_gcam, cv2.COLOR_GRAY2BGR)

        # Guided Grad-CAM: guided backprop gradients gated by the CAM.
        output = output_gbp * output_gcam

        print('\t{:.5f}\t{}'.format(gbp.prob[i], cls_name))
        gbp.save('results/{}_gbp.png'.format(cls_name), output_gbp)
        gbp.save('results/{}_ggcam.png'.format(cls_name), output)
Beispiel #21
0
def visualization(image_paths, model_name, model_path, target_layer, arch,
                  topk, output_dir, cuda):
    """
    Visualize model responses given multiple images.

    For every ``*.jpg`` directly under ``image_paths[0]``, runs Vanilla
    Backpropagation, Deconvolution, Grad-CAM, Guided Backpropagation and
    Guided Grad-CAM for the ``topk`` highest-confidence classes, plus a
    channel (feature-map) visualization, writing all results as PNG files
    into ``output_dir``.

    Args:
        image_paths: list whose first element is a directory of .jpg images.
        model_name: name of an importable module exposing ``make_model()``.
        model_path: path of a checkpoint whose state dict is loaded
            (``strict=False``, so partial checkpoints are tolerated).
        target_layer: layer name Grad-CAM attaches to.
        arch: architecture tag, used only in the output file names.
        topk: number of top predicted classes to visualize per image.
        output_dir: directory the PNG files are written to.
        cuda: whether to run on GPU (resolved by ``get_device``).
    """

    device = get_device(cuda)

    # Synset words (kept for debugging; the file names below use raw ids).
    classes = get_classtable()

    # Model: built by the user-supplied module, weights from the checkpoint.
    module = import_module(model_name)
    model = module.make_model().to(device)
    # map_location=device lets a GPU-saved checkpoint load on a CPU-only box.
    model.load_state_dict(torch.load(model_path, map_location=device),
                          strict=False)
    model.eval()

    # Images: every *.jpg directly under the first path.
    images = []
    raw_images = []
    print("Images:")
    for index, img in enumerate(gb.glob(image_paths[0] + '/*.jpg')):
        print("\t#{}: {}".format(index, img))
        image, raw_image = preprocess(img)
        images.append(image)
        raw_images.append(raw_image)
    images = torch.stack(images).to(device)
    """
    Common usage:
    1. Wrap your model with visualization classes defined in grad_cam.py
    2. Run forward() with images
    3. Run backward() with a list of specific classes
    4. Run generate() to export results
    """

    # =========================================================================
    print("Vanilla Backpropagation:")

    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)  # class ids sorted by confidence

    for i in range(topk):
        # In this example, we specify the high confidence classes
        bp.backward(ids=ids[:, [i]])
        gradients = bp.generate()

        # Save results as image files
        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, ids[j, i], probs[j, i]))

            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-vanilla-{}.png".format(j, arch, ids[j, i]),
                ),
                gradient=gradients[j],
            )

    # Remove all the hook functions registered on the "model"
    bp.remove_hook()

    # =========================================================================
    print("Deconvolution:")

    deconv = Deconvnet(model=model)
    _ = deconv.forward(images)

    for i in range(topk):
        deconv.backward(ids=ids[:, [i]])
        gradients = deconv.generate()

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, ids[j, i], probs[j, i]))

            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-deconvnet-{}.png".format(j, arch, ids[j, i]),
                ),
                gradient=gradients[j],
            )

    deconv.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for i in range(topk):
        # Guided Backpropagation
        gbp.backward(ids=ids[:, [i]])
        gradients = gbp.generate()

        # Grad-CAM
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, ids[j, i], probs[j, i]))

            # Guided Backpropagation
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided-{}.png".format(j, arch, ids[j, i]),
                ),
                gradient=gradients[j],
            )

            # Grad-CAM
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(j, arch, target_layer,
                                                     ids[j, i]),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )

            # Guided Grad-CAM: element-wise product of CAM and guided grads
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided_gradcam-{}-{}.png".format(
                        j, arch, target_layer, ids[j, i]),
                ),
                gradient=torch.mul(regions, gradients)[j],
            )

    # Unregister the remaining hooks before re-wrapping the model below,
    # matching the cleanup done for bp/deconv above (fixes a hook leak).
    gbp.remove_hook()
    gcam.remove_hook()

    # =========================================================================
    print("Channel Visialization:")
    gcam = GradCAM(model=model)
    _ = gcam.forward(images)
    feature_map = gcam.channel_visualization(target_layer=target_layer)
    draw_features(4, 4, feature_map, output_dir, target_layer)
Beispiel #22
0
def demo1(image_paths, target_layer, arch, topk, output_dir, cuda):
    """
    Visualize model responses given multiple images.

    Loads a fine-tuned 9-class ResNet checkpoint from a hard-coded Google
    Drive path, then writes Vanilla Backpropagation, Deconvolution,
    Grad-CAM, Guided Backpropagation and Guided Grad-CAM result images for
    the ``topk`` highest-confidence classes of each input into
    ``output_dir``.

    Args:
        image_paths: iterable of image file paths to process.
        target_layer: layer name Grad-CAM attaches to.
        arch: architecture tag, used only in the output file names.
        topk: number of top predicted classes to visualize per image.
        output_dir: directory the PNG files are written to.
        cuda: whether to run on GPU (resolved by ``get_device``).
    """

    # check if CUDA is available
    train_on_gpu = torch.cuda.is_available()

    if not train_on_gpu:
        print('CUDA is not available.  Training on CPU ...')
    else:
        print('CUDA is available!  Training on GPU ...')

    device = get_device(cuda)

    # Synset words (class-id -> name lookup used in prints and file names)
    classes = get_classtable()

    # Fine-tuned checkpoint; hard-coded Colab/Drive path.
    # TODO(review): make this a parameter instead of a constant.
    PRE_MODEL_DIR = '/content/gdrive/My Drive/UnB/TCC-1/TCC1-1-dataset-final/restnet_model152_trained_exp7.pt'

    model_name = 'resnet'
    num_classes = 9
    feature_extract = False

    # NOTE(review): initialize_model presumably returns (model, expected
    # input size); input_size is unused here — confirm against its definition.
    model, input_size = initialize_model(model_name,
                                         num_classes,
                                         feature_extract,
                                         use_pretrained=True)

    # map_location='cpu' is required to load a GPU-saved checkpoint
    # on a machine without CUDA.
    if train_on_gpu:
        state = torch.load(PRE_MODEL_DIR)
    else:
        state = torch.load(PRE_MODEL_DIR, map_location='cpu')

    # Loading weights in resnet architecture; the checkpoint stores the
    # weights under the 'state_dict' key.
    model.load_state_dict(state['state_dict'])

    model.to(device)
    model.eval()

    # Images: preprocessed tensors plus raw copies for heatmap overlays.
    images = []
    raw_images = []
    print("Images:")
    for i, image_path in enumerate(image_paths):
        print("\t#{}: {}".format(i, image_path))
        image, raw_image = preprocess(image_path)
        images.append(image)
        raw_images.append(raw_image)
    images = torch.stack(images).to(device)
    """
    Common usage:
    1. Wrap your model with visualization classes defined in grad_cam.py
    2. Run forward() with images
    3. Run backward() with a list of specific classes
    4. Run generate() to export results
    """

    # =========================================================================
    print("Vanilla Backpropagation:")

    bp = BackPropagation(model=model)
    # probs/ids: per-image class probabilities and ids, sorted by confidence.
    probs, ids = bp.forward(images)

    for i in range(topk):
        # In this example, we specify the high confidence classes
        bp.backward(ids=ids[:, [i]])
        gradients = bp.generate()

        # Save results as image files
        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j,
                                                                           i]))

            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-vanilla-{}.png".format(j, arch, classes[ids[j, i]]),
                ),
                gradient=gradients[j],
            )

    # Remove all the hook function in the "model"
    bp.remove_hook()

    # =========================================================================
    print("Deconvolution:")

    deconv = Deconvnet(model=model)
    _ = deconv.forward(images)

    for i in range(topk):
        deconv.backward(ids=ids[:, [i]])
        gradients = deconv.generate()

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j,
                                                                           i]))

            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-deconvnet-{}.png".format(j, arch, classes[ids[j,
                                                                         i]]),
                ),
                gradient=gradients[j],
            )

    deconv.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for i in range(topk):
        # Guided Backpropagation
        gbp.backward(ids=ids[:, [i]])
        gradients = gbp.generate()

        # Grad-CAM
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j,
                                                                           i]))

            # Guided Backpropagation
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided-{}.png".format(j, arch, classes[ids[j, i]]),
                ),
                gradient=gradients[j],
            )

            # Grad-CAM heatmap blended with the raw image
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(j, arch, target_layer,
                                                     classes[ids[j, i]]),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )

            # Guided Grad-CAM: element-wise product of CAM and guided grads
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided_gradcam-{}-{}.png".format(
                        j, arch, target_layer, classes[ids[j, i]]),
                ),
                gradient=torch.mul(regions, gradients)[j],
            )
def demo1(image_paths, target_layer, arch, topk, output_dir, cuda):
    """
    Visualize model responses given multiple images.

    Uses the pretrained torchvision model named by ``arch`` and writes
    Vanilla Backpropagation, Deconvolution, Grad-CAM, Guided
    Backpropagation and Guided Grad-CAM images for the ``topk`` most
    confident classes of every input image into ``output_dir``.

    Typical flow with the grad_cam.py wrappers: forward() on the batch,
    backward() on the chosen class ids, generate() to obtain the maps.

    NOTE(review): this redefines the ``demo1`` declared earlier in the
    file; at import time this later definition wins.
    """

    device = get_device(cuda)

    # Synset words: class-id -> human-readable name.
    classes = get_classtable()

    # Pretrained model from torchvision, chosen by architecture name.
    model = models.__dict__[arch](pretrained=True)
    model.to(device)
    model.eval()

    # Batched input tensors plus the raw images used for overlays.
    images, raw_images = load_images(image_paths)
    images = torch.stack(images).to(device)

    count = len(images)

    # =========================================================================
    print("Vanilla Backpropagation:")

    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)  # sorted

    for rank in range(topk):
        bp.backward(ids=ids[:, [rank]])
        gradients = bp.generate()

        # Save one gradient image per input.
        for n in range(count):
            label = classes[ids[n, rank]]
            print("\t#{}: {} ({:.5f})".format(n, label, probs[n, rank]))
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-vanilla-{}.png".format(n, arch, label),
                ),
                gradient=gradients[n],
            )

    # Drop the hooks registered on the model.
    bp.remove_hook()

    # =========================================================================
    print("Deconvolution:")

    deconv = Deconvnet(model=model)
    _ = deconv.forward(images)

    for rank in range(topk):
        deconv.backward(ids=ids[:, [rank]])
        gradients = deconv.generate()

        for n in range(count):
            label = classes[ids[n, rank]]
            print("\t#{}: {} ({:.5f})".format(n, label, probs[n, rank]))
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-deconvnet-{}.png".format(n, arch, label),
                ),
                gradient=gradients[n],
            )

    deconv.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for rank in range(topk):
        # Guided Backpropagation gradients for this rank's classes.
        gbp.backward(ids=ids[:, [rank]])
        gradients = gbp.generate()

        # Grad-CAM region maps for the same classes.
        gcam.backward(ids=ids[:, [rank]])
        regions = gcam.generate(target_layer=target_layer)

        for n in range(count):
            label = classes[ids[n, rank]]
            print("\t#{}: {} ({:.5f})".format(n, label, probs[n, rank]))

            # Guided Backpropagation
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided-{}.png".format(n, arch, label),
                ),
                gradient=gradients[n],
            )

            # Grad-CAM heatmap blended with the raw image
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(n, arch, target_layer,
                                                     label),
                ),
                gcam=regions[n, 0],
                raw_image=raw_images[n],
            )

            # Guided Grad-CAM: product of region map and guided gradients
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided_gradcam-{}-{}.png".format(
                        n, arch, target_layer, label),
                ),
                gradient=torch.mul(regions, gradients)[n],
            )
def calc_cam(output_path, image_path, model, transform, arch, topk, cuda):
    """
    Compute Grad-CAM, vanilla/guided backpropagation and guided Grad-CAM
    maps for a single image and save them as PNG files.

    Args:
        output_path: directory the result images are written to.
        image_path: path of the input image.
        model: torch model to explain (moved to the chosen device here).
        transform: callable turning the PIL image into a CHW float tensor.
        arch: architecture key selecting the Grad-CAM target layer.
        topk: number of top predicted classes to visualize.
        cuda: request GPU execution; falls back to CPU when unavailable.

    Returns:
        tuple ``(results, probs)``: the list of written file paths and the
        probabilities returned by the final (guided backprop) forward pass.

    Raises:
        ValueError: if ``arch`` is not one of the known architectures.
    """
    results = []
    CONFIG = {
        'resnet152': {
            'target_layer': 'layer4.2',
            'input_size': 224
        },
        'vgg19': {
            'target_layer': 'features.36',
            'input_size': 224
        },
        'vgg19_bn': {
            'target_layer': 'features.52',
            'input_size': 224
        },
        'inception_v3': {
            'target_layer': 'Mixed_7c',
            'input_size': 299
        },
        'densenet201': {
            'target_layer': 'features.denseblock4',
            'input_size': 224
        },
        # Add your model
    }.get(arch)
    # Fail fast with a clear message instead of an opaque TypeError on
    # CONFIG['target_layer'] further down when the arch is unknown.
    if CONFIG is None:
        raise ValueError('unsupported arch: {}'.format(arch))

    device = torch.device(
        'cuda' if cuda and torch.cuda.is_available() else 'cpu')

    # Report the device actually selected. (Branching on the raw `cuda`
    # flag crashed in torch.cuda.current_device() when cuda=True was
    # requested on a machine without a GPU, even though `device` had
    # correctly fallen back to CPU.)
    if device.type == 'cuda':
        current_device = torch.cuda.current_device()
        print('Running on the GPU:',
              torch.cuda.get_device_name(current_device))
    else:
        print('Running on the CPU')

    # Model
    model.to(device)
    model.eval()

    # Image: keep both a batched tensor and an HWC numpy copy for overlays.
    pil_image = Image.open(image_path)
    raw_image = transform(pil_image)
    image = raw_image.unsqueeze(0)
    raw_image = raw_image.numpy().transpose(1, 2, 0)  # CHW -> HWC

    # =========================================================================
    print('Grad-CAM')
    # =========================================================================
    gcam = GradCAM(model=model)
    probs, idx = gcam.forward(image.to(device))

    for i in range(0, topk):
        gcam.backward(idx=idx[i])
        output = gcam.generate(target_layer=CONFIG['target_layer'])

        result_name = '{}_gcam_{}.png'.format(idx[i].cpu().numpy(), arch)
        save_gradcam(os.path.join(output_path, result_name), output, raw_image)
        results.append(os.path.join(output_path, result_name))
        print('[{:.5f}] {}'.format(probs[i], idx[i].cpu().numpy()))

    # =========================================================================
    print('Vanilla Backpropagation')
    # =========================================================================
    bp = BackPropagation(model=model)
    probs, idx = bp.forward(image.to(device))

    for i in range(0, topk):
        bp.backward(idx=idx[i])
        output = bp.generate()

        result_name = '{}_bp_{}.png'.format(idx[i].cpu().numpy(), arch)
        save_gradient(os.path.join(output_path, result_name), output)
        results.append(os.path.join(output_path, result_name))
        print('[{:.5f}] {}'.format(probs[i], idx[i].cpu().numpy()))

    # NOTE(review): a commented-out Deconvolution pass was removed here; it
    # carried an upstream TODO about removing hooks in advance.

    # =========================================================================
    print('Guided Backpropagation/Guided Grad-CAM')
    # =========================================================================
    gbp = GuidedBackPropagation(model=model)
    probs, idx = gbp.forward(image.to(device))

    for i in range(0, topk):
        gcam.backward(idx=idx[i])
        region = gcam.generate(target_layer=CONFIG['target_layer'])

        gbp.backward(idx=idx[i])
        feature = gbp.generate()

        # Resize the CAM to the gradient map's size and broadcast it over
        # the color channels before taking the element-wise product.
        h, w, _ = feature.shape
        region = cv2.resize(region, (w, h))[..., np.newaxis]
        output = feature * region

        result_name = '{}_gbp_{}.png'.format(idx[i].cpu().numpy(), arch)
        save_gradient(os.path.join(output_path, result_name), feature)
        results.append(os.path.join(output_path, result_name))
        result_name = '{}_ggcam_{}.png'.format(idx[i].cpu().numpy(), arch)
        save_gradient(os.path.join(output_path, result_name), output)
        results.append(os.path.join(output_path, result_name))
        print('[{:.5f}] {}'.format(probs[i], idx[i].cpu().numpy()))
    return (results, probs)