def main(args):
    """Generate Grad-CAM, Grad-CAM++, Guided Backprop and Guided Grad-CAM
    visualizations for one image and save them to ``args.output_dir``.

    Args:
        args: namespace with image_path, network, weight_path, layer_name,
            class_id and output_dir attributes.
    """
    # Input image: resize to the network's 224x224 input and scale to [0, 1].
    img = io.imread(args.image_path)
    img = np.float32(cv2.resize(img, (224, 224))) / 255
    inputs = prepare_input(img)
    # Output images collected here, keyed by visualization name.
    image_dict = {}
    # Network
    net = get_net(args.network, args.weight_path)
    # Grad-CAM
    layer_name = get_last_conv_name(net) if args.layer_name is None else args.layer_name
    grad_cam = GradCAM(net, layer_name)
    mask = grad_cam(inputs, args.class_id)  # cam mask
    image_dict['cam'], image_dict['heatmap'] = gen_cam(img, mask)
    grad_cam.remove_handlers()
    # Grad-CAM++
    grad_cam_plus_plus = GradCamPlusPlus(net, layer_name)
    mask_plus_plus = grad_cam_plus_plus(inputs, args.class_id)  # cam mask
    image_dict['cam++'], image_dict['heatmap++'] = gen_cam(img, mask_plus_plus)
    grad_cam_plus_plus.remove_handlers()

    # GuidedBackPropagation
    gbp = GuidedBackPropagation(net)
    # Zero any stale input gradients from the CAM passes.  BUG FIX: ``.grad``
    # is None until a gradient has actually been accumulated on the input
    # tensor, so calling ``zero_()`` unconditionally can raise AttributeError.
    if inputs.grad is not None:
        inputs.grad.zero_()
    grad = gbp(inputs)

    gb = gen_gb(grad)
    image_dict['gb'] = norm_image(gb)
    # Guided Grad-CAM = guided-backprop gradients weighted by the CAM mask.
    cam_gb = gb * mask[..., np.newaxis]
    image_dict['cam_gb'] = norm_image(cam_gb)

    save_image(image_dict, os.path.basename(args.image_path), args.network, args.output_dir)
Пример #2
0
def grad_cam(image):
    """Return the Grad-CAM map averaged over VGG19-BN's top-5 classes.

    Args:
        image: HxWx3 uint8/float array; resized to 224x224 internally.

    Returns:
        The mean of the Grad-CAM maps generated for the five highest-ranked
        predicted classes at layer ``features.52``.
    """
    topk = 5  # number of top-ranked classes averaged into the output
    model = models.vgg19_bn(pretrained=True)
    model.to(device)
    model.eval()
    # ImageNet preprocessing: resize, to-tensor, channel-wise normalize.
    image = cv2.resize(image, (224, 224))
    image = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        )
    ])(image).unsqueeze(0)
    image = image.to(device)

    gcam = GradCAM(model=model)
    probs, idx = gcam.forward(image)

    output = None
    for i in range(topk):
        gcam.backward(idx=idx[i])
        temp = gcam.generate(target_layer='features.52')
        # Idiom fix: ``output is not None`` instead of ``not output is None``.
        if output is not None:
            output += temp
        else:
            output = temp

    # Average instead of a hard-coded ``/= 5`` so the divisor tracks topk.
    output /= topk
    return output
Пример #3
0
def main():
    """Evaluate a checkpointed model on one CIFAR-100 batch and dump
    Grad-CAM visualizations for a fixed set of sample indices.

    Relies on module-level names: MEAN, STD, SAMPLES, args, get_model,
    get_image, save_gradcam, save_image, GradCAM.
    """
    # Dataset: CIFAR-100 validation split with the project's normalization.
    print('Creating dataset...')
    transform_val = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(MEAN, STD)])
    valset = torchvision.datasets.CIFAR100(root='./data',
                                           train=False,
                                           download=True,
                                           transform=transform_val)
    val_loader = DataLoader(valset,
                            batch_size=100,
                            shuffle=False,
                            num_workers=4,
                            pin_memory=True)

    # Model: restore the best checkpoint; fail loudly when it is missing.
    model_path = os.path.join(args.checkpoint, args.model, 'best_model.pt')
    print('Loading model...')
    model = get_model(args.model)
    if os.path.exists(model_path):
        model.load_state_dict(torch.load(model_path))
    else:
        raise Exception('Cannot find model', model_path)
    if torch.cuda.device_count() > 1:
        print("Using", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)
    model.cuda()
    cudnn.benchmark = True

    # Output directory for the rendered heatmaps.
    result_path = os.path.join('results', args.model)
    if not os.path.exists(result_path):
        os.makedirs(result_path)

    gcam = GradCAM(model=model)

    model.eval()
    acc = 0
    for i, (inputs, labels) in enumerate(val_loader):
        inputs, labels = (Variable(inputs.cuda()), Variable(labels.cuda()))
        outputs = model(inputs)
        outputs, labels = outputs.data, labels.data
        _, preds = outputs.topk(1, 1, True, True)
        corrects = preds.eq(labels.view(-1, 1).expand_as(preds))
        acc += torch.sum(corrects)
        for sample in SAMPLES:
            image = get_image(inputs[sample])
            probs, idx = gcam.forward(inputs[sample].unsqueeze(0))
            # NOTE(review): backward uses idx[1], the *second*-ranked class,
            # not the top prediction — confirm this is intentional.
            gcam.backward(idx=idx[1])
            output = gcam.generate(target_layer='layer4.2')
            save_gradcam(result_path, '{}_gcam.png'.format(sample), output,
                         image)
            save_image(result_path, image, sample, preds[sample],
                       labels[sample])
        break
    # NOTE(review): because of the ``break`` above only one batch (100
    # images) contributes to ``acc``, yet it is divided by len(valset);
    # the value is also never printed or returned (dead computation).
    acc = acc.item() / len(valset) * 100
    print('Finish!!!')
Пример #4
0
    def __init__(self, config_path, cuda):
        """Initialize device, config, model and input image, then wrap the
        loaded model with GradCAM.

        Args:
            config_path: path to the experiment configuration file.
            cuda: flag selecting GPU execution (consumed by get_device).
        """
        # Order matters: device and config are presumably read by
        # load_model/load_image — confirm before reordering these calls.
        self.get_device(cuda)
        self.load_config(config_path)
        self.load_model()
        self.load_image()
        self.gcam = GradCAM(self.model)

        # Architecture name as recorded in the loaded configuration.
        self.model_name = self.config['model']['arch']
Пример #5
0
def main():
    """Run the LFLSeg module (ResNet-101) on one image and save the
    Grad-CAM heatmap of the "full leaf" class.

    Reads paths/flags from the module-level ``args`` namespace.
    """
    device = torch.device(
        'cuda' if args.cuda and torch.cuda.is_available() else 'cpu')

    if args.cuda:
        current_device = torch.cuda.current_device()
        print('Running on the GPU:',
              torch.cuda.get_device_name(current_device))
    else:
        print('Running on the CPU')

    # LFLSeg module: ResNet-101 backbone with a 3-way head
    # (full leaf, partial leaf, non-leaf).
    LFLSeg_model = models.resnet101()
    num_ftrs = LFLSeg_model.fc.in_features
    LFLSeg_model.fc = nn.Linear(num_ftrs, 3)

    # Download the pretrained model: https://drive.google.com/drive/folders/1HqBYjUGXxl1eAkzhURoV5JAqWHvBvvTp?usp=sharing
    load_path = '/path/to/LFLSeg_resnet101.pth'
    LFLSeg_model.load_state_dict(torch.load(load_path), strict=True)

    LFLSeg_model.to(device)
    LFLSeg_model.eval()

    # Load the GradCAM function
    gcam = GradCAM(model=LFLSeg_model)

    # Guard clause instead of the ``== False`` comparison (PEP 8), which
    # also removes one level of needless else-nesting.
    if not os.path.exists(args.input):
        print("The image path doesn't exist!")
        return

    # Create the output folder on demand.
    if not os.path.exists(args.output):
        os.makedirs(args.output)

    filename = os.path.basename(args.input)

    raw_image = Image.open(args.input).convert('RGB')

    image = transforms.Compose([
        transforms.Resize(size=(224, 224), interpolation=3),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])(raw_image).unsqueeze(0)

    probs, idx = gcam.forward(image.to(device))

    # Only get the heatmap for the "full leaf" class (i.e., idx=0)
    gcam.backward(idx=0)
    output = gcam.generate(target_layer=args.target_layer)

    save_gradcam('{}/{}_gcam.png'.format(args.output, filename[:-4]),
                 output,
                 raw_image,
                 threshold=args.threshold,
                 is_segment=args.segment)
Пример #6
0
def demo1(image_paths, target_layer, arch, topk, output_dir, cuda, model):
    """
    Visualize model responses given multiple images; returns the list of
    Grad-CAM file paths that were written.
    """
    device = get_device(cuda)
    saved_files = []

    # Human-readable synset class names.
    classes = get_classtable()

    # Fall back to a pretrained torchvision model when none is supplied.
    if model is None:
        model = models.__dict__[arch](pretrained=True)
    model.to(device)
    model.eval()

    # Load and batch the inputs, keeping the raw versions for overlaying.
    images, raw_images = load_images(image_paths)
    images = torch.stack(images).to(device)

    # Usage pattern: wrap the model, run forward() once, then backward()
    # per class and generate() to export the maps.
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")
    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)
    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for rank in range(topk):
        # Guided Backpropagation for the rank-th predicted class.
        gbp.backward(ids=ids[:, [rank]])
        gradients = gbp.generate()

        # Grad-CAM for the same class.
        gcam.backward(ids=ids[:, [rank]])
        regions = gcam.generate(target_layer=target_layer)

        for n in range(len(images)):
            # Write one Grad-CAM overlay per input image.
            filename = osp.join(
                output_dir,
                "{}-{}-gradcam-{}.png".format(n, arch, target_layer))
            saved_files.append(filename)
            save_gradcam(filename, gcam=regions[n, 0], raw_image=raw_images[n])
    return saved_files
def VideoSpatialPrediction(vid_name,
                           target,
                           net,
                           num_categories,
                           num_samples=25,
                           new_size=299,
                           batch_size=2):
    """Load ``num_samples`` grayscale frames from ``vid_name``, run Grad-CAM
    for class ``target`` on one frame, and save the heatmap overlay.

    NOTE(review): ``batch_size`` is accepted but never used, and the
    returned ``prediction`` array is always all-zeros — verify callers.
    """
    gc = GradCAM(model=net)

    clip_mean = [0.5] * num_samples
    clip_std = [0.226] * num_samples

    normalize = video_transforms.Normalize(mean=clip_mean, std=clip_std)
    val_transform = video_transforms.Compose([
        video_transforms.ToTensor(),
        normalize,
    ])

    # Single-channel frames (grayscale).
    deep = 1

    # inception = 299,299, resnet = 224,224
    dims = (new_size, new_size, deep, num_samples)
    rgb = np.zeros(shape=dims, dtype=np.float64)
    rgb_flip = np.zeros(shape=dims, dtype=np.float64)

    # Read frames vr_00.png .. vr_{num_samples-1}.png; also build a
    # horizontally flipped copy (NOTE(review): rgb_flip is never used).
    for i in range(num_samples):
        img_file = os.path.join(vid_name, 'vr_{0:02d}.png'.format(i))
        img = cv2.imread(img_file, cv2.IMREAD_GRAYSCALE)
        rgb[:, :, 0, i] = img
        rgb_flip[:, :, 0, i] = img[:, ::-1]

    # Normalize each frame independently and stack into (N, C, H, W).
    _, _, _, c = rgb.shape
    rgb_list = []
    for c_index in range(c):
        cur_img = rgb[:, :, :, c_index]
        cur_img_tensor = val_transform(cur_img)
        rgb_list.append(np.expand_dims(cur_img_tensor.numpy(), 0))

    rgb_np = np.concatenate(rgb_list, axis=0)
    prediction = np.zeros((num_categories, rgb.shape[3]))

    # NOTE(review): with the default num_samples=25 this slice is empty
    # (rgb_np has only 25 frames but index is 50) and the code below would
    # fail or misbehave — confirm the intended frame index.
    index = 50
    input_data = rgb_np[index:index + 1, :, :, :]
    imgDataTensor = torch.from_numpy(input_data).type(torch.FloatTensor).cuda()
    imgDataVar = torch.autograd.Variable(imgDataTensor)

    # Grad-CAM for the requested target class at Inception's Mixed_7c.
    probs, ids = gc.forward(imgDataVar)
    ids_ = torch.LongTensor([[target]] * len(imgDataVar)).to(
        torch.device("cuda"))
    gc.backward(ids=ids_)
    regions = gc.generate(target_layer="Mixed_7c")
    save_gradcam(vid_name.split("/")[-1] + ".png",
                 gcam=regions[0, 0],
                 raw_image=rgb[:, :, :, index])

    return prediction
Пример #8
0
def demo2(image_paths, output_dir, cuda):
    """
    Generate Grad-CAM at different layers of ResNet-152
    for one fixed target class ("bull mastiff") over a batch of images.
    """

    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Model
    model = models.resnet152(pretrained=True)
    model.to(device)
    model.eval()

    # The four residual layers
    target_layers = ["relu", "layer1", "layer2", "layer3", "layer4"]
    target_class = 243  # "bull mastif"

    # Images: keep both the preprocessed tensors and raw images (for overlay).
    images = []
    raw_images = []
    print("Images:")
    for i, image_path in enumerate(image_paths):
        print("\t#{}: {}".format(i, image_path))
        image, raw_image = preprocess(image_path)
        images.append(image)
        raw_images.append(raw_image)
    images = torch.stack(images).to(device)

    # One forward pass; then backprop from the fixed class for every image.
    gcam = GradCAM(model=model)
    probs, ids = gcam.forward(images)
    ids_ = torch.LongTensor([[target_class]] * len(images)).to(device)
    gcam.backward(ids=ids_)

    for target_layer in target_layers:
        print("Generating Grad-CAM @{}".format(target_layer))

        # Grad-CAM
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            # NOTE(review): ``probs[ids == target_class]`` boolean-masks the
            # whole batch, so the same value prints for every j, and float()
            # raises if the mask selects more than one element — verify.
            print("\t#{}: {} ({:.5f})".format(
                j, classes[target_class], float(probs[ids == target_class])))

            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(j, "resnet152",
                                                     target_layer,
                                                     classes[target_class]),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )
Пример #9
0
def gradcam_classify(model):
    """Run Grad-CAM on every image under ``samples/`` and write overlays
    to ``results/``.

    Relies on module-level names: device, getclasses, load_images,
    BackPropagation, GradCAM, save_gradcam, osp.
    """
    # Collect every file in the samples directory as an input path.
    image_paths = []
    for f in os.listdir("samples/"):
        fname = os.path.join("samples/", f)
        image_paths.append(fname)
    #print("my image paths")
    #print(image_paths)

    #for i in range(1,26):
    #    image_paths.append('samples/'+str(i)+'.PNG')
    target_layer = 'layer4'
    topk = 1
    output_dir = 'results'

    classes = getclasses()

    #model.to(device)
    model.eval()

    # Images
    images, raw_images = load_images(image_paths)
    images = torch.stack(images).to(device)

    # =========================================================================

    # Forward pass once to rank the predicted classes per image.
    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)  # sorted

    # =========================================================================
    print("Grad-CAM in action:")
    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    for i in range(topk):
        # Grad-CAM
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)

        print('len images = ', len(images))

        for j in range(len(images)):
            # Grad-CAM
            # NOTE(review): ``global filename`` leaks the last written path
            # into module scope — presumably read elsewhere; confirm before
            # removing.
            global filename
            filename = osp.join(
                output_dir,
                "{}-{}-gradcam-{}-{}.png".format(j, 'resnet', target_layer,
                                                 classes[ids[j, i]]),
            )
            #print(f'gradcam generated filenames {i}')
            save_gradcam(
                filename=filename,
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )
Пример #10
0
def main(image_path, cuda):
    """Save ResNet-152 Grad-CAM maps of the top-1 predicted class at
    four residual layers.

    Args:
        image_path: path of the image to explain.
        cuda: use the GPU when available.
    """
    device = torch.device(
        "cuda" if cuda and torch.cuda.is_available() else "cpu")

    if cuda:
        current_device = torch.cuda.current_device()
        print("Running on the GPU:",
              torch.cuda.get_device_name(current_device))
    else:
        print("Running on the CPU")

    # Synset words -> human-readable class names.
    classes = list()
    with open("samples/synset_words.txt") as lines:
        for line in lines:
            line = line.strip().split(" ", 1)[1]
            line = line.split(", ", 1)[0].replace(" ", "_")
            classes.append(line)

    # Model
    model = models.resnet152(pretrained=True)
    model.to(device)
    model.eval()

    # Image: BGR -> RGB, resize, normalize with ImageNet statistics.
    raw_image = cv2.imread(image_path)[..., ::-1]
    raw_image = cv2.resize(raw_image, (224, ) * 2)
    image = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])(image=raw_image).unsqueeze(0) if False else transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])(raw_image).unsqueeze(0)
    image = image.to(device)

    gcam = GradCAM(model=model)
    probs, idx = gcam.forward(image)
    # BUG FIX: the original read ``predictions[0][1]`` — i.e. the second-
    # highest *probability value* from the (probs, idx) tuple — and used it
    # as a class index.  Use the top-ranked class id instead.
    top_idx = idx[0]

    for target_layer in ["layer1", "layer2", "layer3", "layer4"]:
        print("Generating Grad-CAM @{}".format(target_layer))

        # Grad-CAM for the top-1 class at this layer.
        gcam.backward(idx=top_idx)
        region = gcam.generate(target_layer=target_layer)

        save_gradcam(
            "results/{}-gradcam-{}-{}.png".format("resnet152", target_layer,
                                                  classes[top_idx]),
            region,
            raw_image,
        )
Пример #11
0
def process_a_batch(image_paths, target_layer, arch, topk, output_dir, cuda):
    """Run Grad-CAM for the top-k classes of each image in a batch and
    save one heatmap per (image, class), then aggressively free GPU memory.

    Args:
        image_paths: list of input image paths.
        target_layer: layer name passed to gcam.generate().
        arch: torchvision architecture name.
        topk: how many top-ranked classes to visualize per image.
        output_dir: directory where heatmaps are written.
        cuda: use the GPU when available.
    """
    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Model from torchvision
    model = models.__dict__[arch](pretrained=True)
    model.to(device)
    model.eval()

    # Images
    images = load_images_only(image_paths)
    images = torch.stack(images).to(device)

    # Forward pass to rank classes per image (ids sorted by probability).
    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)  # sorted

    # =========================================================================
    print("Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    for i in range(topk):
        # Grad-CAM
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)
        regions = regions.cpu().numpy()
        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(image_paths[j],
                                              classes[ids[j, i]], probs[j, i]))
            # Grad-CAM
            save_cam(filename=os.path.join(
                output_dir,
                "{}-{}-{}-{:.3}.png".format(os.path.basename(image_paths[j]),
                                            classes[ids[j, i]], ids[j, i],
                                            probs[j, i]),
            ),
                     gcam=regions[j, 0])
    # Explicit cleanup: detach hooks and drop every GPU-resident object so
    # the cache flush below can actually return memory (this function is
    # presumably called repeatedly over many batches).
    bp.remove_hook()
    gcam.remove_hook()
    del bp
    del images
    del gcam
    del model
    del regions
    del probs
    del ids
    del _
    torch.cuda.empty_cache()
Пример #12
0
def demo2(image_paths, cuda):
    """
    Generate Grad-CAM at different layers of ResNet-152 for each image's
    own top-1 predicted class.
    """
    device = get_device(cuda)

    # Human-readable synset class names.
    classes = get_classtable()

    # Pretrained backbone in inference mode.
    model = models.resnet152(pretrained=True)
    model.to(device)
    model.eval()

    # The four residual stages to visualize.
    target_layers = ["layer1", "layer2", "layer3", "layer4"]

    # Load every image, keeping the raw version for overlaying.
    images, raw_images = [], []
    print("Images:")
    for idx, path in enumerate(image_paths):
        print("\t#{}: {}".format(idx, path))
        tensor, raw = preprocess(path)
        images.append(tensor)
        raw_images.append(raw)
    images = torch.stack(images).to(device)

    # One forward pass, then backprop from each image's top-1 class.
    gcam = GradCAM(model=model)
    probs, ids = gcam.forward(images)
    top_ids = ids[:, [0]]
    gcam.backward(ids=top_ids)

    for target_layer in target_layers:
        print("Generating Grad-CAM @{}".format(target_layer))

        # Activation map at this layer for the class chosen above.
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, 0]],
                                              probs[j, 0]))

            save_gradcam(
                filename="results/{}-{}-gradcam-{}-{}.png".format(
                    j, "resnet152", target_layer, classes[top_ids[j]]),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )
Пример #13
0
def main(args):
    """Generate Grad-CAM and Grad-CAM++ heatmaps for iSAID validation
    patches using an mmdetection model.

    NOTE(review): input/output folders are hard-coded absolute paths and
    only every second square image (``imlist[1::2]``) is processed —
    presumably intentional sharding; confirm.
    """
    #####
    #TODO:build model & load weight
    from mmdet.datasets.transforms import ImageTransform
    from tqdm import tqdm
    config_file = args.config_file
    checkpoint_file = args.checkpoint_file
    cfg = Config.fromfile(config_file)
    device = 'cuda:0'
    model = init_detector(config_file, checkpoint_file, device=device)
    print(model)
    
    ######
    # Grad-CAM
    # layer_name = get_last_conv_name(model)
    layer_name = 'backbone.layer4.2.conv3'

    folder = '/EHDD1/ADD/data/iSAID_Devkit/preprocess/dataset/iSAID_patches/val/images/'
    dst_folder = '/EHDD1/ADD/data/iSAID_Devkit/preprocess/dataset/iSAID_patches/val/cam'
    os.makedirs(dst_folder, exist_ok=True)
    os.makedirs(dst_folder+'++', exist_ok=True)
    imlist_total = os.listdir(folder)
    imlist = list(filter(ispure, imlist_total))
    #####
    for image in tqdm(imlist[1::2]):
        #TODO : prepare input
        # Fresh wrappers per image so hooks are registered/removed cleanly.
        grad_cam = GradCAM(model, layer_name)
        grad_cam_plus_plus = GradCamPlusPlus(model, layer_name)
        img = mmcv.imread(os.path.join(folder, image))
        # Skip non-square patches.
        if img.shape[0] != img.shape[1]:
            print(image)
            continue
        img_transform = ImageTransform(size_divisor=cfg.data.test.size_divisor, **cfg.img_norm_cfg)
        data = _prepare_data(img, img_transform, model.cfg, device)
        #######
        # Grad-CAM: overlay, raw heatmap and binary mask per image.
        image_dict = {}
        mask = grad_cam(data)  # cam mask
        grad_cam.remove_handlers()
        image_dict['overlay'], image_dict['heatmap'], image_dict['mask'] = gen_cam(img, mask)
        save_image(image_dict, image.split('.')[0], output_dir=dst_folder)
        # # Grad-CAM++
        # grad_cam_plus_plus = GradCamPlusPlus(model, layer_name)
        image_dict = {}
        mask_plus_plus = grad_cam_plus_plus(data)  # cam mask
        image_dict['overlay'], image_dict['heatmap'], image_dict['mask'] = gen_cam(img, mask_plus_plus)
        grad_cam_plus_plus.remove_handlers()
        save_image(image_dict, image.split('.')[0], output_dir=dst_folder+'++')
        # Release cached GPU memory between images.
        torch.cuda.empty_cache()
Пример #14
0
def demo4(image_paths, output_dir, cuda):
    """
    Generate Grad-CAM for one fixed target class at selected layers of a
    fine-tuned model loaded from disk.
    """
    device = get_device(cuda)

    # Human-readable synset class names.
    classes = get_classtable()

    # Entire serialized model (architecture + weights), inference mode.
    net = torch.load('best_all.pth')  # load model
    net.to(device)
    net.eval()

    # Layers to visualize and the class being explained.
    layers = ["base_network.layer4.2.conv3"]
    target_class = 4  # "lake"

    # Batch the preprocessed inputs; keep raw images for overlaying.
    batch, originals = load_images(image_paths)
    batch = torch.stack(batch).to(device)

    # Forward once, then backprop from the fixed class for every image.
    cam = GradCAM(model=net)
    probs, ids = cam.forward(batch)
    wanted = torch.LongTensor([[target_class]] * len(batch)).to(device)
    cam.backward(ids=wanted)

    for layer in layers:
        print("Generating Grad-CAM @{}".format(layer))

        heatmaps = cam.generate(target_layer=layer)

        for n in range(len(batch)):
            print("\t#{}: {} ({:.5f})".format(
                n, classes[target_class], float(probs[ids == target_class])))

            out_name = osp.join(
                output_dir,
                "{}-{}-gradcam-{}-{}.png".format(n, "resnet152",
                                                 layer,
                                                 classes[target_class]),
            )
            save_gradcam(
                filename=out_name,
                gcam=heatmaps[n, 0],
                raw_image=originals[n],
            )
Пример #15
0
def gradcam(model, device, raw_image, image, CONFIG, topk):
    """Run a Grad-CAM forward pass and return (maps, probs, class ids).

    NOTE: per-class map generation is disabled in this version, so the
    first element of the returned tuple is always an empty list; only the
    ranked probabilities and indices are produced.
    """
    print('Grad-CAM')
    gcam = GradCAM(model=model)
    scores, ranked = gcam.forward(image.to(device))
    results = []
    probs = scores.cpu().data.numpy()
    idx = ranked.cpu().data.numpy()
    # Drop the wrapper and GPU tensors before returning to free memory.
    del gcam, scores, ranked
    torch.cuda.empty_cache()
    return (results, probs, idx)
Пример #16
0
    def __init__(self, model_name = None):
        """ Virtually private constructor.

        Enforces the singleton pattern: raises if an instance already
        exists, otherwise loads the model onto the detected device and
        wraps it with GradCAM.

        Args:
            model_name: path passed to torch.load() for the full model.
        """
        if DRSeverePred.__instance != None:
            raise Exception("This class is a singleton!")
        else:
            DRSeverePred.__instance = self
            DRSeverePred.__device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        if torch.cuda.is_available():
            current_device = torch.cuda.current_device()
            print('Running on the GPU:', torch.cuda.get_device_name(current_device))
        else:
            print('Running on the CPU')
        # Model: entire serialized model object, kept in class-level state
        # (name-mangled attributes) and put in inference mode.
        DRSeverePred.__model = torch.load(model_name)
        DRSeverePred.__model.to(DRSeverePred.__device)
        DRSeverePred.__model.eval()
        DRSeverePred.__gcam = GradCAM(model=DRSeverePred.__model)
Пример #17
0
def extract_object(original_image, cuda = 1):
    """
        Generate Grad-CAM at different layers of ResNet-152
        and extract per-image crops/bounding boxes from the heatmaps.

        Returns:
            (dogs, red_bboxs): lists produced by save_gradcam for each image.
        """
    output_dir = "./output"

    # device = get_device(cuda)
    # NOTE(review): device is computed but the model is never moved to it
    # (the .to(device) call is commented out below) — confirm CPU is intended.
    device = torch.device("cuda" if cuda else "cpu")

    # Synset words
    classes = get_classtable()

    # Model
    model = models.resnet152(pretrained=True)
  #  device = "cuda"
  #   model.to(device)
    model.eval()

    target_layer = "layer4"
    # NOTE(review): target_class is defined but unused — the commented-out
    # ids_ line suggests backward was meant to target this class.
    target_class = 243  # "bull mastif"


    # Images
    # images = original_image.to(device)

    gcam = GradCAM(model=model)
    probs, ids = gcam.forward(original_image)
#    ids_ = torch.LongTensor([[target_class]] * len(original_image)).to(device)
#     pdb.set_trace()
    # NOTE(review): this passes the full ranked-ids matrix to backward,
    # not a (N, 1) top-1/target column as in sibling demos — verify intent.
    gcam.backward(ids=ids)

    # Grad-CAM
    dogs = []
    red_bboxs = []
    regions = gcam.generate(target_layer=target_layer)
    for j in range(len(original_image)):
        dog, red_bbox = save_gradcam(gcam=regions[j, 0], raw_image=original_image[j])
        dogs.append(dog)
        red_bboxs.append(red_bbox)
        print(red_bbox)
    # tensor_dogs = torch.stack(dogs)
    # tensor_red_bbox = torch.stack(red_bboxs)
    #del gcam,model,dogs,red_bboxs
    return dogs, red_bboxs
Пример #18
0
def guided_backprop_eye(image, name, net):
    """Build a side-by-side strip: raw image | probability label |
    guided backprop | Grad-CAM | guided Grad-CAM, for one named image.

    Relies on module-level names ``shape`` and ``classes`` (not defined in
    this block — presumably set elsewhere in the module; verify).

    Args:
        image: dict holding the tensor under ``name`` and the raw image
            under ``name + '_raw'``.
        name: key of the image to explain.
        net: the classification model.
    """
    img = torch.stack([image[name]])
    bp = BackPropagation(model=net)
    probs, ids = bp.forward(img)
    gcam = GradCAM(model=net)
    _ = gcam.forward(img)

    gbp = GuidedBackPropagation(model=net)
    _ = gbp.forward(img)

    # Guided Backpropagation from the top-1 predicted class.
    actual_status = ids[:, 0]
    gbp.backward(ids=actual_status.reshape(1, 1))
    gradients = gbp.generate()

    # Grad-CAM
    gcam.backward(ids=actual_status.reshape(1, 1))
    regions = gcam.generate(target_layer='last_conv')

    # Get Images: show the probability of the positive class; for class 0
    # fall back to the second column (binary classifier convention).
    prob = probs.data[:, 0]
    if actual_status == 0:
        prob = probs.data[:, 1]

    prob_image = np.zeros((shape[0], 60, 3), np.uint8)
    cv2.putText(prob_image, '%.1f%%' % (prob * 100), (5, 15),
                cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1, cv2.LINE_AA)

    # Replicate the single-channel gradient map to 3 channels for display.
    guided_bpg_image = get_gradient_image(gradients[0])
    guided_bpg_image = cv2.merge(
        (guided_bpg_image, guided_bpg_image, guided_bpg_image))

    grad_cam_image = get_gradcam_image(gcam=regions[0, 0],
                                       raw_image=image[name + '_raw'])
    # Guided Grad-CAM = elementwise product of CAM regions and gradients.
    guided_gradcam_image = get_gradient_image(torch.mul(regions, gradients)[0])
    guided_gradcam_image = cv2.merge(
        (guided_gradcam_image, guided_gradcam_image, guided_gradcam_image))
    #print(image['path'],classes[actual_status.data], probs.data[:,0] * 100)
    print(classes[actual_status.data], probs.data[:, 0] * 100)

    return cv2.hconcat([
        image[name + '_raw'], prob_image, guided_bpg_image, grad_cam_image,
        guided_gradcam_image
    ])
Пример #19
0
def guided_gradcam(model, device, raw_image, image, CONFIG, topk):
    """Compute Guided Grad-CAM maps for the top-k predicted classes.

    Returns:
        (results, probs, idx): per-class guided Grad-CAM products (via
        find_gradient), the ranked probabilities, and the class indices.
    """
    # =========================================================================
    print('Guided Grad-CAM')
    # =========================================================================
    gcam = GradCAM(model=model)
    gbp = GuidedBackPropagation(model=model)
    probs, idx = gbp.forward(image.to(device))

    # BUG FIX: ``results`` was appended to without ever being initialized,
    # raising NameError on the first iteration.
    results = []
    for i in range(0, topk):
        # Class-discriminative localization map at the configured layer.
        gcam.backward(idx=idx[i])
        region = gcam.generate(target_layer=CONFIG['target_layer'])

        # Pixel-space guided-backprop gradients for the same class.
        gbp.backward(idx=idx[i])
        feature = gbp.generate()

        # Guided Grad-CAM = gradients weighted by the upsampled CAM.
        h, w, _ = feature.shape
        region = cv2.resize(region, (w, h))[..., np.newaxis]
        output = feature * region

        results.append(find_gradient(output))
        print('[{:.5f}] {}'.format(probs[i], idx[i].cpu().numpy()))
    return (results, probs, idx)
def main(config):
    """Restore a trained model from ``config.resume`` and write its
    Grad-CAM overlay for the image at the module-level INPUT_PATH.
    """
    net = config.init_obj('arch', module_arch)

    # Restore weights; wrap in DataParallel when several GPUs are configured.
    ckpt = torch.load(config.resume)
    weights = ckpt['state_dict']
    if config['n_gpu'] > 1:
        net = torch.nn.DataParallel(net)
    net.load_state_dict(weights)

    # Read the input, keep a single channel, and scale to [0, 1] floats.
    frame = cv2.imread(INPUT_PATH)
    frame = frame[:, :, [0]]
    frame = np.float32(frame) / 255

    net_input = preprocess(frame)

    # Target layer id per architecture:
    # base: 3, deeper: 5, deeperer: 5, bn: 8, dropout: 5, both: 8
    chosen = '3'
    print(chosen)
    cam = GradCAM(model=net, target=chosen)

    heat = cam.get_cam(net_input)
    show_cam_on_image(frame, heat, OUTPUT_PATH)
Пример #21
0
def get_grad_cam(device, model, classes, images, labels, target_layers):
    """Compute Grad-CAM activation maps for a batch at several layers.

    Args:
        device: torch device for model and targets.
        model: the classifier to explain.
        classes: class-name table (unused here, kept for interface parity).
        images: batched input tensor.
        labels: ground-truth class ids for the batch.
        target_layers: layer names to generate maps for.

    Returns:
        (layers_region, pred_probs, pred_ids): a dict mapping each layer
        name to its activation maps, plus the ranked predictions.
    """
    model.to(device)
    model.eval()

    # Wrap the model; hooks are restricted to the requested layers.
    gcam = GradCAM(model=model, candidate_layers=target_layers)

    # images = torch.stack(images).to(device)

    # Forward pass: ranked class probabilities and ids per image.
    pred_probs, pred_ids = gcam.forward(images)

    # Backprop from each image's ground-truth class.
    target_ids = labels.view(len(images), -1).to(device)
    gcam.backward(ids=target_ids)

    # One activation-map tensor per requested layer.
    layers_region = {layer: gcam.generate(target_layer=layer)
                     for layer in target_layers}

    # Detach the forward/backward hooks before handing back the results.
    gcam.remove_hook()

    return layers_region, pred_probs, pred_ids
def demo1(image_paths, target_layer, arch, topk, output_dir, cuda):
    """
    Visualize model responses given multiple images

    Runs four visualization methods (vanilla backprop, deconvnet,
    Grad-CAM, guided backprop / guided Grad-CAM) for the top-k predicted
    classes of each image and saves every map to output_dir.
    """

    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Model from torchvision
    model = models.__dict__[arch](pretrained=True)
    model.to(device)
    model.eval()

    # Images
    images, raw_images = load_images(image_paths)
    images = torch.stack(images).to(device)
    """
    Common usage:
    1. Wrap your model with visualization classes defined in grad_cam.py
    2. Run forward() with images
    3. Run backward() with a list of specific classes
    4. Run generate() to export results
    """

    # =========================================================================
    print("Vanilla Backpropagation:")

    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)  # sorted

    for i in range(topk):
        bp.backward(ids=ids[:, [i]])
        gradients = bp.generate()

        # Save results as image files
        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j,
                                                                           i]))

            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-vanilla-{}.png".format(j, arch, classes[ids[j, i]]),
                ),
                gradient=gradients[j],
            )

    # Remove all the hook function in the "model"
    bp.remove_hook()

    # =========================================================================
    print("Deconvolution:")

    deconv = Deconvnet(model=model)
    _ = deconv.forward(images)

    for i in range(topk):
        deconv.backward(ids=ids[:, [i]])
        gradients = deconv.generate()

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j,
                                                                           i]))

            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-deconvnet-{}.png".format(j, arch, classes[ids[j,
                                                                         i]]),
                ),
                gradient=gradients[j],
            )

    deconv.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for i in range(topk):
        # Guided Backpropagation
        gbp.backward(ids=ids[:, [i]])
        gradients = gbp.generate()

        # Grad-CAM
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j,
                                                                           i]))

            # Guided Backpropagation
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided-{}.png".format(j, arch, classes[ids[j, i]]),
                ),
                gradient=gradients[j],
            )

            # Grad-CAM
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(j, arch, target_layer,
                                                     classes[ids[j, i]]),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )

            # Guided Grad-CAM = elementwise product of CAM and gradients.
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided_gradcam-{}-{}.png".format(
                        j, arch, target_layer, classes[ids[j, i]]),
                ),
                gradient=torch.mul(regions, gradients)[j],
            )
Пример #23
0
def main(image_path, arch, topk, cuda):
    """Visualize Grad-CAM, vanilla backprop, deconvolution and guided Grad-CAM.

    Loads a torchvision model named ``arch``, swaps its head for a
    512 -> 200 -> 2 classifier, restores weights from
    ``weight_resnet18_3.pth`` and writes the top-``topk`` visualizations for
    ``image_path`` into ``results/``.

    Args:
        image_path: path of the input image.
        arch: torchvision model name; must be one of the CONFIG keys below.
        topk: number of top predicted classes to visualize.
        cuda: request GPU execution (falls back to CPU when unavailable).
    """

    # Per-architecture name of the last conv layer and expected input size.
    # `.get(arch)` returns None for unknown archs, which would fail later on
    # CONFIG['input_size'].
    CONFIG = {
        'resnet18': {
            'target_layer': 'layer4.1',
            'input_size': 224
        },
        'resnet152': {
            'target_layer': 'layer4.2',
            'input_size': 224
        },
        'vgg19': {
            'target_layer': 'features.36',
            'input_size': 224
        },
        'vgg19_bn': {
            'target_layer': 'features.52',
            'input_size': 224
        },
        'inception_v3': {
            'target_layer': 'Mixed_7c',
            'input_size': 299
        },
        'densenet201': {
            'target_layer': 'features.denseblock4',
            'input_size': 224
        },
        # Add your model
    }.get(arch)

    device = torch.device(
        'cuda' if cuda and torch.cuda.is_available() else 'cpu')

    if cuda:
        current_device = torch.cuda.current_device()
        print('Running on the GPU:',
              torch.cuda.get_device_name(current_device))
    else:
        print('Running on the CPU')

    # Synset words — hard-coded binary labels; the ImageNet synset loading
    # below was disabled.
    classes = ["other", "rori"]
    #    with open('samples/synset_words.txt') as lines:
    #        for line in lines:
    #            line = line.strip().split(' ', 1)[1]
    #            line = line.split(', ', 1)[0].replace(' ', '_')
    #            classes.append(line)

    # Model: pretrained backbone with a custom classification head.
    model = models.__dict__[arch](pretrained=True)
    num_features = model.fc.in_features
    model.fc = nn.Linear(num_features, 200)  # this turns the head into a 512 -> 2 classifier (with fc2 below)
    # NOTE(review): add_module only *registers* relu_fc/fc2; torchvision
    # ResNet.forward() does not call registered extras, so these look unused
    # unless forward() is patched elsewhere — verify against the checkpoint.
    model.add_module('relu_fc', nn.ReLU())
    model.add_module('fc2', nn.Linear(200, 2))
    # Checkpoint path is hard-coded to a resnet18 file regardless of `arch`.
    param = torch.load('weight_resnet18_3.pth')
    model.load_state_dict(param)

    model.to(device)
    model.eval()

    # Image: OpenCV loads BGR, [..., ::-1] flips to RGB before the transform.
    raw_image = cv2.imread(image_path)[..., ::-1]
    raw_image = cv2.resize(raw_image, (CONFIG['input_size'], ) * 2)
    image = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        )
    ])(raw_image).unsqueeze(0)

    # =========================================================================
    print('Grad-CAM')
    # =========================================================================
    gcam = GradCAM(model=model)
    probs, idx = gcam.forward(image.to(device))

    for i in range(0, topk):
        gcam.backward(idx=idx[i])
        output = gcam.generate(target_layer=CONFIG['target_layer'])

        save_gradcam('results/{}_gcam_{}.png'.format(classes[idx[i]], arch),
                     output, raw_image)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))

    # =========================================================================
    print('Vanilla Backpropagation')
    # =========================================================================
    bp = BackPropagation(model=model)
    probs, idx = bp.forward(image.to(device))

    for i in range(0, topk):
        bp.backward(idx=idx[i])
        output = bp.generate()

        save_gradient('results/{}_bp_{}.png'.format(classes[idx[i]], arch),
                      output)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))

    # =========================================================================
    print('Deconvolution')
    # =========================================================================
    # Deep copy so Deconvolution's hooks do not contaminate the shared model.
    deconv = Deconvolution(
        model=copy.deepcopy(model))  # TODO: remove hook func in advance
    probs, idx = deconv.forward(image.to(device))

    for i in range(0, topk):
        deconv.backward(idx=idx[i])
        output = deconv.generate()

        save_gradient('results/{}_deconv_{}.png'.format(classes[idx[i]], arch),
                      output)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))

    # =========================================================================
    print('Guided Backpropagation/Guided Grad-CAM')
    # =========================================================================
    gbp = GuidedBackPropagation(model=model)
    probs, idx = gbp.forward(image.to(device))

    for i in range(0, topk):
        gcam.backward(idx=idx[i])
        region = gcam.generate(target_layer=CONFIG['target_layer'])

        gbp.backward(idx=idx[i])
        feature = gbp.generate()

        # Guided Grad-CAM = guided-backprop gradient masked by the CAM region.
        h, w, _ = feature.shape
        region = cv2.resize(region, (w, h))[..., np.newaxis]
        output = feature * region

        save_gradient('results/{}_gbp_{}.png'.format(classes[idx[i]], arch),
                      feature)
        save_gradient('results/{}_ggcam_{}.png'.format(classes[idx[i]], arch),
                      output)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))
    # NOTE(review): everything below references names undefined in this
    # function (`args`, `utils`, `apply_mask`, a `GradCAM(model=..., target_layer=...)`
    # signature unlike the one above); it appears to be spliced in from a
    # different script and will raise NameError if reached — confirm.
    # set target layer for CAM
    if args.model == 'vgg16' or args.model == 'densenet121':
        target_layer = model.features[-1]
    elif args.model == 'resnet18':
        target_layer = model.layer4[-1]

    # get given label's index
    label = {'covid_19': 0, 'lung_opacity': 1, 'normal': 2, 'pneumonia': 3}
    idx_to_label = {v: k for k, v in label.items()}
    if args.label is not None:
        label = label[args.label]
    else:
        label = None

    # load and preprocess image
    image = utils.load_image(args.image_path)

    warnings.filterwarnings("ignore", category=UserWarning)
    # pass image through model and get CAM for the given label
    cam = GradCAM(model=model, target_layer=target_layer)
    label, mask = cam(image, label)
    print(f'GradCAM generated for label "{idx_to_label[label]}".')

    # deprocess image and overlay CAM
    image = utils.deprocess_image(image)
    image = apply_mask(image, mask)

    # save the image
    utils.save_image(image, args.output_path)
Пример #25
0
    sample_input = Variable(torch.randn(1,3,in_size,in_size), volatile=False)
    if use_gpu:
        sampe_input = sample_input.cuda()

    def is_image(f):
        return f.endswith(".png") or f.endswith(".jpg")

    test_transform = transforms.Compose([
        transforms.Scale(in_size),
        transforms.CenterCrop(in_size),
        transforms.ToTensor(),
        transforms.Normalize(cf.mean, cf.std)
    ])

    gcam = GradCAM(list(model._modules.items())[0][1], cuda=use_gpu)

    print("\n[Phase 2] : Gradient Detection")
    if args.subtype != None:
        WBC_id = return_class_idx(args.subtype)

        if not (args.subtype in dset_classes):
            print("The given subtype does not exists!")
            sys.exit(1)

    if args.subtype == None:
        print("| Checking All Activated Regions...")
    else:
        print("| Checking Activated Regions for " + dset_classes[WBC_id] + "...")

    for subdir, dirs, files in os.walk(cf.test_base):
Пример #26
0
def main():
    """
    Visualize Grad-CAM responses for the TEST split of a chest-X-ray dataset.

    Loads CombineNet (optionally restoring args.load_model/best.ckpt), then for
    every batch whose label vector equals [0, 0, 0, 1] ("severe") writes the
    Grad-CAM overlay and the raw X-ray image into args.output_dir.
    """
    device = torch.device("cuda")

    test_dataset = CXRDataset(args.image_dir, ['TEST'])
    test_loader = DataLoader(test_dataset,
                             batch_size=args.batch_size,
                             pin_memory=True)

    model = CombineNet().to(device)
    model = nn.DataParallel(model)
    print("Parameters: {}".format(sum(p.numel() for p in model.parameters())))
    model.eval()

    if args.load_model:
        checkpoint = torch.load(args.load_model + "/best.ckpt")
        model.load_state_dict(checkpoint['model'])
        epoch = checkpoint['epoch']
        print('Loading model: {}, from epoch: {}'.format(
            args.load_model, epoch))
    else:
        # NOTE(review): execution continues with random weights after this
        # message — confirm that is intended rather than exiting.
        print('Model: {} not found'.format(args.load_model))

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    # Layer is addressed through DataParallel's 'module.' prefix.
    target_layer = 'module.feature_model.layer_block.3.1.conv2'
    print(model.module.feature_model.layer_block[3][1].conv2)

    epoch_iterator = tqdm(test_loader, desc="Iteration")
    for i, batch in enumerate(epoch_iterator, 0):
        raw_images, _, _ = batch
        batch_device = tuple(t.to(device, non_blocking=True) for t in batch)
        images, labels, img_ids = batch_device

        # Only visualize batches labelled exactly [0, 0, 0, 1] ("severe").
        if (labels.cpu().detach().numpy() == np.array([0, 0, 0, 1])).all():
            probs, ids = gcam.forward(images)
            gcam.backward(ids=ids[:, [0]])
            regions = gcam.generate(target_layer=target_layer)

            # BUG FIX: iterate over the actual batch length instead of
            # args.batch_size — the final batch can be smaller, and indexing
            # regions[j] / raw_images[j] past it raised IndexError.
            for j in range(len(images)):
                gcam_im = save_gradcam(
                    filename=osp.join(
                        args.output_dir,
                        "{}-gradcam-{}_severe_{}.png".format(
                            j, target_layer, i),
                    ),
                    gcam=regions[j, 0],
                    raw_image=images[j],
                )
                img_path = osp.join(
                    args.output_dir,
                    "{}-image-{}_severe_{}.png".format(j, target_layer, i),
                )
                raw_image = np.array(raw_images[j])
                xray = save_xray(img_path, raw_image)
                '''
				# TODO: Blending code
				#cv2.imwrite(img_path, raw_images[j])

				img_path=osp.join(
						args.output_dir,
						"{}-blended-{}_severe_{}.png".format(j, target_layer, i),
						)
				beta = (1.0 - args.alpha)
				xray = np.array(xray)
				xray = xray.squeeze()
				xray = np.stack([xray, xray, xray],axis=2)
				print(xray.dtype)
				print(gcam.dtype)
				blended = cv2.addWeighted(xray, args.alpha, gcam, beta, 0.0)
				save_xray(img_path, blended)
				'''

    print('finished!')
Пример #27
0
def main(image_path, arch, topk, cuda, target_layer):
    """Generate Grad-CAM heatmaps for one image with an SE-ResNet-50 model.

    Args:
        image_path: path of the input image.
        arch: architecture key used only to look up CONFIG (the model itself
            is hard-coded to se_resnet50 below).
        topk: number of top predicted classes to visualize.
        cuda: request GPU execution (falls back to CPU when unavailable).
        target_layer: layer name used for the 'se_resnet' CONFIG entry and
            for the output file name.
    """

    # Per-architecture last-conv layer name and expected input resolution.
    CONFIG = {
        'resnet152': {
            'target_layer': 'layer4.2',
            'input_size': 224
        },
        'vgg19': {
            'target_layer': 'features.36',
            'input_size': 224
        },
        'vgg19_bn': {
            'target_layer': 'features.52',
            'input_size': 224
        },
        'inception_v3': {
            'target_layer': 'Mixed_7c',
            'input_size': 299
        },
        'densenet201': {
            'target_layer': 'features.denseblock4',
            'input_size': 224
        },
        # Add your model
        'se_resnet': {
            'target_layer': target_layer,
            'input_size': 32
        },
    }.get(arch)

    cuda = cuda and torch.cuda.is_available()

    if cuda:
        current_device = torch.cuda.current_device()
        print('Running on the GPU:',
              torch.cuda.get_device_name(current_device))
    else:
        print('Running on the CPU')

    # Synset words (ImageNet class names).
    classes = list()
    with open('samples/synset_words.txt') as lines:
        for line in lines:
            line = line.strip().split(' ', 1)[1]
            line = line.split(', ', 1)[0].replace(' ', '_')
            classes.append(line)

    # Model
    #model = models.__dict__[arch](pretrained=True)
    model = models.se_resnet.se_resnet50(num_classes=100)
    # BUG FIX: switch to inference mode. Without eval(), BatchNorm layers run
    # in training mode (batch statistics + running-stat updates), corrupting
    # the Grad-CAM output for a single-image batch.
    model.eval()
    # NOTE(review): no checkpoint is loaded here, so the CAMs come from
    # randomly initialized weights unless se_resnet50 loads them internally —
    # verify.

    # Image: OpenCV loads BGR, [..., ::-1] flips to RGB before the transform.
    raw_image = cv2.imread(image_path)[..., ::-1]
    raw_image = cv2.resize(raw_image, (CONFIG['input_size'], ) * 2)
    image = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])(raw_image)

    if cuda:
        model.cuda()
        image = image.cuda()

    # =========================================================================
    print('Grad-CAM')
    # =========================================================================
    gcam = GradCAM(model=model)
    # NOTE(review): `image` has no batch dimension here (other examples call
    # .unsqueeze(0)); confirm to_var() adds it.
    probs, idx = gcam.forward(to_var(image))

    for i in range(0, topk):
        gcam.backward(idx=idx[i])
        output = gcam.generate(target_layer=CONFIG['target_layer'])

        #save_gradcam('results/{}_gcam_{}.png'.format(classes[idx[i]], arch), output, raw_image)  # NOQA
        save_gradcam('results/{}.png'.format(target_layer), output,
                     raw_image)  # NOQA
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))
Пример #28
0
def demo3(image_paths, topk, output_dir, cuda, num_class, da_model):
    """
    Generate Grad-CAM maps with domain-adaptation models (STA or OPDA).

    Args:
        image_paths: either a list of image paths, or a single-element list
            holding a .txt file with one image path per line.
        topk: number of top predicted classes to visualize per image.
        output_dir: directory where the Grad-CAM PNGs are written (created if
            missing).
        cuda: whether to run on GPU.
        num_class: 1-tuple holding the number of classes.
        da_model: 'STA' or 'OPDA' — selects architecture, weights and layer.

    Raises:
        ValueError: for an unknown ``da_model`` or a requested image missing
            from ``image_paths``.
    """
    device = get_device(cuda)

    # Synset words
    #classes = get_classtable()

    # num_class arrives as a 1-tuple; unpack its single element.
    num_class, = num_class

    # Hard-coded subset of images to visualize.
    target_images = [
        'runway00', 'runway02', 'runway03', 'runway04', 'runway05', 'runway06',
        'runway07', 'runway08'
    ]
    # Class "names" are just the stringified class indices.
    classes = [str(i) for i in range(num_class)]

    if da_model == 'STA':
        # Separate-to-Adapt: ResNet-50 backbone + bottleneck classifier.
        feature_extractor = ResNetFc(
            model_name='resnet50',
            model_path=
            '/home/at7133/Research/Domain_adaptation/Separate_to_Adapt/resnet50.pth'
        )
        cls = CLS(feature_extractor.output_num(),
                  num_class,
                  bottle_neck_dim=256)
        model = nn.Sequential(feature_extractor, cls).cuda()
        model.load_state_dict(
            torch.load(
                '/home/at7133/Research/Domain_adaptation/Separate_to_Adapt/Only_source_classifier.pth'
            ))
    elif da_model == 'OPDA':
        # Open-set DA by Backpropagation: VGG generator + classifier.
        G, C = get_model('vgg', num_class=num_class, unit_size=1000)
        load_model(
            G, C,
            '/home/at7133/Research/Domain_adaptation/OPDA_BP/checkpoint/checkpoint_99'
        )
        model = nn.Sequential(G, C).cuda()
    else:
        # ROBUSTNESS: fail fast with a clear message instead of hitting a
        # NameError on `model` below.
        raise ValueError(
            f"Unknown da_model: {da_model!r} (expected 'STA' or 'OPDA')")
    model.to(device)
    model.eval()

    # Check available layer names
    print("Layers:")
    for m in model.named_modules():
        print("\t", m[0])

    # Last convolution layer of each architecture.
    if da_model == 'STA':
        target_layer = "0.model_resnet.layer4.2.conv3"
    else:  # 'OPDA' (validated above)
        target_layer = "0.lower.36"  #TODO find proper target layer

    # Preprocessing helpers ---------------------------------------------------
    def _Normalize(img, mean, std):
        # Channel-wise (img - mean) / std; only FloatTensors are supported.
        if isinstance(img, torch.FloatTensor):
            mean = torch.FloatTensor(mean)
            std = torch.FloatTensor(std)
        else:
            raise TypeError(f'Expected Torch floattensor, got {type(img)}')
        return (img - mean) / std

    def _preprocess(image_path, img_shape):
        # Load with OpenCV (BGR), resize, convert to a CHW float tensor (RGB).
        raw_image = cv2.imread(image_path)
        raw_image = cv2.resize(raw_image, (img_shape, img_shape))
        image = torch.FloatTensor(raw_image[..., ::-1].copy())
        image = image / 255.0
        if da_model == "OPDA":
            # OPDA was trained with ImageNet normalization.
            image = _Normalize(image, [0.485, 0.456, 0.406],
                               [0.229, 0.224, 0.225])
        image = image.permute(2, 0, 1)
        return image, raw_image

    def Load_txt(img_paths):
        # Read one image path per line from a text file.
        assert isfile(img_paths), f"Image path {img_paths} doesn't exist"
        with open(img_paths, 'rb') as fr:
            image_paths = [
                img_path.split()[0].decode("utf-8")
                for img_path in fr.readlines()
            ]
        return image_paths

    def _filter_images(image_paths, req_images):
        # Keep only paths whose basename (sans extension) is in req_images,
        # in req_images order. (Renamed from the typo 'Filter_imgages'.)
        img_files = list(
            map(lambda x: x.split('/')[-1].split('.')[0], image_paths))
        satisfied_images = []
        for item in req_images:
            try:
                [[idx]] = np.argwhere(np.isin(img_files, item))
            except ValueError as err:
                # Narrowed from a bare `except:`; the nested unpacking raises
                # ValueError when the image is missing (or listed twice).
                raise ValueError(
                    f'{item} not found in the given paths') from err
            satisfied_images.append(image_paths[idx])
        return satisfied_images

    def Load_images(image_paths, req_images):
        # A single .txt argument is expanded into the paths it lists, then
        # restricted to req_images; plain path lists are used as-is.
        if image_paths[0].endswith('.txt'):
            assert len(image_paths
                       ) == 1  #make sure only one text file is given as input
            image_paths = Load_txt(image_paths[0])
            image_paths = _filter_images(image_paths, req_images)
            assert len(image_paths) == len(
                req_images), " All target images are not found"
        images = []
        raw_images = []
        print("Images:")
        for i, image_path in enumerate(image_paths):
            print("\t#{}: {}".format(i, image_path))
            image, raw_image = _preprocess(image_path, 224)
            images.append(image)
            raw_images.append(raw_image)
        images = torch.stack(images).to(device)
        return images, raw_images

    images, raw_images = Load_images(image_paths, target_images)
    print("Grad-CAM:")
    gcam = GradCAM(model=model)
    probs, ids = gcam.forward(images)
    # Race-free replacement for the isdir()+makedirs() check.
    os.makedirs(output_dir, exist_ok=True)
    for i in range(topk):

        # Grad-CAM
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j,
                                                                           i]))

            # Grad-CAM
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(j, "xception_v1",
                                                     target_layer,
                                                     classes[ids[j, i]]),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )
Пример #29
0
#    print(label_id[pred])
    print(output, pred.item())
    return pred

# Restore the fine-tuned weights and freeze the model for inference.
state_dict = torch.load(image_model_path)
model.load_state_dict(state_dict)

model.eval()
# Index -> human-readable class name for the two-class model.
label_id = {0: 'butterfly', 1: 'ibis'}

# Hook Grad-CAM onto the last module inside layer4 (the final conv block).
grad_cam = GradCAM(model=model, feature_layer=list(model.layer4.modules())[-1])

VISUALIZE_SIZE = (224, 224)

# Standard ImageNet normalization statistics.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

image_transform = transforms.Compose([
        transforms.Resize(VISUALIZE_SIZE),
        transforms.ToTensor(),
        normalize])

# BUG FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
# same filter and has been available since Pillow 2.7.
image.thumbnail(VISUALIZE_SIZE, Image.LANCZOS)

# save image origin size
image_orig_size = image.size  # (W, H)
Пример #30
0
def demo3(image_paths, topk, output_dir, cuda):
    """
    Generate Grad-CAM with original models
    """

    device = get_device(cuda)

    # ImageNet synset class names.
    classes = get_classtable()

    # Third-party model from my other repository, e.g. Xception v1 ported from Keras
    model = torch.hub.load("kazuto1011/pytorch-ported-models",
                           "xception_v1",
                           pretrained=True)
    model.to(device)
    model.eval()

    # Check available layer names
    print("Layers:")
    for layer_name, _ in model.named_modules():
        print("\t", layer_name)

    # Here we choose the last convolution layer
    target_layer = "exit_flow.conv4"

    def _load_one(path):
        # BGR load, resize to the model's input shape, then build an RGB
        # CHW float tensor normalized with the model's own statistics.
        raw = cv2.imread(path)
        raw = cv2.resize(raw, model.image_shape)
        tensor = torch.FloatTensor(raw[..., ::-1].copy())
        tensor = (tensor - model.mean) / model.std
        return tensor.permute(2, 0, 1), raw

    # Preprocess every input image, keeping the raw versions for overlays.
    images = []
    raw_images = []
    print("Images:")
    for idx, path in enumerate(image_paths):
        print("\t#{}: {}".format(idx, path))
        tensor, raw = _load_one(path)
        images.append(tensor)
        raw_images.append(raw)
    images = torch.stack(images).to(device)

    print("Grad-CAM:")

    gcam = GradCAM(model=model)
    probs, ids = gcam.forward(images)

    # One pass per rank: backprop the i-th best class, then save each image.
    for rank in range(topk):
        gcam.backward(ids=ids[:, [rank]])
        regions = gcam.generate(target_layer=target_layer)

        for img_idx in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(img_idx,
                                              classes[ids[img_idx, rank]],
                                              probs[img_idx, rank]))

            out_path = osp.join(
                output_dir,
                "{}-{}-gradcam-{}-{}.png".format(img_idx, "xception_v1",
                                                 target_layer,
                                                 classes[ids[img_idx, rank]]),
            )
            save_gradcam(
                filename=out_path,
                gcam=regions[img_idx, 0],
                raw_image=raw_images[img_idx],
            )