Beispiel #1
0
def grad_cam(image, topk=5):
    """Average the Grad-CAM heatmaps of the top-k predicted classes.

    Args:
        image: HxWxC uint8 image array (OpenCV layout); resized to 224x224.
        topk: number of top-ranked classes to average over. Defaults to 5,
            matching the original hard-coded behavior.

    Returns:
        Element-wise mean of the Grad-CAM maps generated at VGG19-BN's
        'features.52' layer for the top-k class indices.
    """
    model = models.vgg19_bn(pretrained=True)
    model.to(device)
    model.eval()

    # Standard ImageNet preprocessing.
    image = cv2.resize(image, (224, 224))
    image = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        )
    ])(image).unsqueeze(0)
    image = image.to(device)

    gcam = GradCAM(model=model)
    probs, idx = gcam.forward(image)

    # Accumulate the per-class heatmaps, then average them.
    output = None
    for i in range(topk):
        gcam.backward(idx=idx[i])
        temp = gcam.generate(target_layer='features.52')
        if output is None:  # idiomatic identity test (was `not ... is None`)
            output = temp
        else:
            output += temp

    output /= topk
    return output
Beispiel #2
0
class GradCamExplainer(ImageExplainer):
    """Produce a binary saliency mask for a single image via Grad-CAM."""

    def __init__(self, model_wrapper, target_layer, label):
        # Keep a handle to the raw model and wrap it with Grad-CAM.
        self.model = model_wrapper.model
        self.explainer = GradCAM(model=self.model)
        self.target_layer = target_layer
        self.label = label

    def explain(self, instance, budget):
        """Return a float32 0/1 mask keeping the top `budget` percent of
        Grad-CAM activation for the configured label."""
        batch = instance.unsqueeze(0).cuda()

        probs, ids = self.explainer.forward(batch)
        # print(probs, ids)
        label_ids = torch.LongTensor([[self.label]]).cuda()
        self.explainer.backward(ids=label_ids)
        heatmaps = self.explainer.generate(target_layer=self.target_layer)
        volume = heatmaps[0].double().detach().cpu().numpy()

        # Collapse channels, then threshold at the (100 - budget) percentile.
        saliency = np.abs(volume).sum(axis=0)
        cutoff = np.percentile(saliency, 100 - budget)

        # Binarize: everything at or above the cutoff survives.
        mask = np.where(saliency >= cutoff, 1.0, 0.0)
        return mask.astype(np.float32)
Beispiel #3
0
def main():
    """Evaluate a checkpointed model on CIFAR-100 and save Grad-CAM overlays.

    Relies on module-level names defined elsewhere: `args`, `MEAN`, `STD`,
    `SAMPLES`, `get_model`, `get_image`, `save_gradcam`, `save_image`.
    """

    # Dataset
    print('Creating dataset...')
    transform_val = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(MEAN, STD)])
    valset = torchvision.datasets.CIFAR100(root='./data',
                                           train=False,
                                           download=True,
                                           transform=transform_val)
    val_loader = DataLoader(valset,
                            batch_size=100,
                            shuffle=False,
                            num_workers=4,
                            pin_memory=True)

    # Model: restore best-checkpoint weights or fail loudly.
    model_path = os.path.join(args.checkpoint, args.model, 'best_model.pt')
    print('Loading model...')
    model = get_model(args.model)
    if os.path.exists(model_path):
        model.load_state_dict(torch.load(model_path))
    else:
        raise Exception('Cannot find model', model_path)
    if torch.cuda.device_count() > 1:
        print("Using", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)
    model.cuda()
    cudnn.benchmark = True

    # Result directory, created on demand.
    result_path = os.path.join('results', args.model)
    if not os.path.exists(result_path):
        os.makedirs(result_path)

    gcam = GradCAM(model=model)

    model.eval()
    acc = 0
    for i, (inputs, labels) in enumerate(val_loader):
        inputs, labels = (Variable(inputs.cuda()), Variable(labels.cuda()))
        outputs = model(inputs)
        outputs, labels = outputs.data, labels.data
        # Top-1 predictions for the accuracy count.
        _, preds = outputs.topk(1, 1, True, True)
        corrects = preds.eq(labels.view(-1, 1).expand_as(preds))
        acc += torch.sum(corrects)
        for sample in SAMPLES:
            image = get_image(inputs[sample])
            probs, idx = gcam.forward(inputs[sample].unsqueeze(0))
            # NOTE(review): idx[1] backpropagates the *second*-ranked class,
            # not the top prediction (idx[0]) — confirm this is intentional.
            gcam.backward(idx=idx[1])
            output = gcam.generate(target_layer='layer4.2')
            save_gradcam(result_path, '{}_gcam.png'.format(sample), output,
                         image)
            save_image(result_path, image, sample, preds[sample],
                       labels[sample])
        break  # only the first batch is visualized
    # NOTE(review): because of the break above, `acc` covers one batch yet is
    # divided by len(valset); the value is also never printed or returned.
    acc = acc.item() / len(valset) * 100
    print('Finish!!!')
Beispiel #4
0
def demo2(image_paths, output_dir, cuda):
    """
    Generate Grad-CAM at different layers of ResNet-152
    """

    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Pretrained ResNet-152, frozen in inference mode.
    model = models.resnet152(pretrained=True)
    model.to(device)
    model.eval()

    # The four residual layers
    target_layers = ["relu", "layer1", "layer2", "layer3", "layer4"]
    target_class = 243  # "bull mastif"

    # Preprocess every input, keeping the raw versions for the overlays.
    images, raw_images = [], []
    print("Images:")
    for i, image_path in enumerate(image_paths):
        print("\t#{}: {}".format(i, image_path))
        tensor_img, raw_img = preprocess(image_path)
        images.append(tensor_img)
        raw_images.append(raw_img)
    images = torch.stack(images).to(device)

    # One forward pass, then backprop wrt the fixed target class.
    gcam = GradCAM(model=model)
    probs, ids = gcam.forward(images)
    ids_ = torch.LongTensor([[target_class]] * len(images)).to(device)
    gcam.backward(ids=ids_)

    for target_layer in target_layers:
        print("Generating Grad-CAM @{}".format(target_layer))

        regions = gcam.generate(target_layer=target_layer)

        for j, raw in enumerate(raw_images):
            print(
                "\t#{}: {} ({:.5f})".format(
                    j, classes[target_class], float(probs[ids == target_class])
                )
            )

            out_name = "{}-{}-gradcam-{}-{}.png".format(
                j, "resnet152", target_layer, classes[target_class]
            )
            save_gradcam(
                filename=osp.join(output_dir, out_name),
                gcam=regions[j, 0],
                raw_image=raw,
            )
Beispiel #5
0
def main():
    """Run LFLSeg (ResNet-101) Grad-CAM on `args.input` and save the heatmap
    for the "full leaf" class to `args.output`."""

    device = torch.device(
        'cuda' if args.cuda and torch.cuda.is_available() else 'cpu')

    if args.cuda:
        current_device = torch.cuda.current_device()
        print('Running on the GPU:',
              torch.cuda.get_device_name(current_device))
    else:
        print('Running on the CPU')

    # Load the LFLSeg module (ResNet-101 backbone)
    LFLSeg_model = models.resnet101()
    num_ftrs = LFLSeg_model.fc.in_features
    LFLSeg_model.fc = nn.Linear(
        num_ftrs, 3
    )  # Replace final layer with 3 outputs (full leaf, partial leaf, non-leaf)

    # Download the pretrained model: https://drive.google.com/drive/folders/1HqBYjUGXxl1eAkzhURoV5JAqWHvBvvTp?usp=sharing
    load_path = '/path/to/LFLSeg_resnet101.pth'
    LFLSeg_model.load_state_dict(torch.load(load_path), strict=True)

    LFLSeg_model.to(device)
    LFLSeg_model.eval()

    # Load the GradCAM function
    gcam = GradCAM(model=LFLSeg_model)

    # Guard clause replaces the `os.path.exists(...) == False` comparison.
    if not os.path.exists(args.input):
        print("The image path doesn't exist!")
        return

    # If output folder does not exist, create a new one.
    if not os.path.exists(args.output):
        os.makedirs(args.output)

    filename = os.path.basename(args.input)

    raw_image = Image.open(args.input).convert('RGB')

    # interpolation=3 is PIL's BICUBIC resampling code.
    image = transforms.Compose([
        transforms.Resize(size=(224, 224), interpolation=3),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])(raw_image).unsqueeze(0)

    probs, idx = gcam.forward(image.to(device))

    # Only get the heatmap for the "full leaf" class (i.e., idx=0)
    gcam.backward(idx=0)
    output = gcam.generate(target_layer=args.target_layer)

    save_gradcam('{}/{}_gcam.png'.format(args.output, filename[:-4]),
                 output,
                 raw_image,
                 threshold=args.threshold,
                 is_segment=args.segment)
Beispiel #6
0
def demo1(image_paths, target_layer, arch, topk, output_dir, cuda, model):
    """
    Visualize model responses given multiple images
    """

    device = get_device(cuda)
    gradcam_list = []

    # Synset words
    classes = get_classtable()

    # Fall back to a pretrained torchvision model when none is supplied.
    if model is None:
        model = models.__dict__[arch](pretrained=True)
    model.to(device)
    model.eval()

    # Load inputs once; keep raw copies for overlays.
    images, raw_images = load_images(image_paths)
    images = torch.stack(images).to(device)
    """
    Common usage:
    1. Wrap your model with visualization classes defined in grad_cam.py
    2. Run forward() with images
    3. Run backward() with a list of specific classes
    4. Run generate() to export results
    """

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")
    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for rank in range(topk):
        # Guided Backpropagation wrt each image's rank-th prediction.
        gbp.backward(ids=ids[:, [rank]])
        gradients = gbp.generate()

        # Grad-CAM for the same class ids.
        gcam.backward(ids=ids[:, [rank]])
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            filename = osp.join(
                output_dir,
                "{}-{}-gradcam-{}.png".format(j, arch, target_layer))
            gradcam_list.append(filename)
            save_gradcam(filename, gcam=regions[j, 0], raw_image=raw_images[j])
    return gradcam_list
def VideoSpatialPrediction(vid_name,
                           target,
                           net,
                           num_categories,
                           num_samples=25,
                           new_size=299,
                           batch_size=2):
    """Load grayscale frames of a video, run Grad-CAM for `target` on one
    frame, save the overlay, and return a zero-filled prediction array.

    NOTE(review): the returned `prediction` is allocated but never written,
    so it is always all zeros — confirm callers only need the saved image.
    """

    gc = GradCAM(model=net)

    # Normalization statistics replicated once per sampled frame.
    clip_mean = [0.5] * num_samples
    clip_std = [0.226] * num_samples

    normalize = video_transforms.Normalize(mean=clip_mean, std=clip_std)
    val_transform = video_transforms.Compose([
        video_transforms.ToTensor(),
        normalize,
    ])

    deep = 1  # single (grayscale) channel

    # inception = 299,299, resnet = 224,224
    dims = (new_size, new_size, deep, num_samples)
    rgb = np.zeros(shape=dims, dtype=np.float64)
    rgb_flip = np.zeros(shape=dims, dtype=np.float64)

    # Read each sampled frame plus its horizontal flip.
    for i in range(num_samples):
        img_file = os.path.join(vid_name, 'vr_{0:02d}.png'.format(i))
        img = cv2.imread(img_file, cv2.IMREAD_GRAYSCALE)
        rgb[:, :, 0, i] = img
        rgb_flip[:, :, 0, i] = img[:, ::-1]

    # Transform every frame independently and re-stack into a batch.
    _, _, _, c = rgb.shape
    rgb_list = []
    for c_index in range(c):
        cur_img = rgb[:, :, :, c_index]
        cur_img_tensor = val_transform(cur_img)
        rgb_list.append(np.expand_dims(cur_img_tensor.numpy(), 0))

    rgb_np = np.concatenate(rgb_list, axis=0)
    prediction = np.zeros((num_categories, rgb.shape[3]))

    # NOTE(review): with the default num_samples=25 this index exceeds the
    # batch (rgb_np has num_samples entries: the slice below is empty and
    # rgb[..., index] would raise) — confirm callers pass num_samples > 50.
    index = 50
    input_data = rgb_np[index:index + 1, :, :, :]
    imgDataTensor = torch.from_numpy(input_data).type(torch.FloatTensor).cuda()
    imgDataVar = torch.autograd.Variable(imgDataTensor)

    # Grad-CAM wrt the requested target class at layer "Mixed_7c".
    probs, ids = gc.forward(imgDataVar)
    ids_ = torch.LongTensor([[target]] * len(imgDataVar)).to(
        torch.device("cuda"))
    gc.backward(ids=ids_)
    regions = gc.generate(target_layer="Mixed_7c")
    save_gradcam(vid_name.split("/")[-1] + ".png",
                 gcam=regions[0, 0],
                 raw_image=rgb[:, :, :, index])

    return prediction
Beispiel #8
0
def demo2(image_paths, output_dir, cuda):
    """
    Generate Grad-CAM at different layers of ResNet-152
    """

    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Pretrained ResNet-152 in eval mode.
    model = models.resnet152(pretrained=True)
    model.to(device)
    model.eval()

    # The four residual layers
    target_layers = ["relu", "layer1", "layer2", "layer3", "layer4"]
    target_class = 243  # "bull mastif"

    # Preprocess every input, remembering the raw copies for the overlay.
    images = []
    raw_images = []
    print("Images:")
    for i, image_path in enumerate(image_paths):
        print("\t#{}: {}".format(i, image_path))
        preprocessed, raw = preprocess(image_path)
        images.append(preprocessed)
        raw_images.append(raw)
    images = torch.stack(images).to(device)

    # Forward once; backpropagate the fixed target class for the whole batch.
    gcam = GradCAM(model=model)
    probs, ids = gcam.forward(images)
    ids_ = torch.LongTensor([[target_class]] * len(images)).to(device)
    gcam.backward(ids=ids_)

    for target_layer in target_layers:
        print("Generating Grad-CAM @{}".format(target_layer))

        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(
                j, classes[target_class], float(probs[ids == target_class])))

            out_name = "{}-{}-gradcam-{}-{}.png".format(
                j, "resnet152", target_layer, classes[target_class])
            save_gradcam(
                filename=osp.join(output_dir, out_name),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )
Beispiel #9
0
def gradcam_classify(model):
    """Run top-1 Grad-CAM over every image in samples/ and save overlays."""
    image_paths = [
        os.path.join("samples/", entry) for entry in os.listdir("samples/")
    ]

    target_layer = 'layer4'
    topk = 1
    output_dir = 'results'

    classes = getclasses()

    model.eval()

    # Images
    images, raw_images = load_images(image_paths)
    images = torch.stack(images).to(device)

    # =========================================================================

    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)  # sorted

    # =========================================================================
    print("Grad-CAM in action:")
    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    for i in range(topk):
        # Grad-CAM wrt each image's rank-i prediction.
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)

        print('len images = ', len(images))

        for j in range(len(images)):
            # The module-level `filename` assignment is kept on purpose:
            # the last written path stays visible to other code.
            global filename
            filename = osp.join(
                output_dir,
                "{}-{}-gradcam-{}-{}.png".format(j, 'resnet', target_layer,
                                                 classes[ids[j, i]]),
            )
            save_gradcam(
                filename=filename,
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )
Beispiel #10
0
def process_a_batch(image_paths, target_layer, arch, topk, output_dir, cuda):
    """Save Grad-CAM heatmaps for one batch of images, then release all GPU
    state.

    The explicit `del`s and `empty_cache()` tail suggest this is called
    repeatedly over many batches to keep GPU memory bounded.
    """
    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Model from torchvision
    model = models.__dict__[arch](pretrained=True)
    model.to(device)
    model.eval()

    # Images
    images = load_images_only(image_paths)
    images = torch.stack(images).to(device)

    # Vanilla backprop pass to obtain class ids sorted by probability.
    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)  # sorted

    # =========================================================================
    print("Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    for i in range(topk):
        # Grad-CAM wrt each image's rank-i predicted class.
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)
        regions = regions.cpu().numpy()
        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(image_paths[j],
                                              classes[ids[j, i]], probs[j, i]))
            # Grad-CAM
            save_cam(filename=os.path.join(
                output_dir,
                "{}-{}-{}-{:.3}.png".format(os.path.basename(image_paths[j]),
                                            classes[ids[j, i]], ids[j, i],
                                            probs[j, i]),
            ),
                     gcam=regions[j, 0])
    # Tear down hooks first, then drop every reference so empty_cache() can
    # actually reclaim the CUDA buffers.
    bp.remove_hook()
    gcam.remove_hook()
    del bp
    del images
    del gcam
    del model
    del regions
    del probs
    del ids
    del _
    torch.cuda.empty_cache()
Beispiel #11
0
def main(image_path, cuda):
    """Visualize ResNet-152 Grad-CAM for one image at each residual layer."""
    device = torch.device(
        "cuda" if cuda and torch.cuda.is_available() else "cpu")

    if cuda:
        current_device = torch.cuda.current_device()
        print("Running on the GPU:",
              torch.cuda.get_device_name(current_device))
    else:
        print("Running on the CPU")

    # Synset words: keep only the first short name of each synset entry.
    classes = list()
    with open("samples/synset_words.txt") as lines:
        for line in lines:
            line = line.strip().split(" ", 1)[1]
            line = line.split(", ", 1)[0].replace(" ", "_")
            classes.append(line)

    # Model
    model = models.resnet152(pretrained=True)
    model.to(device)
    model.eval()

    # Image: BGR->RGB, resize to 224x224, ImageNet normalization.
    raw_image = cv2.imread(image_path)[..., ::-1]
    raw_image = cv2.resize(raw_image, (224, ) * 2)
    image = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])(raw_image).unsqueeze(0)
    image = image.to(device)

    gcam = GradCAM(model=model)
    predictions = gcam.forward(image)
    # NOTE(review): forward() presumably returns (probs, idx); if so,
    # predictions[0][1] picks the *second probability*, not the top class
    # index (predictions[1][0]) — verify against this GradCAM version.
    top_idx = predictions[0][1]

    for target_layer in ["layer1", "layer2", "layer3", "layer4"]:
        print("Generating Grad-CAM @{}".format(target_layer))

        # Grad-CAM: backward is re-run before generating at each layer.
        gcam.backward(idx=top_idx)
        region = gcam.generate(target_layer=target_layer)

        save_gradcam(
            "results/{}-gradcam-{}-{}.png".format("resnet152", target_layer,
                                                  classes[top_idx]),
            region,
            raw_image,
        )
Beispiel #12
0
def demo2(image_paths, cuda):
    """
    Generate Grad-CAM at different layers of ResNet-152
    """

    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Pretrained ResNet-152 in inference mode.
    model = models.resnet152(pretrained=True)
    model.to(device)
    model.eval()

    # The four residual layers
    target_layers = ["layer1", "layer2", "layer3", "layer4"]

    # Preprocess the inputs, keeping the originals for the visualization.
    images = []
    raw_images = []
    print("Images:")
    for i, image_path in enumerate(image_paths):
        print("\t#{}: {}".format(i, image_path))
        tensor_img, raw_img = preprocess(image_path)
        images.append(tensor_img)
        raw_images.append(raw_img)
    images = torch.stack(images).to(device)

    # Backpropagate each image's own top-1 prediction.
    gcam = GradCAM(model=model)
    probs, ids = gcam.forward(images)
    top_ids = ids[:, [0]]
    gcam.backward(ids=top_ids)

    for target_layer in target_layers:
        print("Generating Grad-CAM @{}".format(target_layer))

        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, 0]],
                                              probs[j, 0]))

            out_name = "results/{}-{}-gradcam-{}-{}.png".format(
                j, "resnet152", target_layer, classes[top_ids[j]])
            save_gradcam(
                filename=out_name,
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )
Beispiel #13
0
def demo4(image_paths, output_dir, cuda):
    """
    Generate Grad-CAM at different layers of ResNet-152
    """

    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Restore the fine-tuned model saved as a whole-module pickle.
    model = torch.load('best_all.pth')  # load model
    model.to(device)
    model.eval()

    # The layers you want to see
    target_layers = ["base_network.layer4.2.conv3"]
    target_class = 4  # "lake"

    # Inputs plus their raw counterparts for the overlays.
    images, raw_images = load_images(image_paths)
    images = torch.stack(images).to(device)

    # Single forward pass, then backprop the fixed target class.
    gcam = GradCAM(model=model)
    probs, ids = gcam.forward(images)
    ids_ = torch.LongTensor([[target_class]] * len(images)).to(device)
    gcam.backward(ids=ids_)

    for target_layer in target_layers:
        print("Generating Grad-CAM @{}".format(target_layer))

        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(
                j, classes[target_class], float(probs[ids == target_class])))

            out_file = osp.join(
                output_dir,
                "{}-{}-gradcam-{}-{}.png".format(j, "resnet152",
                                                 target_layer,
                                                 classes[target_class]),
            )
            save_gradcam(filename=out_file,
                         gcam=regions[j, 0],
                         raw_image=raw_images[j])
Beispiel #14
0
def extract_object(original_image, cuda = 1):
    """Run ResNet-152 Grad-CAM on a batch of images and return, per image,
    the extracted object crop and its bounding box from save_gradcam()."""
    output_dir = "./output"

    # device = get_device(cuda)
    device = torch.device("cuda" if cuda else "cpu")

    # Synset words
    classes = get_classtable()

    # Model — note that model.to(device) is commented out below, so the
    # model is never moved onto `device`.
    model = models.resnet152(pretrained=True)
  #  device = "cuda"
  #   model.to(device)
    model.eval()

    target_layer = "layer4"
    target_class = 243  # "bull mastif"


    # Images
    # images = original_image.to(device)

    gcam = GradCAM(model=model)
    probs, ids = gcam.forward(original_image)
#    ids_ = torch.LongTensor([[target_class]] * len(original_image)).to(device)
#     pdb.set_trace()
    # NOTE(review): backward() receives the full sorted `ids` rather than the
    # `ids_` built from target_class (commented out above), leaving
    # `target_class` unused — confirm which class should be explained.
    gcam.backward(ids=ids)

    # Grad-CAM: collect per-image crops and bounding boxes.
    dogs = []
    red_bboxs = []
    regions = gcam.generate(target_layer=target_layer)
    for j in range(len(original_image)):
        dog, red_bbox = save_gradcam(gcam=regions[j, 0], raw_image=original_image[j])
        dogs.append(dog)
        red_bboxs.append(red_bbox)
        print(red_bbox)
    # tensor_dogs = torch.stack(dogs)
    # tensor_red_bbox = torch.stack(red_bboxs)
    #del gcam,model,dogs,red_bboxs
    return dogs, red_bboxs
Beispiel #15
0
def guided_backprop_eye(image, name, net):
    """Build a horizontal strip: raw image | probability banner | guided BP |
    Grad-CAM | guided Grad-CAM, for the single image `image[name]`.

    Relies on module-level `shape` and `classes` defined elsewhere.
    """
    img = torch.stack([image[name]])
    bp = BackPropagation(model=net)
    probs, ids = bp.forward(img)
    gcam = GradCAM(model=net)
    _ = gcam.forward(img)

    gbp = GuidedBackPropagation(model=net)
    _ = gbp.forward(img)

    # Guided Backpropagation wrt the top-1 predicted class.
    actual_status = ids[:, 0]
    gbp.backward(ids=actual_status.reshape(1, 1))
    gradients = gbp.generate()

    # Grad-CAM at the model's 'last_conv' layer for the same class.
    gcam.backward(ids=actual_status.reshape(1, 1))
    regions = gcam.generate(target_layer='last_conv')

    # Probability of the predicted class; for class 0 the runner-up column
    # is used instead — NOTE(review): confirm this class-0 special case.
    prob = probs.data[:, 0]
    if actual_status == 0:
        prob = probs.data[:, 1]

    # Render the probability as text on a small black banner.
    prob_image = np.zeros((shape[0], 60, 3), np.uint8)
    cv2.putText(prob_image, '%.1f%%' % (prob * 100), (5, 15),
                cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1, cv2.LINE_AA)

    # Replicate the single-channel gradient image to 3 channels.
    guided_bpg_image = get_gradient_image(gradients[0])
    guided_bpg_image = cv2.merge(
        (guided_bpg_image, guided_bpg_image, guided_bpg_image))

    grad_cam_image = get_gradcam_image(gcam=regions[0, 0],
                                       raw_image=image[name + '_raw'])
    # Guided Grad-CAM = element-wise product of CAM regions and gradients.
    guided_gradcam_image = get_gradient_image(torch.mul(regions, gradients)[0])
    guided_gradcam_image = cv2.merge(
        (guided_gradcam_image, guided_gradcam_image, guided_gradcam_image))
    print(classes[actual_status.data], probs.data[:, 0] * 100)

    return cv2.hconcat([
        image[name + '_raw'], prob_image, guided_bpg_image, grad_cam_image,
        guided_gradcam_image
    ])
def guided_gradcam(model, device, raw_image, image, CONFIG, topk):
    """Compute Guided Grad-CAM maps for the top-k predicted classes.

    Returns:
        (results, probs, idx): the per-class gradient visualizations, the
        sorted probabilities, and the sorted class indices.
    """
    # =========================================================================
    print('Guided Grad-CAM')
    # =========================================================================
    gcam = GradCAM(model=model)
    gbp = GuidedBackPropagation(model=model)
    probs, idx = gbp.forward(image.to(device))

    # BUG FIX: `results` was never initialized, so the first append raised
    # NameError (unless an unseen module-level `results` existed).
    results = []
    for i in range(topk):
        gcam.backward(idx=idx[i])
        region = gcam.generate(target_layer=CONFIG['target_layer'])

        gbp.backward(idx=idx[i])
        feature = gbp.generate()

        # Upsample the CAM to the gradient's spatial size and modulate.
        h, w, _ = feature.shape
        region = cv2.resize(region, (w, h))[..., np.newaxis]
        output = feature * region

        results.append(find_gradient(output))
        print('[{:.5f}] {}'.format(probs[i], idx[i].cpu().numpy()))
    return (results, probs, idx)
Beispiel #17
0
def get_grad_cam(device, model, classes, images, labels, target_layers):
    """Return Grad-CAM maps for every requested layer, plus predictions.

    Backpropagates wrt the ground-truth `labels` rather than the predicted
    classes, so the maps explain each image's true class.

    Returns:
        (layers_region, pred_probs, pred_ids): dict of layer name -> batch
        of activation maps, plus the predicted probabilities and class ids.
    """
    model.to(device)
    model.eval()

    # Restrict hook registration to the layers that will be queried.
    gcam = GradCAM(model=model, candidate_layers=target_layers)

    # images = torch.stack(images).to(device)

    pred_probs, pred_ids = gcam.forward(images)

    # One column vector of ground-truth ids per image.
    # target_ids = torch.LongTensor(labels).view(len(images), -1).to(device)
    target_ids = labels.view(len(images), -1).to(device)
    gcam.backward(ids=target_ids)

    # Map each layer name to its batch of Grad-CAM activation maps.
    layers_region = {
        layer: gcam.generate(target_layer=layer) for layer in target_layers
    }

    # Done — detach the forward/backward hooks from the model.
    gcam.remove_hook()

    return layers_region, pred_probs, pred_ids
Beispiel #18
0
def demo3(image_paths, topk, output_dir, cuda):
    """
    Generate Grad-CAM with original models
    """

    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Third-party model from my other repository, e.g. Xception v1 ported from Keras
    model = torch.hub.load("kazuto1011/pytorch-ported-models",
                           "xception_v1",
                           pretrained=True)
    model.to(device)
    model.eval()

    # Check available layer names
    print("Layers:")
    for layer_name, _module in model.named_modules():
        print("\t", layer_name)

    # Here we choose the last convolution layer
    target_layer = "exit_flow.conv4"

    # Preprocessing: resize, RGB order, model-specific mean/std, CHW layout.
    def _preprocess(image_path):
        raw_image = cv2.imread(image_path)
        raw_image = cv2.resize(raw_image, model.image_shape)
        image = torch.FloatTensor(raw_image[..., ::-1].copy())
        image -= model.mean
        image /= model.std
        return image.permute(2, 0, 1), raw_image

    # Images
    images = []
    raw_images = []
    print("Images:")
    for i, image_path in enumerate(image_paths):
        print("\t#{}: {}".format(i, image_path))
        image, raw_image = _preprocess(image_path)
        images.append(image)
        raw_images.append(raw_image)
    images = torch.stack(images).to(device)

    print("Grad-CAM:")

    gcam = GradCAM(model=model)
    probs, ids = gcam.forward(images)

    for i in range(topk):

        # Backprop each image's rank-i class, then read the layer maps.
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]],
                                              probs[j, i]))

            # Grad-CAM
            out_name = "{}-{}-gradcam-{}-{}.png".format(j, "xception_v1",
                                                        target_layer,
                                                        classes[ids[j, i]])
            save_gradcam(
                filename=osp.join(output_dir, out_name),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )
Beispiel #19
0
model = build_model(cfg, join(opt.model, 'best.ckpt'))
dataset = ImageDataset(opt.dataset, cfg, mode='heatmap')
dataloader = DataLoader(dataset,
                        batch_size=opt.batch_size,
                        shuffle=False,
                        drop_last=False)

gcam = GradCAM(model=model)
layer = 'backbone.features.denseblock4.denselayer15'

for images, paths, labels in dataloader:
    probs, ids = gcam.forward(images)
    processed_images = [[] for _ in range(len(images))]
    for i in range(ids.shape[1]):
        gcam.backward(ids[:, [i]])
        regions = gcam.generate(target_layer=layer)
        for j in range(len(images)):
            print(f"#{j}: {classes[ids[j, i]]} ({probs[j, i]:.5f})")
            # Grad-CAM
            raw_image = imread(paths[j])
            combined = combine_image_and_gcam(regions[j, 0], raw_image)
            processed_images[j].append(combined.astype(np.uint8))

    for j, (image_list, path) in enumerate(zip(processed_images, paths)):
        plt.figure(figsize=(16, 4))
        for i, image in enumerate(image_list):
            plt.subplot(1,
                        len(image_list),
                        i + 1,
                        xticks=[],
                        yticks=[],
Beispiel #20
0
def main(image_path, arch, topk, cuda, target_layer):
    """Run Grad-CAM for `arch` on one image and save the top-k heatmaps."""

    # Per-architecture target layer and expected input resolution.
    CONFIG = {
        'resnet152': {
            'target_layer': 'layer4.2',
            'input_size': 224
        },
        'vgg19': {
            'target_layer': 'features.36',
            'input_size': 224
        },
        'vgg19_bn': {
            'target_layer': 'features.52',
            'input_size': 224
        },
        'inception_v3': {
            'target_layer': 'Mixed_7c',
            'input_size': 299
        },
        'densenet201': {
            'target_layer': 'features.denseblock4',
            'input_size': 224
        },
        # Add your model
        'se_resnet': {
            'target_layer': target_layer,
            'input_size': 32
        },
    }.get(arch)

    cuda = cuda and torch.cuda.is_available()

    if cuda:
        current_device = torch.cuda.current_device()
        print('Running on the GPU:',
              torch.cuda.get_device_name(current_device))
    else:
        print('Running on the CPU')

    # Synset words: keep only the first short name of each synset entry.
    with open('samples/synset_words.txt') as lines:
        classes = [
            line.strip().split(' ', 1)[1].split(', ', 1)[0].replace(' ', '_')
            for line in lines
        ]

    # Model
    #model = models.__dict__[arch](pretrained=True)
    model = models.se_resnet.se_resnet50(num_classes=100)

    # Image: BGR->RGB, resize to the model's input size, normalize.
    raw_image = cv2.imread(image_path)[..., ::-1]
    raw_image = cv2.resize(raw_image, (CONFIG['input_size'], ) * 2)
    image = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])(raw_image)

    if cuda:
        model.cuda()
        image = image.cuda()

    # =========================================================================
    print('Grad-CAM')
    # =========================================================================
    gcam = GradCAM(model=model)
    probs, idx = gcam.forward(to_var(image))

    for rank in range(topk):
        gcam.backward(idx=idx[rank])
        heatmap = gcam.generate(target_layer=CONFIG['target_layer'])

        #save_gradcam('results/{}_gcam_{}.png'.format(classes[idx[rank]], arch), heatmap, raw_image)  # NOQA
        save_gradcam('results/{}.png'.format(target_layer), heatmap,
                     raw_image)  # NOQA
        print('[{:.5f}] {}'.format(probs[rank], classes[idx[rank]]))
Beispiel #21
0
def fulltest(image_paths, target_layer, arch, topk, output_dir, cuda):
    """Visualize model responses for multiple images.

    Restores a checkpointed classifier and runs four attribution methods
    over ``image_paths``, saving every result as a PNG in ``output_dir``:
    vanilla backpropagation, deconvolution, guided backpropagation,
    Grad-CAM, and guided Grad-CAM.

    Args:
        image_paths: paths of the input images.
        target_layer: module name used for the Grad-CAM activation maps.
        arch: architecture name understood by ``make_model``.
        topk: number of highest-probability classes to visualize per image.
        output_dir: directory where the PNG results are written.
        cuda: truthy to run on the GPU when available (see ``get_device``).
    """
    device = get_device(cuda)

    # Class index -> human-readable label.
    classes = get_classtable()

    # Restore the trained weights.
    # NOTE(review): ``pretrained=True`` downloads ImageNet weights that are
    # immediately overwritten by the checkpoint below; kept only to preserve
    # the existing ``make_model`` call -- confirm whether it can be False.
    best_model = torch.load(config.weights + 'ct-cn/' + 'model_best.pth.tar')
    print(best_model["state_dict"].keys())
    model = make_model(arch, num_classes=config.num_classes, pretrained=True)
    model.load_state_dict(best_model["state_dict"])
    model.to(device)
    model.eval()

    # Images
    images, raw_images = load_images(image_paths)
    images = torch.stack(images).to(device)

    # Common usage:
    # 1. Wrap your model with visualization classes defined in grad_cam.py
    # 2. Run forward() with images
    # 3. Run backward() with a list of specific classes
    # 4. Run generate() to export results

    # =========================================================================
    print("Vanilla Backpropagation:")

    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)  # class ids sorted by descending prob
    print(probs)
    print(ids)

    for i in range(topk):
        bp.backward(ids=ids[:, [i]])
        gradients = bp.generate()

        # Save results as image files
        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j,
                                                                           i]))

            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-vanilla-{}.png".format(j, arch, classes[ids[j, i]]),
                ),
                gradient=gradients[j],
            )

    # Remove all the hook functions registered on "model"
    bp.remove_hook()

    # =========================================================================
    print("Deconvolution:")

    deconv = Deconvnet(model=model)
    _ = deconv.forward(images)

    for i in range(topk):
        deconv.backward(ids=ids[:, [i]])
        gradients = deconv.generate()

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j,
                                                                           i]))

            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-deconvnet-{}.png".format(j, arch, classes[ids[j,
                                                                         i]]),
                ),
                gradient=gradients[j],
            )

    deconv.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for i in range(topk):
        # Guided Backpropagation
        gbp.backward(ids=ids[:, [i]])
        gradients = gbp.generate()

        # Grad-CAM
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j,
                                                                           i]))

            # Guided Backpropagation
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided-{}.png".format(j, arch, classes[ids[j, i]]),
                ),
                gradient=gradients[j],
            )

            # Grad-CAM
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(j, arch, target_layer,
                                                     classes[ids[j, i]]),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )

            # Guided Grad-CAM: element-wise product of guided gradients and
            # the Grad-CAM regions.
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided_gradcam-{}-{}.png".format(
                        j, arch, target_layer, classes[ids[j, i]]),
                ),
                gradient=torch.mul(regions, gradients)[j],
            )
Beispiel #22
0
def demo1(image_paths, target_layer, arch, topk, output_dir, cuda):
    """
    Visualize model responses given multiple images
    """

    device = get_device(cuda)

    # Human-readable class names, indexed by class id.
    classes = get_classtable()

    # Pretrained torchvision model, frozen in inference mode.
    model = models.__dict__[arch](pretrained=True)
    model.to(device)
    model.eval()

    # Load and preprocess every input image up front.
    print("Images:")
    loaded = []
    for idx, image_path in enumerate(image_paths):
        print("\t#{}: {}".format(idx, image_path))
        loaded.append(preprocess(image_path))
    raw_images = [raw for _, raw in loaded]
    images = torch.stack([img for img, _ in loaded]).to(device)

    """
    Common usage:
    1. Wrap your model with visualization classes defined in grad_cam.py
    2. Run forward() with images
    3. Run backward() with a list of specific classes
    4. Run generate() to export results
    """

    n_images = len(images)

    # =========================================================================
    print("Vanilla Backpropagation:")

    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)

    for rank in range(topk):
        # Backpropagate from the rank-th most confident class of each image.
        bp.backward(ids=ids[:, [rank]])
        gradients = bp.generate()

        # Save results as image files
        for j in range(n_images):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, rank]], probs[j, rank]))

            filename = osp.join(
                output_dir,
                "{}-{}-vanilla-{}.png".format(j, arch, classes[ids[j, rank]]),
            )
            save_gradient(filename=filename, gradient=gradients[j])

    # Detach every hook that was registered on "model".
    bp.remove_hook()

    # =========================================================================
    print("Deconvolution:")

    deconv = Deconvnet(model=model)
    _ = deconv.forward(images)

    for rank in range(topk):
        deconv.backward(ids=ids[:, [rank]])
        gradients = deconv.generate()

        for j in range(n_images):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, rank]], probs[j, rank]))

            filename = osp.join(
                output_dir,
                "{}-{}-deconvnet-{}.png".format(j, arch, classes[ids[j, rank]]),
            )
            save_gradient(filename=filename, gradient=gradients[j])

    deconv.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for rank in range(topk):
        # Guided Backpropagation
        gbp.backward(ids=ids[:, [rank]])
        gradients = gbp.generate()

        # Grad-CAM
        gcam.backward(ids=ids[:, [rank]])
        regions = gcam.generate(target_layer=target_layer)

        for j in range(n_images):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, rank]], probs[j, rank]))

            # Guided Backpropagation
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided-{}.png".format(j, arch, classes[ids[j, rank]]),
                ),
                gradient=gradients[j],
            )

            # Grad-CAM
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(
                        j, arch, target_layer, classes[ids[j, rank]]
                    ),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )

            # Guided Grad-CAM: guided gradients masked by the CAM regions.
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided_gradcam-{}-{}.png".format(
                        j, arch, target_layer, classes[ids[j, rank]]
                    ),
                ),
                gradient=torch.mul(regions, gradients)[j],
            )
Beispiel #23
0
def demo3(image_paths, topk, output_dir, cuda):
    """
    Generate Grad-CAM with original models
    """

    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Third-party model from my other repository, e.g. Xception v1 ported from Keras
    model = torch.hub.load(
        "kazuto1011/pytorch-ported-models", "xception_v1", pretrained=True
    )
    model.to(device)
    model.eval()

    # Dump module names so the target layer chosen below can be verified.
    print("Layers:")
    for name, _module in model.named_modules():
        print("\t", name)

    # Here we choose the last convolution layer
    target_layer = "exit_flow.conv4"

    def _preprocess(image_path):
        # Keep the resized BGR image for visualization; feed the network a
        # normalized, channel-first RGB tensor.
        raw_image = cv2.resize(cv2.imread(image_path), model.image_shape)
        image = torch.FloatTensor(raw_image[..., ::-1].copy())
        image = (image - model.mean) / model.std
        return image.permute(2, 0, 1), raw_image

    # Images
    images, raw_images = [], []
    print("Images:")
    for i, image_path in enumerate(image_paths):
        print("\t#{}: {}".format(i, image_path))
        image, raw_image = _preprocess(image_path)
        images.append(image)
        raw_images.append(raw_image)
    images = torch.stack(images).to(device)

    print("Grad-CAM:")

    gcam = GradCAM(model=model)
    probs, ids = gcam.forward(images)

    for rank in range(topk):

        # Grad-CAM over the rank-th most confident class of each image.
        gcam.backward(ids=ids[:, [rank]])
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, rank]], probs[j, rank]))

            # Grad-CAM
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(
                        j, "xception_v1", target_layer, classes[ids[j, rank]]
                    ),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )
Beispiel #24
0
                if use_gpu:
                    inputs = inputs.cuda()

                inputs = inputs.view(1, inputs.size(0), inputs.size(1), inputs.size(2))

                probs, idx = gcam.forward(inputs)

                if (args.subtype == None):
                    comp_idx = idx[0]
                    item_id = 0
                else:
                    comp_idx = WBC_id
                    item_id = (np.where(idx.cpu().numpy() == (WBC_id)))[0][0]

                gcam.backward(idx=comp_idx)
                output = gcam.generate(target_layer = 'layer4.2') # for resnet

                heatmap = output
                original = inputs.data.cpu().numpy()

                original = np.transpose(original, (0,2,3,1))[0]
                original = original * cf.std + cf.mean
                original = np.uint8(original * 255.0)

                mask = np.uint8(heatmap * 255.0)

                blank_heatmap[:, (mode*160):480+(mode*160)] = cv2.resize(heatmap, (480, 480))
                mode += 1

            blank_heatmap[blank_heatmap > 1] = 1
            blank_heatmap = cv2.GaussianBlur(blank_heatmap, (15, 15), 0)
Beispiel #25
0
def grad_cam(model, image_paths, topk, output_dir, cuda, model_name,
             checkpoint):
    """Generate Grad-CAM visualizations for a CIFAR-100 classifier.

    Restores ``model`` from ``checkpoint``, runs a forward pass over all
    ``image_paths`` and, for each of the ``topk`` most probable classes,
    writes a Grad-CAM heat map blended with the raw image into
    ``output_dir``.

    Args:
        model: network to visualize; its weights are overwritten with
            ``checkpoint['state_dict']``.
        image_paths: paths of the input images.
        topk: number of highest-probability classes per image.
        output_dir: directory receiving the PNG files.
        cuda: truthy to use the GPU when available (see ``get_device``).
        model_name: label embedded in the output file names.
        checkpoint: loaded checkpoint dict holding a ``'state_dict'`` entry.
    """

    device = get_device(cuda)

    # CIFAR-100 class names, ordered by label id (0..99).
    classes = [
        'apple',  # id 0
        'aquarium_fish',
        'baby',
        'bear',
        'beaver',
        'bed',
        'bee',
        'beetle',
        'bicycle',
        'bottle',
        'bowl',
        'boy',
        'bridge',
        'bus',
        'butterfly',
        'camel',
        'can',
        'castle',
        'caterpillar',
        'cattle',
        'chair',
        'chimpanzee',
        'clock',
        'cloud',
        'cockroach',
        'couch',
        'crab',
        'crocodile',
        'cup',
        'dinosaur',
        'dolphin',
        'elephant',
        'flatfish',
        'forest',
        'fox',
        'girl',
        'hamster',
        'house',
        'kangaroo',
        'computer_keyboard',
        'lamp',
        'lawn_mower',
        'leopard',
        'lion',
        'lizard',
        'lobster',
        'man',
        'maple_tree',
        'motorcycle',
        'mountain',
        'mouse',
        'mushroom',
        'oak_tree',
        'orange',
        'orchid',
        'otter',
        'palm_tree',
        'pear',
        'pickup_truck',
        'pine_tree',
        'plain',
        'plate',
        'poppy',
        'porcupine',
        'possum',
        'rabbit',
        'raccoon',
        'ray',
        'road',
        'rocket',
        'rose',
        'sea',
        'seal',
        'shark',
        'shrew',
        'skunk',
        'skyscraper',
        'snail',
        'snake',
        'spider',
        'squirrel',
        'streetcar',
        'sunflower',
        'sweet_pepper',
        'table',
        'tank',
        'telephone',
        'television',
        'tiger',
        'tractor',
        'train',
        'trout',
        'tulip',
        'turtle',
        'wardrobe',
        'whale',
        'willow_tree',
        'wolf',
        'woman',
        'worm',
    ]

    # Restore the trained weights and switch to inference mode.
    model.load_state_dict(checkpoint['state_dict'])
    model.to(device)
    model.eval()

    # Last convolution block of the network. NOTE(review): the "module."
    # prefix suggests the model is wrapped in nn.DataParallel -- confirm
    # against the caller that builds ``model``.
    target_layer = "module.layer4"

    # Images
    images = []
    raw_images = []
    print("Images:")
    for i, image_path in enumerate(image_paths):
        print("\t#{}: {}".format(i, image_path))
        image, raw_image = preprocess(image_path)
        images.append(image)
        raw_images.append(raw_image)
    images = torch.stack(images).to(device)

    print("Grad-CAM:")

    gcam = GradCAM(model=model)
    probs, ids = gcam.forward(images)

    for i in range(topk):

        # Grad-CAM over the i-th most confident class of each image.
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j,
                                                                           i]))

            # Grad-CAM
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "top{}-{}-{}-gradcam-{}-{}-prob-({:.5f}).png".format(
                        i + 1, j, model_name, target_layer, classes[ids[j, i]],
                        probs[j, i]),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )
Beispiel #26
0
def demo3(image_paths, topk, output_dir, cuda, num_class, da_model):
    """Generate Grad-CAM maps for a domain-adaptation model.

    Loads either the STA or the OPDA checkpoint, selects a fixed set of
    target images from ``image_paths`` (either plain paths or a single
    ``.txt`` index file) and writes one Grad-CAM overlay per image and
    top-k class into ``output_dir``.

    Args:
        image_paths: image paths, or a one-element list holding a ``.txt``
            file whose lines start with an image path.
        topk: number of highest-probability classes per image.
        output_dir: output directory (created if missing).
        cuda: truthy to use the GPU when available (see ``get_device``).
        num_class: one-element sequence holding the number of classes.
        da_model: ``'STA'`` or ``'OPDA'``; selects checkpoint and layer.

    Raises:
        ValueError: if ``da_model`` is unknown or a requested target image
            cannot be found among ``image_paths``.
    """
    device = get_device(cuda)

    # ``num_class`` arrives as a one-element sequence; unpack the scalar.
    num_class, = num_class

    # Fixed subset of images to visualize.
    target_images = [
        'runway00', 'runway02', 'runway03', 'runway04', 'runway05', 'runway06',
        'runway07', 'runway08'
    ]
    # No label names available here; fall back to numeric class ids.
    classes = [str(i) for i in range(num_class)]

    if da_model == 'STA':
        feature_extractor = ResNetFc(
            model_name='resnet50',
            model_path=
            '/home/at7133/Research/Domain_adaptation/Separate_to_Adapt/resnet50.pth'
        )
        cls = CLS(feature_extractor.output_num(),
                  num_class,
                  bottle_neck_dim=256)
        model = nn.Sequential(feature_extractor, cls).cuda()
        model.load_state_dict(
            torch.load(
                '/home/at7133/Research/Domain_adaptation/Separate_to_Adapt/Only_source_classifier.pth'
            ))
        # Last 3x3 conv of the ResNet-50 backbone.
        target_layer = "0.model_resnet.layer4.2.conv3"
    elif da_model == 'OPDA':
        G, C = get_model('vgg', num_class=num_class, unit_size=1000)
        load_model(
            G, C,
            '/home/at7133/Research/Domain_adaptation/OPDA_BP/checkpoint/checkpoint_99'
        )
        model = nn.Sequential(G, C).cuda()
        target_layer = "0.lower.36"  #TODO find proper target layer
    else:
        # Previously an unknown value fell through to a NameError on
        # ``model``; fail with an explicit message instead.
        raise ValueError(f"Unknown da_model: {da_model!r}")

    model.to(device)
    model.eval()

    # Check available layer names
    print("Layers:")
    for m in model.named_modules():
        print("\t", m[0])

    # Preprocessing ----------------------------------------------------------
    def _normalize(img, mean, std):
        """Channel-wise (img - mean) / std; requires a torch.FloatTensor."""
        if isinstance(img, torch.FloatTensor):
            mean = torch.FloatTensor(mean)
            std = torch.FloatTensor(std)
        else:
            raise TypeError(f'Expected Torch floattensor, got {type(img)}')
        return (img - mean) / std

    def _preprocess(image_path, img_shape):
        """Return (CHW float tensor, raw BGR image) for one image path."""
        raw_image = cv2.imread(image_path)
        raw_image = cv2.resize(raw_image, (img_shape, img_shape))
        image = torch.FloatTensor(raw_image[..., ::-1].copy())
        image = image / 255.0
        # OPDA was trained with ImageNet normalization; STA was not.
        if da_model == "OPDA":
            image = _normalize(image, [0.485, 0.456, 0.406],
                               [0.229, 0.224, 0.225])
        image = image.permute(2, 0, 1)
        return image, raw_image

    # Image selection ----------------------------------------------------------
    def _load_txt(img_paths):
        """Read image paths (first whitespace token per line) from a file."""
        assert isfile(img_paths), f"Image path {img_paths} doesn't exist"
        with open(img_paths, 'rb') as fr:
            return [
                img_path.split()[0].decode("utf-8")
                for img_path in fr.readlines()
            ]

    def _filter_images(image_paths, req_images):
        """Keep only the paths whose basename (sans extension) is requested."""
        img_files = [p.split('/')[-1].split('.')[0] for p in image_paths]
        satisfied_images = []
        for item in req_images:
            try:
                # Unpacking fails (ValueError) when there is no unique match.
                [[idx]] = np.argwhere(np.isin(img_files, item))
            except ValueError:
                raise ValueError(f'{item} not found in the given paths')
            satisfied_images.append(image_paths[idx])
        return satisfied_images

    def _load_images(image_paths, req_images):
        """Load the requested images as a stacked tensor plus raw copies."""
        if image_paths[0].endswith('.txt'):
            # Make sure only one text file is given as input.
            assert len(image_paths) == 1
            image_paths = _load_txt(image_paths[0])
            image_paths = _filter_images(image_paths, req_images)
            assert len(image_paths) == len(
                req_images), " All target images are not found"
        images = []
        raw_images = []
        print("Images:")
        for i, image_path in enumerate(image_paths):
            print("\t#{}: {}".format(i, image_path))
            image, raw_image = _preprocess(image_path, 224)
            images.append(image)
            raw_images.append(raw_image)
        images = torch.stack(images).to(device)
        return images, raw_images

    images, raw_images = _load_images(image_paths, target_images)
    print("Grad-CAM:")
    gcam = GradCAM(model=model)
    probs, ids = gcam.forward(images)
    if not isdir(output_dir):
        os.makedirs(output_dir)
    for i in range(topk):

        # Grad-CAM over the i-th most confident class of each image.
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j,
                                                                           i]))

            # Grad-CAM
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(j, "xception_v1",
                                                     target_layer,
                                                     classes[ids[j, i]]),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )
Beispiel #27
0
def demo1(image_paths, target_layer, arch, topk, output_dir, cuda, checkpoint,
          distribute):
    """Visualize model responses given multiple images.

    Restores ``arch`` from ``checkpoint + arch + '/model_best.pth.tar'`` and
    runs vanilla backprop, deconvolution, guided backprop and Grad-CAM over
    ``image_paths``. With ``only_CAM`` enabled (the current default below),
    only the Grad-CAM overlays are written to ``output_dir``.

    Args:
        image_paths: paths of the input images.
        target_layer: module name for the Grad-CAM activation maps.
        arch: torchvision architecture name (key into ``models.__dict__``).
        topk: number of highest-probability classes per image.
        output_dir: directory receiving the PNG files.
        cuda: truthy to use the GPU when available (see ``get_device``).
        checkpoint: directory prefix containing ``<arch>/model_best.pth.tar``.
        distribute: > 0.5 when the checkpoint was saved from a distributed /
            DataParallel model (keys carry a ``module.`` prefix).
    """

    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Model from torchvision; weights come from the checkpoint below.
    model = models.__dict__[arch]()
    model = model.cuda()
    checkpoint = checkpoint + arch + '/model_best.pth.tar'
    check_point = torch.load(checkpoint,
                             map_location=lambda storage, loc: storage.cuda(0))

    distributed_model = (distribute > 0.5)
    # Skip saving the vanilla/deconv/guided gradient images; keep Grad-CAM.
    only_CAM = True

    if distributed_model:
        # Strip the DataParallel prefix from every key so the weights fit
        # the plain (unwrapped) torchvision model.
        # NOTE(review): assumes every key starts with 'module.' or
        # 'module.1.'; keys without either prefix would be corrupted.
        from collections import OrderedDict
        new_check_point = OrderedDict()
        for k, v in check_point['state_dict'].items():
            if k.startswith('module.1.'):
                name = k[9:]  # remove `module.1.`
            else:
                name = k[7:]  # remove `module.`
            new_check_point[name] = v
        # load params
        model.load_state_dict(new_check_point)
    else:
        model.load_state_dict(check_point['state_dict'])

    model.to(device)
    model.eval()

    # Images
    images = []
    raw_images = []
    print("Images:")
    for i, image_path in enumerate(image_paths):
        print("\t#{}: {}".format(i, image_path))
        image, raw_image = preprocess(image_path)
        images.append(image)
        raw_images.append(raw_image)
    images = torch.stack(images).to(device)
    """
    Common usage:
    1. Wrap your model with visualization classes defined in grad_cam.py
    2. Run forward() with images
    3. Run backward() with a list of specific classes
    4. Run generate() to export results
    """

    # =========================================================================
    print("Vanilla Backpropagation:")

    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)

    for i in range(topk):
        # In this example, we specify the high confidence classes
        bp.backward(ids=ids[:, [i]])
        gradients = bp.generate()

        # Save results as image files
        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j,
                                                                           i]))

            if not only_CAM:
                save_gradient(
                    filename=osp.join(
                        output_dir,
                        "{}-{}-vanilla-{}.png".format(j, arch,
                                                      classes[ids[j, i]]),
                    ),
                    gradient=gradients[j],
                )

    # Remove all the hook function in the "model"
    bp.remove_hook()

    # =========================================================================
    print("Deconvolution:")

    deconv = Deconvnet(model=model)
    _ = deconv.forward(images)

    for i in range(topk):
        deconv.backward(ids=ids[:, [i]])
        gradients = deconv.generate()

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j,
                                                                           i]))

            if not only_CAM:
                save_gradient(
                    filename=osp.join(
                        output_dir,
                        "{}-{}-deconvnet-{}.png".format(
                            j, arch, classes[ids[j, i]]),
                    ),
                    gradient=gradients[j],
                )

    deconv.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for i in range(topk):
        # Guided Backpropagation
        gbp.backward(ids=ids[:, [i]])
        gradients = gbp.generate()

        # Grad-CAM
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j,
                                                                           i]))

            # Guided Backpropagation
            if not only_CAM:
                save_gradient(
                    filename=osp.join(
                        output_dir,
                        "{}-{}-guided-{}.png".format(j, arch, classes[ids[j,
                                                                          i]]),
                    ),
                    gradient=gradients[j],
                )

            # Grad-CAM: the file name reuses the input image path so each
            # overlay sits next to its source image name.
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-{}.png".format(
                        osp.splitext(image_paths[j])[0],
                        arch,
                        target_layer),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )

            # Guided Grad-CAM
            if not only_CAM:
                save_gradient(
                    filename=osp.join(
                        output_dir,
                        "{}-{}-guided_gradcam-{}-{}.png".format(
                            j, arch, target_layer, classes[ids[j, i]]),
                    ),
                    gradient=torch.mul(regions, gradients)[j],
                )
Beispiel #28
0
def main():
    """Run Grad-CAM on the CXR test split and save per-image heat maps.

    Builds a ``CombineNet`` wrapped in ``nn.DataParallel``, restores it from
    ``args.load_model + '/best.ckpt'`` and, for every test batch whose label
    vector equals ``[0, 0, 0, 1]``, writes a Grad-CAM overlay and the
    corresponding x-ray image into ``args.output_dir``.
    """
    device = torch.device("cuda")

    test_dataset = CXRDataset(args.image_dir, ['TEST'])
    test_loader = DataLoader(test_dataset,
                             batch_size=args.batch_size,
                             pin_memory=True)

    model = CombineNet().to(device)
    model = nn.DataParallel(model)
    print("Parameters: {}".format(sum(p.numel() for p in model.parameters())))
    # NOTE(review): eval() is called before the weights are loaded below;
    # that is harmless (it only flips the train/eval mode flag).
    model.eval()

    if args.load_model:
        checkpoint = torch.load(args.load_model + "/best.ckpt")
        # The DataParallel wrapper expects 'module.'-prefixed keys, so the
        # prefix-stripping below is left disabled.
        #model_weights = OrderedDict()
        #for k, v in checkpoint['model'].items():
        #	name = k[7:] # remove `module.`
        #	model_weights[name] = v
        model.load_state_dict(checkpoint['model'])
        epoch = checkpoint['epoch']
        print('Loading model: {}, from epoch: {}'.format(
            args.load_model, epoch))
    else:
        # NOTE(review): execution continues with random weights here.
        print('Model: {} not found'.format(args.load_model))

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    # Target conv layer inside the DataParallel-wrapped feature model.
    target_layer = 'module.feature_model.layer_block.3.1.conv2'
    #print(model.module.keys())
    print(model.module.feature_model.layer_block[3][1].conv2)

    epoch_iterator = tqdm(test_loader, desc="Iteration")
    for i, batch in enumerate(epoch_iterator, 0):
        # Keep the untransformed images on CPU for saving; move the rest to GPU.
        raw_images, _, _ = batch
        batch_device = tuple(t.to(device, non_blocking=True) for t in batch)
        images, labels, img_ids = batch_device

        # Only visualize batches whose label vector is exactly [0, 0, 0, 1]
        # ("severe" per the output file names) -- presumably this only works
        # as intended with batch_size matching the label vector; TODO confirm.
        if (labels.cpu().detach().numpy() == np.array([0, 0, 0, 1])).all():
            # Backprop from the single most confident class.
            probs, ids = gcam.forward(images)
            gcam.backward(ids=ids[:, [0]])
            regions = gcam.generate(target_layer=target_layer)

            for j in range(args.batch_size):
                # NOTE(review): the normalized tensor images[j] is passed as
                # raw_image while the untransformed raw_images[j] is saved
                # separately -- confirm this is intentional.
                gcam_im = save_gradcam(
                    filename=osp.join(
                        args.output_dir,
                        "{}-gradcam-{}_severe_{}.png".format(
                            j, target_layer, i),
                    ),
                    gcam=regions[j, 0],
                    raw_image=images[j],
                )
                img_path = osp.join(
                    args.output_dir,
                    "{}-image-{}_severe_{}.png".format(j, target_layer, i),
                )
                raw_image = np.array(raw_images[j])
                xray = save_xray(img_path, raw_image)
                '''
				# TODO: Blending code
				#cv2.imwrite(img_path, raw_images[j])

				img_path=osp.join(
						args.output_dir,
						"{}-blended-{}_severe_{}.png".format(j, target_layer, i),
						)
				beta = (1.0 - args.alpha)
				xray = np.array(xray)
				xray = xray.squeeze()
				xray = np.stack([xray, xray, xray],axis=2)
				print(xray.dtype)
				print(gcam.dtype)
				blended = cv2.addWeighted(xray, args.alpha, gcam, beta, 0.0)
				save_xray(img_path, blended)
				'''

    print('finished!')
Beispiel #29
0
def main():
    """Visualize one tomato-leaf image with Grad-CAM, vanilla backprop,
    deconvolution and guided backprop / guided Grad-CAM.

    Loads a fine-tuned DenseNet-201 checkpoint (20 classes) and writes the
    top-k visualizations into ``results/``.
    """
    root_path = '/media/palm/Unimportant/pdr2018/typesep_validate/Tomato/'
    image_name = 'c9ebc74c2177ce60a8230855333fb9e7.jpg'
    folder_name = '14_Tomato_Spider_Mite_Damage_Serious'
    # image_path = root_path+'/14_Tomato_Spider_Mite_Damage_Serious/1c0f1ae1374d01c2933069232735a331.jpg'
    image_path = os.path.join(root_path, folder_name, image_name)
    topk = 1
    cuda = 'cuda'
    arch = 'densenet201'
    # Per-architecture settings: last conv block to hook for the CAM, and
    # the input resolution the network expects.
    CONFIG = {
        'resnet152': {
            'target_layer': 'layer4.2',
            'input_size': 224
        },
        'vgg19': {
            'target_layer': 'features.36',
            'input_size': 224
        },
        'vgg19_bn': {
            'target_layer': 'features.52',
            'input_size': 224
        },
        'inception_v3': {
            'target_layer': 'Mixed_7c',
            'input_size': 299
        },
        'densenet201': {
            'target_layer': 'features.denseblock4',
            'input_size': 224
        },
        # Add your model
    }.get(arch)
    a, b, c = getlookup()
    device = torch.device(
        'cuda' if cuda and torch.cuda.is_available() else 'cpu')

    # Fix: check the resolved device, not the string `cuda` ('cuda' is always
    # truthy), so torch.cuda.* is never called on CPU-only machines.
    if device.type == 'cuda':
        current_device = torch.cuda.current_device()
        print('Running on the GPU:',
              torch.cuda.get_device_name(current_device))
    else:
        print('Running on the CPU')

    # Synset words
    classes = c['Tomato']

    # Model: restore the fine-tuned checkpoint and switch to inference mode.
    model = getmodel(20)
    checkpoint = torch.load('checkpoint/try_4_densesep-Tomatotemp.t7')
    model.load_state_dict(checkpoint['net'])
    # Fix: move to the selected device instead of the hard-coded 'cuda'.
    model.to(device)
    model.eval()

    # Image: BGR -> RGB, resize to the model's input size, then
    # ImageNet-normalized tensor with a leading batch dimension.
    raw_image = cv2.imread(image_path)[..., ::-1]
    raw_image = cv2.resize(raw_image, (CONFIG['input_size'], ) * 2)
    image = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        )
    ])(raw_image).unsqueeze(0)

    # =========================================================================
    print('Grad-CAM')
    # =========================================================================
    gcam = GradCAM(model=model)
    probs, idx = gcam.forward(image.to(device))

    for i in range(0, topk):
        gcam.backward(idx=idx[i])
        output = gcam.generate(target_layer=CONFIG['target_layer'])

        save_gradcam(
            'results/{}_{}_gcam_{}.png'.format(image_name, classes[idx[i]],
                                               arch), output, raw_image)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))

    # =========================================================================
    print('Vanilla Backpropagation')
    # =========================================================================
    bp = BackPropagation(model=model)
    probs, idx = bp.forward(image.to(device))

    for i in range(0, topk):
        bp.backward(idx=idx[i])
        output = bp.generate()

        save_gradient(
            'results/{}_{}_bp_{}.png'.format(image_name, classes[idx[i]],
                                             arch), output)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))

    # =========================================================================
    print('Deconvolution')
    # =========================================================================
    deconv = Deconvolution(
        model=copy.deepcopy(model))  # TODO: remove hook func in advance
    probs, idx = deconv.forward(image.to(device))

    for i in range(0, topk):
        deconv.backward(idx=idx[i])
        output = deconv.generate()

        save_gradient(
            'results/{}_{}_deconv_{}.png'.format(image_name, classes[idx[i]],
                                                 arch), output)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))

    # =========================================================================
    print('Guided Backpropagation/Guided Grad-CAM')
    # =========================================================================
    gbp = GuidedBackPropagation(model=model)
    probs, idx = gbp.forward(image.to(device))

    for i in range(0, topk):
        gcam.backward(idx=idx[i])
        region = gcam.generate(target_layer=CONFIG['target_layer'])

        gbp.backward(idx=idx[i])
        feature = gbp.generate()

        # Guided Grad-CAM: mask the guided-backprop gradients with the CAM
        # region upsampled to the gradient map's spatial size.
        h, w, _ = feature.shape
        region = cv2.resize(region, (w, h))[..., np.newaxis]
        output = feature * region

        save_gradient(
            'results/{}_{}_gbp_{}.png'.format(image_name, classes[idx[i]],
                                              arch), feature)
        save_gradient(
            'results/{}_{}_ggcam_{}.png'.format(image_name, classes[idx[i]],
                                                arch), output)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))
Beispiel #30
0
def main(image_path, arch, topk, cuda):
    """Run Grad-CAM, vanilla backprop, deconvolution and guided
    backprop / guided Grad-CAM on a single image with a fine-tuned
    ResNet-style classifier, saving results under ``results/``.

    Args:
        image_path: path of the input image (read with OpenCV).
        arch: torchvision architecture key; must exist in CONFIG below.
        topk: number of top-scoring classes to visualize.
        cuda: truthy to prefer the GPU when available.
    """

    # Per-architecture settings: target layer for the CAM + input size.
    CONFIG = {
        'resnet18': {
            'target_layer': 'layer4.1',
            'input_size': 224
        },
        'resnet152': {
            'target_layer': 'layer4.2',
            'input_size': 224
        },
        'vgg19': {
            'target_layer': 'features.36',
            'input_size': 224
        },
        'vgg19_bn': {
            'target_layer': 'features.52',
            'input_size': 224
        },
        'inception_v3': {
            'target_layer': 'Mixed_7c',
            'input_size': 299
        },
        'densenet201': {
            'target_layer': 'features.denseblock4',
            'input_size': 224
        },
        # Add your model
    }.get(arch)

    device = torch.device(
        'cuda' if cuda and torch.cuda.is_available() else 'cpu')

    if cuda:
        current_device = torch.cuda.current_device()
        print('Running on the GPU:',
              torch.cuda.get_device_name(current_device))
    else:
        print('Running on the CPU')

    # Synset words
    classes = ["other", "rori"]
    #    with open('samples/synset_words.txt') as lines:
    #        for line in lines:
    #            line = line.strip().split(' ', 1)[1]
    #            line = line.split(', ', 1)[0].replace(' ', '_')
    #            classes.append(line)

    # Model
    model = models.__dict__[arch](pretrained=True)
    num_features = model.fc.in_features
    # Replace the classifier head with a 200-unit layer (original note said
    # "this changed the layer to 512->2"; the code actually maps to 200).
    model.fc = nn.Linear(num_features, 200)  #これにより512->2の層に変わった
    # NOTE(review): add_module only *registers* these layers; torchvision's
    # ResNet forward() does not call them unless forward is overridden
    # elsewhere. Presumably the checkpoint below was trained with a matching
    # custom forward — confirm before trusting the 2-class output.
    model.add_module('relu_fc', nn.ReLU())
    model.add_module('fc2', nn.Linear(200, 2))
    param = torch.load('weight_resnet18_3.pth')
    model.load_state_dict(param)

    model.to(device)
    model.eval()

    # Image: BGR -> RGB, resize, ImageNet-normalize, add batch dimension.
    raw_image = cv2.imread(image_path)[..., ::-1]
    raw_image = cv2.resize(raw_image, (CONFIG['input_size'], ) * 2)
    image = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        )
    ])(raw_image).unsqueeze(0)

    # =========================================================================
    print('Grad-CAM')
    # =========================================================================
    gcam = GradCAM(model=model)
    probs, idx = gcam.forward(image.to(device))

    for i in range(0, topk):
        gcam.backward(idx=idx[i])
        output = gcam.generate(target_layer=CONFIG['target_layer'])

        save_gradcam('results/{}_gcam_{}.png'.format(classes[idx[i]], arch),
                     output, raw_image)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))

    # =========================================================================
    print('Vanilla Backpropagation')
    # =========================================================================
    bp = BackPropagation(model=model)
    probs, idx = bp.forward(image.to(device))

    for i in range(0, topk):
        bp.backward(idx=idx[i])
        output = bp.generate()

        save_gradient('results/{}_bp_{}.png'.format(classes[idx[i]], arch),
                      output)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))

    # =========================================================================
    print('Deconvolution')
    # =========================================================================
    deconv = Deconvolution(
        model=copy.deepcopy(model))  # TODO: remove hook func in advance
    probs, idx = deconv.forward(image.to(device))

    for i in range(0, topk):
        deconv.backward(idx=idx[i])
        output = deconv.generate()

        save_gradient('results/{}_deconv_{}.png'.format(classes[idx[i]], arch),
                      output)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))

    # =========================================================================
    print('Guided Backpropagation/Guided Grad-CAM')
    # =========================================================================
    gbp = GuidedBackPropagation(model=model)
    probs, idx = gbp.forward(image.to(device))

    for i in range(0, topk):
        gcam.backward(idx=idx[i])
        region = gcam.generate(target_layer=CONFIG['target_layer'])

        gbp.backward(idx=idx[i])
        feature = gbp.generate()

        # Guided Grad-CAM = guided-backprop gradients masked by the
        # upsampled CAM region.
        h, w, _ = feature.shape
        region = cv2.resize(region, (w, h))[..., np.newaxis]
        output = feature * region

        save_gradient('results/{}_gbp_{}.png'.format(classes[idx[i]], arch),
                      feature)
        save_gradient('results/{}_ggcam_{}.png'.format(classes[idx[i]], arch),
                      output)
        print('[{:.5f}] {}'.format(probs[i], classes[idx[i]]))
Beispiel #31
0
    def validate(self,test_epoch=False):
        """Evaluate the model on the test loader, logging per-sample and
        per-epoch accuracy/recall/F1, and (when ``test_epoch``) exporting
        Grad-CAM attention maps as NIfTI volumes.

        Args:
            test_epoch: when True, output goes to ``<out>/test`` and
                attention volumes are generated; otherwise to
                ``<out>/visualization``.
        """
        # NOTE(review): train() during validation keeps dropout active and
        # updates BatchNorm running stats — looks unintended; eval() is
        # conventional here. Confirm before changing.
        self.model.train()
        if test_epoch:
            out = osp.join(self.out, 'test')
        else:
            out = osp.join(self.out, 'visualization')
        mkdir(out)
        # Three log files: per-sample, per-epoch accuracy, per-epoch metrics.
        log_file = osp.join(out, 'test_accurarcy.txt')
        fv = open(log_file, 'a')
        log_file2 = osp.join(out, 'test_accurarcy_perepoch.txt')
        fv2 = open(log_file2, 'a')
        log_file3 = osp.join(out, 'test_recall_f1_acc_perepoch.txt')
        fv3 = open(log_file3, 'a')
        correct = 0
        correct_binary = 0

        pred_history=[]
        target_history=[]
        loss_history=[]
        sofar = 0  # number of samples seen so far


        for batch_idx, (data,target,sub_name) in tqdm.tqdm(
                # enumerate(self.test_loader), total=len(self.test_loader),
                enumerate(self.test_loader), total=len(self.test_loader),
                desc='Valid epoch=%d' % self.epoch, ncols=80,
                leave=False):

            if self.cuda:
                data, target = data.cuda(), target.cuda()
            # data, target = Variable(data), Variable(target)
            # Legacy pre-0.4 PyTorch API: volatile=True disabled autograd.
            data, target = Variable(data,volatile=True), Variable(target,volatile=True)

            if self.dual_network:
                # Select input channels depending on whether the calcium
                # mask channel is used.
                if self.add_calcium_mask:
                    data = data[:,0:2,: :,:]
                else:
                    data = data[:,0,:,:,:]
                    data = torch.unsqueeze(data, 1)
            pred_prob = self.model(data)

            if test_epoch:
                # Get Grad-CAM attention for the top-k predicted classes.
                gcam = GradCAM(model=self.model)
                probs, idx = gcam.forward(data)
                topk = 3
                target_layer = 'ec6.2'
                # target_layer = 'ec1.2'
                test_attention_out = osp.join(out, target_layer)
                mkdir(test_attention_out)

                # Save the raw image and mask of the first sample as NIfTI.
                input_img = data[0, 0].data.cpu().numpy()
                input_size = (input_img.shape[0], input_img.shape[1], input_img.shape[2])
                input_mask = data[0, 1].data.cpu().numpy()
                nii_img = nib.Nifti1Image(input_img, affine=np.eye(4))
                output_img_file = os.path.join(out, ('%s_img.nii.gz' % sub_name[0]))
                nib.save(nii_img, output_img_file)
                nii_mask = nib.Nifti1Image(input_mask, affine=np.eye(4))
                output_mask_file = os.path.join(out, ('%s_mask.nii.gz' % sub_name[0]))
                nib.save(nii_mask, output_mask_file)
                del input_img,input_mask
                del nii_img, nii_mask

                for i in range(0, topk):
                    gcam.backward(idx=idx[i])
                    output = gcam.generate(target_layer=target_layer)
                    # Upsample the CAM back to the input volume's size.
                    output = resize(output,input_size , mode='constant', preserve_range=True)

                    nii_seg = nib.Nifti1Image(output, affine=np.eye(4))
                    output_att_file = os.path.join(test_attention_out, ('%s_test_att%d_clss%d.nii.gz' % (sub_name[0],i,idx[i])))
                    nib.save(nii_seg, output_att_file)
                # NOTE(review): uses loop variable `i` after the loop, so this
                # only cleans up the last class's backward state — confirm.
                gcam.backward_del(idx=idx[i])
                del gcam, output, nii_seg, probs


                # Export the network's built-in attention maps per sample.
                subnum = data.size(0)
                for subi in range(subnum):
                    attentions = []
                    i = 1
                    self.input = data
                    fmap = self.get_feature_maps('compatibility_score%d' % i, upscale=False)
                    try:
                        attmap = fmap[1][1]
                    except:
                        # NOTE(review): bare except silently swallows a missing
                        # attention map; attmap is then stale/undefined below.
                        aaaa = 1
                    attention = attmap[subi,0].cpu().numpy()
                    # attention = attention[:, :]
                    # attention = numpy.expand_dims(resize(attention, (fmap_size[0], fmap_size[1]), mode='constant', preserve_range=True), axis=2)
                    # Upsample, then min-max normalize to [0, 1].
                    attention = resize(attention, input_size, mode='constant', preserve_range=True)
                    attention = (attention-np.min(attention))/(np.max(attention)-np.min(attention))
                    # this one is useless
                    # plotNNFilter(fmap_0, figure_id=i+3, interp='bilinear', colormap=cm.jet, title='compat. feature %d' %i)

                    nii_seg = nib.Nifti1Image(attention, affine=np.eye(4))
                    output_att_file = os.path.join(out, ('%s_train_att.nii.gz' % sub_name[subi]))
                    nib.save(nii_seg, output_att_file)
                    del nii_seg, fmap, attmap, attention

                # plotNNFilterOverlay(input_img, attention, figure_id=i, interp='bilinear', colormap=cm.jet,
                #                     title='a', alpha=0.5)
                # attentions.append(attention)


            # Multiclass accuracy from log-softmax argmax.
            pred_clss = F.log_softmax(pred_prob)
            pred = pred_clss.data.max(1)[1]  # get th
            correct += pred.eq(target.data).cpu().sum()

            # Binary accuracy: any positive class vs class 0.
            pred_binary = pred>0
            target_binary = target.data>0
            correct_binary += pred_binary.eq(target_binary).cpu().sum()

            sofar += data.size(0)

            test_loss = F.nll_loss(pred_clss, target)

            for batch_num in range(data.size(0)):
            # test_loss /= len(self.test_loader)  # loss function already averages over batch size
                # NOTE(review): test_loss.data[0] is the legacy pre-0.4 way to
                # read a scalar tensor (now .item()).
                results_strs = '[Epoch %04d] True=[%d],Pred=[%d],Pred_prob=%s,Test set: Average loss: %.4f, Accuracy: %d/%d (%.3f) binary (%.3f), subname=[%s]\n' % (
                    self.epoch, target.data.cpu().numpy()[batch_num], pred.cpu().numpy()[batch_num], np.array2string(pred_clss[batch_num].data.cpu().numpy()), test_loss.data[0], correct, sofar,
                    100. * float(correct) / sofar, 100 * float(correct_binary) / sofar, sub_name[batch_num])
                print(results_strs)
                fv.write(results_strs)

            loss_history.append(test_loss.data.cpu().numpy().tolist())
            pred_history += pred_binary.cpu().numpy().tolist()
            target_history += target_binary.data.cpu().numpy().tolist()

        # Epoch-level binary metrics over the whole test set.
        f1 = f1_score(target_history, pred_history)
        recall = recall_score(target_history, pred_history)
        precision = precision_score(target_history, pred_history)
        accuracy = accuracy_score(target_history, pred_history)

        print_str='test epoch='+str(self.epoch)+',accuracy='+str(accuracy)+",f1="+str(f1)+",recall="+str(recall)+',precision='+str(precision)+",loss="+str(np.mean(loss_history))+"\n"

        # NOTE(review): results_strs is the *last* per-sample line and is
        # unbound if the loader is empty (NameError) — confirm intent.
        fv2.write(results_strs)
        fv3.write(print_str)
        fv.close()
        fv2.close()
        fv3.close()
def demo1(image_paths, target_layer, arch, topk, output_dir, cuda):
    """
    Visualize model responses given multiple images
    """

    device = get_device(cuda)

    # Class-index -> human-readable label table.
    classes = get_classtable()

    # Pretrained torchvision model, moved to the device in inference mode.
    model = models.__dict__[arch](pretrained=True)
    model.to(device)
    model.eval()

    # Load all inputs and batch them into a single tensor.
    images, raw_images = load_images(image_paths)
    images = torch.stack(images).to(device)
    """
    Common usage:
    1. Wrap your model with visualization classes defined in grad_cam.py
    2. Run forward() with images
    3. Run backward() with a list of specific classes
    4. Run generate() to export results
    """

    # =========================================================================
    print("Vanilla Backpropagation:")

    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)  # sorted

    for rank in range(topk):
        # Backprop w.r.t. the rank-th predicted class of each image.
        bp.backward(ids=ids[:, [rank]])
        gradients = bp.generate()

        # Save results as image files
        for n in range(len(images)):
            label = classes[ids[n, rank]]
            print("\t#{}: {} ({:.5f})".format(n, label, probs[n, rank]))

            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-vanilla-{}.png".format(n, arch, label),
                ),
                gradient=gradients[n],
            )

    # Remove all the hook function in the "model"
    bp.remove_hook()

    # =========================================================================
    print("Deconvolution:")

    deconv = Deconvnet(model=model)
    _ = deconv.forward(images)

    for rank in range(topk):
        deconv.backward(ids=ids[:, [rank]])
        gradients = deconv.generate()

        for n in range(len(images)):
            label = classes[ids[n, rank]]
            print("\t#{}: {} ({:.5f})".format(n, label, probs[n, rank]))

            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-deconvnet-{}.png".format(n, arch, label),
                ),
                gradient=gradients[n],
            )

    deconv.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for rank in range(topk):
        target_ids = ids[:, [rank]]

        # Guided Backpropagation
        gbp.backward(ids=target_ids)
        gradients = gbp.generate()

        # Grad-CAM
        gcam.backward(ids=target_ids)
        regions = gcam.generate(target_layer=target_layer)

        # Guided Grad-CAM: element-wise product of the two maps.
        guided_gradcam = torch.mul(regions, gradients)

        for n in range(len(images)):
            label = classes[ids[n, rank]]
            print("\t#{}: {} ({:.5f})".format(n, label, probs[n, rank]))

            # Guided Backpropagation
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided-{}.png".format(n, arch, label),
                ),
                gradient=gradients[n],
            )

            # Grad-CAM
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(n, arch, target_layer,
                                                     label),
                ),
                gcam=regions[n, 0],
                raw_image=raw_images[n],
            )

            # Guided Grad-CAM
            save_gradient(
                filename=osp.join(
                    output_dir,
                    "{}-{}-guided_gradcam-{}-{}.png".format(
                        n, arch, target_layer, label),
                ),
                gradient=guided_gradcam[n],
            )
Beispiel #33
0
def test(image_paths, output_dir, cuda):
    """
    Generate Grad-CAM at different layers of ResNet-152
    """
    fold = 0
    device = get_device(cuda)

    # Class-index -> label mapping.
    classes = {0: 'normal', 1: 'covid'}

    # Model: restore the best checkpoint for this fold.
    #model = models.resnet34(pretrained=False,num_classes=config.num_classes)
    best_model = torch.load(config.best_models + config.model_name + os.sep +
                            str(fold) + os.sep + 'model_best.pth.tar')
    print(best_model["state_dict"].keys())
    model = make_model('mynet',
                       num_classes=config.num_classes,
                       pretrained=False)
    #best_model = torch.load(config.weights +'model_best.pth.tar')
    model.load_state_dict(best_model["state_dict"])
    print(best_model["state_dict"].keys())
    model.to(device)
    model.eval()

    # The feature blocks to visualize, shallow to deep.
    target_layers = [
        "_features.0", "_features.1", "_features.2", "_features.3",
        "_features.4", "_features.5", "_features.6", "_features.7"
    ]
    target_class = 1  # visualize w.r.t. the 'covid' class

    # Images
    images, raw_images = load_images(image_paths)
    images = torch.stack(images).to(device)

    gcam = GradCAM(model=model)
    probs, ids = gcam.forward(images)
    # One backward pass w.r.t. the fixed target class for every image.
    ids_ = torch.LongTensor([[target_class]] * len(images)).to(device)
    print(ids_.shape)
    gcam.backward(ids=ids_)

    for target_layer in target_layers:
        print("Generating Grad-CAM @{}".format(target_layer))

        # Grad-CAM
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            # Fix: index the masked probabilities with [j]. The mask selects
            # one element per image, so float() on the whole tensor raises
            # for any batch with more than one image.
            print("\t#{}: {} ({:.5f})".format(
                j, classes[target_class],
                float(probs[ids == target_class][j])))

            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(j, "resnet152",
                                                     target_layer,
                                                     classes[target_class]),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )