Ejemplo n.º 1
0
def main():
    """Run Grad-CAM on one built-in example and display the colourised overlay."""
    example_id = 3  # roof
    (orig_img, preprocessed, cls_idx,
     export_name, model) = get_example_params(example_id)

    # Build the Grad-CAM extractor on conv layer 11 and compute the class
    # activation mask for the example's target class.
    mask = GradCam(model, target_layer=11).generate_cam(preprocessed, cls_idx)

    # Show the heatmap blended onto the input instead of exporting it to disk.
    import misc_functions
    import matplotlib.pyplot as plt
    heatmap, blended = misc_functions.apply_colormap_on_image(
        orig_img, mask, 'winter')
    plt.imshow(blended)
    plt.show()
    print('Grad cam completed')
Ejemplo n.º 2
0
        # Clear any gradients accumulated by earlier backward passes.
        self.model.zero_grad()
        # Target for backprop: a one-hot vector over the output logits so
        # that only target_class contributes to the backward pass.
        one_hot_output = torch.FloatTensor(1, model_output.size()[-1]).zero_()
        one_hot_output[0][target_class] = 1
        # Backward pass
        model_output.backward(gradient=one_hot_output)
        # Convert Pytorch variable to numpy array
        # [0] to get rid of the first channel (1,3,224,224)
        # NOTE(review): self.gradients is presumably filled by a backward
        # hook registered elsewhere in this class — confirm in the full file.
        gradients_as_arr = self.gradients.data.numpy()[0]
        return gradients_as_arr


if __name__ == '__main__':
    # Demo driver: run guided backpropagation on one built-in example image
    # and export colour / grayscale / saliency gradient visualisations.
    target_example = 0  # Snake
    (original_image, prep_img, target_class, file_name_to_export, pretrained_model) =\
        get_example_params(target_example)

    # Guided backprop
    GBP = GuidedBackprop(pretrained_model)
    # Get gradients
    guided_grads = GBP.generate_gradients(prep_img, target_class)
    # Save colored gradients
    save_gradient_images(guided_grads, file_name_to_export + '_Guided_BP_color')
    print("exported to: " + str(os.getcwd()))
    # Convert to grayscale
    grayscale_guided_grads = convert_to_grayscale(guided_grads)
    # Save grayscale gradients
    save_gradient_images(grayscale_guided_grads, file_name_to_export + '_Guided_BP_gray')
    # Positive and negative saliency maps
    pos_sal, neg_sal = get_positive_negative_saliency(guided_grads)
    save_gradient_images(pos_sal, file_name_to_export + '_pos_sal')
    # NOTE(review): neg_sal is computed but its export appears truncated in
    # this snippet — presumably a matching save with '_neg_sal' follows.
Ejemplo n.º 3
0
    # Convert both imaging views to float tensors and combine them into one
    # network input.
    # NOTE(review): X[0]/X[1] are replaced with tensors and then X itself is
    # passed to torch.from_numpy — verify X is still a numeric ndarray here.
    X[0] = torch.from_numpy(X[0]).float()
    X[1] = torch.from_numpy(X[1]).float()
    combined_image = torch.from_numpy(X).float()
    net.eval()
    softmax = torch.nn.Softmax(dim=1)

    # Forward pass only — no gradients needed for the prediction itself.
    with torch.no_grad():
        net.zero_grad()
        output = net(torch.unsqueeze(combined_image, 0).to(device))
        output_softmax = softmax(output)
        pred_prob = output_softmax[:, 1]  # probability of class index 1

    print("Probability of surgery:::{:6.3f}".format(float(pred_prob)))
    # Binarise the prediction at 0.5 to pick the class the CAM explains.
    target_class = 1 if pred_prob >= 0.5 else 0

    original_image_sag = get_example_params(img_path_sag)
    original_image_trans = get_example_params(img_path_trans)

    # Grad cam

    sag_filename_id = os.path.join(outdir, sag_path)
    trans_filename_id = os.path.join(outdir, trans_path)

    # One CAM per view: sagittal first, then transverse.
    grad_cam = GradCam(net, view='sag')
    # Generate cam mask
    cam = grad_cam.generate_cam(combined_image, target_class)
    # Save mask
    save_class_activation_images(original_image_sag, cam, sag_filename_id)

    grad_cam = GradCam(net, view='trans')
    # Generate cam mask
Ejemplo n.º 4
0
        # NOTE(review): this division produces NaNs/warnings when the map is
        # constant (max == min) — confirm upstream guarantees variation.
        cam = (cam - np.min(cam)) / (np.max(cam) - np.min(cam)
                                     )  # Normalize between 0-1
        cam = np.uint8(cam * 255)  # Scale between 0-255 to visualize
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; modern
        # Pillow needs Image.LANCZOS (Image.Resampling.LANCZOS) instead.
        cam = np.uint8(
            Image.fromarray(cam).resize(
                (input_image.shape[2], input_image.shape[3]),
                Image.ANTIALIAS))  #/255
        # ^ I am extremely unhappy with this line. Originally resizing was done in cv2 which
        # supports resizing numpy matrices with antialiasing, however,
        # when I moved the repository to PIL, this option was out of the window.
        # So, in order to use resizing with ANTIALIAS feature of PIL,
        # I briefly convert matrix to PIL image and then back.
        # If there is a more beautiful way, do not hesitate to send a PR.
        return cam


if __name__ == '__main__':
    # Demo: Grad-CAM on the cat_dog example through a ResNet-50 backbone.
    example_id = 1  # cat_dog
    (orig_img, preprocessed, cls_idx,
     export_name, model) = get_example_params(example_id, 'resnet50')

    # Hook the last residual stage of the ResNet and compute the CAM mask
    # for the example's target class.
    extractor = GradCam(model, target_layer='layer4')
    mask = extractor.generate_cam(preprocessed, cls_idx)

    # Persist the overlay images under the example's export name.
    save_class_activation_images(orig_img, mask, export_name)
    print('Grad cam completed')
    Args:
        grad_cam_mask (np_arr): Class activation map mask
        guided_backprop_mask (np_arr): Guided backprop mask
    """
    # Guided Grad-CAM: element-wise product of the CAM weights and the
    # guided-backprop gradients.
    cam_gb = np.multiply(grad_cam_mask, guided_backprop_mask)
    return cam_gb


if __name__ == '__main__':
    # Get params
    class_no = 6  #denseresidential
    image_no = 84
    check_target_class = 1

    (original_image, prep_img, target_class, file_name_to_export, pretrained_model) =\
        get_example_params(class_no,image_no,check_target_class)

    # Grad cam
    target_layer = 35
    gcv2 = GradCam(pretrained_model, target_layer=target_layer)
    # Generate cam mask
    cam = gcv2.generate_cam(prep_img, target_class)
    print('Grad cam completed')

    # Guided backprop
    GBP = GuidedBackprop(pretrained_model)
    # Get gradients
    guided_grads = GBP.generate_gradients(prep_img, target_class)
    print('Guided backpropagation completed')

    # Guided Grad cam
    # NOTE(review): `args`, `filter_pos` and `out_of_range` are used below but
    # never defined in this snippet — confirm they exist in the full script.
    path = args.data_root_dir

    # Sweep the requested layer range over every class folder / image;
    # NOTE(review): `target_class` is rebound from the example above to the
    # directory name here — the earlier value is intentionally discarded?
    for cnn_layer in range(args.k_start, args.k_stop):
        if out_of_range:
            break

        print('cnn_layer = ' + str(cnn_layer))

        for target_class in os.listdir(path):
            if out_of_range:
                break
            class_path = os.path.join(path, target_class)
            for img in os.listdir(class_path):

                img_path = os.path.join(class_path, img)
                (original_image, prep_img, image_name) = get_example_params(img_path, args.n_classes)

                # File export name
                # NOTE(review): this assignment is repeated verbatim inside
                # the try block below — one of the two is redundant.
                file_name_to_export =  'layer_' + str(cnn_layer) + '_class_' + str(target_class) + '_' + image_name + '_filter' + str(filter_pos)

                try:
                    # File export name
                    file_name_to_export =  'layer_' + str(cnn_layer) + '_class_' + str(target_class) + '_' + image_name + '_filter' + str(filter_pos)
                    # Guided backprop
                    GBP = GuidedBackprop(pretrained_model)
                    # Get gradients
                    guided_grads, out_of_range = GBP.generate_gradients(prep_img, target_class, cnn_layer, filter_pos)
                    if out_of_range:
                        print('The requested layer value is out of range for the selected model !')
                        break
                    # Save colored gradients
Ejemplo n.º 7
0
    # Select the CNN architecture and load pretrained weights from the
    # given checkpoint path.
    pretrained_model, model_name = model_selection(args.model, args.n_classes,
                                                   args.model_path)
    print('Using model ' + model_name)
    # Presumably accumulates (class, image_name, layer) rows for images whose
    # visualisation fails — TODO confirm against the code after the try below.
    failure_dset = pd.DataFrame({'class': [], 'image_name': [], 'layer': []})
    out_of_range = False

    # Sweep the requested range of conv layers over every class folder/image.
    for cnn_layer in range(args.k_start, args.k_stop):
        print('cnn_layer = ' + str(cnn_layer))
        for target_class in os.listdir(path):
            class_path = os.path.join(path, target_class)
            for img in os.listdir(class_path):
                img_path = os.path.join(class_path, img)

                (original_image, prep_img,
                 image_name) = get_example_params(img_path)
                file_name_to_export = 'layer_' + str(
                    cnn_layer) + '_class_' + str(
                        target_class) + '_' + image_name

                # Grad cam
                # grad_cam = GradCam(pretrained_model, target_layer=cnn_layer)
                # # Generate cam mask
                # cam, out_of_range = grad_cam.generate_cam(prep_img, int(target_class))
                # if out_of_range:
                #     print('The requested layer value is out of range for the selected model !')
                #     break
                # # Save mask
                # save_class_activation_images(original_image, cam, file_name_to_export)

                try:
Ejemplo n.º 8
0
            # Get the target score: softmax probability of target_class when
            # the input is masked by this activation's normalised saliency map.
            w = F.softmax(self.extractor.forward_pass(input_image *
                                                      norm_saliency_map)[1],
                          dim=1)[0][target_class]
            # Weight the activation map by its score and accumulate.
            cam += w.data.numpy() * target[i, :, :].data.numpy()
        cam = np.maximum(cam, 0)  # ReLU: keep only positive evidence
        cam = (cam - np.min(cam)) / (np.max(cam) - np.min(cam)
                                     )  # Normalize between 0-1
        cam = np.uint8(cam * 255)  # Scale between 0-255 to visualize
        # Resize to the input's spatial size, then rescale back to [0, 1].
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; modern
        # Pillow needs Image.LANCZOS (Image.Resampling.LANCZOS) instead.
        cam = np.uint8(
            Image.fromarray(cam).resize(
                (input_image.shape[2], input_image.shape[3]),
                Image.ANTIALIAS)) / 255
        return cam

    pass


if __name__ == '__main__':
    # Demo: Score-CAM on example 1, exporting the activation overlays.
    example_id = 1  # Snake
    (orig_img, preprocessed, cls_idx,
     export_name, model) = get_example_params(example_id)

    # Build the Score-CAM extractor on conv layer 11 and compute the mask.
    mask = ScoreCam(model, target_layer=11).generate_cam(preprocessed, cls_idx)

    # Write the colourised overlay images under the example's export name.
    save_class_activation_images(orig_img, mask, export_name)
    print('Score cam completed')