Example #1
def get_top_prediction(model_name, img):
    if isinstance(model_name, str):
        model, classes, layer = get_model_info(model_name)
    else:
        model = model_name
        # A model instance was passed directly; assume ImageNet labels so that
        # `classes` is defined in both branches (otherwise the return line fails).
        classes = get_imagenet_classes()
    logits = model(img)
    probs = F.softmax(logits, dim=1)
    prediction = probs.topk(1)
    return classes[prediction[1][0].detach().cpu().numpy()[0]]
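A minimal usage sketch (hedged: the image file, the preprocessing, and the assumption that the model returned by get_model_info accepts a CPU tensor are illustrative, not part of the example above):

from PIL import Image
import torch
from torchvision import transforms

preprocess = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])
img = preprocess(Image.open('cat.jpg').convert('RGB')).unsqueeze(0)  # illustrative path

with torch.no_grad():
    print(get_top_prediction('resnet18', img))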
Example #2
def gen_grounding_lime_batch(imgs,
                             model='resnet18',
                             label_name='explanation',
                             from_saved=True,
                             target_index=1,
                             layer='layer4',
                             device=0,
                             topk=True,
                             classes=get_imagenet_classes(),
                             save=True,
                             save_path='./results/gradcam_examples/',
                             show=True):
    #CUDA_VISIBLE_DEVICES=str(device)
    # Create result directory if it doesn't exist; all explanations should
    # be stored in a folder that is the predicted class

    if not os.path.exists(save_path):
        os.makedirs(save_path)

    if save:
        print('result path: {0}'.format(save_path))

    if isinstance(model, str):
        model_name = model
        model, classes, target_layer = get_model_info(model, device=device)
    else:
        model_name = 'custom'

    # Generate the explanations
    masks = generate_lime_explanation_batch(imgs,
                                            model,
                                            pred_rank=1,
                                            positive_only=True,
                                            show=show,
                                            device='cuda:' + str(device))

    cams = []
    for mask, img in zip(masks, imgs):
        cams += [get_cam(img, mask)]

    if save:
        # Note: this branch indexes target_index per image, so it expects a
        # tensor/sequence of class indices rather than the scalar default.
        for i in range(len(imgs)):
            res_path = save_path + str(target_index[i].cpu().numpy()) + '/'
            if not os.path.exists(res_path):
                os.makedirs(res_path)
            #print("saving explanation mask....\n")
            cv2.imwrite(res_path + 'original_img.png', get_displ_img(imgs[i]))
            np.save(res_path + "lime_mask.npy", masks[i])

    #just in case
    torch.cuda.empty_cache()

    return masks
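A hedged call sketch for the LIME batch helper, assuming CUDA device 0 is available; the dummy images and target class ids are illustrative, and target_index is passed as a tensor because the save branch indexes it per image:

import numpy as np
import torch

imgs = [np.uint8(np.random.rand(224, 224, 3) * 255) for _ in range(2)]  # dummy RGB images
targets = torch.tensor([243, 281])  # illustrative ImageNet class ids
masks = gen_grounding_lime_batch(imgs,
                                 model='resnet18',
                                 target_index=targets,
                                 device=0,
                                 save=True,
                                 show=False)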
Example #3
def get_model_layer(model_name):
    return get_model_info(model_name)[2]
Example #4
def get_model(model_name):
    return get_model_info(model_name)[0]
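Both helpers above are thin wrappers that unpack the (model, classes, layer) triple returned by get_model_info. A minimal sketch; the expected layer name is an assumption about ResNet-style backbones:

model = get_model('resnet18')        # same as get_model_info('resnet18')[0]
layer = get_model_layer('resnet18')  # same as get_model_info('resnet18')[2]
print(type(model).__name__, layer)   # e.g. ResNet layer4 (assumption)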
Example #5
def gen_grounding_bp_batch(imgs,
                           model='resnet18',
                           label_name='explanation',
                           from_saved=True,
                           target_index=1,
                           layer='layer4',
                           device=0,
                           topk=True,
                           classes=get_imagenet_classes(),
                           save=True,
                           save_path='./results/gradcam_examples/',
                           show=True):
    #CUDA_VISIBLE_DEVICES=str(device)
    # Create result directory if it doesn't exist; all explanations should
    # be stored in a folder that is the predicted class
    dateTimeObj = datetime.now()
    timestampStr = dateTimeObj.strftime("%d-%b-%Y_%H")  # note: timestamp is not used below
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    if save:
        print('result path: {0}'.format(save_path))

    if isinstance(model, str):
        model_name = model
        model, classes, target_layer = get_model_info(model, device=device)
    else:
        model_name = 'custom'

    # Generate the explanations
    if topk:
        masks = gen_bp(imgs,
                       model,
                       target_index=target_index,
                       target_layer=layer,
                       device=device,
                       single=False,
                       prep=False,
                       classes=classes)
    else:
        masks = gen_bp_target(imgs,
                              model,
                              target_index=target_index,
                              device=device,
                              single=False,
                              prep=False,
                              classes=classes)

    cams = []
    for mask, img in zip(masks, imgs):
        cams += [get_cam(img, mask)]

    if show:
        #plot heatmaps
        fig = plt.figure(figsize=(10, 10))
        grid = ImageGrid(
            fig,
            111,  # similar to subplot(111)
            nrows_ncols=(2, 2),
            axes_pad=0.35,  # pad between axes in inch.
        )

        for ax, im in zip(grid, cams[:4]):
            ax.axis('off')
            # Iterating over the grid returns the Axes.
            ax.imshow(im)

    if save:
        # As in the LIME batch variant, target_index must be a per-image tensor of class indices here.
        for i in range(len(imgs)):
            res_path = save_path + str(target_index[i].cpu().numpy()) + '/'
            if not os.path.exists(res_path):
                os.makedirs(res_path)
            #print("saving explanation mask....\n")
            cv2.imwrite(res_path + 'original_img.png', get_displ_img(imgs[i]))
            cv2.imwrite(res_path + "bp_mask.png", np.uint8(cams[i] * 255))
            np.save(res_path + "bp_mask.npy", masks[i])

    #just in case
    torch.cuda.empty_cache()

    return masks
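A minimal call sketch for the batch backprop variant, assuming CUDA device 0 and dummy inputs; with topk=True the explanation targets each image's own top prediction, while the save branch still expects target_index as a per-image tensor:

import numpy as np
import torch

imgs = [np.uint8(np.random.rand(224, 224, 3) * 255) for _ in range(4)]  # dummy RGB images
targets = torch.tensor([1, 1, 1, 1])  # illustrative class ids
masks = gen_grounding_bp_batch(imgs,
                               model='resnet18',
                               target_index=targets,
                               device=0,
                               topk=True,
                               save=False,
                               show=False)

Example #6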
def gen_grounding(img,
                  technique,
                  model='resnet18',
                  show=False,
                  layer='layer4',
                  save_path='./results/explanation',
                  target_index=1,
                  save=True,
                  device=0,
                  index=False):
    # Create result directory if it doesn't exist; all explanations should
    # be stored in a folder that is the predicted class

    if save_path[-1] != '/':
        save_path += '/'
    if save:
        if not os.path.exists(save_path):
            os.makedirs(save_path)

        print('result path: {0}'.format(save_path))

    # Convert float images in [0, 1] to uint8 in [0, 255] if needed
    if np.max(img) < 2:
        img = np.uint8(img * 255)
    if isinstance(model, str):
        model_name = model
        model, classes, layer = get_model_info(model, device=device)
    else:
        model_name = 'custom'
    model.eval()

    device = 'cuda:' + str(device)
    if not torch.cuda.is_available():
        device = 'cpu'

    # Generate the explanations
    if technique == 'lime' or technique == 'LIME':
        if not index:
            mask = generate_lime_explanation(img,
                                             model,
                                             pred_rank=target_index,
                                             positive_only=True,
                                             device=device)
        else:
            mask = generate_lime_explanation(img,
                                             model,
                                             pred_rank=target_index,
                                             target_index=target_index,
                                             positive_only=True,
                                             device=device)
    elif technique == 'gradcam' or technique == 'GradCam' or technique == 'gcam':
        if not index:
            mask = gen_gcam([img],
                            model,
                            target_index=target_index,
                            target_layer=layer)
        else:
            mask = gen_gcam_target([img],
                                   model,
                                   target_index=[target_index],
                                   target_layer=layer)
    elif technique == 'backprop' or technique == 'bp':
        if not index:
            mask = gen_bp([img], model, target_index=target_index)
        else:
            mask = gen_bp_target([img], model, target_index=[target_index])
    elif technique == 'guided_backprop' or technique == 'gbp':
        if not index:
            mask = gen_gbp([img], model, target_index=target_index)
        else:
            mask = gen_gbp_target([img], model, target_index=[target_index])
    elif technique == 'deconv' or technique == 'deconvolution':
        if not index:
            mask = gen_deconv([img], model, target_index=target_index)
        else:
            mask = gen_deconv_target([img], model, target_index=[target_index])
    elif technique == 'ig' or technique == 'integrated-gradients':
        if not index:
            mask = generate_ig(img, model, cuda=device)
        else:
            mask = generate_ig(img,
                               model,
                               target_index=target_index,
                               cuda=device)
    elif technique == 'rise' or technique == 'RISE':
        mask = gen_rise_grounding(img,
                                  model,
                                  index=target_index,
                                  device=device)
    else:
        print('ERROR: invalid explainability technique {0}'.format(technique))
        return

    # For visualization, zero out the bottom 90% of values.
    # Copy first so the returned/saved mask is not modified in place.
    if technique in ['bp', 'gbp', 'deconv', 'ig']:
        displ_mask = mask.copy()
        displ_mask[displ_mask < np.percentile(displ_mask, 90)] = 0
    else:
        displ_mask = mask

    cam = get_cam(img, displ_mask)
    if show:
        plt.axis('off')
        cam = cv2.resize(cam, (224, 224))
        plt.imshow(cam)

    if save:
        print("saving explanation mask....\n")
        np.save(os.path.join(save_path, 'original_img'), img)
        cv2.imwrite(os.path.join(save_path, 'original_img.png'), img)
        np.save(os.path.join(save_path, technique + '-' + model_name), mask)
        if not cv2.imwrite(
                os.path.join(save_path, technique + '-' + str(model_name) + '.png'),
                cam * 255):
            print('error saving explanation')
        print('saved to {0}'.format(
            os.path.join(save_path, technique + '-' + model_name)))

    return mask
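A single-image sketch for this gen_grounding variant; the dummy array stands in for a real RGB image, and technique may be any of the strings handled above ('lime', 'gcam', 'bp', 'gbp', 'deconv', 'ig', 'rise'):

import numpy as np

img = np.uint8(np.random.rand(224, 224, 3) * 255)  # illustrative input
mask = gen_grounding(img,
                     technique='gcam',
                     model='resnet18',
                     target_index=1,
                     save=False,
                     show=True,
                     device=0)

Example #7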
def gen_grounding(img,
                  technique,
                  label_name='explanation',
                  model='resnet18',
                  show=False,
                  reg=False,
                  layer='layer4',
                  save_path='./results/master_examples/',
                  target_index=1,
                  unique_id=None,
                  patch=False,
                  save=True,
                  device=5,
                  index=False):
    # Create result directory if it doesn't exist; all explanations should
    # be stored in a folder that is the predicted class

    save_path += label_name + '/'
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    if patch:
        save_path = os.path.join(save_path + 'patch/')
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    if save:
        print('result path: {0}'.format(save_path))

    # convert image if needed
    if np.max(img) < 2:
        img = np.uint8(img * 255)
    if isinstance(model, str):
        model_name = model
        model, classes, layer = get_model_info(model, device=device)
    else:
        model_name = 'custom'
    model.eval()

    device = 'cuda:' + str(device)
    if not torch.cuda.is_available():
        device = 'cpu'

    # Generate the explanations
    if technique == 'lime' or technique == 'LIME':
        if not index:
            mask = generate_lime_explanation(img,
                                             model,
                                             pred_rank=target_index,
                                             positive_only=True,
                                             device=device)
        else:
            mask = generate_lime_explanation(img,
                                             model,
                                             pred_rank=target_index,
                                             target_index=target_index,
                                             positive_only=True,
                                             device=device)
    elif technique == 'gradcam' or technique == 'GradCam' or technique == 'gcam':
        if not index:
            mask = gen_gcam([img],
                            model,
                            target_index=target_index,
                            show_labels=True,
                            target_layer=layer)
        else:
            mask = gen_gcam_target([img],
                                   model,
                                   target_index=[target_index],
                                   target_layer=layer)
    elif technique == 'backprop' or technique == 'bp':
        if not index:
            mask = gen_bp([img],
                          model,
                          target_index=target_index,
                          show_labels=True,
                          target_layer=layer)
        else:
            mask = gen_bp_target([img], model, target_index=[target_index])
    elif technique == 'guided_backprop' or technique == 'gbp':
        if not index:
            mask = gen_gbp([img],
                           model,
                           target_index=target_index,
                           show_labels=True,
                           target_layer=layer)
        else:
            mask = gen_gbp_target([img], model, target_index=[target_index])
    elif technique == 'ig' or technique == 'integrated-gradients':
        if not index:
            mask = generate_ig(img, model, cuda=device)
        else:
            mask = generate_ig(img,
                               model,
                               target_index=target_index,
                               cuda=device)
    elif technique == 'rise' or technique == 'RISE':
        mask = gen_rise_grounding(img,
                                  model,
                                  index=target_index,
                                  device=device)
    else:
        print('ERROR: invalid explainability technique {0}'.format(technique))
        return

    print('after ', mask.shape)
    cam = get_cam(img, mask)
    if show:
        plt.axis('off')
        cam = cv2.resize(cam, (224, 224))
        plt.imshow(cam)

    if save:
        print("saving explanation mask....\n")
        np.save(os.path.join(save_path, 'original_img'), img)
        cv2.imwrite(os.path.join(save_path, 'original_img.png'), img)
        np.save(os.path.join(save_path, technique + '-' + model_name), mask)
        if not cv2.imwrite(
                os.path.join(save_path, technique + '-' + str(model_name) + '.png'),
                cam * 255):
            print('error saving explanation')
        print('saved to {0}'.format(
            os.path.join(save_path, technique + '-' + model_name)))

    return mask
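This second gen_grounding variant mainly differs in grouping results under label_name (plus an optional patch/ subfolder) and in passing show_labels=True to some explainers. A hedged sketch; the label_name value is illustrative:

import numpy as np

img = np.uint8(np.random.rand(224, 224, 3) * 255)  # illustrative input
mask = gen_grounding(img,
                     technique='lime',
                     label_name='goldfish',
                     model='resnet18',
                     target_index=1,
                     patch=False,
                     save=True,
                     device=0)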
Example #8
def gen_adversarial_patch(img,
                          model_name,
                          label_name,
                          save_path='./results/patch_imagenet/',
                          show=True,
                          save=True,
                          device='cuda'):

    # Create result directory if it doesn't exist; all explanations should
    # be stored in a folder that is the predicted class
    save_path = save_path + str(label_name) + '/patch/'
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    # Setting the seed for reproducibility for demo
    # Uncomment the four lines below to make the target category deterministic across runs
    #np.random.seed(1)
    #torch.manual_seed(1)
    #torch.backends.cudnn.deterministic = True
    #torch.backends.cudnn.benchmark = False

    # Can work with any model, but it assumes that the model has a feature method,
    # and a classifier method, as in the VGG models in torchvision
    pretrained_net, classes, net_layer = get_model_info(model_name)
    #gradcam_attack = GradCamAttack(model=pretrained_net, target_layer_names=[net_layer])
    gradcam_reg_patch_attack = GradCamRegPatchAttack(
        model=pretrained_net, target_layer_names=[net_layer], device=device)
    #gradcam = GradCam(model=pretrained_net, target_layer_names=[net_layer])
    pretrained_net = pretrained_net.to(device)
    pretrained_net = pretrained_net.eval()

    # Create result directory if it doesn't exist
    if save:
        if not os.path.exists(save_path):
            os.makedirs(save_path)

    # Read the input image and preprocess to a tensor
    #img = cv2.imread(args.image_path, 1)
    #img = np.float32(cv2.resize(img, (224, 224))) / 255
    preprocessed_img = preprocess_image(img, device=device)

    # Get the original prediction index and the corresponding probability
    orig_index, orig_prob = forward_inference(pretrained_net, preprocessed_img)

    # Pick a random target from the remaining 999 categories excluding the original prediction
    list_of_idx = np.delete(np.arange(1000), orig_index)
    rand_idx = np.random.randint(999)
    target_index = list_of_idx[rand_idx]

    preprocess = transforms.Compose([
        lambda x: Image.fromarray(x.astype('uint8'), 'RGB'),
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        # Normalization for ImageNet
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    # Compute the regular adv patch attack image and the corresponding GradCAM
    reg_patch_adv_img, reg_patch_adv_tensor = gradcam_reg_patch_attack(
        preprocessed_img, orig_index, target_index)
    test_img = np.uint8((reg_patch_adv_img / np.max(reg_patch_adv_img)) * 255)
    reg_patch_pred_index, reg_patch_pred_prob = forward_inference(
        pretrained_net,
        preprocess_image(preprocess(test_img[:, :, ::-1]), device=device))
    print("original index: {0}    adv index: {1}".format(
        orig_index, reg_patch_pred_index))

    # save adversarial image
    if save:
        #cv2.imwrite(os.path.join(save_path + 'patch_image-%s.png'%datetime.now().strftime('%Y-%m-%d-%H-%M')),
        #            np.uint8(255 * np.clip(reg_patch_adv_img[:, :, ::-1], 0, 1)))
        # Note: np.save appends '.npy' to the given name, so the file is saved as '...png.npy'
        np.save(
            os.path.join(save_path + 'patch_image-%s.png' %
                         datetime.now().strftime('%Y-%m-%d-%H-%M')),
            reg_patch_adv_img)

    # Generate the GradCAM heatmap for the target category using the regular patch adversarial image
    # reg_patch_adv_mask = gradcam(reg_patch_adv_tensor, target_index)
    #gcam_expl, reg_patch_adv_mask = gen_grounding(reg_patch_adv_img, 'vgg19_bn', 'gcam', label_name, show=True)
    if show:
        plt.imshow(reg_patch_adv_img)

    print('finished generating adversarial patch')
    return reg_patch_adv_img, orig_index, target_index
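A usage sketch that follows the commented-out preprocessing above (a float RGB image in [0, 1] resized to 224x224); the image path and label_name are illustrative:

import cv2
import numpy as np

img = cv2.imread('cat.jpg', 1)
img = np.float32(cv2.resize(img, (224, 224))) / 255
adv_img, orig_idx, target_idx = gen_adversarial_patch(img,
                                                      model_name='vgg19_bn',
                                                      label_name='goldfish',
                                                      save=False,
                                                      show=False,
                                                      device='cuda')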