def gen_grounding_lime_batch(imgs,
                             model='resnet18',
                             label_name='explanation',
                             from_saved=True,
                             target_index=1,
                             layer='layer4',
                             device=0,
                             topk=True,
                             classes=get_imagenet_classes(),
                             save=True,
                             save_path='./results/gradcam_examples/',
                             show=True):
    # Create the result directory if it doesn't exist; all explanations are
    # stored in a folder named after the predicted/target class index.
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    if save:
        print('result path: {0}'.format(save_path))

    # A string model argument is resolved to a loaded model; otherwise the
    # passed-in module is used directly.
    if isinstance(model, str):
        model_name = model
        model, classes, target_layer = get_model_info(model, device=device)
    else:
        model_name = 'custom'

    # Generate the LIME explanations for the whole batch.
    masks = generate_lime_explanation_batch(imgs, model,
                                            pred_rank=1,
                                            positive_only=True,
                                            show=show,
                                            device='cuda:' + str(device))

    # Heatmap overlays (kept for parity with the other batch functions; they
    # are not saved or displayed here).
    cams = []
    for mask, img in zip(masks, imgs):
        cams += [get_cam(img, mask)]

    if save:
        # When saving, target_index is expected to be a tensor of per-image
        # class ids so each explanation lands in its class folder.
        for i in range(len(imgs)):
            res_path = save_path + str(target_index[i].cpu().numpy()) + '/'
            if not os.path.exists(res_path):
                os.makedirs(res_path)
            cv2.imwrite(res_path + 'original_img.png', get_displ_img(imgs[i]))
            np.save(res_path + "lime_mask.npy", masks[i])

    # Just in case
    torch.cuda.empty_cache()
    return masks
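

# Minimal usage sketch (an illustration, not part of the original pipeline):
# `val_loader` is a hypothetical torch DataLoader over preprocessed image
# tensors and labels. gen_grounding_lime_batch indexes target_index per image
# with .cpu().numpy(), so a tensor of class ids is expected when save=True.
def _example_run_lime_batch(val_loader, device=0):
    imgs, labels = next(iter(val_loader))  # hypothetical DataLoader batch
    return gen_grounding_lime_batch(imgs,
                                    model='resnet18',
                                    target_index=labels,
                                    device=device,
                                    save=True,
                                    save_path='./results/lime_examples/',
                                    show=False)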
def generate_lime_explanation_batch(imgs, model_t, pred_rank=1, positive_only=True, show=True, device='cuda'):
    # batch_predict reads the model from module scope, so publish it there.
    global model
    model = model_t.to(device)
    model.eval()

    # Convert each tensor to a displayable HxWx3 numpy image for LIME.
    masks = []
    displ_imgs = []
    for im in imgs:
        displ_imgs += [get_displ_img(im)]

    explainer = lime_image.LimeImageExplainer()
    for displ_img in displ_imgs:
        explanation = explainer.explain_instance(
            (displ_img / np.max(displ_img).astype(float)),  # rescale to [0, 1]
            batch_predict,  # classification function
            top_labels=pred_rank,
            hide_color=0,
            num_samples=1000)
        print('explained')
        temp, mask = explanation.get_image_and_mask(
            explanation.top_labels[pred_rank - 1],
            positive_only,
            num_features=5,
            hide_rest=False)
        masks += [mask]

    print('finished lime explanation')
    return np.array(masks, dtype=float)
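

# Reference sketch (an assumption, not the repo's actual batch_predict): LIME's
# explain_instance calls classifier_fn on a numpy batch of HxWx3 float images
# and expects an (N, num_classes) array of probabilities back. The ImageNet
# normalization constants below are assumed and may differ from what
# batch_predict really uses.
def _example_make_lime_classifier(net, device='cpu'):
    import numpy as np
    import torch
    import torch.nn.functional as F
    from torchvision import transforms
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    def classify(images):
        # images: iterable of HxWx3 floats in [0, 1] produced by LIME
        batch = torch.stack([preprocess(im.astype(np.float32)) for im in images])
        with torch.no_grad():
            logits = net(batch.to(device))
        return F.softmax(logits, dim=1).cpu().numpy()

    return classify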
def generate_ig_batch(imgs, model, cuda=False, show=True, reg=False, outlines=False, target_index=None):
    """Generate Integrated Gradients explanations for a batch of numpy images."""
    model.eval()

    # Convert to displayable numpy images, then get the gradients and the
    # predicted (or target) label index for each image.
    imgs = [get_displ_img(im) for im in imgs]
    gradients, label_index = calculate_outputs_and_gradients(imgs, model, target_index, cuda)
    gradients = [np.transpose(grad, (1, 2, 0)) for grad in gradients]

    masks = []
    for (grad, img, idx) in zip(gradients, imgs, label_index):
        # Plain gradient visualizations (with and without the image overlay).
        img_gradient_overlay = visualize(grad, img, clip_above_percentile=95, clip_below_percentile=58,
                                         overlay=True, mask_mode=True, outlines=outlines)
        img_gradient = visualize(grad, img, clip_above_percentile=95, clip_below_percentile=58,
                                 overlay=False, outlines=outlines)

        # Calculate the integrated gradients, averaged over random baselines.
        attributions = random_baseline_integrated_gradients(img, model, idx, calculate_outputs_and_gradients,
                                                            steps=50, num_random_trials=10, cuda=cuda)
        img_integrated_gradient_overlay = visualize(attributions, img, clip_above_percentile=95,
                                                    clip_below_percentile=58, morphological_cleanup=True,
                                                    overlay=True, mask_mode=True, outlines=outlines,
                                                    threshold=.01)
        img_integrated_gradient = visualize(attributions, img, clip_above_percentile=95,
                                            clip_below_percentile=58, morphological_cleanup=True,
                                            overlay=False, outlines=outlines, threshold=.01)

        # Composite figure of all visualizations (currently unused here).
        output_img = generate_entrie_images(img, img_gradient, img_gradient_overlay,
                                            img_integrated_gradient, img_integrated_gradient_overlay)

        masks += [attributions]

    if reg:
        # Note: returns the plain-gradient visualizations of the last image only.
        return img_gradient_overlay, img_gradient

    print('finished Integrated Gradients explanation')
    return masks
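

# Plain integrated-gradients sketch for a single image (an assumption about
# what random_baseline_integrated_gradients computes internally, simplified to
# one all-zero baseline instead of averaging over random baselines):
# attributions = (input - baseline) * mean gradient of the target logit along
# the straight-line path from baseline to input. `img` is a CHW float tensor
# and `model` is assumed to already live on `device`.
def _example_integrated_gradients(model, img, target_idx, steps=50, device='cpu'):
    import torch
    baseline = torch.zeros_like(img)
    grads = []
    for alpha in torch.linspace(0.0, 1.0, steps):
        # Interpolate between baseline and input, then take d(logit)/d(pixel).
        x = (baseline + alpha * (img - baseline)).unsqueeze(0).to(device)
        x = x.detach().requires_grad_(True)
        score = model(x)[0, target_idx]
        grads.append(torch.autograd.grad(score, x)[0].squeeze(0))
    avg_grad = torch.stack(grads).mean(dim=0)
    return ((img.to(device) - baseline.to(device)) * avg_grad).detach().cpu().numpy()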
def gen_grounding_bp_batch(imgs,
                           model='resnet18',
                           label_name='explanation',
                           from_saved=True,
                           target_index=1,
                           layer='layer4',
                           device=0,
                           topk=True,
                           classes=get_imagenet_classes(),
                           save=True,
                           save_path='./results/gradcam_examples/',
                           show=True):
    # Create the result directory if it doesn't exist; all explanations are
    # stored in a folder named after the predicted/target class index.
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    if save:
        print('result path: {0}'.format(save_path))

    if isinstance(model, str):
        model_name = model
        model, classes, target_layer = get_model_info(model, device=device)
    else:
        model_name = 'custom'

    # Generate the backprop explanations, either for the top predicted class
    # or for the supplied target indices.
    if topk:
        masks = gen_bp(imgs, model, target_index=target_index, target_layer=layer,
                       device=device, single=False, prep=False, classes=classes)
    else:
        masks = gen_bp_target(imgs, model, target_index=target_index,
                              device=device, single=False, prep=False, classes=classes)

    cams = []
    for mask, img in zip(masks, imgs):
        cams += [get_cam(img, mask)]

    if show:
        # Plot the first four heatmaps in a 2x2 grid.
        fig = plt.figure(figsize=(10, 10))
        grid = ImageGrid(fig, 111,          # similar to subplot(111)
                         nrows_ncols=(2, 2),
                         axes_pad=0.35)     # pad between axes in inches
        for ax, im in zip(grid, cams[:4]):
            ax.axis('off')
            ax.imshow(im)

    if save:
        # When saving, target_index is expected to be a tensor of per-image
        # class ids so each explanation lands in its class folder.
        for i in range(len(imgs)):
            res_path = save_path + str(target_index[i].cpu().numpy()) + '/'
            if not os.path.exists(res_path):
                os.makedirs(res_path)
            cv2.imwrite(res_path + 'original_img.png', get_displ_img(imgs[i]))
            cv2.imwrite(res_path + "bp_mask.png", np.uint8(cams[i] * 255))
            np.save(res_path + "bp_mask.npy", masks[i])

    # Just in case
    torch.cuda.empty_cache()
    return masks
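

# Vanilla-backprop saliency sketch for a single image (an assumption about
# roughly what gen_bp produces; the repo's gen_bp / gen_bp_target may use
# guided backprop or other refinements): the mask is the channel-wise maximum
# of the absolute gradient of the target class score w.r.t. the input pixels.
# `img` is a CHW float tensor and `model` is assumed to already be on `device`.
def _example_backprop_saliency(model, img, target_idx, device='cpu'):
    import torch
    x = img.unsqueeze(0).to(device).detach().requires_grad_(True)
    score = model(x)[0, target_idx]
    grad = torch.autograd.grad(score, x)[0].squeeze(0)
    return grad.abs().max(dim=0)[0].cpu().numpy()  # HxW saliency map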
                                                    'vgg19', label,
                                                    save_path='./results/patch_imagenet/',
                                                    show=False, save=False, device='cuda:7')
        save_path = '/work/lisabdunlap/explain-eval/results/patch_imagenet/{0}/{1}/'.format(idx, str(i))
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        print('saving to ', save_path)
        success = (cv2.imwrite(save_path + 'adv_' + str(i) + '-' + str(adv_idx) + '.png',
                               np.uint8((patch / np.max(patch)) * 255))
                   and cv2.imwrite(save_path + 'orig_' + str(i) + '-' + str(adv_idx) + '.png',
                                   get_displ_img(img)))
        if not success:
            print('error saving')
'''while i < 1:
    img, _ = dataset[start+i]
    top = get_top_prediction('vgg19', Variable(torch.unsqueeze(img, 0).float().to('cuda:7')), device='cuda:7')[1]
    print('pred: ', top)
    if label == top:
        patch, idx, adv_idx = gen_adversarial_patch(img, 'vgg19', label,
                                                    save_path='./results/patch_imagenet/',
                                                    show=False, save=False, device='cuda:7')
        save_path = '/work/lisabdunlap/explain-eval/results/patch_imagenet/{0}/{1}/'.format(idx, str(i))
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        print("this {0} should match this {1}".format(label, idx))
        print('saving to ', save_path)
        success = (cv2.imwrite(save_path+'adv_'+str(i)+'-'+str(adv_idx)+'.png', np.uint8((patch/np.max(patch))*255)) or